gem5 v24.0.0.0
Loading...
Searching...
No Matches
tlb.cc
Go to the documentation of this file.
1/*
2 * Copyright (c) 2001-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include "arch/sparc/tlb.hh"
30
31#include <cstring>
32
33#include "arch/sparc/asi.hh"
34#include "arch/sparc/faults.hh"
36#include "arch/sparc/mmu.hh"
38#include "arch/sparc/types.hh"
39#include "base/bitfield.hh"
40#include "base/compiler.hh"
41#include "base/trace.hh"
42#include "cpu/base.hh"
43#include "cpu/thread_context.hh"
44#include "debug/IPR.hh"
45#include "debug/TLB.hh"
46#include "mem/packet_access.hh"
47#include "mem/page_table.hh"
48#include "mem/request.hh"
49#include "sim/full_system.hh"
50#include "sim/process.hh"
51#include "sim/system.hh"
52
53namespace gem5
54{
55
56/* @todo remove some of the magic constants. -- ali
57 * */
58namespace SparcISA {
59
61 : BaseTLB(p), size(p.size), usedEntries(0), lastReplaced(0),
62 cacheState(0), cacheValid(false)
63{
64 // To make this work you'll have to change the hypervisor and OS
65 if (size > 64)
66 fatal("SPARC T1 TLB registers don't support more than 64 TLB entries");
67
68 tlb = new TlbEntry[size];
69 std::memset((void *)tlb, 0, sizeof(TlbEntry) * size);
70
71 for (int x = 0; x < size; x++)
72 freeList.push_back(&tlb[x]);
73
74 c0_tsb_ps0 = 0;
75 c0_tsb_ps1 = 0;
76 c0_config = 0;
77 cx_tsb_ps0 = 0;
78 cx_tsb_ps1 = 0;
79 cx_config = 0;
80 sfsr = 0;
81 tag_access = 0;
82 sfar = 0;
83 cacheEntry[0] = NULL;
84 cacheEntry[1] = NULL;
85}
86
87void
89{
90 MapIter i;
91 for (i = lookupTable.begin(); i != lookupTable.end(); i++) {
92 TlbEntry *t = i->second;
93 if (!t->pte.locked()) {
94 t->used = false;
96 }
97 }
98}
99
100
101void
102TLB::insert(Addr va, int partition_id, int context_id, bool real,
103 const PageTableEntry& PTE, int entry)
104{
105 MapIter i;
106 TlbEntry *new_entry = NULL;
107 int x;
108
109 cacheValid = false;
110 va &= ~(PTE.size()-1);
111
112 DPRINTF(TLB,
113 "TLB: Inserting Entry; va=%#x pa=%#x pid=%d cid=%d r=%d entryid=%d\n",
114 va, PTE.paddr(), partition_id, context_id, (int)real, entry);
115
116 // Demap any entry that conflicts
117 for (x = 0; x < size; x++) {
118 if (tlb[x].range.real == real &&
119 tlb[x].range.partitionId == partition_id &&
120 tlb[x].range.va < va + PTE.size() - 1 &&
121 tlb[x].range.va + tlb[x].range.size >= va &&
122 (real || tlb[x].range.contextId == context_id ))
123 {
124 if (tlb[x].valid) {
125 freeList.push_front(&tlb[x]);
126 DPRINTF(TLB, "TLB: Conflicting entry %#X , deleting it\n", x);
127
128 tlb[x].valid = false;
129 if (tlb[x].used) {
130 tlb[x].used = false;
131 usedEntries--;
132 }
133 lookupTable.erase(tlb[x].range);
134 }
135 }
136 }
137
138 if (entry != -1) {
139 assert(entry < size && entry >= 0);
140 new_entry = &tlb[entry];
141 } else {
142 if (!freeList.empty()) {
143 new_entry = freeList.front();
144 } else {
145 x = lastReplaced;
146 do {
147 ++x;
148 if (x == size)
149 x = 0;
150 if (x == lastReplaced)
151 goto insertAllLocked;
152 } while (tlb[x].pte.locked());
153 lastReplaced = x;
154 new_entry = &tlb[x];
155 }
156 }
157
158insertAllLocked:
159 // Update the last ently if their all locked
160 if (!new_entry) {
161 new_entry = &tlb[size-1];
162 }
163
164 freeList.remove(new_entry);
165 if (new_entry->valid && new_entry->used)
166 usedEntries--;
167 if (new_entry->valid)
168 lookupTable.erase(new_entry->range);
169
170
171 assert(PTE.valid());
172 new_entry->range.va = va;
173 new_entry->range.size = PTE.size() - 1;
174 new_entry->range.partitionId = partition_id;
175 new_entry->range.contextId = context_id;
176 new_entry->range.real = real;
177 new_entry->pte = PTE;
178 new_entry->used = true;;
179 new_entry->valid = true;
180 usedEntries++;
181
182 i = lookupTable.insert(new_entry->range, new_entry);
183 assert(i != lookupTable.end());
184
185 // If all entries have their used bit set, clear it on them all,
186 // but the one we just inserted
187 if (usedEntries == size) {
189 new_entry->used = true;
190 usedEntries++;
191 }
192}
193
194
196TLB::lookup(Addr va, int partition_id, bool real, int context_id,
197 bool update_used)
198{
199 MapIter i;
200 TlbRange tr;
201 TlbEntry *t;
202
203 DPRINTF(TLB, "TLB: Looking up entry va=%#x pid=%d cid=%d r=%d\n",
204 va, partition_id, context_id, real);
205 // Assemble full address structure
206 tr.va = va;
207 tr.size = 1;
208 tr.contextId = context_id;
209 tr.partitionId = partition_id;
210 tr.real = real;
211
212 // Try to find the entry
213 i = lookupTable.find(tr);
214 if (i == lookupTable.end()) {
215 DPRINTF(TLB, "TLB: No valid entry found\n");
216 return NULL;
217 }
218
219 // Mark the entries used bit and clear other used bits in needed
220 t = i->second;
221 DPRINTF(TLB, "TLB: Valid entry found pa: %#x size: %#x\n", t->pte.paddr(),
222 t->pte.size());
223
224 // Update the used bits only if this is a real access (not a fake
225 // one from virttophys()
226 if (!t->used && update_used) {
227 t->used = true;
228 usedEntries++;
229 if (usedEntries == size) {
231 t->used = true;
232 usedEntries++;
233 }
234 }
235
236 return t;
237}
238
239void
241{
242 MapIter i;
243 for (int x = 0; x < size; x++) {
244 if (tlb[x].valid) {
245 DPRINTFN("%4d: %#2x:%#2x %c %#4x %#8x %#8x %#16x\n",
246 x, tlb[x].range.partitionId, tlb[x].range.contextId,
247 tlb[x].range.real ? 'R' : ' ', tlb[x].range.size,
248 tlb[x].range.va, tlb[x].pte.paddr(), tlb[x].pte());
249 }
250 }
251}
252
253void
254TLB::demapPage(Addr va, int partition_id, bool real, int context_id)
255{
256 TlbRange tr;
257 MapIter i;
258
259 DPRINTF(IPR, "TLB: Demapping Page va=%#x pid=%#d cid=%d r=%d\n",
260 va, partition_id, context_id, real);
261
262 cacheValid = false;
263
264 // Assemble full address structure
265 tr.va = va;
266 tr.size = 1;
267 tr.contextId = context_id;
268 tr.partitionId = partition_id;
269 tr.real = real;
270
271 // Demap any entry that conflicts
272 i = lookupTable.find(tr);
273 if (i != lookupTable.end()) {
274 DPRINTF(IPR, "TLB: Demapped page\n");
275 i->second->valid = false;
276 if (i->second->used) {
277 i->second->used = false;
278 usedEntries--;
279 }
280 freeList.push_front(i->second);
282 }
283}
284
285void
286TLB::demapContext(int partition_id, int context_id)
287{
288 DPRINTF(IPR, "TLB: Demapping Context pid=%#d cid=%d\n",
289 partition_id, context_id);
290 cacheValid = false;
291 for (int x = 0; x < size; x++) {
292 if (tlb[x].range.contextId == context_id &&
293 tlb[x].range.partitionId == partition_id) {
294 if (tlb[x].valid) {
295 freeList.push_front(&tlb[x]);
296 }
297 tlb[x].valid = false;
298 if (tlb[x].used) {
299 tlb[x].used = false;
300 usedEntries--;
301 }
302 lookupTable.erase(tlb[x].range);
303 }
304 }
305}
306
307void
308TLB::demapAll(int partition_id)
309{
310 DPRINTF(TLB, "TLB: Demapping All pid=%#d\n", partition_id);
311 cacheValid = false;
312 for (int x = 0; x < size; x++) {
313 if (tlb[x].valid && !tlb[x].pte.locked() &&
314 tlb[x].range.partitionId == partition_id) {
315 freeList.push_front(&tlb[x]);
316 tlb[x].valid = false;
317 if (tlb[x].used) {
318 tlb[x].used = false;
319 usedEntries--;
320 }
321 lookupTable.erase(tlb[x].range);
322 }
323 }
324}
325
326void
328{
329 cacheValid = false;
331
332 for (int x = 0; x < size; x++) {
333 if (tlb[x].valid)
334 freeList.push_back(&tlb[x]);
335 tlb[x].valid = false;
336 tlb[x].used = false;
337 }
338 usedEntries = 0;
339}
340
341uint64_t
342TLB::TteRead(int entry)
343{
344 if (entry >= size)
345 panic("entry: %d\n", entry);
346
347 assert(entry < size);
348 if (tlb[entry].valid)
349 return tlb[entry].pte();
350 else
351 return (uint64_t)-1ll;
352}
353
354uint64_t
355TLB::TagRead(int entry)
356{
357 assert(entry < size);
358 uint64_t tag;
359 if (!tlb[entry].valid)
360 return (uint64_t)-1ll;
361
362 tag = tlb[entry].range.contextId;
363 tag |= tlb[entry].range.va;
364 tag |= (uint64_t)tlb[entry].range.partitionId << 61;
365 tag |= tlb[entry].range.real ? 1ULL << 60 : 0;
366 tag |= (uint64_t)~tlb[entry].pte._size() << 56;
367 return tag;
368}
369
370bool
372{
373 if (am)
374 return true;
375 if (va >= StartVAddrHole && va <= EndVAddrHole)
376 return false;
377 return true;
378}
379
380void
381TLB::writeSfsr(bool write, ContextType ct, bool se, FaultTypes ft, int asi)
382{
383 if (sfsr & 0x1)
384 sfsr = 0x3;
385 else
386 sfsr = 1;
387
388 if (write)
389 sfsr |= 1 << 2;
390 sfsr |= ct << 4;
391 if (se)
392 sfsr |= 1 << 6;
393 sfsr |= ft << 7;
394 sfsr |= asi << 16;
395}
396
397void
399{
400 DPRINTF(TLB, "TLB: Writing Tag Access: va: %#X ctx: %#X value: %#X\n",
401 va, context, mbits(va, 63,13) | mbits(context,12,0));
402
403 tag_access = mbits(va, 63,13) | mbits(context,12,0);
404}
405
406void
408 bool se, FaultTypes ft, int asi)
409{
410 DPRINTF(TLB, "TLB: Fault: A=%#x w=%d ct=%d ft=%d asi=%d\n",
411 a, (int)write, ct, ft, asi);
412 TLB::writeSfsr(write, ct, se, ft, asi);
413 sfar = a;
414}
415
// Translate an instruction fetch: fast-path via the one-entry ITB
// cache, then privilege/alignment/VA checks, then a full TLB lookup.
// NOTE(review): doxygen extraction dropped several lines from this
// function (gaps in the embedded numbering); they are flagged inline
// below and must be restored from upstream gem5 before compiling.
416Fault
// NOTE(review): dropped line 417 — the signature, presumably
// TLB::translateInst(const RequestPtr &req, ThreadContext *tc).
418{
419 uint64_t tlbdata = tc->readMiscRegNoEffect(MISCREG_TLB_DATA);
420
421 Addr vaddr = req->getVaddr();
422 TlbEntry *e;
423
424 assert(req->getArchFlags() == ASI_IMPLICIT);
425
426 DPRINTF(TLB, "TLB: ITB Request to translate va=%#x size=%d\n",
427 vaddr, req->getSize());
428
429 // Be fast if we can!
430 if (cacheValid && cacheState == tlbdata) {
431 if (cacheEntry[0]) {
432 if (cacheEntry[0]->range.va < vaddr + sizeof(MachInst) &&
// NOTE(review): dropped line 433 — the second half of this range
// check (upper-bound comparison on cacheEntry[0]) plus the opening
// brace; restore from upstream.
434 req->setPaddr(cacheEntry[0]->pte.translate(vaddr));
435 return NoFault;
436 }
437 } else {
438 req->setPaddr(vaddr & PAddrImplMask);
439 return NoFault;
440 }
441 }
442
443 bool hpriv = bits(tlbdata,0,0);
444 bool red = bits(tlbdata,1,1);
445 bool priv = bits(tlbdata,2,2);
446 bool addr_mask = bits(tlbdata,3,3);
447 bool lsu_im = bits(tlbdata,4,4);
448
449 int part_id = bits(tlbdata,15,8);
450 int tl = bits(tlbdata,18,16);
451 int pri_context = bits(tlbdata,47,32);
452 int context;
453 ContextType ct;
454 int asi;
455 bool real = false;
456
457 DPRINTF(TLB, "TLB: priv:%d hpriv:%d red:%d lsuim:%d part_id: %#X\n",
458 priv, hpriv, red, lsu_im, part_id);
459
460 if (tl > 0) {
461 asi = ASI_N;
462 ct = Nucleus;
463 context = 0;
464 } else {
465 asi = ASI_P;
466 ct = Primary;
467 context = pri_context;
468 }
469
470 if ( hpriv || red ) {
471 cacheValid = true;
472 cacheState = tlbdata;
473 cacheEntry[0] = NULL;
474 req->setPaddr(vaddr & PAddrImplMask);
475 return NoFault;
476 }
477
478 // If the access is unaligned trap
479 if (vaddr & 0x3) {
480 writeSfsr(false, ct, false, OtherFault, asi);
481 return std::make_shared<MemAddressNotAligned>();
482 }
483
484 if (addr_mask)
// NOTE(review): dropped line 485 — the body of this if (the vaddr
// address-mask truncation); as shown, the next statement would wrongly
// become the conditional body. Restore from upstream before use.
486
487 if (!validVirtualAddress(vaddr, addr_mask)) {
488 writeSfsr(false, ct, false, VaOutOfRange, asi);
489 return std::make_shared<InstructionAccessException>();
490 }
491
492 if (!lsu_im) {
493 e = lookup(vaddr, part_id, true);
494 real = true;
495 context = 0;
496 } else {
497 e = lookup(vaddr, part_id, false, context);
498 }
499
500 if (e == NULL || !e->valid) {
501 writeTagAccess(vaddr, context);
502 if (real) {
503 return std::make_shared<InstructionRealTranslationMiss>();
504 } else {
505 if (FullSystem)
506 return std::make_shared<FastInstructionAccessMMUMiss>();
507 else
508 return std::make_shared<FastInstructionAccessMMUMiss>(
509 req->getVaddr());
510 }
511 }
512
513 // were not priviledged accesing priv page
514 if (!priv && e->pte.priv()) {
515 writeTagAccess(vaddr, context);
516 writeSfsr(false, ct, false, PrivViolation, asi);
517 return std::make_shared<InstructionAccessException>();
518 }
519
520 // cache translation date for next translation
521 cacheValid = true;
522 cacheState = tlbdata;
523 cacheEntry[0] = e;
524
525 req->setPaddr(e->pte.translate(vaddr));
526 DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
527 return NoFault;
528}
529
// Translate a data access. Handles the hyperprivileged/implicit fast
// path, a two-entry translation cache, ASI decoding (with goto-based
// dispatch to memory-mapped register handlers), privilege and
// alignment checks, and finally the TLB lookup proper.
// NOTE(review): doxygen extraction dropped several lines (numbering
// gaps flagged inline); restore from upstream gem5 before compiling.
530Fault
531TLB::translateData(const RequestPtr &req, ThreadContext *tc, bool write)
532{
533 /*
534 * @todo this could really use some profiling and fixing to make
535 * it faster!
536 */
537 uint64_t tlbdata = tc->readMiscRegNoEffect(MISCREG_TLB_DATA);
538 Addr vaddr = req->getVaddr();
539 Addr size = req->getSize();
540 ASI asi;
541 asi = (ASI)req->getArchFlags();
542 bool implicit = false;
543 bool hpriv = bits(tlbdata,0,0);
544 bool unaligned = vaddr & (size - 1);
545
546 DPRINTF(TLB, "TLB: DTB Request to translate va=%#x size=%d asi=%#x\n",
547 vaddr, size, asi);
548
// Sanity check: valid mappings plus free entries must account for the
// whole 64-entry structure.
549 if (lookupTable.size() != 64 - freeList.size())
550 panic("Lookup table size: %d tlb size: %d\n", lookupTable.size(),
551 freeList.size());
552 if (asi == ASI_IMPLICIT)
553 implicit = true;
554
555 // Only use the fast path here if there doesn't need to be an unaligned
556 // trap later
557 if (!unaligned) {
558 if (hpriv && implicit) {
559 req->setPaddr(vaddr & PAddrImplMask);
560 return NoFault;
561 }
562
563 // Be fast if we can!
564 if (cacheValid && cacheState == tlbdata) {
565
566
567
568 if (cacheEntry[0]) {
569 TlbEntry *ce = cacheEntry[0];
570 Addr ce_va = ce->range.va;
571 if (cacheAsi[0] == asi &&
572 ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
573 (!write || ce->pte.writable())) {
574 req->setPaddr(ce->pte.translate(vaddr));
575 if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1) {
576 req->setFlags(
// NOTE(review): dropped line 577 — the request flag constants for
// this setFlags() call (uncacheable/strictly-ordered); restore from
// upstream.
578 }
579 DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
580 return NoFault;
581 } // if matched
582 } // if cache entry valid
583 if (cacheEntry[1]) {
584 TlbEntry *ce = cacheEntry[1];
585 Addr ce_va = ce->range.va;
586 if (cacheAsi[1] == asi &&
587 ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
588 (!write || ce->pte.writable())) {
589 req->setPaddr(ce->pte.translate(vaddr));
590 if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1) {
591 req->setFlags(
// NOTE(review): dropped line 592 — same setFlags() argument as the
// cacheEntry[0] path above.
593 }
594 DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
595 return NoFault;
596 } // if matched
597 } // if cache entry valid
598 }
599 }
600
601 bool red = bits(tlbdata,1,1);
602 bool priv = bits(tlbdata,2,2);
603 bool addr_mask = bits(tlbdata,3,3);
604 bool lsu_dm = bits(tlbdata,5,5);
605
606 int part_id = bits(tlbdata,15,8);
607 int tl = bits(tlbdata,18,16);
608 int pri_context = bits(tlbdata,47,32);
609 int sec_context = bits(tlbdata,63,48);
610
611 bool real = false;
612 ContextType ct = Primary;
613 int context = 0;
614
615 TlbEntry *e;
616
617 DPRINTF(TLB, "TLB: priv:%d hpriv:%d red:%d lsudm:%d part_id: %#X\n",
618 priv, hpriv, red, lsu_dm, part_id);
619
620 if (implicit) {
621 if (tl > 0) {
622 asi = ASI_N;
623 ct = Nucleus;
624 context = 0;
625 } else {
626 asi = ASI_P;
627 ct = Primary;
628 context = pri_context;
629 }
630 } else {
631 // We need to check for priv level/asi priv
632 if (!priv && !hpriv && !asiIsUnPriv(asi)) {
633 // It appears that context should be Nucleus in these cases?
634 writeSfsr(vaddr, write, Nucleus, false, IllegalAsi, asi);
635 return std::make_shared<PrivilegedAction>();
636 }
637
638 if (!hpriv && asiIsHPriv(asi)) {
639 writeSfsr(vaddr, write, Nucleus, false, IllegalAsi, asi);
640 return std::make_shared<DataAccessException>();
641 }
642
643 if (asiIsPrimary(asi)) {
644 context = pri_context;
645 ct = Primary;
646 } else if (asiIsSecondary(asi)) {
647 context = sec_context;
648 ct = Secondary;
649 } else if (asiIsNucleus(asi)) {
650 ct = Nucleus;
651 context = 0;
652 } else { // ????
653 ct = Primary;
654 context = pri_context;
655 }
656 }
657
658 if (!implicit && asi != ASI_P && asi != ASI_S) {
659 if (asiIsLittle(asi))
660 panic("Little Endian ASIs not supported\n");
661
662 if (asiIsPartialStore(asi))
663 panic("Partial Store ASIs not supported\n");
664
665 if (asiIsCmt(asi))
666 panic("Cmt ASI registers not implmented\n");
667
668 if (asiIsInterrupt(asi))
669 goto handleIntRegAccess;
670 if (asiIsMmu(asi))
671 goto handleMmuRegAccess;
672 if (asiIsScratchPad(asi))
673 goto handleScratchRegAccess;
674 if (asiIsQueue(asi))
675 goto handleQueueRegAccess;
676 if (asiIsSparcError(asi))
677 goto handleSparcErrorRegAccess;
678
679 if (!asiIsReal(asi) && !asiIsNucleus(asi) && !asiIsAsIfUser(asi) &&
680 !asiIsTwin(asi) && !asiIsBlock(asi) && !asiIsNoFault(asi))
681 panic("Accessing ASI %#X. Should we?\n", asi);
682 }
683
684 // If the asi is unaligned trap
685 if (unaligned) {
686 writeSfsr(vaddr, false, ct, false, OtherFault, asi);
687 return std::make_shared<MemAddressNotAligned>();
688 }
689
690 if (addr_mask)
// NOTE(review): dropped line 691 — the address-mask truncation of
// vaddr; as shown, the next statement would wrongly become the
// conditional body. Restore from upstream.
692
693 if (!validVirtualAddress(vaddr, addr_mask)) {
694 writeSfsr(vaddr, false, ct, true, VaOutOfRange, asi);
695 return std::make_shared<DataAccessException>();
696 }
697
698 if ((!lsu_dm && !hpriv && !red) || asiIsReal(asi)) {
699 real = true;
700 context = 0;
701 }
702
703 if (hpriv && (implicit || (!asiIsAsIfUser(asi) && !asiIsReal(asi)))) {
704 req->setPaddr(vaddr & PAddrImplMask);
705 return NoFault;
706 }
707
708 e = lookup(vaddr, part_id, real, context);
709
710 if (e == NULL || !e->valid) {
711 writeTagAccess(vaddr, context);
712 DPRINTF(TLB, "TLB: DTB Failed to find matching TLB entry\n");
713 if (real) {
714 return std::make_shared<DataRealTranslationMiss>();
715 } else {
716 if (FullSystem)
717 return std::make_shared<FastDataAccessMMUMiss>();
718 else
719 return std::make_shared<FastDataAccessMMUMiss>(
720 req->getVaddr());
721 }
722
723 }
724
725 if (!priv && e->pte.priv()) {
726 writeTagAccess(vaddr, context);
727 writeSfsr(vaddr, write, ct, e->pte.sideffect(), PrivViolation, asi);
728 return std::make_shared<DataAccessException>();
729 }
730
731 if (write && !e->pte.writable()) {
732 writeTagAccess(vaddr, context);
733 writeSfsr(vaddr, write, ct, e->pte.sideffect(), OtherFault, asi);
734 return std::make_shared<FastDataAccessProtection>();
735 }
736
737 if (e->pte.nofault() && !asiIsNoFault(asi)) {
738 writeTagAccess(vaddr, context);
739 writeSfsr(vaddr, write, ct, e->pte.sideffect(), LoadFromNfo, asi);
740 return std::make_shared<DataAccessException>();
741 }
742
743 if (e->pte.sideffect() && asiIsNoFault(asi)) {
744 writeTagAccess(vaddr, context);
745 writeSfsr(vaddr, write, ct, e->pte.sideffect(), SideEffect, asi);
746 return std::make_shared<DataAccessException>();
747 }
748
749 if (e->pte.sideffect() || (e->pte.paddr() >> 39) & 1)
// NOTE(review): dropped line 750 — the req->setFlags(...) body of
// this if (matching the cached-path flag handling above); restore
// from upstream.
751
752 // cache translation date for next translation
753 cacheState = tlbdata;
754 if (!cacheValid) {
755 cacheEntry[1] = NULL;
756 cacheEntry[0] = NULL;
757 }
758
759 if (cacheEntry[0] != e && cacheEntry[1] != e) {
760 cacheEntry[1] = cacheEntry[0];
761 cacheEntry[0] = e;
762 cacheAsi[1] = cacheAsi[0];
763 cacheAsi[0] = asi;
764 if (implicit)
765 cacheAsi[0] = (ASI)0;
766 }
767 cacheValid = true;
768 req->setPaddr(e->pte.translate(vaddr));
769 DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
770 return NoFault;
771
// NOTE(review): dropped line 772 here (blank or comment per the
// numbering gap).
773handleIntRegAccess:
774 if (!hpriv) {
775 writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
776 if (priv)
777 return std::make_shared<DataAccessException>();
778 else
779 return std::make_shared<PrivilegedAction>();
780 }
781
782 if ((asi == ASI_SWVR_UDB_INTR_W && !write) ||
783 (asi == ASI_SWVR_UDB_INTR_R && write)) {
784 writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
785 return std::make_shared<DataAccessException>();
786 }
787
788 goto regAccessOk;
789
790
791handleScratchRegAccess:
792 if (vaddr > 0x38 || (vaddr >= 0x20 && vaddr < 0x30 && !hpriv)) {
793 writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
794 return std::make_shared<DataAccessException>();
795 }
796 goto regAccessOk;
797
798handleQueueRegAccess:
799 if (!priv && !hpriv) {
800 writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
801 return std::make_shared<PrivilegedAction>();
802 }
803 if ((!hpriv && vaddr & 0xF) || vaddr > 0x3f8 || vaddr < 0x3c0) {
804 writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
805 return std::make_shared<DataAccessException>();
806 }
807 goto regAccessOk;
808
809handleSparcErrorRegAccess:
810 if (!hpriv) {
811 writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
812 if (priv)
813 return std::make_shared<DataAccessException>();
814 else
815 return std::make_shared<PrivilegedAction>();
816 }
817 goto regAccessOk;
818
819
820regAccessOk:
821handleMmuRegAccess:
// Memory-mapped register access: route the packet to doMmuRegRead /
// doMmuRegWrite via a local accessor instead of a memory translation.
822 DPRINTF(TLB, "TLB: DTB Translating local access\n");
823 req->setLocalAccessor(
824 [this,write](ThreadContext *tc, PacketPtr pkt) -> Cycles
825 {
826 return write ? doMmuRegWrite(tc, pkt) : doMmuRegRead(tc, pkt);
827 }
828 );
829 req->setPaddr(req->getVaddr());
830 return NoFault;
831};
832
833Fault
836{
837 if (mode == BaseMMU::Execute)
838 return translateInst(req, tc);
839 else
840 return translateData(req, tc, mode == BaseMMU::Write);
841}
842
// Functional (debugger-style) translation: check the TLB first, then
// fall back to walking the TSBs, without raising architectural faults
// along the way when a mapping is found.
// NOTE(review): doxygen extraction dropped several lines (numbering
// gaps flagged inline); restore from upstream gem5 before compiling.
843Fault
// NOTE(review): dropped lines 844-845 — the signature, presumably
// TLB::translateFunctional(const RequestPtr &req, ThreadContext *tc,
// BaseMMU::Mode mode).
846{
847 Addr vaddr = req->getVaddr();
848
849 // Here we have many options and are really implementing something like
850 // a fill handler to find the address since there isn't a multilevel
851 // table for us to walk around.
852 //
853 // 1. We are currently hyperpriv, return the address unmodified
854 // 2. The mmu is off return(ra->pa)
855 // 3. We are currently priv, use ctx0* tsbs to find the page
856 // 4. We are not priv, use ctxN0* tsbs to find the page
857 // For all accesses we check the tlbs first since it's possible that
858 // long standing pages (e.g. locked kernel mappings) won't be in the tsb
859 uint64_t tlbdata = tc->readMiscRegNoEffect(MISCREG_TLB_DATA);
860
861 bool hpriv = bits(tlbdata,0,0);
862 // bool priv = bits(tlbdata,2,2);
863 bool addr_mask = bits(tlbdata,3,3);
864 bool data_real = !bits(tlbdata,5,5);
865 bool inst_real = !bits(tlbdata,4,4);
866 bool ctx_zero = bits(tlbdata,18,16) > 0;
867 int part_id = bits(tlbdata,15,8);
868 int pri_context = bits(tlbdata,47,32);
869 // int sec_context = bits(tlbdata,63,48);
870
871 bool real = (mode == BaseMMU::Execute) ? inst_real : data_real;
872
873 TlbEntry* tbe;
874 PageTableEntry pte;
875 Addr tsbs[4];
876 Addr va_tag;
877 TteTag ttetag;
878
879 if (hpriv) {
880 req->setPaddr(vaddr);
881 return NoFault;
882 }
883
884 if (addr_mask)
// NOTE(review): dropped line 885 — the address-mask truncation of
// vaddr (body of this if); restore from upstream.
886
887 if (!validVirtualAddress(vaddr, addr_mask)) {
888 if (mode == BaseMMU::Execute)
889 return std::make_shared<InstructionAccessException>();
890 else
891 return std::make_shared<DataAccessException>();
892 }
893
894 tbe = lookup(vaddr, part_id, real, ctx_zero ? 0 : pri_context, false);
895 if (tbe) {
896 pte = tbe->pte;
897 DPRINTF(TLB, "Virtual(%#x)->Physical(%#x) found in TLB\n", vaddr,
898 pte.translate(vaddr));
899 req->setPaddr(pte.translate(vaddr));
900 return NoFault;
901 }
902
903 if (!FullSystem)
904 return tc->getProcessPtr()->pTable->translate(req);
905
// NOTE(review): dropped line 906 — likely the declaration of the
// port proxy `mem` used below (it is otherwise undeclared here);
// restore from upstream.
907 // We didn't find it in the tlbs, so lets look at the TSBs
908 GetTsbPtr(tc, vaddr, ctx_zero ? 0 : pri_context, tsbs);
909 va_tag = bits(vaddr, 63, 22);
910 for (int x = 0; x < 4; x++) {
911 ttetag = betoh(mem.read<uint64_t>(tsbs[x]));
912 if (ttetag.valid() && ttetag.va() == va_tag) {
913 uint64_t entry = mem.read<uint64_t>(tsbs[x]) + sizeof(uint64_t);
914 // I think it's sun4v at least!
// NOTE(review): dropped line 915 — the pte.populate(...) call that
// consumes `entry` (it is otherwise unused); restore from upstream.
916 DPRINTF(TLB, "Virtual(%#x)->Physical(%#x) found in TTE\n",
917 vaddr, pte.translate(vaddr));
918 req->setPaddr(pte.translate(vaddr));
919 return NoFault;
920 }
921 }
922
923 if (mode == BaseMMU::Execute) {
924 if (real)
925 return std::make_shared<InstructionRealTranslationMiss>();
926 else if (FullSystem)
927 return std::make_shared<FastInstructionAccessMMUMiss>();
928 else
929 return std::make_shared<FastInstructionAccessMMUMiss>(vaddr);
930 } else {
931 if (real)
932 return std::make_shared<DataRealTranslationMiss>();
933 else if (FullSystem)
934 return std::make_shared<FastDataAccessMMUMiss>();
935 else
936 return std::make_shared<FastDataAccessMMUMiss>(vaddr);
937 }
938}
939
940void
943{
944 assert(translation);
945 translation->finish(translateAtomic(req, tc, mode), req, tc, mode);
946}
947
948Fault
951{
952 return NoFault;
953}
954
// Handle an atomic read of a memory-mapped MMU register, dispatching
// on the ASI (and sub-address) of the packet. `itb` gives access to
// the instruction-side TLB's registers from this (data) TLB.
// NOTE(review): doxygen extraction dropped the signature (line 956)
// and a large number of `case` labels / register-read statements
// throughout the switch (every gap in the embedded numbering below);
// this body is NOT compilable as shown — restore the missing lines
// from upstream gem5.
955Cycles
957{
958 Addr va = pkt->getAddr();
959 ASI asi = (ASI)pkt->req->getArchFlags();
960 uint64_t temp;
961
962 DPRINTF(IPR, "Memory Mapped IPR Read: asi=%#X a=%#x\n",
963 (uint32_t)pkt->req->getArchFlags(), pkt->getAddr());
964
965 TLB *itb = static_cast<TLB *>(tc->getMMUPtr()->itb);
966
967 switch (asi) {
// NOTE(review): dropped case label (line 968) and register read (970).
969 assert(va == 0);
971 break;
972 case ASI_MMU:
973 switch (va) {
974 case 0x8:
// NOTE(review): dropped register read (line 975).
976 break;
977 case 0x10:
// NOTE(review): dropped register read (line 978).
979 break;
980 default:
981 goto doMmuReadError;
982 }
983 break;
984 case ASI_QUEUE:
// NOTE(review): dropped first half of the setBE(...) call (line 985).
986 (va >> 4) - 0x3c));
987 break;
// NOTE(review): from here each pair of TSB/config reads is missing its
// `case` label (lines 988, 992, 996, 1000, 1004, 1008, 1012, 1016,
// 1020, 1024, 1028, 1032, 1036).
989 assert(va == 0);
990 pkt->setBE(c0_tsb_ps0);
991 break;
993 assert(va == 0);
994 pkt->setBE(c0_tsb_ps1);
995 break;
997 assert(va == 0);
998 pkt->setBE(c0_config);
999 break;
1001 assert(va == 0);
1002 pkt->setBE(itb->c0_tsb_ps0);
1003 break;
1005 assert(va == 0);
1006 pkt->setBE(itb->c0_tsb_ps1);
1007 break;
1009 assert(va == 0);
1010 pkt->setBE(itb->c0_config);
1011 break;
1013 assert(va == 0);
1014 pkt->setBE(cx_tsb_ps0);
1015 break;
1017 assert(va == 0);
1018 pkt->setBE(cx_tsb_ps1);
1019 break;
1021 assert(va == 0);
1022 pkt->setBE(cx_config);
1023 break;
1025 assert(va == 0);
1026 pkt->setBE(itb->cx_tsb_ps0);
1027 break;
1029 assert(va == 0);
1030 pkt->setBE(itb->cx_tsb_ps1);
1031 break;
1033 assert(va == 0);
1034 pkt->setBE(itb->cx_config);
1035 break;
1037 pkt->setBE((uint64_t)0);
1038 break;
1039 case ASI_HYP_SCRATCHPAD:
1040 case ASI_SCRATCHPAD:
1041 pkt->setBE(tc->readMiscReg(MISCREG_SCRATCHPAD_R0 + (va >> 3)));
1042 break;
1043 case ASI_IMMU:
1044 switch (va) {
1045 case 0x0:
1046 temp = itb->tag_access;
1047 pkt->setBE(bits(temp,63,22) | bits(temp,12,0) << 48);
1048 break;
1049 case 0x18:
1050 pkt->setBE(itb->sfsr);
1051 break;
1052 case 0x30:
1053 pkt->setBE(itb->tag_access);
1054 break;
1055 default:
1056 goto doMmuReadError;
1057 }
1058 break;
1059 case ASI_DMMU:
1060 switch (va) {
1061 case 0x0:
1062 temp = tag_access;
1063 pkt->setBE(bits(temp,63,22) | bits(temp,12,0) << 48);
1064 break;
1065 case 0x18:
1066 pkt->setBE(sfsr);
1067 break;
1068 case 0x20:
1069 pkt->setBE(sfar);
1070 break;
1071 case 0x30:
1072 pkt->setBE(tag_access);
1073 break;
1074 case 0x80:
// NOTE(review): dropped register read (line 1075).
1076 break;
1077 default:
1078 goto doMmuReadError;
1079 }
1080 break;
// NOTE(review): dropped case labels before each MakeTsbPtr block
// (lines 1081, 1089, 1097, 1105) and before the interrupt blocks
// (1113, 1121), plus the getInterruptController argument lines
// (1117, 1125).
1082 pkt->setBE(MakeTsbPtr(Ps0,
1083 tag_access,
1084 c0_tsb_ps0,
1085 c0_config,
1086 cx_tsb_ps0,
1087 cx_config));
1088 break;
1090 pkt->setBE(MakeTsbPtr(Ps1,
1091 tag_access,
1092 c0_tsb_ps1,
1093 c0_config,
1094 cx_tsb_ps1,
1095 cx_config));
1096 break;
1098 pkt->setBE(MakeTsbPtr(Ps0,
1099 itb->tag_access,
1100 itb->c0_tsb_ps0,
1101 itb->c0_config,
1102 itb->cx_tsb_ps0,
1103 itb->cx_config));
1104 break;
1106 pkt->setBE(MakeTsbPtr(Ps1,
1107 itb->tag_access,
1108 itb->c0_tsb_ps1,
1109 itb->c0_config,
1110 itb->cx_tsb_ps1,
1111 itb->cx_config));
1112 break;
1114 {
1115 SparcISA::Interrupts * interrupts =
1116 dynamic_cast<SparcISA::Interrupts *>(
1118 pkt->setBE(interrupts->get_vec(IT_INT_VEC));
1119 }
1120 break;
1122 {
1123 SparcISA::Interrupts * interrupts =
1124 dynamic_cast<SparcISA::Interrupts *>(
1126 temp = findMsbSet(interrupts->get_vec(IT_INT_VEC));
1127 tc->getCpuPtr()->clearInterrupt(0, IT_INT_VEC, temp);
1128 pkt->setBE(temp);
1129 }
1130 break;
1131 default:
1132doMmuReadError:
1133 panic("need to impl DTB::doMmuRegRead() got asi=%#x, va=%#x\n",
1134 (uint32_t)asi, va);
1135 }
1136 pkt->makeAtomicResponse();
1137 return Cycles(1);
1138}
1139
// Handle an atomic write to a memory-mapped MMU register: context and
// TSB registers, scratchpads, TLB data-in/data-access inserts, and
// the IMMU/DMMU demap operations, dispatched on the ASI.
// NOTE(review): doxygen extraction dropped the signature (line 1141)
// and many `case` labels / register-write statements throughout the
// switch (every gap in the embedded numbering below); this body is
// NOT compilable as shown — restore the missing lines from upstream
// gem5.
1140Cycles
1142{
1143 uint64_t data = pkt->getBE<uint64_t>();
1144 Addr va = pkt->getAddr();
1145 ASI asi = (ASI)pkt->req->getArchFlags();
1146
1147 Addr ta_insert;
1148 Addr va_insert;
1149 Addr ct_insert;
1150 int part_insert;
1151 int entry_insert = -1;
1152 bool real_insert;
1153 bool ignore;
1154 int part_id;
1155 int ctx_id;
1156 PageTableEntry pte;
1157
1158 DPRINTF(IPR, "Memory Mapped IPR Write: asi=%#X a=%#x d=%#X\n",
1159 (uint32_t)asi, va, data);
1160
1161 TLB *itb = static_cast<TLB *>(tc->getMMUPtr()->itb);
1162
1163 switch (asi) {
// NOTE(review): dropped case label (line 1164) and register write
// (1166).
1165 assert(va == 0);
1167 break;
1168 case ASI_MMU:
1169 switch (va) {
1170 case 0x8:
// NOTE(review): dropped register write (line 1171).
1172 break;
1173 case 0x10:
// NOTE(review): dropped register write (line 1174).
1175 break;
1176 default:
1177 goto doMmuWriteError;
1178 }
1179 break;
1180 case ASI_QUEUE:
1181 assert(mbits(data,13,6) == data);
// NOTE(review): dropped first half of the setMiscReg(...) call
// (line 1182).
1183 (va >> 4) - 0x3c, data);
1184 break;
// NOTE(review): each of the TSB/config register writes below is
// missing its `case` label (lines 1185, 1189, 1193, 1197, 1201,
// 1205, 1209, 1213, 1217, 1221, 1225, 1229) and the SPARC error
// register case labels (1233-1234).
1186 assert(va == 0);
1187 c0_tsb_ps0 = data;
1188 break;
1190 assert(va == 0);
1191 c0_tsb_ps1 = data;
1192 break;
1194 assert(va == 0);
1195 c0_config = data;
1196 break;
1198 assert(va == 0);
1199 itb->c0_tsb_ps0 = data;
1200 break;
1202 assert(va == 0);
1203 itb->c0_tsb_ps1 = data;
1204 break;
1206 assert(va == 0);
1207 itb->c0_config = data;
1208 break;
1210 assert(va == 0);
1211 cx_tsb_ps0 = data;
1212 break;
1214 assert(va == 0);
1215 cx_tsb_ps1 = data;
1216 break;
1218 assert(va == 0);
1219 cx_config = data;
1220 break;
1222 assert(va == 0);
1223 itb->cx_tsb_ps0 = data;
1224 break;
1226 assert(va == 0);
1227 itb->cx_tsb_ps1 = data;
1228 break;
1230 assert(va == 0);
1231 itb->cx_config = data;
1232 break;
1235 inform("Ignoring write to SPARC ERROR regsiter\n");
1236 break;
1237 case ASI_HYP_SCRATCHPAD:
1238 case ASI_SCRATCHPAD:
1239 tc->setMiscReg(MISCREG_SCRATCHPAD_R0 + (va >> 3), data);
1240 break;
1241 case ASI_IMMU:
1242 switch (va) {
1243 case 0x18:
1244 itb->sfsr = data;
1245 break;
1246 case 0x30:
1247 itb->tag_access = szext<60>(data);
1248 break;
1249 default:
1250 goto doMmuWriteError;
1251 }
1252 break;
// NOTE(review): dropped ITLB data-in / data-access case labels
// (lines 1253, 1256) around this insert sequence, and the sun4u
// fallback of the populate() call (line 1264).
1254 entry_insert = bits(va, 8,3);
1255 [[fallthrough]];
1257 assert(entry_insert != -1 || mbits(va,10,9) == va);
1258 ta_insert = itb->tag_access;
1259 va_insert = mbits(ta_insert, 63,13);
1260 ct_insert = mbits(ta_insert, 12,0);
1261 part_insert = tc->readMiscReg(MISCREG_MMU_PART_ID);
1262 real_insert = bits(va, 9,9);
1263 pte.populate(data, bits(va,10,10) ? PageTableEntry::sun4v :
1265 itb->insert(va_insert, part_insert, ct_insert, real_insert,
1266 pte, entry_insert);
1267 break;
// NOTE(review): dropped DTLB data-in / data-access case labels
// (lines 1268, 1271) and the sun4u fallback line (1279).
1269 entry_insert = bits(va, 8,3);
1270 [[fallthrough]];
1272 assert(entry_insert != -1 || mbits(va,10,9) == va);
1273 ta_insert = tag_access;
1274 va_insert = mbits(ta_insert, 63,13);
1275 ct_insert = mbits(ta_insert, 12,0);
1276 part_insert = tc->readMiscReg(MISCREG_MMU_PART_ID);
1277 real_insert = bits(va, 9,9);
1278 pte.populate(data, bits(va,10,10) ? PageTableEntry::sun4v :
1280 insert(va_insert, part_insert, ct_insert, real_insert, pte,
1281 entry_insert);
1282 break;
1283 case ASI_IMMU_DEMAP:
1284 ignore = false;
1285 ctx_id = -1;
1286 part_id = tc->readMiscReg(MISCREG_MMU_PART_ID);
1287 switch (bits(va,5,4)) {
1288 case 0:
1289 ctx_id = tc->readMiscReg(MISCREG_MMU_P_CONTEXT);
1290 break;
1291 case 1:
1292 ignore = true;
1293 break;
1294 case 3:
1295 ctx_id = 0;
1296 break;
1297 default:
1298 ignore = true;
1299 }
1300
1301 switch (bits(va,7,6)) {
1302 case 0: // demap page
1303 if (!ignore)
1304 itb->demapPage(mbits(va,63,13), part_id, bits(va,9,9), ctx_id);
1305 break;
1306 case 1: // demap context
1307 if (!ignore)
1308 itb->demapContext(part_id, ctx_id);
1309 break;
1310 case 2:
1311 itb->demapAll(part_id);
1312 break;
1313 default:
1314 panic("Invalid type for IMMU demap\n");
1315 }
1316 break;
1317 case ASI_DMMU:
1318 switch (va) {
1319 case 0x18:
1320 sfsr = data;
1321 break;
1322 case 0x30:
// NOTE(review): dropped tag_access write (line 1323).
1324 break;
1325 case 0x80:
// NOTE(review): dropped partition-id register write (line 1326).
1327 break;
1328 default:
1329 goto doMmuWriteError;
1330 }
1331 break;
1332 case ASI_DMMU_DEMAP:
1333 ignore = false;
1334 ctx_id = -1;
1335 part_id = tc->readMiscReg(MISCREG_MMU_PART_ID);
1336 switch (bits(va,5,4)) {
1337 case 0:
1338 ctx_id = tc->readMiscReg(MISCREG_MMU_P_CONTEXT);
1339 break;
1340 case 1:
1341 ctx_id = tc->readMiscReg(MISCREG_MMU_S_CONTEXT);
1342 break;
1343 case 3:
1344 ctx_id = 0;
1345 break;
1346 default:
1347 ignore = true;
1348 }
1349
1350 switch (bits(va,7,6)) {
1351 case 0: // demap page
1352 if (!ignore)
1353 demapPage(mbits(va,63,13), part_id, bits(va,9,9), ctx_id);
1354 break;
1355 case 1: // demap context
1356 if (!ignore)
1357 demapContext(part_id, ctx_id);
1358 break;
1359 case 2:
1360 demapAll(part_id);
1361 break;
1362 default:
1363 panic("Invalid type for IMMU demap\n");
1364 }
1365 break;
// NOTE(review): dropped interrupt-receive case label (line 1366) and
// the getInterruptController argument line (1372).
1367 {
1368 int msb;
1369 // clear all the interrupts that aren't set in the write
1370 SparcISA::Interrupts * interrupts =
1371 dynamic_cast<SparcISA::Interrupts *>(
1373 while (interrupts->get_vec(IT_INT_VEC) & data) {
1374 msb = findMsbSet(interrupts->get_vec(IT_INT_VEC) & data);
1375 tc->getCpuPtr()->clearInterrupt(0, IT_INT_VEC, msb);
1376 }
1377 }
1378 break;
// NOTE(review): dropped interrupt-dispatch case label (line 1379).
1380 tc->getSystemPtr()->threads[bits(data,12,8)]->
1381 getCpuPtr()->postInterrupt(0, bits(data, 5, 0), 0);
1382 break;
1383 default:
1384doMmuWriteError:
1385 panic("need to impl DTB::doMmuRegWrite() got asi=%#x, va=%#x d=%#x\n",
1386 (uint32_t)pkt->req->getArchFlags(), pkt->getAddr(), data);
1387 }
1388 pkt->makeAtomicResponse();
1389 return Cycles(1);
1390}
1391
1392void
1394{
1395 uint64_t tag_access = mbits(addr,63,13) | mbits(ctx,12,0);
1396 TLB *itb = static_cast<TLB *>(tc->getMMUPtr()->itb);
1397 ptrs[0] = MakeTsbPtr(Ps0, tag_access,
1398 c0_tsb_ps0,
1399 c0_config,
1400 cx_tsb_ps0,
1401 cx_config);
1402 ptrs[1] = MakeTsbPtr(Ps1, tag_access,
1403 c0_tsb_ps1,
1404 c0_config,
1405 cx_tsb_ps1,
1406 cx_config);
1407 ptrs[2] = MakeTsbPtr(Ps0, tag_access,
1408 itb->c0_tsb_ps0,
1409 itb->c0_config,
1410 itb->cx_tsb_ps0,
1411 itb->cx_config);
1412 ptrs[3] = MakeTsbPtr(Ps1, tag_access,
1413 itb->c0_tsb_ps1,
1414 itb->c0_config,
1415 itb->cx_tsb_ps1,
1416 itb->cx_config);
1417}
1418
1419uint64_t
1420TLB::MakeTsbPtr(TsbPageSize ps, uint64_t tag_access, uint64_t c0_tsb,
1421 uint64_t c0_config, uint64_t cX_tsb, uint64_t cX_config)
1422{
1423 uint64_t tsb;
1424 uint64_t config;
1425
1426 if (bits(tag_access, 12,0) == 0) {
1427 tsb = c0_tsb;
1428 config = c0_config;
1429 } else {
1430 tsb = cX_tsb;
1431 config = cX_config;
1432 }
1433
1434 uint64_t ptr = mbits(tsb,63,13);
1435 bool split = bits(tsb,12,12);
1436 int tsb_size = bits(tsb,3,0);
1437 int page_size = (ps == Ps0) ? bits(config, 2,0) : bits(config,10,8);
1438
1439 if (ps == Ps1 && split)
1440 ptr |= 1ULL << (13 + tsb_size);
1441 ptr |= (tag_access >> (9 + page_size * 3)) & mask(12+tsb_size, 4);
1442
1443 return ptr;
1444}
1445
1446void
1448{
1452
1453 // convert the pointer based free list into an index based one
1454 std::vector<int> free_list;
1455 for (const TlbEntry *entry : freeList)
1456 free_list.push_back(entry - tlb);
1457
1458 SERIALIZE_CONTAINER(free_list);
1459
1469
1470 for (int x = 0; x < size; x++) {
1471 ScopedCheckpointSection sec(cp, csprintf("PTE%d", x));
1472 tlb[x].serialize(cp);
1473 }
1474}
1475
1476void
1478{
1479 int oldSize;
1480
1481 paramIn(cp, "size", oldSize);
1482 if (oldSize != size)
1483 panic("Don't support unserializing different sized TLBs\n");
1486
1487 std::vector<int> free_list;
1488 UNSERIALIZE_CONTAINER(free_list);
1489 freeList.clear();
1490 for (int idx : free_list)
1491 freeList.push_back(&tlb[idx]);
1492
1501
1503 for (int x = 0; x < size; x++) {
1504 ScopedCheckpointSection sec(cp, csprintf("PTE%d", x));
1505 tlb[x].unserialize(cp);
1506 if (tlb[x].valid)
1507 lookupTable.insert(tlb[x].range, &tlb[x]);
1508
1509 }
1511}
1512
1513} // namespace SparcISA
1514} // namespace gem5
#define DPRINTFN(...)
Definition trace.hh:238
#define DPRINTF(x,...)
Definition trace.hh:210
const char data[]
BaseInterrupts * getInterruptController(ThreadID tid)
Definition base.hh:228
void clearInterrupt(ThreadID tid, int int_num, int index)
Definition base.hh:242
virtual void finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc, BaseMMU::Mode mode)=0
BaseTLB * itb
Definition mmu.hh:159
Cycles is a wrapper class for representing cycle counts, i.e.
Definition types.hh:79
bool translate(Addr vaddr, Addr &paddr)
Translate function.
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition packet.hh:295
Addr getAddr() const
Definition packet.hh:807
void setBE(T v)
Set the value in the data pointer to v as big endian.
T getBE() const
Get the data in the packet byte swapped from big endian to host endian.
RequestPtr req
A pointer to the original request.
Definition packet.hh:377
void makeAtomicResponse()
Definition packet.hh:1074
This object is a proxy for a port or other object which implements the functional response protocol,...
Definition port_proxy.hh:87
EmulationPageTable * pTable
Definition process.hh:184
@ STRICT_ORDER
The request is required to be strictly ordered by CPU models and is non-speculative.
Definition request.hh:135
@ UNCACHEABLE
The request is to an uncacheable address.
Definition request.hh:125
uint64_t get_vec(int int_num)
Addr translate(Addr vaddr) const
Definition pagetable.hh:172
void populate(uint64_t e, EntryType t=sun4u)
Definition pagetable.hh:95
uint64_t c0_tsb_ps0
Definition tlb.hh:61
uint64_t TagRead(int entry)
Given an entry id, read that tlb entries' tag.
Definition tlb.cc:355
TLB(const Params &p)
Definition tlb.cc:60
uint64_t cx_tsb_ps1
Definition tlb.hh:65
void demapPage(Addr va, int partition_id, bool real, int context_id)
Remove all entries that match a certain partition id, context id, and va.
Definition tlb.cc:254
void GetTsbPtr(ThreadContext *tc, Addr addr, int ctx, Addr *ptrs)
Definition tlb.cc:1393
void unserialize(CheckpointIn &cp) override
Unserialize an object.
Definition tlb.cc:1477
Cycles doMmuRegWrite(ThreadContext *tc, Packet *pkt)
Definition tlb.cc:1141
uint64_t c0_config
Definition tlb.hh:63
uint64_t MakeTsbPtr(TsbPageSize ps, uint64_t tag_access, uint64_t c0_tsb, uint64_t c0_config, uint64_t cX_tsb, uint64_t cX_config)
Definition tlb.cc:1420
TlbMap::iterator MapIter
Definition tlb.hh:72
TlbEntry * cacheEntry[2]
Definition tlb.hh:202
SparcTLBParams Params
Definition tlb.hh:160
void writeSfsr(bool write, ContextType ct, bool se, FaultTypes ft, int asi)
Definition tlb.cc:381
std::list< TlbEntry * > freeList
Definition tlb.hh:83
void clearUsedBits()
Definition tlb.cc:88
void flushAll() override
Remove all entries from the TLB.
Definition tlb.cc:327
void demapContext(int partition_id, int context_id)
Remove all entries that match a given context/partition id.
Definition tlb.cc:286
TlbEntry * tlb
Definition tlb.hh:74
uint64_t sfsr
Definition tlb.hh:67
uint64_t c0_tsb_ps1
Definition tlb.hh:62
ASI cacheAsi[2]
Definition tlb.hh:203
void translateTiming(const RequestPtr &req, ThreadContext *tc, BaseMMU::Translation *translation, BaseMMU::Mode mode) override
Definition tlb.cc:941
uint64_t TteRead(int entry)
Give an entry id, read that tlb entries' tte.
Definition tlb.cc:342
Fault finalizePhysical(const RequestPtr &req, ThreadContext *tc, BaseMMU::Mode mode) const override
Do post-translation physical address finalization.
Definition tlb.cc:949
Fault translateAtomic(const RequestPtr &req, ThreadContext *tc, BaseMMU::Mode mode) override
Definition tlb.cc:834
TlbEntry * lookup(Addr va, int partition_id, bool real, int context_id=0, bool update_used=true)
lookup an entry in the TLB based on the partition id, and real bit if real is true or the partition i...
Definition tlb.cc:196
uint64_t sfar
Definition tlb.hh:60
Fault translateInst(const RequestPtr &req, ThreadContext *tc)
Definition tlb.cc:219
Fault translateData(const RequestPtr &req, ThreadContext *tc, bool write)
Definition tlb.cc:234
void insert(Addr vpn, int partition_id, int context_id, bool real, const PageTableEntry &PTE, int entry=-1)
Insert a PTE into the TLB.
Definition tlb.cc:102
TlbMap lookupTable
Definition tlb.hh:71
uint64_t cx_tsb_ps0
Definition tlb.hh:64
bool validVirtualAddress(Addr va, bool am)
Checks if the virtual address provided is a valid one.
Definition tlb.cc:371
void demapAll(int partition_id)
Remove all non-locked entries from the tlb that match partition id.
Definition tlb.cc:308
uint64_t tag_access
Definition tlb.hh:68
void serialize(CheckpointOut &cp) const override
Serialize an object.
Definition tlb.cc:1447
Fault translateFunctional(const RequestPtr &req, ThreadContext *tc, BaseMMU::Mode mode) override
Definition tlb.cc:844
uint64_t cx_config
Definition tlb.hh:66
uint64_t cacheState
Definition tlb.hh:80
void writeTagAccess(Addr va, int context)
Definition tlb.cc:398
Cycles doMmuRegRead(ThreadContext *tc, Packet *pkt)
Definition tlb.cc:956
iterator insert(TlbRange &r, TlbEntry *d)
Definition tlb_map.hh:97
iterator find(const TlbRange &r)
Definition tlb_map.hh:52
size_t erase(TlbRange k)
Definition tlb_map.hh:106
Addr cacheLineSize() const
Get the cache line size of the system.
Definition system.hh:308
Threads threads
Definition system.hh:310
ThreadContext is the external interface to all thread state for anything outside of the CPU.
virtual RegVal readMiscReg(RegIndex misc_reg)=0
virtual void setMiscReg(RegIndex misc_reg, RegVal val)=0
virtual System * getSystemPtr()=0
virtual BaseCPU * getCpuPtr()=0
virtual RegVal readMiscRegNoEffect(RegIndex misc_reg) const =0
virtual BaseMMU * getMMUPtr()=0
virtual Process * getProcessPtr()=0
STL vector class.
Definition stl.hh:37
constexpr int findMsbSet(uint64_t val)
Returns the bit position of the MSB that is set in the input.
Definition bitfield.hh:279
constexpr T bits(T val, unsigned first, unsigned last)
Extract the bitfield from position 'first' to 'last' (inclusive) from 'val' and right justify it.
Definition bitfield.hh:79
constexpr T mbits(T val, unsigned first, unsigned last)
Mask off the given bits in place like bits() but without shifting.
Definition bitfield.hh:106
constexpr uint64_t szext(uint64_t val)
Sign-extend an N-bit value to 64 bits.
Definition bitfield.hh:161
#define panic(...)
This implements a cprintf based panic() function.
Definition logging.hh:188
#define fatal(...)
This implements a cprintf based fatal() function.
Definition logging.hh:200
#define UNSERIALIZE_CONTAINER(member)
Definition serialize.hh:634
#define SERIALIZE_CONTAINER(member)
Definition serialize.hh:626
#define inform(...)
Definition logging.hh:257
Bitfield< 3, 0 > mask
Definition pcstate.hh:63
Bitfield< 4, 0 > mode
Definition misc_types.hh:74
Bitfield< 18, 16 > ps
Bitfield< 5 > t
Definition misc_types.hh:71
Bitfield< 7 > i
Definition misc_types.hh:67
Bitfield< 9 > e
Definition misc_types.hh:65
Bitfield< 8 > a
Definition misc_types.hh:66
Bitfield< 8 > va
Bitfield< 23, 20 > tl
Bitfield< 29, 28 > ce
Bitfield< 17 > tbe
Bitfield< 0 > p
Bitfield< 3 > x
Definition pagetable.hh:73
@ MISCREG_MMU_PART_ID
Definition misc.hh:93
@ MISCREG_SCRATCHPAD_R0
Scratchpad registers.
Definition misc.hh:97
@ MISCREG_MMU_P_CONTEXT
MMU Internal Registers.
Definition misc.hh:91
@ MISCREG_QUEUE_CPU_MONDO_HEAD
Definition misc.hh:107
@ MISCREG_TLB_DATA
Definition misc.hh:117
@ MISCREG_MMU_LSU_CTRL
Definition misc.hh:94
@ MISCREG_MMU_S_CONTEXT
Definition misc.hh:92
bool asiIsTwin(ASI asi)
Definition asi.cc:188
bool asiIsNucleus(ASI asi)
Definition asi.cc:109
bool asiIsReal(ASI asi)
Definition asi.cc:142
bool asiIsAsIfUser(ASI asi)
Definition asi.cc:118
const Addr VAddrAMask
Definition tlb.hh:50
Bitfield< 5 > red
Definition misc.hh:124
bool asiIsCmt(ASI asi)
Definition asi.cc:249
Bitfield< 2 > hpriv
Definition misc.hh:123
bool asiIsUnPriv(ASI asi)
Definition asi.cc:285
bool asiIsPrimary(ASI asi)
Definition asi.cc:51
bool asiIsPartialStore(ASI asi)
Definition asi.cc:203
bool asiIsLittle(ASI asi)
Definition asi.cc:153
bool asiIsInterrupt(ASI asi)
Definition asi.cc:262
uint32_t MachInst
Definition types.hh:41
@ ASI_DMMU_TSB_PS1_PTR_REG
Definition asi.hh:148
@ ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1
Definition asi.hh:105
@ ASI_IMMU_CTXT_ZERO_CONFIG
Definition asi.hh:110
@ ASI_SWVR_UDB_INTR_W
Definition asi.hh:162
@ ASI_DMMU_DEMAP
Definition asi.hh:153
@ ASI_DMMU_TSB_PS0_PTR_REG
Definition asi.hh:147
@ ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0
Definition asi.hh:108
@ ASI_IMMU_CTXT_NONZERO_CONFIG
Definition asi.hh:118
@ ASI_ITLB_DATA_IN_REG
Definition asi.hh:142
@ ASI_SPARC_ERROR_EN_REG
Definition asi.hh:133
@ ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1
Definition asi.hh:109
@ ASI_HYP_SCRATCHPAD
Definition asi.hh:137
@ ASI_ITLB_DATA_ACCESS_REG
Definition asi.hh:143
@ ASI_DTLB_DATA_ACCESS_REG
Definition asi.hh:151
@ ASI_IMMU_TSB_PS0_PTR_REG
Definition asi.hh:139
@ ASI_DMMU_CTXT_ZERO_CONFIG
Definition asi.hh:106
@ ASI_SPARC_ERROR_STATUS_REG
Definition asi.hh:134
@ ASI_IMMU_DEMAP
Definition asi.hh:145
@ ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0
Definition asi.hh:112
@ ASI_LSU_CONTROL_REG
Definition asi.hh:127
@ ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0
Definition asi.hh:104
@ ASI_SCRATCHPAD
Definition asi.hh:73
@ ASI_DTLB_DATA_IN_REG
Definition asi.hh:150
@ ASI_IMMU_TSB_PS1_PTR_REG
Definition asi.hh:140
@ ASI_SWVR_INTR_RECEIVE
Definition asi.hh:161
@ ASI_SWVR_UDB_INTR_R
Definition asi.hh:163
@ ASI_DMMU_CTXT_NONZERO_CONFIG
Definition asi.hh:114
@ ASI_IMPLICIT
Definition asi.hh:40
@ ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0
Definition asi.hh:116
@ ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1
Definition asi.hh:113
@ ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1
Definition asi.hh:117
bool asiIsNoFault(ASI asi)
Definition asi.cc:233
Bitfield< 2 > priv
Definition misc.hh:131
bool asiIsQueue(ASI asi)
Definition asi.cc:256
const Addr EndVAddrHole
Definition tlb.hh:49
bool asiIsScratchPad(ASI asi)
Definition asi.cc:242
Bitfield< 3 > am
Definition misc.hh:132
bool asiIsSparcError(ASI asi)
Definition asi.cc:312
const Addr StartVAddrHole
Definition tlb.hh:48
const Addr PAddrImplMask
Definition tlb.hh:51
bool asiIsHPriv(ASI asi)
Definition asi.cc:298
bool asiIsMmu(ASI asi)
Definition asi.cc:270
bool asiIsBlock(ASI asi)
Definition asi.cc:38
bool asiIsSecondary(ASI asi)
Definition asi.cc:80
Bitfield< 3 > addr
Definition types.hh:84
Copyright (c) 2024 - Pranith Kumar Copyright (c) 2020 Inria All rights reserved.
Definition binary32.hh:36
std::shared_ptr< FaultBase > Fault
Definition types.hh:249
std::shared_ptr< Request > RequestPtr
Definition request.hh:94
static void ignore(const char *expr)
Definition debug.cc:79
std::ostream CheckpointOut
Definition serialize.hh:66
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition types.hh:147
void paramIn(CheckpointIn &cp, const std::string &name, ExtMachInst &machInst)
Definition types.cc:72
bool FullSystem
The FullSystem variable can be used to determine the current mode of simulation.
Definition root.cc:220
T betoh(T value)
Definition byteswap.hh:175
Packet * PacketPtr
std::string csprintf(const char *format, const Args &...args)
Definition cprintf.hh:161
constexpr decltype(nullptr) NoFault
Definition types.hh:253
Declarations of a non-full system Page Table.
Declaration of a request, the overall memory request consisting of the parts of the request that are ...
#define UNSERIALIZE_SCALAR(scalar)
Definition serialize.hh:575
#define SERIALIZE_SCALAR(scalar)
Definition serialize.hh:568
void unserialize(CheckpointIn &cp)
Definition pagetable.cc:57
void serialize(CheckpointOut &cp) const
Definition pagetable.cc:40
bool_vector8 mem[]
Definition reset_stim.h:43

Generated on Tue Jun 18 2024 16:24:00 for gem5 by doxygen 1.11.0