gem5  v20.0.0.0
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
tlb.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2001-2005 The Regents of The University of Michigan
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are
7  * met: redistributions of source code must retain the above copyright
8  * notice, this list of conditions and the following disclaimer;
9  * redistributions in binary form must reproduce the above copyright
10  * notice, this list of conditions and the following disclaimer in the
11  * documentation and/or other materials provided with the distribution;
12  * neither the name of the copyright holders nor the names of its
13  * contributors may be used to endorse or promote products derived from
14  * this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include "arch/sparc/tlb.hh"
30 
31 #include <cstring>
32 
33 #include "arch/sparc/asi.hh"
34 #include "arch/sparc/faults.hh"
35 #include "arch/sparc/interrupts.hh"
36 #include "arch/sparc/registers.hh"
37 #include "base/bitfield.hh"
38 #include "base/compiler.hh"
39 #include "base/trace.hh"
40 #include "cpu/base.hh"
41 #include "cpu/thread_context.hh"
42 #include "debug/IPR.hh"
43 #include "debug/TLB.hh"
44 #include "mem/packet_access.hh"
45 #include "mem/page_table.hh"
46 #include "mem/request.hh"
47 #include "sim/full_system.hh"
48 #include "sim/process.hh"
49 #include "sim/system.hh"
50 
51 /* @todo remove some of the magic constants. -- ali
52  * */
53 namespace SparcISA {
54 
55 TLB::TLB(const Params *p)
56  : BaseTLB(p), size(p->size), usedEntries(0), lastReplaced(0),
57  cacheState(0), cacheValid(false)
58 {
59  // To make this work you'll have to change the hypervisor and OS
60  if (size > 64)
61  fatal("SPARC T1 TLB registers don't support more than 64 TLB entries");
62 
63  tlb = new TlbEntry[size];
64  std::memset((void *)tlb, 0, sizeof(TlbEntry) * size);
65 
66  for (int x = 0; x < size; x++)
67  freeList.push_back(&tlb[x]);
68 
69  c0_tsb_ps0 = 0;
70  c0_tsb_ps1 = 0;
71  c0_config = 0;
72  cx_tsb_ps0 = 0;
73  cx_tsb_ps1 = 0;
74  cx_config = 0;
75  sfsr = 0;
76  tag_access = 0;
77  sfar = 0;
78  cacheEntry[0] = NULL;
79  cacheEntry[1] = NULL;
80 }
81 
82 void
84 {
85  MapIter i;
86  for (i = lookupTable.begin(); i != lookupTable.end(); i++) {
87  TlbEntry *t = i->second;
88  if (!t->pte.locked()) {
89  t->used = false;
90  usedEntries--;
91  }
92  }
93 }
94 
95 
96 void
97 TLB::insert(Addr va, int partition_id, int context_id, bool real,
98  const PageTableEntry& PTE, int entry)
99 {
100  MapIter i;
101  TlbEntry *new_entry = NULL;
102  int x;
103 
104  cacheValid = false;
105  va &= ~(PTE.size()-1);
106 
107  DPRINTF(TLB,
108  "TLB: Inserting Entry; va=%#x pa=%#x pid=%d cid=%d r=%d entryid=%d\n",
109  va, PTE.paddr(), partition_id, context_id, (int)real, entry);
110 
111  // Demap any entry that conflicts
112  for (x = 0; x < size; x++) {
113  if (tlb[x].range.real == real &&
114  tlb[x].range.partitionId == partition_id &&
115  tlb[x].range.va < va + PTE.size() - 1 &&
116  tlb[x].range.va + tlb[x].range.size >= va &&
117  (real || tlb[x].range.contextId == context_id ))
118  {
119  if (tlb[x].valid) {
120  freeList.push_front(&tlb[x]);
121  DPRINTF(TLB, "TLB: Conflicting entry %#X , deleting it\n", x);
122 
123  tlb[x].valid = false;
124  if (tlb[x].used) {
125  tlb[x].used = false;
126  usedEntries--;
127  }
128  lookupTable.erase(tlb[x].range);
129  }
130  }
131  }
132 
133  if (entry != -1) {
134  assert(entry < size && entry >= 0);
135  new_entry = &tlb[entry];
136  } else {
137  if (!freeList.empty()) {
138  new_entry = freeList.front();
139  } else {
140  x = lastReplaced;
141  do {
142  ++x;
143  if (x == size)
144  x = 0;
145  if (x == lastReplaced)
146  goto insertAllLocked;
147  } while (tlb[x].pte.locked());
148  lastReplaced = x;
149  new_entry = &tlb[x];
150  }
151  }
152 
153 insertAllLocked:
154  // Update the last ently if their all locked
155  if (!new_entry) {
156  new_entry = &tlb[size-1];
157  }
158 
159  freeList.remove(new_entry);
160  if (new_entry->valid && new_entry->used)
161  usedEntries--;
162  if (new_entry->valid)
163  lookupTable.erase(new_entry->range);
164 
165 
166  assert(PTE.valid());
167  new_entry->range.va = va;
168  new_entry->range.size = PTE.size() - 1;
169  new_entry->range.partitionId = partition_id;
170  new_entry->range.contextId = context_id;
171  new_entry->range.real = real;
172  new_entry->pte = PTE;
173  new_entry->used = true;;
174  new_entry->valid = true;
175  usedEntries++;
176 
177  i = lookupTable.insert(new_entry->range, new_entry);
178  assert(i != lookupTable.end());
179 
180  // If all entries have their used bit set, clear it on them all,
181  // but the one we just inserted
182  if (usedEntries == size) {
183  clearUsedBits();
184  new_entry->used = true;
185  usedEntries++;
186  }
187 }
188 
189 
190 TlbEntry*
191 TLB::lookup(Addr va, int partition_id, bool real, int context_id,
192  bool update_used)
193 {
194  MapIter i;
195  TlbRange tr;
196  TlbEntry *t;
197 
198  DPRINTF(TLB, "TLB: Looking up entry va=%#x pid=%d cid=%d r=%d\n",
199  va, partition_id, context_id, real);
200  // Assemble full address structure
201  tr.va = va;
202  tr.size = 1;
203  tr.contextId = context_id;
204  tr.partitionId = partition_id;
205  tr.real = real;
206 
207  // Try to find the entry
208  i = lookupTable.find(tr);
209  if (i == lookupTable.end()) {
210  DPRINTF(TLB, "TLB: No valid entry found\n");
211  return NULL;
212  }
213 
214  // Mark the entries used bit and clear other used bits in needed
215  t = i->second;
216  DPRINTF(TLB, "TLB: Valid entry found pa: %#x size: %#x\n", t->pte.paddr(),
217  t->pte.size());
218 
219  // Update the used bits only if this is a real access (not a fake
220  // one from virttophys()
221  if (!t->used && update_used) {
222  t->used = true;
223  usedEntries++;
224  if (usedEntries == size) {
225  clearUsedBits();
226  t->used = true;
227  usedEntries++;
228  }
229  }
230 
231  return t;
232 }
233 
 234 void
// NOTE(review): the function-name line (doxygen line 235) was dropped by
// the doc extractor; from the body this walks the TLB and prints every
// valid entry via DPRINTFN (presumably TLB::dumpAll()) -- confirm against
// the gem5 tree.
 236 {
 237  MapIter i;  // unused in the visible body; kept byte-identical
 238  for (int x = 0; x < size; x++) {
 239  if (tlb[x].valid) {
            // index, partition:context, real flag, size, va, pa, raw TTE
 240  DPRINTFN("%4d: %#2x:%#2x %c %#4x %#8x %#8x %#16x\n",
 241  x, tlb[x].range.partitionId, tlb[x].range.contextId,
 242  tlb[x].range.real ? 'R' : ' ', tlb[x].range.size,
 243  tlb[x].range.va, tlb[x].pte.paddr(), tlb[x].pte());
 244  }
 245  }
 246 }
247 
248 void
249 TLB::demapPage(Addr va, int partition_id, bool real, int context_id)
250 {
251  TlbRange tr;
252  MapIter i;
253 
254  DPRINTF(IPR, "TLB: Demapping Page va=%#x pid=%#d cid=%d r=%d\n",
255  va, partition_id, context_id, real);
256 
257  cacheValid = false;
258 
259  // Assemble full address structure
260  tr.va = va;
261  tr.size = 1;
262  tr.contextId = context_id;
263  tr.partitionId = partition_id;
264  tr.real = real;
265 
266  // Demap any entry that conflicts
267  i = lookupTable.find(tr);
268  if (i != lookupTable.end()) {
269  DPRINTF(IPR, "TLB: Demapped page\n");
270  i->second->valid = false;
271  if (i->second->used) {
272  i->second->used = false;
273  usedEntries--;
274  }
275  freeList.push_front(i->second);
276  lookupTable.erase(i);
277  }
278 }
279 
280 void
281 TLB::demapContext(int partition_id, int context_id)
282 {
283  DPRINTF(IPR, "TLB: Demapping Context pid=%#d cid=%d\n",
284  partition_id, context_id);
285  cacheValid = false;
286  for (int x = 0; x < size; x++) {
287  if (tlb[x].range.contextId == context_id &&
288  tlb[x].range.partitionId == partition_id) {
289  if (tlb[x].valid) {
290  freeList.push_front(&tlb[x]);
291  }
292  tlb[x].valid = false;
293  if (tlb[x].used) {
294  tlb[x].used = false;
295  usedEntries--;
296  }
297  lookupTable.erase(tlb[x].range);
298  }
299  }
300 }
301 
302 void
303 TLB::demapAll(int partition_id)
304 {
305  DPRINTF(TLB, "TLB: Demapping All pid=%#d\n", partition_id);
306  cacheValid = false;
307  for (int x = 0; x < size; x++) {
308  if (tlb[x].valid && !tlb[x].pte.locked() &&
309  tlb[x].range.partitionId == partition_id) {
310  freeList.push_front(&tlb[x]);
311  tlb[x].valid = false;
312  if (tlb[x].used) {
313  tlb[x].used = false;
314  usedEntries--;
315  }
316  lookupTable.erase(tlb[x].range);
317  }
318  }
319 }
320 
 321 void
// NOTE(review): the function-name line (doxygen line 322) was dropped by
// the doc extractor; the body invalidates every entry and empties the
// lookup table (presumably TLB::flushAll()) -- confirm against the gem5
// tree. Note this flush ignores locked entries, unlike demapAll().
 323 {
 324  cacheValid = false;
 325  lookupTable.clear();
 326 
 327  for (int x = 0; x < size; x++) {
        // Valid entries were not on the free list; put them back.
 328  if (tlb[x].valid)
 329  freeList.push_back(&tlb[x]);
 330  tlb[x].valid = false;
 331  tlb[x].used = false;
 332  }
 333  usedEntries = 0;
 334 }
335 
336 uint64_t
337 TLB::TteRead(int entry)
338 {
339  if (entry >= size)
340  panic("entry: %d\n", entry);
341 
342  assert(entry < size);
343  if (tlb[entry].valid)
344  return tlb[entry].pte();
345  else
346  return (uint64_t)-1ll;
347 }
348 
349 uint64_t
350 TLB::TagRead(int entry)
351 {
352  assert(entry < size);
353  uint64_t tag;
354  if (!tlb[entry].valid)
355  return (uint64_t)-1ll;
356 
357  tag = tlb[entry].range.contextId;
358  tag |= tlb[entry].range.va;
359  tag |= (uint64_t)tlb[entry].range.partitionId << 61;
360  tag |= tlb[entry].range.real ? ULL(1) << 60 : 0;
361  tag |= (uint64_t)~tlb[entry].pte._size() << 56;
362  return tag;
363 }
364 
365 bool
367 {
368  if (am)
369  return true;
370  if (va >= StartVAddrHole && va <= EndVAddrHole)
371  return false;
372  return true;
373 }
374 
375 void
376 TLB::writeSfsr(bool write, ContextType ct, bool se, FaultTypes ft, int asi)
377 {
378  if (sfsr & 0x1)
379  sfsr = 0x3;
380  else
381  sfsr = 1;
382 
383  if (write)
384  sfsr |= 1 << 2;
385  sfsr |= ct << 4;
386  if (se)
387  sfsr |= 1 << 6;
388  sfsr |= ft << 7;
389  sfsr |= asi << 16;
390 }
391 
392 void
394 {
395  DPRINTF(TLB, "TLB: Writing Tag Access: va: %#X ctx: %#X value: %#X\n",
396  va, context, mbits(va, 63,13) | mbits(context,12,0));
397 
398  tag_access = mbits(va, 63,13) | mbits(context,12,0);
399 }
400 
401 void
403  bool se, FaultTypes ft, int asi)
404 {
405  DPRINTF(TLB, "TLB: Fault: A=%#x w=%d ct=%d ft=%d asi=%d\n",
406  a, (int)write, ct, ft, asi);
407  TLB::writeSfsr(write, ct, se, ft, asi);
408  sfar = a;
409 }
410 
 411 Fault
// NOTE(review): the signature line (doxygen line 412) was dropped by the
// doc extractor; the call `translateInst(req, tc)` later in this file
// grounds the name and arity (presumably
// `TLB::translateInst(const RequestPtr &req, ThreadContext *tc)`, matching
// translateData's visible signature) -- confirm against the gem5 tree.
//
// Translate an instruction fetch: consult the one-entry ITB cache, apply
// privilege/alignment/VA-hole checks, look up the TLB, and either set the
// physical address on the request or return the appropriate fault.
 413 {
 414  uint64_t tlbdata = tc->readMiscRegNoEffect(MISCREG_TLB_DATA);
 415 
 416  Addr vaddr = req->getVaddr();
 417  TlbEntry *e;
 418 
 419  assert(req->getArchFlags() == ASI_IMPLICIT);
 420 
 421  DPRINTF(TLB, "TLB: ITB Request to translate va=%#x size=%d\n",
 422  vaddr, req->getSize());
 423 
 424  // Be fast if we can! Reuse the cached entry if the MMU state hasn't
 424b // changed since the last fetch and the fetch falls inside its range.
 425  if (cacheValid && cacheState == tlbdata) {
 426  if (cacheEntry[0]) {
 427  if (cacheEntry[0]->range.va < vaddr + sizeof(MachInst) &&
 428  cacheEntry[0]->range.va + cacheEntry[0]->range.size >= vaddr) {
 429  req->setPaddr(cacheEntry[0]->pte.translate(vaddr));
 430  return NoFault;
 431  }
 432  } else {
        // Cached "bypass" state (hpriv/red): VA maps straight to PA.
 433  req->setPaddr(vaddr & PAddrImplMask);
 434  return NoFault;
 435  }
 436  }
 437 
    // Decode the packed MMU state from the TLB_DATA pseudo-register.
 438  bool hpriv = bits(tlbdata,0,0);
 439  bool red = bits(tlbdata,1,1);
 440  bool priv = bits(tlbdata,2,2);
 441  bool addr_mask = bits(tlbdata,3,3);
 442  bool lsu_im = bits(tlbdata,4,4);
 443 
 444  int part_id = bits(tlbdata,15,8);
 445  int tl = bits(tlbdata,18,16);
 446  int pri_context = bits(tlbdata,47,32);
 447  int context;
 448  ContextType ct;
 449  int asi;
 450  bool real = false;
 451 
 452  DPRINTF(TLB, "TLB: priv:%d hpriv:%d red:%d lsuim:%d part_id: %#X\n",
 453  priv, hpriv, red, lsu_im, part_id);
 454 
    // At trap level > 0 fetches use the nucleus context, otherwise primary.
 455  if (tl > 0) {
 456  asi = ASI_N;
 457  ct = Nucleus;
 458  context = 0;
 459  } else {
 460  asi = ASI_P;
 461  ct = Primary;
 462  context = pri_context;
 463  }
 464 
    // Hyperprivileged or RED state bypasses translation entirely; cache
    // that fact (cacheEntry[0] == NULL encodes the bypass).
 465  if ( hpriv || red ) {
 466  cacheValid = true;
 467  cacheState = tlbdata;
 468  cacheEntry[0] = NULL;
 469  req->setPaddr(vaddr & PAddrImplMask);
 470  return NoFault;
 471  }
 472 
 473  // If the access is unaligned trap
 474  if (vaddr & 0x3) {
 475  writeSfsr(false, ct, false, OtherFault, asi);
 476  return std::make_shared<MemAddressNotAligned>();
 477  }
 478 
 479  if (addr_mask)
 480  vaddr = vaddr & VAddrAMask;
 481 
 482  if (!validVirtualAddress(vaddr, addr_mask)) {
 483  writeSfsr(false, ct, false, VaOutOfRange, asi);
 484  return std::make_shared<InstructionAccessException>();
 485  }
 486 
    // With the instruction MMU disabled, do a real (PA == translated)
    // lookup in context 0.
 487  if (!lsu_im) {
 488  e = lookup(vaddr, part_id, true);
 489  real = true;
 490  context = 0;
 491  } else {
 492  e = lookup(vaddr, part_id, false, context);
 493  }
 494 
 495  if (e == NULL || !e->valid) {
 496  writeTagAccess(vaddr, context);
 497  if (real) {
 498  return std::make_shared<InstructionRealTranslationMiss>();
 499  } else {
 500  if (FullSystem)
 501  return std::make_shared<FastInstructionAccessMMUMiss>();
 502  else
 503  return std::make_shared<FastInstructionAccessMMUMiss>(
 504  req->getVaddr());
 505  }
 506  }
 507 
 508  // we're not privileged but are accessing a privileged page
 509  if (!priv && e->pte.priv()) {
 510  writeTagAccess(vaddr, context);
 511  writeSfsr(false, ct, false, PrivViolation, asi);
 512  return std::make_shared<InstructionAccessException>();
 513  }
 514 
 515  // cache translation data for the next translation
 516  cacheValid = true;
 517  cacheState = tlbdata;
 518  cacheEntry[0] = e;
 519 
 520  req->setPaddr(e->pte.translate(vaddr));
 521  DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
 522  return NoFault;
 523 }
524 
 525 Fault
 526 TLB::translateData(const RequestPtr &req, ThreadContext *tc, bool write)
// Translate a data access: try the two-entry DTB cache, then run the
// full ASI decode / privilege checks / TLB lookup, or hand the access off
// to one of the memory-mapped register handlers at the labels below.
// NOTE(review): several statements in this body (doxygen lines 572, 587
// and 745) were dropped by the doc extractor -- see the inline notes.
 527 {
 528  /*
 529  * @todo this could really use some profiling and fixing to make
 530  * it faster!
 531  */
 532  uint64_t tlbdata = tc->readMiscRegNoEffect(MISCREG_TLB_DATA);
 533  Addr vaddr = req->getVaddr();
 534  Addr size = req->getSize();
 535  ASI asi;
 536  asi = (ASI)req->getArchFlags();
 537  bool implicit = false;
 538  bool hpriv = bits(tlbdata,0,0);
 539  bool unaligned = vaddr & (size - 1);
 540 
 541  DPRINTF(TLB, "TLB: DTB Request to translate va=%#x size=%d asi=%#x\n",
 542  vaddr, size, asi);
 543 
    // Sanity check: every entry is either mapped or free.
 544  if (lookupTable.size() != 64 - freeList.size())
 545  panic("Lookup table size: %d tlb size: %d\n", lookupTable.size(),
 546  freeList.size());
 547  if (asi == ASI_IMPLICIT)
 548  implicit = true;
 549 
 550  // Only use the fast path here if there doesn't need to be an unaligned
 551  // trap later
 552  if (!unaligned) {
 553  if (hpriv && implicit) {
 554  req->setPaddr(vaddr & PAddrImplMask);
 555  return NoFault;
 556  }
 557 
 558  // Be fast if we can!
 559  if (cacheValid && cacheState == tlbdata) {
 560 
 561 
 562 
 563  if (cacheEntry[0]) {
 564  TlbEntry *ce = cacheEntry[0];
 565  Addr ce_va = ce->range.va;
 566  if (cacheAsi[0] == asi &&
 567  ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
 568  (!write || ce->pte.writable())) {
 569  req->setPaddr(ce->pte.translate(vaddr));
 570  if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1) {
 571  req->setFlags(
// NOTE(review): the setFlags() argument (doxygen line 572) was dropped by
// the doc extractor -- presumably the uncacheable/strict-order request
// flags; confirm against the gem5 tree.
 573  }
 574  DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
 575  return NoFault;
 576  } // if matched
 577  } // if cache entry valid
 578  if (cacheEntry[1]) {
 579  TlbEntry *ce = cacheEntry[1];
 580  Addr ce_va = ce->range.va;
 581  if (cacheAsi[1] == asi &&
 582  ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
 583  (!write || ce->pte.writable())) {
 584  req->setPaddr(ce->pte.translate(vaddr));
 585  if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1) {
 586  req->setFlags(
// NOTE(review): the setFlags() argument (doxygen line 587) was dropped by
// the doc extractor -- same flags as the first cache entry above.
 588  }
 589  DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
 590  return NoFault;
 591  } // if matched
 592  } // if cache entry valid
 593  }
 594  }
 595 
    // Decode the remaining MMU state from the TLB_DATA pseudo-register.
 596  bool red = bits(tlbdata,1,1);
 597  bool priv = bits(tlbdata,2,2);
 598  bool addr_mask = bits(tlbdata,3,3);
 599  bool lsu_dm = bits(tlbdata,5,5);
 600 
 601  int part_id = bits(tlbdata,15,8);
 602  int tl = bits(tlbdata,18,16);
 603  int pri_context = bits(tlbdata,47,32);
 604  int sec_context = bits(tlbdata,63,48);
 605 
 606  bool real = false;
 607  ContextType ct = Primary;
 608  int context = 0;
 609 
 610  TlbEntry *e;
 611 
 612  DPRINTF(TLB, "TLB: priv:%d hpriv:%d red:%d lsudm:%d part_id: %#X\n",
 613  priv, hpriv, red, lsu_dm, part_id);
 614 
    // Pick the effective ASI and context from the trap level (implicit
    // accesses) or from the explicit ASI's class.
 615  if (implicit) {
 616  if (tl > 0) {
 617  asi = ASI_N;
 618  ct = Nucleus;
 619  context = 0;
 620  } else {
 621  asi = ASI_P;
 622  ct = Primary;
 623  context = pri_context;
 624  }
 625  } else {
 626  // We need to check for priv level/asi priv
 627  if (!priv && !hpriv && !asiIsUnPriv(asi)) {
 628  // It appears that context should be Nucleus in these cases?
 629  writeSfsr(vaddr, write, Nucleus, false, IllegalAsi, asi);
 630  return std::make_shared<PrivilegedAction>();
 631  }
 632 
 633  if (!hpriv && asiIsHPriv(asi)) {
 634  writeSfsr(vaddr, write, Nucleus, false, IllegalAsi, asi);
 635  return std::make_shared<DataAccessException>();
 636  }
 637 
 638  if (asiIsPrimary(asi)) {
 639  context = pri_context;
 640  ct = Primary;
 641  } else if (asiIsSecondary(asi)) {
 642  context = sec_context;
 643  ct = Secondary;
 644  } else if (asiIsNucleus(asi)) {
 645  ct = Nucleus;
 646  context = 0;
 647  } else { // ????
 648  ct = Primary;
 649  context = pri_context;
 650  }
 651  }
 652 
    // Dispatch special-register ASIs to their handlers below.
 653  if (!implicit && asi != ASI_P && asi != ASI_S) {
 654  if (asiIsLittle(asi))
 655  panic("Little Endian ASIs not supported\n");
 656 
 657  if (asiIsPartialStore(asi))
 658  panic("Partial Store ASIs not supported\n");
 659 
 660  if (asiIsCmt(asi))
 661  panic("Cmt ASI registers not implmented\n");
 662 
 663  if (asiIsInterrupt(asi))
 664  goto handleIntRegAccess;
 665  if (asiIsMmu(asi))
 666  goto handleMmuRegAccess;
 667  if (asiIsScratchPad(asi))
 668  goto handleScratchRegAccess;
 669  if (asiIsQueue(asi))
 670  goto handleQueueRegAccess;
 671  if (asiIsSparcError(asi))
 672  goto handleSparcErrorRegAccess;
 673 
 674  if (!asiIsReal(asi) && !asiIsNucleus(asi) && !asiIsAsIfUser(asi) &&
 675  !asiIsTwin(asi) && !asiIsBlock(asi) && !asiIsNoFault(asi))
 676  panic("Accessing ASI %#X. Should we?\n", asi);
 677  }
 678 
 679  // If the asi is unaligned trap
 680  if (unaligned) {
 681  writeSfsr(vaddr, false, ct, false, OtherFault, asi);
 682  return std::make_shared<MemAddressNotAligned>();
 683  }
 684 
 685  if (addr_mask)
 686  vaddr = vaddr & VAddrAMask;
 687 
 688  if (!validVirtualAddress(vaddr, addr_mask)) {
 689  writeSfsr(vaddr, false, ct, true, VaOutOfRange, asi);
 690  return std::make_shared<DataAccessException>();
 691  }
 692 
    // MMU disabled (and not hpriv/red) or a real ASI: use real lookup.
 693  if ((!lsu_dm && !hpriv && !red) || asiIsReal(asi)) {
 694  real = true;
 695  context = 0;
 696  }
 697 
    // Hyperprivileged accesses (other than as-if-user/real) bypass the TLB.
 698  if (hpriv && (implicit || (!asiIsAsIfUser(asi) && !asiIsReal(asi)))) {
 699  req->setPaddr(vaddr & PAddrImplMask);
 700  return NoFault;
 701  }
 702 
 703  e = lookup(vaddr, part_id, real, context);
 704 
 705  if (e == NULL || !e->valid) {
 706  writeTagAccess(vaddr, context);
 707  DPRINTF(TLB, "TLB: DTB Failed to find matching TLB entry\n");
 708  if (real) {
 709  return std::make_shared<DataRealTranslationMiss>();
 710  } else {
 711  if (FullSystem)
 712  return std::make_shared<FastDataAccessMMUMiss>();
 713  else
 714  return std::make_shared<FastDataAccessMMUMiss>(
 715  req->getVaddr());
 716  }
 717 
 718  }
 719 
 720  if (!priv && e->pte.priv()) {
 721  writeTagAccess(vaddr, context);
 722  writeSfsr(vaddr, write, ct, e->pte.sideffect(), PrivViolation, asi);
 723  return std::make_shared<DataAccessException>();
 724  }
 725 
 726  if (write && !e->pte.writable()) {
 727  writeTagAccess(vaddr, context);
 728  writeSfsr(vaddr, write, ct, e->pte.sideffect(), OtherFault, asi);
 729  return std::make_shared<FastDataAccessProtection>();
 730  }
 731 
 732  if (e->pte.nofault() && !asiIsNoFault(asi)) {
 733  writeTagAccess(vaddr, context);
 734  writeSfsr(vaddr, write, ct, e->pte.sideffect(), LoadFromNfo, asi);
 735  return std::make_shared<DataAccessException>();
 736  }
 737 
 738  if (e->pte.sideffect() && asiIsNoFault(asi)) {
 739  writeTagAccess(vaddr, context);
 740  writeSfsr(vaddr, write, ct, e->pte.sideffect(), SideEffect, asi);
 741  return std::make_shared<DataAccessException>();
 742  }
 743 
 744  if (e->pte.sideffect() || (e->pte.paddr() >> 39) & 1)
// NOTE(review): the body of this if (doxygen line 745) was dropped by the
// doc extractor -- presumably a req->setFlags(...) like the cached fast
// paths above; confirm against the gem5 tree.
 746 
 747  // cache translation data for the next translation
 748  cacheState = tlbdata;
 749  if (!cacheValid) {
 750  cacheEntry[1] = NULL;
 751  cacheEntry[0] = NULL;
 752  }
 753 
    // Maintain a tiny two-entry MRU cache of recent translations.
 754  if (cacheEntry[0] != e && cacheEntry[1] != e) {
 755  cacheEntry[1] = cacheEntry[0];
 756  cacheEntry[0] = e;
 757  cacheAsi[1] = cacheAsi[0];
 758  cacheAsi[0] = asi;
 759  if (implicit)
 760  cacheAsi[0] = (ASI)0;
 761  }
 762  cacheValid = true;
 763  req->setPaddr(e->pte.translate(vaddr));
 764  DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
 765  return NoFault;
 766 
// Interrupt-register ASIs: only legal from hpriv, and only in the
// matching read/write direction.
 768 handleIntRegAccess:
 769  if (!hpriv) {
 770  writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
 771  if (priv)
 772  return std::make_shared<DataAccessException>();
 773  else
 774  return std::make_shared<PrivilegedAction>();
 775  }
 776 
 777  if ((asi == ASI_SWVR_UDB_INTR_W && !write) ||
 778  (asi == ASI_SWVR_UDB_INTR_R && write)) {
 779  writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
 780  return std::make_shared<DataAccessException>();
 781  }
 782 
 783  goto regAccessOk;
 784 
 785 
// Scratchpad registers: limited VA range, part of it hpriv-only.
 786 handleScratchRegAccess:
 787  if (vaddr > 0x38 || (vaddr >= 0x20 && vaddr < 0x30 && !hpriv)) {
 788  writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
 789  return std::make_shared<DataAccessException>();
 790  }
 791  goto regAccessOk;
 792 
// CPU/dev mondo queue registers: privileged, aligned, fixed VA window.
 793 handleQueueRegAccess:
 794  if (!priv && !hpriv) {
 795  writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
 796  return std::make_shared<PrivilegedAction>();
 797  }
 798  if ((!hpriv && vaddr & 0xF) || vaddr > 0x3f8 || vaddr < 0x3c0) {
 799  writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
 800  return std::make_shared<DataAccessException>();
 801  }
 802  goto regAccessOk;
 803 
// SPARC error registers: hpriv-only.
 804 handleSparcErrorRegAccess:
 805  if (!hpriv) {
 806  writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
 807  if (priv)
 808  return std::make_shared<DataAccessException>();
 809  else
 810  return std::make_shared<PrivilegedAction>();
 811  }
 812  goto regAccessOk;
 813 
 814 
// All register ASIs funnel here: route the access to the local MMU
// register read/write handlers instead of memory.
 815 regAccessOk:
 816 handleMmuRegAccess:
 817  DPRINTF(TLB, "TLB: DTB Translating local access\n");
 818  req->setLocalAccessor(
 819  [this,write](ThreadContext *tc, PacketPtr pkt) -> Cycles
 820  {
 821  return write ? doMmuRegWrite(tc, pkt) : doMmuRegRead(tc, pkt);
 822  }
 823  );
 824  req->setPaddr(req->getVaddr());
 825  return NoFault;
 826 };
827 
828 Fault
830 {
831  if (mode == Execute)
832  return translateInst(req, tc);
833  else
834  return translateData(req, tc, mode == Write);
835 }
836 
 837 Fault
// NOTE(review): the signature line (doxygen line 838) was dropped by the
// doc extractor and this name is not called elsewhere in the visible file;
// from the fill-handler behavior this is presumably the functional
// translation entry point -- confirm against the gem5 tree.
 839 {
 840  Addr vaddr = req->getVaddr();
 841 
 842  // Here we have many options and are really implementing something like
 843  // a fill handler to find the address since there isn't a multilevel
 844  // table for us to walk around.
 845  //
 846  // 1. We are currently hyperpriv, return the address unmodified
 847  // 2. The mmu is off return(ra->pa)
 848  // 3. We are currently priv, use ctx0* tsbs to find the page
 849  // 4. We are not priv, use ctxN0* tsbs to find the page
 850  // For all accesses we check the tlbs first since it's possible that
 851  // long standing pages (e.g. locked kernel mappings) won't be in the tsb
 852  uint64_t tlbdata = tc->readMiscRegNoEffect(MISCREG_TLB_DATA);
 853 
 854  bool hpriv = bits(tlbdata,0,0);
 855  // bool priv = bits(tlbdata,2,2);
 856  bool addr_mask = bits(tlbdata,3,3);
 857  bool data_real = !bits(tlbdata,5,5);
 858  bool inst_real = !bits(tlbdata,4,4);
 859  bool ctx_zero = bits(tlbdata,18,16) > 0;
 860  int part_id = bits(tlbdata,15,8);
 861  int pri_context = bits(tlbdata,47,32);
 862  // int sec_context = bits(tlbdata,63,48);
 863 
 864  bool real = (mode == Execute) ? inst_real : data_real;
 865 
 866  TlbEntry* tbe;
 867  PageTableEntry pte;
 868  Addr tsbs[4];
 869  Addr va_tag;
 870  TteTag ttetag;
 871 
 872  if (hpriv) {
 873  req->setPaddr(vaddr);
 874  return NoFault;
 875  }
 876 
 877  if (addr_mask)
 878  vaddr = vaddr & VAddrAMask;
 879 
 880  if (!validVirtualAddress(vaddr, addr_mask)) {
 881  if (mode == Execute)
 882  return std::make_shared<InstructionAccessException>();
 883  else
 884  return std::make_shared<DataAccessException>();
 885  }
 886 
    // TLB first; update_used=false so this fake access doesn't disturb
    // the replacement state.
 887  tbe = lookup(vaddr, part_id, real, ctx_zero ? 0 : pri_context, false);
 888  if (tbe) {
 889  pte = tbe->pte;
 890  DPRINTF(TLB, "Virtual(%#x)->Physical(%#x) found in TLB\n", vaddr,
 891  pte.translate(vaddr));
 892  req->setPaddr(pte.translate(vaddr));
 893  return NoFault;
 894  }
 895 
 896  if (!FullSystem)
 897  return tc->getProcessPtr()->pTable->translate(req);
 898 
 899  PortProxy &mem = tc->getPhysProxy();
 900  // We didn't find it in the tlbs, so lets look at the TSBs
 901  GetTsbPtr(tc, vaddr, ctx_zero ? 0 : pri_context, tsbs);
 902  va_tag = bits(vaddr, 63, 22);
 903  for (int x = 0; x < 4; x++) {
 904  ttetag = betoh(mem.read<uint64_t>(tsbs[x]));
 905  if (ttetag.valid() && ttetag.va() == va_tag) {
// NOTE(review): adding sizeof(uint64_t) to the *value* read from memory
// looks wrong -- the data word of a TTE normally follows the tag, so the
// offset would belong inside the read() address. Confirm upstream.
 906  uint64_t entry = mem.read<uint64_t>(tsbs[x]) + sizeof(uint64_t);
 907  // I think it's sun4v at least!
 908  pte.populate(betoh(entry), PageTableEntry::sun4v);
 909  DPRINTF(TLB, "Virtual(%#x)->Physical(%#x) found in TTE\n",
 910  vaddr, pte.translate(vaddr));
 911  req->setPaddr(pte.translate(vaddr));
 912  return NoFault;
 913  }
 914  }
 915 
    // Nothing matched: report the appropriate miss fault for the mode.
 916  if (mode == Execute) {
 917  if (real)
 918  return std::make_shared<InstructionRealTranslationMiss>();
 919  else if (FullSystem)
 920  return std::make_shared<FastInstructionAccessMMUMiss>();
 921  else
 922  return std::make_shared<FastInstructionAccessMMUMiss>(vaddr);
 923  } else {
 924  if (real)
 925  return std::make_shared<DataRealTranslationMiss>();
 926  else if (FullSystem)
 927  return std::make_shared<FastDataAccessMMUMiss>();
 928  else
 929  return std::make_shared<FastDataAccessMMUMiss>(vaddr);
 930  }
 931 }
932 
 933 void
// NOTE(review): the first signature line (doxygen line 934) was dropped by
// the doc extractor; the visible continuation shows the trailing
// (Translation *translation, Mode mode) parameters. Timing translation is
// implemented by running the atomic translation immediately and finishing
// the callback with its fault result.
 935  Translation *translation, Mode mode)
 936 {
 937  assert(translation);
 938  translation->finish(translateAtomic(req, tc, mode), req, tc, mode);
 939 }
940 
 941 Fault
// NOTE(review): the first signature line (doxygen line 942) was dropped by
// the doc extractor; the visible continuation shows trailing
// (ThreadContext *tc, Mode mode) const parameters. No post-translation
// fixup is needed on SPARC, so this always succeeds.
 943  ThreadContext *tc, Mode mode) const
 944 {
 945  return NoFault;
 946 }
947 
 948 Cycles
// NOTE(review): the signature line (doxygen line 949) was dropped by the
// doc extractor; the call `doMmuRegRead(tc, pkt)` earlier in this file
// grounds the name and arity. Handles reads of memory-mapped MMU/IPR
// registers, dispatching on the packet's ASI and VA, and completes the
// packet as an atomic response taking one cycle.
// NOTE(review): many hyperlinked lines were dropped by the extractor --
// most `case ASI_...:` labels between doxygen lines 963 and 1098 and
// several `pkt->setBE(...)` / `tc->readMiscReg(...)` statements (e.g.
// lines 963, 968, 971, 978, 1068). The surviving bodies show which
// register each arm returns; confirm the labels against the gem5 tree.
 950 {
 951  Addr va = pkt->getAddr();
 952  ASI asi = (ASI)pkt->req->getArchFlags();
 953  uint64_t temp;
 954 
 955  DPRINTF(IPR, "Memory Mapped IPR Read: asi=%#X a=%#x\n",
 956  (uint32_t)pkt->req->getArchFlags(), pkt->getAddr());
 957 
    // This object is the DTB; fetch the ITB so its registers can be
    // read through the I-MMU ASIs below.
 958  TLB *itb = dynamic_cast<TLB *>(tc->getITBPtr());
 959 
 960  switch (asi) {
 961  case ASI_LSU_CONTROL_REG:
 962  assert(va == 0);
 964  break;
 965  case ASI_MMU:
 966  switch (va) {
 967  case 0x8:
 969  break;
 970  case 0x10:
 972  break;
 973  default:
 974  goto doMmuReadError;
 975  }
 976  break;
 977  case ASI_QUEUE:
 979  (va >> 4) - 0x3c));
 980  break;
 982  assert(va == 0);
 983  pkt->setBE(c0_tsb_ps0);
 984  break;
 986  assert(va == 0);
 987  pkt->setBE(c0_tsb_ps1);
 988  break;
 990  assert(va == 0);
 991  pkt->setBE(c0_config);
 992  break;
 994  assert(va == 0);
 995  pkt->setBE(itb->c0_tsb_ps0);
 996  break;
 998  assert(va == 0);
 999  pkt->setBE(itb->c0_tsb_ps1);
 1000  break;
 1002  assert(va == 0);
 1003  pkt->setBE(itb->c0_config);
 1004  break;
 1006  assert(va == 0);
 1007  pkt->setBE(cx_tsb_ps0);
 1008  break;
 1010  assert(va == 0);
 1011  pkt->setBE(cx_tsb_ps1);
 1012  break;
 1014  assert(va == 0);
 1015  pkt->setBE(cx_config);
 1016  break;
 1018  assert(va == 0);
 1019  pkt->setBE(itb->cx_tsb_ps0);
 1020  break;
 1022  assert(va == 0);
 1023  pkt->setBE(itb->cx_tsb_ps1);
 1024  break;
 1026  assert(va == 0);
 1027  pkt->setBE(itb->cx_config);
 1028  break;
 1030  pkt->setBE((uint64_t)0);
 1031  break;
 1032  case ASI_HYP_SCRATCHPAD:
 1033  case ASI_SCRATCHPAD:
 1034  pkt->setBE(tc->readMiscReg(MISCREG_SCRATCHPAD_R0 + (va >> 3)));
 1035  break;
 1036  case ASI_IMMU:
 1037  switch (va) {
 1038  case 0x0:
        // Tag target format: VA tag in the low bits, context in 63..48.
 1039  temp = itb->tag_access;
 1040  pkt->setBE(bits(temp,63,22) | bits(temp,12,0) << 48);
 1041  break;
 1042  case 0x18:
 1043  pkt->setBE(itb->sfsr);
 1044  break;
 1045  case 0x30:
 1046  pkt->setBE(itb->tag_access);
 1047  break;
 1048  default:
 1049  goto doMmuReadError;
 1050  }
 1051  break;
 1052  case ASI_DMMU:
 1053  switch (va) {
 1054  case 0x0:
 1055  temp = tag_access;
 1056  pkt->setBE(bits(temp,63,22) | bits(temp,12,0) << 48);
 1057  break;
 1058  case 0x18:
 1059  pkt->setBE(sfsr);
 1060  break;
 1061  case 0x20:
 1062  pkt->setBE(sfar);
 1063  break;
 1064  case 0x30:
 1065  pkt->setBE(tag_access);
 1066  break;
 1067  case 0x80:
 1069  break;
 1070  default:
 1071  goto doMmuReadError;
 1072  }
 1073  break;
// The next four arms compute D-TSB / I-TSB pointers for page sizes 0/1.
 1075  pkt->setBE(MakeTsbPtr(Ps0,
 1076  tag_access,
 1077  c0_tsb_ps0,
 1078  c0_config,
 1079  cx_tsb_ps0,
 1080  cx_config));
 1081  break;
 1083  pkt->setBE(MakeTsbPtr(Ps1,
 1084  tag_access,
 1085  c0_tsb_ps1,
 1086  c0_config,
 1087  cx_tsb_ps1,
 1088  cx_config));
 1089  break;
 1091  pkt->setBE(MakeTsbPtr(Ps0,
 1092  itb->tag_access,
 1093  itb->c0_tsb_ps0,
 1094  itb->c0_config,
 1095  itb->cx_tsb_ps0,
 1096  itb->cx_config));
 1097  break;
 1099  pkt->setBE(MakeTsbPtr(Ps1,
 1100  itb->tag_access,
 1101  itb->c0_tsb_ps1,
 1102  itb->c0_config,
 1103  itb->cx_tsb_ps1,
 1104  itb->cx_config));
 1105  break;
 1106  case ASI_SWVR_INTR_RECEIVE:
 1107  {
 1108  SparcISA::Interrupts * interrupts =
 1109  dynamic_cast<SparcISA::Interrupts *>(
 1110  tc->getCpuPtr()->getInterruptController(0));
 1111  pkt->setBE(interrupts->get_vec(IT_INT_VEC));
 1112  }
 1113  break;
 1114  case ASI_SWVR_UDB_INTR_R:
 1115  {
        // Reading this register acknowledges (clears) the highest
        // pending interrupt vector and returns its number.
 1116  SparcISA::Interrupts * interrupts =
 1117  dynamic_cast<SparcISA::Interrupts *>(
 1118  tc->getCpuPtr()->getInterruptController(0));
 1119  temp = findMsbSet(interrupts->get_vec(IT_INT_VEC));
 1120  tc->getCpuPtr()->clearInterrupt(0, IT_INT_VEC, temp);
 1121  pkt->setBE(temp);
 1122  }
 1123  break;
 1124  default:
 1125 doMmuReadError:
 1126  panic("need to impl DTB::doMmuRegRead() got asi=%#x, va=%#x\n",
 1127  (uint32_t)asi, va);
 1128  }
 1129  pkt->makeAtomicResponse();
 1130  return Cycles(1);
 1131 }
1132 
1133 Cycles
1135 {
1136  uint64_t data = pkt->getBE<uint64_t>();
1137  Addr va = pkt->getAddr();
1138  ASI asi = (ASI)pkt->req->getArchFlags();
1139 
1140  Addr ta_insert;
1141  Addr va_insert;
1142  Addr ct_insert;
1143  int part_insert;
1144  int entry_insert = -1;
1145  bool real_insert;
1146  bool ignore;
1147  int part_id;
1148  int ctx_id;
1149  PageTableEntry pte;
1150 
1151  DPRINTF(IPR, "Memory Mapped IPR Write: asi=%#X a=%#x d=%#X\n",
1152  (uint32_t)asi, va, data);
1153 
1154  TLB *itb = dynamic_cast<TLB *>(tc->getITBPtr());
1155 
1156  switch (asi) {
1157  case ASI_LSU_CONTROL_REG:
1158  assert(va == 0);
1159  tc->setMiscReg(MISCREG_MMU_LSU_CTRL, data);
1160  break;
1161  case ASI_MMU:
1162  switch (va) {
1163  case 0x8:
1164  tc->setMiscReg(MISCREG_MMU_P_CONTEXT, data);
1165  break;
1166  case 0x10:
1167  tc->setMiscReg(MISCREG_MMU_S_CONTEXT, data);
1168  break;
1169  default:
1170  goto doMmuWriteError;
1171  }
1172  break;
1173  case ASI_QUEUE:
1174  assert(mbits(data,13,6) == data);
1176  (va >> 4) - 0x3c, data);
1177  break;
1179  assert(va == 0);
1180  c0_tsb_ps0 = data;
1181  break;
1183  assert(va == 0);
1184  c0_tsb_ps1 = data;
1185  break;
1187  assert(va == 0);
1188  c0_config = data;
1189  break;
1191  assert(va == 0);
1192  itb->c0_tsb_ps0 = data;
1193  break;
1195  assert(va == 0);
1196  itb->c0_tsb_ps1 = data;
1197  break;
1199  assert(va == 0);
1200  itb->c0_config = data;
1201  break;
1203  assert(va == 0);
1204  cx_tsb_ps0 = data;
1205  break;
1207  assert(va == 0);
1208  cx_tsb_ps1 = data;
1209  break;
1211  assert(va == 0);
1212  cx_config = data;
1213  break;
1215  assert(va == 0);
1216  itb->cx_tsb_ps0 = data;
1217  break;
1219  assert(va == 0);
1220  itb->cx_tsb_ps1 = data;
1221  break;
1223  assert(va == 0);
1224  itb->cx_config = data;
1225  break;
1228  inform("Ignoring write to SPARC ERROR regsiter\n");
1229  break;
1230  case ASI_HYP_SCRATCHPAD:
1231  case ASI_SCRATCHPAD:
1232  tc->setMiscReg(MISCREG_SCRATCHPAD_R0 + (va >> 3), data);
1233  break;
1234  case ASI_IMMU:
1235  switch (va) {
1236  case 0x18:
1237  itb->sfsr = data;
1238  break;
1239  case 0x30:
1240  sext<59>(bits(data, 59,0));
1241  itb->tag_access = data;
1242  break;
1243  default:
1244  goto doMmuWriteError;
1245  }
1246  break;
1248  entry_insert = bits(va, 8,3);
1250  case ASI_ITLB_DATA_IN_REG:
1251  assert(entry_insert != -1 || mbits(va,10,9) == va);
1252  ta_insert = itb->tag_access;
1253  va_insert = mbits(ta_insert, 63,13);
1254  ct_insert = mbits(ta_insert, 12,0);
1255  part_insert = tc->readMiscReg(MISCREG_MMU_PART_ID);
1256  real_insert = bits(va, 9,9);
1257  pte.populate(data, bits(va,10,10) ? PageTableEntry::sun4v :
1259  itb->insert(va_insert, part_insert, ct_insert, real_insert,
1260  pte, entry_insert);
1261  break;
1263  entry_insert = bits(va, 8,3);
1265  case ASI_DTLB_DATA_IN_REG:
1266  assert(entry_insert != -1 || mbits(va,10,9) == va);
1267  ta_insert = tag_access;
1268  va_insert = mbits(ta_insert, 63,13);
1269  ct_insert = mbits(ta_insert, 12,0);
1270  part_insert = tc->readMiscReg(MISCREG_MMU_PART_ID);
1271  real_insert = bits(va, 9,9);
1272  pte.populate(data, bits(va,10,10) ? PageTableEntry::sun4v :
1273  PageTableEntry::sun4u);
1274  insert(va_insert, part_insert, ct_insert, real_insert, pte,
1275  entry_insert);
1276  break;
1277  case ASI_IMMU_DEMAP:
1278  ignore = false;
1279  ctx_id = -1;
1280  part_id = tc->readMiscReg(MISCREG_MMU_PART_ID);
1281  switch (bits(va,5,4)) {
1282  case 0:
1283  ctx_id = tc->readMiscReg(MISCREG_MMU_P_CONTEXT);
1284  break;
1285  case 1:
1286  ignore = true;
1287  break;
1288  case 3:
1289  ctx_id = 0;
1290  break;
1291  default:
1292  ignore = true;
1293  }
1294 
1295  switch (bits(va,7,6)) {
1296  case 0: // demap page
1297  if (!ignore)
1298  itb->demapPage(mbits(va,63,13), part_id, bits(va,9,9), ctx_id);
1299  break;
1300  case 1: // demap context
1301  if (!ignore)
1302  itb->demapContext(part_id, ctx_id);
1303  break;
1304  case 2:
1305  itb->demapAll(part_id);
1306  break;
1307  default:
1308  panic("Invalid type for IMMU demap\n");
1309  }
1310  break;
1311  case ASI_DMMU:
1312  switch (va) {
1313  case 0x18:
1314  sfsr = data;
1315  break;
1316  case 0x30:
1317  sext<59>(bits(data, 59,0));
1318  tag_access = data;
1319  break;
1320  case 0x80:
1321  tc->setMiscReg(MISCREG_MMU_PART_ID, data);
1322  break;
1323  default:
1324  goto doMmuWriteError;
1325  }
1326  break;
1327  case ASI_DMMU_DEMAP:
1328  ignore = false;
1329  ctx_id = -1;
1330  part_id = tc->readMiscReg(MISCREG_MMU_PART_ID);
1331  switch (bits(va,5,4)) {
1332  case 0:
1333  ctx_id = tc->readMiscReg(MISCREG_MMU_P_CONTEXT);
1334  break;
1335  case 1:
1336  ctx_id = tc->readMiscReg(MISCREG_MMU_S_CONTEXT);
1337  break;
1338  case 3:
1339  ctx_id = 0;
1340  break;
1341  default:
1342  ignore = true;
1343  }
1344 
1345  switch (bits(va,7,6)) {
1346  case 0: // demap page
1347  if (!ignore)
1348  demapPage(mbits(va,63,13), part_id, bits(va,9,9), ctx_id);
1349  break;
1350  case 1: // demap context
1351  if (!ignore)
1352  demapContext(part_id, ctx_id);
1353  break;
1354  case 2:
1355  demapAll(part_id);
1356  break;
1357  default:
1358  panic("Invalid type for IMMU demap\n");
1359  }
1360  break;
1361  case ASI_SWVR_INTR_RECEIVE:
1362  {
1363  int msb;
1364  // clear all the interrupts that aren't set in the write
1365  SparcISA::Interrupts * interrupts =
1366  dynamic_cast<SparcISA::Interrupts *>(
1367  tc->getCpuPtr()->getInterruptController(0));
1368  while (interrupts->get_vec(IT_INT_VEC) & data) {
1369  msb = findMsbSet(interrupts->get_vec(IT_INT_VEC) & data);
1370  tc->getCpuPtr()->clearInterrupt(0, IT_INT_VEC, msb);
1371  }
1372  }
1373  break;
1374  case ASI_SWVR_UDB_INTR_W:
1375  tc->getSystemPtr()->threadContexts[bits(data,12,8)]->getCpuPtr()->
1376  postInterrupt(0, bits(data, 5, 0), 0);
1377  break;
1378  default:
1379 doMmuWriteError:
1380  panic("need to impl DTB::doMmuRegWrite() got asi=%#x, va=%#x d=%#x\n",
1381  (uint32_t)pkt->req->getArchFlags(), pkt->getAddr(), data);
1382  }
1383  pkt->makeAtomicResponse();
1384  return Cycles(1);
1385 }
1386 
1387 void
1389 {
1390  uint64_t tag_access = mbits(addr,63,13) | mbits(ctx,12,0);
1391  TLB *itb = dynamic_cast<TLB *>(tc->getITBPtr());
1392  ptrs[0] = MakeTsbPtr(Ps0, tag_access,
1393  c0_tsb_ps0,
1394  c0_config,
1395  cx_tsb_ps0,
1396  cx_config);
1397  ptrs[1] = MakeTsbPtr(Ps1, tag_access,
1398  c0_tsb_ps1,
1399  c0_config,
1400  cx_tsb_ps1,
1401  cx_config);
1402  ptrs[2] = MakeTsbPtr(Ps0, tag_access,
1403  itb->c0_tsb_ps0,
1404  itb->c0_config,
1405  itb->cx_tsb_ps0,
1406  itb->cx_config);
1407  ptrs[3] = MakeTsbPtr(Ps1, tag_access,
1408  itb->c0_tsb_ps1,
1409  itb->c0_config,
1410  itb->cx_tsb_ps1,
1411  itb->cx_config);
1412 }
1413 
1414 uint64_t
1415 TLB::MakeTsbPtr(TsbPageSize ps, uint64_t tag_access, uint64_t c0_tsb,
1416  uint64_t c0_config, uint64_t cX_tsb, uint64_t cX_config)
1417 {
1418  uint64_t tsb;
1419  uint64_t config;
1420 
1421  if (bits(tag_access, 12,0) == 0) {
1422  tsb = c0_tsb;
1423  config = c0_config;
1424  } else {
1425  tsb = cX_tsb;
1426  config = cX_config;
1427  }
1428 
1429  uint64_t ptr = mbits(tsb,63,13);
1430  bool split = bits(tsb,12,12);
1431  int tsb_size = bits(tsb,3,0);
1432  int page_size = (ps == Ps0) ? bits(config, 2,0) : bits(config,10,8);
1433 
1434  if (ps == Ps1 && split)
1435  ptr |= ULL(1) << (13 + tsb_size);
1436  ptr |= (tag_access >> (9 + page_size * 3)) & mask(12+tsb_size, 4);
1437 
1438  return ptr;
1439 }
1440 
1441 void
1443 {
1447 
1448  // convert the pointer based free list into an index based one
1449  std::vector<int> free_list;
1450  for (const TlbEntry *entry : freeList)
1451  free_list.push_back(entry - tlb);
1452 
1453  SERIALIZE_CONTAINER(free_list);
1454 
1464 
1465  for (int x = 0; x < size; x++) {
1466  ScopedCheckpointSection sec(cp, csprintf("PTE%d", x));
1467  tlb[x].serialize(cp);
1468  }
1469 }
1470 
1471 void
1473 {
1474  int oldSize;
1475 
1476  paramIn(cp, "size", oldSize);
1477  if (oldSize != size)
1478  panic("Don't support unserializing different sized TLBs\n");
1481 
1482  std::vector<int> free_list;
1483  UNSERIALIZE_CONTAINER(free_list);
1484  freeList.clear();
1485  for (int idx : free_list)
1486  freeList.push_back(&tlb[idx]);
1487 
1496 
1497  lookupTable.clear();
1498  for (int x = 0; x < size; x++) {
1499  ScopedCheckpointSection sec(cp, csprintf("PTE%d", x));
1500  tlb[x].unserialize(cp);
1501  if (tlb[x].valid)
1502  lookupTable.insert(tlb[x].range, &tlb[x]);
1503 
1504  }
1506 }
1507 
1508 } // namespace SparcISA
1509 
1510 SparcISA::TLB *
1511 SparcTLBParams::create()
1512 {
1513  return new SparcISA::TLB(this);
1514 }
void demapContext(int partition_id, int context_id)
Remove all entries that match a given context/partition id.
Definition: tlb.cc:281
Addr translate(Addr vaddr) const
Definition: pagetable.hh:173
static void ignore(const char *expr)
Definition: debug.cc:71
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:163
bool asiIsReal(ASI asi)
Definition: asi.cc:139
#define DPRINTF(x,...)
Definition: trace.hh:225
bool asiIsSparcError(ASI asi)
Definition: asi.cc:309
uint64_t cx_config
Definition: tlb.hh:65
virtual void setMiscReg(RegIndex misc_reg, RegVal val)=0
void writeSfsr(bool write, ContextType ct, bool se, FaultTypes ft, int asi)
Definition: tlb.cc:376
virtual System * getSystemPtr()=0
#define UNSERIALIZE_CONTAINER(member)
Definition: serialize.hh:829
int usedEntries
Definition: tlb.hh:76
decltype(nullptr) constexpr NoFault
Definition: types.hh:243
Cycles is a wrapper class for representing cycle counts, i.e.
Definition: types.hh:81
iterator insert(TlbRange &r, TlbEntry *d)
Definition: tlb_map.hh:94
#define fatal(...)
This implements a cprintf based fatal() function.
Definition: logging.hh:171
Bitfield< 7 > i
const Addr PAddrImplMask
Definition: tlb.hh:48
const Addr VAddrAMask
Definition: tlb.hh:47
uint32_t MachInst
Definition: types.hh:38
size_t size()
Definition: tlb_map.hh:139
Declaration of a request, the overall memory request consisting of the parts of the request that are ...
Bitfield< 3 > am
Definition: miscregs.hh:127
std::shared_ptr< Request > RequestPtr
Definition: request.hh:81
Bitfield< 8 > a
const Addr EndVAddrHole
Definition: tlb.hh:46
ip6_addr_t addr
Definition: inet.hh:330
Fault translateData(const RequestPtr &req, ThreadContext *tc, bool write)
Definition: tlb.cc:526
uint64_t c0_config
Definition: tlb.hh:62
void clearInterrupt(ThreadID tid, int int_num, int index)
Definition: base.hh:242
T read(Addr address) const
Read sizeof(T) bytes from address and return as object T.
Definition: port_proxy.hh:282
Bitfield< 29, 28 > ce
bool FullSystem
The FullSystem variable can be used to determine the current mode of simulation.
Definition: root.cc:132
void clearUsedBits()
Definition: tlb.cc:83
size_t erase(TlbRange k)
Definition: tlb_map.hh:103
virtual Process * getProcessPtr()=0
bool validVirtualAddress(Addr va, bool am)
Checks if the virtual address provided is a valid one.
Definition: tlb.cc:366
uint64_t MakeTsbPtr(TsbPageSize ps, uint64_t tag_access, uint64_t c0_tsb, uint64_t c0_config, uint64_t cX_tsb, uint64_t cX_config)
Definition: tlb.cc:1415
virtual BaseCPU * getCpuPtr()=0
Definition: cprintf.cc:40
Bitfield< 4, 0 > mode
TlbEntry * lookup(Addr va, int partition_id, bool real, int context_id=0, bool update_used=true)
lookup an entry in the TLB based on the partition id, and real bit if real is true or the partition i...
Definition: tlb.cc:191
MMU Internal Registers.
Definition: miscregs.hh:86
void setBE(T v)
Set the value in the data pointer to v as big endian.
ASI cacheAsi[2]
Definition: tlb.hh:199
ThreadContext is the external interface to all thread state for anything outside of the CPU...
Fault translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode) override
Definition: tlb.cc:829
#define DPRINTFN(...)
Definition: trace.hh:229
Cycles doMmuRegRead(ThreadContext *tc, Packet *pkt)
Definition: tlb.cc:949
The request is to an uncacheable address.
Definition: request.hh:113
void unserialize(CheckpointIn &cp) override
Unserialize an object.
Definition: tlb.cc:1472
void populate(uint64_t e, EntryType t=sun4u)
Definition: pagetable.hh:96
RequestPtr req
A pointer to the original request.
Definition: packet.hh:321
Bitfield< 3 > x
Definition: pagetable.hh:69
Bitfield< 2 > hpriv
Definition: miscregs.hh:118
void dumpAll()
Definition: tlb.cc:235
Definition: tlb.hh:50
#define UNSERIALIZE_SCALAR(scalar)
Definition: serialize.hh:770
bool asiIsQueue(ASI asi)
Definition: asi.cc:253
#define inform(...)
Definition: logging.hh:209
Bitfield< 23, 20 > tl
Cycles doMmuRegWrite(ThreadContext *tc, Packet *pkt)
Definition: tlb.cc:1134
virtual PortProxy & getPhysProxy()=0
std::list< TlbEntry * > freeList
Definition: tlb.hh:82
TlbMap::iterator MapIter
Definition: tlb.hh:70
std::string csprintf(const char *format, const Args &...args)
Definition: cprintf.hh:158
uint64_t cx_tsb_ps0
Definition: tlb.hh:63
Addr va() const
Definition: pagetable.hh:66
void demapPage(Addr va, int partition_id, bool real, int context_id)
Remove all entries that match a certain partition id, context id, and va.
Definition: tlb.cc:249
#define M5_FALLTHROUGH
Definition: compiler.hh:84
bool translate(Addr vaddr, Addr &paddr)
Translate function.
Definition: page_table.cc:140
const Addr StartVAddrHole
Definition: tlb.hh:45
bool valid() const
Definition: pagetable.hh:65
iterator end()
Definition: tlb_map.hh:133
void makeAtomicResponse()
Definition: packet.hh:943
uint64_t cx_tsb_ps1
Definition: tlb.hh:64
uint64_t get_vec(int int_num)
Definition: interrupts.hh:249
void flushAll() override
Remove all entries from the TLB.
Definition: tlb.cc:322
void translateTiming(const RequestPtr &req, ThreadContext *tc, Translation *translation, Mode mode) override
Definition: tlb.cc:934
TlbEntry * cacheEntry[2]
Definition: tlb.hh:198
std::vector< ThreadContext * > threadContexts
Definition: system.hh:182
bool asiIsPrimary(ASI asi)
Definition: asi.cc:48
virtual BaseTLB * getITBPtr()=0
Fault translateFunctional(const RequestPtr &req, ThreadContext *tc, Mode mode) override
Definition: tlb.cc:838
Addr getAddr() const
Definition: packet.hh:720
bool asiIsBlock(ASI asi)
Definition: asi.cc:35
virtual void finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc, Mode mode)=0
TlbEntry * tlb
Definition: tlb.hh:73
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:140
#define ULL(N)
uint64_t constant
Definition: types.hh:48
Fault finalizePhysical(const RequestPtr &req, ThreadContext *tc, Mode mode) const override
Do post-translation physical address finalization.
Definition: tlb.cc:942
A Packet is used to encapsulate a transfer between two objects in the memory system (e...
Definition: packet.hh:249
TlbMap lookupTable
Definition: tlb.hh:70
bool asiIsHPriv(ASI asi)
Definition: asi.cc:295
Bitfield< 8 > va
void unserialize(CheckpointIn &cp)
Definition: pagetable.cc:54
void writeTagAccess(Addr va, int context)
Definition: tlb.cc:393
bool asiIsScratchPad(ASI asi)
Definition: asi.cc:239
#define SERIALIZE_SCALAR(scalar)
Definition: serialize.hh:763
Mode
Definition: tlb.hh:57
This object is a proxy for a port or other object which implements the functional response protocol...
Definition: port_proxy.hh:80
void insert(Addr vpn, int partition_id, int context_id, bool real, const PageTableEntry &PTE, int entry=-1)
Insert a PTE into the TLB.
Definition: tlb.cc:97
Bitfield< 9 > e
bool sideffect() const
Definition: pagetable.hh:169
bool asiIsNoFault(ASI asi)
Definition: asi.cc:230
SparcTLBParams Params
Definition: tlb.hh:156
EmulationPageTable * pTable
Definition: process.hh:174
virtual RegVal readMiscRegNoEffect(RegIndex misc_reg) const =0
Declarations of a non-full system Page Table.
#define SERIALIZE_CONTAINER(member)
Definition: serialize.hh:821
void GetTsbPtr(ThreadContext *tc, Addr addr, int ctx, Addr *ptrs)
Definition: tlb.cc:1388
std::ostream CheckpointOut
Definition: serialize.hh:63
uint64_t tag_access
Definition: tlb.hh:67
Definition: asi.cc:31
uint64_t TagRead(int entry)
Given an entry id, read that tlb entry's tag.
Definition: tlb.cc:350
void serialize(CheckpointOut &cp) const override
Serialize an object.
Definition: tlb.cc:1442
Scratchpad registers.
Definition: miscregs.hh:92
Bitfield< 17 > tbe
Definition: mt_constants.hh:77
bool asiIsUnPriv(ASI asi)
Definition: asi.cc:282
bool asiIsCmt(ASI asi)
Definition: asi.cc:246
bool asiIsTwin(ASI asi)
Definition: asi.cc:185
iterator find(const TlbRange &r)
Definition: tlb_map.hh:49
uint64_t TteRead(int entry)
Given an entry id, read that tlb entry's tte.
Definition: tlb.cc:337
uint64_t c0_tsb_ps1
Definition: tlb.hh:61
Bitfield< 5 > red
Definition: miscregs.hh:119
BaseInterrupts * getInterruptController(ThreadID tid)
Definition: base.hh:222
int findMsbSet(uint64_t val)
Returns the bit position of the MSB that is set in the input.
Definition: bitfield.hh:203
int lastReplaced
Definition: tlb.hh:77
Bitfield< 2 > priv
Definition: miscregs.hh:126
uint64_t sfsr
Definition: tlb.hh:66
bool asiIsInterrupt(ASI asi)
Definition: asi.cc:259
void paramIn(CheckpointIn &cp, const string &name, ExtMachInst &machInst)
Definition: types.cc:69
TLB(const Params *p)
Definition: tlb.cc:55
bool cacheValid
Definition: tlb.hh:80
bool asiIsMmu(ASI asi)
Definition: asi.cc:267
bool asiIsAsIfUser(ASI asi)
Definition: asi.cc:115
The request is required to be strictly ordered by CPU models and is non-speculative.
Definition: request.hh:123
T betoh(T value)
Definition: byteswap.hh:143
int size
Definition: tlb.hh:75
Bitfield< 3, 0 > mask
Definition: types.hh:62
bool asiIsLittle(ASI asi)
Definition: asi.cc:150
PageTableEntry pte
Definition: pagetable.hh:262
uint64_t c0_tsb_ps0
Definition: tlb.hh:60
Bitfield< 5 > t
uint64_t sfar
Definition: tlb.hh:59
uint64_t cacheState
Definition: tlb.hh:79
bool_vector8 mem[]
Definition: reset_stim.h:43
T mbits(T val, int first, int last)
Mask off the given bits in place like bits() but without shifting.
Definition: bitfield.hh:95
T bits(T val, int first, int last)
Extract the bitfield from position &#39;first&#39; to &#39;last&#39; (inclusive) from &#39;val&#39; and right justify it...
Definition: bitfield.hh:71
void demapAll(int partition_id)
Remove all non-locked entries from the tlb that match partition id.
Definition: tlb.cc:303
Bitfield< 18, 16 > ps
iterator begin()
Definition: tlb_map.hh:127
Scoped checkpoint section helper class.
Definition: serialize.hh:186
bool asiIsNucleus(ASI asi)
Definition: asi.cc:106
bool asiIsSecondary(ASI asi)
Definition: asi.cc:77
Bitfield< 0 > p
Fault translateInst(const RequestPtr &req, ThreadContext *tc)
Definition: tlb.cc:412
virtual RegVal readMiscReg(RegIndex misc_reg)=0
const char data[]
std::shared_ptr< FaultBase > Fault
Definition: types.hh:238
T getBE() const
Get the data in the packet byte swapped from big endian to host endian.
ASI
Definition: asi.hh:35
void serialize(CheckpointOut &cp) const
Definition: pagetable.cc:37
bool asiIsPartialStore(ASI asi)
Definition: asi.cc:200

Generated on Thu May 28 2020 16:11:01 for gem5 by doxygen 1.8.13