gem5 v19.0.0.0
tlb.cc
1 /*
2  * Copyright (c) 2001-2005 The Regents of The University of Michigan
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are
7  * met: redistributions of source code must retain the above copyright
8  * notice, this list of conditions and the following disclaimer;
9  * redistributions in binary form must reproduce the above copyright
10  * notice, this list of conditions and the following disclaimer in the
11  * documentation and/or other materials provided with the distribution;
12  * neither the name of the copyright holders nor the names of its
13  * contributors may be used to endorse or promote products derived from
14  * this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  *
28  * Authors: Ali Saidi
29  */
30 
31 #include "arch/sparc/tlb.hh"
32 
33 #include <cstring>
34 
35 #include "arch/sparc/asi.hh"
36 #include "arch/sparc/faults.hh"
37 #include "arch/sparc/interrupts.hh"
38 #include "arch/sparc/registers.hh"
39 #include "base/bitfield.hh"
40 #include "base/compiler.hh"
41 #include "base/trace.hh"
42 #include "cpu/base.hh"
43 #include "cpu/thread_context.hh"
44 #include "debug/IPR.hh"
45 #include "debug/TLB.hh"
46 #include "mem/packet_access.hh"
47 #include "mem/request.hh"
48 #include "sim/full_system.hh"
49 #include "sim/system.hh"
50 
51 /* @todo remove some of the magic constants. -- ali
52  * */
53 namespace SparcISA {
54 
55 TLB::TLB(const Params *p)
56  : BaseTLB(p), size(p->size), usedEntries(0), lastReplaced(0),
57  cacheState(0), cacheValid(false)
58 {
59  // To make this work you'll have to change the hypervisor and OS
60  if (size > 64)
61  fatal("SPARC T1 TLB registers don't support more than 64 TLB entries");
62 
63  tlb = new TlbEntry[size];
64  std::memset((void *)tlb, 0, sizeof(TlbEntry) * size);
65 
66  for (int x = 0; x < size; x++)
67  freeList.push_back(&tlb[x]);
68 
69  c0_tsb_ps0 = 0;
70  c0_tsb_ps1 = 0;
71  c0_config = 0;
72  cx_tsb_ps0 = 0;
73  cx_tsb_ps1 = 0;
74  cx_config = 0;
75  sfsr = 0;
76  tag_access = 0;
77  sfar = 0;
78  cacheEntry[0] = NULL;
79  cacheEntry[1] = NULL;
80 }
81 
 82 void
 83 TLB::clearUsedBits()
 84 {
85  MapIter i;
86  for (i = lookupTable.begin(); i != lookupTable.end(); i++) {
87  TlbEntry *t = i->second;
88  if (!t->pte.locked()) {
89  t->used = false;
90  usedEntries--;
91  }
92  }
93 }
94 
95 
96 void
97 TLB::insert(Addr va, int partition_id, int context_id, bool real,
98  const PageTableEntry& PTE, int entry)
99 {
100  MapIter i;
101  TlbEntry *new_entry = NULL;
102 // TlbRange tr;
103  int x;
104 
105  cacheValid = false;
106  va &= ~(PTE.size()-1);
107  /* tr.va = va;
108  tr.size = PTE.size() - 1;
109  tr.contextId = context_id;
110  tr.partitionId = partition_id;
111  tr.real = real;
112 */
113 
114  DPRINTF(TLB,
115  "TLB: Inserting Entry; va=%#x pa=%#x pid=%d cid=%d r=%d entryid=%d\n",
116  va, PTE.paddr(), partition_id, context_id, (int)real, entry);
117 
118  // Demap any entry that conflicts
119  for (x = 0; x < size; x++) {
120  if (tlb[x].range.real == real &&
121  tlb[x].range.partitionId == partition_id &&
122  tlb[x].range.va < va + PTE.size() - 1 &&
123  tlb[x].range.va + tlb[x].range.size >= va &&
124  (real || tlb[x].range.contextId == context_id ))
125  {
126  if (tlb[x].valid) {
127  freeList.push_front(&tlb[x]);
128  DPRINTF(TLB, "TLB: Conflicting entry %#X , deleting it\n", x);
129 
130  tlb[x].valid = false;
131  if (tlb[x].used) {
132  tlb[x].used = false;
133  usedEntries--;
134  }
135  lookupTable.erase(tlb[x].range);
136  }
137  }
138  }
139 
140  if (entry != -1) {
141  assert(entry < size && entry >= 0);
142  new_entry = &tlb[entry];
143  } else {
144  if (!freeList.empty()) {
145  new_entry = freeList.front();
146  } else {
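 // No free entries: pick a victim round-robin, starting just past the
 // last entry replaced and skipping locked entries.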
147  x = lastReplaced;
148  do {
149  ++x;
150  if (x == size)
151  x = 0;
152  if (x == lastReplaced)
153  goto insertAllLocked;
154  } while (tlb[x].pte.locked());
155  lastReplaced = x;
156  new_entry = &tlb[x];
157  }
158  }
159 
160 insertAllLocked:
 161  // Use the last entry if they're all locked
162  if (!new_entry) {
163  new_entry = &tlb[size-1];
164  }
165 
166  freeList.remove(new_entry);
167  if (new_entry->valid && new_entry->used)
168  usedEntries--;
169  if (new_entry->valid)
170  lookupTable.erase(new_entry->range);
171 
172 
173  assert(PTE.valid());
174  new_entry->range.va = va;
175  new_entry->range.size = PTE.size() - 1;
176  new_entry->range.partitionId = partition_id;
177  new_entry->range.contextId = context_id;
178  new_entry->range.real = real;
179  new_entry->pte = PTE;
 180  new_entry->used = true;
181  new_entry->valid = true;
182  usedEntries++;
183 
184  i = lookupTable.insert(new_entry->range, new_entry);
185  assert(i != lookupTable.end());
186 
 187  // If all entries have their used bit set, clear it on all of them
 188  // except the one we just inserted
189  if (usedEntries == size) {
190  clearUsedBits();
191  new_entry->used = true;
192  usedEntries++;
193  }
194 }
195 
196 
197 TlbEntry*
198 TLB::lookup(Addr va, int partition_id, bool real, int context_id,
199  bool update_used)
200 {
201  MapIter i;
202  TlbRange tr;
203  TlbEntry *t;
204 
205  DPRINTF(TLB, "TLB: Looking up entry va=%#x pid=%d cid=%d r=%d\n",
206  va, partition_id, context_id, real);
207  // Assemble full address structure
208  tr.va = va;
209  tr.size = 1;
210  tr.contextId = context_id;
211  tr.partitionId = partition_id;
212  tr.real = real;
213 
214  // Try to find the entry
215  i = lookupTable.find(tr);
216  if (i == lookupTable.end()) {
217  DPRINTF(TLB, "TLB: No valid entry found\n");
218  return NULL;
219  }
220 
 221  // Mark the entry's used bit and clear other used bits if needed
222  t = i->second;
223  DPRINTF(TLB, "TLB: Valid entry found pa: %#x size: %#x\n", t->pte.paddr(),
224  t->pte.size());
225 
 226  // Update the used bits only if this is a real access (not a fake
 227  // one from virttophys())
228  if (!t->used && update_used) {
229  t->used = true;
230  usedEntries++;
231  if (usedEntries == size) {
232  clearUsedBits();
233  t->used = true;
234  usedEntries++;
235  }
236  }
237 
238  return t;
239 }
240 
 241 void
 242 TLB::dumpAll()
 243 {
244  MapIter i;
245  for (int x = 0; x < size; x++) {
246  if (tlb[x].valid) {
247  DPRINTFN("%4d: %#2x:%#2x %c %#4x %#8x %#8x %#16x\n",
248  x, tlb[x].range.partitionId, tlb[x].range.contextId,
249  tlb[x].range.real ? 'R' : ' ', tlb[x].range.size,
250  tlb[x].range.va, tlb[x].pte.paddr(), tlb[x].pte());
251  }
252  }
253 }
254 
255 void
256 TLB::demapPage(Addr va, int partition_id, bool real, int context_id)
257 {
258  TlbRange tr;
259  MapIter i;
260 
261  DPRINTF(IPR, "TLB: Demapping Page va=%#x pid=%#d cid=%d r=%d\n",
262  va, partition_id, context_id, real);
263 
264  cacheValid = false;
265 
266  // Assemble full address structure
267  tr.va = va;
268  tr.size = 1;
269  tr.contextId = context_id;
270  tr.partitionId = partition_id;
271  tr.real = real;
272 
273  // Demap any entry that conflicts
274  i = lookupTable.find(tr);
275  if (i != lookupTable.end()) {
276  DPRINTF(IPR, "TLB: Demapped page\n");
277  i->second->valid = false;
278  if (i->second->used) {
279  i->second->used = false;
280  usedEntries--;
281  }
282  freeList.push_front(i->second);
283  lookupTable.erase(i);
284  }
285 }
286 
287 void
288 TLB::demapContext(int partition_id, int context_id)
289 {
290  DPRINTF(IPR, "TLB: Demapping Context pid=%#d cid=%d\n",
291  partition_id, context_id);
292  cacheValid = false;
293  for (int x = 0; x < size; x++) {
294  if (tlb[x].range.contextId == context_id &&
295  tlb[x].range.partitionId == partition_id) {
296  if (tlb[x].valid) {
297  freeList.push_front(&tlb[x]);
298  }
299  tlb[x].valid = false;
300  if (tlb[x].used) {
301  tlb[x].used = false;
302  usedEntries--;
303  }
304  lookupTable.erase(tlb[x].range);
305  }
306  }
307 }
308 
309 void
310 TLB::demapAll(int partition_id)
311 {
312  DPRINTF(TLB, "TLB: Demapping All pid=%#d\n", partition_id);
313  cacheValid = false;
314  for (int x = 0; x < size; x++) {
315  if (tlb[x].valid && !tlb[x].pte.locked() &&
316  tlb[x].range.partitionId == partition_id) {
317  freeList.push_front(&tlb[x]);
318  tlb[x].valid = false;
319  if (tlb[x].used) {
320  tlb[x].used = false;
321  usedEntries--;
322  }
323  lookupTable.erase(tlb[x].range);
324  }
325  }
326 }
327 
 328 void
 329 TLB::flushAll()
 330 {
331  cacheValid = false;
332  lookupTable.clear();
333 
334  for (int x = 0; x < size; x++) {
335  if (tlb[x].valid)
336  freeList.push_back(&tlb[x]);
337  tlb[x].valid = false;
338  tlb[x].used = false;
339  }
340  usedEntries = 0;
341 }
342 
343 uint64_t
344 TLB::TteRead(int entry)
345 {
346  if (entry >= size)
347  panic("entry: %d\n", entry);
348 
349  assert(entry < size);
350  if (tlb[entry].valid)
351  return tlb[entry].pte();
352  else
353  return (uint64_t)-1ll;
354 }
355 
356 uint64_t
357 TLB::TagRead(int entry)
358 {
359  assert(entry < size);
360  uint64_t tag;
361  if (!tlb[entry].valid)
362  return (uint64_t)-1ll;
363 
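 // Reassemble the tag-read format: the context in the low bits and the
 // VA above it, with the partition ID, real bit, and inverted page-size
 // field packed into the top byte.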
364  tag = tlb[entry].range.contextId;
365  tag |= tlb[entry].range.va;
366  tag |= (uint64_t)tlb[entry].range.partitionId << 61;
367  tag |= tlb[entry].range.real ? ULL(1) << 60 : 0;
368  tag |= (uint64_t)~tlb[entry].pte._size() << 56;
369  return tag;
370 }
371 
 372 bool
 373 TLB::validVirtualAddress(Addr va, bool am)
 374 {
375  if (am)
376  return true;
377  if (va >= StartVAddrHole && va <= EndVAddrHole)
378  return false;
379  return true;
380 }
381 
382 void
383 TLB::writeSfsr(bool write, ContextType ct, bool se, FaultTypes ft, int asi)
384 {
385  if (sfsr & 0x1)
386  sfsr = 0x3;
387  else
388  sfsr = 1;
389 
390  if (write)
391  sfsr |= 1 << 2;
392  sfsr |= ct << 4;
393  if (se)
394  sfsr |= 1 << 6;
395  sfsr |= ft << 7;
396  sfsr |= asi << 16;
397 }
398 
 399 void
 400 TLB::writeTagAccess(Addr va, int context)
 401 {
402  DPRINTF(TLB, "TLB: Writing Tag Access: va: %#X ctx: %#X value: %#X\n",
403  va, context, mbits(va, 63,13) | mbits(context,12,0));
404 
405  tag_access = mbits(va, 63,13) | mbits(context,12,0);
406 }
407 
 408 void
 409 TLB::writeSfsr(Addr a, bool write, ContextType ct,
 410  bool se, FaultTypes ft, int asi)
411 {
412  DPRINTF(TLB, "TLB: Fault: A=%#x w=%d ct=%d ft=%d asi=%d\n",
413  a, (int)write, ct, ft, asi);
414  TLB::writeSfsr(write, ct, se, ft, asi);
415  sfar = a;
416 }
417 
 418 Fault
 419 TLB::translateInst(const RequestPtr &req, ThreadContext *tc)
 420 {
421  uint64_t tlbdata = tc->readMiscRegNoEffect(MISCREG_TLB_DATA);
422 
423  Addr vaddr = req->getVaddr();
424  TlbEntry *e;
425 
426  assert(req->getArchFlags() == ASI_IMPLICIT);
427 
428  DPRINTF(TLB, "TLB: ITB Request to translate va=%#x size=%d\n",
429  vaddr, req->getSize());
430 
431  // Be fast if we can!
432  if (cacheValid && cacheState == tlbdata) {
433  if (cacheEntry[0]) {
434  if (cacheEntry[0]->range.va < vaddr + sizeof(MachInst) &&
435  cacheEntry[0]->range.va + cacheEntry[0]->range.size >= vaddr) {
436  req->setPaddr(cacheEntry[0]->pte.translate(vaddr));
437  return NoFault;
438  }
439  } else {
440  req->setPaddr(vaddr & PAddrImplMask);
441  return NoFault;
442  }
443  }
444 
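 // Decode the MMU state packed into MISCREG_TLB_DATA: privilege bits,
 // address masking, whether the instruction MMU is enabled, the
 // partition ID, the trap level, and the primary context.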
445  bool hpriv = bits(tlbdata,0,0);
446  bool red = bits(tlbdata,1,1);
447  bool priv = bits(tlbdata,2,2);
448  bool addr_mask = bits(tlbdata,3,3);
449  bool lsu_im = bits(tlbdata,4,4);
450 
451  int part_id = bits(tlbdata,15,8);
452  int tl = bits(tlbdata,18,16);
453  int pri_context = bits(tlbdata,47,32);
454  int context;
455  ContextType ct;
456  int asi;
457  bool real = false;
458 
459  DPRINTF(TLB, "TLB: priv:%d hpriv:%d red:%d lsuim:%d part_id: %#X\n",
460  priv, hpriv, red, lsu_im, part_id);
461 
462  if (tl > 0) {
463  asi = ASI_N;
464  ct = Nucleus;
465  context = 0;
466  } else {
467  asi = ASI_P;
468  ct = Primary;
469  context = pri_context;
470  }
471 
472  if ( hpriv || red ) {
473  cacheValid = true;
474  cacheState = tlbdata;
475  cacheEntry[0] = NULL;
476  req->setPaddr(vaddr & PAddrImplMask);
477  return NoFault;
478  }
479 
480  // If the access is unaligned trap
481  if (vaddr & 0x3) {
482  writeSfsr(false, ct, false, OtherFault, asi);
483  return std::make_shared<MemAddressNotAligned>();
484  }
485 
486  if (addr_mask)
487  vaddr = vaddr & VAddrAMask;
488 
489  if (!validVirtualAddress(vaddr, addr_mask)) {
490  writeSfsr(false, ct, false, VaOutOfRange, asi);
491  return std::make_shared<InstructionAccessException>();
492  }
493 
494  if (!lsu_im) {
495  e = lookup(vaddr, part_id, true);
496  real = true;
497  context = 0;
498  } else {
499  e = lookup(vaddr, part_id, false, context);
500  }
501 
502  if (e == NULL || !e->valid) {
503  writeTagAccess(vaddr, context);
504  if (real) {
505  return std::make_shared<InstructionRealTranslationMiss>();
506  } else {
507  if (FullSystem)
508  return std::make_shared<FastInstructionAccessMMUMiss>();
509  else
510  return std::make_shared<FastInstructionAccessMMUMiss>(
511  req->getVaddr());
512  }
513  }
514 
 515  // we're not privileged but are accessing a privileged page
516  if (!priv && e->pte.priv()) {
517  writeTagAccess(vaddr, context);
518  writeSfsr(false, ct, false, PrivViolation, asi);
519  return std::make_shared<InstructionAccessException>();
520  }
521 
 522  // cache translation data for the next translation
523  cacheValid = true;
524  cacheState = tlbdata;
525  cacheEntry[0] = e;
526 
527  req->setPaddr(e->pte.translate(vaddr));
528  DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
529  return NoFault;
530 }
531 
532 Fault
533 TLB::translateData(const RequestPtr &req, ThreadContext *tc, bool write)
534 {
535  /*
536  * @todo this could really use some profiling and fixing to make
537  * it faster!
538  */
539  uint64_t tlbdata = tc->readMiscRegNoEffect(MISCREG_TLB_DATA);
540  Addr vaddr = req->getVaddr();
541  Addr size = req->getSize();
542  ASI asi;
543  asi = (ASI)req->getArchFlags();
544  bool implicit = false;
545  bool hpriv = bits(tlbdata,0,0);
546  bool unaligned = vaddr & (size - 1);
547 
548  DPRINTF(TLB, "TLB: DTB Request to translate va=%#x size=%d asi=%#x\n",
549  vaddr, size, asi);
550 
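 // Sanity check: every one of the 64 TLB slots should be accounted for,
 // either on the free list or in the lookup table.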
551  if (lookupTable.size() != 64 - freeList.size())
552  panic("Lookup table size: %d tlb size: %d\n", lookupTable.size(),
553  freeList.size());
554  if (asi == ASI_IMPLICIT)
555  implicit = true;
556 
557  // Only use the fast path here if there doesn't need to be an unaligned
558  // trap later
559  if (!unaligned) {
560  if (hpriv && implicit) {
561  req->setPaddr(vaddr & PAddrImplMask);
562  return NoFault;
563  }
564 
565  // Be fast if we can!
566  if (cacheValid && cacheState == tlbdata) {
567 
568 
569 
570  if (cacheEntry[0]) {
571  TlbEntry *ce = cacheEntry[0];
572  Addr ce_va = ce->range.va;
573  if (cacheAsi[0] == asi &&
574  ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
575  (!write || ce->pte.writable())) {
576  req->setPaddr(ce->pte.translate(vaddr));
577  if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1) {
 578  req->setFlags(
 579  Request::UNCACHEABLE | Request::STRICT_ORDER);
 580  }
581  DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
582  return NoFault;
583  } // if matched
584  } // if cache entry valid
585  if (cacheEntry[1]) {
586  TlbEntry *ce = cacheEntry[1];
587  Addr ce_va = ce->range.va;
588  if (cacheAsi[1] == asi &&
589  ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
590  (!write || ce->pte.writable())) {
591  req->setPaddr(ce->pte.translate(vaddr));
592  if (ce->pte.sideffect() || (ce->pte.paddr() >> 39) & 1) {
 593  req->setFlags(
 594  Request::UNCACHEABLE | Request::STRICT_ORDER);
 595  }
596  DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
597  return NoFault;
598  } // if matched
599  } // if cache entry valid
600  }
601  }
602 
603  bool red = bits(tlbdata,1,1);
604  bool priv = bits(tlbdata,2,2);
605  bool addr_mask = bits(tlbdata,3,3);
606  bool lsu_dm = bits(tlbdata,5,5);
607 
608  int part_id = bits(tlbdata,15,8);
609  int tl = bits(tlbdata,18,16);
610  int pri_context = bits(tlbdata,47,32);
611  int sec_context = bits(tlbdata,63,48);
612 
613  bool real = false;
614  ContextType ct = Primary;
615  int context = 0;
616 
617  TlbEntry *e;
618 
619  DPRINTF(TLB, "TLB: priv:%d hpriv:%d red:%d lsudm:%d part_id: %#X\n",
620  priv, hpriv, red, lsu_dm, part_id);
621 
622  if (implicit) {
623  if (tl > 0) {
624  asi = ASI_N;
625  ct = Nucleus;
626  context = 0;
627  } else {
628  asi = ASI_P;
629  ct = Primary;
630  context = pri_context;
631  }
632  } else {
633  // We need to check for priv level/asi priv
634  if (!priv && !hpriv && !asiIsUnPriv(asi)) {
635  // It appears that context should be Nucleus in these cases?
636  writeSfsr(vaddr, write, Nucleus, false, IllegalAsi, asi);
637  return std::make_shared<PrivilegedAction>();
638  }
639 
640  if (!hpriv && asiIsHPriv(asi)) {
641  writeSfsr(vaddr, write, Nucleus, false, IllegalAsi, asi);
642  return std::make_shared<DataAccessException>();
643  }
644 
645  if (asiIsPrimary(asi)) {
646  context = pri_context;
647  ct = Primary;
648  } else if (asiIsSecondary(asi)) {
649  context = sec_context;
650  ct = Secondary;
651  } else if (asiIsNucleus(asi)) {
652  ct = Nucleus;
653  context = 0;
654  } else { // ????
655  ct = Primary;
656  context = pri_context;
657  }
658  }
659 
660  if (!implicit && asi != ASI_P && asi != ASI_S) {
661  if (asiIsLittle(asi))
662  panic("Little Endian ASIs not supported\n");
663 
664  //XXX It's unclear from looking at the documentation how a no fault
665  // load differs from a regular one, other than what happens concerning
666  // nfo and e bits in the TTE
667 // if (asiIsNoFault(asi))
668 // panic("No Fault ASIs not supported\n");
669 
670  if (asiIsPartialStore(asi))
671  panic("Partial Store ASIs not supported\n");
672 
673  if (asiIsCmt(asi))
 674  panic("Cmt ASI registers not implemented\n");
675 
676  if (asiIsInterrupt(asi))
677  goto handleIntRegAccess;
678  if (asiIsMmu(asi))
679  goto handleMmuRegAccess;
680  if (asiIsScratchPad(asi))
681  goto handleScratchRegAccess;
682  if (asiIsQueue(asi))
683  goto handleQueueRegAccess;
684  if (asiIsSparcError(asi))
685  goto handleSparcErrorRegAccess;
686 
687  if (!asiIsReal(asi) && !asiIsNucleus(asi) && !asiIsAsIfUser(asi) &&
688  !asiIsTwin(asi) && !asiIsBlock(asi) && !asiIsNoFault(asi))
689  panic("Accessing ASI %#X. Should we?\n", asi);
690  }
691 
 692  // If the access is unaligned, trap
693  if (unaligned) {
694  writeSfsr(vaddr, false, ct, false, OtherFault, asi);
695  return std::make_shared<MemAddressNotAligned>();
696  }
697 
698  if (addr_mask)
699  vaddr = vaddr & VAddrAMask;
700 
701  if (!validVirtualAddress(vaddr, addr_mask)) {
702  writeSfsr(vaddr, false, ct, true, VaOutOfRange, asi);
703  return std::make_shared<DataAccessException>();
704  }
705 
706  if ((!lsu_dm && !hpriv && !red) || asiIsReal(asi)) {
707  real = true;
708  context = 0;
709  }
710 
711  if (hpriv && (implicit || (!asiIsAsIfUser(asi) && !asiIsReal(asi)))) {
712  req->setPaddr(vaddr & PAddrImplMask);
713  return NoFault;
714  }
715 
716  e = lookup(vaddr, part_id, real, context);
717 
718  if (e == NULL || !e->valid) {
719  writeTagAccess(vaddr, context);
720  DPRINTF(TLB, "TLB: DTB Failed to find matching TLB entry\n");
721  if (real) {
722  return std::make_shared<DataRealTranslationMiss>();
723  } else {
724  if (FullSystem)
725  return std::make_shared<FastDataAccessMMUMiss>();
726  else
727  return std::make_shared<FastDataAccessMMUMiss>(
728  req->getVaddr());
729  }
730 
731  }
732 
733  if (!priv && e->pte.priv()) {
734  writeTagAccess(vaddr, context);
735  writeSfsr(vaddr, write, ct, e->pte.sideffect(), PrivViolation, asi);
736  return std::make_shared<DataAccessException>();
737  }
738 
739  if (write && !e->pte.writable()) {
740  writeTagAccess(vaddr, context);
741  writeSfsr(vaddr, write, ct, e->pte.sideffect(), OtherFault, asi);
742  return std::make_shared<FastDataAccessProtection>();
743  }
744 
745  if (e->pte.nofault() && !asiIsNoFault(asi)) {
746  writeTagAccess(vaddr, context);
747  writeSfsr(vaddr, write, ct, e->pte.sideffect(), LoadFromNfo, asi);
748  return std::make_shared<DataAccessException>();
749  }
750 
751  if (e->pte.sideffect() && asiIsNoFault(asi)) {
752  writeTagAccess(vaddr, context);
753  writeSfsr(vaddr, write, ct, e->pte.sideffect(), SideEffect, asi);
754  return std::make_shared<DataAccessException>();
755  }
756 
 757  if (e->pte.sideffect() || (e->pte.paddr() >> 39) & 1)
 758  req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
 759 
 760  // cache translation data for the next translation
761  cacheState = tlbdata;
762  if (!cacheValid) {
763  cacheEntry[1] = NULL;
764  cacheEntry[0] = NULL;
765  }
766 
767  if (cacheEntry[0] != e && cacheEntry[1] != e) {
768  cacheEntry[1] = cacheEntry[0];
769  cacheEntry[0] = e;
770  cacheAsi[1] = cacheAsi[0];
771  cacheAsi[0] = asi;
772  if (implicit)
773  cacheAsi[0] = (ASI)0;
774  }
775  cacheValid = true;
776  req->setPaddr(e->pte.translate(vaddr));
777  DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
778  return NoFault;
779 
781 handleIntRegAccess:
782  if (!hpriv) {
783  writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
784  if (priv)
785  return std::make_shared<DataAccessException>();
786  else
787  return std::make_shared<PrivilegedAction>();
788  }
789 
790  if ((asi == ASI_SWVR_UDB_INTR_W && !write) ||
791  (asi == ASI_SWVR_UDB_INTR_R && write)) {
792  writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
793  return std::make_shared<DataAccessException>();
794  }
795 
796  goto regAccessOk;
797 
798 
799 handleScratchRegAccess:
800  if (vaddr > 0x38 || (vaddr >= 0x20 && vaddr < 0x30 && !hpriv)) {
801  writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
802  return std::make_shared<DataAccessException>();
803  }
804  goto regAccessOk;
805 
806 handleQueueRegAccess:
807  if (!priv && !hpriv) {
808  writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
809  return std::make_shared<PrivilegedAction>();
810  }
811  if ((!hpriv && vaddr & 0xF) || vaddr > 0x3f8 || vaddr < 0x3c0) {
812  writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
813  return std::make_shared<DataAccessException>();
814  }
815  goto regAccessOk;
816 
817 handleSparcErrorRegAccess:
818  if (!hpriv) {
819  writeSfsr(vaddr, write, Primary, true, IllegalAsi, asi);
820  if (priv)
821  return std::make_shared<DataAccessException>();
822  else
823  return std::make_shared<PrivilegedAction>();
824  }
825  goto regAccessOk;
826 
827 
828 regAccessOk:
829 handleMmuRegAccess:
830  DPRINTF(TLB, "TLB: DTB Translating MM IPR access\n");
831  req->setFlags(Request::MMAPPED_IPR);
832  req->setPaddr(req->getVaddr());
833  return NoFault;
834 };
835 
 836 Fault
 837 TLB::translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode)
 838 {
839  if (mode == Execute)
840  return translateInst(req, tc);
841  else
842  return translateData(req, tc, mode == Write);
843 }
844 
 845 void
 846 TLB::translateTiming(const RequestPtr &req, ThreadContext *tc,
 847  Translation *translation, Mode mode)
848 {
849  assert(translation);
850  translation->finish(translateAtomic(req, tc, mode), req, tc, mode);
851 }
852 
 853 Fault
 854 TLB::finalizePhysical(const RequestPtr &req,
 855  ThreadContext *tc, Mode mode) const
856 {
857  return NoFault;
858 }
859 
 860 Cycles
 861 TLB::doMmuRegRead(ThreadContext *tc, Packet *pkt)
 862 {
863  Addr va = pkt->getAddr();
864  ASI asi = (ASI)pkt->req->getArchFlags();
865  uint64_t temp;
866 
867  DPRINTF(IPR, "Memory Mapped IPR Read: asi=%#X a=%#x\n",
868  (uint32_t)pkt->req->getArchFlags(), pkt->getAddr());
869 
870  TLB *itb = dynamic_cast<TLB *>(tc->getITBPtr());
871 
872  switch (asi) {
873  case ASI_LSU_CONTROL_REG:
 874  assert(va == 0);
 875  pkt->setBE(tc->readMiscReg(MISCREG_MMU_LSU_CTRL));
 876  break;
877  case ASI_MMU:
878  switch (va) {
 879  case 0x8:
 880  pkt->setBE(tc->readMiscReg(MISCREG_MMU_P_CONTEXT));
 881  break;
 882  case 0x10:
 883  pkt->setBE(tc->readMiscReg(MISCREG_MMU_S_CONTEXT));
 884  break;
885  default:
886  goto doMmuReadError;
887  }
888  break;
 889  case ASI_QUEUE:
 890  pkt->setBE(tc->readMiscReg(MISCREG_QUEUE_CPU_MONDO_HEAD +
 891  (va >> 4) - 0x3c));
892  break;
 893  case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0:
 894  assert(va == 0);
 895  pkt->setBE(c0_tsb_ps0);
 896  break;
 897  case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1:
 898  assert(va == 0);
 899  pkt->setBE(c0_tsb_ps1);
 900  break;
 901  case ASI_DMMU_CTXT_ZERO_CONFIG:
 902  assert(va == 0);
 903  pkt->setBE(c0_config);
 904  break;
 905  case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0:
 906  assert(va == 0);
 907  pkt->setBE(itb->c0_tsb_ps0);
 908  break;
 909  case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1:
 910  assert(va == 0);
 911  pkt->setBE(itb->c0_tsb_ps1);
 912  break;
 913  case ASI_IMMU_CTXT_ZERO_CONFIG:
 914  assert(va == 0);
 915  pkt->setBE(itb->c0_config);
 916  break;
 917  case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0:
 918  assert(va == 0);
 919  pkt->setBE(cx_tsb_ps0);
 920  break;
 921  case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1:
 922  assert(va == 0);
 923  pkt->setBE(cx_tsb_ps1);
 924  break;
 925  case ASI_DMMU_CTXT_NONZERO_CONFIG:
 926  assert(va == 0);
 927  pkt->setBE(cx_config);
 928  break;
 929  case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0:
 930  assert(va == 0);
 931  pkt->setBE(itb->cx_tsb_ps0);
 932  break;
 933  case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1:
 934  assert(va == 0);
 935  pkt->setBE(itb->cx_tsb_ps1);
 936  break;
 937  case ASI_IMMU_CTXT_NONZERO_CONFIG:
 938  assert(va == 0);
 939  pkt->setBE(itb->cx_config);
 940  break;
 941  case ASI_SPARC_ERROR_STATUS_REG:
 942  pkt->setBE((uint64_t)0);
943  break;
944  case ASI_HYP_SCRATCHPAD:
945  case ASI_SCRATCHPAD:
946  pkt->setBE(tc->readMiscReg(MISCREG_SCRATCHPAD_R0 + (va >> 3)));
947  break;
948  case ASI_IMMU:
949  switch (va) {
950  case 0x0:
951  temp = itb->tag_access;
952  pkt->setBE(bits(temp,63,22) | bits(temp,12,0) << 48);
953  break;
954  case 0x18:
955  pkt->setBE(itb->sfsr);
956  break;
957  case 0x30:
958  pkt->setBE(itb->tag_access);
959  break;
960  default:
961  goto doMmuReadError;
962  }
963  break;
964  case ASI_DMMU:
965  switch (va) {
966  case 0x0:
967  temp = tag_access;
968  pkt->setBE(bits(temp,63,22) | bits(temp,12,0) << 48);
969  break;
970  case 0x18:
971  pkt->setBE(sfsr);
972  break;
973  case 0x20:
974  pkt->setBE(sfar);
975  break;
976  case 0x30:
977  pkt->setBE(tag_access);
978  break;
 979  case 0x80:
 980  pkt->setBE(tc->readMiscReg(MISCREG_MMU_PART_ID));
 981  break;
982  default:
983  goto doMmuReadError;
984  }
 985  break;
 986  case ASI_DMMU_TSB_PS0_PTR_REG:
 987  pkt->setBE(MakeTsbPtr(Ps0,
988  tag_access,
989  c0_tsb_ps0,
990  c0_config,
991  cx_tsb_ps0,
992  cx_config));
 993  break;
 994  case ASI_DMMU_TSB_PS1_PTR_REG:
 995  pkt->setBE(MakeTsbPtr(Ps1,
996  tag_access,
997  c0_tsb_ps1,
998  c0_config,
999  cx_tsb_ps1,
1000  cx_config));
 1001  break;
 1002  case ASI_IMMU_TSB_PS0_PTR_REG:
 1003  pkt->setBE(MakeTsbPtr(Ps0,
1004  itb->tag_access,
1005  itb->c0_tsb_ps0,
1006  itb->c0_config,
1007  itb->cx_tsb_ps0,
1008  itb->cx_config));
 1009  break;
 1010  case ASI_IMMU_TSB_PS1_PTR_REG:
 1011  pkt->setBE(MakeTsbPtr(Ps1,
1012  itb->tag_access,
1013  itb->c0_tsb_ps1,
1014  itb->c0_config,
1015  itb->cx_tsb_ps1,
1016  itb->cx_config));
1017  break;
1018  case ASI_SWVR_INTR_RECEIVE:
1019  {
1020  SparcISA::Interrupts * interrupts =
1021  dynamic_cast<SparcISA::Interrupts *>(
1022  tc->getCpuPtr()->getInterruptController(0));
1023  pkt->setBE(interrupts->get_vec(IT_INT_VEC));
1024  }
1025  break;
1026  case ASI_SWVR_UDB_INTR_R:
1027  {
1028  SparcISA::Interrupts * interrupts =
1029  dynamic_cast<SparcISA::Interrupts *>(
1030  tc->getCpuPtr()->getInterruptController(0));
1031  temp = findMsbSet(interrupts->get_vec(IT_INT_VEC));
1032  tc->getCpuPtr()->clearInterrupt(0, IT_INT_VEC, temp);
1033  pkt->setBE(temp);
1034  }
1035  break;
1036  default:
1037 doMmuReadError:
1038  panic("need to impl DTB::doMmuRegRead() got asi=%#x, va=%#x\n",
1039  (uint32_t)asi, va);
1040  }
1041  pkt->makeAtomicResponse();
1042  return Cycles(1);
1043 }
1044 
 1045 Cycles
 1046 TLB::doMmuRegWrite(ThreadContext *tc, Packet *pkt)
 1047 {
1048  uint64_t data = pkt->getBE<uint64_t>();
1049  Addr va = pkt->getAddr();
1050  ASI asi = (ASI)pkt->req->getArchFlags();
1051 
1052  Addr ta_insert;
1053  Addr va_insert;
1054  Addr ct_insert;
1055  int part_insert;
1056  int entry_insert = -1;
1057  bool real_insert;
1058  bool ignore;
1059  int part_id;
1060  int ctx_id;
1061  PageTableEntry pte;
1062 
1063  DPRINTF(IPR, "Memory Mapped IPR Write: asi=%#X a=%#x d=%#X\n",
1064  (uint32_t)asi, va, data);
1065 
1066  TLB *itb = dynamic_cast<TLB *>(tc->getITBPtr());
1067 
1068  switch (asi) {
1069  case ASI_LSU_CONTROL_REG:
1070  assert(va == 0);
1071  tc->setMiscReg(MISCREG_MMU_LSU_CTRL, data);
1072  break;
1073  case ASI_MMU:
1074  switch (va) {
1075  case 0x8:
1076  tc->setMiscReg(MISCREG_MMU_P_CONTEXT, data);
1077  break;
1078  case 0x10:
1079  tc->setMiscReg(MISCREG_MMU_S_CONTEXT, data);
1080  break;
1081  default:
1082  goto doMmuWriteError;
1083  }
1084  break;
1085  case ASI_QUEUE:
 1086  assert(mbits(data,13,6) == data);
 1087  tc->setMiscReg(MISCREG_QUEUE_CPU_MONDO_HEAD +
 1088  (va >> 4) - 0x3c, data);
 1089  break;
 1090  case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS0:
 1091  assert(va == 0);
 1092  c0_tsb_ps0 = data;
 1093  break;
 1094  case ASI_DMMU_CTXT_ZERO_TSB_BASE_PS1:
 1095  assert(va == 0);
 1096  c0_tsb_ps1 = data;
 1097  break;
 1098  case ASI_DMMU_CTXT_ZERO_CONFIG:
 1099  assert(va == 0);
 1100  c0_config = data;
 1101  break;
 1102  case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS0:
 1103  assert(va == 0);
 1104  itb->c0_tsb_ps0 = data;
 1105  break;
 1106  case ASI_IMMU_CTXT_ZERO_TSB_BASE_PS1:
 1107  assert(va == 0);
 1108  itb->c0_tsb_ps1 = data;
 1109  break;
 1110  case ASI_IMMU_CTXT_ZERO_CONFIG:
 1111  assert(va == 0);
 1112  itb->c0_config = data;
 1113  break;
 1114  case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS0:
 1115  assert(va == 0);
 1116  cx_tsb_ps0 = data;
 1117  break;
 1118  case ASI_DMMU_CTXT_NONZERO_TSB_BASE_PS1:
 1119  assert(va == 0);
 1120  cx_tsb_ps1 = data;
 1121  break;
 1122  case ASI_DMMU_CTXT_NONZERO_CONFIG:
 1123  assert(va == 0);
 1124  cx_config = data;
 1125  break;
 1126  case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS0:
 1127  assert(va == 0);
 1128  itb->cx_tsb_ps0 = data;
 1129  break;
 1130  case ASI_IMMU_CTXT_NONZERO_TSB_BASE_PS1:
 1131  assert(va == 0);
 1132  itb->cx_tsb_ps1 = data;
 1133  break;
 1134  case ASI_IMMU_CTXT_NONZERO_CONFIG:
 1135  assert(va == 0);
 1136  itb->cx_config = data;
 1137  break;
 1138  case ASI_SPARC_ERROR_EN_REG:
 1139  case ASI_SPARC_ERROR_STATUS_REG:
 1140  inform("Ignoring write to SPARC ERROR register\n");
 1141  break;
1142  case ASI_HYP_SCRATCHPAD:
1143  case ASI_SCRATCHPAD:
1144  tc->setMiscReg(MISCREG_SCRATCHPAD_R0 + (va >> 3), data);
1145  break;
1146  case ASI_IMMU:
1147  switch (va) {
1148  case 0x18:
1149  itb->sfsr = data;
1150  break;
1151  case 0x30:
1152  sext<59>(bits(data, 59,0));
1153  itb->tag_access = data;
1154  break;
1155  default:
1156  goto doMmuWriteError;
1157  }
 1158  break;
 1159  case ASI_ITLB_DATA_ACCESS_REG:
 1160  entry_insert = bits(va, 8,3);
 1161  M5_FALLTHROUGH;
 1162  case ASI_ITLB_DATA_IN_REG:
1163  assert(entry_insert != -1 || mbits(va,10,9) == va);
1164  ta_insert = itb->tag_access;
1165  va_insert = mbits(ta_insert, 63,13);
1166  ct_insert = mbits(ta_insert, 12,0);
1167  part_insert = tc->readMiscReg(MISCREG_MMU_PART_ID);
1168  real_insert = bits(va, 9,9);
 1169  pte.populate(data, bits(va,10,10) ? PageTableEntry::sun4v :
 1170  PageTableEntry::sun4u);
 1171  itb->insert(va_insert, part_insert, ct_insert, real_insert,
1172  pte, entry_insert);
 1173  break;
 1174  case ASI_DTLB_DATA_ACCESS_REG:
 1175  entry_insert = bits(va, 8,3);
 1176  M5_FALLTHROUGH;
 1177  case ASI_DTLB_DATA_IN_REG:
1178  assert(entry_insert != -1 || mbits(va,10,9) == va);
1179  ta_insert = tag_access;
1180  va_insert = mbits(ta_insert, 63,13);
1181  ct_insert = mbits(ta_insert, 12,0);
1182  part_insert = tc->readMiscReg(MISCREG_MMU_PART_ID);
1183  real_insert = bits(va, 9,9);
1184  pte.populate(data, bits(va,10,10) ? PageTableEntry::sun4v :
1185  PageTableEntry::sun4u);
1186  insert(va_insert, part_insert, ct_insert, real_insert, pte,
1187  entry_insert);
1188  break;
1189  case ASI_IMMU_DEMAP:
1190  ignore = false;
1191  ctx_id = -1;
1192  part_id = tc->readMiscReg(MISCREG_MMU_PART_ID);
1193  switch (bits(va,5,4)) {
1194  case 0:
1195  ctx_id = tc->readMiscReg(MISCREG_MMU_P_CONTEXT);
1196  break;
1197  case 1:
1198  ignore = true;
1199  break;
1200  case 3:
1201  ctx_id = 0;
1202  break;
1203  default:
1204  ignore = true;
1205  }
1206 
1207  switch (bits(va,7,6)) {
1208  case 0: // demap page
1209  if (!ignore)
1210  itb->demapPage(mbits(va,63,13), part_id, bits(va,9,9), ctx_id);
1211  break;
1212  case 1: // demap context
1213  if (!ignore)
1214  itb->demapContext(part_id, ctx_id);
1215  break;
1216  case 2:
1217  itb->demapAll(part_id);
1218  break;
1219  default:
1220  panic("Invalid type for IMMU demap\n");
1221  }
1222  break;
1223  case ASI_DMMU:
1224  switch (va) {
1225  case 0x18:
1226  sfsr = data;
1227  break;
1228  case 0x30:
1229  sext<59>(bits(data, 59,0));
1230  tag_access = data;
1231  break;
1232  case 0x80:
1233  tc->setMiscReg(MISCREG_MMU_PART_ID, data);
1234  break;
1235  default:
1236  goto doMmuWriteError;
1237  }
1238  break;
1239  case ASI_DMMU_DEMAP:
1240  ignore = false;
1241  ctx_id = -1;
1242  part_id = tc->readMiscReg(MISCREG_MMU_PART_ID);
1243  switch (bits(va,5,4)) {
1244  case 0:
1245  ctx_id = tc->readMiscReg(MISCREG_MMU_P_CONTEXT);
1246  break;
1247  case 1:
1248  ctx_id = tc->readMiscReg(MISCREG_MMU_S_CONTEXT);
1249  break;
1250  case 3:
1251  ctx_id = 0;
1252  break;
1253  default:
1254  ignore = true;
1255  }
1256 
1257  switch (bits(va,7,6)) {
1258  case 0: // demap page
1259  if (!ignore)
1260  demapPage(mbits(va,63,13), part_id, bits(va,9,9), ctx_id);
1261  break;
1262  case 1: // demap context
1263  if (!ignore)
1264  demapContext(part_id, ctx_id);
1265  break;
1266  case 2:
1267  demapAll(part_id);
1268  break;
1269  default:
1270  panic("Invalid type for IMMU demap\n");
1271  }
1272  break;
1273  case ASI_SWVR_INTR_RECEIVE:
1274  {
1275  int msb;
1276  // clear all the interrupts that aren't set in the write
1277  SparcISA::Interrupts * interrupts =
1278  dynamic_cast<SparcISA::Interrupts *>(
1279  tc->getCpuPtr()->getInterruptController(0));
1280  while (interrupts->get_vec(IT_INT_VEC) & data) {
1281  msb = findMsbSet(interrupts->get_vec(IT_INT_VEC) & data);
1282  tc->getCpuPtr()->clearInterrupt(0, IT_INT_VEC, msb);
1283  }
1284  }
1285  break;
1286  case ASI_SWVR_UDB_INTR_W:
1287  tc->getSystemPtr()->threadContexts[bits(data,12,8)]->getCpuPtr()->
1288  postInterrupt(0, bits(data, 5, 0), 0);
1289  break;
1290  default:
1291 doMmuWriteError:
1292  panic("need to impl DTB::doMmuRegWrite() got asi=%#x, va=%#x d=%#x\n",
1293  (uint32_t)pkt->req->getArchFlags(), pkt->getAddr(), data);
1294  }
1295  pkt->makeAtomicResponse();
1296  return Cycles(1);
1297 }
1298 
 1299 void
 1300 TLB::GetTsbPtr(ThreadContext *tc, Addr addr, int ctx, Addr *ptrs)
 1301 {
1302  uint64_t tag_access = mbits(addr,63,13) | mbits(ctx,12,0);
1303  TLB *itb = dynamic_cast<TLB *>(tc->getITBPtr());
1304  ptrs[0] = MakeTsbPtr(Ps0, tag_access,
1305  c0_tsb_ps0,
1306  c0_config,
1307  cx_tsb_ps0,
1308  cx_config);
1309  ptrs[1] = MakeTsbPtr(Ps1, tag_access,
1310  c0_tsb_ps1,
1311  c0_config,
1312  cx_tsb_ps1,
1313  cx_config);
1314  ptrs[2] = MakeTsbPtr(Ps0, tag_access,
1315  itb->c0_tsb_ps0,
1316  itb->c0_config,
1317  itb->cx_tsb_ps0,
1318  itb->cx_config);
1319  ptrs[3] = MakeTsbPtr(Ps1, tag_access,
1320  itb->c0_tsb_ps1,
1321  itb->c0_config,
1322  itb->cx_tsb_ps1,
1323  itb->cx_config);
1324 }
1325 
1326 uint64_t
1327 TLB::MakeTsbPtr(TsbPageSize ps, uint64_t tag_access, uint64_t c0_tsb,
1328  uint64_t c0_config, uint64_t cX_tsb, uint64_t cX_config)
1329 {
1330  uint64_t tsb;
1331  uint64_t config;
1332 
1333  if (bits(tag_access, 12,0) == 0) {
1334  tsb = c0_tsb;
1335  config = c0_config;
1336  } else {
1337  tsb = cX_tsb;
1338  config = cX_config;
1339  }
1340 
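 // The pointer is the TSB base (bits 63:13 of the TSB register) plus an
 // index derived from the tag's VPN, scaled by the configured page size;
 // when the TSB is split, the PS1 half sits above the PS0 half.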
1341  uint64_t ptr = mbits(tsb,63,13);
1342  bool split = bits(tsb,12,12);
1343  int tsb_size = bits(tsb,3,0);
1344  int page_size = (ps == Ps0) ? bits(config, 2,0) : bits(config,10,8);
1345 
1346  if (ps == Ps1 && split)
1347  ptr |= ULL(1) << (13 + tsb_size);
1348  ptr |= (tag_access >> (9 + page_size * 3)) & mask(12+tsb_size, 4);
1349 
1350  return ptr;
1351 }
1352 
 1353 void
 1354 TLB::serialize(CheckpointOut &cp) const
 1355 {
 1356  SERIALIZE_SCALAR(size);
 1357  SERIALIZE_SCALAR(usedEntries);
 1358  SERIALIZE_SCALAR(lastReplaced);
 1359 
1360  // convert the pointer based free list into an index based one
1361  std::vector<int> free_list;
1362  for (const TlbEntry *entry : freeList)
1363  free_list.push_back(entry - tlb);
1364 
1365  SERIALIZE_CONTAINER(free_list);
 1366 
 1367  SERIALIZE_SCALAR(c0_tsb_ps0);
 1368  SERIALIZE_SCALAR(c0_tsb_ps1);
 1369  SERIALIZE_SCALAR(c0_config);
 1370  SERIALIZE_SCALAR(cx_tsb_ps0);
 1371  SERIALIZE_SCALAR(cx_tsb_ps1);
 1372  SERIALIZE_SCALAR(cx_config);
 1373  SERIALIZE_SCALAR(sfsr);
 1374  SERIALIZE_SCALAR(tag_access);
 1375  SERIALIZE_SCALAR(sfar);
 1376 
1377  for (int x = 0; x < size; x++) {
1378  ScopedCheckpointSection sec(cp, csprintf("PTE%d", x));
1379  tlb[x].serialize(cp);
1380  }
1381 }
1382 
 1383 void
 1384 TLB::unserialize(CheckpointIn &cp)
 1385 {
1386  int oldSize;
1387 
1388  paramIn(cp, "size", oldSize);
1389  if (oldSize != size)
 1390  panic("Don't support unserializing different sized TLBs\n");
 1391  UNSERIALIZE_SCALAR(usedEntries);
 1392  UNSERIALIZE_SCALAR(lastReplaced);
 1393 
1394  std::vector<int> free_list;
1395  UNSERIALIZE_CONTAINER(free_list);
1396  freeList.clear();
1397  for (int idx : free_list)
1398  freeList.push_back(&tlb[idx]);
1399 
 1399  UNSERIALIZE_SCALAR(c0_tsb_ps0);
 1400  UNSERIALIZE_SCALAR(c0_tsb_ps1);
 1401  UNSERIALIZE_SCALAR(c0_config);
 1402  UNSERIALIZE_SCALAR(cx_tsb_ps0);
 1403  UNSERIALIZE_SCALAR(cx_tsb_ps1);
 1404  UNSERIALIZE_SCALAR(cx_config);
 1405  UNSERIALIZE_SCALAR(sfsr);
 1406  UNSERIALIZE_SCALAR(tag_access);
 1407  UNSERIALIZE_SCALAR(sfar);
 1408 
1409  lookupTable.clear();
1410  for (int x = 0; x < size; x++) {
1411  ScopedCheckpointSection sec(cp, csprintf("PTE%d", x));
1412  tlb[x].unserialize(cp);
1413  if (tlb[x].valid)
1414  lookupTable.insert(tlb[x].range, &tlb[x]);
1415 
1416  }
1418 }
1419 
1420 } // namespace SparcISA
1421 
1422 SparcISA::TLB *
1423 SparcTLBParams::create()
1424 {
1425  return new SparcISA::TLB(this);
1426 }