gem5 v21.1.0.0
gpu_tlb.cc
/*
 * Copyright (c) 2011-2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * For use for simulation and test purposes only
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 * may be used to endorse or promote products derived from this software
 * without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Lisa Hsu
 */

#include "gpu-compute/gpu_tlb.hh"

#include <cmath>
#include <cstring>

#include "arch/x86/faults.hh"
#include "arch/x86/insts/microldstop.hh"
#include "arch/x86/page_size.hh"
#include "arch/x86/pagetable.hh"
#include "arch/x86/pagetable_walker.hh"
#include "arch/x86/regs/misc.hh"
#include "arch/x86/regs/msr.hh"
#include "arch/x86/x86_traits.hh"
#include "base/bitfield.hh"
#include "base/logging.hh"
#include "base/output.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/thread_context.hh"
#include "debug/GPUPrefetch.hh"
#include "debug/GPUTLB.hh"
#include "mem/packet_access.hh"
#include "mem/page_table.hh"
#include "mem/request.hh"
#include "sim/process.hh"
#include "sim/pseudo_inst.hh"

namespace gem5
{
namespace X86ISA
{

    GpuTLB::GpuTLB(const Params &p)
        : ClockedObject(p), configAddress(0), size(p.size),
          cleanupEvent([this]{ cleanup(); }, name(), false,
                       Event::Maximum_Pri),
          exitEvent([this]{ exitCallback(); }, name()), stats(this)
    {
        assoc = p.assoc;
        assert(assoc <= size);
        numSets = size/assoc;
        allocationPolicy = p.allocationPolicy;
        hasMemSidePort = false;
        accessDistance = p.accessDistance;

        tlb.assign(size, TlbEntry());

        freeList.resize(numSets);
        entryList.resize(numSets);

        for (int set = 0; set < numSets; ++set) {
            for (int way = 0; way < assoc; ++way) {
                int x = set * assoc + way;
                freeList[set].push_back(&tlb.at(x));
            }
        }

        FA = (size == assoc);

        /**
         * @warning: the set-associative version assumes you have a
         * fixed page size of 4KB.
         * If the page size is greater than 4KB (as defined in the
         * TheISA::PageBytes), then there are various issues w/ the current
         * implementation (you'd have the same 8KB page being replicated in
         * different sets etc)
         */
        setMask = numSets - 1;
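
        // Illustrative note (added commentary, not in the original
        // source): with this set indexing, a hypothetical size = 64
        // entries and assoc = 8 give numSets = 8 and setMask = 0x7, so a
        // 4KB-page virtual address maps to set (vaddr >> 12) & 0x7 in
        // lookupIt()/insert(). A fully-associative TLB (size == assoc)
        // has a single set and setMask = 0.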

        maxCoalescedReqs = p.maxOutstandingReqs;

        // Do not allow maxCoalescedReqs to be more than the TLB associativity
        if (maxCoalescedReqs > assoc) {
            maxCoalescedReqs = assoc;
            cprintf("Forcing maxCoalescedReqs to %d (TLB assoc.) \n", assoc);
        }

        outstandingReqs = 0;
        hitLatency = p.hitLatency;
        missLatency1 = p.missLatency1;
        missLatency2 = p.missLatency2;

        // create the response ports based on the number of connected ports
        for (size_t i = 0; i < p.port_cpu_side_ports_connection_count; ++i) {
            cpuSidePort.push_back(new CpuSidePort(csprintf("%s-port%d",
                                  name(), i), this, i));
        }

        // create the request ports based on the number of connected ports
        for (size_t i = 0; i < p.port_mem_side_ports_connection_count; ++i) {
            memSidePort.push_back(new MemSidePort(csprintf("%s-port%d",
                                  name(), i), this, i));
        }
    }

    // fixme: this is never called?
    GpuTLB::~GpuTLB()
    {
        // make sure all the hash-maps are empty
        assert(translationReturnEvent.empty());
    }

    Port &
    GpuTLB::getPort(const std::string &if_name, PortID idx)
    {
        if (if_name == "cpu_side_ports") {
            if (idx >= static_cast<PortID>(cpuSidePort.size())) {
                panic("GpuTLB::getPort: unknown index %d\n", idx);
            }

            return *cpuSidePort[idx];
        } else if (if_name == "mem_side_ports") {
            if (idx >= static_cast<PortID>(memSidePort.size())) {
                panic("GpuTLB::getPort: unknown index %d\n", idx);
            }

            hasMemSidePort = true;

            return *memSidePort[idx];
        } else {
            panic("GpuTLB::getPort: unknown port %s\n", if_name);
        }
    }

    TlbEntry*
    GpuTLB::insert(Addr vpn, TlbEntry &entry)
    {
        TlbEntry *newEntry = nullptr;

        /**
         * vpn holds the virtual page address
         * The least significant bits are simply masked
         */
        int set = (vpn >> PageShift) & setMask;

        if (!freeList[set].empty()) {
            newEntry = freeList[set].front();
            freeList[set].pop_front();
        } else {
            newEntry = entryList[set].back();
            entryList[set].pop_back();
        }

        *newEntry = entry;
        newEntry->vaddr = vpn;
        entryList[set].push_front(newEntry);

        return newEntry;
    }
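
    // Note (added commentary, not in the original source): entryList
    // implements an LRU stack per set. insert() victimizes the back of
    // the stack (the least recently used entry) when the set has no free
    // entries, and lookupIt() moves a matching entry to the front on a
    // hit, so the front is always the most recently used entry.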

    GpuTLB::EntryList::iterator
    GpuTLB::lookupIt(Addr va, bool update_lru)
    {
        int set = (va >> PageShift) & setMask;

        if (FA) {
            assert(!set);
        }

        auto entry = entryList[set].begin();
        for (; entry != entryList[set].end(); ++entry) {
            int page_size = (*entry)->size();

            if ((*entry)->vaddr <= va && (*entry)->vaddr + page_size > va) {
                DPRINTF(GPUTLB, "Matched vaddr %#x to entry starting at %#x "
                        "with size %#x.\n", va, (*entry)->vaddr, page_size);

                if (update_lru) {
                    entryList[set].push_front(*entry);
                    entryList[set].erase(entry);
                    entry = entryList[set].begin();
                }

                break;
            }
        }

        return entry;
    }

    TlbEntry*
    GpuTLB::lookup(Addr va, bool update_lru)
    {
        int set = (va >> PageShift) & setMask;

        auto entry = lookupIt(va, update_lru);

        if (entry == entryList[set].end())
            return nullptr;
        else
            return *entry;
    }

    void
    GpuTLB::invalidateAll()
    {
        DPRINTF(GPUTLB, "Invalidating all entries.\n");

        for (int i = 0; i < numSets; ++i) {
            while (!entryList[i].empty()) {
                TlbEntry *entry = entryList[i].front();
                entryList[i].pop_front();
                freeList[i].push_back(entry);
            }
        }
    }

    void
    GpuTLB::setConfigAddress(uint32_t addr)
    {
        configAddress = addr;
    }

    void
    GpuTLB::invalidateNonGlobal()
    {
        DPRINTF(GPUTLB, "Invalidating all non global entries.\n");

        for (int i = 0; i < numSets; ++i) {
            for (auto entryIt = entryList[i].begin();
                 entryIt != entryList[i].end();) {
                if (!(*entryIt)->global) {
                    freeList[i].push_back(*entryIt);
                    entryList[i].erase(entryIt++);
                } else {
                    ++entryIt;
                }
            }
        }
    }

    void
    GpuTLB::demapPage(Addr va, uint64_t asn)
    {

        int set = (va >> PageShift) & setMask;
        auto entry = lookupIt(va, false);

        if (entry != entryList[set].end()) {
            freeList[set].push_back(*entry);
            entryList[set].erase(entry);
        }
    }
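
    // Note (added commentary, not in the original source): demapPage
    // ignores its asn (address space number) argument; entries are
    // matched on the virtual address alone, and the lookup deliberately
    // passes update_lru = false so a demap does not disturb the
    // replacement order.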


    namespace
    {

    Cycles
    localMiscRegAccess(bool read, MiscRegIndex regNum,
                       ThreadContext *tc, PacketPtr pkt)
    {
        if (read) {
            RegVal data = htole(tc->readMiscReg(regNum));
            // Make sure we don't trot off the end of data.
            pkt->setData((uint8_t *)&data);
        } else {
            RegVal data = htole(tc->readMiscRegNoEffect(regNum));
            tc->setMiscReg(regNum, letoh(data));
        }
        return Cycles(1);
    }

    } // anonymous namespace

    Fault
    GpuTLB::translateInt(bool read, const RequestPtr &req, ThreadContext *tc)
    {
        DPRINTF(GPUTLB, "Address references internal memory.\n");
        Addr vaddr = req->getVaddr();
        Addr prefix = (vaddr >> 3) & IntAddrPrefixMask;

        if (prefix == IntAddrPrefixCPUID) {
            panic("CPUID memory space not yet implemented!\n");
        } else if (prefix == IntAddrPrefixMSR) {
            vaddr = (vaddr >> 3) & ~IntAddrPrefixMask;

            MiscRegIndex regNum;
            if (!msrAddrToIndex(regNum, vaddr))
                return std::make_shared<GeneralProtection>(0);

            req->setLocalAccessor(
                [read,regNum](ThreadContext *tc, PacketPtr pkt)
                {
                    return localMiscRegAccess(read, regNum, tc, pkt);
                }
            );

            return NoFault;
        } else if (prefix == IntAddrPrefixIO) {
            // TODO If CPL > IOPL or in virtual mode, check the I/O permission
            // bitmap in the TSS.

            Addr IOPort = vaddr & ~IntAddrPrefixMask;
            // Make sure the address fits in the expected 16 bit IO address
            // space.
            assert(!(IOPort & ~0xFFFF));
            if (IOPort == 0xCF8 && req->getSize() == 4) {
                req->setLocalAccessor(
                    [read](ThreadContext *tc, PacketPtr pkt)
                    {
                        return localMiscRegAccess(
                                read, MISCREG_PCI_CONFIG_ADDRESS, tc, pkt);
                    }
                );
            } else if ((IOPort & ~mask(2)) == 0xCFC) {
                req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);

                if (bits(configAddress, 31, 31)) {
                    req->setPaddr(PhysAddrPrefixPciConfig |
                                  mbits(configAddress, 30, 2) |
                                  (IOPort & mask(2)));
                } else {
                    req->setPaddr(PhysAddrPrefixIO | IOPort);
                }
            } else {
                req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
                req->setPaddr(PhysAddrPrefixIO | IOPort);
            }
            return NoFault;
        } else {
            panic("Access to unrecognized internal address space %#x.\n",
                  prefix);
        }
    }
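
    // Note (added commentary, not in the original source): requests to
    // the magic SEGMENT_REG_MS space encode their target in the address:
    // (vaddr >> 3) yields an index whose upper bits (IntAddrPrefixMask)
    // select the space (CPUID, MSR, or I/O) and whose lower bits select
    // the MSR number or I/O port; 0xCF8/0xCFC are the standard PCI
    // configuration address/data ports.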

    /**
     * TLB_lookup will only perform a TLB lookup returning true on a TLB
     * hit and false on a TLB miss.
     */
    bool
    GpuTLB::tlbLookup(const RequestPtr &req,
                      ThreadContext *tc, bool update_stats)
    {
        bool tlb_hit = false;
#ifndef NDEBUG
        uint32_t flags = req->getFlags();
        int seg = flags & SegmentFlagMask;
#endif

        assert(seg != SEGMENT_REG_MS);
        Addr vaddr = req->getVaddr();
        DPRINTF(GPUTLB, "TLB Lookup for vaddr %#x.\n", vaddr);
        HandyM5Reg m5Reg = tc->readMiscRegNoEffect(MISCREG_M5_REG);

        if (m5Reg.prot) {
            DPRINTF(GPUTLB, "In protected mode.\n");
            // make sure we are in 64-bit mode
            assert(m5Reg.mode == LongMode);

            // If paging is enabled, do the translation.
            if (m5Reg.paging) {
                DPRINTF(GPUTLB, "Paging enabled.\n");
                // update LRU stack on a hit
                TlbEntry *entry = lookup(vaddr, true);

                if (entry)
                    tlb_hit = true;

                if (!update_stats) {
                    // functional tlb access for memory initialization
                    // i.e., memory seeding or instr. seeding -> don't update
                    // TLB and stats
                    return tlb_hit;
                }

                stats.localNumTLBAccesses++;

                if (!entry) {
                    stats.localNumTLBMisses++;
                } else {
                    stats.localNumTLBHits++;
                }
            }
        }

        return tlb_hit;
    }
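
    // Note (added commentary, not in the original source): even when
    // update_stats is false, the lookup() call above passes
    // update_lru = true, so a functional/seeding access can still
    // reorder the LRU stack; only the statistics updates are skipped.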

    Fault
    GpuTLB::translate(const RequestPtr &req, ThreadContext *tc,
                      Translation *translation, Mode mode,
                      bool &delayedResponse, bool timing, int &latency)
    {
        uint32_t flags = req->getFlags();
        int seg = flags & SegmentFlagMask;
        bool storeCheck = flags & (StoreCheck << FlagShift);

        // If this is true, we're dealing with a request
        // to a non-memory address space.
        if (seg == SEGMENT_REG_MS) {
            return translateInt(mode == Mode::Read, req, tc);
        }

        delayedResponse = false;
        Addr vaddr = req->getVaddr();
        DPRINTF(GPUTLB, "Translating vaddr %#x.\n", vaddr);

        HandyM5Reg m5Reg = tc->readMiscRegNoEffect(MISCREG_M5_REG);

        // If protected mode has been enabled...
        if (m5Reg.prot) {
            DPRINTF(GPUTLB, "In protected mode.\n");
            // If we're not in 64-bit mode, do protection/limit checks
            if (m5Reg.mode != LongMode) {
                DPRINTF(GPUTLB, "Not in long mode. Checking segment "
                        "protection.\n");

                // Check for a null segment selector.
                if (!(seg == SEGMENT_REG_TSG || seg == SYS_SEGMENT_REG_IDTR ||
                      seg == SEGMENT_REG_HS || seg == SEGMENT_REG_LS)
                    && !tc->readMiscRegNoEffect(MISCREG_SEG_SEL(seg))) {
                    return std::make_shared<GeneralProtection>(0);
                }

                bool expandDown = false;
                SegAttr attr = tc->readMiscRegNoEffect(MISCREG_SEG_ATTR(seg));

                if (seg >= SEGMENT_REG_ES && seg <= SEGMENT_REG_HS) {
                    if (!attr.writable && (mode == BaseMMU::Write ||
                        storeCheck))
                        return std::make_shared<GeneralProtection>(0);

                    if (!attr.readable && mode == BaseMMU::Read)
                        return std::make_shared<GeneralProtection>(0);

                    expandDown = attr.expandDown;

                }

                Addr base = tc->readMiscRegNoEffect(MISCREG_SEG_BASE(seg));
                Addr limit = tc->readMiscRegNoEffect(MISCREG_SEG_LIMIT(seg));
                // This assumes we're not in 64 bit mode. If we were, the
                // default address size is 64 bits, overridable to 32.
                int size = 32;
                bool sizeOverride = (flags & (AddrSizeFlagBit << FlagShift));
                SegAttr csAttr = tc->readMiscRegNoEffect(MISCREG_CS_ATTR);

                if ((csAttr.defaultSize && sizeOverride) ||
                    (!csAttr.defaultSize && !sizeOverride)) {
                    size = 16;
                }

                Addr offset = bits(vaddr - base, size - 1, 0);
                Addr endOffset = offset + req->getSize() - 1;

                if (expandDown) {
                    DPRINTF(GPUTLB, "Checking an expand down segment.\n");
                    warn_once("Expand down segments are untested.\n");

                    if (offset <= limit || endOffset <= limit)
                        return std::make_shared<GeneralProtection>(0);
                } else {
                    if (offset > limit || endOffset > limit)
                        return std::make_shared<GeneralProtection>(0);
                }
            }

            // If paging is enabled, do the translation.
            if (m5Reg.paging) {
                DPRINTF(GPUTLB, "Paging enabled.\n");
                // The vaddr already has the segment base applied.
                TlbEntry *entry = lookup(vaddr);
                stats.localNumTLBAccesses++;

                if (!entry) {
                    stats.localNumTLBMisses++;
                    if (timing) {
                        latency = missLatency1;
                    }

                    if (FullSystem) {
                        fatal("GpuTLB doesn't support full-system mode\n");
                    } else {
                        DPRINTF(GPUTLB, "Handling a TLB miss for address %#x "
                                "at pc %#x.\n", vaddr, tc->instAddr());

                        Process *p = tc->getProcessPtr();
                        const EmulationPageTable::Entry *pte =
                            p->pTable->lookup(vaddr);

                        if (!pte && mode != BaseMMU::Execute) {
                            // penalize a "page fault" more
                            if (timing)
                                latency += missLatency2;

                            if (p->fixupFault(vaddr))
                                pte = p->pTable->lookup(vaddr);
                        }

                        if (!pte) {
                            return std::make_shared<PageFault>(vaddr, true,
                                                               mode, true,
                                                               false);
                        } else {
                            Addr alignedVaddr = p->pTable->pageAlign(vaddr);

                            DPRINTF(GPUTLB, "Mapping %#x to %#x\n",
                                    alignedVaddr, pte->paddr);

                            TlbEntry gpuEntry(p->pid(), alignedVaddr,
                                              pte->paddr, false, false);
                            entry = insert(alignedVaddr, gpuEntry);
                        }

                        DPRINTF(GPUTLB, "Miss was serviced.\n");
                    }
                } else {
                    stats.localNumTLBHits++;

                    if (timing) {
                        latency = hitLatency;
                    }
                }

                // Do paging protection checks.
                bool inUser = (m5Reg.cpl == 3 &&
                               !(flags & (CPL0FlagBit << FlagShift)));

                CR0 cr0 = tc->readMiscRegNoEffect(MISCREG_CR0);
                bool badWrite = (!entry->writable && (inUser || cr0.wp));

                if ((inUser && !entry->user) || (mode == BaseMMU::Write &&
                     badWrite)) {
                    // The page must have been present to get into the TLB in
                    // the first place. We'll assume the reserved bits are
                    // fine even though we're not checking them.
                    return std::make_shared<PageFault>(vaddr, true, mode,
                                                       inUser, false);
                }

                if (storeCheck && badWrite) {
                    // This would fault if this were a write, so return a page
                    // fault that reflects that happening.
                    return std::make_shared<PageFault>(vaddr, true,
                                                       BaseMMU::Write,
                                                       inUser, false);
                }


                DPRINTF(GPUTLB, "Entry found with paddr %#x, doing protection "
                        "checks.\n", entry->paddr);

                int page_size = entry->size();
                Addr paddr = entry->paddr | (vaddr & (page_size - 1));
                DPRINTF(GPUTLB, "Translated %#x -> %#x.\n", vaddr, paddr);
                req->setPaddr(paddr);

                if (entry->uncacheable)
                    req->setFlags(Request::UNCACHEABLE);
            } else {
                // Use the address which already has segmentation applied.
                DPRINTF(GPUTLB, "Paging disabled.\n");
                DPRINTF(GPUTLB, "Translated %#x -> %#x.\n", vaddr, vaddr);
                req->setPaddr(vaddr);
            }
        } else {
            // Real mode
            DPRINTF(GPUTLB, "In real mode.\n");
            DPRINTF(GPUTLB, "Translated %#x -> %#x.\n", vaddr, vaddr);
            req->setPaddr(vaddr);
        }

        // Check for an access to the local APIC
        if (FullSystem) {
            LocalApicBase localApicBase =
                tc->readMiscRegNoEffect(MISCREG_APIC_BASE);

            Addr baseAddr = localApicBase.base * PageBytes;
            Addr paddr = req->getPaddr();

            if (baseAddr <= paddr && baseAddr + PageBytes > paddr) {
                // Force the access to be uncacheable.
                req->setFlags(Request::UNCACHEABLE);
                req->setPaddr(x86LocalAPICAddress(tc->contextId(),
                              paddr - baseAddr));
            }
        }

        return NoFault;
    }

    Fault
    GpuTLB::translateAtomic(const RequestPtr &req, ThreadContext *tc,
                            Mode mode, int &latency)
    {
        bool delayedResponse;

        return GpuTLB::translate(req, tc, nullptr, mode, delayedResponse,
                                 false, latency);
    }

    void
    GpuTLB::translateTiming(const RequestPtr &req, ThreadContext *tc,
                            Translation *translation, Mode mode, int &latency)
    {
        bool delayedResponse;
        assert(translation);

        Fault fault = GpuTLB::translate(req, tc, translation, mode,
                                        delayedResponse, true, latency);

        if (!delayedResponse)
            translation->finish(fault, req, tc, mode);
    }

    Walker*
    GpuTLB::getWalker()
    {
        return walker;
    }


    void
    GpuTLB::serialize(CheckpointOut &cp) const
    {
    }

    void
    GpuTLB::unserialize(CheckpointIn &cp)
    {
    }

    /**
     * Do the TLB lookup for this coalesced request and schedule
     * another event <TLB access latency> cycles later.
     */
    void
    GpuTLB::issueTLBLookup(PacketPtr pkt)
    {
        assert(pkt);
        assert(pkt->senderState);

        Addr virt_page_addr = roundDown(pkt->req->getVaddr(),
                                        X86ISA::PageBytes);

        TranslationState *sender_state =
                safe_cast<TranslationState*>(pkt->senderState);

        bool update_stats = !sender_state->isPrefetch;
        ThreadContext * tmp_tc = sender_state->tc;

        DPRINTF(GPUTLB, "Translation req. for virt. page addr %#x\n",
                virt_page_addr);

        int req_cnt = sender_state->reqCnt.back();

        if (update_stats) {
            stats.accessCycles -= (curTick() * req_cnt);
            stats.localCycles -= curTick();
            updatePageFootprint(virt_page_addr);
            stats.globalNumTLBAccesses += req_cnt;
        }

        tlbOutcome lookup_outcome = TLB_MISS;
        const RequestPtr &tmp_req = pkt->req;

        // Access the TLB and figure out if it's a hit or a miss.
        bool success = tlbLookup(tmp_req, tmp_tc, update_stats);

        if (success) {
            lookup_outcome = TLB_HIT;
            // Put the entry in SenderState
            TlbEntry *entry = lookup(tmp_req->getVaddr(), false);
            assert(entry);

            auto p = sender_state->tc->getProcessPtr();
            sender_state->tlbEntry =
                new TlbEntry(p->pid(), entry->vaddr, entry->paddr,
                             false, false);

            if (update_stats) {
                // the reqCnt has an entry per level, so its size tells us
                // which level we are in
                sender_state->hitLevel = sender_state->reqCnt.size();
                stats.globalNumTLBHits += req_cnt;
            }
        } else {
            if (update_stats)
                stats.globalNumTLBMisses += req_cnt;
        }

        /*
         * We now know the TLB lookup outcome (if it's a hit or a miss), as
         * well as the TLB access latency.
         *
         * We create and schedule a new TLBEvent which will help us take the
         * appropriate actions (e.g., update TLB on a hit, send request to
         * lower level TLB on a miss, or start a page walk if this was the
         * last-level TLB)
         */
        TLBEvent *tlb_event =
            new TLBEvent(this, virt_page_addr, lookup_outcome, pkt);

        if (translationReturnEvent.count(virt_page_addr)) {
            panic("Virtual Page Address %#x already has a return event\n",
                  virt_page_addr);
        }

        translationReturnEvent[virt_page_addr] = tlb_event;
        assert(tlb_event);

        DPRINTF(GPUTLB, "schedule translationReturnEvent @ curTick %d\n",
                curTick() + cyclesToTicks(Cycles(hitLatency)));

        schedule(tlb_event, curTick() + cyclesToTicks(Cycles(hitLatency)));
    }
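
    // Note (added commentary, not in the original source): accessCycles
    // and localCycles are decremented by curTick() here and incremented
    // by curTick() again when the TLBEvent finally responds, so the
    // stats accumulate elapsed ticks per (coalesced) request rather than
    // absolute tick values.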

    GpuTLB::TLBEvent::TLBEvent(GpuTLB *_tlb, Addr _addr,
                               tlbOutcome tlb_outcome, PacketPtr _pkt)
        : Event(CPU_Tick_Pri), tlb(_tlb), virtPageAddr(_addr),
          outcome(tlb_outcome), pkt(_pkt)
    {
    }

    /**
     * Do Paging protection checks.
     */
    void
    GpuTLB::pagingProtectionChecks(ThreadContext *tc, PacketPtr pkt,
                                   TlbEntry * tlb_entry, Mode mode)
    {
        HandyM5Reg m5Reg = tc->readMiscRegNoEffect(MISCREG_M5_REG);
        uint32_t flags = pkt->req->getFlags();
        bool storeCheck = flags & (StoreCheck << FlagShift);

        // Do paging protection checks.
        bool inUser
            = (m5Reg.cpl == 3 && !(flags & (CPL0FlagBit << FlagShift)));
        CR0 cr0 = tc->readMiscRegNoEffect(MISCREG_CR0);

        bool badWrite = (!tlb_entry->writable && (inUser || cr0.wp));

        if ((inUser && !tlb_entry->user) ||
            (mode == BaseMMU::Write && badWrite)) {
            // The page must have been present to get into the TLB in
            // the first place. We'll assume the reserved bits are
            // fine even though we're not checking them.
            panic("Page fault detected");
        }

        if (storeCheck && badWrite) {
            // This would fault if this were a write, so return a page
            // fault that reflects that happening.
            panic("Page fault detected");
        }
    }
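
    // Note (added commentary, not in the original source): unlike
    // translate(), which returns a PageFault to its caller, these checks
    // run after a translation has already been resolved, so a protection
    // violation here is treated as a simulator error (panic) rather than
    // as an architectural fault.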

    /**
     * handleTranslationReturn is called on a TLB hit,
     * when a TLB miss returns or when a page fault returns.
     */
    void
    GpuTLB::handleTranslationReturn(Addr virt_page_addr,
                                    tlbOutcome tlb_outcome, PacketPtr pkt)
    {
        assert(pkt);
        Addr vaddr = pkt->req->getVaddr();

        TranslationState *sender_state =
            safe_cast<TranslationState*>(pkt->senderState);

        ThreadContext *tc = sender_state->tc;
        Mode mode = sender_state->tlbMode;

        TlbEntry *local_entry, *new_entry;

        if (tlb_outcome == TLB_HIT) {
            DPRINTF(GPUTLB, "Translation Done - TLB Hit for addr %#x\n",
                    vaddr);
            local_entry = sender_state->tlbEntry;
        } else {
            DPRINTF(GPUTLB, "Translation Done - TLB Miss for addr %#x\n",
                    vaddr);

            // We are returning either from a page walk or from a hit at a
            // lower TLB level; the senderState should be carrying the
            // translated entry
            new_entry = sender_state->tlbEntry;
            assert(new_entry);
            local_entry = new_entry;

            if (allocationPolicy) {
                DPRINTF(GPUTLB, "allocating entry w/ addr %#x\n",
                        virt_page_addr);

                local_entry = insert(virt_page_addr, *new_entry);
            }

            assert(local_entry);
        }

        DPRINTF(GPUTLB, "Entry found with vaddr %#x, doing protection checks "
                "while paddr was %#x.\n", local_entry->vaddr,
                local_entry->paddr);

        pagingProtectionChecks(tc, pkt, local_entry, mode);
        int page_size = local_entry->size();
        Addr paddr = local_entry->paddr | (vaddr & (page_size - 1));
        DPRINTF(GPUTLB, "Translated %#x -> %#x.\n", vaddr, paddr);

        // Since this packet will be sent through the cpu side port,
        // it must be converted to a response pkt if it is not one already
        if (pkt->isRequest()) {
            pkt->makeTimingResponse();
        }

        pkt->req->setPaddr(paddr);

        if (local_entry->uncacheable) {
            pkt->req->setFlags(Request::UNCACHEABLE);
        }

        // send packet back to coalescer
        cpuSidePort[0]->sendTimingResp(pkt);
        // schedule cleanup event
        cleanupQueue.push(virt_page_addr);

        // schedule this only once per cycle.
        // The check is required because we might have multiple translations
        // returning the same cycle.
        // This is a maximum priority event and must be on the same cycle
        // as the cleanup event in TLBCoalescer to avoid a race with
        // IssueProbeEvent caused by TLBCoalescer::MemSidePort::recvReqRetry
        if (!cleanupEvent.scheduled())
            schedule(cleanupEvent, curTick());
    }

    /**
     * A TLBEvent is scheduled after the TLB lookup and helps us take the
     * appropriate actions: (e.g., update TLB on a hit, send request to
     * lower level TLB on a miss, or start a page walk if this was the
     * last-level TLB).
     */
    void
    GpuTLB::translationReturn(Addr virtPageAddr, tlbOutcome outcome,
                              PacketPtr pkt)
    {
        DPRINTF(GPUTLB, "Triggered TLBEvent for addr %#x\n", virtPageAddr);

        assert(translationReturnEvent[virtPageAddr]);
        assert(pkt);

        TranslationState *tmp_sender_state =
            safe_cast<TranslationState*>(pkt->senderState);

        int req_cnt = tmp_sender_state->reqCnt.back();
        bool update_stats = !tmp_sender_state->isPrefetch;


        if (outcome == TLB_HIT) {
            handleTranslationReturn(virtPageAddr, TLB_HIT, pkt);

            if (update_stats) {
                stats.accessCycles += (req_cnt * curTick());
                stats.localCycles += curTick();
            }

        } else if (outcome == TLB_MISS) {

            DPRINTF(GPUTLB, "This is a TLB miss\n");
            if (update_stats) {
                stats.accessCycles += (req_cnt*curTick());
                stats.localCycles += curTick();
            }

            if (hasMemSidePort) {
                // the one cycle added here represents the delay from when we
                // get the reply back till when we propagate it to the
                // coalescer above.
                if (update_stats) {
                    stats.accessCycles += (req_cnt * 1);
                    stats.localCycles += 1;
                }

                // forward the request to the lower-level TLB
                if (!memSidePort[0]->sendTimingReq(pkt)) {
                    DPRINTF(GPUTLB, "Failed sending translation request to "
                            "lower level TLB for addr %#x\n", virtPageAddr);

                    memSidePort[0]->retries.push_back(pkt);
                } else {
                    DPRINTF(GPUTLB, "Sent translation request to lower level "
                            "TLB for addr %#x\n", virtPageAddr);
                }
            } else {
                // this is the last level TLB. Start a page walk
                DPRINTF(GPUTLB, "Last level TLB - start a page walk for "
                        "addr %#x\n", virtPageAddr);

                if (update_stats)
                    stats.pageTableCycles -= (req_cnt*curTick());

                TLBEvent *tlb_event = translationReturnEvent[virtPageAddr];
                assert(tlb_event);
                tlb_event->updateOutcome(PAGE_WALK);
                schedule(tlb_event,
                         curTick() + cyclesToTicks(Cycles(missLatency2)));
            }
        } else if (outcome == PAGE_WALK) {
            if (update_stats)
                stats.pageTableCycles += (req_cnt*curTick());

            // Need to access the page table and update the TLB
            DPRINTF(GPUTLB, "Doing a page walk for address %#x\n",
                    virtPageAddr);

            TranslationState *sender_state =
                safe_cast<TranslationState*>(pkt->senderState);

            Process *p = sender_state->tc->getProcessPtr();
            Addr vaddr = pkt->req->getVaddr();

            Addr alignedVaddr = p->pTable->pageAlign(vaddr);
            assert(alignedVaddr == virtPageAddr);

            const EmulationPageTable::Entry *pte = p->pTable->lookup(vaddr);
            if (!pte && sender_state->tlbMode != BaseMMU::Execute &&
                p->fixupFault(vaddr)) {
                pte = p->pTable->lookup(vaddr);
            }

            if (pte) {
                DPRINTF(GPUTLB, "Mapping %#x to %#x\n", alignedVaddr,
                        pte->paddr);

                sender_state->tlbEntry =
                    new TlbEntry(p->pid(), virtPageAddr, pte->paddr, false,
                                 false);
            } else {
                sender_state->tlbEntry = nullptr;
            }

            handleTranslationReturn(virtPageAddr, TLB_MISS, pkt);
        } else if (outcome == MISS_RETURN) {
            /** we add an extra cycle in the return path of the translation */
            handleTranslationReturn(virtPageAddr, TLB_MISS, pkt);
        } else {
            panic("Unexpected TLB outcome %d", outcome);
        }
    }
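
    // Note (added commentary, not in the original source): outcome
    // progression for one coalesced request, as implemented above:
    // TLB_HIT responds immediately; TLB_MISS is either forwarded to the
    // lower-level TLB (whose reply re-enters as MISS_RETURN via
    // MemSidePort::recvTimingResp) or, at the last level, re-scheduled
    // as PAGE_WALK after missLatency2 cycles, which consults the
    // emulated page table and then responds via handleTranslationReturn.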

    void
    GpuTLB::TLBEvent::process()
    {
        tlb->translationReturn(virtPageAddr, outcome, pkt);
    }

    const char*
    GpuTLB::TLBEvent::description() const
    {
        return "trigger translationDoneEvent";
    }

    void
    GpuTLB::TLBEvent::updateOutcome(tlbOutcome _outcome)
    {
        outcome = _outcome;
    }

    Addr
    GpuTLB::TLBEvent::getTLBEventVaddr()
    {
        return virtPageAddr;
    }

    /**
     * recvTiming receives a coalesced timing request from a TLBCoalescer
     * and it calls issueTLBLookup()
     * It only rejects the packet if we have exceeded the max
     * outstanding number of requests for the TLB
     */
    bool
    GpuTLB::CpuSidePort::recvTimingReq(PacketPtr pkt)
    {
        if (tlb->outstandingReqs < tlb->maxCoalescedReqs) {
            tlb->issueTLBLookup(pkt);
            // update number of outstanding translation requests
            tlb->outstandingReqs++;
            return true;
        } else {
            DPRINTF(GPUTLB, "Reached maxCoalescedReqs number %d\n",
                    tlb->outstandingReqs);
            return false;
        }
    }

    /**
     * handleFuncTranslationReturn is called on a TLB hit,
     * when a TLB miss returns or when a page fault returns.
     */
    void
    GpuTLB::handleFuncTranslationReturn(PacketPtr pkt, tlbOutcome tlb_outcome)
    {
        TranslationState *sender_state =
            safe_cast<TranslationState*>(pkt->senderState);

        ThreadContext *tc = sender_state->tc;
        Mode mode = sender_state->tlbMode;
        Addr vaddr = pkt->req->getVaddr();

        TlbEntry *local_entry, *new_entry;

        if (tlb_outcome == TLB_HIT) {
            DPRINTF(GPUTLB, "Functional Translation Done - TLB hit for addr "
                    "%#x\n", vaddr);

            local_entry = sender_state->tlbEntry;
        } else {
            DPRINTF(GPUTLB, "Functional Translation Done - TLB miss for addr "
                    "%#x\n", vaddr);

            // We are returning either from a page walk or from a hit at a
            // lower TLB level; the senderState should be carrying the
            // translated entry
            new_entry = sender_state->tlbEntry;
            assert(new_entry);
            local_entry = new_entry;

            if (allocationPolicy) {
                Addr virt_page_addr = roundDown(vaddr, X86ISA::PageBytes);

                DPRINTF(GPUTLB, "allocating entry w/ addr %#x\n",
                        virt_page_addr);

                local_entry = insert(virt_page_addr, *new_entry);
            }

            assert(local_entry);
        }

        DPRINTF(GPUTLB, "Entry found with vaddr %#x, doing protection checks "
                "while paddr was %#x.\n", local_entry->vaddr,
                local_entry->paddr);

        /**
         * Do paging checks if it's a normal functional access. If it's for
         * a prefetch, then sometimes you can try to prefetch something that
         * won't pass protection. We don't actually want to fault because
         * there is no demand access to deem this a violation. Just put it
         * in the TLB and it will fault if indeed a future demand access
         * touches it in violation.
         */
        if (!sender_state->isPrefetch && sender_state->tlbEntry)
            pagingProtectionChecks(tc, pkt, local_entry, mode);

        int page_size = local_entry->size();
        Addr paddr = local_entry->paddr | (vaddr & (page_size - 1));
        DPRINTF(GPUTLB, "Translated %#x -> %#x.\n", vaddr, paddr);

        pkt->req->setPaddr(paddr);

        if (local_entry->uncacheable)
            pkt->req->setFlags(Request::UNCACHEABLE);
    }

    // This is used for atomic translations. Need to
    // make it all happen during the same cycle.
    void
    GpuTLB::CpuSidePort::recvFunctional(PacketPtr pkt)
    {
        TranslationState *sender_state =
            safe_cast<TranslationState*>(pkt->senderState);

        ThreadContext *tc = sender_state->tc;
        bool update_stats = !sender_state->isPrefetch;

        Addr virt_page_addr = roundDown(pkt->req->getVaddr(),
                                        X86ISA::PageBytes);

        if (update_stats)
            tlb->updatePageFootprint(virt_page_addr);

        // do the TLB lookup (stats updates are skipped for prefetches)
        bool success = tlb->tlbLookup(pkt->req, tc, update_stats);
        tlbOutcome tlb_outcome = success ? TLB_HIT : TLB_MISS;

        // functional mode means no coalescing
        // global metrics are the same as the local metrics
        if (update_stats) {
            tlb->stats.globalNumTLBAccesses++;

            if (success) {
                sender_state->hitLevel = sender_state->reqCnt.size();
                tlb->stats.globalNumTLBHits++;
            }
        }

        if (!success) {
            if (update_stats)
                tlb->stats.globalNumTLBMisses++;
            if (tlb->hasMemSidePort) {
                // there is a TLB below -> propagate down the TLB hierarchy
                tlb->memSidePort[0]->sendFunctional(pkt);
                // If no valid translation from a prefetch, then just return
                if (sender_state->isPrefetch && !pkt->req->hasPaddr())
                    return;
            } else {
                // Need to access the page table and update the TLB
                DPRINTF(GPUTLB, "Doing a page walk for address %#x\n",
                        virt_page_addr);

                Process *p = tc->getProcessPtr();

                Addr vaddr = pkt->req->getVaddr();

                Addr alignedVaddr = p->pTable->pageAlign(vaddr);
                assert(alignedVaddr == virt_page_addr);

                const EmulationPageTable::Entry *pte =
                        p->pTable->lookup(vaddr);
                if (!pte && sender_state->tlbMode != BaseMMU::Execute &&
                        p->fixupFault(vaddr)) {
                    pte = p->pTable->lookup(vaddr);
                }

                if (!sender_state->isPrefetch) {
                    // no PageFaults are permitted after
                    // the second page table lookup
                    assert(pte);

                    DPRINTF(GPUTLB, "Mapping %#x to %#x\n", alignedVaddr,
                            pte->paddr);

                    sender_state->tlbEntry =
                        new TlbEntry(p->pid(), virt_page_addr,
                                     pte->paddr, false, false);
                } else {
                    // If this was a prefetch, then do the normal thing if it
                    // was a successful translation. Otherwise, send an empty
                    // TLB entry back so that it can be figured out as empty
                    // and handled accordingly.
                    if (pte) {
                        DPRINTF(GPUTLB, "Mapping %#x to %#x\n", alignedVaddr,
                                pte->paddr);

                        sender_state->tlbEntry =
                            new TlbEntry(p->pid(), virt_page_addr,
                                         pte->paddr, false, false);
                    } else {
                        DPRINTF(GPUPrefetch, "Prefetch failed %#x\n",
                                alignedVaddr);

                        sender_state->tlbEntry = nullptr;

                        return;
                    }
                }
            }
        } else {
            DPRINTF(GPUPrefetch, "Functional Hit for vaddr %#x\n",
                    tlb->lookup(pkt->req->getVaddr()));

            TlbEntry *entry = tlb->lookup(pkt->req->getVaddr(),
                                          update_stats);

            assert(entry);

            auto p = sender_state->tc->getProcessPtr();
            sender_state->tlbEntry =
                new TlbEntry(p->pid(), entry->vaddr, entry->paddr,
                             false, false);
        }
        // This is the function that would populate pkt->req with the paddr
        // of the translation. But if no translation happens (i.e., the
        // prefetch fails) then the early returns in the above code will
        // keep this function from executing.
        tlb->handleFuncTranslationReturn(pkt, tlb_outcome);
    }

    void
    GpuTLB::CpuSidePort::recvReqRetry()
    {
        // The CpuSidePort never sends anything but replies. No retries
        // expected.
        panic("recvReqRetry called");
    }

    AddrRangeList
    GpuTLB::CpuSidePort::getAddrRanges() const
    {
        // currently not checked by the requestor
        AddrRangeList ranges;

        return ranges;
    }

    /**
     * MemSidePort receives the packet back.
     * We need to call the handleTranslationReturn
     * and propagate up the hierarchy.
     */
    bool
    GpuTLB::MemSidePort::recvTimingResp(PacketPtr pkt)
    {
        Addr virt_page_addr = roundDown(pkt->req->getVaddr(),
                                        X86ISA::PageBytes);

        DPRINTF(GPUTLB, "MemSidePort recvTiming for virt_page_addr %#x\n",
                virt_page_addr);

        TLBEvent *tlb_event = tlb->translationReturnEvent[virt_page_addr];
        assert(tlb_event);
        assert(virt_page_addr == tlb_event->getTLBEventVaddr());

        tlb_event->updateOutcome(MISS_RETURN);
        tlb->schedule(tlb_event, curTick()+tlb->clockPeriod());

        return true;
    }

    void
    GpuTLB::MemSidePort::recvReqRetry()
    {
        // No retries should reach the TLB. The retries
        // should only reach the TLBCoalescer.
        panic("recvReqRetry called");
    }

    void
    GpuTLB::cleanup()
    {
        while (!cleanupQueue.empty()) {
            Addr cleanup_addr = cleanupQueue.front();
            cleanupQueue.pop();

            // delete TLBEvent
            TLBEvent * old_tlb_event = translationReturnEvent[cleanup_addr];
            delete old_tlb_event;
            translationReturnEvent.erase(cleanup_addr);

            // update number of outstanding requests
            outstandingReqs--;
        }

        // let the coalescer(s) retry requests that were previously rejected
        for (int i = 0; i < cpuSidePort.size(); ++i) {
            cpuSidePort[i]->sendRetryReq();
        }
    }

    void
    GpuTLB::updatePageFootprint(Addr virt_page_addr)
    {

        std::pair<AccessPatternTable::iterator, bool> ret;

        AccessInfo tmp_access_info;
        tmp_access_info.lastTimeAccessed = 0;
        tmp_access_info.accessesPerPage = 0;
        tmp_access_info.totalReuseDistance = 0;
        tmp_access_info.sumDistance = 0;
        tmp_access_info.meanDistance = 0;

        ret = TLBFootprint.insert(
            AccessPatternTable::value_type(virt_page_addr, tmp_access_info));

        bool first_page_access = ret.second;

        if (first_page_access) {
            stats.numUniquePages++;
        } else {
            int accessed_before;
            accessed_before = curTick() - ret.first->second.lastTimeAccessed;
            ret.first->second.totalReuseDistance += accessed_before;
        }

        ret.first->second.accessesPerPage++;
        ret.first->second.lastTimeAccessed = curTick();

        if (accessDistance) {
            ret.first->second.localTLBAccesses
                .push_back(stats.localNumTLBAccesses.value());
        }
    }
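
    // Worked example (hypothetical numbers, not from the original
    // source): if a page is touched at tick 1000 and again at tick 1500,
    // accessed_before = 500 is added to totalReuseDistance; the per-page
    // value reported at exit is totalReuseDistance / accessesPerPage
    // (in ticks).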

    void
    GpuTLB::exitCallback()
    {
        std::ostream *page_stat_file = nullptr;

        if (accessDistance) {

            // print per page statistics to a separate file (.csv format)
            // simout is the gem5 output directory (default is m5out or the
            // one specified with -d)
            page_stat_file = simout.create(name().c_str())->stream();

            // print header
            *page_stat_file
                << "page,max_access_distance,mean_access_distance, "
                << "stddev_distance" << std::endl;
        }

        // update avg. reuse distance footprint
        unsigned int sum_avg_reuse_distance_per_page = 0;

        // iterate through all pages seen by this TLB
        for (auto &iter : TLBFootprint) {
            sum_avg_reuse_distance_per_page += iter.second.totalReuseDistance /
                iter.second.accessesPerPage;

            if (accessDistance) {
                unsigned int tmp = iter.second.localTLBAccesses[0];
                unsigned int prev = tmp;

                for (int i = 0; i < iter.second.localTLBAccesses.size(); ++i) {
                    if (i) {
                        tmp = prev + 1;
                    }

                    prev = iter.second.localTLBAccesses[i];
                    // update the localTLBAccesses value
                    // with the actual difference
                    iter.second.localTLBAccesses[i] -= tmp;
                    // compute the sum of AccessDistance per page
                    // used later for mean
                    iter.second.sumDistance +=
                        iter.second.localTLBAccesses[i];
                }
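
                // Worked example (hypothetical numbers, not from the
                // original source): raw localTLBAccesses snapshots
                // {3, 7, 12} become {0, 3, 4}; each entry turns into
                // raw[i] - raw[i-1] - 1, the number of other TLB accesses
                // between consecutive touches of this page (the first
                // entry is zeroed).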

                iter.second.meanDistance =
                    iter.second.sumDistance / iter.second.accessesPerPage;

                // compute std_dev and max (we need a second round because we
                // need to know the mean value)
                unsigned int max_distance = 0;
                unsigned int stddev_distance = 0;

                for (int i = 0; i < iter.second.localTLBAccesses.size(); ++i) {
                    unsigned int tmp_access_distance =
                        iter.second.localTLBAccesses[i];

                    if (tmp_access_distance > max_distance) {
                        max_distance = tmp_access_distance;
                    }

                    unsigned int diff =
                        tmp_access_distance - iter.second.meanDistance;
                    stddev_distance += pow(diff, 2);

                }

                stddev_distance =
                    sqrt(stddev_distance/iter.second.accessesPerPage);

                if (page_stat_file) {
                    *page_stat_file << std::hex << iter.first << ",";
                    *page_stat_file << std::dec << max_distance << ",";
                    *page_stat_file << std::dec << iter.second.meanDistance
                        << ",";
                    *page_stat_file << std::dec << stddev_distance;
                    *page_stat_file << std::endl;
                }

                // erase the localTLBAccesses array
                iter.second.localTLBAccesses.clear();
            }
        }

        if (!TLBFootprint.empty()) {
            stats.avgReuseDistance =
                sum_avg_reuse_distance_per_page / TLBFootprint.size();
        }

        // clear the TLBFootprint map
        TLBFootprint.clear();
    }

    GpuTLB::GpuTLBStats::GpuTLBStats(statistics::Group *parent)
        : statistics::Group(parent),
          ADD_STAT(localNumTLBAccesses, "Number of TLB accesses"),
          ADD_STAT(localNumTLBHits, "Number of TLB hits"),
          ADD_STAT(localNumTLBMisses, "Number of TLB misses"),
          ADD_STAT(localTLBMissRate, "TLB miss rate"),
          ADD_STAT(globalNumTLBAccesses, "Number of TLB accesses"),
          ADD_STAT(globalNumTLBHits, "Number of TLB hits"),
          ADD_STAT(globalNumTLBMisses, "Number of TLB misses"),
          ADD_STAT(globalTLBMissRate, "TLB miss rate"),
          ADD_STAT(accessCycles, "Cycles spent accessing this TLB level"),
          ADD_STAT(pageTableCycles, "Cycles spent accessing the page table"),
          ADD_STAT(numUniquePages, "Number of unique pages touched"),
          ADD_STAT(localCycles, "Number of cycles spent in queue for all "
                   "incoming reqs"),
          ADD_STAT(localLatency, "Avg. latency over incoming coalesced reqs"),
          ADD_STAT(avgReuseDistance, "avg. reuse distance over all pages (in "
                   "ticks)")
    {
        localLatency = localCycles / localNumTLBAccesses;

        localTLBMissRate = 100 * localNumTLBMisses / localNumTLBAccesses;
        globalTLBMissRate = 100 * globalNumTLBMisses / globalNumTLBAccesses;
    }
} // namespace X86ISA
} // namespace gem5