gem5  v21.1.0.2
tlb.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2001-2005 The Regents of The University of Michigan
3  * Copyright (c) 2007 MIPS Technologies, Inc.
4  * Copyright (c) 2020 Barkhausen Institut
5  * Copyright (c) 2021 Huawei International
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are
10  * met: redistributions of source code must retain the above copyright
11  * notice, this list of conditions and the following disclaimer;
12  * redistributions in binary form must reproduce the above copyright
13  * notice, this list of conditions and the following disclaimer in the
14  * documentation and/or other materials provided with the distribution;
15  * neither the name of the copyright holders nor the names of its
16  * contributors may be used to endorse or promote products derived from
17  * this software without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include "arch/riscv/tlb.hh"
33 
34 #include <string>
35 #include <vector>
36 
37 #include "arch/riscv/faults.hh"
38 #include "arch/riscv/mmu.hh"
39 #include "arch/riscv/pagetable.hh"
42 #include "arch/riscv/pmp.hh"
44 #include "arch/riscv/utility.hh"
45 #include "base/inifile.hh"
46 #include "base/str.hh"
47 #include "base/trace.hh"
48 #include "cpu/thread_context.hh"
49 #include "debug/TLB.hh"
50 #include "debug/TLBVerbose.hh"
51 #include "mem/page_table.hh"
52 #include "params/RiscvTLB.hh"
53 #include "sim/full_system.hh"
54 #include "sim/process.hh"
55 #include "sim/system.hh"
56 
57 namespace gem5
58 {
59 
60 using namespace RiscvISA;
61 
63 //
64 // RISC-V TLB
65 //
66 
67 static Addr
68 buildKey(Addr vpn, uint16_t asid)
69 {
70  return (static_cast<Addr>(asid) << 48) | vpn;
71 }
72 
73 TLB::TLB(const Params &p) :
74  BaseTLB(p), size(p.size), tlb(size),
75  lruSeq(0), stats(this), pma(p.pma_checker),
76  pmp(p.pmp)
77 {
78  for (size_t x = 0; x < size; x++) {
79  tlb[x].trieHandle = NULL;
80  freeList.push_back(&tlb[x]);
81  }
82 
83  walker = p.walker;
84  walker->setTLB(this);
85 }
86 
/// Return the page-table walker associated with this TLB.
Walker *
TLB::getWalker()
{
    return walker;
}
92 
93 void
94 TLB::evictLRU()
95 {
96  // Find the entry with the lowest (and hence least recently updated)
97  // sequence number.
98 
99  size_t lru = 0;
100  for (size_t i = 1; i < size; i++) {
101  if (tlb[i].lruSeq < tlb[lru].lruSeq)
102  lru = i;
103  }
104 
105  remove(lru);
106 }
107 
108 TlbEntry *
109 TLB::lookup(Addr vpn, uint16_t asid, BaseMMU::Mode mode, bool hidden)
110 {
111  TlbEntry *entry = trie.lookup(buildKey(vpn, asid));
112 
113  if (!hidden) {
114  if (entry)
115  entry->lruSeq = nextSeq();
116 
117  if (mode == BaseMMU::Write)
118  stats.writeAccesses++;
119  else
120  stats.readAccesses++;
121 
122  if (!entry) {
123  if (mode == BaseMMU::Write)
124  stats.writeMisses++;
125  else
126  stats.readMisses++;
127  }
128  else {
129  if (mode == BaseMMU::Write)
130  stats.writeHits++;
131  else
132  stats.readHits++;
133  }
134 
135  DPRINTF(TLBVerbose, "lookup(vpn=%#x, asid=%#x): %s ppn %#x\n",
136  vpn, asid, entry ? "hit" : "miss", entry ? entry->paddr : 0);
137  }
138 
139  return entry;
140 }
141 
142 TlbEntry *
143 TLB::insert(Addr vpn, const TlbEntry &entry)
144 {
145  DPRINTF(TLB, "insert(vpn=%#x, asid=%#x): ppn=%#x pte=%#x size=%#x\n",
146  vpn, entry.asid, entry.paddr, entry.pte, entry.size());
147 
148  // If somebody beat us to it, just use that existing entry.
149  TlbEntry *newEntry = lookup(vpn, entry.asid, BaseMMU::Read, true);
150  if (newEntry) {
151  // update PTE flags (maybe we set the dirty/writable flag)
152  newEntry->pte = entry.pte;
153  assert(newEntry->vaddr == vpn);
154  return newEntry;
155  }
156 
157  if (freeList.empty())
158  evictLRU();
159 
160  newEntry = freeList.front();
161  freeList.pop_front();
162 
163  Addr key = buildKey(vpn, entry.asid);
164  *newEntry = entry;
165  newEntry->lruSeq = nextSeq();
166  newEntry->vaddr = vpn;
167  newEntry->trieHandle =
168  trie.insert(key, TlbEntryTrie::MaxBits - entry.logBytes, newEntry);
169  return newEntry;
170 }
171 
172 void
173 TLB::demapPage(Addr vpn, uint64_t asid)
174 {
175  asid &= 0xFFFF;
176 
177  if (vpn == 0 && asid == 0)
178  flushAll();
179  else {
180  DPRINTF(TLB, "flush(vpn=%#x, asid=%#x)\n", vpn, asid);
181  if (vpn != 0 && asid != 0) {
182  TlbEntry *newEntry = lookup(vpn, asid, BaseMMU::Read, true);
183  if (newEntry)
184  remove(newEntry - tlb.data());
185  }
186  else {
187  for (size_t i = 0; i < size; i++) {
188  if (tlb[i].trieHandle) {
189  Addr mask = ~(tlb[i].size() - 1);
190  if ((vpn == 0 || (vpn & mask) == tlb[i].vaddr) &&
191  (asid == 0 || tlb[i].asid == asid))
192  remove(i);
193  }
194  }
195  }
196  }
197 }
198 
199 void
200 TLB::flushAll()
201 {
202  DPRINTF(TLB, "flushAll()\n");
203  for (size_t i = 0; i < size; i++) {
204  if (tlb[i].trieHandle)
205  remove(i);
206  }
207 }
208 
209 void
210 TLB::remove(size_t idx)
211 {
212  DPRINTF(TLB, "remove(vpn=%#x, asid=%#x): ppn=%#x pte=%#x size=%#x\n",
213  tlb[idx].vaddr, tlb[idx].asid, tlb[idx].paddr, tlb[idx].pte,
214  tlb[idx].size());
215 
216  assert(tlb[idx].trieHandle);
217  trie.remove(tlb[idx].trieHandle);
218  tlb[idx].trieHandle = NULL;
219  freeList.push_back(&tlb[idx]);
220 }
221 
222 Fault
223 TLB::checkPermissions(STATUS status, PrivilegeMode pmode, Addr vaddr,
224  BaseMMU::Mode mode, PTESv39 pte)
225 {
226  Fault fault = NoFault;
227 
228  if (mode == BaseMMU::Read && !pte.r) {
229  DPRINTF(TLB, "PTE has no read perm, raising PF\n");
230  fault = createPagefault(vaddr, mode);
231  }
232  else if (mode == BaseMMU::Write && !pte.w) {
233  DPRINTF(TLB, "PTE has no write perm, raising PF\n");
234  fault = createPagefault(vaddr, mode);
235  }
236  else if (mode == BaseMMU::Execute && !pte.x) {
237  DPRINTF(TLB, "PTE has no exec perm, raising PF\n");
238  fault = createPagefault(vaddr, mode);
239  }
240 
241  if (fault == NoFault) {
242  // check pte.u
243  if (pmode == PrivilegeMode::PRV_U && !pte.u) {
244  DPRINTF(TLB, "PTE is not user accessible, raising PF\n");
245  fault = createPagefault(vaddr, mode);
246  }
247  else if (pmode == PrivilegeMode::PRV_S && pte.u && status.sum == 0) {
248  DPRINTF(TLB, "PTE is only user accessible, raising PF\n");
249  fault = createPagefault(vaddr, mode);
250  }
251  }
252 
253  return fault;
254 }
255 
256 Fault
257 TLB::createPagefault(Addr vaddr, BaseMMU::Mode mode)
258 {
259  ExceptionCode code;
260  if (mode == BaseMMU::Read)
262  else if (mode == BaseMMU::Write)
264  else
266  return std::make_shared<AddressFault>(vaddr, code);
267 }
268 
269 Addr
270 TLB::translateWithTLB(Addr vaddr, uint16_t asid, BaseMMU::Mode mode)
271 {
272  TlbEntry *e = lookup(vaddr, asid, mode, false);
273  assert(e != nullptr);
274  return e->paddr << PageShift | (vaddr & mask(e->logBytes));
275 }
276 
277 Fault
278 TLB::doTranslate(const RequestPtr &req, ThreadContext *tc,
280  bool &delayed)
281 {
282  delayed = false;
283 
284  Addr vaddr = Addr(sext<VADDR_BITS>(req->getVaddr()));
285  SATP satp = tc->readMiscReg(MISCREG_SATP);
286 
287  TlbEntry *e = lookup(vaddr, satp.asid, mode, false);
288  if (!e) {
289  Fault fault = walker->start(tc, translation, req, mode);
290  if (translation != nullptr || fault != NoFault) {
291  // This gets ignored in atomic mode.
292  delayed = true;
293  return fault;
294  }
295  e = lookup(vaddr, satp.asid, mode, false);
296  assert(e != nullptr);
297  }
298 
299  STATUS status = tc->readMiscReg(MISCREG_STATUS);
300  PrivilegeMode pmode = getMemPriv(tc, mode);
301  Fault fault = checkPermissions(status, pmode, vaddr, mode, e->pte);
302  if (fault != NoFault) {
303  // if we want to write and it isn't writable, do a page table walk
304  // again to update the dirty flag.
305  if (mode == BaseMMU::Write && !e->pte.w) {
306  DPRINTF(TLB, "Dirty bit not set, repeating PT walk\n");
307  fault = walker->start(tc, translation, req, mode);
308  if (translation != nullptr || fault != NoFault) {
309  delayed = true;
310  return fault;
311  }
312  }
313  if (fault != NoFault)
314  return fault;
315  }
316 
317  Addr paddr = e->paddr << PageShift | (vaddr & mask(e->logBytes));
318  DPRINTF(TLBVerbose, "translate(vpn=%#x, asid=%#x): %#x\n",
319  vaddr, satp.asid, paddr);
320  req->setPaddr(paddr);
321 
322  return NoFault;
323 }
324 
326 TLB::getMemPriv(ThreadContext *tc, BaseMMU::Mode mode)
327 {
328  STATUS status = (STATUS)tc->readMiscReg(MISCREG_STATUS);
330  if (mode != BaseMMU::Execute && status.mprv == 1)
331  pmode = (PrivilegeMode)(RegVal)status.mpp;
332  return pmode;
333 }
334 
335 Fault
336 TLB::translate(const RequestPtr &req, ThreadContext *tc,
338  bool &delayed)
339 {
340  delayed = false;
341 
342  if (FullSystem) {
343  PrivilegeMode pmode = getMemPriv(tc, mode);
344  SATP satp = tc->readMiscReg(MISCREG_SATP);
345  if (pmode == PrivilegeMode::PRV_M || satp.mode == AddrXlateMode::BARE)
346  req->setFlags(Request::PHYSICAL);
347 
348  Fault fault;
349  if (req->getFlags() & Request::PHYSICAL) {
353  req->setPaddr(req->getVaddr());
354  fault = NoFault;
355  } else {
356  fault = doTranslate(req, tc, translation, mode, delayed);
357  }
358 
359  // according to the RISC-V tests, negative physical addresses trigger
360  // an illegal address exception.
361  // TODO where is that written in the manual?
362  if (!delayed && fault == NoFault && bits(req->getPaddr(), 63)) {
363  ExceptionCode code;
364  if (mode == BaseMMU::Read)
366  else if (mode == BaseMMU::Write)
368  else
370  fault = std::make_shared<AddressFault>(req->getVaddr(), code);
371  }
372 
373  if (!delayed && fault == NoFault) {
374  pma->check(req);
375 
376  // do pmp check if any checking condition is met.
377  // timingFault will be NoFault if pmp checks are
378  // passed, otherwise an address fault will be returned.
379  fault = pmp->pmpCheck(req, mode, pmode, tc);
380  }
381 
382  return fault;
383  } else {
384  // In the O3 CPU model, sometimes a memory access will be speculatively
385  // executed along a branch that will end up not being taken where the
386  // address is invalid. In that case, return a fault rather than trying
387  // to translate it (which will cause a panic). Since RISC-V allows
388  // unaligned memory accesses, this should only happen if the request's
389  // length is long enough to wrap around from the end of the memory to
390  // the start.
391  assert(req->getSize() > 0);
392  if (req->getVaddr() + req->getSize() - 1 < req->getVaddr())
393  return std::make_shared<GenericPageTableFault>(req->getVaddr());
394 
395  Process * p = tc->getProcessPtr();
396 
397  Fault fault = p->pTable->translate(req);
398  if (fault != NoFault)
399  return fault;
400 
401  return NoFault;
402  }
403 }
404 
405 Fault
406 TLB::translateAtomic(const RequestPtr &req, ThreadContext *tc,
408 {
409  bool delayed;
410  return translate(req, tc, nullptr, mode, delayed);
411 }
412 
413 void
414 TLB::translateTiming(const RequestPtr &req, ThreadContext *tc,
416 {
417  bool delayed;
418  assert(translation);
419  Fault fault = translate(req, tc, translation, mode, delayed);
420  if (!delayed)
421  translation->finish(fault, req, tc, mode);
422  else
423  translation->markDelayed();
424 }
425 
/// Functional translation: compute the physical address for @a vaddr
/// without going through the TLB arrays (no LRU/stat updates visible
/// here). In full-system mode a functional page-table walk is used; in
/// SE mode the process page table is consulted directly.
Fault
TLB::translateFunctional(const RequestPtr &req, ThreadContext *tc,
    BaseMMU::Mode mode)
{
    const Addr vaddr = req->getVaddr();
    Addr paddr = vaddr;

    if (FullSystem) {
        MMU *mmu = static_cast<MMU *>(tc->getMMUPtr());

        PrivilegeMode pmode = mmu->getMemPriv(tc, mode);
        SATP satp = tc->readMiscReg(MISCREG_SATP);
        // M-mode and Bare translation mode leave paddr == vaddr.
        if (pmode != PrivilegeMode::PRV_M &&
            satp.mode != AddrXlateMode::BARE) {
            Walker *walker = mmu->getDataWalker();
            unsigned logBytes;
            // startFunctional takes paddr by reference; presumably it
            // returns the page base and the page size via logBytes —
            // TODO confirm against the walker's declaration.
            Fault fault = walker->startFunctional(
                    tc, paddr, logBytes, mode);
            if (fault != NoFault)
                return fault;

            // OR the in-page offset back into the walked page address.
            Addr masked_addr = vaddr & mask(logBytes);
            paddr |= masked_addr;
        }
    }
    else {
        Process *process = tc->getProcessPtr();
        const auto *pte = process->pTable->lookup(vaddr);

        if (!pte && mode != BaseMMU::Execute) {
            // Check if we just need to grow the stack.
            if (process->fixupFault(vaddr)) {
                // If we did, lookup the entry for the new page.
                pte = process->pTable->lookup(vaddr);
            }
        }

        if (!pte)
            return std::make_shared<GenericPageTableFault>(req->getVaddr());

        paddr = pte->paddr | process->pTable->pageOffset(vaddr);
    }

    DPRINTF(TLB, "Translated (functional) %#x -> %#x.\n", vaddr, paddr);
    req->setPaddr(paddr);
    return NoFault;
}
473 
/// Hook for post-translation adjustment of the physical request; the
/// RISC-V TLB needs none, so this is a no-op that always succeeds.
Fault
TLB::finalizePhysical(const RequestPtr &req,
                      ThreadContext *tc, BaseMMU::Mode mode) const
{
    return NoFault;
}
480 
481 void
482 TLB::serialize(CheckpointOut &cp) const
483 {
484  // Only store the entries in use.
485  uint32_t _size = size - freeList.size();
486  SERIALIZE_SCALAR(_size);
487  SERIALIZE_SCALAR(lruSeq);
488 
489  uint32_t _count = 0;
490  for (uint32_t x = 0; x < size; x++) {
491  if (tlb[x].trieHandle != NULL)
492  tlb[x].serializeSection(cp, csprintf("Entry%d", _count++));
493  }
494 }
495 
496 void
497 TLB::unserialize(CheckpointIn &cp)
498 {
499  // Do not allow to restore with a smaller tlb.
500  uint32_t _size;
501  UNSERIALIZE_SCALAR(_size);
502  if (_size > size) {
503  fatal("TLB size less than the one in checkpoint!");
504  }
505 
506  UNSERIALIZE_SCALAR(lruSeq);
507 
508  for (uint32_t x = 0; x < _size; x++) {
509  TlbEntry *newEntry = freeList.front();
510  freeList.pop_front();
511 
512  newEntry->unserializeSection(cp, csprintf("Entry%d", x));
513  Addr key = buildKey(newEntry->vaddr, newEntry->asid);
514  newEntry->trieHandle = trie.insert(key,
515  TlbEntryTrie::MaxBits - newEntry->logBytes, newEntry);
516  }
517 }
518 
// Register this TLB's statistics: separate read/write hit, miss and
// access counters, plus derived formulas combining reads and writes.
TLB::TlbStats::TlbStats(statistics::Group *parent)
  : statistics::Group(parent),
    ADD_STAT(readHits, statistics::units::Count::get(), "read hits"),
    ADD_STAT(readMisses, statistics::units::Count::get(), "read misses"),
    ADD_STAT(readAccesses, statistics::units::Count::get(), "read accesses"),
    ADD_STAT(writeHits, statistics::units::Count::get(), "write hits"),
    ADD_STAT(writeMisses, statistics::units::Count::get(), "write misses"),
    ADD_STAT(writeAccesses, statistics::units::Count::get(), "write accesses"),
    ADD_STAT(hits, statistics::units::Count::get(),
             "Total TLB (read and write) hits", readHits + writeHits),
    ADD_STAT(misses, statistics::units::Count::get(),
             "Total TLB (read and write) misses", readMisses + writeMisses),
    ADD_STAT(accesses, statistics::units::Count::get(),
             "Total TLB (read and write) accesses",
             readAccesses + writeAccesses)
{
}
536 
537 Port *
539 {
540  return &walker->getPort("port");
541 }
542 
543 } // namespace gem5
fatal
#define fatal(...)
This implements a cprintf based fatal() function.
Definition: logging.hh:189
gem5::unserialize
void unserialize(ThreadContext &tc, CheckpointIn &cp)
Definition: thread_context.cc:206
gem5::RiscvISA::PRV_S
@ PRV_S
Definition: isa.hh:55
gem5::ArmISA::tlb
Bitfield< 59, 56 > tlb
Definition: misc_types.hh:91
gem5::ThreadContext::readMiscReg
virtual RegVal readMiscReg(RegIndex misc_reg)=0
gem5::NoFault
constexpr decltype(nullptr) NoFault
Definition: types.hh:260
gem5::ArmISA::PageShift
const Addr PageShift
Definition: page_size.hh:52
gem5::RiscvISA::PRV_M
@ PRV_M
Definition: isa.hh:56
gem5::RegVal
uint64_t RegVal
Definition: types.hh:173
faults.hh
system.hh
gem5::X86ISA::TlbEntry::trieHandle
TlbEntryTrie::Handle trieHandle
Definition: pagetable.hh:94
UNSERIALIZE_SCALAR
#define UNSERIALIZE_SCALAR(scalar)
Definition: serialize.hh:575
gem5::MipsISA::MISCREG_STATUS
@ MISCREG_STATUS
Definition: misc.hh:96
gem5::RiscvISA::TLB::TLB
TLB(const Params &p)
gem5::RiscvISA::LOAD_PAGE
@ LOAD_PAGE
Definition: faults.hh:83
gem5::BaseMMU::Mode
Mode
Definition: mmu.hh:53
gem5::ArmISA::asid
asid
Definition: misc_types.hh:617
gem5::X86ISA::TlbEntry::size
int size()
Definition: pagetable.hh:112
gem5::BaseMMU::Translation::markDelayed
virtual void markDelayed()=0
Signal that the translation has been delayed due to a hw page table walk.
gem5::RiscvISA::PrivilegeMode
PrivilegeMode
Definition: isa.hh:52
gem5::ArmISA::e
Bitfield< 9 > e
Definition: misc_types.hh:64
pma_checker.hh
gem5::csprintf
std::string csprintf(const char *format, const Args &...args)
Definition: cprintf.hh:161
gem5::RiscvISA::Walker::getPort
Port & getPort(const std::string &if_name, PortID idx=InvalidPortID) override
Get a port with a given name and index.
Definition: pagetable_walker.cc:173
gem5::ArmISA::i
Bitfield< 7 > i
Definition: misc_types.hh:66
gem5::X86ISA::TlbEntry
Definition: pagetable.hh:65
str.hh
gem5::buildKey
static Addr buildKey(Addr vpn, uint16_t asid)
Definition: tlb.cc:68
pmp.hh
gem5::ThreadContext
ThreadContext is the external interface to all thread state for anything outside of the CPU.
Definition: thread_context.hh:93
gem5::RiscvISA::LOAD_ACCESS
@ LOAD_ACCESS
Definition: faults.hh:74
gem5::Serializable::unserializeSection
void unserializeSection(CheckpointIn &cp, const char *name)
Unserialize an a child object.
Definition: serialize.cc:81
gem5::Fault
std::shared_ptr< FaultBase > Fault
Definition: types.hh:255
tlb.hh
gem5::RiscvISA::TlbEntry
Definition: pagetable.hh:82
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:186
ADD_STAT
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Definition: group.hh:75
gem5::RiscvISA::TLB::walker
Walker * walker
Definition: tlb.hh:70
gem5::MipsISA::p
Bitfield< 0 > p
Definition: pra_constants.hh:326
gem5::RiscvISA::INST_ACCESS
@ INST_ACCESS
Definition: faults.hh:70
gem5::RiscvISA::MISCREG_SATP
@ MISCREG_SATP
Definition: misc.hh:180
gem5::RequestPtr
std::shared_ptr< Request > RequestPtr
Definition: request.hh:92
gem5::RiscvISA::INST_PAGE
@ INST_PAGE
Definition: faults.hh:82
process.hh
gem5::RiscvISA::STORE_ACCESS
@ STORE_ACCESS
Definition: faults.hh:77
gem5::ArmISA::mask
Bitfield< 3, 0 > mask
Definition: pcstate.hh:63
gem5::bits
constexpr T bits(T val, unsigned first, unsigned last)
Extract the bitfield from position 'first' to 'last' (inclusive) from 'val' and right justify it.
Definition: bitfield.hh:76
pra_constants.hh
gem5::RiscvISA::p
Bitfield< 0 > p
Definition: pra_constants.hh:326
inifile.hh
gem5::serialize
void serialize(const ThreadContext &tc, CheckpointOut &cp)
Thread context serialization helpers.
Definition: thread_context.cc:157
gem5::RiscvISA::asid
Bitfield< 59, 44 > asid
Definition: pagetable.hh:47
gem5::RiscvISA::ExceptionCode
ExceptionCode
Definition: faults.hh:67
gem5::X86ISA::TlbEntry::paddr
Addr paddr
Definition: pagetable.hh:68
gem5::Addr
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:147
gem5::RiscvISA::TLB
Definition: tlb.hh:59
SERIALIZE_SCALAR
#define SERIALIZE_SCALAR(scalar)
Definition: serialize.hh:568
full_system.hh
gem5::Process
Definition: process.hh:67
gem5::ThreadContext::getProcessPtr
virtual Process * getProcessPtr()=0
gem5::FullSystem
bool FullSystem
The FullSystem variable can be used to determine the current mode of simulation.
Definition: root.cc:223
gem5::RiscvISA::TLB::getTableWalkerPort
Port * getTableWalkerPort() override
Get the table walker port.
gem5::BaseMMU::Translation
Definition: mmu.hh:55
gem5::X86ISA::TlbEntry::vaddr
Addr vaddr
Definition: pagetable.hh:71
gem5::X86ISA::TLB
Definition: tlb.hh:60
gem5::RiscvISA::x
Bitfield< 3 > x
Definition: pagetable.hh:73
mmu.hh
gem5::RiscvISA::STORE_PAGE
@ STORE_PAGE
Definition: faults.hh:84
gem5::X86ISA::TlbEntry::lruSeq
uint64_t lruSeq
Definition: pagetable.hh:92
gem5::RiscvISA::TlbEntry::paddr
Addr paddr
Definition: pagetable.hh:85
gem5::X86ISA::TlbEntry::logBytes
unsigned logBytes
Definition: pagetable.hh:73
gem5::CheckpointOut
std::ostream CheckpointOut
Definition: serialize.hh:66
utility.hh
gem5::RiscvISA::PRV_U
@ PRV_U
Definition: isa.hh:54
trace.hh
gem5::MipsISA::vaddr
vaddr
Definition: pra_constants.hh:278
page_table.hh
gem5
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
Definition: decoder.cc:40
gem5::RiscvISA::MISCREG_PRV
@ MISCREG_PRV
Definition: misc.hh:65
gem5::RiscvISA::TlbEntry::lruSeq
uint64_t lruSeq
Definition: pagetable.hh:99
gem5::BaseMMU::Translation::finish
virtual void finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc, BaseMMU::Mode mode)=0
pagetable.hh
thread_context.hh
gem5::ArmISA::status
Bitfield< 5, 0 > status
Definition: misc_types.hh:422
gem5::ArmISA::mode
Bitfield< 4, 0 > mode
Definition: misc_types.hh:73
pagetable_walker.hh

Generated on Tue Sep 21 2021 12:24:27 for gem5 by doxygen 1.8.17