gem5 v24.1.0.1
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
tlb.cc
Go to the documentation of this file.
1/*
2 * Copyright (c) 2001-2005 The Regents of The University of Michigan
3 * Copyright (c) 2007 MIPS Technologies, Inc.
4 * Copyright (c) 2020 Barkhausen Institut
5 * Copyright (c) 2021 Huawei International
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are
10 * met: redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer;
12 * redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution;
15 * neither the name of the copyright holders nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31
32#include "arch/riscv/tlb.hh"
33
34#include <string>
35#include <vector>
36
37#include "arch/riscv/faults.hh"
38#include "arch/riscv/mmu.hh"
42#include "arch/riscv/pmp.hh"
44#include "arch/riscv/utility.hh"
45#include "base/inifile.hh"
46#include "base/str.hh"
47#include "base/trace.hh"
48#include "cpu/thread_context.hh"
49#include "debug/TLB.hh"
50#include "debug/TLBVerbose.hh"
51#include "mem/page_table.hh"
52#include "params/RiscvTLB.hh"
53#include "sim/full_system.hh"
54#include "sim/process.hh"
55#include "sim/system.hh"
56
57namespace gem5
58{
59
60using namespace RiscvISA;
61
63//
64// RISC-V TLB
65//
66
67static Addr
68buildKey(Addr vpn, uint16_t asid)
69{
70 // Note ASID is 16 bits
71 // The VPN in sv39 is up to 39-12=27 bits
72 // The VPN in sv48 is up to 48-12=36 bits
73 // The VPN in sv57 is up to 57-12=45 bits
74 // So, shifting the ASID into the top 16 bits is safe.
75 assert(bits(vpn, 63, 48) == 0);
76 return (static_cast<Addr>(asid) << 48) | vpn;
77}
78
79TLB::TLB(const Params &p) :
80 BaseTLB(p), size(p.size), tlb(size),
81 lruSeq(0), stats(this), pma(p.pma_checker),
82 pmp(p.pmp)
83{
84 for (size_t x = 0; x < size; x++) {
85 tlb[x].trieHandle = NULL;
86 freeList.push_back(&tlb[x]);
87 }
88
89 walker = p.walker;
90 walker->setTLB(this);
91}
92
// Accessor for the hardware page-table walker owned by this TLB.
Walker *
TLB::getWalker()
{
    return walker;
}
98
99void
100TLB::evictLRU()
101{
102 // Find the entry with the lowest (and hence least recently updated)
103 // sequence number.
104
105 size_t lru = 0;
106 for (size_t i = 1; i < size; i++) {
107 if (tlb[i].lruSeq < tlb[lru].lruSeq)
108 lru = i;
109 }
110
111 remove(lru);
112}
113
114TlbEntry *
115TLB::lookup(Addr vpn, uint16_t asid, BaseMMU::Mode mode, bool hidden)
116{
117 TlbEntry *entry = trie.lookup(buildKey(vpn, asid));
118
119 DPRINTF(TLBVerbose, "lookup(vpn=%#x, asid=%#x, key=%#x): "
120 "%s ppn=%#x (%#x) %s\n",
121 vpn, asid, buildKey(vpn, asid), entry ? "hit" : "miss",
122 entry ? entry->paddr : 0, entry ? entry->size() : 0,
123 hidden ? "hidden" : "");
124
125 if (!hidden) {
126 if (entry)
127 entry->lruSeq = nextSeq();
128
129 if (mode == BaseMMU::Write)
130 stats.writeAccesses++;
131 else
132 stats.readAccesses++;
133
134 if (!entry) {
135 if (mode == BaseMMU::Write)
136 stats.writeMisses++;
137 else
138 stats.readMisses++;
139 }
140 else {
141 if (mode == BaseMMU::Write)
142 stats.writeHits++;
143 else
144 stats.readHits++;
145 }
146 }
147
148 return entry;
149}
150
151TlbEntry *
152TLB::insert(Addr vpn, const TlbEntry &entry)
153{
154 DPRINTF(TLB, "insert(vpn=%#x, asid=%#x, key=%#x): "
155 "vaddr=%#x paddr=%#x pte=%#x size=%#x\n",
156 vpn, entry.asid, buildKey(vpn, entry.asid), entry.vaddr, entry.paddr,
157 entry.pte, entry.size());
158
159 // If somebody beat us to it, just use that existing entry.
160 TlbEntry *newEntry = lookup(vpn, entry.asid, BaseMMU::Read, true);
161 if (newEntry) {
162 // update PTE flags (maybe we set the dirty/writable flag)
163 newEntry->pte = entry.pte;
164 assert(newEntry->vaddr == entry.vaddr);
165 assert(newEntry->asid == entry.asid);
166 assert(newEntry->logBytes == entry.logBytes);
167 return newEntry;
168 }
169
170 if (freeList.empty())
171 evictLRU();
172
173 newEntry = freeList.front();
174 freeList.pop_front();
175
176 Addr key = buildKey(vpn, entry.asid);
177 *newEntry = entry;
178 newEntry->lruSeq = nextSeq();
179 newEntry->trieHandle = trie.insert(
180 key, TlbEntryTrie::MaxBits - entry.logBytes + PageShift, newEntry
181 );
182 return newEntry;
183}
184
185void
186TLB::demapPage(Addr vaddr, uint64_t asid)
187{
188 // Note: vaddr is Reg[rs1] and asid is Reg[rs2]
189 // The definition of this instruction is
190 // if vaddr=x0 and asid=x0, then flush all
191 // if vaddr=x0 and asid!=x0 then flush all with matching asid
192 // if vaddr!=x0 and asid=x0 then flush all leaf PTEs that match vaddr
193 // if vaddr!=x0 and asid!=x0 then flush the leaf PTE that matches vaddr
194 // in the given asid.
195 // No effect if vaddr is not valid
196 // Currently, we assume if the values of the registers are 0 then it was
197 // referencing x0.
198
199 asid &= 0xFFFF;
200
201 DPRINTF(TLB, "flush(vaddr=%#x, asid=%#x)\n", vaddr, asid);
202 if (vaddr == 0 && asid == 0) {
203 DPRINTF(TLB, "Flushing all TLB entries\n");
204 flushAll();
205 } else {
206 if (vaddr != 0 && asid != 0) {
207 // TODO: When supporting other address translation modes, fix this
208 Addr vpn = getVPNFromVAddr(vaddr, AddrXlateMode::SV39);
209 TlbEntry *entry = lookup(vpn, asid, BaseMMU::Read, true);
210 if (entry) {
211 remove(entry - tlb.data());
212 }
213 }
214 else {
215 for (size_t i = 0; i < size; i++) {
216 if (tlb[i].trieHandle) {
217 Addr mask = ~(tlb[i].size() - 1);
218 if ((vaddr == 0 || (vaddr & mask) == tlb[i].vaddr) &&
219 (asid == 0 || tlb[i].asid == asid))
220 remove(i);
221 }
222 }
223 }
224 }
225}
226
227void
228TLB::flushAll()
229{
230 DPRINTF(TLB, "flushAll()\n");
231 for (size_t i = 0; i < size; i++) {
232 if (tlb[i].trieHandle)
233 remove(i);
234 }
235}
236
237void
238TLB::remove(size_t idx)
239{
240 DPRINTF(TLB, "remove(vpn=%#x, asid=%#x): ppn=%#x pte=%#x size=%#x\n",
241 tlb[idx].vaddr, tlb[idx].asid, tlb[idx].paddr, tlb[idx].pte,
242 tlb[idx].size());
243
244 assert(tlb[idx].trieHandle);
245 trie.remove(tlb[idx].trieHandle);
246 tlb[idx].trieHandle = NULL;
247 freeList.push_back(&tlb[idx]);
248}
249
250Fault
251TLB::checkPermissions(STATUS status, PrivilegeMode pmode, Addr vaddr,
252 BaseMMU::Mode mode, PTESv39 pte)
253{
254 Fault fault = NoFault;
255
256 if (mode == BaseMMU::Read && !pte.r) {
257 DPRINTF(TLB, "PTE has no read perm, raising PF\n");
258 fault = createPagefault(vaddr, mode);
259 }
260 else if (mode == BaseMMU::Write && !pte.w) {
261 DPRINTF(TLB, "PTE has no write perm, raising PF\n");
262 fault = createPagefault(vaddr, mode);
263 }
264 else if (mode == BaseMMU::Execute && !pte.x) {
265 DPRINTF(TLB, "PTE has no exec perm, raising PF\n");
266 fault = createPagefault(vaddr, mode);
267 }
268
269 if (fault == NoFault) {
270 // check pte.u
271 if (pmode == PrivilegeMode::PRV_U && !pte.u) {
272 DPRINTF(TLB, "PTE is not user accessible, raising PF\n");
273 fault = createPagefault(vaddr, mode);
274 }
275 else if (pmode == PrivilegeMode::PRV_S && pte.u && status.sum == 0) {
276 DPRINTF(TLB, "PTE is only user accessible, raising PF\n");
277 fault = createPagefault(vaddr, mode);
278 }
279 }
280
281 return fault;
282}
283
284Fault
285TLB::createPagefault(Addr vaddr, BaseMMU::Mode mode)
286{
287 ExceptionCode code;
288 if (mode == BaseMMU::Read)
289 code = ExceptionCode::LOAD_PAGE;
290 else if (mode == BaseMMU::Write)
291 code = ExceptionCode::STORE_PAGE;
292 else
293 code = ExceptionCode::INST_PAGE;
294 return std::make_shared<AddressFault>(vaddr, code);
295}
296
297Addr
298TLB::hiddenTranslateWithTLB(Addr vaddr, uint16_t asid, Addr xmode,
299 BaseMMU::Mode mode)
300{
301 TlbEntry *e = lookup(getVPNFromVAddr(vaddr, xmode), asid, mode, true);
302 assert(e != nullptr);
303 return e->paddr << PageShift | (vaddr & mask(e->logBytes));
304}
305
// Perform a virtual-to-physical translation through the TLB, starting a
// page-table walk on a miss.  Sets 'delayed' (and returns early) when the
// walk completes asynchronously in timing mode.
Fault
TLB::doTranslate(const RequestPtr &req, ThreadContext *tc,
                 BaseMMU::Translation *translation, BaseMMU::Mode mode,
                 bool &delayed)
{
    delayed = false;

    // Sign-extend the virtual address and derive the VPN under the
    // translation mode currently programmed in SATP.
    Addr vaddr = Addr(sext<VADDR_BITS>(req->getVaddr()));
    SATP satp = tc->readMiscReg(MISCREG_SATP);

    Addr vpn = getVPNFromVAddr(vaddr, satp.mode);
    TlbEntry *e = lookup(vpn, satp.asid, mode, false);
    if (!e) {
        // TLB miss: start a page-table walk.  With a non-null
        // 'translation' (timing mode) the walk completes later, so
        // report the translation as delayed.
        Fault fault = walker->start(tc, translation, req, mode);
        if (translation != nullptr || fault != NoFault) {
            // This gets ignored in atomic mode.
            delayed = true;
            return fault;
        }
        // Atomic walk finished synchronously; the entry must now be
        // present (hidden lookup: no LRU/stats side effects).
        e = lookup(vpn, satp.asid, mode, true);
        assert(e != nullptr);
    }

    STATUS status = tc->readMiscReg(MISCREG_STATUS);
    PrivilegeMode pmode = getMemPriv(tc, mode);
    Fault fault = checkPermissions(status, pmode, vaddr, mode, e->pte);
    if (fault != NoFault) {
        // if we want to write and it isn't writable, do a page table walk
        // again to update the dirty flag.
        if (mode == BaseMMU::Write && !e->pte.w) {
            DPRINTF(TLB, "Dirty bit not set, repeating PT walk\n");
            fault = walker->start(tc, translation, req, mode);
            if (translation != nullptr || fault != NoFault) {
                delayed = true;
                return fault;
            }
        }
        if (fault != NoFault)
            return fault;
    }

    // Combine the physical frame number with the in-page offset bits.
    Addr paddr = e->paddr << PageShift | (vaddr & mask(e->logBytes));
    DPRINTF(TLBVerbose, "translate(vaddr=%#x, vpn=%#x, asid=%#x): %#x\n",
            vaddr, vpn, satp.asid, paddr);
    req->setPaddr(paddr);

    return NoFault;
}
354
356TLB::getMemPriv(ThreadContext *tc, BaseMMU::Mode mode)
357{
358 STATUS status = (STATUS)tc->readMiscReg(MISCREG_STATUS);
360 if (mode != BaseMMU::Execute && status.mprv == 1)
361 pmode = (PrivilegeMode)(RegVal)status.mpp;
362 return pmode;
363}
364
// Top-level translation entry point.  In full-system mode it applies the
// PMA alignment check, bypasses translation when appropriate, walks the
// TLB/page table, then runs PMP and PMA checks on the resulting physical
// address.  In SE mode it consults the process page table directly.
Fault
TLB::translate(const RequestPtr &req, ThreadContext *tc,
               BaseMMU::Translation *translation, BaseMMU::Mode mode,
               bool &delayed)
{
    delayed = false;

    if (FullSystem) {
        PrivilegeMode pmode = getMemPriv(tc, mode);
        MISA misa = tc->readMiscRegNoEffect(MISCREG_ISA);
        SATP satp = tc->readMiscReg(MISCREG_SATP);
        Fault fault = NoFault;

        // Alignment check against the PMA rules comes first.
        fault = pma->checkVAddrAlignment(req, mode);

        // Translation is bypassed when S-mode is not implemented, we are
        // executing in M-mode, or SATP selects BARE mode.
        if (!misa.rvs || pmode == PrivilegeMode::PRV_M ||
            satp.mode == AddrXlateMode::BARE) {
            req->setFlags(Request::PHYSICAL);
        }

        if (fault == NoFault) {
            if (req->getFlags() & Request::PHYSICAL) {
                // Physical request: the virtual address is already the
                // physical address.
                req->setPaddr(getValidAddr(req->getVaddr(), tc, mode));
            } else {
                fault = doTranslate(req, tc, translation, mode, delayed);
            }
        }

        if (!delayed && fault == NoFault) {
            // do pmp check if any checking condition is met.
            // timingFault will be NoFault if pmp checks are
            // passed, otherwise an address fault will be returned.
            fault = pmp->pmpCheck(req, mode, pmode, tc);
        }

        if (!delayed && fault == NoFault) {
            fault = pma->check(req, mode);
        }
        return fault;
    } else {
        // In the O3 CPU model, sometimes a memory access will be speculatively
        // executed along a branch that will end up not being taken where the
        // address is invalid. In that case, return a fault rather than trying
        // to translate it (which will cause a panic). Since RISC-V allows
        // unaligned memory accesses, this should only happen if the request's
        // length is long enough to wrap around from the end of the memory to
        // the start.
        assert(req->getSize() > 0);
        if (req->getVaddr() + req->getSize() - 1 < req->getVaddr())
            return std::make_shared<GenericPageTableFault>(req->getVaddr());

        Process * p = tc->getProcessPtr();

        /*
         * In RV32 Linux, as vaddr >= 0x80000000 is legal in userspace
         * (except for COMPAT mode for RV32 Userspace in RV64 Linux), we
         * need to ignore the upper bits beyond 32 bits.
         */
        Addr vaddr = getValidAddr(req->getVaddr(), tc, mode);
        Addr paddr;

        // SE mode: ask the process page table for the mapping.
        if (!p->pTable->translate(vaddr, paddr))
            return std::make_shared<GenericPageTableFault>(req->getVaddr());

        req->setPaddr(paddr);

        return NoFault;
    }
}
437
438Fault
439TLB::translateAtomic(const RequestPtr &req, ThreadContext *tc,
440 BaseMMU::Mode mode)
441{
442 bool delayed;
443 return translate(req, tc, nullptr, mode, delayed);
444}
445
446void
447TLB::translateTiming(const RequestPtr &req, ThreadContext *tc,
448 BaseMMU::Translation *translation, BaseMMU::Mode mode)
449{
450 bool delayed;
451 assert(translation);
452 Fault fault = translate(req, tc, translation, mode, delayed);
453 if (!delayed)
454 translation->finish(fault, req, tc, mode);
455 else
456 translation->markDelayed();
457}
458
// Functional (no-side-effect) translation used by debuggers and
// functional accesses; never touches the TLB contents.
Fault
TLB::translateFunctional(const RequestPtr &req, ThreadContext *tc,
                         BaseMMU::Mode mode)
{
    const Addr vaddr = getValidAddr(req->getVaddr(), tc, mode);
    Addr paddr = vaddr;

    if (FullSystem) {
        MMU *mmu = static_cast<MMU *>(tc->getMMUPtr());

        PrivilegeMode pmode = mmu->getMemPriv(tc, mode);
        MISA misa = tc->readMiscRegNoEffect(MISCREG_ISA);
        SATP satp = tc->readMiscReg(MISCREG_SATP);
        // Only walk the page table when translation is active (S-mode
        // implemented, not in M-mode, SATP not BARE).
        if (misa.rvs && pmode != PrivilegeMode::PRV_M &&
            satp.mode != AddrXlateMode::BARE) {
            Walker *walker = mmu->getDataWalker();
            unsigned logBytes;
            // paddr is passed by reference — startFunctional presumably
            // rewrites it to the translated page base and reports the
            // page size via logBytes (TODO confirm against walker code).
            Fault fault = walker->startFunctional(
                    tc, paddr, logBytes, mode);
            if (fault != NoFault)
                return fault;

            // Re-attach the in-page offset bits from the original vaddr.
            Addr masked_addr = vaddr & mask(logBytes);
            paddr |= masked_addr;
        }
    }
    else {
        Process *process = tc->getProcessPtr();
        const auto *pte = process->pTable->lookup(vaddr);

        if (!pte && mode != BaseMMU::Execute) {
            // Check if we just need to grow the stack.
            if (process->fixupFault(vaddr)) {
                // If we did, lookup the entry for the new page.
                pte = process->pTable->lookup(vaddr);
            }
        }

        if (!pte)
            return std::make_shared<GenericPageTableFault>(req->getVaddr());

        paddr = pte->paddr | process->pTable->pageOffset(vaddr);
    }

    DPRINTF(TLB, "Translated (functional) %#x -> %#x.\n", vaddr, paddr);
    req->setPaddr(paddr);
    return NoFault;
}
507
// Hook for post-translation fixups on the physical address; the RISC-V
// TLB needs none, so this always succeeds.
Fault
TLB::finalizePhysical(const RequestPtr &req,
                      ThreadContext *tc, BaseMMU::Mode mode) const
{
    return NoFault;
}
514
515void
516TLB::serialize(CheckpointOut &cp) const
517{
518 // Only store the entries in use.
519 uint32_t _size = size - freeList.size();
520 SERIALIZE_SCALAR(_size);
521 SERIALIZE_SCALAR(lruSeq);
522
523 uint32_t _count = 0;
524 for (uint32_t x = 0; x < size; x++) {
525 if (tlb[x].trieHandle != NULL)
526 tlb[x].serializeSection(cp, csprintf("Entry%d", _count++));
527 }
528}
529
530void
531TLB::unserialize(CheckpointIn &cp)
532{
533 // Do not allow to restore with a smaller tlb.
534 uint32_t _size;
535 UNSERIALIZE_SCALAR(_size);
536 if (_size > size) {
537 fatal("TLB size less than the one in checkpoint!");
538 }
539
540 UNSERIALIZE_SCALAR(lruSeq);
541
542 for (uint32_t x = 0; x < _size; x++) {
543 TlbEntry *newEntry = freeList.front();
544 freeList.pop_front();
545
546 newEntry->unserializeSection(cp, csprintf("Entry%d", x));
547 // TODO: When supporting other addressing modes fix this
548 Addr vpn = getVPNFromVAddr(newEntry->vaddr, AddrXlateMode::SV39);
549 Addr key = buildKey(vpn, newEntry->asid);
550 newEntry->trieHandle = trie.insert(key,
551 TlbEntryTrie::MaxBits - newEntry->logBytes + PageShift, newEntry);
552 }
553}
554
// Register the TLB statistics with the stats framework.  The aggregate
// hits/misses/accesses stats are formulas derived from the per-mode
// (read/write) scalar counters.
TLB::TlbStats::TlbStats(statistics::Group *parent)
  : statistics::Group(parent),
    ADD_STAT(readHits, statistics::units::Count::get(), "read hits"),
    ADD_STAT(readMisses, statistics::units::Count::get(), "read misses"),
    ADD_STAT(readAccesses, statistics::units::Count::get(), "read accesses"),
    ADD_STAT(writeHits, statistics::units::Count::get(), "write hits"),
    ADD_STAT(writeMisses, statistics::units::Count::get(), "write misses"),
    ADD_STAT(writeAccesses, statistics::units::Count::get(), "write accesses"),
    ADD_STAT(hits, statistics::units::Count::get(),
             "Total TLB (read and write) hits", readHits + writeHits),
    ADD_STAT(misses, statistics::units::Count::get(),
             "Total TLB (read and write) misses", readMisses + writeMisses),
    ADD_STAT(accesses, statistics::units::Count::get(),
             "Total TLB (read and write) accesses",
             readAccesses + writeAccesses)
{
}
572
573Port *
575{
576 return &walker->getPort("port");
577}
578
579} // namespace gem5
#define DPRINTF(x,...)
Definition trace.hh:209
TLB(const Params &p)
Definition tlb.cc:101
virtual void finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc, BaseMMU::Mode mode)=0
virtual void markDelayed()=0
Signal that the translation has been delayed due to a hw page table walk.
Ports are used to interface objects to each other.
Definition port.hh:62
ThreadContext is the external interface to all thread state for anything outside of the CPU.
virtual RegVal readMiscReg(RegIndex misc_reg)=0
virtual RegVal readMiscRegNoEffect(RegIndex misc_reg) const =0
virtual Process * getProcessPtr()=0
Walker * walker
Definition tlb.hh:89
Port * getTableWalkerPort() override
Get the table walker port.
Definition tlb.cc:630
Port & getPort(const std::string &if_name, PortID idx=InvalidPortID) override
Get a port with a given name and index.
Statistics container.
Definition group.hh:93
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Definition group.hh:75
constexpr T bits(T val, unsigned first, unsigned last)
Extract the bitfield from position 'first' to 'last' (inclusive) from 'val' and right justify it.
Definition bitfield.hh:79
#define fatal(...)
This implements a cprintf based fatal() function.
Definition logging.hh:200
Declaration of IniFile object.
Bitfield< 3, 0 > mask
Definition pcstate.hh:63
Bitfield< 4, 0 > mode
Definition misc_types.hh:74
Bitfield< 7 > i
Definition misc_types.hh:67
Bitfield< 9 > e
Definition misc_types.hh:65
Bitfield< 5, 0 > status
Bitfield< 59, 56 > tlb
Bitfield< 0 > p
Addr getVPNFromVAddr(Addr vaddr, Addr mode)
Definition pagetable.cc:64
Bitfield< 3 > x
Definition pagetable.hh:74
@ MISCREG_STATUS
Definition misc.hh:76
Bitfield< 4 > x
Definition pagetable.hh:61
Copyright (c) 2024 Arm Limited All rights reserved.
Definition binary32.hh:36
std::shared_ptr< FaultBase > Fault
Definition types.hh:249
static Addr buildKey(Addr vpn, uint16_t asid)
Definition tlb.cc:68
std::shared_ptr< Request > RequestPtr
Definition request.hh:94
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition types.hh:147
bool FullSystem
The FullSystem variable can be used to determine the current mode of simulation.
Definition root.cc:220
uint64_t RegVal
Definition types.hh:173
std::string csprintf(const char *format, const Args &...args)
Definition cprintf.hh:161
constexpr decltype(nullptr) NoFault
Definition types.hh:253
Declarations of a non-full system Page Table.
PMP header file.
#define UNSERIALIZE_SCALAR(scalar)
Definition serialize.hh:575
#define SERIALIZE_SCALAR(scalar)
Definition serialize.hh:568
TlbEntryTrie::Handle trieHandle
Definition pagetable.hh:104

Generated on Mon Jan 13 2025 04:28:27 for gem5 by doxygen 1.9.8