gem5 v24.0.0.0
Loading...
Searching...
No Matches
tlb.cc
Go to the documentation of this file.
1/*
2 * Copyright (c) 2001-2005 The Regents of The University of Michigan
3 * Copyright (c) 2007 MIPS Technologies, Inc.
4 * Copyright (c) 2020 Barkhausen Institut
5 * Copyright (c) 2021 Huawei International
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are
10 * met: redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer;
12 * redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution;
15 * neither the name of the copyright holders nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31
#include "arch/riscv/tlb.hh"

#include <algorithm>
#include <string>
#include <vector>

#include "arch/riscv/faults.hh"
#include "arch/riscv/mmu.hh"
#include "arch/riscv/pagetable_walker.hh"
#include "arch/riscv/pma_checker.hh"
#include "arch/riscv/pmp.hh"
#include "arch/riscv/utility.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "cpu/thread_context.hh"
#include "debug/TLB.hh"
#include "debug/TLBVerbose.hh"
#include "mem/page_table.hh"
#include "params/RiscvTLB.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"
#include "sim/system.hh"
56
57namespace gem5
58{
59
60using namespace RiscvISA;
61
63//
64// RISC-V TLB
65//
66
67static Addr
68buildKey(Addr vpn, uint16_t asid)
69{
70 return (static_cast<Addr>(asid) << 48) | vpn;
71}
72
73TLB::TLB(const Params &p) :
74 BaseTLB(p), size(p.size), tlb(size),
75 lruSeq(0), stats(this), pma(p.pma_checker),
76 pmp(p.pmp)
77{
78 for (size_t x = 0; x < size; x++) {
79 tlb[x].trieHandle = NULL;
80 freeList.push_back(&tlb[x]);
81 }
82
83 walker = p.walker;
84 walker->setTLB(this);
85}
86
Walker *
TLB::getWalker()
{
    // Accessor for the hardware page-table walker owned by this TLB.
    return walker;
}
92
93void
94TLB::evictLRU()
95{
96 // Find the entry with the lowest (and hence least recently updated)
97 // sequence number.
98
99 size_t lru = 0;
100 for (size_t i = 1; i < size; i++) {
101 if (tlb[i].lruSeq < tlb[lru].lruSeq)
102 lru = i;
103 }
104
105 remove(lru);
106}
107
108TlbEntry *
109TLB::lookup(Addr vpn, uint16_t asid, BaseMMU::Mode mode, bool hidden)
110{
111 TlbEntry *entry = trie.lookup(buildKey(vpn, asid));
112
113 if (!hidden) {
114 if (entry)
115 entry->lruSeq = nextSeq();
116
117 if (mode == BaseMMU::Write)
118 stats.writeAccesses++;
119 else
120 stats.readAccesses++;
121
122 if (!entry) {
123 if (mode == BaseMMU::Write)
124 stats.writeMisses++;
125 else
126 stats.readMisses++;
127 }
128 else {
129 if (mode == BaseMMU::Write)
130 stats.writeHits++;
131 else
132 stats.readHits++;
133 }
134
135 DPRINTF(TLBVerbose, "lookup(vpn=%#x, asid=%#x): %s ppn %#x\n",
136 vpn, asid, entry ? "hit" : "miss", entry ? entry->paddr : 0);
137 }
138
139 return entry;
140}
141
142TlbEntry *
143TLB::insert(Addr vpn, const TlbEntry &entry)
144{
145 DPRINTF(TLB, "insert(vpn=%#x, asid=%#x): ppn=%#x pte=%#x size=%#x\n",
146 vpn, entry.asid, entry.paddr, entry.pte, entry.size());
147
148 // If somebody beat us to it, just use that existing entry.
149 TlbEntry *newEntry = lookup(vpn, entry.asid, BaseMMU::Read, true);
150 if (newEntry) {
151 // update PTE flags (maybe we set the dirty/writable flag)
152 newEntry->pte = entry.pte;
153 assert(newEntry->vaddr == vpn);
154 return newEntry;
155 }
156
157 if (freeList.empty())
158 evictLRU();
159
160 newEntry = freeList.front();
161 freeList.pop_front();
162
163 Addr key = buildKey(vpn, entry.asid);
164 *newEntry = entry;
165 newEntry->lruSeq = nextSeq();
166 newEntry->vaddr = vpn;
167 newEntry->trieHandle =
168 trie.insert(key, TlbEntryTrie::MaxBits - entry.logBytes, newEntry);
169 return newEntry;
170}
171
172void
173TLB::demapPage(Addr vpn, uint64_t asid)
174{
175 asid &= 0xFFFF;
176
177 if (vpn == 0 && asid == 0)
178 flushAll();
179 else {
180 DPRINTF(TLB, "flush(vpn=%#x, asid=%#x)\n", vpn, asid);
181 if (vpn != 0 && asid != 0) {
182 TlbEntry *newEntry = lookup(vpn, asid, BaseMMU::Read, true);
183 if (newEntry)
184 remove(newEntry - tlb.data());
185 }
186 else {
187 for (size_t i = 0; i < size; i++) {
188 if (tlb[i].trieHandle) {
189 Addr mask = ~(tlb[i].size() - 1);
190 if ((vpn == 0 || (vpn & mask) == tlb[i].vaddr) &&
191 (asid == 0 || tlb[i].asid == asid))
192 remove(i);
193 }
194 }
195 }
196 }
197}
198
199void
200TLB::flushAll()
201{
202 DPRINTF(TLB, "flushAll()\n");
203 for (size_t i = 0; i < size; i++) {
204 if (tlb[i].trieHandle)
205 remove(i);
206 }
207}
208
209void
210TLB::remove(size_t idx)
211{
212 DPRINTF(TLB, "remove(vpn=%#x, asid=%#x): ppn=%#x pte=%#x size=%#x\n",
213 tlb[idx].vaddr, tlb[idx].asid, tlb[idx].paddr, tlb[idx].pte,
214 tlb[idx].size());
215
216 assert(tlb[idx].trieHandle);
217 trie.remove(tlb[idx].trieHandle);
218 tlb[idx].trieHandle = NULL;
219 freeList.push_back(&tlb[idx]);
220}
221
222Fault
223TLB::checkPermissions(STATUS status, PrivilegeMode pmode, Addr vaddr,
224 BaseMMU::Mode mode, PTESv39 pte)
225{
226 Fault fault = NoFault;
227
228 if (mode == BaseMMU::Read && !pte.r) {
229 DPRINTF(TLB, "PTE has no read perm, raising PF\n");
230 fault = createPagefault(vaddr, mode);
231 }
232 else if (mode == BaseMMU::Write && !pte.w) {
233 DPRINTF(TLB, "PTE has no write perm, raising PF\n");
234 fault = createPagefault(vaddr, mode);
235 }
236 else if (mode == BaseMMU::Execute && !pte.x) {
237 DPRINTF(TLB, "PTE has no exec perm, raising PF\n");
238 fault = createPagefault(vaddr, mode);
239 }
240
241 if (fault == NoFault) {
242 // check pte.u
243 if (pmode == PrivilegeMode::PRV_U && !pte.u) {
244 DPRINTF(TLB, "PTE is not user accessible, raising PF\n");
245 fault = createPagefault(vaddr, mode);
246 }
247 else if (pmode == PrivilegeMode::PRV_S && pte.u && status.sum == 0) {
248 DPRINTF(TLB, "PTE is only user accessible, raising PF\n");
249 fault = createPagefault(vaddr, mode);
250 }
251 }
252
253 return fault;
254}
255
256Fault
257TLB::createPagefault(Addr vaddr, BaseMMU::Mode mode)
258{
259 ExceptionCode code;
260 if (mode == BaseMMU::Read)
261 code = ExceptionCode::LOAD_PAGE;
262 else if (mode == BaseMMU::Write)
263 code = ExceptionCode::STORE_PAGE;
264 else
265 code = ExceptionCode::INST_PAGE;
266 return std::make_shared<AddressFault>(vaddr, code);
267}
268
269Addr
270TLB::translateWithTLB(Addr vaddr, uint16_t asid, BaseMMU::Mode mode)
271{
272 TlbEntry *e = lookup(vaddr, asid, mode, false);
273 assert(e != nullptr);
274 return e->paddr << PageShift | (vaddr & mask(e->logBytes));
275}
276
Fault
TLB::doTranslate(const RequestPtr &req, ThreadContext *tc,
                 BaseMMU::Translation *translation, BaseMMU::Mode mode,
                 bool &delayed)
{
    delayed = false;

    // Sv39 virtual addresses are sign-extended from bit VADDR_BITS-1.
    Addr vaddr = Addr(sext<VADDR_BITS>(req->getVaddr()));
    SATP satp = tc->readMiscReg(MISCREG_SATP);

    TlbEntry *e = lookup(vaddr, satp.asid, mode, false);
    if (!e) {
        // TLB miss: start a page-table walk. With a non-null translation
        // (timing mode) the walk finishes later, so mark the request
        // delayed; in atomic mode the walk has completed synchronously and
        // the entry is resident now (re-lookup hidden to keep stats clean).
        Fault fault = walker->start(tc, translation, req, mode);
        if (translation != nullptr || fault != NoFault) {
            // This gets ignored in atomic mode.
            delayed = true;
            return fault;
        }
        e = lookup(vaddr, satp.asid, mode, true);
        assert(e != nullptr);
    }

    STATUS status = tc->readMiscReg(MISCREG_STATUS);
    PrivilegeMode pmode = getMemPriv(tc, mode);
    Fault fault = checkPermissions(status, pmode, vaddr, mode, e->pte);
    if (fault != NoFault) {
        // if we want to write and it isn't writable, do a page table walk
        // again to update the dirty flag.
        if (mode == BaseMMU::Write && !e->pte.w) {
            DPRINTF(TLB, "Dirty bit not set, repeating PT walk\n");
            fault = walker->start(tc, translation, req, mode);
            if (translation != nullptr || fault != NoFault) {
                delayed = true;
                return fault;
            }
        }
        // The repeated walk may have cleared the fault; otherwise report it.
        if (fault != NoFault)
            return fault;
    }

    // Compose the physical address: PPN shifted to page base, plus the
    // in-page offset whose width depends on the entry's page size.
    Addr paddr = e->paddr << PageShift | (vaddr & mask(e->logBytes));
    DPRINTF(TLBVerbose, "translate(vpn=%#x, asid=%#x): %#x\n",
            vaddr, satp.asid, paddr);
    req->setPaddr(paddr);

    return NoFault;
}
324
326TLB::getMemPriv(ThreadContext *tc, BaseMMU::Mode mode)
327{
328 STATUS status = (STATUS)tc->readMiscReg(MISCREG_STATUS);
330 if (mode != BaseMMU::Execute && status.mprv == 1)
331 pmode = (PrivilegeMode)(RegVal)status.mpp;
332 return pmode;
333}
334
Fault
TLB::translate(const RequestPtr &req, ThreadContext *tc,
               BaseMMU::Translation *translation, BaseMMU::Mode mode,
               bool &delayed)
{
    delayed = false;

    if (FullSystem) {
        PrivilegeMode pmode = getMemPriv(tc, mode);
        MISA misa = tc->readMiscRegNoEffect(MISCREG_ISA);
        SATP satp = tc->readMiscReg(MISCREG_SATP);
        Fault fault = NoFault;

        // Alignment is checked on the virtual address, before translation.
        fault = pma->checkVAddrAlignment(req, mode);

        // Translation is bypassed when S-mode is absent from MISA, when
        // executing in M-mode, or when satp selects Bare mode.
        if (!misa.rvs || pmode == PrivilegeMode::PRV_M ||
            satp.mode == AddrXlateMode::BARE) {
            req->setFlags(Request::PHYSICAL);
        }

        if (fault == NoFault) {
            if (req->getFlags() & Request::PHYSICAL) {
                // Identity mapping: the virtual address already is the
                // physical address.
                req->setPaddr(req->getVaddr());
            } else {
                fault = doTranslate(req, tc, translation, mode, delayed);
            }
        }

        if (!delayed && fault == NoFault) {
            // do pmp check if any checking condition is met.
            // timingFault will be NoFault if pmp checks are
            // passed, otherwise an address fault will be returned.
            fault = pmp->pmpCheck(req, mode, pmode, tc);
        }

        if (!delayed && fault == NoFault) {
            // Finally validate the physical address against the PMA rules.
            fault = pma->check(req, mode);
        }
        return fault;
    } else {
        // In the O3 CPU model, sometimes a memory access will be speculatively
        // executed along a branch that will end up not being taken where the
        // address is invalid. In that case, return a fault rather than trying
        // to translate it (which will cause a panic). Since RISC-V allows
        // unaligned memory accesses, this should only happen if the request's
        // length is long enough to wrap around from the end of the memory to
        // the start.
        assert(req->getSize() > 0);
        if (req->getVaddr() + req->getSize() - 1 < req->getVaddr())
            return std::make_shared<GenericPageTableFault>(req->getVaddr());

        // Syscall emulation: defer to the process page table.
        Process * p = tc->getProcessPtr();

        Fault fault = p->pTable->translate(req);
        if (fault != NoFault)
            return fault;

        return NoFault;
    }
}
398
Fault
TLB::translateAtomic(const RequestPtr &req, ThreadContext *tc,
                     BaseMMU::Mode mode)
{
    // Atomic translations complete inline: passing a null Translation
    // object tells translate() not to defer to a timing page walk, so the
    // delayed flag can be discarded.
    bool delayed;
    return translate(req, tc, nullptr, mode, delayed);
}
406
407void
408TLB::translateTiming(const RequestPtr &req, ThreadContext *tc,
409 BaseMMU::Translation *translation, BaseMMU::Mode mode)
410{
411 bool delayed;
412 assert(translation);
413 Fault fault = translate(req, tc, translation, mode, delayed);
414 if (!delayed)
415 translation->finish(fault, req, tc, mode);
416 else
417 translation->markDelayed();
418}
419
Fault
TLB::translateFunctional(const RequestPtr &req, ThreadContext *tc,
                         BaseMMU::Mode mode)
{
    const Addr vaddr = req->getVaddr();
    Addr paddr = vaddr;

    if (FullSystem) {
        MMU *mmu = static_cast<MMU *>(tc->getMMUPtr());

        PrivilegeMode pmode = mmu->getMemPriv(tc, mode);
        MISA misa = tc->readMiscRegNoEffect(MISCREG_ISA);
        SATP satp = tc->readMiscReg(MISCREG_SATP);
        // Walk the page table functionally (no timing, no TLB fill) unless
        // translation is disabled (no S-mode, M-mode, or Bare satp), in
        // which case paddr stays equal to vaddr.
        if (misa.rvs && pmode != PrivilegeMode::PRV_M &&
            satp.mode != AddrXlateMode::BARE) {
            Walker *walker = mmu->getDataWalker();
            unsigned logBytes;
            Fault fault = walker->startFunctional(
                    tc, paddr, logBytes, mode);
            if (fault != NoFault)
                return fault;

            // The walker returns the page base; splice the in-page offset
            // bits (width logBytes) back in.
            Addr masked_addr = vaddr & mask(logBytes);
            paddr |= masked_addr;
        }
    }
    else {
        // Syscall emulation: consult the process page table directly.
        Process *process = tc->getProcessPtr();
        const auto *pte = process->pTable->lookup(vaddr);

        if (!pte && mode != BaseMMU::Execute) {
            // Check if we just need to grow the stack.
            if (process->fixupFault(vaddr)) {
                // If we did, lookup the entry for the new page.
                pte = process->pTable->lookup(vaddr);
            }
        }

        if (!pte)
            return std::make_shared<GenericPageTableFault>(req->getVaddr());

        paddr = pte->paddr | process->pTable->pageOffset(vaddr);
    }

    DPRINTF(TLB, "Translated (functional) %#x -> %#x.\n", vaddr, paddr);
    req->setPaddr(paddr);
    return NoFault;
}
468
Fault
TLB::finalizePhysical(const RequestPtr &req,
                      ThreadContext *tc, BaseMMU::Mode mode) const
{
    // No post-translation fixups are needed on RISC-V; the physical
    // address produced by translate() is final.
    return NoFault;
}
475
476void
477TLB::serialize(CheckpointOut &cp) const
478{
479 // Only store the entries in use.
480 uint32_t _size = size - freeList.size();
481 SERIALIZE_SCALAR(_size);
482 SERIALIZE_SCALAR(lruSeq);
483
484 uint32_t _count = 0;
485 for (uint32_t x = 0; x < size; x++) {
486 if (tlb[x].trieHandle != NULL)
487 tlb[x].serializeSection(cp, csprintf("Entry%d", _count++));
488 }
489}
490
void
TLB::unserialize(CheckpointIn &cp)
{
    // Do not allow to restore with a smaller tlb.
    uint32_t _size;
    UNSERIALIZE_SCALAR(_size);
    if (_size > size) {
        fatal("TLB size less than the one in checkpoint!");
    }

    UNSERIALIZE_SCALAR(lruSeq);

    // Repopulate entries from the checkpoint, pulling slots off the free
    // list and rebuilding the lookup trie from each entry's vaddr/asid.
    for (uint32_t x = 0; x < _size; x++) {
        TlbEntry *newEntry = freeList.front();
        freeList.pop_front();

        newEntry->unserializeSection(cp, csprintf("Entry%d", x));
        Addr key = buildKey(newEntry->vaddr, newEntry->asid);
        newEntry->trieHandle = trie.insert(key,
            TlbEntryTrie::MaxBits - newEntry->logBytes, newEntry);
    }
}
513
// Register the TLB's statistics: per-mode (read/write) hit, miss, and
// access counters, plus derived formulas for the combined totals.
TLB::TlbStats::TlbStats(statistics::Group *parent)
    : statistics::Group(parent),
      ADD_STAT(readHits, statistics::units::Count::get(), "read hits"),
      ADD_STAT(readMisses, statistics::units::Count::get(), "read misses"),
      ADD_STAT(readAccesses, statistics::units::Count::get(), "read accesses"),
      ADD_STAT(writeHits, statistics::units::Count::get(), "write hits"),
      ADD_STAT(writeMisses, statistics::units::Count::get(), "write misses"),
      ADD_STAT(writeAccesses, statistics::units::Count::get(), "write accesses"),
      ADD_STAT(hits, statistics::units::Count::get(),
               "Total TLB (read and write) hits", readHits + writeHits),
      ADD_STAT(misses, statistics::units::Count::get(),
               "Total TLB (read and write) misses", readMisses + writeMisses),
      ADD_STAT(accesses, statistics::units::Count::get(),
               "Total TLB (read and write) accesses",
               readAccesses + writeAccesses)
{
}
531
532Port *
534{
535 return &walker->getPort("port");
536}
537
538} // namespace gem5
#define DPRINTF(x,...)
Definition trace.hh:210
TLB(const Params &p)
Definition tlb.cc:61
virtual void finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc, BaseMMU::Mode mode)=0
virtual void markDelayed()=0
Signal that the translation has been delayed due to a hw page table walk.
Ports are used to interface objects to each other.
Definition port.hh:62
ThreadContext is the external interface to all thread state for anything outside of the CPU.
virtual RegVal readMiscReg(RegIndex misc_reg)=0
virtual RegVal readMiscRegNoEffect(RegIndex misc_reg) const =0
virtual Process * getProcessPtr()=0
Walker * walker
Definition tlb.hh:89
Port * getTableWalkerPort() override
Get the table walker port.
Definition tlb.cc:630
Port & getPort(const std::string &if_name, PortID idx=InvalidPortID) override
Get a port with a given name and index.
Statistics container.
Definition group.hh:93
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Definition group.hh:75
#define fatal(...)
This implements a cprintf based fatal() function.
Definition logging.hh:200
void unserializeSection(CheckpointIn &cp, const char *name)
Unserialize an a child object.
Definition serialize.cc:81
Declaration of IniFile object.
Bitfield< 3, 0 > mask
Definition pcstate.hh:63
Bitfield< 4, 0 > mode
Definition misc_types.hh:74
Bitfield< 7 > i
Definition misc_types.hh:67
Bitfield< 9 > e
Definition misc_types.hh:65
Bitfield< 5, 0 > status
Bitfield< 59, 56 > tlb
Bitfield< 0 > p
Bitfield< 3 > x
Definition pagetable.hh:73
@ MISCREG_STATUS
Definition misc.hh:76
Bitfield< 4 > x
Definition pagetable.hh:61
Copyright (c) 2024 - Pranith Kumar Copyright (c) 2020 Inria All rights reserved.
Definition binary32.hh:36
std::shared_ptr< FaultBase > Fault
Definition types.hh:249
static Addr buildKey(Addr vpn, uint16_t asid)
Definition tlb.cc:68
std::shared_ptr< Request > RequestPtr
Definition request.hh:94
uint64_t RegVal
Definition types.hh:173
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition types.hh:147
bool FullSystem
The FullSystem variable can be used to determine the current mode of simulation.
Definition root.cc:220
std::string csprintf(const char *format, const Args &...args)
Definition cprintf.hh:161
constexpr decltype(nullptr) NoFault
Definition types.hh:253
Declarations of a non-full system Page Table.
PMP header file.
#define UNSERIALIZE_SCALAR(scalar)
Definition serialize.hh:575
#define SERIALIZE_SCALAR(scalar)
Definition serialize.hh:568
TlbEntryTrie::Handle trieHandle
Definition pagetable.hh:96

Generated on Tue Jun 18 2024 16:24:00 for gem5 by doxygen 1.11.0