gem5 v23.0.0.0
tlb.cc
/*
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * Copyright (c) 2007 MIPS Technologies, Inc.
 * Copyright (c) 2020 Barkhausen Institut
 * Copyright (c) 2021 Huawei International
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "arch/riscv/tlb.hh"

#include <string>
#include <vector>

#include "arch/riscv/faults.hh"
#include "arch/riscv/mmu.hh"
#include "arch/riscv/pagetable.hh"
#include "arch/riscv/pagetable_walker.hh"
#include "arch/riscv/pma_checker.hh"
#include "arch/riscv/pmp.hh"
#include "arch/riscv/regs/misc.hh"
#include "arch/riscv/utility.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "cpu/thread_context.hh"
#include "debug/TLB.hh"
#include "debug/TLBVerbose.hh"
#include "mem/page_table.hh"
#include "params/RiscvTLB.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"
#include "sim/system.hh"

namespace gem5
{

using namespace RiscvISA;

//
// RISC-V TLB
//

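// Trie keys are built from the page-aligned virtual address with the
// 16-bit ASID shifted into the upper bits, so the same helper is used for
// both inserts and lookups.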
static Addr
buildKey(Addr vpn, uint16_t asid)
{
    return (static_cast<Addr>(asid) << 48) | vpn;
}

TLB::TLB(const Params &p) :
    BaseTLB(p), size(p.size), tlb(size),
    lruSeq(0), stats(this), pma(p.pma_checker),
    pmp(p.pmp)
{
    for (size_t x = 0; x < size; x++) {
        tlb[x].trieHandle = NULL;
        freeList.push_back(&tlb[x]);
    }

    walker = p.walker;
    walker->setTLB(this);
}

Walker *
TLB::getWalker()
{
    return walker;
}

void
TLB::evictLRU()
{
    // Find the entry with the lowest (and hence least recently updated)
    // sequence number.

    size_t lru = 0;
    for (size_t i = 1; i < size; i++) {
        if (tlb[i].lruSeq < tlb[lru].lruSeq)
            lru = i;
    }

    remove(lru);
}

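// Look up a translation for (vpn, asid). When 'hidden' is true the probe
// is internal bookkeeping (e.g. from insert() or demapPage()), so neither
// the LRU sequence number nor the hit/miss statistics are updated.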
TlbEntry *
TLB::lookup(Addr vpn, uint16_t asid, BaseMMU::Mode mode, bool hidden)
{
    TlbEntry *entry = trie.lookup(buildKey(vpn, asid));

    if (!hidden) {
        if (entry)
            entry->lruSeq = nextSeq();

        if (mode == BaseMMU::Write)
            stats.writeAccesses++;
        else
            stats.readAccesses++;

        if (!entry) {
            if (mode == BaseMMU::Write)
                stats.writeMisses++;
            else
                stats.readMisses++;
        }
        else {
            if (mode == BaseMMU::Write)
                stats.writeHits++;
            else
                stats.readHits++;
        }

        DPRINTF(TLBVerbose, "lookup(vpn=%#x, asid=%#x): %s ppn %#x\n",
                vpn, asid, entry ? "hit" : "miss", entry ? entry->paddr : 0);
    }

    return entry;
}

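// Install a translation. If another walk already inserted a matching
// entry, only its PTE flags are refreshed; otherwise a free slot is taken
// (evicting the LRU entry if necessary) and the entry is added to the
// trie with a key width derived from the page size, so superpage entries
// match any address within their range.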
TlbEntry *
TLB::insert(Addr vpn, const TlbEntry &entry)
{
    DPRINTF(TLB, "insert(vpn=%#x, asid=%#x): ppn=%#x pte=%#x size=%#x\n",
            vpn, entry.asid, entry.paddr, entry.pte, entry.size());

    // If somebody beat us to it, just use that existing entry.
    TlbEntry *newEntry = lookup(vpn, entry.asid, BaseMMU::Read, true);
    if (newEntry) {
        // update PTE flags (maybe we set the dirty/writable flag)
        newEntry->pte = entry.pte;
        assert(newEntry->vaddr == vpn);
        return newEntry;
    }

    if (freeList.empty())
        evictLRU();

    newEntry = freeList.front();
    freeList.pop_front();

    Addr key = buildKey(vpn, entry.asid);
    *newEntry = entry;
    newEntry->lruSeq = nextSeq();
    newEntry->vaddr = vpn;
    newEntry->trieHandle =
        trie.insert(key, TlbEntryTrie::MaxBits - entry.logBytes, newEntry);
    return newEntry;
}

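// Invalidate entries, mirroring SFENCE.VMA semantics: vpn == 0 and
// asid == 0 flushes everything, both non-zero removes the single matching
// mapping, and if only one of them is non-zero every entry matching that
// address or that ASID is removed.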
void
TLB::demapPage(Addr vpn, uint64_t asid)
{
    asid &= 0xFFFF;

    if (vpn == 0 && asid == 0)
        flushAll();
    else {
        DPRINTF(TLB, "flush(vpn=%#x, asid=%#x)\n", vpn, asid);
        if (vpn != 0 && asid != 0) {
            TlbEntry *newEntry = lookup(vpn, asid, BaseMMU::Read, true);
            if (newEntry)
                remove(newEntry - tlb.data());
        }
        else {
            for (size_t i = 0; i < size; i++) {
                if (tlb[i].trieHandle) {
                    Addr mask = ~(tlb[i].size() - 1);
                    if ((vpn == 0 || (vpn & mask) == tlb[i].vaddr) &&
                        (asid == 0 || tlb[i].asid == asid))
                        remove(i);
                }
            }
        }
    }
}

void
TLB::flushAll()
{
    DPRINTF(TLB, "flushAll()\n");
    for (size_t i = 0; i < size; i++) {
        if (tlb[i].trieHandle)
            remove(i);
    }
}

void
TLB::remove(size_t idx)
{
    DPRINTF(TLB, "remove(vpn=%#x, asid=%#x): ppn=%#x pte=%#x size=%#x\n",
            tlb[idx].vaddr, tlb[idx].asid, tlb[idx].paddr, tlb[idx].pte,
            tlb[idx].size());

    assert(tlb[idx].trieHandle);
    trie.remove(tlb[idx].trieHandle);
    tlb[idx].trieHandle = NULL;
    freeList.push_back(&tlb[idx]);
}

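// Check a leaf PTE against the requested access: the R/W/X bits must
// permit the access type, U-mode may only use pages with PTE.U set, and
// S-mode may only touch user pages when STATUS.SUM is set. Any violation
// is reported as the matching page fault.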
Fault
TLB::checkPermissions(STATUS status, PrivilegeMode pmode, Addr vaddr,
                      BaseMMU::Mode mode, PTESv39 pte)
{
    Fault fault = NoFault;

    if (mode == BaseMMU::Read && !pte.r) {
        DPRINTF(TLB, "PTE has no read perm, raising PF\n");
        fault = createPagefault(vaddr, mode);
    }
    else if (mode == BaseMMU::Write && !pte.w) {
        DPRINTF(TLB, "PTE has no write perm, raising PF\n");
        fault = createPagefault(vaddr, mode);
    }
    else if (mode == BaseMMU::Execute && !pte.x) {
        DPRINTF(TLB, "PTE has no exec perm, raising PF\n");
        fault = createPagefault(vaddr, mode);
    }

    if (fault == NoFault) {
        // check pte.u
        if (pmode == PrivilegeMode::PRV_U && !pte.u) {
            DPRINTF(TLB, "PTE is not user accessible, raising PF\n");
            fault = createPagefault(vaddr, mode);
        }
        else if (pmode == PrivilegeMode::PRV_S && pte.u && status.sum == 0) {
            DPRINTF(TLB, "PTE is only user accessible, raising PF\n");
            fault = createPagefault(vaddr, mode);
        }
    }

    return fault;
}

Fault
TLB::createPagefault(Addr vaddr, BaseMMU::Mode mode)
{
    ExceptionCode code;
    if (mode == BaseMMU::Read)
        code = ExceptionCode::LOAD_PAGE;
    else if (mode == BaseMMU::Write)
        code = ExceptionCode::STORE_PAGE;
    else
        code = ExceptionCode::INST_PAGE;
    return std::make_shared<AddressFault>(vaddr, code);
}

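// Physical addresses are formed as (ppn << PageShift) | page offset, where
// the offset width comes from the entry's logBytes so that superpage
// mappings keep the correct number of untranslated low bits.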
Addr
TLB::translateWithTLB(Addr vaddr, uint16_t asid, BaseMMU::Mode mode)
{
    TlbEntry *e = lookup(vaddr, asid, mode, false);
    assert(e != nullptr);
    return e->paddr << PageShift | (vaddr & mask(e->logBytes));
}

Fault
TLB::doTranslate(const RequestPtr &req, ThreadContext *tc,
                 BaseMMU::Translation *translation, BaseMMU::Mode mode,
                 bool &delayed)
{
    delayed = false;

    Addr vaddr = Addr(sext<VADDR_BITS>(req->getVaddr()));
    SATP satp = tc->readMiscReg(MISCREG_SATP);

    TlbEntry *e = lookup(vaddr, satp.asid, mode, false);
    if (!e) {
        Fault fault = walker->start(tc, translation, req, mode);
        if (translation != nullptr || fault != NoFault) {
            // This gets ignored in atomic mode.
            delayed = true;
            return fault;
        }
        e = lookup(vaddr, satp.asid, mode, false);
        assert(e != nullptr);
    }

    STATUS status = tc->readMiscReg(MISCREG_STATUS);
    PrivilegeMode pmode = getMemPriv(tc, mode);
    Fault fault = checkPermissions(status, pmode, vaddr, mode, e->pte);
    if (fault != NoFault) {
        // if we want to write and it isn't writable, do a page table walk
        // again to update the dirty flag.
        if (mode == BaseMMU::Write && !e->pte.w) {
            DPRINTF(TLB, "Dirty bit not set, repeating PT walk\n");
            fault = walker->start(tc, translation, req, mode);
            if (translation != nullptr || fault != NoFault) {
                delayed = true;
                return fault;
            }
        }
        if (fault != NoFault)
            return fault;
    }

    Addr paddr = e->paddr << PageShift | (vaddr & mask(e->logBytes));
    DPRINTF(TLBVerbose, "translate(vpn=%#x, asid=%#x): %#x\n",
            vaddr, satp.asid, paddr);
    req->setPaddr(paddr);

    return NoFault;
}

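// Determine the effective privilege level for a memory access. For loads
// and stores, MSTATUS.MPRV = 1 redirects the check to the privilege level
// stored in MSTATUS.MPP; instruction fetches always use the current
// privilege level.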
PrivilegeMode
TLB::getMemPriv(ThreadContext *tc, BaseMMU::Mode mode)
{
    STATUS status = (STATUS)tc->readMiscReg(MISCREG_STATUS);
    PrivilegeMode pmode = (PrivilegeMode)tc->readMiscReg(MISCREG_PRV);
    if (mode != BaseMMU::Execute && status.mprv == 1)
        pmode = (PrivilegeMode)(RegVal)status.mpp;
    return pmode;
}

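// Top-level translation. In full-system mode, M-mode and SATP.mode == Bare
// bypass the TLB entirely; otherwise the address is translated, the
// resulting physical address is rejected if its sign bit is set, and the
// PMA and PMP checkers get the final say. In syscall-emulation mode the
// process page table is used instead.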
Fault
TLB::translate(const RequestPtr &req, ThreadContext *tc,
               BaseMMU::Translation *translation, BaseMMU::Mode mode,
               bool &delayed)
{
    delayed = false;

    if (FullSystem) {
        PrivilegeMode pmode = getMemPriv(tc, mode);
        SATP satp = tc->readMiscReg(MISCREG_SATP);
        if (pmode == PrivilegeMode::PRV_M || satp.mode == AddrXlateMode::BARE)
            req->setFlags(Request::PHYSICAL);

        Fault fault;
        if (req->getFlags() & Request::PHYSICAL) {
            // The virtual address is used directly as the physical address.
            req->setPaddr(req->getVaddr());
            fault = NoFault;
        } else {
            fault = doTranslate(req, tc, translation, mode, delayed);
        }

        // according to the RISC-V tests, negative physical addresses trigger
        // an illegal address exception.
        // TODO where is that written in the manual?
        if (!delayed && fault == NoFault && bits(req->getPaddr(), 63)) {
            ExceptionCode code;
            if (mode == BaseMMU::Read)
                code = ExceptionCode::LOAD_ACCESS;
            else if (mode == BaseMMU::Write)
                code = ExceptionCode::STORE_ACCESS;
            else
                code = ExceptionCode::INST_ACCESS;
            fault = std::make_shared<AddressFault>(req->getVaddr(), code);
        }

        if (!delayed && fault == NoFault) {
            pma->check(req);

            // do pmp check if any checking condition is met.
            // timingFault will be NoFault if pmp checks are
            // passed, otherwise an address fault will be returned.
            fault = pmp->pmpCheck(req, mode, pmode, tc);
        }

        return fault;
    } else {
        // In the O3 CPU model, sometimes a memory access will be speculatively
        // executed along a branch that will end up not being taken where the
        // address is invalid. In that case, return a fault rather than trying
        // to translate it (which will cause a panic). Since RISC-V allows
        // unaligned memory accesses, this should only happen if the request's
        // length is long enough to wrap around from the end of the memory to
        // the start.
        assert(req->getSize() > 0);
        if (req->getVaddr() + req->getSize() - 1 < req->getVaddr())
            return std::make_shared<GenericPageTableFault>(req->getVaddr());

        Process * p = tc->getProcessPtr();

        Fault fault = p->pTable->translate(req);
        if (fault != NoFault)
            return fault;

        return NoFault;
    }
}

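// translateAtomic() completes the translation (including any page table
// walk) synchronously, so it passes no Translation object; translateTiming()
// hands the Translation callback to the walker and either finishes the
// request immediately or marks it as delayed.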
Fault
TLB::translateAtomic(const RequestPtr &req, ThreadContext *tc,
                     BaseMMU::Mode mode)
{
    bool delayed;
    return translate(req, tc, nullptr, mode, delayed);
}

void
TLB::translateTiming(const RequestPtr &req, ThreadContext *tc,
                     BaseMMU::Translation *translation, BaseMMU::Mode mode)
{
    bool delayed;
    assert(translation);
    Fault fault = translate(req, tc, translation, mode, delayed);
    if (!delayed)
        translation->finish(fault, req, tc, mode);
    else
        translation->markDelayed();
}

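// Functional translation walks the page table (full system) or consults the
// process page table (syscall emulation) without allocating TLB entries or
// updating the TLB statistics.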
Fault
TLB::translateFunctional(const RequestPtr &req, ThreadContext *tc,
                         BaseMMU::Mode mode)
{
    const Addr vaddr = req->getVaddr();
    Addr paddr = vaddr;

    if (FullSystem) {
        MMU *mmu = static_cast<MMU *>(tc->getMMUPtr());

        PrivilegeMode pmode = mmu->getMemPriv(tc, mode);
        SATP satp = tc->readMiscReg(MISCREG_SATP);
        if (pmode != PrivilegeMode::PRV_M &&
            satp.mode != AddrXlateMode::BARE) {
            Walker *walker = mmu->getDataWalker();
            unsigned logBytes;
            Fault fault = walker->startFunctional(
                    tc, paddr, logBytes, mode);
            if (fault != NoFault)
                return fault;

            Addr masked_addr = vaddr & mask(logBytes);
            paddr |= masked_addr;
        }
    }
    else {
        Process *process = tc->getProcessPtr();
        const auto *pte = process->pTable->lookup(vaddr);

        if (!pte && mode != BaseMMU::Execute) {
            // Check if we just need to grow the stack.
            if (process->fixupFault(vaddr)) {
                // If we did, lookup the entry for the new page.
                pte = process->pTable->lookup(vaddr);
            }
        }

        if (!pte)
            return std::make_shared<GenericPageTableFault>(req->getVaddr());

        paddr = pte->paddr | process->pTable->pageOffset(vaddr);
    }

    DPRINTF(TLB, "Translated (functional) %#x -> %#x.\n", vaddr, paddr);
    req->setPaddr(paddr);
    return NoFault;
}

Fault
TLB::finalizePhysical(const RequestPtr &req,
                      ThreadContext *tc, BaseMMU::Mode mode) const
{
    return NoFault;
}

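// Checkpointing: only occupied entries are written out, each as its own
// "Entry%d" section, together with the current LRU sequence counter.
// Restoring requires a TLB at least as large as the one that was saved.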
void
TLB::serialize(CheckpointOut &cp) const
{
    // Only store the entries in use.
    uint32_t _size = size - freeList.size();
    SERIALIZE_SCALAR(_size);
    SERIALIZE_SCALAR(lruSeq);

    uint32_t _count = 0;
    for (uint32_t x = 0; x < size; x++) {
        if (tlb[x].trieHandle != NULL)
            tlb[x].serializeSection(cp, csprintf("Entry%d", _count++));
    }
}

void
TLB::unserialize(CheckpointIn &cp)
{
    // Do not allow to restore with a smaller tlb.
    uint32_t _size;
    UNSERIALIZE_SCALAR(_size);
    if (_size > size) {
        fatal("TLB size less than the one in checkpoint!");
    }

    UNSERIALIZE_SCALAR(lruSeq);

    for (uint32_t x = 0; x < _size; x++) {
        TlbEntry *newEntry = freeList.front();
        freeList.pop_front();

        newEntry->unserializeSection(cp, csprintf("Entry%d", x));
        Addr key = buildKey(newEntry->vaddr, newEntry->asid);
        newEntry->trieHandle = trie.insert(key,
            TlbEntryTrie::MaxBits - newEntry->logBytes, newEntry);
    }
}

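// The aggregate hit/miss/access statistics are formula stats derived from
// the per-mode read and write counters, so they always stay consistent with
// the individual counts.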
TLB::TlbStats::TlbStats(statistics::Group *parent)
    : statistics::Group(parent),
      ADD_STAT(readHits, statistics::units::Count::get(), "read hits"),
      ADD_STAT(readMisses, statistics::units::Count::get(), "read misses"),
      ADD_STAT(readAccesses, statistics::units::Count::get(), "read accesses"),
      ADD_STAT(writeHits, statistics::units::Count::get(), "write hits"),
      ADD_STAT(writeMisses, statistics::units::Count::get(), "write misses"),
      ADD_STAT(writeAccesses, statistics::units::Count::get(),
               "write accesses"),
      ADD_STAT(hits, statistics::units::Count::get(),
               "Total TLB (read and write) hits", readHits + writeHits),
      ADD_STAT(misses, statistics::units::Count::get(),
               "Total TLB (read and write) misses", readMisses + writeMisses),
      ADD_STAT(accesses, statistics::units::Count::get(),
               "Total TLB (read and write) accesses",
               readAccesses + writeAccesses)
{
}

Port *
TLB::getTableWalkerPort()
{
    return &walker->getPort("port");
}

} // namespace gem5
Generated on Mon Jul 10 2023 14:24:28 for gem5 by doxygen 1.9.7