49 #include "debug/TLB.hh"
50 #include "debug/TLBVerbose.hh"
52 #include "params/RiscvTLB.hh"
60 using namespace RiscvISA;
// Fragment (this view is a sampled/wrapped extraction; surrounding lines are
// missing): packs the ASID into the bits above the VPN to form a single
// integer key, presumably for the TlbEntryTrie lookups used elsewhere in this
// file. NOTE(review): the enclosing function signature is not visible here —
// confirm the parameter types and the 48-bit shift against the full file.
70 return (
static_cast<Addr>(
asid) << 48) | vpn;
// TLB constructor fragment: member-initializer list (base class, fixed entry
// count, entry array, LRU sequence counter, stats group, PMA checker from
// params), followed by the loop that marks every slot invalid and seeds the
// free list. NOTE(review): the "TLB::TLB(const Params &p)" signature line and
// any initializers between these sampled lines are not visible — confirm
// against the full file.
74 BaseTLB(
p), size(
p.size),
tlb(size),
75 lruSeq(0), stats(this), pma(
p.pma_checker),
// A null trieHandle marks an entry as "not resident"; every slot therefore
// starts on the free list.
78 for (
size_t x = 0;
x < size;
x++) {
79 tlb[
x].trieHandle = NULL;
80 freeList.push_back(&
tlb[
x]);
// LRU-victim scan fragment: walks entries 1..size-1 comparing lruSeq values
// to find the least-recently-used slot (smallest sequence number).
// NOTE(review): the enclosing function signature, the initialization of
// `lru`, and the body of the if are outside this sampled view — presumably
// the if updates `lru = i`; confirm against the full file.
100 for (
size_t i = 1;
i < size;
i++) {
101 if (
tlb[
i].lruSeq <
tlb[lru].lruSeq)
// TLB::lookup fragment. Visible behavior: on a hit the entry's LRU sequence
// number is refreshed via nextSeq(); access statistics are bumped per mode
// (writeAccesses for writes, readAccesses otherwise); a verbose trace line
// reports hit/miss and the entry's physical page number.
// NOTE(review): the function signature, the trie lookup itself, and the
// branch bodies at the missing original lines (124-134) are not visible —
// the two bare `if (mode == BaseMMU::Write)` fragments presumably guard
// hit/miss counter increments; confirm against the full file.
115 entry->
lruSeq = nextSeq();
117 if (
mode == BaseMMU::Write)
118 stats.writeAccesses++;
120 stats.readAccesses++;
123 if (
mode == BaseMMU::Write)
129 if (
mode == BaseMMU::Write)
// Trace both hits and misses; on a miss `entry` is null, so 0 is printed
// for the PPN.
135 DPRINTF(TLBVerbose,
"lookup(vpn=%#x, asid=%#x): %s ppn %#x\n",
136 vpn,
asid, entry ?
"hit" :
"miss", entry ? entry->
paddr : 0);
// TLB::insert fragment. Visible behavior: trace the insertion; probe for an
// existing mapping via lookup (the trailing `true` argument presumably marks
// the probe as "hidden" so it does not perturb statistics — confirm); if one
// exists, update its PTE in place (asserting it maps the same vaddr);
// otherwise take a slot from the free list and fill it in (LRU sequence,
// vaddr), finally inserting it into the trie with a prefix length derived
// from the entry's page size (MaxBits - logBytes).
// NOTE(review): the lines between these fragments — including the eviction
// path taken when freeList is empty (original line ~158) and the key
// construction for the trie insert — are not visible in this sampled view.
145 DPRINTF(
TLB,
"insert(vpn=%#x, asid=%#x): ppn=%#x pte=%#x size=%#x\n",
146 vpn, entry.asid, entry.
paddr, entry.pte, entry.
size());
149 TlbEntry *newEntry = lookup(vpn, entry.asid, BaseMMU::Read,
true);
152 newEntry->pte = entry.pte;
153 assert(newEntry->
vaddr == vpn);
157 if (freeList.empty())
160 newEntry = freeList.front();
161 freeList.pop_front();
165 newEntry->
lruSeq = nextSeq();
166 newEntry->
vaddr = vpn;
168 trie.insert(key, TlbEntryTrie::MaxBits - entry.
logBytes, newEntry);
// TLB::demapPage fragment: invalidates mappings for (vpn, asid). Visible
// structure: a special case when both vpn and asid are 0 (presumably "flush
// everything" — the branch body is not visible; confirm), an exact-match
// path when both are nonzero (probe with a hidden lookup, then remove by
// index computed via pointer arithmetic against the entry array base), and a
// fallback scan over all resident entries for the partial-wildcard cases.
// NOTE(review): the return type line, the null check on the probed entry
// (original line ~183), and the scan-loop body are outside this sampled view.
173 TLB::demapPage(
Addr vpn, uint64_t
asid)
177 if (vpn == 0 &&
asid == 0)
181 if (vpn != 0 &&
asid != 0) {
182 TlbEntry *newEntry = lookup(vpn,
asid, BaseMMU::Read,
true);
// Convert the entry pointer back to its index in the contiguous tlb array.
184 remove(newEntry -
tlb.data());
187 for (
size_t i = 0;
i < size;
i++) {
188 if (
tlb[
i].trieHandle) {
// Fragment of a full-flush loop: visits every slot and acts on the resident
// ones (non-null trieHandle). NOTE(review): the enclosing function signature
// and the if-body are not visible — presumably this is a flushAll()-style
// routine that calls remove(i) for each resident entry; confirm against the
// full file.
203 for (
size_t i = 0;
i < size;
i++) {
204 if (
tlb[
i].trieHandle)
// TLB::remove fragment: evicts the entry at `idx`. Visible behavior: trace
// the removal, assert the entry is actually resident (non-null trieHandle),
// detach it from the trie, clear the handle to mark the slot invalid, and
// return the slot to the free list. NOTE(review): the return type line and
// the DPRINTF argument lines are missing from this sampled view.
210 TLB::remove(
size_t idx)
212 DPRINTF(
TLB,
"remove(vpn=%#x, asid=%#x): ppn=%#x pte=%#x size=%#x\n",
216 assert(
tlb[idx].trieHandle);
217 trie.remove(
tlb[idx].trieHandle);
218 tlb[idx].trieHandle = NULL;
219 freeList.push_back(&
tlb[idx]);
// PTE permission-check fragment: rejects an access when the PTE lacks the
// bit matching the access mode (r for reads, w for writes, x for fetches),
// logging the reason. The last two DPRINTFs belong to user/supervisor
// accessibility checks (a page not user-accessible, and conversely a
// user-only page). NOTE(review): the enclosing function signature, the
// privilege-mode conditions guarding the last two messages, and the fault
// construction in each branch body are not visible in this sampled view.
228 if (
mode == BaseMMU::Read && !pte.r) {
229 DPRINTF(
TLB,
"PTE has no read perm, raising PF\n");
232 else if (
mode == BaseMMU::Write && !pte.w) {
233 DPRINTF(
TLB,
"PTE has no write perm, raising PF\n");
236 else if (
mode == BaseMMU::Execute && !pte.x) {
237 DPRINTF(
TLB,
"PTE has no exec perm, raising PF\n");
244 DPRINTF(
TLB,
"PTE is not user accessible, raising PF\n");
248 DPRINTF(
TLB,
"PTE is only user accessible, raising PF\n");
// Fault-construction fragment: selects an exception code based on the access
// mode (read / write / presumably execute in a missing branch), then builds
// an AddressFault carrying the faulting virtual address and that code.
// NOTE(review): the enclosing function signature and the `code = ...`
// assignments in each branch are not visible in this sampled view.
260 if (
mode == BaseMMU::Read)
262 else if (
mode == BaseMMU::Write)
266 return std::make_shared<AddressFault>(
vaddr, code);
// doTranslate fragment. Visible behavior: on a TLB miss, start a hardware
// page-table walk; if the walk is asynchronous (a translation object was
// supplied) or faulted, bail out early. After the walk the entry must exist
// (asserted). A write to a page whose cached PTE lacks the w bit triggers a
// repeat walk — per the trace message this handles the dirty-bit update —
// with the same async/fault early-out. Finally the computed physical address
// is traced and installed on the request.
// NOTE(review): the function signature, the initial lookup producing `e`,
// the early-return bodies, the permission checks, and the paddr composition
// (original lines 309-317) are not visible in this sampled view.
273 assert(
e !=
nullptr);
289 Fault fault = walker->start(tc, translation, req,
mode);
290 if (translation !=
nullptr || fault !=
NoFault) {
296 assert(
e !=
nullptr);
305 if (
mode == BaseMMU::Write && !
e->pte.w) {
306 DPRINTF(
TLB,
"Dirty bit not set, repeating PT walk\n");
307 fault = walker->start(tc, translation, req,
mode);
308 if (translation !=
nullptr || fault !=
NoFault) {
318 DPRINTF(TLBVerbose,
"translate(vpn=%#x, asid=%#x): %#x\n",
319 vaddr, satp.asid, paddr);
320 req->setPaddr(paddr);
// translate fragment. Visible behavior: for non-fetch accesses, honor
// mstatus.MPRV (modified-privilege data accesses — the branch body is not
// visible); requests flagged PHYSICAL bypass translation (vaddr used as
// paddr), otherwise defer to doTranslate. Once a synchronous translation
// succeeds, an address with bit 63 set is rejected with an AddressFault
// whose code depends on the access mode, and finally the PMP checker
// validates the physical access.
// NOTE(review): the function signature, the condition deciding when the
// PHYSICAL flag is set (original lines 331-345), the `code = ...`
// assignments, and the branch structure between fragments are not visible
// in this sampled view.
330 if (
mode != BaseMMU::Execute &&
status.mprv == 1)
346 req->setFlags(Request::PHYSICAL);
349 if (req->getFlags() & Request::PHYSICAL) {
353 req->setPaddr(req->getVaddr());
356 fault = doTranslate(req, tc, translation,
mode, delayed);
// Physical addresses with bit 63 set are outside the valid range — raise
// an access fault rather than letting the request through.
362 if (!delayed && fault ==
NoFault &&
bits(req->getPaddr(), 63)) {
364 if (
mode == BaseMMU::Read)
366 else if (
mode == BaseMMU::Write)
370 fault = std::make_shared<AddressFault>(req->getVaddr(), code);
373 if (!delayed && fault ==
NoFault) {
379 fault = pmp->pmpCheck(req,
mode, pmode, tc);
// Syscall-emulation translation fragment: requires a nonzero access size,
// rejects accesses whose end address wraps around the address space with a
// GenericPageTableFault, then defers to the process page table.
// NOTE(review): the enclosing function signature and the origin of `p`
// (presumably the thread's Process) are not visible in this sampled view.
391 assert(req->getSize() > 0);
392 if (req->getVaddr() + req->getSize() - 1 < req->getVaddr())
393 return std::make_shared<GenericPageTableFault>(req->getVaddr());
397 Fault fault =
p->pTable->translate(req);
// Translation-wrapper fragments. The first call forwards with a null
// translation object (synchronous — presumably the translateAtomic path);
// the second forwards a caller-supplied translation object (presumably the
// translateTiming path). NOTE(review): both enclosing signatures and the
// code consuming `fault` afterwards are not visible in this sampled view —
// confirm which wrappers these belong to against the full file.
410 return translate(req, tc,
nullptr,
mode, delayed);
419 Fault fault = translate(req, tc, translation,
mode, delayed);
// TLB::translateFunctional fragment: side-effect-free translation for
// debugger/functional accesses. Visible behavior: obtain the MMU from the
// thread context; when address translation is enabled (satp mode is not
// BARE) run a functional page-table walk via the data walker and OR in the
// page-offset bits (`masked_addr`). Otherwise (SE mode) consult the process
// page table, retrying once via fixupFault for non-fetch misses (which can
// grow the stack), failing with a GenericPageTableFault if still unmapped,
// and composing paddr from the PTE frame plus the page offset. The result
// is traced and installed on the request.
// NOTE(review): the remaining parameters of the signature, the satp read,
// the fault check after the walk, and the masked_addr computation are not
// visible in this sampled view.
427 TLB::translateFunctional(
const RequestPtr &req, ThreadContext *tc,
434 MMU *mmu =
static_cast<MMU *
>(tc->getMMUPtr());
439 satp.mode != AddrXlateMode::BARE) {
440 Walker *walker = mmu->getDataWalker();
442 Fault fault = walker->startFunctional(
443 tc, paddr, logBytes,
mode);
448 paddr |= masked_addr;
452 Process *process = tc->getProcessPtr();
453 const auto *pte = process->pTable->lookup(
vaddr);
455 if (!pte &&
mode != BaseMMU::Execute) {
457 if (process->fixupFault(
vaddr)) {
459 pte = process->pTable->lookup(
vaddr);
464 return std::make_shared<GenericPageTableFault>(req->getVaddr());
466 paddr = pte->paddr | process->pTable->pageOffset(
vaddr);
469 DPRINTF(TLB,
"Translated (functional) %#x -> %#x.\n",
vaddr, paddr);
470 req->setPaddr(paddr);
// Signature tail fragment of a const member function taking a ThreadContext
// and an access mode. NOTE(review): the function name and leading parameters
// are on missing lines — by position (original line 476) this is presumably
// the finalizePhysical-style hook that follows translateFunctional; confirm
// against the full file.
476 ThreadContext *tc, BaseMMU::Mode
mode)
const
// Checkpoint-serialize fragment: computes the number of resident entries
// (total slots minus free slots), then writes each resident entry (non-null
// trieHandle) into its own numbered checkpoint section ("Entry0",
// "Entry1", ...). NOTE(review): the function signature, the SERIALIZE of
// `_size`/`lruSeq`, and the initialization of `_count` are on missing lines
// in this sampled view.
485 uint32_t _size = size - freeList.size();
490 for (uint32_t
x = 0;
x < size;
x++) {
491 if (
tlb[
x].trieHandle != NULL)
492 tlb[
x].serializeSection(cp,
csprintf(
"Entry%d", _count++));
// Checkpoint-unserialize fragment: aborts if the checkpoint holds more
// entries than this TLB has slots, then for each checkpointed entry pops a
// slot off the free list and re-inserts it into the trie with a prefix
// length derived from the restored entry's page size.
// NOTE(review): the function signature, the UNSERIALIZE of `_size`, the
// guard condition feeding fatal(), the per-entry unserializeSection call,
// and the key construction are on missing lines in this sampled view.
503 fatal(
"TLB size less than the one in checkpoint!");
508 for (uint32_t
x = 0;
x < _size;
x++) {
509 TlbEntry *newEntry = freeList.front();
510 freeList.pop_front();
514 newEntry->trieHandle = trie.insert(key,
515 TlbEntryTrie::MaxBits - newEntry->logBytes, newEntry);
// Statistics-group constructor fragment: registers the TLB's per-mode
// counters (read/write hits, misses, accesses) and three derived formula
// stats (hits, misses, accesses) computed as the sum of the corresponding
// read and write counters. NOTE(review): the constructor body/closing lines
// run past this sampled view.
519 TLB::TlbStats::TlbStats(statistics::Group *parent)
520 : statistics::Group(parent),
521 ADD_STAT(readHits, statistics::units::Count::get(),
"read hits"),
522 ADD_STAT(readMisses, statistics::units::Count::get(),
"read misses"),
523 ADD_STAT(readAccesses, statistics::units::Count::get(),
"read accesses"),
524 ADD_STAT(writeHits, statistics::units::Count::get(),
"write hits"),
525 ADD_STAT(writeMisses, statistics::units::Count::get(),
"write misses"),
526 ADD_STAT(writeAccesses, statistics::units::Count::get(),
"write accesses"),
// Derived totals: formula stats built from the scalar counters above.
527 ADD_STAT(hits, statistics::units::Count::get(),
528 "Total TLB (read and write) hits", readHits + writeHits),
529 ADD_STAT(misses, statistics::units::Count::get(),
530 "Total TLB (read and write) misses", readMisses + writeMisses),
531 ADD_STAT(accesses, statistics::units::Count::get(),
532 "Total TLB (read and write) accesses",
533 readAccesses + writeAccesses)