#include "debug/TLB.hh"
TLB::TLB(const Params &p)
    : BaseTLB(p), configAddress(0), size(p.size),
      tlb(size), lruSeq(0), m5opRange(p.system->m5opRange()), stats(this)
{
    if (!size)
        fatal("TLBs must have a non-zero size.\n");

    for (int x = 0; x < size; x++) {
        tlb[x].trieHandle = NULL;
        // ...
    }
    // ...
}
// TLB::evictLRU(): pick the least recently used entry and remove it.
    for (unsigned i = 1; i < size; i++) {
        // ...
    }
    assert(tlb[lru].trieHandle);
    // ...
    tlb[lru].trieHandle = NULL;

// TLB::insert(): reuse an existing translation for vpn when one is present,
// otherwise fill a free slot.
    assert(newEntry->vaddr == vpn);
    // ...
    newEntry->vaddr = vpn;

// TLB::lookup(): refresh the LRU stamp on a hit when requested.
    if (entry && update_lru)
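The excerpt above only preserves a few lines of lookup(), insert() and the LRU eviction helper, so here is a minimal standalone sketch of the same policy: every hit refreshes a monotonically increasing lruSeq stamp, and eviction removes the entry with the oldest stamp. The names (Entry, SimpleTlb) and the unordered_map/vector backing store are illustrative only; the real TLB keys a TlbEntryTrie and keeps a free list of TlbEntry slots, which also lets entries of different page sizes coexist.

#include <cassert>
#include <cstdint>
#include <unordered_map>
#include <vector>

// Illustrative stand-ins: gem5 uses a TlbEntryTrie and a free list instead.
struct Entry { uint64_t vaddr = 0; uint64_t lruSeq = 0; bool valid = false; };

class SimpleTlb
{
  public:
    explicit SimpleTlb(size_t size) : entries(size) {}

    Entry *lookup(uint64_t vpn, bool update_lru = true)
    {
        auto it = index.find(vpn);
        if (it == index.end())
            return nullptr;
        Entry *e = &entries[it->second];
        if (update_lru)
            e->lruSeq = ++seq;              // refresh recency on a hit
        return e;
    }

    Entry *insert(uint64_t vpn)
    {
        if (Entry *hit = lookup(vpn)) {
            assert(hit->vaddr == vpn);      // someone beat us to it; reuse it
            return hit;
        }
        if (index.size() == entries.size())
            evictLRU();                     // make room, like evictLRU() above
        size_t slot = freeSlot();
        entries[slot] = Entry{vpn, ++seq, true};
        index[vpn] = slot;
        return &entries[slot];
    }

  private:
    void evictLRU()
    {
        size_t lru = 0;
        for (size_t i = 1; i < entries.size(); i++)
            if (entries[i].lruSeq < entries[lru].lruSeq)
                lru = i;                    // oldest stamp loses
        assert(entries[lru].valid);
        index.erase(entries[lru].vaddr);
        entries[lru].valid = false;
    }

    size_t freeSlot()
    {
        for (size_t i = 0; i < entries.size(); i++)
            if (!entries[i].valid)
                return i;
        return 0;                           // unreachable after evictLRU()
    }

    std::vector<Entry> entries;
    std::unordered_map<uint64_t, size_t> index;
    uint64_t seq = 0;
};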
// TLB::flushAll(): remove every entry from the TLB.
    for (unsigned i = 0; i < size; i++) {
        if (tlb[i].trieHandle) {
            // ...
            tlb[i].trieHandle = NULL;
            // ...
        }
    }
// TLB::flushNonGlobal(): keep entries marked global, drop the rest.
    DPRINTF(TLB, "Invalidating all non global entries.\n");
    for (unsigned i = 0; i < size; i++) {
        if (tlb[i].trieHandle && !tlb[i].global) {
            // ...
            tlb[i].trieHandle = NULL;
            // ...
        }
    }
// localMiscRegAccess(): helper that performs the misc-register access on the
// thread context once a request reaches one of the internal address spaces.
localMiscRegAccess(bool read, RegIndex regNum,
                   ThreadContext *tc, PacketPtr pkt)

// TLB::translateInt(): intercept accesses to the internal (CPUID, MSR and
// I/O-port) address spaces.
    DPRINTF(TLB, "Addresses references internal memory.\n");
    // ...
    panic("CPUID memory space not yet implemented!\n");
    // ...
    return std::make_shared<GeneralProtection>(0);
    // ...
    req->setPaddr(req->getVaddr());
    req->setLocalAccessor(
        // ...
        return localMiscRegAccess(read, regNum, tc, pkt);
        // ...
// I/O-port prefix: port accesses, including the 0xCF8/0xCFC PCI config pair.
    Addr IOPort = vaddr & ~IntAddrPrefixMask;
    // ...
    assert(!(IOPort & ~0xFFFF));
    if (IOPort == 0xCF8 && req->getSize() == 4) {
        req->setPaddr(req->getVaddr());
        req->setLocalAccessor(
            // ...
            return localMiscRegAccess(
            // ...
    } else if ((IOPort & ~mask(2)) == 0xCFC) {
        // ...
    }
    // ...
    panic("Access to unrecognized internal address space %#x.\n",
        // ...
// TLB::finalizePhysical(): handle the m5op range and the local APIC range.
    Addr paddr = req->getPaddr();
    // ...
    req->setLocalAccessor(
    // ...
    LocalApicBase localApicBase =
    // ...
        paddr - apicRange.start()));
    // ...

// TLB::translate(): the main translation path starts out non-delayed.
    delayedResponse = false;
// Segment handling in TLB::translate(): compute the effective address size,
// then run segment protection checks when not in long mode.
    const int addrSize = 8 << logAddrSize;
    const Addr addrMask = mask(addrSize);
    // ...
    if (m5Reg.mode != LongMode) {
        DPRINTF(TLB, "Not in long mode. Checking segment protection.\n");
        // ...
        return std::make_shared<GeneralProtection>(0);
        // ...
        DPRINTF(TLB, "Tried to write to unwritable segment.\n");
        return std::make_shared<GeneralProtection>(0);
        // ...
        DPRINTF(TLB, "Tried to read from unreadble segment.\n");
        return std::make_shared<GeneralProtection>(0);
        // ...
        DPRINTF(TLB, "Checking an expand down segment.\n");
        warn_once("Expand down segments are untested.\n");
        // ...
        return std::make_shared<GeneralProtection>(0);
        // ...
        return std::make_shared<GeneralProtection>(0);
        // ...
    }
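A quick worked example of the address-size arithmetic above, assuming mask(n) returns a value with the n low-order bits set: logAddrSize encodes the operand address size as log2(bytes), so 8 << logAddrSize yields 16, 32 or 64 bits, and the corresponding addrMask truncates effective addresses to that width. maskBits() below is a stand-in for gem5's mask().

#include <cstdint>
#include <cstdio>

// Stand-in for gem5's mask(n): a value with the n low-order bits set.
static uint64_t maskBits(unsigned n)
{
    return n >= 64 ? ~0ULL : ((1ULL << n) - 1);
}

int main()
{
    for (unsigned logAddrSize = 1; logAddrSize <= 3; logAddrSize++) {
        unsigned addrSize = 8u << logAddrSize;   // 16, 32 or 64 bits
        uint64_t addrMask = maskBits(addrSize);  // 0xffff, 0xffffffff, ~0
        std::printf("logAddrSize=%u -> addrSize=%u addrMask=%#llx\n",
                    logAddrSize, addrSize,
                    static_cast<unsigned long long>(addrMask));
    }
    return 0;
}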
428 "address %#x at pc %#x.\n",
437 if (timing || fault !=
NoFault) {
439 delayedResponse =
true;
442 entry =
lookup(pageAlignedVaddr);
449 return std::make_shared<PageFault>(
vaddr,
true,
mode,
452 Addr alignedVaddr =
p->pTable->pageAlign(
vaddr);
453 DPRINTF(
TLB,
"Mapping %#x to %#x\n", alignedVaddr,
456 p->pTable->pid(), alignedVaddr, pte->
paddr,
466 "doing protection checks.\n", entry->
paddr);
470 bool badWrite = (!entry->
writable && (inUser || cr0.wp));
471 if ((inUser && !entry->
user) ||
476 return std::make_shared<PageFault>(
vaddr,
true,
mode, inUser,
479 if (storeCheck && badWrite) {
482 return std::make_shared<PageFault>(
488 req->setPaddr(paddr);
495 req->setPaddr(
vaddr);
501 req->setPaddr(
vaddr);
// TLB::translateAtomic()
    bool delayedResponse;
    // ...
    if (req->isCacheClean())
    // ...

// TLB::translateFunctional()
    if (req->isCacheClean())
    // ...
    return std::make_shared<PageFault>(vaddr, true, mode, true, false);
    // ...
    req->setPaddr(paddr);

// TLB::translateTiming()
    bool delayedResponse;
    // ...
    if (req->isCacheClean())
    // ...
    if (!delayedResponse)
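translateTiming() either completes immediately or defers to the page-table walker, which is why the BaseMMU::Translation interface in the reference list below exposes finish() and markDelayed(). The toy model below shows that handshake under simplified, made-up types (Translation, ToyTlb, and an arbitrary "every other page misses" heuristic); it is not the gem5 interface itself.

#include <cstdint>
#include <queue>
#include <utility>

// Made-up stand-in for BaseMMU::Translation's finish()/markDelayed() contract.
struct Translation
{
    virtual ~Translation() = default;
    virtual void finish(bool fault, uint64_t paddr) = 0;
    virtual void markDelayed() = 0;
};

class ToyTlb
{
  public:
    void translateTiming(uint64_t vaddr, Translation *t)
    {
        bool delayed = false;
        uint64_t paddr = translate(vaddr, delayed);
        if (!delayed) {
            t->finish(false, paddr);        // hit: respond immediately
        } else {
            t->markDelayed();               // miss: the walker answers later
            pending.push({t, vaddr});
        }
    }

    void walkerTick()                       // drain one delayed translation
    {
        if (pending.empty())
            return;
        auto [t, vaddr] = pending.front();
        pending.pop();
        t->finish(false, vaddr);            // identity mapping for the demo
    }

  private:
    uint64_t translate(uint64_t vaddr, bool &delayed)
    {
        delayed = (vaddr & 0x1000) != 0;    // pretend every other page misses
        return vaddr;                       // identity "translation"
    }

    std::queue<std::pair<Translation *, uint64_t>> pending;
};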
TLB::TlbStats::TlbStats(statistics::Group *parent)
    : statistics::Group(parent),
      ADD_STAT(rdAccesses, statistics::units::Count::get(),
               "TLB accesses on read requests"),
      ADD_STAT(wrAccesses, statistics::units::Count::get(),
               "TLB accesses on write requests"),
      ADD_STAT(rdMisses, statistics::units::Count::get(),
               "TLB misses on read requests"),
      ADD_STAT(wrMisses, statistics::units::Count::get(),
               "TLB misses on write requests")
{
}
// TLB::serialize(): only entries currently in use are written out.
    for (uint32_t x = 0; x < size; x++) {
        if (tlb[x].trieHandle != NULL)
            tlb[x].serializeSection(cp, csprintf("Entry%d", _count++));
    }

// TLB::unserialize(): rebuild the entries and re-insert them into the trie.
    // ...
    fatal("TLB size less than the one in checkpoint!");
    // ...
    for (uint32_t x = 0; x < _size; x++) {
        TlbEntry *newEntry = freeList.front();
        // ...
        newEntry->unserializeSection(cp, csprintf("Entry%d", x));
        newEntry->trieHandle = trie.insert(newEntry->vaddr,
            // ...
    }
The AddrRange class encapsulates an address range, and supports a number of tests to check if two ran...
virtual void finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc, BaseMMU::Mode mode)=0
virtual void markDelayed()=0
Signal that the translation has been delayed due to a hw page table walk.
Cycles is a wrapper class for representing cycle counts, i.e.
const Entry * lookup(Addr vaddr)
Lookup function.
Addr instAddr() const
Returns the memory address of the instruction this PC points to.
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
void setLE(T v)
Set the value in the data pointer to v as little endian.
void setData(const uint8_t *p)
Copy data into the packet from the provided pointer.
void writeData(uint8_t *p) const
Copy data from the packet to the memory at the provided pointer.
bool fixupFault(Addr vaddr)
Attempt to fix up a fault at vaddr by allocating a page on the stack.
EmulationPageTable * pTable
@ STRICT_ORDER
The request is required to be strictly ordered by CPU models and is non-speculative.
@ UNCACHEABLE
The request is to an uncacheable address.
@ READ_MODIFY_WRITE
This request is a read which will be followed by a write.
ThreadContext is the external interface to all thread state for anything outside of the CPU.
virtual RegVal readMiscReg(RegIndex misc_reg)=0
virtual void setMiscReg(RegIndex misc_reg, RegVal val)=0
virtual const PCStateBase & pcState() const =0
virtual RegVal readMiscRegNoEffect(RegIndex misc_reg) const =0
virtual Process * getProcessPtr()=0
virtual ContextID contextId() const =0
TlbEntry * lookup(Addr va, bool update_lru=true)
Fault finalizePhysical(const RequestPtr &req, ThreadContext *tc, BaseMMU::Mode mode) const override
Do post-translation physical address finalization.
void flushAll() override
Remove all entries from the TLB.
void translateTiming(const RequestPtr &req, ThreadContext *tc, BaseMMU::Translation *translation, BaseMMU::Mode mode) override
Fault translateAtomic(const RequestPtr &req, ThreadContext *tc, BaseMMU::Mode mode) override
void setConfigAddress(uint32_t addr)
Addr concAddrPcid(Addr vpn, uint64_t pcid)
Fault translateInt(bool read, RequestPtr req, ThreadContext *tc)
gem5::X86ISA::TLB::TlbStats stats
TlbEntry * insert(Addr vpn, const TlbEntry &entry, uint64_t pcid)
void serialize(CheckpointOut &cp) const override
Serialize an object.
void unserialize(CheckpointIn &cp) override
Unserialize an object.
Port * getTableWalkerPort() override
Get the table walker port.
std::vector< TlbEntry > tlb
void demapPage(Addr va, uint64_t asn) override
Fault translate(const RequestPtr &req, ThreadContext *tc, BaseMMU::Translation *translation, BaseMMU::Mode mode, bool &delayedResponse, bool timing)
Fault translateFunctional(const RequestPtr &req, ThreadContext *tc, BaseMMU::Mode mode) override
Port & getPort(const std::string &if_name, PortID idx=InvalidPortID) override
Get a port with a given name and index.
Fault start(ThreadContext *_tc, BaseMMU::Translation *translation, const RequestPtr &req, BaseMMU::Mode mode)
Fault startFunctional(ThreadContext *_tc, Addr &addr, unsigned &logBytes, BaseMMU::Mode mode)
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
bool contains(const Addr &a) const
Determine if the range contains an address.
Addr start() const
Get the start address of the range.
static const unsigned MaxBits
Value * remove(Handle handle)
Method to delete a value from the trie.
Handle insert(Key key, unsigned width, Value *val)
Method which inserts a key/value pair into the trie.
Value * lookup(Key key)
Method which looks up the Value corresponding to a particular key.
constexpr T bits(T val, unsigned first, unsigned last)
Extract the bitfield from position 'first' to 'last' (inclusive) from 'val' and right justify it.
constexpr T mbits(T val, unsigned first, unsigned last)
Mask off the given bits in place like bits() but without shifting.
constexpr T insertBits(T val, unsigned first, unsigned last, B bit_val)
Returns val with bits first to last set to the LSBs of bit_val.
#define panic(...)
This implements a cprintf based panic() function.
#define fatal(...)
This implements a cprintf based fatal() function.
static RegIndex segAttr(int index)
static RegIndex segBase(int index)
static RegIndex segLimit(int index)
constexpr auto AddrSizeFlagMask
const Addr PhysAddrPrefixPciConfig
Bitfield< 14 > expandDown
const Addr IntAddrPrefixIO
constexpr auto AddrSizeFlagShift
constexpr Request::FlagsType SegmentFlagMask
BitfieldType< SegDescriptorLimit > limit
const Addr PhysAddrPrefixIO
constexpr auto CPL0FlagBit
const Addr IntAddrPrefixCPUID
const Addr IntAddrPrefixMSR
const Addr IntAddrPrefixMask
bool msrAddrToIndex(RegIndex &reg_num, Addr addr)
Find and return the misc reg corresponding to an MSR address.
static Addr x86LocalAPICAddress(const uint8_t id, const uint16_t addr)
static void decodeAddrOffset(Addr offset, uint8_t &func)
bool pseudoInst(ThreadContext *tc, uint8_t func, uint64_t &result)
std::shared_ptr< FaultBase > Fault
std::shared_ptr< Request > RequestPtr
std::ostream CheckpointOut
uint64_t Addr
Address type. This will probably be moved somewhere else in the near future.
bool FullSystem
The FullSystem variable can be used to determine the current mode of simulation.
std::string csprintf(const char *format, const Args &...args)
constexpr decltype(nullptr) NoFault
Declarations of a non-full system Page Table.
Declaration of a request, the overall memory request consisting of the parts of the request that are ...
#define UNSERIALIZE_SCALAR(scalar)
#define SERIALIZE_SCALAR(scalar)
statistics::Scalar rdMisses
statistics::Scalar wrAccesses
statistics::Scalar rdAccesses
TlbStats(statistics::Group *parent)
statistics::Scalar wrMisses
TlbEntryTrie::Handle trieHandle