#include "debug/IPR.hh"
#include "debug/TLB.hh"

TLB::TLB(const Params *p)
    : BaseTLB(p), size(p->size), usedEntries(0), lastReplaced(0),
      cacheState(0), cacheValid(false)
{
    if (size > 64)
        fatal("SPARC T1 TLB registers don't support more than 64 TLB entries");
    // ...
}
void
TLB::insert(Addr va, int partition_id, int context_id, bool real,
            const PageTableEntry &PTE, int entry)
{
    TlbEntry *new_entry = NULL;
    int x;
    // ...
    va &= ~(PTE.size() - 1);

    DPRINTF(TLB,
        "TLB: Inserting Entry; va=%#x pa=%#x pid=%d cid=%d r=%d entryid=%d\n",
        va, PTE.paddr(), partition_id, context_id, (int)real, entry);

    // Check for a conflicting entry
    for (x = 0; x < size; x++) {
        if (tlb[x].range.real == real &&
            /* ... */) {
            // ...
            DPRINTF(TLB, "TLB: Conflicting entry %#X , deleting it\n", x);
            // ...
        }
    }

    if (entry != -1) {
        assert(entry < size && entry >= 0);
        new_entry = &tlb[entry];
    } else {
        // ...
            do {
                // ...
                    goto insertAllLocked;
            } while (tlb[x].pte.locked());
        // ...
    }

 insertAllLocked:
    if (new_entry == NULL)
        new_entry = &tlb[size-1];
    // ...
    if (new_entry->valid) {
        // ...
    }

    new_entry->pte = PTE;
    new_entry->used = true;
    new_entry->valid = true;
    // ...
        new_entry->used = true;
    // ...
}
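
// Illustrative sketch, not part of the original file: when no free entry is
// available, the insert path above walks round-robin from lastReplaced,
// skipping locked PTEs, and falls back to the last TLB entry once the walk
// wraps (the insertAllLocked path).  A hypothetical standalone victim
// selector with the same shape (SketchTlbEntry and pickVictim are made-up
// names):

#include <vector>

struct SketchTlbEntry { bool locked; };

static int
pickVictim(const std::vector<SketchTlbEntry> &entries, int lastReplaced)
{
    const int count = static_cast<int>(entries.size());
    int x = lastReplaced;
    do {
        ++x;
        if (x == count)
            x = 0;
        if (x == lastReplaced)      // wrapped all the way around: all locked
            return count - 1;       // mirrors the insertAllLocked fallback
    } while (entries[x].locked);
    return x;
}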
TlbEntry*
TLB::lookup(Addr va, int partition_id, bool real, int context_id,
            bool update_used)
{
    DPRINTF(TLB, "TLB: Looking up entry va=%#x pid=%d cid=%d r=%d\n",
            va, partition_id, context_id, real);
    // ...
    if (!t->used && update_used) {
        // ...
    }
    // ...
}
    for (int x = 0; x < size; x++) {
        // ...
            DPRINTFN("%4d: %#2x:%#2x %c %#4x %#8x %#8x %#16x\n",
                     /* ... */);
    }
void
TLB::demapPage(Addr va, int partition_id, bool real, int context_id)
{
    DPRINTF(IPR, "TLB: Demapping Page va=%#x pid=%#d cid=%d r=%d\n",
            va, partition_id, context_id, real);
    // ...
        DPRINTF(IPR, "TLB: Demapped page\n");
        i->second->valid = false;
        if (i->second->used) {
            i->second->used = false;
            // ...
        }
    // ...
}
void
TLB::demapContext(int partition_id, int context_id)
{
    DPRINTF(IPR, "TLB: Demapping Context pid=%#d cid=%d\n",
            partition_id, context_id);
    // ...
    for (int x = 0; x < size; x++) {
        if (tlb[x].range.contextId == context_id &&
            /* ... */) {
            // ...
        }
    }
}
void
TLB::demapAll(int partition_id)
{
    DPRINTF(TLB, "TLB: Demapping All pid=%#d\n", partition_id);
    // ...
    for (int x = 0; x < size; x++) {
        if (tlb[x].valid && !tlb[x].pte.locked() &&
            /* ... */) {
            // ...
        }
    }
}

void
TLB::flushAll()
{
    // ...
    for (int x = 0; x < size; x++) {
        // ...
    }
    // ...
}
uint64_t
TLB::TteRead(int entry)
{
    if (entry >= size)
        panic("entry: %d\n", entry);

    assert(entry < size);
    if (tlb[entry].valid)
        return tlb[entry].pte();
    else
        return (uint64_t)-1ll;
}

uint64_t
TLB::TagRead(int entry)
{
    assert(entry < size);
    uint64_t tag;

    if (!tlb[entry].valid)
        return (uint64_t)-1ll;

    // ...
    tag |= (uint64_t)tlb[entry].range.partitionId << 61;
    // ...
    tag |= (uint64_t)~tlb[entry].pte._size() << 56;
    return tag;
}
void
TLB::writeTagAccess(Addr va, int context)
{
    DPRINTF(TLB, "TLB: Writing Tag Access: va: %#X ctx: %#X value: %#X\n",
            va, context, mbits(va, 63,13) | mbits(context,12,0));
    // ...
}
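
// Illustrative sketch, not part of the original file: minimal uint64_t-only
// stand-ins for the bits()/mbits() helpers used throughout this file.
// bits(val, first, last) extracts the bitfield [first:last] and
// right-justifies it, while mbits(val, first, last) keeps the same field in
// place without shifting (the real gem5 helpers are templated).

#include <cstdint>

static inline uint64_t
sketch_bits(uint64_t val, int first, int last)
{
    int width = first - last + 1;
    uint64_t m = (width >= 64) ? ~0ULL : ((1ULL << width) - 1);
    return (val >> last) & m;
}

static inline uint64_t
sketch_mbits(uint64_t val, int first, int last)
{
    return sketch_bits(val, first, last) << last;
}

// For example, the tag access value logged above is assembled as
//   mbits(va, 63, 13) | mbits(context, 12, 0)
// i.e. the page-aligned VA in bits 63:13 with the 13-bit context number below it.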
void
TLB::writeSfsr(Addr a, bool write, ContextType ct,
               bool se, FaultTypes ft, int asi)
{
    DPRINTF(TLB, "TLB: Fault: A=%#x w=%d ct=%d ft=%d asi=%d\n",
            a, (int)write, ct, ft, asi);
    // ...
}
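
// Illustrative sketch, not part of the original file: the translate paths
// below unpack a packed MMU-state word ("tlbdata") with bits().  A
// hypothetical decoder gathering the fields they read (addr_mask bit 3,
// lsu_im bit 4, lsu_dm bit 5, part_id bits 15:8, tl bits 18:16, pri_context
// bits 47:32, sec_context bits 63:48); SketchTlbData and decodeTlbData are
// made-up names:

#include <cstdint>

struct SketchTlbData
{
    bool addr_mask;
    bool lsu_im;
    bool lsu_dm;
    int  part_id;
    int  tl;
    int  pri_context;
    int  sec_context;
};

static SketchTlbData
decodeTlbData(uint64_t tlbdata)
{
    auto field = [](uint64_t v, int first, int last) -> uint64_t {
        return (v >> last) & ((1ULL << (first - last + 1)) - 1);
    };

    SketchTlbData d;
    d.addr_mask   = field(tlbdata, 3, 3);
    d.lsu_im      = field(tlbdata, 4, 4);
    d.lsu_dm      = field(tlbdata, 5, 5);
    d.part_id     = field(tlbdata, 15, 8);
    d.tl          = field(tlbdata, 18, 16);
    d.pri_context = field(tlbdata, 47, 32);
    d.sec_context = field(tlbdata, 63, 48);
    return d;
}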
Fault
TLB::translateInst(const RequestPtr &req, ThreadContext *tc)
{
    // ...
    DPRINTF(TLB, "TLB: ITB Request to translate va=%#x size=%d\n",
            vaddr, req->getSize());

    // Cached-translation fast path
    // ...
            req->setPaddr(cacheEntry[0]->pte.translate(vaddr));
    // ...

    bool addr_mask = bits(tlbdata,3,3);
    bool lsu_im = bits(tlbdata,4,4);
    // ...
    int part_id = bits(tlbdata,15,8);
    int tl = bits(tlbdata,18,16);
    int pri_context = bits(tlbdata,47,32);
    // ...

    DPRINTF(TLB, "TLB: priv:%d hpriv:%d red:%d lsuim:%d part_id: %#X\n",
            priv, hpriv, red, lsu_im, part_id);
    // ...
        context = pri_context;
    // ...

    if (hpriv || red) {
        // ...
    }
    // ...
        return std::make_shared<MemAddressNotAligned>();
    // ...
        return std::make_shared<InstructionAccessException>();
    // ...

    if (real)
        e = lookup(vaddr, part_id, true);
    else
        e = lookup(vaddr, part_id, false, context);

    if (e == NULL || !e->valid) {
        // ...
        if (real)
            return std::make_shared<InstructionRealTranslationMiss>();
        else if (FullSystem)
            return std::make_shared<FastInstructionAccessMMUMiss>();
        else
            return std::make_shared<FastInstructionAccessMMUMiss>(/* ... */);
    }
    // ...
        return std::make_shared<InstructionAccessException>();
    // ...
    DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
    // ...
}
Fault
TLB::translateData(const RequestPtr &req, ThreadContext *tc, bool write)
{
    // ...
    asi = (ASI)req->getArchFlags();
    bool implicit = false;
    // ...
    bool unaligned = vaddr & (size - 1);

    DPRINTF(TLB, "TLB: DTB Request to translate va=%#x size=%d asi=%#x\n",
            /* ... */);
    // ...
    if (hpriv && implicit) {
        // ...
    }

    // Cached-translation fast paths
    // ...
        if (/* ... */
            ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
            /* ... */) {
            // ...
            DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
            // ...
        }
    // ...
        if (/* ... */
            ce_va < vaddr + size && ce_va + ce->range.size > vaddr &&
            /* ... */) {
            // ...
            DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
            // ...
        }
    // ...

    bool addr_mask = bits(tlbdata,3,3);
    bool lsu_dm = bits(tlbdata,5,5);
    // ...
    int part_id = bits(tlbdata,15,8);
    int tl = bits(tlbdata,18,16);
    int pri_context = bits(tlbdata,47,32);
    int sec_context = bits(tlbdata,63,48);
    // ...

    DPRINTF(TLB, "TLB: priv:%d hpriv:%d red:%d lsudm:%d part_id: %#X\n",
            priv, hpriv, red, lsu_dm, part_id);
    // ...
        context = pri_context;
    // ...
        return std::make_shared<PrivilegedAction>();
    // ...
        return std::make_shared<DataAccessException>();
    // ...
        context = pri_context;
    // ...
        context = sec_context;
    // ...
        context = pri_context;
    // ...

    if (!implicit && asi != ASI_P && asi != ASI_S) {
        // ...
            panic("Little Endian ASIs not supported\n");
        // ...
            panic("Partial Store ASIs not supported\n");
        // ...
            panic("Cmt ASI registers not implemented\n");
        // ...
            goto handleIntRegAccess;
        // ...
            goto handleMmuRegAccess;
        // ...
            goto handleScratchRegAccess;
        // ...
            goto handleQueueRegAccess;
        // ...
            goto handleSparcErrorRegAccess;
        // ...
            panic("Accessing ASI %#X. Should we?\n", asi);
    }
    // ...
        return std::make_shared<MemAddressNotAligned>();
    // ...
        return std::make_shared<DataAccessException>();
    // ...
    if ((!lsu_dm && !hpriv && !red) || asiIsReal(asi)) {
        // ...
    }
    // ...

    e = lookup(vaddr, part_id, real, context);

    if (e == NULL || !e->valid) {
        // ...
        DPRINTF(TLB, "TLB: DTB Failed to find matching TLB entry\n");
        if (real)
            return std::make_shared<DataRealTranslationMiss>();
        else if (FullSystem)
            return std::make_shared<FastDataAccessMMUMiss>();
        else
            return std::make_shared<FastDataAccessMMUMiss>(/* ... */);
    }
    // ...
        return std::make_shared<DataAccessException>();
    // ...
        return std::make_shared<FastDataAccessProtection>();
    // ...
        return std::make_shared<DataAccessException>();
    // ...
        return std::make_shared<DataAccessException>();
    // ...
    DPRINTF(TLB, "TLB: %#X -> %#X\n", vaddr, req->getPaddr());
    // ...
handleIntRegAccess:
    // ...
            return std::make_shared<DataAccessException>();
        // ...
            return std::make_shared<PrivilegedAction>();
    // ...
        return std::make_shared<DataAccessException>();
    // ...

handleScratchRegAccess:
    if (vaddr > 0x38 || (vaddr >= 0x20 && vaddr < 0x30 && !hpriv)) {
        // ...
        return std::make_shared<DataAccessException>();
    }
    // ...

handleQueueRegAccess:
    if (!priv && !hpriv) {
        // ...
        return std::make_shared<PrivilegedAction>();
    }
    if ((!hpriv && vaddr & 0xF) || vaddr > 0x3f8 || vaddr < 0x3c0) {
        // ...
        return std::make_shared<DataAccessException>();
    }
    // ...

handleSparcErrorRegAccess:
    // ...
            return std::make_shared<DataAccessException>();
        // ...
            return std::make_shared<PrivilegedAction>();
    // ...

handleMmuRegAccess:
    DPRINTF(TLB, "TLB: DTB Translating MM IPR access\n");
    // ...
    req->setPaddr(req->getVaddr());
    return NoFault;
}
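
// Illustrative sketch, not part of the original file: the queue-register
// check above rejects accesses outside the 0x3c0-0x3f8 window and, for
// non-hyperprivileged callers, anything that is not 16-byte aligned.  The
// same predicate written as a small standalone helper (sketch_queue_reg_ok
// is a made-up name):

#include <cstdint>

static inline bool
sketch_queue_reg_ok(uint64_t vaddr, bool hpriv)
{
    if (vaddr < 0x3c0 || vaddr > 0x3f8)
        return false;               // outside the queue register window
    if (!hpriv && (vaddr & 0xF))
        return false;               // must be 16-byte aligned unless hpriv
    return true;
}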
Cycles
TLB::doMmuRegRead(ThreadContext *tc, Packet *pkt)
{
    // ...
    DPRINTF(IPR, "Memory Mapped IPR Read: asi=%#X a=%#x\n",
            (uint32_t)pkt->req->getArchFlags(), pkt->getAddr());

    // ...
        pkt->setBE(itb->c0_tsb_ps0);
    // ...
        pkt->setBE(itb->c0_tsb_ps1);
    // ...
        pkt->setBE(itb->c0_config);
    // ...
        pkt->setBE(itb->cx_tsb_ps0);
    // ...
        pkt->setBE(itb->cx_tsb_ps1);
    // ...
        pkt->setBE(itb->cx_config);
    // ...
        pkt->setBE((uint64_t)0);
    // ...
        temp = itb->tag_access;
    // ...
        pkt->setBE(itb->sfsr);
    // ...
        pkt->setBE(itb->tag_access);
    // ...
        panic("need to impl DTB::doMmuRegRead() got asi=%#x, va=%#x\n",
              /* ... */);
    // ...
}
Cycles
TLB::doMmuRegWrite(ThreadContext *tc, Packet *pkt)
{
    uint64_t data = pkt->getBE<uint64_t>();
    Addr va = pkt->getAddr();
    ASI asi = (ASI)pkt->req->getArchFlags();
    // ...
    int entry_insert = -1;
    // ...
    DPRINTF(IPR, "Memory Mapped IPR Write: asi=%#X a=%#x d=%#X\n",
            (uint32_t)asi, va, data);

    // ...
            goto doMmuWriteError;
    // ...
        assert(mbits(data,13,6) == data);
        tc->setMiscReg(/* ... */ + (va >> 4) - 0x3c, data);
    // ...
        inform("Ignoring write to SPARC ERROR register\n");
    // ...
            sext<59>(bits(data, 59,0));
        // ...
            goto doMmuWriteError;
    // ...
        entry_insert = bits(va, 8,3);
    // ...
        assert(entry_insert != -1 || mbits(va,10,9) == va);
        // ...
        va_insert = mbits(ta_insert, 63,13);
        ct_insert = mbits(ta_insert, 12,0);
        // ...
        real_insert = bits(va, 9,9);
        // ...
        itb->insert(va_insert, part_insert, ct_insert, real_insert,
                    /* ... */);
    // ...
        entry_insert = bits(va, 8,3);
    // ...
        assert(entry_insert != -1 || mbits(va,10,9) == va);
        // ...
        va_insert = mbits(ta_insert, 63,13);
        ct_insert = mbits(ta_insert, 12,0);
        // ...
        real_insert = bits(va, 9,9);
        pte.populate(data, /* ... */ PageTableEntry::sun4u);
        insert(va_insert, part_insert, ct_insert, real_insert, pte,
               /* ... */);
    // ...
        switch (bits(va,5,4)) {
          // ...
        }
        switch (bits(va,7,6)) {
          // ...
            panic("Invalid type for IMMU demap\n");
        }
    // ...
            sext<59>(bits(data, 59,0));
        // ...
            goto doMmuWriteError;
    // ...
        switch (bits(va,5,4)) {
          // ...
        }
        switch (bits(va,7,6)) {
          // ...
            panic("Invalid type for IMMU demap\n");
        }
    // ...
            postInterrupt(0, bits(data, 5, 0), 0);
    // ...
 doMmuWriteError:
        panic("need to impl DTB::doMmuRegWrite() got asi=%#x, va=%#x d=%#x\n",
              /* ... */);
    // ...
}
uint64_t
TLB::MakeTsbPtr(TsbPageSize ps, uint64_t tag_access, uint64_t c0_tsb,
        uint64_t c0_config, uint64_t cX_tsb, uint64_t cX_config)
{
    uint64_t tsb;
    uint64_t config;

    if (bits(tag_access, 12,0) == 0) {
        tsb = c0_tsb;
        config = c0_config;
    } else {
        tsb = cX_tsb;
        config = cX_config;
    }

    uint64_t ptr = mbits(tsb,63,13);
    bool split = bits(tsb,12,12);
    int tsb_size = bits(tsb,3,0);
    int page_size = (ps == Ps0) ? bits(config, 2,0) : bits(config,10,8);

    if (ps == Ps1 && split)
        ptr |= ULL(1) << (13 + tsb_size);
    ptr |= (tag_access >> (9 + page_size * 3)) & mask(12+tsb_size, 4);

    return ptr;
}
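
// Illustrative worked example, not part of the original file: with ps == Ps0,
// tag_access = 0x12344000 (VA 0x12344000, context 0, so the c0 registers are
// selected), c0_tsb = 0x10000001 (base 0x10000000, split = 0, tsb_size = 1)
// and c0_config = 0 (8KB pages, page_size = 0), MakeTsbPtr() yields
//   0x10000000 | ((0x12344000 >> 9) & 0x3ff0) = 0x10001a20,
// i.e. the 8K-aligned TSB base plus a 16-byte-aligned index taken from the
// VA's page number.  A self-contained check of that arithmetic
// (sketch_check_tsb_ptr is a made-up name):

#include <cassert>
#include <cstdint>

static void
sketch_check_tsb_ptr()
{
    const uint64_t tag_access = 0x12344000;   // VA 0x12344000, context 0
    const uint64_t tsb = 0x10000001;          // base 0x10000000, split 0, size 1
    const int page_size = 0;                  // 8KB pages
    const int tsb_size = tsb & 0xF;

    uint64_t ptr = tsb & ~0x1FFFULL;                        // mbits(tsb, 63, 13)
    ptr |= (tag_access >> (9 + page_size * 3))
           & (((1ULL << (9 + tsb_size)) - 1) << 4);         // mask(12 + tsb_size, 4)

    assert(ptr == 0x10001a20);
}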
void
TLB::serialize(CheckpointOut &cp) const
{
    // ...
    // Convert the pointer-based free list into an index-based one
    std::vector<int> free_list;
    for (const TlbEntry *entry : freeList)
        free_list.push_back(entry - tlb);
    // ...
    for (int x = 0; x < size; x++) {
        // ...
    }
}

void
TLB::unserialize(CheckpointIn &cp)
{
    int oldSize;
    // ...
    if (oldSize != size)
        panic("Don't support unserializing different sized TLBs\n");
    // ...
    for (int idx : free_list)
        freeList.push_back(&tlb[idx]);
    // ...
    for (int x = 0; x < size; x++) {
        // ...
    }
}

SparcISA::TLB *
SparcTLBParams::create()
{
    return new SparcISA::TLB(this);
}
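
// Illustrative sketch, not part of the original file: the checkpoint code
// above stores the pointer-based freeList as indices ("entry - tlb") and
// rebuilds the pointers ("&tlb[idx]") on unserialize, so the saved state does
// not depend on where the TlbEntry array is allocated.  A minimal,
// self-contained version of that round trip (SketchEntry and
// sketch_free_list_roundtrip are made-up names):

#include <cstddef>
#include <list>
#include <vector>

struct SketchEntry { bool valid; };

static bool
sketch_free_list_roundtrip()
{
    SketchEntry table[4] = {};
    std::list<SketchEntry *> free_ptrs = { &table[2], &table[0] };

    // "serialize": pointers -> indices into the table
    std::vector<std::ptrdiff_t> free_idx;
    for (SketchEntry *e : free_ptrs)
        free_idx.push_back(e - table);

    // "unserialize": indices -> pointers into the (possibly new) table
    std::list<SketchEntry *> restored;
    for (std::ptrdiff_t idx : free_idx)
        restored.push_back(&table[idx]);

    return restored == free_ptrs;
}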