// Excerpts from gem5's Arm MMU model (src/arch/arm/mmu.cc).
// Code elided between fragments is marked with "// ..." or "/* ... */".
#include "debug/MMU.hh"
// MMU::checkWalkCache(): the same probe is repeated for each TLB set
if (static_cast<TLB*>(tlb)->walkCache())
    // ...
if (static_cast<TLB*>(tlb)->walkCache())
    // ...
if (static_cast<TLB*>(tlb)->walkCache())
    // ...
s1State.computeAddrTop.flush();
s2State.computeAddrTop.flush();

// ...
const Addr paddr = req->getPaddr();
// ...
req->setLocalAccessor(
    // ...
Fault
MMU::translateSe(const RequestPtr &req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing,
                 CachedState &state)
{
    Addr vaddr_tainted = req->getVaddr();
    // ...
    vaddr = vaddr_tainted;
    // ...
    return std::make_shared<DataAbort>(/* ... */);
    // ...
    if (const auto pte = p->pTable->lookup(vaddr); !pte) {
        return std::make_shared<GenericPageTableFault>(vaddr_tainted);
    } else {
        req->setPaddr(pte->paddr + p->pTable->pageOffset(vaddr));
    }
// MMU::getValidAddr(): address-tag removal (purifyTaggedAddr call tail below)
Addr purified_vaddr = 0;
// ...
    static_cast<TCR>(state.ttbcr), mode == Execute, state);
// ...
purified_vaddr = vaddr;
// ...
return purified_vaddr;
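// Illustrative sketch (not part of the file): how top-byte tag removal
// works in principle. computeAddrTop() yields the index of the highest
// significant VA bit; everything above it is replaced by the select bit,
// i.e. the address is sign-extended from top_bit. Helper name is
// hypothetical; assumes top_bit < 63.
static inline Addr
stripTag(Addr va, int top_bit, bool selbit)
{
    const Addr mask = (Addr(1) << (top_bit + 1)) - 1;  // keep [top_bit:0]
    return selbit ? (va | ~mask) : (va & mask);
}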
// MMU::checkPermissions() — AArch32 permission checks (excerpt)
if (req->isCacheMaintenance()) {
    // ...
}
// ...
if (state.isStage2 && req->isPTWalk() && state.hcr.ptw &&
    /* ... */) {
    return std::make_shared<DataAbort>(/* ... */);
}
// ...
return std::make_shared<DataAbort>(/* ... */);
// ...
if (te->nonCacheable) {
    if (req->isPrefetch()) {
        // ...
        return std::make_shared<PrefetchAbort>(/* ... */);
    }
}
if (!te->longDescFormat) {
    switch ((state.dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
        // ...
        stats.domainFaults++;
        DPRINTF(MMU, "MMU Fault: Data abort on domain. DACR: %#x"
                " domain: %#x write:%d\n", state.dacr,
                static_cast<uint8_t>(te->domain), is_write);
        // ...
        return std::make_shared<PrefetchAbort>(/* ... */);
        // ...
        return std::make_shared<DataAbort>(/* ... */);
        // ...
        panic("UNPRED domain\n");
    }
}
uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
uint8_t hap = te->hap;

if (state.sctlr.afe == 1 || te->longDescFormat)
    // ...

bool isWritable = true;
// ...
DPRINTF(MMU, "Access permissions 0, checking rs:%#x\n",
        (int)state.sctlr.rs);
if (!state.sctlr.xp) {
    switch ((int)state.sctlr.rs) {
        // ...
        abt = is_write || !is_priv;
        // ...
        abt = !is_priv && is_write;
        isWritable = is_priv;
        // ...
        panic("UNPRED permissions\n");
    }
}
// ...
abt = !is_priv || is_write;
// ...
panic("Unknown permissions %#x\n", ap);
bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
bool xn = te->xn || (isWritable && state.sctlr.wxn) ||
          (ap == 3 && state.sctlr.uwxn && is_priv);
if (is_fetch && (abt || xn ||
        (te->longDescFormat && te->pxn && is_priv) ||
        // ...
        (te->ns && state.scr.sif))) {
    // ...
    DPRINTF(MMU, "MMU Fault: Prefetch abort on permission check. AP:%d "
            "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d\n",
            ap, is_priv, is_write, te->ns, /* ... */);
    // ...
    return std::make_shared<PrefetchAbort>(/* ... */);
} else if (abt | hapAbt) {
    // ...
    DPRINTF(MMU, "MMU Fault: Data abort on permission check. AP:%d priv:%d"
            " write:%d\n", ap, is_priv, is_write);
    return std::make_shared<DataAbort>(/* ... */
        state.isStage2 | !abt, tran_method);
}
Fault
MMU::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
                        ThreadContext *tc, bool stage2)
{
    Addr vaddr_tainted = req->getVaddr();
    // ...
    bool is_write = !req->isCacheClean() && mode == Write;
    bool is_atomic = req->isAtomic();
    // ...
    if (state.isStage2 && req->isPTWalk() && state.hcr.ptw &&
        /* ... */) {
        return std::make_shared<DataAbort>(
            vaddr_tainted, te->domain, is_write, /* ... */);
    }
    // ...
    return std::make_shared<DataAbort>(/* ... */
        is_atomic ? false : is_write, /* ... */);
    // ...
    if (te->nonCacheable) {
        if (req->isPrefetch()) {
            // ...
            return std::make_shared<PrefetchAbort>(/* ... */);
        }
    }
    // ...
    bool grant_read = true;
    // ... (stage-2 / stage-1 permission evaluation, argument tails:)
        (!is_write && !is_fetch), is_write, is_fetch);
    // ...
        (!is_write && !is_fetch), is_write, is_fetch);
    // ...
    DPRINTF(MMU, "MMU Fault: Prefetch abort on permission check. "
            "ns:%d scr.sif:%d sctlr.afe: %d\n", /* ... */);
    // ...
    return std::make_shared<PrefetchAbort>(/* ... */);
    // ...
    DPRINTF(MMU, "MMU Fault: Data abort on permission check."
            /* ... */);
    return std::make_shared<DataAbort>(
        vaddr_tainted, te->domain,
        (is_atomic && !grant_read) ? false : is_write, /* ... */);
// MMU::s2PermBits64() (excerpt)
bool grant_read = te->hap & 0b01;
bool grant_write = te->hap & 0b10;
// ...
uint8_t pxn = te->pxn;
// ...
if (/* ... */ te->ns && state.scr.sif) {
    // ...
}
// ...
DPRINTF(MMU, "Checking S2 permissions: hap:%d, xn:%d, pxn:%d, r:%d, "
        "w:%d, x:%d\n", te->hap, xn, pxn, r, w, x);
// ...
} else if (req->isAtomic()) {
    grant = grant_read || grant_write;
}
// ...
panic("Invalid Operation\n");
// ...
return std::make_pair(grant, grant_read);
}
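// Sketch: the stage-2 HAP bits grant read (bit 0) and write (bit 1)
// independently, which is exactly how grant_read/grant_write are seeded
// above. Helper name is illustrative.
static inline bool
s2Grant(uint8_t hap, bool is_write)
{
    return is_write ? (hap & 0b10) : (hap & 0b01);
}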
std::tuple<bool, bool, bool>
MMU::s1IndirectPermBits64(TlbEntry *te, const RequestPtr &req, Mode mode,
                          ThreadContext *tc, CachedState &state,
                          bool r, bool w, bool x)
{
    const bool is_priv = state.isPriv && !(req->getFlags() & UserMode);
    // ...
    uint8_t pxn = te->pxn;
    // ...
    uint8_t piindex = te->piindex;
    uint8_t ppi = bits(state.pir, 4 * piindex + 3, 4 * piindex);
    uint8_t upi = bits(state.pire0, 4 * piindex + 3, 4 * piindex);
    // ...
    DPRINTF(MMU, "Checking S1 indirect permissions: "
            "piindex:%d, ppi:%d, xn:%d, pxn:%d, r:%d, "
            "w:%d, x:%d, is_priv: %d, wxn: %d\n", piindex, ppi,
            xn, pxn, r, w, x, is_priv, wxn);
    bool p_wxn = ppi == 0b0110;
    // ...
    switch (ppi) {
      case 0b0000: pr = 0; pw = 0; px = 0; break;
      case 0b0001: pr = 1; pw = 0; px = 0; break;
      case 0b0010: pr = 0; pw = 0; px = 1; break;
      case 0b0011: pr = 1; pw = 0; px = 1; break;
      case 0b0100: pr = 0; pw = 0; px = 0; break;
      case 0b0101: pr = 1; pw = 1; px = 0; break;
      case 0b0110: pr = 1; pw = 1; px = 1; break;
      case 0b0111: pr = 1; pw = 1; px = 1; break;
      case 0b1000: pr = 1; pw = 0; px = 0; break;
      case 0b1001: pr = 1; pw = 0; px = 0; break;
      case 0b1010: pr = 1; pw = 0; px = 1; break;
      case 0b1011: pr = 0; pw = 0; px = 0; break;
      case 0b1100: pr = 1; pw = 1; px = 0; break;
      case 0b1101: pr = 0; pw = 0; px = 0; break;
      case 0b1110: pr = 1; pw = 1; px = 1; break;
      case 0b1111: pr = 0; pw = 0; px = 0; break;
    }
    bool u_wxn = upi == 0b0110;
    // ...
    switch (upi) {
      case 0b0000: ur = 0; uw = 0; ux = 0; break;
      case 0b0001: ur = 1; uw = 0; ux = 0; break;
      case 0b0010: ur = 0; uw = 0; ux = 1; break;
      case 0b0011: ur = 1; uw = 0; ux = 1; break;
      case 0b0100: ur = 0; uw = 0; ux = 0; break;
      case 0b0101: ur = 1; uw = 1; ux = 0; break;
      case 0b0110: ur = 1; uw = 1; ux = 1; break;
      case 0b0111: ur = 1; uw = 1; ux = 1; break;
      case 0b1000: ur = 1; uw = 0; ux = 0; break;
      case 0b1001: ur = 1; uw = 0; ux = 0; break;
      case 0b1010: ur = 1; uw = 0; ux = 1; break;
      case 0b1011: ur = 0; uw = 0; ux = 0; break;
      case 0b1100: ur = 1; uw = 1; ux = 0; break;
      case 0b1101: ur = 0; uw = 0; ux = 0; break;
      case 0b1110: ur = 1; uw = 1; ux = 1; break;
      case 0b1111: ur = 0; uw = 0; ux = 0; break;
    }
    bool pan_access = !req->isCacheMaintenance() ||
        /* ... */;
    // ...
    if (_release->has(ArmExtension::FEAT_PAN) && pan_access) {
        if (state.cpsr.pan && upi != 0) {
            // ...
        }
    }
    // ...
    grant_read = is_priv ? pr : ur;
    grant_write = is_priv ? pw : uw;
    grant_exec = is_priv ? px : ux;
    wxn = is_priv ? p_wxn : u_wxn;
    // ...
    grant_exec = grant_exec && !(wxn && grant_write);
    // ...
    grant_exec = grant_exec && !state.scr.sif;
    // ...
    return std::make_tuple(grant_read, grant_write, grant_exec);
}
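// The two 16-way switches above can equivalently be written as a lookup
// table indexed by the 4-bit permission-indirection encoding. The struct
// and table names are illustrative; the triples are transcribed from the
// switches (encoding 0b0110 additionally sets the per-regime WXN flag).
struct Rwx { bool r, w, x; };
static constexpr Rwx s1pie_decode[16] = {
    {0,0,0}, {1,0,0}, {0,0,1}, {1,0,1},  // 0b0000 - 0b0011
    {0,0,0}, {1,1,0}, {1,1,1}, {1,1,1},  // 0b0100 - 0b0111
    {1,0,0}, {1,0,0}, {1,0,1}, {0,0,0},  // 0b1000 - 0b1011
    {1,1,0}, {0,0,0}, {1,1,1}, {0,0,0},  // 0b1100 - 0b1111
};
// Usage: const Rwx p = s1pie_decode[ppi];  // p.r/p.w/p.x == pr/pw/px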
std::tuple<bool, bool, bool>
MMU::s1DirectPermBits64(TlbEntry *te, const RequestPtr &req, Mode mode,
                        ThreadContext *tc, CachedState &state,
                        bool r, bool w, bool x)
{
    const uint8_t ap = te->ap & 0b11;
    const bool is_priv = state.isPriv && !(req->getFlags() & UserMode);
    // ...
    uint8_t pxn = te->pxn;
    // ...
    DPRINTF(MMU, "Checking S1 direct permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
            "w:%d, x:%d, is_priv: %d, wxn: %d\n", ap, xn,
            pxn, r, w, x, is_priv, wxn);
    // AP[2:1] decode (case labels elided in the source fragment):
    pr = 1; pw = 1; ur = 0; uw = 0;    // 0b00: EL1 RW, EL0 no access
    // ...
    pr = 1; pw = 1; ur = 1; uw = 1;    // 0b01: EL1 RW, EL0 RW
    // ...
    pr = 1; pw = 0; ur = 0; uw = 0;    // 0b10: EL1 RO, EL0 no access
    // ...
    pr = 1; pw = 0; ur = 1; uw = 0;    // 0b11: EL1 RO, EL0 RO
    // ...
    const bool px = !(pxn || uw);
    // ...
    bool pan_access = !req->isCacheMaintenance() ||
        /* ... */;
    // ...
    if (_release->has(ArmExtension::FEAT_PAN) && pan_access) {
        // ...
    }
    // ...
    grant_read = is_priv ? pr : ur;
    grant_write = is_priv ? pw : uw;
    grant_exec = is_priv ? px : ux;
    // ...
    switch (bits(ap, 1)) {
        // ...
        grant_read = 1; grant_write = 1;
        // ...
        grant_read = 1; grant_write = 0;
    }
    // ...
    grant_exec = grant_exec && !(wxn && grant_write);
    // ...
    grant_exec = grant_exec && !state.scr.sif;
    // ...
    return std::make_tuple(grant_read, grant_write, grant_exec);
}
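// Minimal sketch of the WXN rule applied in both stage-1 paths above;
// the helper is illustrative. SCTLR.WXN turns any writable mapping into
// execute-never.
constexpr bool
execGranted(bool px, bool wxn, bool grant_write)
{
    return px && !(wxn && grant_write);
}
static_assert(!execGranted(true, true, true), "WXN: writable => never exec");
static_assert(execGranted(true, false, true), "no WXN: exec stays granted");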
std::pair<bool, bool>
MMU::s1PermBits64(TlbEntry *te, const RequestPtr &req, Mode mode,
                  ThreadContext *tc, CachedState &state,
                  bool r, bool w, bool x)
{
    bool grant_read = true, grant_write = true, grant_exec = true;
    // ...
    std::tie(grant_read, grant_write, grant_exec) =
        s1IndirectPermBits64(/* ... */);
    // ...
    std::tie(grant_read, grant_write, grant_exec) =
        s1DirectPermBits64(/* ... */);
    // ...
    } else if (req->isAtomic()) {
        grant = grant_read && grant_write;
    }
    // ...
    return std::make_pair(grant, grant_read);
}
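// Sketch (illustrative helper, not in the file): how a caller consumes
// the returned (grant, grant_read) pair. A failed atomic with no read
// permission is reported as a read abort, mirroring the
// "(is_atomic && !grant_read) ? false : is_write" argument seen in
// checkPermissions64() above.
static inline bool
abortIsWrite(bool is_atomic, bool grant_read, bool is_write)
{
    return (is_atomic && !grant_read) ? false : is_write;
}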
// MMU::purifyTaggedAddr(): bit 55 selects the upper/lower VA range
const bool selbit = bits(vaddr_tainted, 55);

// MMU::translateMmuOff() (excerpt)
bool is_atomic = req->isAtomic();
req->setPaddr(vaddr);
// ...
f = std::make_shared<PrefetchAbort>(vaddr,
    /* ... */);
// ...
f = std::make_shared<DataAbort>(vaddr,
    /* ... */);
if (long_desc_format || state.sctlr.tre == 0 || state.nmrr.ir0 == 0 ||
    state.nmrr.or0 == 0 || state.prrr.tr0 != 0x2) {
    if (!req->isCacheMaintenance()) {
        // ...
    }
}
// ...
temp_te.ns = !in_secure_state;
bool dc = (HaveExt(tc, ArmExtension::FEAT_VHE) &&
           state.hcr.e2h == 1 && state.hcr.tge == 1) ? 0 : state.hcr.dc;
bool i_cacheability = state.sctlr.i && !state.sctlr.m;
// ...
temp_te.innerAttrs = i_cacheability ? 0x2 : 0x0;
temp_te.outerAttrs = i_cacheability ? 0x2 : 0x0;
// ...
DPRINTF(MMU, "(No MMU) setting memory attributes: shareable: "
        "%d, innerAttrs: %d, outerAttrs: %d, stage2: %d\n",
        /* ... */);
Fault
MMU::translateMmuOn(ThreadContext *tc, const RequestPtr &req, Mode mode,
                    Translation *translation, bool &delay, bool timing,
                    bool functional, Addr vaddr, TranMethod tran_method,
                    CachedState &state)
{
    Addr vaddr_tainted = req->getVaddr();
    // ... (getResultTe call, argument tail:)
        functional, &mergeTe, state);
    // ...
    DPRINTF(MMU,
            "Setting memory attributes: shareable: %d, innerAttrs: %d, "
            "outerAttrs: %d, mtype: %d, stage2: %d\n",
            te->shareable, te->innerAttrs, te->outerAttrs,
            static_cast<uint8_t>(te->mtype), state.isStage2);
    // ...
    if (te->nonCacheable && !req->isCacheMaintenance())
        // ...
    if (!is_fetch && fault == NoFault &&
        /* ... */) {
        stats.alignFaults++;
        return std::make_shared<DataAbort>(/* ... */);
    }
Fault
MMU::translateFs(const RequestPtr &req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing,
                 ArmTranslationType tran_type, bool functional,
                 CachedState &state)
{
    assert(!(timing && functional));

    Addr vaddr_tainted = req->getVaddr();
    // ...
    vaddr = vaddr_tainted;
    // ...
    //     "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n" ...
    // ...
    DPRINTF(MMU, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
            "flags %#lx tranType 0x%x\n", vaddr_tainted, mode,
            /* ... */);
    // ...
    if ((req->isInstFetch() && (!state.sctlr.i)) ||
        ((!req->isInstFetch()) && (!state.sctlr.c))) {
        if (!req->isCacheMaintenance()) {
            // ...
        }
    }
    // ...
    stats.alignFaults++;
    return std::make_shared<DataAbort>(/* ... */);
    // ...
    bool vm = state.hcr.vm;
    if (HaveExt(tc, ArmExtension::FEAT_VHE) &&
        state.hcr.e2h == 1 && state.hcr.tge == 1)
        // ...
    else if (state.hcr.dc == 1)
        // ...
    // ... (translateMmuOff call, argument tail:)
        long_desc_format, state);
    // ...
    DPRINTF(MMU, "Translating %s=%#x context=%d\n",
            state.isStage2 ? "IPA" : "VA", vaddr_tainted, state.asid);
    // ... (translateMmuOn call, argument tail:)
        functional, vaddr, tran_method, state);
    // ...
    if (sd->enabled() && fault == NoFault) {
        fault = sd->testDebug(tc, req, mode);
    }
// translateAtomic()/translateFunctional(): translateFs call tails
// ...
    tran_type, false, state);
// ...
    tran_type, true, state);

// MMU::translateTiming() (excerpt)
assert(translation);
// ...
fault = translateFs(req, tc, mode, translation, delay, true, tran_type,
    /* ... */);
// ...
DPRINTF(MMU, "Translation returning delay=%d fault=%d\n", delay,
    /* ... */);
// ...
if (translation && (call_from_s2 || !state.stage2Req || req->hasPaddr() ||
    /* ... */)) {

// MMU::getVMID(): decode ID_AA64MMFR1_EL1.VMIDBits
switch (mmfr1.vmidbits) {
    // ...
    panic("Reserved ID_AA64MMFR1_EL1.VMIDBits value: %#x",
        /* ... */);
}

// MMU::updateMiscReg() (excerpt)
if (/* ... */ ((tran_type == state.curTranType) || stage2)) {
    // ...
}
// ...
state.updateMiscReg(tc, tran_type);
// ... (setVMID is applied to each TLB set in turn)
static_cast<TLB*>(tlb)->setVMID(state.vmid);
// ...
static_cast<TLB*>(tlb)->setVMID(state.vmid);
// ...
static_cast<TLB*>(tlb)->setVMID(state.vmid);
// ...
if (state.directToStage2) {
    s2State.updateMiscReg(tc, tran_type);
}
// CachedState::updateMiscReg (excerpt): ASID selection
if (mmu->release()->has(ArmExtension::FEAT_S1PIE)) {
    // ...
}
// ...
uint64_t ttbr_asid = ttbcr.a1 ?
    /* ... */;
asid = bits(ttbr_asid,
            (mmu->haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
// ...
if (mmu->release()->has(ArmExtension::FEAT_S1PIE)) {
    // ...
}
// ...
uint64_t ttbr_asid = ttbcr.a1 ?
    /* ... */;
asid = bits(ttbr_asid,
            (mmu->haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
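// Sketch of the ASID extraction above (helper name is illustrative):
// the ASID lives in the top bits of the TTBR selected by TTBCR.A1,
// occupying [63:48] with 16-bit ASIDs (haveLargeAsid64 and TCR.AS set),
// else [55:48]. bits(val, first, last) extracts the inclusive field.
static inline uint16_t
extractAsid(uint64_t ttbr, bool large_asid)
{
    return bits(ttbr, large_asid ? 63 : 55, 48);
}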
if (mmu->release()->has(ArmExtension::FEAT_S1PIE)) {
    // ...
}
// ...
if (mmu->release()->has(ArmExtension::FEAT_S1PIE)) {
    // ...
}
// ...
if (mmu->release()->has(ArmExtension::VIRTUALIZATION)) {
    // ...
    if (HaveExt(tc, ArmExtension::FEAT_VHE) &&
        hcr.e2h == 1 && hcr.tge == 1) {
        // ...
    }
}
// ...
asid = context_id.asid;
// ...
if (mmu->release()->has(ArmExtension::VIRTUALIZATION)) {
    // ...
}

// MMU::tranTypeEL() (excerpt)
return currEL(cpsr) == EL3 && scr.ns == 0 ?
    /* ... */;
// ...
panic("Unknown translation mode!\n");
// getTE() convenience overload, forwarding to the full version:
    return getTE(te, req, tc, mode, translation, timing, functional,
                 /* ... */);

// MMU::lookup() (excerpt): filling the TLB lookup key
lookup_data.va = va;
// ...
lookup_data.vmid = vmid;
lookup_data.ss = ss;
// ...
return tlb->multiLookup(lookup_data);
Fault
MMU::getTE(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode,
           Translation *translation, bool timing, bool functional,
           SecurityState ss, PASpace ipaspace, ArmTranslationType tran_type,
           bool stage2)
{
    // ...
    Addr vaddr_tainted = req->getVaddr();
    // ...
    vaddr = vaddr_tainted;
    // ...
    if (req->isPrefetch()) {
        // ...
        stats.prefetchFaults++;
        return std::make_shared<PrefetchAbort>(/* ... */);
    }
    // ...
    DPRINTF(MMU,
            "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
            vaddr_tainted, state.asid, state.vmid);
    // ... (table-walker call, argument tail:)
        translation, timing, functional, ss,
        /* ... */);
    // ...
    if (timing || fault != NoFault) {
Fault
MMU::getResultTe(TlbEntry **te, const RequestPtr &req, ThreadContext *tc,
                 Mode mode, Translation *translation, bool timing,
                 bool functional, TlbEntry *mergeTe, CachedState &state)
{
    // ...
    fault = getTE(&s2_te, req, tc, mode, translation, timing, functional,
        /* ... */);
    // ...
    Addr vaddr_tainted = req->getVaddr();
    // ...
    fault = getTE(&s1_te, req, tc, mode, translation, timing, functional,
        /* ... */);
    // ... (s2_lookup construction, argument tail:)
        req, translation, mode, timing, functional,
        /* ... */);
    fault = s2_lookup->getTe(tc, mergeTe);
    // ...
    DPRINTF(MMU, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
            vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0,
            /* ... */);
    // ...
    auto arm_fault = reinterpret_cast<ArmFault*>(fault.get());
    // ...
}

bool
MMU::isCompleteTranslation(TlbEntry *entry) const
{
    return entry && !entry->partial;
}
void
MMU::takeOverFrom(BaseMMU *old_mmu)
{
    // ...
    auto *ommu = dynamic_cast<MMU*>(old_mmu);
    // ...
    _attr = ommu->_attr;
    // ...
}

// MMU::setTestInterface() (excerpt)
fatal_if(!ti, "%s is not a valid ARM TLB tester\n", _ti->name());

// MMU::testTranslation() (excerpt)
if (!test || !req->hasSize() || req->getSize() == 0 ||
    req->isCacheMaintenance()) {
1833 "Number of MMU faults due to alignment restrictions"),
1835 "Number of MMU faults due to prefetch"),
1837 "Number of MMU faults due to domain restrictions"),
1839 "Number of MMU faults due to permissions restrictions")