#include "debug/MMU.hh"

using namespace ArmISA;
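// MMU constructor (excerpt): wires up the stage-1/stage-2 TLB pointers,
// their table walkers, and the per-stage cached translation state (the
// boolean CachedState argument selects stage 2).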
    itbStage2(p.stage2_itb), dtbStage2(p.stage2_dtb),
    itbWalker(p.itb_walker), dtbWalker(p.dtb_walker),
    itbStage2Walker(p.stage2_itb_walker),
    dtbStage2Walker(p.stage2_dtb_walker),
    // ...
    s1State(this, false), s2State(this, true),
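// lookup (excerpt): TLB lookups are keyed by VA, ASID, and VMID, and carry
// the current security state so secure and non-secure entries never alias.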
    lookup_data.ss = state.securityState;
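// finalizePhysical (excerpt): physical addresses inside the gem5 pseudo-op
// range never reach memory; a local accessor decodes the address offset
// into a pseudo-instruction and executes it with the 64-bit or 32-bit
// register ABI, matching the current execution state.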
    const Addr paddr = req->getPaddr();
    // ...
    req->setLocalAccessor(
        // ...
        pseudo_inst::pseudoInst<RegABI64>(tc, func, ret);
        // ...
        pseudo_inst::pseudoInst<RegABI32>(tc, func, ret);
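// translateSe (excerpt): syscall-emulation translation. No hardware walk is
// involved; after the alignment check the address is resolved through the
// owning process's page table, and a miss raises a GenericPageTableFault.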
Fault
MMU::translateSe(const RequestPtr &req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing,
                 CachedState &state)
{
    Addr vaddr_tainted = req->getVaddr();
    // ...
    vaddr = vaddr_tainted;
    // ...
    return std::make_shared<DataAbort>(
        // ...
    if (const auto pte = p->pTable->lookup(vaddr); !pte) {
        return std::make_shared<GenericPageTableFault>(vaddr_tainted);
    }
    req->setPaddr(pte->paddr + p->pTable->pageOffset(vaddr));
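// getValidAddr (excerpt): on AArch64 the address is purified (tag bits
// stripped) before use; on AArch32 it is passed through unchanged.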
    Addr purified_vaddr = 0;
    // ...
    purified_vaddr = vaddr;
    // ...
    return purified_vaddr;
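// checkPermissions (excerpt, AArch32): fault checks run after a TLB hit.
// When HCR.PTW is set, a stage-2 translation of a stage-1 page-table walk
// faults if it maps to Device memory; prefetches to non-cacheable memory
// abort; short-descriptor entries then go through the DACR domain check
// ("no access" faults, "client" continues to the permission checks, and
// the reserved domain value is UNPREDICTABLE).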
    if (req->isCacheMaintenance()) {
    // ...
    if (state.isStage2 && req->isPTWalk() && state.hcr.ptw &&
        // ...
        return std::make_shared<DataAbort>(
            // ...
            state.isStage2, tran_method);
    // ...
    return std::make_shared<DataAbort>(
        // ...
    if (te->nonCacheable) {
        if (req->isPrefetch()) {
            // ...
            return std::make_shared<PrefetchAbort>(
                // ...
                state.isStage2, tran_method);
    // ...
    if (!te->longDescFormat) {
        switch ((state.dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
            // ...
            DPRINTF(MMU, "MMU Fault: Data abort on domain. DACR: %#x"
                    " domain: %#x write:%d\n", state.dacr,
                    static_cast<uint8_t>(te->domain), is_write);
            // ...
            return std::make_shared<PrefetchAbort>(
                // ...
                state.isStage2, tran_method);
            // ...
            return std::make_shared<DataAbort>(
                // ...
                state.isStage2, tran_method);
            // ...
            panic("UNPRED domain\n");
    uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
    uint8_t hap = te->hap;
    // ...
    if (state.sctlr.afe == 1 || te->longDescFormat)
        // ...
    bool isWritable = true;
    // ...
    if (state.isStage2) {
    // ...
        DPRINTF(MMU, "Access permissions 0, checking rs:%#x\n",
                (int)state.sctlr.rs);
        if (!state.sctlr.xp) {
            switch ((int)state.sctlr.rs) {
                // ...
                abt = is_write || !is_priv;
                // ...
    // ...
        abt = !is_priv && is_write;
        isWritable = is_priv;
        // ...
        panic("UNPRED permissions\n");
        // ...
        abt = !is_priv || is_write;
        // ...
        panic("Unknown permissions %#x\n", ap);
    bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
    bool xn = te->xn || (isWritable && state.sctlr.wxn) ||
        (ap == 3 && state.sctlr.uwxn && is_priv);
    if (is_fetch && (abt || xn ||
        (te->longDescFormat && te->pxn && is_priv) ||
        // ...
        DPRINTF(MMU, "MMU Fault: Prefetch abort on permission check. AP:%d "
                "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d\n",
                ap, is_priv, is_write, te->ns,
                // ...
        return std::make_shared<PrefetchAbort>(
            // ...
            state.isStage2, tran_method);
    } else if (abt | hapAbt) {
        DPRINTF(MMU, "MMU Fault: Data abort on permission check. AP:%d priv:%d"
                " write:%d\n", ap, is_priv, is_write);
        return std::make_shared<DataAbort>(
            // ...
            state.isStage2 | !abt, tran_method);
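// checkPermissions64 (excerpt): the AArch64 path. Privileged cache-clean
// operations bypass the checks, HCR.PTW again guards stage-2 translations
// of page-table walks, and a failing atomic without read permission is
// reported as a read fault (is_write forced to false).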
    assert(state.aarch64);
    // ...
    if (req->isCacheClean() && state.exceptionLevel != EL0 &&
        !state.isStage2) {
    // ...
    Addr vaddr_tainted = req->getVaddr();
    // ...
    bool is_write = !req->isCacheClean() && mode == Write;
    bool is_atomic = req->isAtomic();
    // ...
    if (state.isStage2 && req->isPTWalk() && state.hcr.ptw &&
        // ...
        return std::make_shared<DataAbort>(
            vaddr_tainted, te->domain, is_write,
            // ...
    return std::make_shared<DataAbort>(
        // ...
        is_atomic ? false : is_write,
        // ...
    if (te->nonCacheable) {
        if (req->isPrefetch()) {
            // ...
            return std::make_shared<PrefetchAbort>(
    bool grant_read = true;
    // ...
    if (state.isStage2) {
        std::tie(grant, grant_read) = s2PermBits64(te, req, mode, tc, state,
            (!is_write && !is_fetch), is_write, is_fetch);
    } else {
        std::tie(grant, grant_read) = s1PermBits64(te, req, mode, tc, state,
            (!is_write && !is_fetch), is_write, is_fetch);
    }
    // ...
    DPRINTF(MMU, "MMU Fault: Prefetch abort on permission check. "
            "ns:%d scr.sif:%d sctlr.afe: %d\n",
            // ...
    return std::make_shared<PrefetchAbort>(
        // ...
    DPRINTF(MMU, "MMU Fault: Data abort on permission check."
        // ...
    return std::make_shared<DataAbort>(
        vaddr_tainted, te->domain,
        (is_atomic && !grant_read) ? false : is_write,
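// s2PermBits64 (excerpt): stage-2 rights come from the HAP field (bit 0
// grants read, bit 1 grants write). Note the asymmetry with stage 1: here
// an atomic is granted if either right is present, while the stage-1 check
// below requires both.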
    bool grant_read = te->hap & 0b01;
    bool grant_write = te->hap & 0b10;
    // ...
    uint8_t pxn = te->pxn;
    // ...
    DPRINTF(MMU, "Checking S2 permissions: hap:%d, xn:%d, pxn:%d, r:%d, "
            "w:%d, x:%d\n", te->hap, xn, pxn, r, w, x);
    // ...
    } else if (req->isAtomic()) {
        grant = grant_read || grant_write;
    // ...
    panic("Invalid Operation\n");
    // ...
    return std::make_pair(grant, grant_read);
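// s1PermBits64 (excerpt): stage-1 AArch64 permissions. In regimes with an
// unprivileged stage, AP[2:1] decodes into separate privileged/user read
// and write grants; otherwise only AP[2] (read-only) matters. Execution is
// then masked by XN/PXN, by WXN when the page is writable, and by SCR.SIF
// for secure instruction fetch.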
    bool grant = false, grant_read = true, grant_write = true,
         grant_exec = true;
    // ...
    const uint8_t ap = te->ap & 0b11;
    const bool is_priv = state.isPriv && !(req->getFlags() & UserMode);
    // ...
    uint8_t pxn = te->pxn;
    // ...
    DPRINTF(MMU, "Checking S1 permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
            "w:%d, x:%d, is_priv: %d, wxn: %d\n", ap, xn,
            pxn, r, w, x, is_priv, wxn);
    // ...
    return std::make_pair(false, false);
    // ...
    switch (ap) {
      case 0b00:
        pr = 1; pw = 1; ur = 0; uw = 0;
        break;
      case 0b01:
        pr = 1; pw = 1; ur = 1; uw = 1;
        break;
      case 0b10:
        pr = 1; pw = 0; ur = 0; uw = 0;
        break;
      case 0b11:
        pr = 1; pw = 0; ur = 1; uw = 0;
        break;
    }
    // ...
    const bool px = !(pxn || uw);
    // ...
    grant_read = is_priv ? pr : ur;
    grant_write = is_priv ? pw : uw;
    grant_exec = is_priv ? px : ux;
    // ...
    switch (bits(ap, 1)) {
      case 0:
        grant_read = 1; grant_write = 1;
        break;
      case 1:
        grant_read = 1; grant_write = 0;
        break;
    }
    // ...
    grant_exec = grant_exec && !(wxn && grant_write);
    // ...
    grant_exec = grant_exec && !state.scr.sif;
    // ...
    } else if (req->isAtomic()) {
        grant = grant_read && grant_write;
    // ...
    return std::make_pair(grant, grant_read);
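// faultPAN / checkPAN (excerpts): with FEAT_PAN, privileged accesses to
// user-accessible memory fault. Most cache-maintenance operations are
// exempt, and the EL2&0 (E2H) regime has its own applicability rules.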
    // faultPAN:
    bool exception = false;
    switch (state.exceptionLevel) {
    // ...
    // checkPAN:
    if (req->isCacheMaintenance() &&
        // ...
    } else if (!is_priv && !(state.hcr.e2h && !state.hcr.tge)) {
    // purifyTaggedAddr (AArch64): bit 55 selects which TTBR's tagging
    // controls apply; computeAddrTop then gives the top bit of the
    // architecturally significant address.
    const bool selbit = bits(vaddr_tainted, 55);
    // ...
    const auto topbit = state.computeAddrTop(tc, selbit, is_inst, tcr, el);
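// translateMmuOff (excerpt): with translation disabled the VA is used as
// the PA directly and memory attributes are synthesized; unless TEX-remap
// state says otherwise the access is treated as non-cacheable, with
// instruction fetches staying cacheable while SCTLR.I is set.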
    bool is_atomic = req->isAtomic();
    req->setPaddr(vaddr);
    // ...
    f = std::make_shared<PrefetchAbort>(vaddr,
        // ...
    f = std::make_shared<DataAbort>(vaddr,
    if (long_desc_format || state.sctlr.tre == 0 || state.nmrr.ir0 == 0 ||
        state.nmrr.or0 == 0 || state.prrr.tr0 != 0x2) {
        if (!req->isCacheMaintenance()) {
    // ...
    temp_te.ns = !in_secure_state;
    bool dc = (HaveExt(tc, ArmExtension::FEAT_VHE) &&
        // ...
    bool i_cacheability = state.sctlr.i && !state.sctlr.m;
    // ...
    temp_te.innerAttrs = i_cacheability ? 0x2 : 0x0;
    temp_te.outerAttrs = i_cacheability ? 0x2 : 0x0;
    // ...
    DPRINTF(MMU, "(No MMU) setting memory attributes: shareable: "
            "%d, innerAttrs: %d, outerAttrs: %d, stage2: %d\n",
Fault
MMU::translateMmuOn(ThreadContext *tc, const RequestPtr &req, Mode mode,
                    Translation *translation, bool &delay, bool timing,
                    bool functional, Addr vaddr, TranMethod tran_method,
                    CachedState &state)
{
    // ...
    Addr vaddr_tainted = req->getVaddr();
    // ...
        functional, &mergeTe, state);
    // ...
    DPRINTF(MMU, "Setting memory attributes: shareable: %d, innerAttrs: %d, "
            "outerAttrs: %d, mtype: %d, stage2: %d\n",
            te->shareable, te->innerAttrs, te->outerAttrs,
            static_cast<uint8_t>(te->mtype), state.isStage2);
    // ...
    if (te->nonCacheable && !req->isCacheMaintenance())
    // ...
    if (!is_fetch && fault == NoFault &&
        // ...
        return std::make_shared<DataAbort>(
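// translateFs (excerpt): the full-system entry point. It untags the VA,
// routes the request to the MMU-off shortcut when stage 1 is disabled (or
// HCR.DC forces a default-cacheable mapping), otherwise takes the MMU-on
// path, and finally lets the self-debug logic raise debug exceptions.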
Fault
MMU::translateFs(const RequestPtr &req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing,
                 ArmTranslationType tran_type, bool functional,
                 CachedState &state)
{
    assert(!(timing && functional));
    // ...
    Addr vaddr_tainted = req->getVaddr();
    // ...
    if (state.aarch64) {
        // ...
        vaddr = vaddr_tainted;
    // ...
    DPRINTF(MMU, "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
        // ...
    DPRINTF(MMU, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
            "flags %#lx tranType 0x%x\n", vaddr_tainted, mode,
            // ...
    if (!state.isStage2) {
        if ((req->isInstFetch() && (!state.sctlr.i)) ||
            ((!req->isInstFetch()) && (!state.sctlr.c))) {
            if (!req->isCacheMaintenance()) {
    // ...
    return std::make_shared<DataAbort>(
    // ...
    if (HaveExt(tc, ArmExtension::FEAT_VHE) &&
        // ...
    else if (state.hcr.dc == 1)
    // ...
        long_desc_format, state);
    // ...
    DPRINTF(MMU, "Translating %s=%#x context=%d\n",
            state.isStage2 ? "IPA" : "VA", vaddr_tainted, state.asid);
    // ...
    if (sd->enabled() && fault == NoFault) {
        fault = sd->testDebug(tc, req, mode);
    // translateAtomic tail:
        tran_type, false, state);
    // ...
    // translateFunctional tail:
        tran_type, true, state);
    // ...
    // translateTiming:
    assert(translation);
    // ...
    // translateComplete:
    fault = translateFs(req, tc, mode, translation, delay, true, tran_type,
        // ...
    DPRINTF(MMU, "Translation returning delay=%d fault=%d\n", delay,
        // ...
    if (translation && (call_from_s2 || !state.stage2Req || req->hasPaddr() ||
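// getVMID / updateMiscReg (excerpts): the VMID width is taken from
// ID_AA64MMFR1_EL1.VMIDBits, and the cached translation state (TTBR ASID
// selection via TTBCR.A1, stage-2 routing derived from HCR.VM/TGE/E2H,
// etc.) is refreshed only when invalid or when the translation type
// changes.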
    switch (mmfr1.vmidbits) {
        // ...
        panic("Reserved ID_AA64MMFR1_EL1.VMIDBits value: %#x",
    // ...
        ((tran_type == state.curTranType) || stage2)) {
    // ...
    state.updateMiscReg(tc, tran_type);
    // ...
    if (state.directToStage2) {
    // ...
    exceptionLevel = tranTypeEL(cpsr, scr, tran_type);
    // ...
    switch (currRegime) {
        // ...
        uint64_t ttbr_asid = ttbcr.a1 ?
            // ...
            (mmu->haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
        // ...
        uint64_t ttbr_asid = ttbcr.a1 ?
            // ...
            (mmu->haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
    // ...
    isPriv = exceptionLevel != EL0;
    if (mmu->release()->has(ArmExtension::VIRTUALIZATION)) {
        // ...
        if (HaveExt(tc, ArmExtension::FEAT_VHE) &&
            hcr.e2h == 1 && hcr.tge == 1) {
        // ...
        if (hcr.e2h == 1 && (exceptionLevel == EL2
            || (hcr.tge == 1 && exceptionLevel == EL0))) {
            directToStage2 = false;
            // ...
            stage2DescReq = false;
        // ...
        stage2Req = isStage2 ||
            (vm && exceptionLevel < EL2 && el2_enabled &&
            // ...
        stage2DescReq = isStage2 ||
            (vm && exceptionLevel < EL2 && el2_enabled);
        directToStage2 = !isStage2 && stage2Req && !sctlr.m;
    // ...
        directToStage2 = false;
        // ...
        stage2DescReq = false;
    // ...
    asid = context_id.asid;
    // ...
    if (mmu->release()->has(ArmExtension::VIRTUALIZATION)) {
        // ...
        if (exceptionLevel == EL2) {
        // ...
        stage2Req = isStage2 ||
            (hcr.vm && exceptionLevel < EL2 && el2_enabled &&
            // ...
        stage2DescReq = isStage2 ||
            (hcr.vm && exceptionLevel < EL2 && el2_enabled);
        directToStage2 = !isStage2 && stage2Req && !sctlr.m;
    // ...
        directToStage2 = false;
        stage2DescReq = false;
    // ...
    miscRegValid = true;
    curTranType = tran_type;
    // ...
    return currEL(cpsr) == EL3 && scr.ns == 0 ?
        // ...
    panic("Unknown translation mode!\n");
    Translation *translation, bool timing, bool functional,
    // ...
    return getTE(te, req, tc, mode, translation, timing, functional,
        // ...
    lookup_data.va = va;
    // ...
    lookup_data.vmid = vmid;
    lookup_data.ss = ss;
    // ...
    return tlb->multiLookup(lookup_data);
    Translation *translation, bool timing, bool functional,
    // ...
    if (state.isStage2) {
    // ...
    Addr vaddr_tainted = req->getVaddr();
    // ...
    if (state.aarch64) {
        // ...
        vaddr = vaddr_tainted;
    // ...
    if (req->isPrefetch()) {
        // ...
        return std::make_shared<PrefetchAbort>(
    // ...
    DPRINTF(MMU, "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
        // ...
        translation, timing, functional, ss,
        ipaspace, tran_type, state.stage2DescReq, *te);
    // ...
    if (timing || fault != NoFault) {
    // ...
        true, false, regime, state.isStage2, mode);
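// getResultTe (excerpt): for two-stage translations, the stage-1 and
// stage-2 entries are looked up and merged into a single effective entry
// (mergeTe) before the final permission checks run.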
    Translation *translation, bool timing, bool functional,
    // ...
    if (state.isStage2) {
        // ...
        fault = getTE(&s2_te, req, tc, mode, translation, timing, functional,
                      state.securityState, ipaspace,
            // ...
    Addr vaddr_tainted = req->getVaddr();
    // ...
    fault = getTE(&s1_te, req, tc, mode, translation, timing, functional,
        // ...
        req, translation, mode, timing, functional,
    // ...
    fault = s2_lookup->getTe(tc, mergeTe);
    // ...
    if (state.isStage2) {
        DPRINTF(MMU, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
                vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0,
        // ...
    auto arm_fault = reinterpret_cast<ArmFault*>(fault.get());
    // ...
    // isCompleteTranslation:
    return entry && !entry->partial;
    // ...
    // takeOverFrom:
    auto *ommu = dynamic_cast<MMU*>(old_mmu);
    // ...
    _attr = ommu->_attr;
    // ...
    // setTestInterface:
    fatal_if(!ti, "%s is not a valid ARM TLB tester\n", _ti->name());
    if (!test || !req->hasSize() || req->getSize() == 0 ||
        req->isCacheMaintenance()) {
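// Stats: per-cause MMU fault counters, registered via ADD_STAT.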
MMU::Stats::Stats(statistics::Group *parent)
    : statistics::Group(parent),
      ADD_STAT(alignFaults, statistics::units::Count::get(),
               "Number of MMU faults due to alignment restrictions"),
      ADD_STAT(prefetchFaults, statistics::units::Count::get(),
               "Number of MMU faults due to prefetch"),
      ADD_STAT(domainFaults, statistics::units::Count::get(),
               "Number of MMU faults due to domain restrictions"),
      ADD_STAT(permsFaults, statistics::units::Count::get(),
               "Number of MMU faults due to permissions restrictions")