#include "debug/Checkpoint.hh"
#include "debug/Drain.hh"
#include "debug/PageTableWalker.hh"
#include "debug/TLB.hh"
#include "debug/TLBVerbose.hh"
using namespace ArmISA;
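// TableWalker::TableWalker(const Params &p) -- constructor member
// initializer list (excerpt): the per-level descriptor events and the
// process event are bound to their wrapper methods here.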
requestorId(p.sys->getRequestorId(this)),
port(new Port(*this)),
isStage2(p.is_stage2), tlb(NULL),
currState(NULL), pending(false),
numSquashable(p.num_squash_per_cycle),
doL2DescEvent([this]{ doL2DescriptorWrapper(); }, name()),
doL0LongDescEvent([this]{ doL0LongDescriptorWrapper(); }, name()),
doL1LongDescEvent([this]{ doL1LongDescriptorWrapper(); }, name()),
doL2LongDescEvent([this]{ doL2LongDescriptorWrapper(); }, name()),
doL3LongDescEvent([this]{ doL3LongDescriptorWrapper(); }, name()),
LongDescEventByLevel { &doL0LongDescEvent, &doL1LongDescEvent,
                       &doL2LongDescEvent, &doL3LongDescEvent },
doProcessEvent([this]{ processWalkWrapper(); }, name()),
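// The owning ArmSystem is queried for the physical address range and
// large-ASID support; the fallback assignment below simply clears
// _haveLargeAsid64 (the surrounding control flow is elided in this excerpt).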
ArmSystem *arm_sys = dynamic_cast<ArmSystem *>(p.sys);
_physAddrRange = arm_sys->physAddrRange();
_haveLargeAsid64 = arm_sys->haveLargeAsid64();
_haveLargeAsid64 = false;
if (if_name == "port") {
physAddrRange(0), req(nullptr),
asid(0), vmid(0), transState(nullptr),
vaddr(0), vaddr_tainted(0),
sctlr(0), scr(0), cpsr(0), tcr(0),
htcr(0), hcr(0), vtcr(0),
isUncacheable(false), longDescData(std::nullopt),
hpd(false), sh(0), irgn(0), orgn(0), stage2Req(false),
stage2Tran(nullptr), timing(false), functional(false),
delayed(false), tableWalker(nullptr)
reqQueue(_walker, *this),
snoopRespQueue(_walker, *this)
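// TableWalker::Port -- descriptor reads are issued as functional, atomic
// or timing requests depending on the walker's current mode.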
state->delay = delay;
auto pkt = createPacket(req, data, 0, nullptr);
handleRespPacket(pkt);
auto pkt = createPacket(req, data, delay, nullptr);
Tick lat = sendAtomic(pkt);
handleRespPacket(pkt, lat);
auto pkt = createPacket(req, data, delay, event);
schedTimingReq(pkt, curTick());
assert(pkt->req->isUncacheable() ||
handleRespPacket(pkt);
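// Drain handling: the walker reports itself drained only once the pending
// queue and all per-level state queues are empty.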
DPRINTF(Drain, "TableWalker done draining, processing drain event\n");
bool state_queues_not_empty = false;
for (int i = 0; i < LookupLevel::Num_ArmLookupLevel; ++i) {
state_queues_not_empty = true;
DPRINTF(Drain, "TableWalker not drained\n");
DPRINTF(Drain, "TableWalker free, no need to drain\n");
bool disable_cacheability = isStage2 ?
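// TableWalker::walk() -- entry point for a new table walk; a fresh
// WalkerState is set up for the request.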
bool _stage2Req, const TlbEntry *walk_entry)
assert(!(_functional && _timing));
DPRINTF(PageTableWalker, "creating new instance of WalkerState\n");
} else if (_functional) {
    "creating functional instance of WalkerState\n");
} else if (_timing) {
return std::make_shared<ReExec>();
assert(release->has(ArmExtension::VIRTUALIZATION));
panic("Invalid translation regime");
if (long_desc_format) {
} else if (long_desc_format) {
if (te && te->partial) {
} else if (long_desc_format) {
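// processWalkWrapper() -- before servicing the next queued walk, up to
// numSquashable walks belonging to squashed instructions are dropped and
// completed with an UnimpFault.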
unsigned num_squashed = 0;
(te && !te->partial))) {
DPRINTF(TLB, "Squashing table walk for address %#x\n",
std::make_shared<UnimpFault>("Squashed Inst"),
const auto irgn0_mask = 0x1;
const auto irgn1_mask = 0x40;
const bool have_security = release->has(ArmExtension::SECURITY);
DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
return std::make_shared<PrefetchAbort>(
return std::make_shared<DataAbort>(
return std::make_shared<PrefetchAbort>(
return std::make_shared<DataAbort>(
DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr,
sizeof(uint32_t), flag, LookupLevel::L1,
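// processWalkLPAE() -- long-descriptor (LPAE) walk: pick between
// TTBR0/TTBR1/HTTBR/VTTBR, derive the starting lookup level and the first
// descriptor address from the TxSZ fields, then fetch it.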
Addr ttbr, ttbr0_max, ttbr1_min, desc_addr;
DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n",
DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n");
LookupLevel::L1 : LookupLevel::L2;
DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n");
ttbr0_max = (1ULL << 32) -
ttbr0_max = (1ULL << 32) - 1;
ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz));
DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n");
return std::make_shared<PrefetchAbort>(
return std::make_shared<DataAbort>(
if (ttbr0_max < (1ULL << 30))
    start_lookup_level = LookupLevel::L2;
DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
return std::make_shared<PrefetchAbort>(
return std::make_shared<DataAbort>(
if (ttbr1_min >= (1ULL << 31) + (1ULL << 30))
    start_lookup_level = LookupLevel::L2;
return std::make_shared<PrefetchAbort>(
return std::make_shared<DataAbort>(
if (start_lookup_level == LookupLevel::L1) {
desc_addr = mbits(ttbr, 39, n) |
DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
n = (tsz >= 2 ? 14 - tsz : 12);
desc_addr = mbits(ttbr, 39, n) |
DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
sizeof(uint64_t), flag, start_lookup_level,
panic("Invalid grain size\n");
return tsz > max_txsz || tsz < min_txsz;
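// processWalkAArch64() -- AArch64 walk: choose the base register for the
// current translation regime, validate TxSZ and the VA range, and fetch
// the first long descriptor (walkAddresses() computes where to start).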
DPRINTF(TLB, "Beginning table walk for address %#llx, TCR: %#llx\n",
bool vaddr_fault = false;
DPRINTF(TLB, " - Selecting VSTTBR_EL2 (AArch64 stage 2)\n");
DPRINTF(TLB, " - Selecting VTTBR_EL2 (AArch64 stage 2)\n");
DPRINTF(TLB, " - Selecting TTBR0_EL1 (AArch64)\n");
DPRINTF(TLB, " - Selecting TTBR1_EL1 (AArch64)\n");
DPRINTF(TLB, " - Selecting TTBR0_EL2 (AArch64)\n");
DPRINTF(TLB, " - Selecting TTBR1_EL2 (AArch64)\n");
DPRINTF(TLB, " - Selecting TTBR0_EL3 (AArch64)\n");
return std::make_shared<PrefetchAbort>(
return std::make_shared<DataAbort>(
warn_once("Reserved granule size requested; gem5's IMPLEMENTATION "
          "DEFINED behavior takes this to mean 4KB granules\n");
auto [table_addr, desc_addr, start_lookup_level] = walkAddresses(
    ttbr, tg, tsz, pa_range);
DPRINTF(TLB, "Address size fault before any lookup\n");
return std::make_shared<PrefetchAbort>(
return std::make_shared<DataAbort>(
sizeof(uint64_t), flag, start_lookup_level,
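// walkAddresses() -- returns the initial table address, the first
// descriptor address and the starting lookup level; a walk-cache hit
// allows upper lookup levels to be skipped.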
std::tuple<Addr, Addr, TableWalker::LookupLevel>
LookupLevel first_level = LookupLevel::Num_ArmLookupLevel;
Addr table_addr = 0;
"Walk Cache hit: va=%#x, level=%d, table address=%#x\n",
table_addr = entry->pfn;
ptops->firstLevel(64 - tsz);
panic_if(first_level == LookupLevel::Num_ArmLookupLevel,
         "Table walker couldn't find lookup level\n");
int base_addr_lo = 3 + tsz - stride * (3 - first_level) - tg;
if (pa_range == 52) {
    int z = (base_addr_lo < 6) ? 6 : base_addr_lo;
    table_addr = mbits(ttbr, 47, z);
    table_addr |= (bits(ttbr, 5, 2) << 48);
table_addr = mbits(ttbr, 47, base_addr_lo);
desc_addr = table_addr + ptops->index(currState->vaddr, first_level, tsz);
return std::make_tuple(table_addr, desc_addr, first_level);
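// memAttrs() -- short-descriptor memory attributes: decode the TEXCB bits
// (or PRRR/NMRR when TEX remap is in effect) into shareability and
// cacheability for the TLB entry.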
uint8_t texcb, bool s)
DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s);
te.shareable = false;
te.nonCacheable = false;
te.outerShareable = false;
te.nonCacheable = true;
te.shareable = true;
te.nonCacheable = true;
te.shareable = true;
te.outerAttrs = bits(texcb, 1, 0);
te.outerAttrs = bits(texcb, 1, 0);
te.nonCacheable = true;
te.outerAttrs = bits(texcb, 1, 0);
panic("Reserved texcb value!\n");
panic("Implementation-defined texcb value!\n");
te.nonCacheable = true;
te.shareable = false;
panic("Reserved texcb value!\n");
if (bits(texcb, 1, 0) == 0 || bits(texcb, 3, 2) == 0)
    te.nonCacheable = true;
te.innerAttrs = bits(texcb, 1, 0);
te.outerAttrs = bits(texcb, 3, 2);
panic("More than 32 states for 5 bits?\n");
DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr);
uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0;
switch (bits(texcb, 2, 0)) {
te.outerShareable = (prrr.nos0 == 0);
te.outerShareable = (prrr.nos1 == 0);
te.outerShareable = (prrr.nos2 == 0);
te.outerShareable = (prrr.nos3 == 0);
te.outerShareable = (prrr.nos4 == 0);
te.outerShareable = (prrr.nos5 == 0);
panic("Imp defined type\n");
te.outerShareable = (prrr.nos7 == 0);
DPRINTF(TLBVerbose, "StronglyOrdered\n");
te.nonCacheable = true;
te.shareable = true;
DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n", prrr.ds1, prrr.ds0, s);
te.nonCacheable = true;
te.shareable = true;
te.shareable = true;
DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n", prrr.ns1, prrr.ns0, s);
te.shareable = true;
te.shareable = true;
panic("Reserved type");
te.nonCacheable = true;
te.nonCacheable = true;
DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, "
te.shareable, te.innerAttrs, te.outerAttrs);
te.setAttributes(false);
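// memAttrsLPAE() -- long-descriptor attributes: stage 2 decodes the
// descriptor's MemAttr field directly, stage 1 indexes MAIR via AttrIndx.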
uint8_t sh = l_descriptor.sh();
uint8_t attr_3_2 = (attr >> 2) & 0x3;
uint8_t attr_1_0 = attr & 0x3;
DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh);
if (attr_3_2 == 0) {
te.innerAttrs = attr_1_0 == 0 ? 1 : 3;
te.nonCacheable = true;
te.outerAttrs = attr_3_2 == 1 ? 0 :
                attr_3_2 == 2 ? 2 : 1;
te.innerAttrs = attr_1_0 == 1 ? 0 :
                attr_1_0 == 2 ? 6 : 5;
te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1);
uint8_t attrIndx = l_descriptor.attrIndx();
attr = (mair >> (8 * (attrIndx % 4))) & 0xff;
uint8_t attr_7_4 = bits(attr, 7, 4);
uint8_t attr_3_0 = bits(attr, 3, 0);
DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n",
        attrIndx, sh, attr);
te.nonCacheable = false;
if (attr_3_0 == 0x0)
else if (attr_3_0 == 0x4)
panic("Unpredictable behavior\n");
te.nonCacheable = true;
if (attr_3_0 == 0x4)
te.nonCacheable = true;
else if (attr_3_0 < 0x8)
    panic("Unpredictable behavior\n");
if (attr_7_4 & 0x4) {
    te.outerAttrs = (attr_7_4 & 1) ? 1 : 3;
te.outerAttrs = 0x2;
if (attr_3_0 != 0x4 && attr_3_0 < 0x8)
    panic("Unpredictable behavior\n");
panic("Unpredictable behavior\n");
te.innerAttrs = 0x1;
te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0;
te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7;
panic("Unpredictable behavior\n");
te.outerShareable = sh == 2;
te.shareable = (sh & 0x2) ? true : false;
te.setAttributes(true);
te.attributes |= (uint64_t) attr << 56;
return !bits(attrs, 2) ||
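// memAttrsAArch64() -- AArch64 attributes: stage 2 decodes MemAttr, stage 1
// reads the MAIR_ELx byte selected by AttrIndx; the FEAT_XS attribute is
// derived from the resulting cacheability.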
uint8_t sh = l_descriptor.sh();
uint8_t attr_hi = (attr >> 2) & 0x3;
uint8_t attr_lo = attr & 0x3;
DPRINTF(TLBVerbose, "memAttrsAArch64 MemAttr:%#x sh:%#x\n", attr, sh);
te.innerAttrs = attr_lo == 0 ? 1 : 3;
te.nonCacheable = true;
te.outerAttrs = attr_hi == 1 ? 0 :
                attr_hi == 2 ? 2 : 1;
te.innerAttrs = attr_lo == 1 ? 0 :
                attr_lo == 2 ? 6 : 5;
te.nonCacheable = (attr_hi == 1) || (attr_hi == 2) ||
                  (attr_lo == 1) || (attr_lo == 2);
te.xs = !l_descriptor.fnxs();
uint8_t attrIndx = l_descriptor.attrIndx();
DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh);
panic("Invalid exception level");
attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
case 0b00000000 ... 0b00001111:
te.nonCacheable = true;
te.nonCacheable = true;
te.nonCacheable = true;
te.xs = te.nonCacheable;
te.shareable = sh == 2;
te.outerShareable = (sh & 0x2) ? true : false;
te.attributes = ((uint64_t) attr << 56) |
te.nonCacheable = true;
te.nonCacheable = (te.outerAttrs == 0 || te.outerAttrs == 2) &&
                  (te.innerAttrs == 0 || te.innerAttrs == 2);
te.xs = te.nonCacheable;
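// doL1Descriptor() -- decode a short-format L1 descriptor: fault on
// reserved entries, map sections directly (supersections are
// unimplemented), or fetch the L2 page table the entry points to.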
DPRINTF(TLB, "L1 descriptor for %#x is %#x\n",
DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
std::make_shared<PrefetchAbort>(
std::make_shared<DataAbort>(
panic("Haven't implemented supersections\n");
DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n",
sizeof(uint32_t), flag, LookupLevel::L2,
panic("A new type in a 2 bit field?\n");
return std::make_shared<PrefetchAbort>(
return std::make_shared<DataAbort>(
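// doLongDescriptor() -- decode a long-format descriptor: raise address
// size/access faults where needed, finish on a block or page mapping, or
// schedule the next-level table fetch.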
DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n",
DPRINTF(PageTableWalker, "Analyzing L%d descriptor: %#llx, pxn: %d, "
        "xn: %d, ap: %d, af: %d, type: %d\n",
DPRINTF(PageTableWalker, "Analyzing L%d descriptor: %#llx, type: %d\n",
DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
DPRINTF(TLB, "L%d descriptor causing Access Fault\n",
DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
Event *event = NULL;
case LookupLevel::L1:
case LookupLevel::L2:
case LookupLevel::L3:
panic("Wrong lookup level in table walk\n");
sizeof(uint64_t), flag, L, event,
panic("A new type in a 2 bit field?\n");
DPRINTF(TLB, "L2 descriptor for %#x is %#x\n",
DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
DPRINTF(PageTableWalker, "L1 Desc object host addr: %p\n",
DPRINTF(PageTableWalker, "L1 Desc object data: %08x\n",
DPRINTF(PageTableWalker, "calling doL1Descriptor for vaddr:%#x\n",
DPRINTF(PageTableWalker, "calling translateTiming again\n");
DPRINTF(PageTableWalker, "calling doL2Descriptor for vaddr:%#x\n",
DPRINTF(PageTableWalker, "calling translateTiming again\n");
DPRINTF(PageTableWalker, "calling doLongDescriptor for vaddr:%#x\n",
DPRINTF(PageTableWalker, "calling translateTiming again\n");
if (curr_lookup_level >= LookupLevel::Num_ArmLookupLevel - 1)
    panic("Max. number of lookups already reached in table walk\n");
2192 "Fetching descriptor at address: 0x%x stage2Req: %d\n",
2207 fault = tran->fault;
2223 (this->*doDescriptor)();
2248 (this->*doDescriptor)();
2251 (this->*doDescriptor)();
DPRINTF(PageTableWalker, "Adding to walker fifo: "
        "queue size before adding: %d\n",
const bool have_security = release->has(ArmExtension::SECURITY);
te.longDescFormat = true;
te.size = (1ULL << te.N) - 1;
te.type = TypeTLB::unified;
DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
        "vmid:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
        te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid,
        te.nonCacheable, te.ns);
DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
const bool have_security = release->has(ArmExtension::SECURITY);
te.longDescFormat = long_descriptor;
te.size = (1 << te.N) - 1;
te.pfn = descriptor.pfn();
te.xn = descriptor.xn();
TypeTLB::instruction : TypeTLB::data;
if (long_descriptor) {
te.hap = l_descriptor.ap();
descriptor.ap() >> 1) << 1) |
te.ap = descriptor.ap();
DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
        "vmid:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
        te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid,
        te.nonCacheable, te.ns);
DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
switch (lookup_level_as_int) {
case LookupLevel::L1:
    return LookupLevel::L1;
case LookupLevel::L2:
    return LookupLevel::L2;
case LookupLevel::L3:
    return LookupLevel::L3;
panic("Invalid lookup level conversion");
panic("unknown page size");
auto req = std::make_shared<Request>();
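// Stage2Walk -- helper Translation object used when a stage 1 descriptor
// fetch must itself be translated by the stage 2 MMU before being read.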
: data(_data), numBytes(0), event(_event), parent(_parent),
req = std::make_shared<Request>();
parent.getTableWalkerPort().sendTimingReq(req, data,
parent.mmu->translateTiming(req, tc, this, mode, tranType, true);
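// TableWalkerStats -- statistics group: walk counts per descriptor format
// and termination level, squash counts, wait/service latencies, pending
// walks and page sizes.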
: statistics::Group(parent),
  ADD_STAT(walks, statistics::units::Count::get(),
           "Table walker walks requested"),
  ADD_STAT(walksShortDescriptor, statistics::units::Count::get(),
           "Table walker walks initiated with short descriptors"),
  ADD_STAT(walksLongDescriptor, statistics::units::Count::get(),
           "Table walker walks initiated with long descriptors"),
  ADD_STAT(walksShortTerminatedAtLevel, statistics::units::Count::get(),
           "Level at which table walker walks with short descriptors "
  ADD_STAT(walksLongTerminatedAtLevel, statistics::units::Count::get(),
           "Level at which table walker walks with long descriptors "
  ADD_STAT(squashedBefore, statistics::units::Count::get(),
           "Table walks squashed before starting"),
  ADD_STAT(squashedAfter, statistics::units::Count::get(),
           "Table walks squashed after completion"),
  "Table walker wait (enqueue to first request) latency"),
  ADD_STAT(walkServiceTime, statistics::units::Tick::get(),
           "Table walker service (enqueue to completion) latency"),
  "Table walker pending requests distribution"),
  ADD_STAT(pageSizes, statistics::units::Count::get(),
           "Table walker page sizes translated"),
  ADD_STAT(requestOrigin, statistics::units::Count::get(),
           "Table walker requests started/completed, data/inst")
FaultSource
Generic fault source enums used to index into {short/long/aarch64}DescFaultSources[] to get the actua...
virtual void annotate(AnnotationIDs id, uint64_t val)
static bool hasUnprivRegime(TranslationRegime regime)
static ExceptionLevel tranTypeEL(CPSR cpsr, SCR scr, ArmTranslationType type)
Determine the EL to use for the purpose of a translation given a specific translation type.
void translateTiming(const RequestPtr &req, ThreadContext *tc, Translation *translation, Mode mode) override
TlbEntry * lookup(Addr vpn, uint16_t asn, vmid_t vmid, SecurityState ss, bool functional, bool ignore_asn, TranslationRegime target_regime, bool stage2, BaseMMU::Mode mode)
Lookup an entry in the TLB.
bool hasWalkCache() const
Fault translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode) override
TranslationGenPtr translateFunctional(Addr start, Addr size, ThreadContext *tc, Mode mode, Request::Flags flags) override
Returns a translation generator for a region of virtual addresses, instead of directly translating a ...
const ArmRelease * release() const
void multiInsert(const Lookup &lookup_data, TlbEntry &pte)
Insert a PTE in the current TLB and in the higher levels.
virtual bool global(WalkerState *currState) const =0
virtual bool xn() const =0
virtual uint8_t * getRawPtr()=0
virtual uint64_t getRawData() const =0
virtual DomainType domain() const =0
virtual uint8_t texcb() const
virtual std::string dbgHeader() const =0
virtual uint8_t ap() const =0
virtual bool shareable() const
virtual uint8_t offsetBits() const =0
virtual Addr pfn() const =0
LookupLevel lookupLevel
Current lookup level for this descriptor.
virtual bool secure(bool have_security, WalkerState *currState) const =0
uint32_t data
The raw bits of the entry.
bool supersection() const
Is the page a Supersection (16 MiB)?
Addr l2Addr() const
Address of L2 descriptor if it exists.
DomainType domain() const override
Domain Client/Manager: ARM DDI 0406B: B3-31.
uint8_t ap() const override
Three bit access protection flags.
uint8_t ap() const override
Three bit access protection flags.
uint32_t data
The raw bits of the entry.
bool invalid() const
Is the entry invalid.
Long-descriptor format (LPAE)
uint8_t sh() const
2-bit shareability field
uint8_t memAttr() const
Memory attributes, only used by stage 2 translations.
uint8_t rwTable() const
R/W protection flag for subsequent levels of lookup.
uint8_t offsetBits() const override
Return the bit width of the page/block offset.
bool pxn() const
Is privileged execution allowed on this mapping? (LPAE only)
bool af() const
Returns true if the access flag (AF) is set.
bool pxnTable() const
Is privileged execution allowed on subsequent lookup levels?
bool aarch64
True if the current lookup is performed in AArch64 state.
EntryType type() const
Return the descriptor type.
bool xn() const override
Is execution allowed on this mapping?
DomainType domain() const override
bool secure(bool have_security, WalkerState *currState) const override
Returns true if this entry targets the secure physical address map.
std::string dbgHeader() const override
Addr nextTableAddr() const
Return the address of the next page table.
GrainSize grainSize
Width of the granule size in bits.
uint8_t attrIndx() const
Attribute index.
uint8_t ap() const override
2-bit access protection flags
uint64_t data
The raw bits of the entry.
Addr nextDescAddr(Addr va) const
Return the address of the next descriptor.
Addr paddr() const
Return the physical address of the entry.
bool fnxs() const
FNXS for FEAT_XS only.
uint8_t userTable() const
User/privileged mode protection flag for subsequent levels of lookup.
bool secureTable() const
Whether the subsequent levels of lookup are secure.
uint64_t getRawData() const override
bool xnTable() const
Is execution allowed on subsequent lookup levels?
Port(TableWalker &_walker)
void sendAtomicReq(const RequestPtr &req, uint8_t *data, Tick delay)
void handleResp(TableWalkerState *state, Addr addr, Addr size, Tick delay=0)
void sendFunctionalReq(const RequestPtr &req, uint8_t *data)
void handleRespPacket(PacketPtr pkt, Tick delay=0)
void sendTimingReq(const RequestPtr &req, uint8_t *data, Tick delay, Event *event)
PacketPtr createPacket(const RequestPtr &req, uint8_t *data, Tick delay, Event *event)
bool recvTimingResp(PacketPtr pkt) override
Receive a timing response from the peer.
This translation class is used to trigger the data fetch once a timing translation returns the transl...
void finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc, BaseMMU::Mode mode)
void setVirt(Addr vaddr, int size, Request::Flags flags, int requestorId)
Stage2Walk(TableWalker &_parent, uint8_t *_data, Event *_event, Addr vaddr, BaseMMU::Mode mode, MMU::ArmTranslationType tran_type)
void translateTiming(ThreadContext *tc)
bool isWrite
If the access is a write.
Addr vaddr_tainted
The virtual address that is being translated.
RequestPtr req
Request that is currently being serviced.
VTCR_t vtcr
Cached copy of the vtcr as it existed when translation began.
HCR hcr
Cached copy of the hcr as it existed when translation began.
Addr vaddr
The virtual address that is being translated with tagging removed.
bool functional
If the atomic mode should be functional.
bool secureLookup
Whether lookups should be treated as using the secure state.
bool isUncacheable
True if table walks are uncacheable (for table descriptors)
ThreadContext * tc
Thread context that we're doing the walk for.
bool hpd
Hierarchical access permission disable.
BaseMMU::Translation * transState
Translation state for delayed requests.
SecurityState ss
Security State of the access.
std::optional< LongDescData > longDescData
BaseMMU::Mode mode
Save mode for use in delayed response.
HTCR htcr
Cached copy of the htcr as it existed when translation began.
ExceptionLevel el
Current exception level.
MMU::ArmTranslationType tranType
The translation type that has been requested.
SCTLR sctlr
Cached copy of the sctlr as it existed when translation began.
TableWalker * tableWalker
Fault fault
The fault that we are going to return.
Tick startTime
Timestamp for calculating elapsed time in service (for stats)
bool isFetch
True if the access is a fetch (for execution), so the no-exec permissions must be checked.
bool stage2Req
Flag indicating if a second stage of lookup is required.
TlbEntry walkEntry
Initial walk entry allowing to skip lookup levels.
TranslationRegime regime
Current translation regime.
bool timing
If the mode is timing or atomic.
LongDescriptor longDesc
Long-format descriptor (LPAE and AArch64)
int physAddrRange
Current physical address range in bits.
PASpace ipaSpace
IPA space (Secure vs NonSecure); stage2 only.
bool delayed
Whether the response is delayed in timing mode due to additional lookups.
uint16_t asid
ASID that we're servicing the request under.
L1Descriptor l1Desc
Short-format descriptors.
bool aarch64
If the access is performed in AArch64 state.
BaseMMU::Translation * stage2Tran
A pointer to the stage 2 translation that's in progress.
void doL2DescriptorWrapper()
static LookupLevel toLookupLevel(uint8_t lookup_level_as_int)
enums::ArmLookupLevel LookupLevel
void doL2LongDescriptorWrapper()
void memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr, uint8_t texcb, bool s)
void doL3LongDescriptorWrapper()
const ArmRelease * release
Cached copies of system-level properties.
void doL1DescriptorWrapper()
bool checkVAOutOfRange(Addr addr, int top_bit, int tsz, bool low_range)
Fault generateLongDescFault(ArmFault::FaultSource src)
EventFunctionWrapper doL1DescEvent
EventFunctionWrapper doProcessEvent
static const unsigned REQUESTED
static const unsigned COMPLETED
bool uncacheableWalk() const
Returns true if the table walk should be uncacheable.
Event * LongDescEventByLevel[4]
void memAttrsAArch64(ThreadContext *tc, TlbEntry &te, LongDescriptor &lDescriptor)
void insertPartialTableEntry(LongDescriptor &descriptor)
void doL1LongDescriptorWrapper()
void fetchDescriptor(Addr desc_addr, DescriptorBase &descriptor, int num_bytes, Request::Flags flags, LookupLevel lookup_lvl, Event *event, void(TableWalker::*doDescriptor)())
void drainResume() override
Resume execution after a successful drain.
void doLongDescriptorWrapper(LookupLevel curr_lookup_level)
void doL0LongDescriptorWrapper()
bool pending
If a timing translation is currently in progress.
Port * port
Port shared by the two table walkers.
Fault testWalk(const RequestPtr &walk_req, DomainType domain, LookupLevel lookup_level)
Addr s1MinTxSz(GrainSize tg) const
std::tuple< Addr, Addr, LookupLevel > walkAddresses(Addr ttbr, GrainSize tg, int tsz, int pa_range)
Returns a tuple made of: 1) The address of the first page table 2) The address of the first descripto...
Fault readDataUntimed(ThreadContext *tc, Addr vaddr, Addr desc_addr, uint8_t *data, int num_bytes, Request::Flags flags, BaseMMU::Mode mode, MMU::ArmTranslationType tran_type, bool functional)
TableWalker(const Params &p)
void nextWalk(ThreadContext *tc)
void readDataTimed(ThreadContext *tc, Addr desc_addr, Stage2Walk *translation, int num_bytes, Request::Flags flags)
EventFunctionWrapper doL2DescEvent
gem5::Port & getPort(const std::string &if_name, PortID idx=InvalidPortID) override
Get a port with a given name and index.
std::list< WalkerState * > pendingQueue
Queue of requests that have passed and are waiting because the walker is currently busy.
bool s1TxSzFault(GrainSize tg, int tsz) const
MMU * mmu
The MMU to forward second stage lookups to.
RequestorID requestorId
Requestor id assigned by the MMU.
gem5::ArmISA::TableWalker::TableWalkerStats stats
Port & getTableWalkerPort()
Fault walk(const RequestPtr &req, ThreadContext *tc, uint16_t asid, vmid_t _vmid, BaseMMU::Mode mode, BaseMMU::Translation *_trans, bool timing, bool functional, SecurityState ss, PASpace ipaspace, MMU::ArmTranslationType tran_type, bool stage2, const TlbEntry *walk_entry)
Fault processWalkAArch64()
const bool isStage2
Indicates whether this table walker is part of the stage 2 mmu.
bool uncacheableFromAttrs(uint8_t attrs)
bool checkAddrSizeFaultAArch64(Addr addr, int pa_range)
Returns true if the address exceeds the range permitted by the system-wide setting or by the TCR_ELx ...
void mpamTagTableWalk(RequestPtr &req) const
static uint8_t pageSizeNtoStatBin(uint8_t N)
void completeDrain()
Checks if all state is cleared and if so, completes drain.
void insertTableEntry(DescriptorBase &descriptor, bool longDescriptor)
DrainState drain() override
Provide a default implementation of the drain interface for objects that don't need draining.
std::list< WalkerState * > stateQueues[LookupLevel::Num_ArmLookupLevel]
Queues of requests for all the different lookup levels.
unsigned numSquashable
The number of walks belonging to squashed instructions that can be removed from the pendingQueue per ...
TLB * tlb
TLB that is initiating these table walks.
void memAttrsLPAE(ThreadContext *tc, TlbEntry &te, LongDescriptor &lDescriptor)
void setTestInterface(TlbTestInterface *ti)
void processWalkWrapper()
void memAttrsWalkAArch64(TlbEntry &te)
Addr maxTxSz(GrainSize tg) const
SCTLR sctlr
Cached copy of the sctlr as it existed when translation began.
void stashCurrState(int queue_idx)
Timing mode: saves the currState into the stateQueues.
bool has(ArmExtension ext) const
virtual bool squashed() const
This function is used by the page table walker to determine if it should translate a pending requ...
virtual void finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc, BaseMMU::Mode mode)=0
The ClockedObject class extends the SimObject with a clock and accessor functions to relate ticks to ...
ClockedObjectParams Params
Parameters of ClockedObject.
Tick clockEdge(Cycles cycles=Cycles(0)) const
Determine the tick when a cycle begins, by default the current one, but the argument also enables the...
Cycles is a wrapper class for representing cycle counts, i.e.
virtual std::string name() const
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
void dataStatic(T *p)
Set the data pointer to the following value that should not be freed.
SenderState * senderState
This packet's sender state.
RequestPtr req
A pointer to the original request.
bool cacheResponding() const
Ports are used to interface objects to each other.
The QueuedRequestPort combines two queues, a request queue and a snoop response queue,...
Tick sendAtomic(PacketPtr pkt)
Send an atomic request packet, where the data is moved and the state is updated in zero time,...
void sendFunctional(PacketPtr pkt) const
Send a functional request packet, where the data is instantly updated everywhere in the memory system...
@ PT_WALK
The request is a page table walk.
@ SECURE
The request targets the secure memory space.
@ UNCACHEABLE
The request is to an uncacheable address.
@ NO_ACCESS
The request should not cause a memory access.
ThreadContext is the external interface to all thread state for anything outside of the CPU.
virtual RegVal readMiscReg(RegIndex misc_reg)=0
virtual BaseCPU * getCpuPtr()=0
Derived & ysubname(off_type index, const std::string &subname)
Derived & subname(off_type index, const std::string &name)
Set the subfield name for the given index, and marks this stat to print at the end of simulation.
Derived & flags(Flags _flags)
Set the flags and marks this stat to print at the end of simulation.
void sample(const U &v, int n=1)
Add a value to the distribution n times.
Histogram & init(size_type size)
Set the parameters of this histogram.
Derived & init(size_type _x, size_type _y)
Derived & init(size_type size)
Set this vector to have the given size.
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
constexpr T bits(T val, unsigned first, unsigned last)
Extract the bitfield from position 'first' to 'last' (inclusive) from 'val' and right justify it.
constexpr T mbits(T val, unsigned first, unsigned last)
Mask off the given bits in place like bits() but without shifting.
void signalDrainDone() const
Signal that an object is drained.
DrainState drainState() const
Return the current drain state of an object.
DrainState
Object drain/handover states.
@ Draining
Draining buffers pending serialization/handover.
@ Drained
Buffers drained, ready for serialization/handover.
void schedule(Event &event, Tick when)
void set(Type mask)
Set all flag's bits matching the given mask.
#define panic(...)
This implements a cprintf based panic() function.
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
const Params & params() const
virtual Port & getPort(const std::string &if_name, PortID idx=InvalidPortID)
Get a port with a given name and index.
void tagRequest(ThreadContext *tc, const RequestPtr &req, bool ind)
Tag a memory request with MPAM information.
ByteOrder byteOrder(const ThreadContext *tc)
const PageTableOps * getPageTableOps(GrainSize trans_granule)
bool longDescFormatInUse(ThreadContext *tc)
bool ELIs64(ThreadContext *tc, ExceptionLevel el)
const GrainSize GrainMap_tg1[]
TranslationRegime translationRegime(ThreadContext *tc, ExceptionLevel el)
ExceptionLevel translationEl(TranslationRegime regime)
SecurityState
Security State.
int computeAddrTop(ThreadContext *tc, bool selbit, bool is_instr, TCR tcr, ExceptionLevel el)
Bitfield< 21, 20 > stride
int decodePhysAddrRange64(uint8_t pa_enc)
Returns the number of PA bits corresponding to the specified encoding.
const GrainSize GrainMap_tg0[]
int snsBankedIndex(MiscRegIndex reg, ThreadContext *tc)
PASpace
Physical Address Space.
bool HaveExt(ThreadContext *tc, ArmExtension ext)
Returns true if the provided ThreadContext supports the ArmExtension passed as a second argument.
Addr purifyTaggedAddr(Addr addr, ThreadContext *tc, ExceptionLevel el, TCR tcr, bool is_instr)
Removes the tag from tagged addresses if that mode is enabled.
const FlagsType pdf
Print the percent of the total that this entry represents.
const FlagsType nonan
Don't print if this is NAN.
const FlagsType nozero
Don't print if this is zero.
const FlagsType total
Print the total.
const FlagsType dist
Print the distribution.
std::shared_ptr< FaultBase > Fault
std::shared_ptr< Request > RequestPtr
Tick curTick()
The universal simulation clock.
uint64_t Addr
Address type. This will probably be moved somewhere else in the near future.
int16_t PortID
Port index/ID type, and a symbolic name for an invalid port id.
bool FullSystem
The FullSystem variable can be used to determine the current mode of simulation.
uint64_t Tick
Tick count type.
T htog(T value, ByteOrder guest_byte_order)
constexpr decltype(nullptr) NoFault
statistics::Scalar squashedBefore
statistics::Vector2d requestOrigin
TableWalkerStats(statistics::Group *parent)
statistics::Scalar walksLongDescriptor
statistics::Scalar walksShortDescriptor
statistics::Histogram walkWaitTime
statistics::Vector walksShortTerminatedAtLevel
statistics::Vector pageSizes
statistics::Vector walksLongTerminatedAtLevel
statistics::Histogram walkServiceTime
statistics::Histogram pendingWalks
statistics::Scalar squashedAfter
Helper variables used to implement hierarchical access permissions when the long-desc.
const std::string & name()