50 #include "debug/Checkpoint.hh"
51 #include "debug/Drain.hh"
52 #include "debug/PageTableWalker.hh"
53 #include "debug/TLB.hh"
54 #include "debug/TLBVerbose.hh"
60 using namespace ArmISA;
64 requestorId(p.sys->getRequestorId(this)),
65 port(new Port(this, requestorId)),
66 isStage2(p.is_stage2), tlb(NULL),
67 currState(NULL), pending(false),
68 numSquashable(p.num_squash_per_cycle),
74 doL2DescEvent([this]{ doL2DescriptorWrapper(); }, name()),
75 doL0LongDescEvent([this]{ doL0LongDescriptorWrapper(); }, name()),
76 doL1LongDescEvent([this]{ doL1LongDescriptorWrapper(); }, name()),
77 doL2LongDescEvent([this]{ doL2LongDescriptorWrapper(); }, name()),
78 doL3LongDescEvent([this]{ doL3LongDescriptorWrapper(); }, name()),
79 LongDescEventByLevel { &doL0LongDescEvent, &doL1LongDescEvent,
80 &doL2LongDescEvent, &doL3LongDescEvent },
81 doProcessEvent([this]{ processWalkWrapper(); }, name())
87 ArmSystem *arm_sys = dynamic_cast<ArmSystem *>(p.sys);
89 _physAddrRange = arm_sys->physAddrRange();
90 _haveLargeAsid64 = arm_sys->haveLargeAsid64();
92 _haveLargeAsid64 = false;
112 if (if_name == "port") {
126 tc(nullptr), aarch64(false), el(EL0), physAddrRange(0), req(nullptr),
127 asid(0), vmid(0), isHyp(false), transState(nullptr),
128 vaddr(0), vaddr_tainted(0),
129 sctlr(0), scr(0), cpsr(0), tcr(0),
130 htcr(0), hcr(0), vtcr(0),
131 isWrite(false), isFetch(false), isSecure(false),
132 isUncacheable(false),
133 secureLookup(false), rwTable(false), userTable(false), xnTable(false),
134 pxnTable(false), hpd(false), stage2Req(false),
135 stage2Tran(nullptr), timing(false), functional(false),
137 delayed(false), tableWalker(nullptr)
143 reqQueue, snoopRespQueue),
144 reqQueue(*_walker, *this), snoopRespQueue(*_walker, *this),
151 Addr desc_addr, int size,
164 state->delay = delay;
172 Addr desc_addr, int size,
175 auto pkt = createPacket(desc_addr, size, data, flags, 0, nullptr);
179 handleRespPacket(pkt);
184 Addr desc_addr, int size,
187 auto pkt = createPacket(desc_addr, size, data, flags, delay, nullptr);
189 Tick lat = sendAtomic(pkt);
191 handleRespPacket(pkt, lat);
196 Addr desc_addr, int size,
200 auto pkt = createPacket(desc_addr, size, data, flags, delay, event);
202 schedTimingReq(pkt, curTick());
209 assert(pkt->req->isUncacheable() ||
212 handleRespPacket(pkt);
252 DPRINTF(Drain, "TableWalker done draining, processing drain event\n");
260 bool state_queues_not_empty = false;
262 for (int i = 0; i < LookupLevel::Num_ArmLookupLevel; ++i) {
264 state_queues_not_empty = true;
270 DPRINTF(Drain, "TableWalker not drained\n");
273 DPRINTF(Drain, "TableWalker free, no need to drain\n");
293 bool _stage2Req, const TlbEntry *walk_entry)
295 assert(!(_functional && _timing));
304 DPRINTF(PageTableWalker, "creating new instance of WalkerState\n");
308 } else if (_functional) {
313 "creating functional instance of WalkerState\n");
317 } else if (_timing) {
326 return std::make_shared<ReExec>();
400 assert(release->has(ArmExtension::VIRTUALIZATION));
410 panic("Invalid exception level");
434 if (long_desc_format) {
451 else if (long_desc_format)
474 else if (long_desc_format)
508 if (te && te->partial) {
523 curr_state_copy->tc, curr_state_copy->mode);
525 delete curr_state_copy;
533 unsigned num_squashed = 0;
537 (te && !te->partial))) {
542 DPRINTF(TLB, "Squashing table walk for address %#x\n",
548 std::make_shared<UnimpFault>("Squashed Inst"),
589 const auto irgn0_mask = 0x1;
590 const auto irgn1_mask = 0x40;
596 const bool have_security = release->has(ArmExtension::SECURITY);
598 DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
610 return std::make_shared<PrefetchAbort>(
616 return std::make_shared<DataAbort>(
630 return std::make_shared<PrefetchAbort>(
636 return std::make_shared<DataAbort>(
649 DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr,
654 f = testWalk(l1desc_addr, sizeof(uint32_t),
680 sizeof(uint32_t), flag, LookupLevel::L1,
693 Addr ttbr, ttbr0_max, ttbr1_min, desc_addr;
697 DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n",
709 DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n");
713 LookupLevel::L1 : LookupLevel::L2;
716 DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n");
727 ttbr0_max = (1ULL << 32) -
730 ttbr0_max = (1ULL << 32) - 1;
732 ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz));
743 DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n");
747 return std::make_shared<PrefetchAbort>(
753 return std::make_shared<DataAbort>(
765 if (ttbr0_max < (1ULL << 30))
766 start_lookup_level = LookupLevel::L2;
768 DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
772 return std::make_shared<PrefetchAbort>(
778 return std::make_shared<DataAbort>(
791 if (ttbr1_min >= (1ULL << 31) + (1ULL << 30))
792 start_lookup_level = LookupLevel::L2;
796 return std::make_shared<PrefetchAbort>(
802 return std::make_shared<DataAbort>(
813 if (start_lookup_level == LookupLevel::L1) {
815 desc_addr = mbits(ttbr, 39, n) |
817 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
821 n = (tsz >= 2 ? 14 - tsz : 12);
822 desc_addr = mbits(ttbr, 39, n) |
824 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
854 sizeof(uint64_t), flag, start_lookup_level,
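As a side note on the TTBCR arithmetic excerpted above (lines 727-732): the T0SZ/T1SZ fields carve the 32-bit VA space between TTBR0 and TTBR1. The standalone sketch below reproduces that split; completing the truncated T0SZ expression with "- (1ULL << (32 - t0sz)) - 1" is an assumption, as are the example field values.

#include <cstdint>
#include <cstdio>

int main()
{
    // Example TTBCR.T0SZ / T1SZ values (assumed, for illustration only).
    unsigned t0sz = 1, t1sz = 1;

    // Highest VA translated via TTBR0 and lowest VA translated via TTBR1,
    // mirroring the ttbr0_max / ttbr1_min expressions in the excerpt.
    uint64_t ttbr0_max = t0sz
        ? (1ULL << 32) - (1ULL << (32 - t0sz)) - 1   // assumed completion
        : (1ULL << 32) - 1;
    uint64_t ttbr1_min = (1ULL << 32) - (1ULL << (32 - t1sz));

    std::printf("TTBR0 region: [0, %#llx], TTBR1 region: [%#llx, 2^32)\n",
                (unsigned long long)ttbr0_max,
                (unsigned long long)ttbr1_min);
    return 0;
}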
874 int in_max = (have_lva && tg == Grain64KB) ? 52 : 48;
875 int in_min = 64 - (tg == Grain64KB ? 47 : 48);
877 return tsz > in_max || tsz < in_min || (low_range ?
894 DPRINTF(TLB, "Beginning table walk for address %#llx, TCR: %#llx\n",
911 bool vaddr_fault = false;
928 DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
935 top_bit, tg, tsz, true);
941 DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
948 top_bit, tg, tsz, false);
963 DPRINTF(TLB, " - Selecting VSTTBR_EL2 (AArch64 stage 2)\n");
966 DPRINTF(TLB, " - Selecting VTTBR_EL2 (AArch64 stage 2)\n");
977 DPRINTF(TLB, " - Selecting TTBR0_EL1 (AArch64)\n");
984 top_bit, tg, tsz, true);
990 DPRINTF(TLB, " - Selecting TTBR1_EL1 (AArch64)\n");
997 top_bit, tg, tsz, false);
1012 DPRINTF(TLB, " - Selecting TTBR0_EL2 (AArch64)\n");
1020 top_bit, tg, tsz, true);
1027 DPRINTF(TLB, " - Selecting TTBR1_EL2 (AArch64)\n");
1034 top_bit, tg, tsz, false);
1049 DPRINTF(TLB, " - Selecting TTBR0_EL3 (AArch64)\n");
1056 top_bit, tg, tsz, true);
1074 f = std::make_shared<PrefetchAbort>(
1079 f = std::make_shared<DataAbort>(
1099 warn_once("Reserved granule size requested; gem5's IMPLEMENTATION "
1100 "DEFINED behavior takes this to mean 4KB granules\n");
1112 auto [table_addr, desc_addr, start_lookup_level] = walkAddresses(
1113 ttbr, tg, tsz, pa_range);
1118 DPRINTF(TLB, "Address size fault before any lookup\n");
1121 f = std::make_shared<PrefetchAbort>(
1127 f = std::make_shared<DataAbort>(
1180 sizeof(uint64_t), flag, start_lookup_level,
1184 sizeof(uint64_t), flag, -1, NULL,
1192 std::tuple<Addr, Addr, TableWalker::LookupLevel>
1197 LookupLevel first_level = LookupLevel::Num_ArmLookupLevel;
1198 Addr table_addr = 0;
1205 "Walk Cache hit: va=%#x, level=%d, table address=%#x\n",
1213 table_addr = entry->pfn;
1219 ptops->firstLevel(64 - tsz);
1220 panic_if(first_level == LookupLevel::Num_ArmLookupLevel,
1221 "Table walker couldn't find lookup level\n");
1224 int base_addr_lo = 3 + tsz - stride * (3 - first_level) - tg;
1226 if (pa_range == 52) {
1227 int z = (base_addr_lo < 6) ? 6 : base_addr_lo;
1228 table_addr = mbits(ttbr, 47, z);
1229 table_addr |= (bits(ttbr, 5, 2) << 48);
1231 table_addr = mbits(ttbr, 47, base_addr_lo);
1235 desc_addr = table_addr + ptops->index(currState->vaddr, first_level, tsz);
1237 return std::make_tuple(table_addr, desc_addr, first_level);
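A minimal standalone sketch of the table/descriptor address arithmetic visible in the walkAddresses() excerpt above, for a 4KB granule and a 39-bit VA space. The stride = tg - 3 definition and the concrete register values are assumptions (they are not part of the excerpt); bits()/mbits() are reimplemented locally with the semantics documented further down.

#include <cstdint>
#include <cstdio>

using Addr = uint64_t;

// Local stand-ins for gem5's bits()/mbits() helpers.
static Addr bits(Addr val, unsigned first, unsigned last)
{ return (val >> last) & ((Addr(1) << (first - last + 1)) - 1); }

static Addr mbits(Addr val, unsigned first, unsigned last)
{ return val & ((Addr(1) << (first + 1)) - (Addr(1) << last)); }

int main()
{
    const int tg = 12;           // Grain4KB: 12 offset bits per page
    const int tsz = 39;          // 39-bit VA input size (T0SZ = 25)
    const int first_level = 1;   // 4KB granule, 39-bit VA starts at L1
    const int stride = tg - 3;   // assumed: 9 descriptor index bits/level

    // Same expression as in the excerpt: lowest significant bit of the
    // table base address held in the TTBR.
    int base_addr_lo = 3 + tsz - stride * (3 - first_level) - tg;   // = 12

    Addr ttbr = 0x80001000;                       // example value (assumed)
    Addr table_addr = mbits(ttbr, 47, base_addr_lo);

    // First-level descriptor: for L1 with 4KB granules the index field is
    // VA[38:30], and each descriptor is 8 bytes.
    Addr vaddr = 0x0000004012345678ULL;           // example VA (assumed)
    Addr desc_addr = table_addr + bits(vaddr, 38, 30) * 8;

    std::printf("table=%#llx desc=%#llx\n",
                (unsigned long long)table_addr,
                (unsigned long long)desc_addr);
    return 0;
}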
1242 uint8_t texcb, bool s)
1246 DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s);
1247 te.shareable = false;
1248 te.nonCacheable = false;
1249 te.outerShareable = false;
1253 te.nonCacheable = true;
1255 te.shareable = true;
1260 te.nonCacheable = true;
1262 te.shareable = true;
1270 te.outerAttrs = bits(texcb, 1, 0);
1276 te.outerAttrs = bits(texcb, 1, 0);
1279 te.nonCacheable = true;
1283 te.outerAttrs = bits(texcb, 1, 0);
1286 panic("Reserved texcb value!\n");
1289 panic("Implementation-defined texcb value!\n");
1298 te.nonCacheable = true;
1300 te.shareable = false;
1305 panic("Reserved texcb value!\n");
1310 if (bits(texcb, 1,0) == 0 || bits(texcb, 3,2) == 0)
1311 te.nonCacheable = true;
1312 te.innerAttrs = bits(texcb, 1, 0);
1313 te.outerAttrs = bits(texcb, 3, 2);
1316 panic("More than 32 states for 5 bits?\n");
1324 DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr);
1325 uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0;
1326 switch(bits(texcb, 2,0)) {
1331 te.outerShareable = (prrr.nos0 == 0);
1337 te.outerShareable = (prrr.nos1 == 0);
1343 te.outerShareable = (prrr.nos2 == 0);
1349 te.outerShareable = (prrr.nos3 == 0);
1355 te.outerShareable = (prrr.nos4 == 0);
1361 te.outerShareable = (prrr.nos5 == 0);
1364 panic("Imp defined type\n");
1369 te.outerShareable = (prrr.nos7 == 0);
1375 DPRINTF(TLBVerbose, "StronglyOrdered\n");
1377 te.nonCacheable = true;
1380 te.shareable = true;
1383 DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n",
1384 prrr.ds1, prrr.ds0, s);
1386 te.nonCacheable = true;
1390 te.shareable = true;
1392 te.shareable = true;
1395 DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n",
1396 prrr.ns1, prrr.ns0, s);
1399 te.shareable = true;
1401 te.shareable = true;
1404 panic("Reserved type");
1410 te.nonCacheable = true;
1426 te.nonCacheable = true;
1441 DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, "
1443 te.shareable, te.innerAttrs, te.outerAttrs);
1444 te.setAttributes(false);
1454 uint8_t sh = l_descriptor.sh();
1459 uint8_t attr_3_2 = (attr >> 2) & 0x3;
1460 uint8_t attr_1_0 = attr & 0x3;
1462 DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh);
1464 if (attr_3_2 == 0) {
1468 te.innerAttrs = attr_1_0 == 0 ? 1 : 3;
1469 te.nonCacheable = true;
1472 te.outerAttrs = attr_3_2 == 1 ? 0 :
1473 attr_3_2 == 2 ? 2 : 1;
1474 te.innerAttrs = attr_1_0 == 1 ? 0 :
1475 attr_1_0 == 2 ? 6 : 5;
1476 te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1);
1479 uint8_t attrIndx = l_descriptor.attrIndx();
1487 attr = (mair >> (8 * (attrIndx % 4))) & 0xff;
1488 uint8_t attr_7_4 = bits(attr, 7, 4);
1489 uint8_t attr_3_0 = bits(attr, 3, 0);
1490 DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx, sh, attr);
1495 te.nonCacheable = false;
1500 if (attr_3_0 == 0x0)
1502 else if (attr_3_0 == 0x4)
1505 panic("Unpredictable behavior\n");
1506 te.nonCacheable = true;
1513 if (attr_3_0 == 0x4)
1515 te.nonCacheable = true;
1516 else if (attr_3_0 < 0x8)
1517 panic("Unpredictable behavior\n");
1527 if (attr_7_4 & 0x4) {
1528 te.outerAttrs = (attr_7_4 & 1) ? 1 : 3;
1530 te.outerAttrs = 0x2;
1534 if (attr_3_0 != 0x4 && attr_3_0 < 0x8)
1535 panic("Unpredictable behavior\n");
1538 panic("Unpredictable behavior\n");
1544 te.innerAttrs = 0x1;
1547 te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0;
1559 te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7;
1562 panic("Unpredictable behavior\n");
1567 te.outerShareable = sh == 2;
1568 te.shareable = (sh & 0x2) ? true : false;
1569 te.setAttributes(true);
1570 te.attributes |= (uint64_t) attr << 56;
1580 uint8_t sh = l_descriptor.sh();
1584 uint8_t attr_hi = (attr >> 2) & 0x3;
1585 uint8_t attr_lo = attr & 0x3;
1587 DPRINTF(TLBVerbose, "memAttrsAArch64 MemAttr:%#x sh:%#x\n", attr, sh);
1593 te.innerAttrs = attr_lo == 0 ? 1 : 3;
1594 te.nonCacheable = true;
1597 te.outerAttrs = attr_hi == 1 ? 0 :
1598 attr_hi == 2 ? 2 : 1;
1599 te.innerAttrs = attr_lo == 1 ? 0 :
1600 attr_lo == 2 ? 6 : 5;
1603 te.nonCacheable = (attr_hi == 1) || (attr_hi == 2) ||
1604 (attr_lo == 1) || (attr_lo == 2);
1607 uint8_t attrIndx = l_descriptor.attrIndx();
1609 DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh);
1626 panic("Invalid exception level");
1631 attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
1639 te.nonCacheable = false;
1641 te.nonCacheable = true;
1649 te.nonCacheable = true;
1654 warn_if(!attr_hi, "Unpredictable behavior");
1660 te.nonCacheable = true;
1663 te.shareable = sh == 2;
1664 te.outerShareable = (sh & 0x2) ? true : false;
1666 te.attributes = ((uint64_t) attr << 56) |
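A quick standalone illustration of the MAIR indexing shown in the memAttrsAArch64() excerpt above: each AttrIndx selects one 8-bit Attr<n> field of MAIR_ELx. The bits() helper is reimplemented locally and the MAIR value is an arbitrary example.

#include <cstdint>
#include <cstdio>

static uint64_t bits(uint64_t val, unsigned first, unsigned last)
{ return (val >> last) & ((uint64_t(1) << (first - last + 1)) - 1); }

int main()
{
    uint64_t mair = 0x00000000004404ffULL;   // example MAIR_EL1 value (assumed)
    for (unsigned attrIndx = 0; attrIndx < 8; ++attrIndx) {
        // Same expression as line 1631 above.
        uint8_t attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
        std::printf("Attr%u = %#04x\n", attrIndx, (unsigned)attr);
    }
    return 0;
}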
1683 DPRINTF(TLB, "L1 descriptor for %#x is %#x\n",
1696 DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
1699 std::make_shared<PrefetchAbort>(
1706 std::make_shared<DataAbort>(
1729 panic("Haven't implemented supersections\n");
1738 DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n",
1775 panic("A new type in a 2 bit field?\n");
1783 return std::make_shared<PrefetchAbort>(
1789 return std::make_shared<DataAbort>(
1809 DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n",
1816 DPRINTF(PageTableWalker, "Analyzing L%d descriptor: %#llx, pxn: %d, "
1817 "xn: %d, ap: %d, af: %d, type: %d\n",
1826 DPRINTF(PageTableWalker, "Analyzing L%d descriptor: %#llx, type: %d\n",
1836 DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
1855 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1862 DPRINTF(TLB, "L%d descriptor causing Access Fault\n",
1892 DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
1901 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1937 Event *event = NULL;
1939 case LookupLevel::L1:
1941 case LookupLevel::L2:
1942 case LookupLevel::L3:
1946 panic("Wrong lookup level in table walk\n");
1952 sizeof(uint64_t), flag, -1, event,
1960 panic("A new type in a 2 bit field?\n");
1974 DPRINTF(TLB, "L2 descriptor for %#x is %#x\n",
1981 DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
2006 DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
2032 DPRINTF(PageTableWalker, "L1 Desc object host addr: %p\n",
2034 DPRINTF(PageTableWalker, "L1 Desc object data: %08x\n",
2037 DPRINTF(PageTableWalker, "calling doL1Descriptor for vaddr:%#x\n",
2060 DPRINTF(PageTableWalker, "calling translateTiming again\n");
2093 DPRINTF(PageTableWalker, "calling doL2Descriptor for vaddr:%#x\n",
2104 DPRINTF(PageTableWalker, "calling translateTiming again\n");
2163 DPRINTF(PageTableWalker, "calling doLongDescriptor for vaddr:%#x\n",
2183 DPRINTF(PageTableWalker, "calling translateTiming again\n");
2200 if (curr_lookup_level >= LookupLevel::Num_ArmLookupLevel - 1)
2201 panic("Max. number of lookups already reached in table walk\n");
2226 "Fetching descriptor at address: 0x%x stage2Req: %d\n",
2241 fault = tran->fault;
2254 if (queueIndex >= 0) {
2255 DPRINTF(PageTableWalker, "Adding to walker fifo: "
2256 "queue size before adding: %d\n",
2262 (this->*doDescriptor)();
2269 if (queueIndex >= 0) {
2270 DPRINTF(PageTableWalker, "Adding to walker fifo: "
2271 "queue size before adding: %d\n",
2280 (this->*doDescriptor)();
2283 (this->*doDescriptor)();
2292 const bool have_security = release->has(ArmExtension::SECURITY);
2297 te.longDescFormat = true;
2305 te.size = (1ULL << te.N) - 1;
2311 te.type = TypeTLB::unified;
2324 DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
2326 DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
2327 "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
2328 te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp,
2329 te.nonCacheable, te.ns);
2330 DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
2341 const bool have_security = release->has(ArmExtension::SECURITY);
2346 te.longDescFormat = long_descriptor;
2352 te.size = (1<<te.N) - 1;
2353 te.pfn = descriptor.pfn();
2358 te.xn = descriptor.xn();
2373 if (long_descriptor) {
2382 te.hap = l_descriptor.ap();
2392 te.ap = descriptor.ap();
2399 DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
2401 DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
2402 "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
2403 te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp,
2404 te.nonCacheable, te.ns);
2405 DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
2420 switch (lookup_level_as_int) {
2421 case LookupLevel::L1:
2422 return LookupLevel::L1;
2423 case LookupLevel::L2:
2424 return LookupLevel::L2;
2425 case LookupLevel::L3:
2426 return LookupLevel::L3;
2428 panic("Invalid lookup level conversion");
2475 panic("unknown page size");
2488 auto req = std::make_shared<Request>();
2536 : data(_data), numBytes(0), event(_event), parent(_parent),
2539 req = std::make_shared<Request>();
2558 parent.getTableWalkerPort().sendTimingReq(
2559 req->getPaddr(), numBytes, data, req->getFlags(),
2571 parent.mmu->translateTiming(req, tc, this, mode, tranType, true);
2575 : statistics::Group(parent),
2576 ADD_STAT(walks, statistics::units::Count::get(),
2577 "Table walker walks requested"),
2578 ADD_STAT(walksShortDescriptor, statistics::units::Count::get(),
2579 "Table walker walks initiated with short descriptors"),
2580 ADD_STAT(walksLongDescriptor, statistics::units::Count::get(),
2581 "Table walker walks initiated with long descriptors"),
2582 ADD_STAT(walksShortTerminatedAtLevel, statistics::units::Count::get(),
2583 "Level at which table walker walks with short descriptors "
2585 ADD_STAT(walksLongTerminatedAtLevel, statistics::units::Count::get(),
2586 "Level at which table walker walks with long descriptors "
2588 ADD_STAT(squashedBefore, statistics::units::Count::get(),
2589 "Table walks squashed before starting"),
2590 ADD_STAT(squashedAfter, statistics::units::Count::get(),
2591 "Table walks squashed after completion"),
2593 "Table walker wait (enqueue to first request) latency"),
2594 ADD_STAT(walkServiceTime, statistics::units::Tick::get(),
2595 "Table walker service (enqueue to completion) latency"),
2597 "Table walker pending requests distribution"),
2598 ADD_STAT(pageSizes, statistics::units::Count::get(),
2599 "Table walker page sizes translated"),
2600 ADD_STAT(requestOrigin, statistics::units::Count::get(),
2601 "Table walker requests started/completed, data/inst")
EventFunctionWrapper doL1DescEvent
Fault testWalk(Addr pa, Addr size, TlbEntry::DomainType domain, LookupLevel lookup_level, bool stage2)
Tick curTick()
The universal simulation clock.
virtual std::string dbgHeader() const =0
int16_t PortID
Port index/ID type, and a symbolic name for an invalid port id.
virtual bool squashed() const
This function is used by the page table walker to determine if it should translate a pending request or if the underlying request has been squashed.
void doLongDescriptorWrapper(LookupLevel curr_lookup_level)
enums::ArmLookupLevel LookupLevel
Derived & ysubname(off_type index, const std::string &subname)
bool isSecure
If the access comes from the secure state.
virtual Port & getPort(const std::string &if_name, PortID idx=InvalidPortID)
Get a port with a given name and index.
Event * LongDescEventByLevel[4]
statistics::Scalar walksLongDescriptor
virtual RegVal readMiscReg(RegIndex misc_reg)=0
constexpr decltype(nullptr) NoFault
TlbEntry walkEntry
Initial walk entry that allows lookup levels to be skipped.
bool ELIs64(ThreadContext *tc, ExceptionLevel el)
bool pxnTable() const
Is privileged execution allowed on subsequent lookup levels?
SCTLR sctlr
Cached copy of the sctlr as it existed when translation began.
ExceptionLevel el
Current exception level.
void sendAtomicReq(Addr desc_addr, int size, uint8_t *data, Request::Flags flag, Tick delay)
uint8_t ap() const override
Three bit access protection flags.
LongDescriptor longDesc
Long-format descriptor (LPAE and AArch64)
EventFunctionWrapper doProcessEvent
void readDataTimed(ThreadContext *tc, Addr desc_addr, Stage2Walk *translation, int num_bytes, Request::Flags flags)
void completeDrain()
Checks if all state is cleared and if so, completes drain.
void memAttrsAArch64(ThreadContext *tc, TlbEntry &te, LongDescriptor &lDescriptor)
statistics::Vector walksLongTerminatedAtLevel
void doL1LongDescriptorWrapper()
std::list< WalkerState * > pendingQueue
Queue of requests that have been accepted but are waiting because the walker is currently busy.
const ArmRelease * release
Cached copies of system-level properties.
Addr vaddr
The virtual address that is being translated with tagging removed.
DrainState drainState() const
Return the current drain state of an object.
RequestorID requestorId
Requestor id assigned by the MMU.
Port & getTableWalkerPort()
void nextWalk(ThreadContext *tc)
Derived & init(size_type _x, size_type _y)
The QueuedRequestPort combines two queues, a request queue and a snoop response queue,...
int decodePhysAddrRange64(uint8_t pa_enc)
Returns the number of physical address bits corresponding to the given encoding.
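A sketch of the PA-range decoding this helper performs. The mapping below is the architectural ID_AA64MMFR0_EL1.PARange / TCR IPS encoding; how gem5 handles reserved encodings is an assumption, not taken from the source.

#include <cstdint>

// Architectural PARange/IPS encoding -> physical address width in bits.
int decodePhysAddrRange64Sketch(uint8_t pa_enc)
{
    switch (pa_enc) {
      case 0x0: return 32;
      case 0x1: return 36;
      case 0x2: return 40;
      case 0x3: return 42;
      case 0x4: return 44;
      case 0x5: return 48;
      case 0x6: return 52;   // requires FEAT_LPA
      default:  return 48;   // reserved encoding: assumption only
    }
}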
const PageTableOps * getPageTableOps(GrainSize trans_granule)
Addr paddr() const
Return the physical address of the entry.
const GrainSize GrainMap_tg0[]
void set(Type mask)
Set all flag's bits matching the given mask.
RequestPtr req
A pointer to the original request.
This translation class is used to trigger the data fetch once a timing translation returns the translated physical address.
statistics::Vector2d requestOrigin
@ NO_ACCESS
The request should not cause a memory access.
virtual bool xn() const =0
Tick sendAtomic(PacketPtr pkt)
Send an atomic request packet, where the data is moved and the state is updated in zero time,...
bool secure(bool have_security, WalkerState *currState) const override
Returns true if this entry targets the secure physical address map.
Addr nextDescAddr(Addr va) const
Return the address of the next descriptor.
const FlagsType nozero
Don't print if this is zero.
virtual uint8_t texcb() const
bool secureLookup
Helper variables used to implement hierarchical access permissions when the long-descriptor format is in use (LPAE / AArch64).
@ SECURE
The request targets the secure memory space.
statistics::Scalar squashedAfter
bool cacheResponding() const
Derived & subname(off_type index, const std::string &name)
Set the subfield name for the given index, and marks this stat to print at the end of simulation.
uint32_t data
The raw bits of the entry.
const GrainSize GrainMap_tg1[]
ByteOrder byteOrder(const ThreadContext *tc)
void schedule(Event &event, Tick when)
uint32_t data
The raw bits of the entry.
const FlagsType nonan
Don't print if this is NAN.
bool fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes, Request::Flags flags, int queueIndex, Event *event, void(TableWalker::*doDescriptor)())
uint8_t ap() const override
2-bit access protection flags
statistics::Histogram walkServiceTime
void handleResp(TableWalkerState *state, Addr addr, Addr size, Tick delay=0)
constexpr T mbits(T val, unsigned first, unsigned last)
Mask off the given bits in place like bits() but without shifting.
void multiInsert(TlbEntry &pte)
Insert a PTE in the current TLB and in the higher levels.
void setVirt(Addr vaddr, int size, Request::Flags flags, int requestorId)
BaseMMU::Translation * stage2Tran
A pointer to the stage 2 translation that's in progress.
Fault walk(const RequestPtr &req, ThreadContext *tc, uint16_t asid, vmid_t _vmid, bool hyp, BaseMMU::Mode mode, BaseMMU::Translation *_trans, bool timing, bool functional, bool secure, MMU::ArmTranslationType tran_type, bool stage2, const TlbEntry *walk_entry)
Addr vaddr_tainted
The virtual address that is being translated.
Fault testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode, TlbEntry::DomainType domain, LookupLevel lookup_level, bool stage2)
MMU::ArmTranslationType tranType
The translation type that has been requested.
void sample(const U &v, int n=1)
Add a value to the distribution n times.
Port(TableWalker *_walker, RequestorID id)
gem5::Port & getPort(const std::string &if_name, PortID idx=InvalidPortID) override
Get a port with a given name and index.
const FlagsType dist
Print the distribution.
uint8_t ap() const override
Three bit access protection flags.
static const unsigned COMPLETED
bool functional
If the atomic mode should be functional.
statistics::Scalar walksShortDescriptor
Cycles is a wrapper class for representing cycle counts, i.e. a relative difference between two clock edges.
Addr nextTableAddr() const
Return the address of the next page table.
static const unsigned REQUESTED
const FlagsType pdf
Print the percent of the total that this entry represents.
Addr purifyTaggedAddr(Addr addr, ThreadContext *tc, ExceptionLevel el, TCR tcr, bool is_instr)
Removes the tag from tagged addresses if that mode is enabled.
TableWalkerStats(statistics::Group *parent)
void handleRespPacket(PacketPtr pkt, Tick delay=0)
uint16_t asid
ASID that we're servicing the request under.
void dataStatic(T *p)
Set the data pointer to the following value that should not be freed.
bool xnTable() const
Is execution allowed on subsequent lookup levels?
bool has(ArmExtension ext) const
bool secureTable() const
Whether the subsequent levels of lookup are secure.
DrainState
Object drain/handover states.
SCTLR sctlr
Cached copy of the sctlr as it existed when translation began.
uint64_t data
The raw bits of the entry.
void sendFunctional(PacketPtr pkt) const
Send a functional request packet, where the data is instantly updated everywhere in the memory system...
std::tuple< Addr, Addr, LookupLevel > walkAddresses(Addr ttbr, GrainSize tg, int tsz, int pa_range)
Returns a tuple made of: 1) The address of the first page table 2) The address of the first descriptor within the table 3) The initial lookup level.
uint8_t sh() const
2-bit shareability field
@ UNCACHEABLE
The request is to an uncacheable address.
uint64_t getRawData() const override
ThreadContext is the external interface to all thread state for anything outside of the CPU.
virtual std::string name() const
void finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc, BaseMMU::Mode mode)
std::shared_ptr< FaultBase > Fault
void sendFunctionalReq(Addr desc_addr, int size, uint8_t *data, Request::Flags flag)
FaultSource
Generic fault source enums used to index into {short/long/aarch64}DescFaultSources[] to get the actual Fault.
const Params & params() const
TlbEntry::DomainType domain() const override
Domain Client/Manager: ARM DDI 0406B: B3-31.
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Fault processWalkAArch64()
static uint8_t pageSizeNtoStatBin(uint8_t N)
bool delayed
Whether the response is delayed in timing mode due to additional lookups.
std::list< WalkerState * > stateQueues[LookupLevel::Num_ArmLookupLevel]
Queues of requests for all the different lookup levels.
A Packet is used to encapsulate a transfer between two objects in the memory system (e.g. the L1 and L2 cache).
static ExceptionLevel tranTypeEL(CPSR cpsr, ArmTranslationType type)
Determine the EL to use for the purpose of a translation given a specific translation type.
void doL0LongDescriptorWrapper()
ThreadContext * tc
Thread context that we're doing the walk for.
int physAddrRange
Current physical address range in bits.
ProbePointArg< PacketInfo > Packet
Packet probe point.
uint64_t Tick
Tick count type.
Tick startTime
Timestamp for calculating elapsed time in service (for stats)
Addr l2Addr() const
Address of L2 descriptor if it exists.
EntryType type() const
Return the descriptor type.
std::shared_ptr< Request > RequestPtr
virtual Addr pfn() const =0
void insertPartialTableEntry(LongDescriptor &descriptor)
EventFunctionWrapper doL2DescEvent
virtual uint8_t offsetBits() const =0
statistics::Vector pageSizes
bool isFetch
If the access is a fetch (for execution), meaning no-exec permissions must be checked.
Port * port
Port shared by the two table walkers.
virtual uint64_t getRawData() const =0
void translateTiming(ThreadContext *tc)
void doL2LongDescriptorWrapper()
void doL1DescriptorWrapper()
RequestPtr req
Request that is currently being serviced.
T htog(T value, ByteOrder guest_byte_order)
Fault translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode) override
void processWalkWrapper()
unsigned numSquashable
The number of walks belonging to squashed instructions that can be removed from the pendingQueue per cycle.
BaseMMU::Translation * transState
Translation state for delayed requests.
virtual bool secure(bool have_security, WalkerState *currState) const =0
GrainSize grainSize
Width of the granule size in bits.
constexpr T bits(T val, unsigned first, unsigned last)
Extract the bitfield from position 'first' to 'last' (inclusive) from 'val' and right justify it.
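To make the bits()/mbits() semantics above concrete, here is a tiny self-contained example using local reimplementations (the real helpers live in gem5's base/bitfield.hh):

#include <cassert>
#include <cstdint>

static uint64_t bits(uint64_t val, unsigned first, unsigned last)
{ return (val >> last) & ((uint64_t(1) << (first - last + 1)) - 1); }

static uint64_t mbits(uint64_t val, unsigned first, unsigned last)
{ return val & ((uint64_t(1) << (first + 1)) - (uint64_t(1) << last)); }

int main()
{
    assert(bits(0xABCDULL, 11, 4) == 0xBC);    // extracted, right-justified
    assert(mbits(0xABCDULL, 11, 4) == 0xBC0);  // masked in place, not shifted
    return 0;
}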
TableWalker * tableWalker
HTCR htcr
Cached copy of the htcr as it existed when translation began.
@ Drained
Buffers drained, ready for serialization/handover.
bool isWrite
If the access is a write.
bool checkVAddrSizeFaultAArch64(Addr addr, int top_bit, GrainSize granule, int tsz, bool low_range)
VTCR_t vtcr
Cached copy of the vtcr as it existed when translation began.
ExceptionLevel s1TranslationRegime(ThreadContext *tc, ExceptionLevel el)
Fault generateLongDescFault(ArmFault::FaultSource src)
uint64_t Addr
Address type. This will probably be moved somewhere else in the near future.
Long-descriptor format (LPAE)
Histogram & init(size_type size)
Set the parameters of this histogram.
SenderState * senderState
This packet's sender state.
const std::string & name()
statistics::Histogram pendingWalks
bool isSecure(ThreadContext *tc)
statistics::Histogram walkWaitTime
The ClockedObject class extends the SimObject with a clock and accessor functions to relate ticks to ...
Tick clockEdge(Cycles cycles=Cycles(0)) const
Determine the tick when a cycle begins, by default the current one, but the argument also enables the...
LookupLevel lookupLevel
Current lookup level for this descriptor.
virtual bool global(WalkerState *currState) const =0
bool supersection() const
Is the page a Supersection (16 MiB)?
bool aarch64
True if the current lookup is performed in AArch64 state.
bool af() const
Returns true if the access flag (AF) is set.
void signalDrainDone() const
Signal that an object is drained.
bool FullSystem
The FullSystem variable can be used to determine the current mode of simulation.
bool stage2Req
Flag indicating if a second stage of lookup is required.
void translateTiming(const RequestPtr &req, ThreadContext *tc, Translation *translation, Mode mode) override
void sendTimingReq(Addr desc_addr, int size, uint8_t *data, Request::Flags flag, Tick delay, Event *event)
#define warn_if(cond,...)
Conditional warning macro that checks the supplied condition and only prints a warning if the conditi...
Fault readDataUntimed(ThreadContext *tc, Addr vaddr, Addr desc_addr, uint8_t *data, int num_bytes, Request::Flags flags, BaseMMU::Mode mode, MMU::ArmTranslationType tran_type, bool functional)
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
uint8_t rwTable() const
R/W protection flag for subsequent levels of lookup.
TableWalker(const Params &p)
Ports are used to interface objects to each other.
bool hpd
Hierarchical access permission disable.
uint8_t memAttr() const
Memory attributes, only used by stage 2 translations.
DrainState drain() override
Draining is the process of clearing out the states of SimObjects. These are the SimObjects that are pa...
TranslationGenPtr translateFunctional(Addr start, Addr size, ThreadContext *tc, Mode mode, Request::Flags flags) override
Returns a translation generator for a region of virtual addresses, instead of directly translating a specific address.
void drainResume() override
Resume execution after a successful drain.
TlbEntry::DomainType domain() const override
void memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr, uint8_t texcb, bool s)
MMU * mmu
The MMU to forward second stage lookups to.
void doL2DescriptorWrapper()
uint8_t attrIndx() const
Attribute index.
void memAttrsLPAE(ThreadContext *tc, TlbEntry &te, LongDescriptor &lDescriptor)
bool isUncacheable
True if table walks are uncacheable (for table descriptors)
bool longDescFormatInUse(ThreadContext *tc)
virtual void annotate(AnnotationIDs id, uint64_t val)
uint8_t offsetBits() const override
Return the bit width of the page/block offset.
std::string dbgHeader() const override
bool checkAddrSizeFaultAArch64(Addr addr, int pa_range)
Returns true if the address exceeds the range permitted by the system-wide setting or by the TCR_ELx ...
@ PT_WALK
The request is a page table walk.
const ArmRelease * release() const
int snsBankedIndex(MiscRegIndex reg, ThreadContext *tc)
virtual BaseCPU * getCpuPtr()=0
bool invalid() const
Is the entry invalid.
const bool isStage2
Indicates whether this table walker is part of the stage 2 mmu.
bool recvTimingResp(PacketPtr pkt) override
Receive a timing response from the peer.
ClockedObjectParams Params
Parameters of ClockedObject.
Derived & flags(Flags _flags)
Set the flags and marks this stat to print at the end of simulation.
void doL3LongDescriptorWrapper()
gem5::ArmISA::TableWalker::TableWalkerStats stats
virtual uint8_t ap() const =0
HCR hcr
Cached copy of the hcr as it existed when translation began.
bool aarch64
If the access is performed in AArch64 state.
static LookupLevel toLookupLevel(uint8_t lookup_level_as_int)
bool timing
If the mode is timing or atomic.
int computeAddrTop(ThreadContext *tc, bool selbit, bool is_instr, TCR tcr, ExceptionLevel el)
const FlagsType total
Print the total.
virtual bool shareable() const
Derived & init(size_type size)
Set this vector to have the given size.
Bitfield< 21, 20 > stride
uint8_t userTable() const
User/privileged mode protection flag for subsequent levels of lookup.
bool hasWalkCache() const
virtual void finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc, BaseMMU::Mode mode)=0
bool HaveExt(ThreadContext *tc, ArmExtension ext)
Returns true if the provided ThreadContext supports the ArmExtension passed as a second argument.
virtual TlbEntry::DomainType domain() const =0
Fault fault
The fault that we are going to return.
bool xn() const override
Is execution allowed on this mapping?
TlbEntry * lookup(Addr vpn, uint16_t asn, vmid_t vmid, bool hyp, bool secure, bool functional, bool ignore_asn, ExceptionLevel target_el, bool in_host, bool stage2, BaseMMU::Mode mode)
Lookup an entry in the TLB.
TLB * tlb
TLB that is initiating these table walks.
@ Draining
Draining buffers pending serialization/handover.
L1Descriptor l1Desc
Short-format descriptors.
Stage2Walk(TableWalker &_parent, uint8_t *_data, Event *_event, Addr vaddr, BaseMMU::Mode mode, MMU::ArmTranslationType tran_type)
statistics::Vector walksShortTerminatedAtLevel
PacketPtr createPacket(Addr desc_addr, int size, uint8_t *data, Request::Flags flag, Tick delay, Event *event)
BaseMMU::Mode mode
Save mode for use in delayed response.
bool pxn() const
Is privileged execution allowed on this mapping? (LPAE only)
#define panic(...)
This implements a cprintf based panic() function.
bool pending
If a timing translation is currently in progress.
void insertTableEntry(DescriptorBase &descriptor, bool longDescriptor)
statistics::Scalar squashedBefore