50 #include "debug/Checkpoint.hh"
51 #include "debug/Drain.hh"
52 #include "debug/PageTableWalker.hh"
53 #include "debug/TLB.hh"
54 #include "debug/TLBVerbose.hh"
60 using namespace ArmISA;
64 requestorId(p.sys->getRequestorId(this)),
65 port(new Port(this, requestorId)),
66 isStage2(p.is_stage2), tlb(NULL),
67 currState(NULL), pending(false),
68 numSquashable(p.num_squash_per_cycle),
74 doL2DescEvent([this]{ doL2DescriptorWrapper(); }, name()),
75 doL0LongDescEvent([this]{ doL0LongDescriptorWrapper(); }, name()),
76 doL1LongDescEvent([this]{ doL1LongDescriptorWrapper(); }, name()),
77 doL2LongDescEvent([this]{ doL2LongDescriptorWrapper(); }, name()),
78 doL3LongDescEvent([this]{ doL3LongDescriptorWrapper(); }, name()),
79 LongDescEventByLevel { &doL0LongDescEvent, &doL1LongDescEvent,
80 &doL2LongDescEvent, &doL3LongDescEvent },
81 doProcessEvent([this]{ processWalkWrapper(); }, name())
87 ArmSystem *arm_sys = dynamic_cast<ArmSystem *>(p.sys);
89 _physAddrRange = arm_sys->physAddrRange();
90 _haveLargeAsid64 = arm_sys->haveLargeAsid64();
92 _haveLargeAsid64 = false;
112 if (if_name == "port") {
126 tc(nullptr), aarch64(false), el(EL0), physAddrRange(0), req(nullptr),
127 asid(0), vmid(0), isHyp(false), transState(nullptr),
128 vaddr(0), vaddr_tainted(0),
129 sctlr(0), scr(0), cpsr(0), tcr(0),
130 htcr(0), hcr(0), vtcr(0),
131 isWrite(false), isFetch(false), isSecure(false),
132 isUncacheable(false),
133 secureLookup(false), rwTable(false), userTable(false), xnTable(false),
134 pxnTable(false), hpd(false), stage2Req(false),
135 stage2Tran(nullptr), timing(false), functional(false),
137 delayed(false), tableWalker(nullptr)
143 reqQueue, snoopRespQueue),
144 reqQueue(*_walker, *this), snoopRespQueue(*_walker, *this),
151 Addr desc_addr, int size,
164 state->delay = delay;
172 Addr desc_addr, int size,
175 auto pkt = createPacket(desc_addr, size, data, flags, 0, nullptr);
179 handleRespPacket(pkt);
184 Addr desc_addr, int size,
187 auto pkt = createPacket(desc_addr, size, data, flags, delay, nullptr);
189 Tick lat = sendAtomic(pkt);
191 handleRespPacket(pkt, lat);
196 Addr desc_addr, int size,
200 auto pkt = createPacket(desc_addr, size, data, flags, delay, event);
202 schedTimingReq(pkt, curTick());
209 assert(pkt->req->isUncacheable() ||
212 handleRespPacket(pkt);
227 handleResp(state, pkt->getAddr(), pkt->req->getSize(), delay);
252 DPRINTF(Drain, "TableWalker done draining, processing drain event\n");
260 bool state_queues_not_empty = false;
262 for (int i = 0; i < LookupLevel::Num_ArmLookupLevel; ++i) {
264 state_queues_not_empty = true;
270 DPRINTF(Drain, "TableWalker not drained\n");
273 DPRINTF(Drain, "TableWalker free, no need to drain\n");
293 bool _stage2Req, const TlbEntry *walk_entry)
295 assert(!(_functional && _timing));
304 DPRINTF(PageTableWalker, "creating new instance of WalkerState\n");
308 } else if (_functional) {
313 "creating functional instance of WalkerState\n");
317 } else if (_timing) {
326 return std::make_shared<ReExec>();
400 assert(release->has(ArmExtension::VIRTUALIZATION));
410 panic("Invalid exception level");
434 if (long_desc_format) {
451 else if (long_desc_format)
474 else if (long_desc_format)
508 if (te && te->partial) {
523 curr_state_copy->tc, curr_state_copy->mode);
525 delete curr_state_copy;
533 unsigned num_squashed = 0;
537 (te && !te->partial))) {
542 DPRINTF(TLB, "Squashing table walk for address %#x\n",
548 std::make_shared<UnimpFault>("Squashed Inst"),
589 const auto irgn0_mask = 0x1;
590 const auto irgn1_mask = 0x40;
596 const bool have_security = release->has(ArmExtension::SECURITY);
598 DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
610 return std::make_shared<PrefetchAbort>(
616 return std::make_shared<DataAbort>(
630 return std::make_shared<PrefetchAbort>(
636 return std::make_shared<DataAbort>(
649 DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr,
654 f = testWalk(l1desc_addr, sizeof(uint32_t),
680 sizeof(uint32_t), flag, LookupLevel::L1,
693 Addr ttbr, ttbr0_max, ttbr1_min, desc_addr;
697 DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n",
709 DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n");
713 LookupLevel::L1 : LookupLevel::L2;
716 DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n");
727 ttbr0_max = (1ULL << 32) -
730 ttbr0_max = (1ULL << 32) - 1;
732 ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz));
743 DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n");
747 return std::make_shared<PrefetchAbort>(
753 return std::make_shared<DataAbort>(
765 if (ttbr0_max < (1ULL << 30))
766 start_lookup_level = LookupLevel::L2;
768 DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
772 return std::make_shared<PrefetchAbort>(
778 return std::make_shared<DataAbort>(
791 if (ttbr1_min >= (1ULL << 31) + (1ULL << 30))
792 start_lookup_level = LookupLevel::L2;
796 return std::make_shared<PrefetchAbort>(
802 return std::make_shared<DataAbort>(
813 if (start_lookup_level == LookupLevel::L1) {
815 desc_addr = mbits(ttbr, 39, n) |
817 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
821 n = (tsz >= 2 ? 14 - tsz : 12);
822 desc_addr = mbits(ttbr, 39, n) |
824 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
854 sizeof(uint64_t), flag, start_lookup_level,
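Lines 815-824 compute the address of the first long-descriptor (LPAE) entry by masking the low bits of the TTBR with mbits() and OR-ing in the level index scaled by the 8-byte descriptor size. A standalone sketch under stated assumptions: bits()/mbits() are local stand-ins for the gem5 helpers documented further down this page, and the level-2 index derivation is a simplified assumption, not the exact gem5 expression:

    #include <cstdint>
    #include <cstdio>

    using Addr = std::uint64_t;

    // Extract bits [first:last] (inclusive) of val and right-justify them.
    constexpr Addr bits(Addr val, unsigned first, unsigned last)
    { return (val >> last) & ((Addr(1) << (first - last + 1)) - 1); }

    // Keep bits [first:last] of val in place, zeroing everything else.
    constexpr Addr mbits(Addr val, unsigned first, unsigned last)
    { return val & (((Addr(1) << (first + 1)) - 1) & ~((Addr(1) << last) - 1)); }

    int main()
    {
        Addr ttbr  = 0x0000008012345000ULL;  // example translation table base
        Addr vaddr = 0x0000000010201000ULL;  // example input address, within the TTBR0 region
        int tsz = 3;                         // TTBCR.T0SZ > 1, so the walk starts at L2

        int n = (tsz >= 2) ? 14 - tsz : 12;  // table base alignment (line 821)
        Addr index = bits(vaddr, 28, 21);    // assumed L2 index for a 4KB granule, T0SZ=3
        Addr desc_addr = mbits(ttbr, 39, n) | (index << 3);

        std::printf("descriptor at %#llx\n", (unsigned long long)desc_addr);
        return 0;
    }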
874 int in_min = 64 - (tg == Grain64KB ? 47 : 48);
876 return tsz > in_max || tsz < in_min || (low_range ?
893 DPRINTF(TLB, "Beginning table walk for address %#llx, TCR: %#llx\n",
910 bool vaddr_fault = false;
927 DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
934 top_bit, tg, tsz, true);
940 DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
947 top_bit, tg, tsz, false);
962 DPRINTF(TLB, " - Selecting VSTTBR_EL2 (AArch64 stage 2)\n");
965 DPRINTF(TLB, " - Selecting VTTBR_EL2 (AArch64 stage 2)\n");
976 DPRINTF(TLB, " - Selecting TTBR0_EL1 (AArch64)\n");
983 top_bit, tg, tsz, true);
989 DPRINTF(TLB, " - Selecting TTBR1_EL1 (AArch64)\n");
996 top_bit, tg, tsz, false);
1011 DPRINTF(TLB, " - Selecting TTBR0_EL2 (AArch64)\n");
1019 top_bit, tg, tsz, true);
1026 DPRINTF(TLB, " - Selecting TTBR1_EL2 (AArch64)\n");
1033 top_bit, tg, tsz, false);
1048 DPRINTF(TLB, " - Selecting TTBR0_EL3 (AArch64)\n");
1055 top_bit, tg, tsz, true);
1073 f = std::make_shared<PrefetchAbort>(
1078 f = std::make_shared<DataAbort>(
1098 warn_once("Reserved granule size requested; gem5's IMPLEMENTATION "
1099 "DEFINED behavior takes this to mean 4KB granules\n");
1111 auto [table_addr, desc_addr, start_lookup_level] = walkAddresses(
1112 ttbr, tg, tsz, pa_range);
1117 DPRINTF(TLB, "Address size fault before any lookup\n");
1120 f = std::make_shared<PrefetchAbort>(
1126 f = std::make_shared<DataAbort>(
1179 sizeof(uint64_t), flag, start_lookup_level,
1183 sizeof(uint64_t), flag, -1, NULL,
1191 std::tuple<Addr, Addr, TableWalker::LookupLevel>
1196 LookupLevel first_level = LookupLevel::Num_ArmLookupLevel;
1197 Addr table_addr = 0;
1204 "Walk Cache hit: va=%#x, level=%d, table address=%#x\n",
1212 table_addr = entry->pfn;
1218 ptops->firstLevel(64 - tsz);
1219 panic_if(first_level == LookupLevel::Num_ArmLookupLevel,
1220 "Table walker couldn't find lookup level\n");
1223 int base_addr_lo = 3 + tsz - stride * (3 - first_level) - tg;
1225 if (pa_range == 52) {
1226 int z = (base_addr_lo < 6) ? 6 : base_addr_lo;
1227 table_addr = mbits(ttbr, 47, z);
1228 table_addr |= (bits(ttbr, 5, 2) << 48);
1230 table_addr = mbits(ttbr, 47, base_addr_lo);
1234 desc_addr = table_addr + ptops->index(currState->vaddr, first_level, tsz);
1236 return std::make_tuple(table_addr, desc_addr, first_level);
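walkAddresses() (lines 1223-1234) extracts the table base from the TTBR; with a 52-bit physical address range the top bits of the base are carried in TTBR[5:2]. A minimal sketch of that extraction, with local stand-ins for the gem5 bits()/mbits() helpers:

    #include <cstdint>

    using Addr = std::uint64_t;

    constexpr Addr bits(Addr val, unsigned first, unsigned last)
    { return (val >> last) & ((Addr(1) << (first - last + 1)) - 1); }

    constexpr Addr mbits(Addr val, unsigned first, unsigned last)
    { return val & (((Addr(1) << (first + 1)) - 1) & ~((Addr(1) << last) - 1)); }

    // Mirror of the base extraction at lines 1225-1230.
    Addr
    tableBase(Addr ttbr, int base_addr_lo, int pa_range)
    {
        if (pa_range == 52) {
            int z = (base_addr_lo < 6) ? 6 : base_addr_lo;
            return mbits(ttbr, 47, z) | (bits(ttbr, 5, 2) << 48);
        }
        return mbits(ttbr, 47, base_addr_lo);
    }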
1241 uint8_t texcb, bool s)
1245 DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s);
1246 te.shareable = false;
1247 te.nonCacheable = false;
1248 te.outerShareable = false;
1252 te.nonCacheable = true;
1254 te.shareable = true;
1259 te.nonCacheable = true;
1261 te.shareable = true;
1269 te.outerAttrs = bits(texcb, 1, 0);
1275 te.outerAttrs = bits(texcb, 1, 0);
1278 te.nonCacheable = true;
1282 te.outerAttrs = bits(texcb, 1, 0);
1285 panic("Reserved texcb value!\n");
1288 panic("Implementation-defined texcb value!\n");
1297 te.nonCacheable = true;
1299 te.shareable = false;
1304 panic("Reserved texcb value!\n");
1309 if (bits(texcb, 1,0) == 0 || bits(texcb, 3,2) == 0)
1310 te.nonCacheable = true;
1311 te.innerAttrs = bits(texcb, 1, 0);
1312 te.outerAttrs = bits(texcb, 3, 2);
1315 panic("More than 32 states for 5 bits?\n");
1323 DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr);
1324 uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0;
1325 switch(bits(texcb, 2,0)) {
1330 te.outerShareable = (prrr.nos0 == 0);
1336 te.outerShareable = (prrr.nos1 == 0);
1342 te.outerShareable = (prrr.nos2 == 0);
1348 te.outerShareable = (prrr.nos3 == 0);
1354 te.outerShareable = (prrr.nos4 == 0);
1360 te.outerShareable = (prrr.nos5 == 0);
1363 panic("Imp defined type\n");
1368 te.outerShareable = (prrr.nos7 == 0);
1374 DPRINTF(TLBVerbose, "StronglyOrdered\n");
1376 te.nonCacheable = true;
1379 te.shareable = true;
1382 DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n",
1383 prrr.ds1, prrr.ds0, s);
1385 te.nonCacheable = true;
1389 te.shareable = true;
1391 te.shareable = true;
1394 DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n",
1395 prrr.ns1, prrr.ns0, s);
1398 te.shareable = true;
1400 te.shareable = true;
1403 panic("Reserved type");
1409 te.nonCacheable = true;
1425 te.nonCacheable = true;
1440 DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, "
1442 te.shareable, te.innerAttrs, te.outerAttrs);
1443 te.setAttributes(false);
1453 uint8_t sh = l_descriptor.sh();
1458 uint8_t attr_3_2 = (attr >> 2) & 0x3;
1459 uint8_t attr_1_0 = attr & 0x3;
1461 DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh);
1463 if (attr_3_2 == 0) {
1467 te.innerAttrs = attr_1_0 == 0 ? 1 : 3;
1468 te.nonCacheable = true;
1471 te.outerAttrs = attr_3_2 == 1 ? 0 :
1472 attr_3_2 == 2 ? 2 : 1;
1473 te.innerAttrs = attr_1_0 == 1 ? 0 :
1474 attr_1_0 == 2 ? 6 : 5;
1475 te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1);
1478 uint8_t attrIndx = l_descriptor.attrIndx();
1486 attr = (mair >> (8 * (attrIndx % 4))) & 0xff;
1487 uint8_t attr_7_4 = bits(attr, 7, 4);
1488 uint8_t attr_3_0 = bits(attr, 3, 0);
1489 DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx, sh, attr);
1494 te.nonCacheable = false;
1499 if (attr_3_0 == 0x0)
1501 else if (attr_3_0 == 0x4)
1504 panic("Unpredictable behavior\n");
1505 te.nonCacheable = true;
1512 if (attr_3_0 == 0x4)
1514 te.nonCacheable = true;
1515 else if (attr_3_0 < 0x8)
1516 panic("Unpredictable behavior\n");
1526 if (attr_7_4 & 0x4) {
1527 te.outerAttrs = (attr_7_4 & 1) ? 1 : 3;
1529 te.outerAttrs = 0x2;
1533 if (attr_3_0 != 0x4 && attr_3_0 < 0x8)
1534 panic("Unpredictable behavior\n");
1537 panic("Unpredictable behavior\n");
1543 te.innerAttrs = 0x1;
1546 te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0;
1558 te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7;
1561 panic("Unpredictable behavior\n");
1566 te.outerShareable = sh == 2;
1567 te.shareable = (sh & 0x2) ? true : false;
1568 te.setAttributes(true);
1569 te.attributes |= (uint64_t) attr << 56;
1579 uint8_t sh = l_descriptor.sh();
1583 uint8_t attr_hi = (attr >> 2) & 0x3;
1584 uint8_t attr_lo = attr & 0x3;
1586 DPRINTF(TLBVerbose, "memAttrsAArch64 MemAttr:%#x sh:%#x\n", attr, sh);
1592 te.innerAttrs = attr_lo == 0 ? 1 : 3;
1593 te.nonCacheable = true;
1596 te.outerAttrs = attr_hi == 1 ? 0 :
1597 attr_hi == 2 ? 2 : 1;
1598 te.innerAttrs = attr_lo == 1 ? 0 :
1599 attr_lo == 2 ? 6 : 5;
1602 te.nonCacheable = (attr_hi == 1) || (attr_hi == 2) ||
1603 (attr_lo == 1) || (attr_lo == 2);
1606 uint8_t attrIndx = l_descriptor.attrIndx();
1608 DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh);
1625 panic("Invalid exception level");
1630 attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
1638 te.nonCacheable = false;
1640 te.nonCacheable = true;
1648 te.nonCacheable = true;
1653 warn_if(!attr_hi, "Unpredictable behavior");
1659 te.nonCacheable = true;
1662 te.shareable = sh == 2;
1663 te.outerShareable = (sh & 0x2) ? true : false;
1665 te.attributes = ((uint64_t) attr << 56) |
1682 DPRINTF(TLB, "L1 descriptor for %#x is %#x\n",
1695 DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
1698 std::make_shared<PrefetchAbort>(
1705 std::make_shared<DataAbort>(
1728 panic("Haven't implemented supersections\n");
1737 DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n",
1774 panic("A new type in a 2 bit field?\n");
1782 return std::make_shared<PrefetchAbort>(
1788 return std::make_shared<DataAbort>(
1808 DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n",
1815 DPRINTF(PageTableWalker, "Analyzing L%d descriptor: %#llx, pxn: %d, "
1816 "xn: %d, ap: %d, af: %d, type: %d\n",
1825 DPRINTF(PageTableWalker, "Analyzing L%d descriptor: %#llx, type: %d\n",
1835 DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
1854 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1861 DPRINTF(TLB, "L%d descriptor causing Access Fault\n",
1891 DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
1900 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1936 Event *event = NULL;
1938 case LookupLevel::L1:
1940 case LookupLevel::L2:
1941 case LookupLevel::L3:
1945 panic("Wrong lookup level in table walk\n");
1951 sizeof(uint64_t), flag, -1, event,
1959 panic("A new type in a 2 bit field?\n");
1973 DPRINTF(TLB, "L2 descriptor for %#x is %#x\n",
1980 DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
2005 DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
2031 DPRINTF(PageTableWalker, "L1 Desc object host addr: %p\n",
2033 DPRINTF(PageTableWalker, "L1 Desc object data: %08x\n",
2036 DPRINTF(PageTableWalker, "calling doL1Descriptor for vaddr:%#x\n",
2059 DPRINTF(PageTableWalker, "calling translateTiming again\n");
2092 DPRINTF(PageTableWalker, "calling doL2Descriptor for vaddr:%#x\n",
2103 DPRINTF(PageTableWalker, "calling translateTiming again\n");
2162 DPRINTF(PageTableWalker, "calling doLongDescriptor for vaddr:%#x\n",
2182 DPRINTF(PageTableWalker, "calling translateTiming again\n");
2199 if (curr_lookup_level >= LookupLevel::Num_ArmLookupLevel - 1)
2200 panic("Max. number of lookups already reached in table walk\n");
2225 "Fetching descriptor at address: 0x%x stage2Req: %d\n",
2240 fault = tran->fault;
2253 if (queueIndex >= 0) {
2254 DPRINTF(PageTableWalker, "Adding to walker fifo: "
2255 "queue size before adding: %d\n",
2261 (this->*doDescriptor)();
2268 if (queueIndex >= 0) {
2269 DPRINTF(PageTableWalker, "Adding to walker fifo: "
2270 "queue size before adding: %d\n",
2279 (this->*doDescriptor)();
2282 (this->*doDescriptor)();
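fetchDescriptor() receives a pointer to the member function that should process the descriptor and invokes it with (this->*doDescriptor)() once the data is in place (lines 2261-2282). A minimal sketch of that dispatch idiom (illustrative class, not gem5 code):

    #include <cstdio>

    class WalkerSketch
    {
      public:
        void doL1Descriptor() { std::printf("short-format L1 handler\n"); }
        void doLongDescriptor() { std::printf("long-format handler\n"); }

        // Fetch the descriptor, then invoke whichever handler was requested.
        void fetchDescriptor(void (WalkerSketch::*doDescriptor)())
        {
            // ... read the descriptor bytes here ...
            (this->*doDescriptor)();
        }
    };

    int main()
    {
        WalkerSketch w;
        w.fetchDescriptor(&WalkerSketch::doL1Descriptor);
        w.fetchDescriptor(&WalkerSketch::doLongDescriptor);
        return 0;
    }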
2291 const bool have_security = release->has(ArmExtension::SECURITY);
2296 te.longDescFormat = true;
2304 te.size = (1ULL << te.N) - 1;
2310 te.type = TypeTLB::unified;
2323 DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
2325 DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
2326 "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
2327 te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp,
2328 te.nonCacheable, te.ns);
2329 DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
2340 const bool have_security = release->has(ArmExtension::SECURITY);
2345 te.longDescFormat = long_descriptor;
2351 te.size = (1 << te.N) - 1;
2352 te.pfn = descriptor.pfn();
2357 te.xn = descriptor.xn();
2372 if (long_descriptor) {
2381 te.hap = l_descriptor.ap();
2391 te.ap = descriptor.ap();
2398 DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
2400 DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
2401 "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
2402 te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp,
2403 te.nonCacheable, te.ns);
2404 DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
2419 switch (lookup_level_as_int) {
2420 case LookupLevel::L1:
2421 return LookupLevel::L1;
2422 case LookupLevel::L2:
2423 return LookupLevel::L2;
2424 case LookupLevel::L3:
2425 return LookupLevel::L3;
2427 panic("Invalid lookup level conversion");
2474 panic("unknown page size");
2487 auto req = std::make_shared<Request>();
2535 : data(_data), numBytes(0), event(_event), parent(_parent),
2538 req = std::make_shared<Request>();
2557 parent.getTableWalkerPort().sendTimingReq(
2558 req->getPaddr(), numBytes, data, req->getFlags(),
2570 parent.mmu->translateTiming(req, tc, this, mode, tranType, true);
2574 : statistics::Group(parent),
2575 ADD_STAT(walks, statistics::units::Count::get(),
2576 "Table walker walks requested"),
2577 ADD_STAT(walksShortDescriptor, statistics::units::Count::get(),
2578 "Table walker walks initiated with short descriptors"),
2579 ADD_STAT(walksLongDescriptor, statistics::units::Count::get(),
2580 "Table walker walks initiated with long descriptors"),
2581 ADD_STAT(walksShortTerminatedAtLevel, statistics::units::Count::get(),
2582 "Level at which table walker walks with short descriptors "
2584 ADD_STAT(walksLongTerminatedAtLevel, statistics::units::Count::get(),
2585 "Level at which table walker walks with long descriptors "
2587 ADD_STAT(squashedBefore, statistics::units::Count::get(),
2588 "Table walks squashed before starting"),
2589 ADD_STAT(squashedAfter, statistics::units::Count::get(),
2590 "Table walks squashed after completion"),
2592 "Table walker wait (enqueue to first request) latency"),
2593 ADD_STAT(walkServiceTime, statistics::units::Tick::get(),
2594 "Table walker service (enqueue to completion) latency"),
2596 "Table walker pending requests distribution"),
2597 ADD_STAT(pageSizes, statistics::units::Count::get(),
2598 "Table walker page sizes translated"),
2599 ADD_STAT(requestOrigin, statistics::units::Count::get(),
2600 "Table walker requests started/completed, data/inst")
EventFunctionWrapper doL1DescEvent
Fault testWalk(Addr pa, Addr size, TlbEntry::DomainType domain, LookupLevel lookup_level, bool stage2)
Tick curTick()
The universal simulation clock.
virtual std::string dbgHeader() const =0
int16_t PortID
Port index/ID type, and a symbolic name for an invalid port id.
virtual bool squashed() const
This function is used by the page table walker to determine if it should translate a pending requ...
void doLongDescriptorWrapper(LookupLevel curr_lookup_level)
enums::ArmLookupLevel LookupLevel
Derived & ysubname(off_type index, const std::string &subname)
bool isSecure
If the access comes from the secure state.
virtual Port & getPort(const std::string &if_name, PortID idx=InvalidPortID)
Get a port with a given name and index.
Event * LongDescEventByLevel[4]
statistics::Scalar walksLongDescriptor
virtual RegVal readMiscReg(RegIndex misc_reg)=0
constexpr decltype(nullptr) NoFault
TlbEntry walkEntry
Initial walk entry allowing to skip lookup levels.
bool ELIs64(ThreadContext *tc, ExceptionLevel el)
bool pxnTable() const
Is privileged execution allowed on subsequent lookup levels?
SCTLR sctlr
Cached copy of the sctlr as it existed when translation began.
ExceptionLevel el
Current exception level.
void sendAtomicReq(Addr desc_addr, int size, uint8_t *data, Request::Flags flag, Tick delay)
uint8_t ap() const override
Three bit access protection flags.
LongDescriptor longDesc
Long-format descriptor (LPAE and AArch64)
EventFunctionWrapper doProcessEvent
void readDataTimed(ThreadContext *tc, Addr desc_addr, Stage2Walk *translation, int num_bytes, Request::Flags flags)
void completeDrain()
Checks if all state is cleared and if so, completes drain.
void memAttrsAArch64(ThreadContext *tc, TlbEntry &te, LongDescriptor &lDescriptor)
statistics::Vector walksLongTerminatedAtLevel
void doL1LongDescriptorWrapper()
std::list< WalkerState * > pendingQueue
Queue of requests that are waiting because the walker is currently busy.
const ArmRelease * release
Cached copies of system-level properties.
Addr vaddr
The virtual address that is being translated with tagging removed.
DrainState drainState() const
Return the current drain state of an object.
RequestorID requestorId
Requestor id assigned by the MMU.
Port & getTableWalkerPort()
void nextWalk(ThreadContext *tc)
Derived & init(size_type _x, size_type _y)
The QueuedRequestPort combines two queues, a request queue and a snoop response queue,...
int decodePhysAddrRange64(uint8_t pa_enc)
Returns the n.
const PageTableOps * getPageTableOps(GrainSize trans_granule)
Addr paddr() const
Return the physical address of the entry.
const GrainSize GrainMap_tg0[]
void set(Type mask)
Set all flag's bits matching the given mask.
RequestPtr req
A pointer to the original request.
This translation class is used to trigger the data fetch once a timing translation returns the transl...
statistics::Vector2d requestOrigin
virtual bool xn() const =0
Tick sendAtomic(PacketPtr pkt)
Send an atomic request packet, where the data is moved and the state is updated in zero time,...
bool secure(bool have_security, WalkerState *currState) const override
Returns true if this entry targets the secure physical address map.
Addr nextDescAddr(Addr va) const
Return the address of the next descriptor.
const FlagsType nozero
Don't print if this is zero.
virtual uint8_t texcb() const
bool secureLookup
Helper variables used to implement hierarchical access permissions when the long-desc.
@ SECURE
The request targets the secure memory space.
statistics::Scalar squashedAfter
bool cacheResponding() const
Derived & subname(off_type index, const std::string &name)
Set the subfield name for the given index, and marks this stat to print at the end of simulation.
uint32_t data
The raw bits of the entry.
const GrainSize GrainMap_tg1[]
ByteOrder byteOrder(const ThreadContext *tc)
void schedule(Event &event, Tick when)
uint32_t data
The raw bits of the entry.
const FlagsType nonan
Don't print if this is NAN.
bool fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes, Request::Flags flags, int queueIndex, Event *event, void(TableWalker::*doDescriptor)())
uint8_t ap() const override
2-bit access protection flags
statistics::Histogram walkServiceTime
void handleResp(TableWalkerState *state, Addr addr, Addr size, Tick delay=0)
constexpr T mbits(T val, unsigned first, unsigned last)
Mask off the given bits in place like bits() but without shifting.
void multiInsert(TlbEntry &pte)
Insert a PTE in the current TLB and in the higher levels.
void setVirt(Addr vaddr, int size, Request::Flags flags, int requestorId)
BaseMMU::Translation * stage2Tran
A pointer to the stage 2 translation that's in progress.
Fault walk(const RequestPtr &req, ThreadContext *tc, uint16_t asid, vmid_t _vmid, bool hyp, BaseMMU::Mode mode, BaseMMU::Translation *_trans, bool timing, bool functional, bool secure, MMU::ArmTranslationType tran_type, bool stage2, const TlbEntry *walk_entry)
Addr vaddr_tainted
The virtual address that is being translated.
Fault testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode, TlbEntry::DomainType domain, LookupLevel lookup_level, bool stage2)
MMU::ArmTranslationType tranType
The translation type that has been requested.
void sample(const U &v, int n=1)
Add a value to the distribtion n times.
Port(TableWalker *_walker, RequestorID id)
gem5::Port & getPort(const std::string &if_name, PortID idx=InvalidPortID) override
Get a port with a given name and index.
const FlagsType dist
Print the distribution.
bool HaveLVA(ThreadContext *tc)
uint8_t ap() const override
Three bit access protection flags.
static const unsigned COMPLETED
bool functional
If the atomic mode should be functional.
statistics::Scalar walksShortDescriptor
Cycles is a wrapper class for representing cycle counts, i.e.
Addr nextTableAddr() const
Return the address of the next page table.
static const unsigned REQUESTED
const FlagsType pdf
Print the percent of the total that this entry represents.
Addr purifyTaggedAddr(Addr addr, ThreadContext *tc, ExceptionLevel el, TCR tcr, bool is_instr)
Removes the tag from tagged addresses if that mode is enabled.
TableWalkerStats(statistics::Group *parent)
void handleRespPacket(PacketPtr pkt, Tick delay=0)
@ PT_WALK
The request is a page table walk.
uint16_t asid
ASID that we're servicing the request under.
void dataStatic(T *p)
Set the data pointer to the following value that should not be freed.
bool xnTable() const
Is execution allowed on subsequent lookup levels?
bool has(ArmExtension ext) const
bool secureTable() const
Whether the subsequent levels of lookup are secure.
DrainState
Object drain/handover states.
SCTLR sctlr
Cached copy of the sctlr as it existed when translation began.
uint64_t data
The raw bits of the entry.
void sendFunctional(PacketPtr pkt) const
Send a functional request packet, where the data is instantly updated everywhere in the memory system...
std::tuple< Addr, Addr, LookupLevel > walkAddresses(Addr ttbr, GrainSize tg, int tsz, int pa_range)
Returns a tuple made of: 1) The address of the first page table 2) The address of the first descripto...
uint8_t sh() const
2-bit shareability field
uint64_t getRawData() const override
ThreadContext is the external interface to all thread state for anything outside of the CPU.
virtual std::string name() const
void finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc, BaseMMU::Mode mode)
std::shared_ptr< FaultBase > Fault
bool HaveVirtHostExt(ThreadContext *tc)
void sendFunctionalReq(Addr desc_addr, int size, uint8_t *data, Request::Flags flag)
FaultSource
Generic fault source enums used to index into {short/long/aarch64}DescFaultSources[] to get the actua...
const Params & params() const
TlbEntry::DomainType domain() const override
Domain Client/Manager: ARM DDI 0406B: B3-31.
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Fault processWalkAArch64()
static uint8_t pageSizeNtoStatBin(uint8_t N)
bool delayed
Whether the response is delayed in timing mode due to additional lookups.
std::list< WalkerState * > stateQueues[LookupLevel::Num_ArmLookupLevel]
Queues of requests for all the different lookup levels.
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
static ExceptionLevel tranTypeEL(CPSR cpsr, ArmTranslationType type)
Determine the EL to use for the purpose of a translation given a specific translation type.
void doL0LongDescriptorWrapper()
ThreadContext * tc
Thread context that we're doing the walk for.
int physAddrRange
Current physical address range in bits.
ProbePointArg< PacketInfo > Packet
Packet probe point.
uint64_t Tick
Tick count type.
Tick startTime
Timestamp for calculating elapsed time in service (for stats)
Addr l2Addr() const
Address of L2 descriptor if it exists.
EntryType type() const
Return the descriptor type.
std::shared_ptr< Request > RequestPtr
virtual Addr pfn() const =0
void insertPartialTableEntry(LongDescriptor &descriptor)
@ NO_ACCESS
The request should not cause a memory access.
EventFunctionWrapper doL2DescEvent
virtual uint8_t offsetBits() const =0
statistics::Vector pageSizes
bool isFetch
If the access is a fetch (for execution, and no-exec) must be checked?
Port * port
Port shared by the two table walkers.
virtual uint64_t getRawData() const =0
void translateTiming(ThreadContext *tc)
void doL2LongDescriptorWrapper()
void doL1DescriptorWrapper()
RequestPtr req
Request that is currently being serviced.
T htog(T value, ByteOrder guest_byte_order)
Fault translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode) override
void processWalkWrapper()
unsigned numSquashable
The number of walks belonging to squashed instructions that can be removed from the pendingQueue per ...
BaseMMU::Translation * transState
Translation state for delayed requests.
virtual bool secure(bool have_security, WalkerState *currState) const =0
GrainSize grainSize
Width of the granule size in bits.
constexpr T bits(T val, unsigned first, unsigned last)
Extract the bitfield from position 'first' to 'last' (inclusive) from 'val' and right justify it.
TableWalker * tableWalker
HTCR htcr
Cached copy of the htcr as it existed when translation began.
@ Drained
Buffers drained, ready for serialization/handover.
bool isWrite
If the access is a write.
bool checkVAddrSizeFaultAArch64(Addr addr, int top_bit, GrainSize granule, int tsz, bool low_range)
VTCR_t vtcr
Cached copy of the vtcr as it existed when translation began.
ExceptionLevel s1TranslationRegime(ThreadContext *tc, ExceptionLevel el)
Fault generateLongDescFault(ArmFault::FaultSource src)
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Long-descriptor format (LPAE)
Histogram & init(size_type size)
Set the parameters of this histogram.
SenderState * senderState
This packet's sender state.
const std::string & name()
statistics::Histogram pendingWalks
bool isSecure(ThreadContext *tc)
statistics::Histogram walkWaitTime
The ClockedObject class extends the SimObject with a clock and accessor functions to relate ticks to ...
Tick clockEdge(Cycles cycles=Cycles(0)) const
Determine the tick when a cycle begins, by default the current one, but the argument also enables the...
LookupLevel lookupLevel
Current lookup level for this descriptor.
virtual bool global(WalkerState *currState) const =0
bool supersection() const
Is the page a Supersection (16 MiB)?
bool aarch64
True if the current lookup is performed in AArch64 state.
bool af() const
Returns true if the access flag (AF) is set.
void signalDrainDone() const
Signal that an object is drained.
bool FullSystem
The FullSystem variable can be used to determine the current mode of simulation.
bool stage2Req
Flag indicating if a second stage of lookup is required.
void translateTiming(const RequestPtr &req, ThreadContext *tc, Translation *translation, Mode mode) override
void sendTimingReq(Addr desc_addr, int size, uint8_t *data, Request::Flags flag, Tick delay, Event *event)
#define warn_if(cond,...)
Conditional warning macro that checks the supplied condition and only prints a warning if the conditi...
Fault readDataUntimed(ThreadContext *tc, Addr vaddr, Addr desc_addr, uint8_t *data, int num_bytes, Request::Flags flags, BaseMMU::Mode mode, MMU::ArmTranslationType tran_type, bool functional)
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
uint8_t rwTable() const
R/W protection flag for subsequent levels of lookup.
TableWalker(const Params &p)
Ports are used to interface objects to each other.
bool hpd
Hierarchical access permission disable.
uint8_t memAttr() const
Memory attributes, only used by stage 2 translations.
DrainState drain() override
Draining is the process of clearing out the states of SimObjects.These are the SimObjects that are pa...
TranslationGenPtr translateFunctional(Addr start, Addr size, ThreadContext *tc, Mode mode, Request::Flags flags) override
Returns a translation generator for a region of virtual addresses, instead of directly translating a ...
void drainResume() override
Resume execution after a successful drain.
TlbEntry::DomainType domain() const override
void memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr, uint8_t texcb, bool s)
MMU * mmu
The MMU to forward second stage lookups to.
void doL2DescriptorWrapper()
uint8_t attrIndx() const
Attribute index.
void memAttrsLPAE(ThreadContext *tc, TlbEntry &te, LongDescriptor &lDescriptor)
bool isUncacheable
True if table walks are uncacheable (for table descriptors)
bool longDescFormatInUse(ThreadContext *tc)
virtual void annotate(AnnotationIDs id, uint64_t val)
@ UNCACHEABLE
The request is to an uncacheable address.
uint8_t offsetBits() const override
Return the bit width of the page/block offset.
std::string dbgHeader() const override
bool checkAddrSizeFaultAArch64(Addr addr, int pa_range)
Returns true if the address exceeds the range permitted by the system-wide setting or by the TCR_ELx ...
const ArmRelease * release() const
int snsBankedIndex(MiscRegIndex reg, ThreadContext *tc)
virtual BaseCPU * getCpuPtr()=0
bool invalid() const
Is the entry invalid.
const bool isStage2
Indicates whether this table walker is part of the stage 2 mmu.
bool recvTimingResp(PacketPtr pkt) override
Receive a timing response from the peer.
ClockedObjectParams Params
Parameters of ClockedObject.
Derived & flags(Flags _flags)
Set the flags and marks this stat to print at the end of simulation.
void doL3LongDescriptorWrapper()
gem5::ArmISA::TableWalker::TableWalkerStats stats
virtual uint8_t ap() const =0
HCR hcr
Cached copy of the hcr as it existed when translation began.
bool aarch64
If the access is performed in AArch64 state.
static LookupLevel toLookupLevel(uint8_t lookup_level_as_int)
bool timing
If the mode is timing or atomic.
int computeAddrTop(ThreadContext *tc, bool selbit, bool is_instr, TCR tcr, ExceptionLevel el)
const FlagsType total
Print the total.
virtual bool shareable() const
Derived & init(size_type size)
Set this vector to have the given size.
Bitfield< 21, 20 > stride
uint8_t userTable() const
User/privileged mode protection flag for subsequent levels of lookup.
bool hasWalkCache() const
virtual void finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc, BaseMMU::Mode mode)=0
virtual TlbEntry::DomainType domain() const =0
Fault fault
The fault that we are going to return.
bool xn() const override
Is execution allowed on this mapping?
TlbEntry * lookup(Addr vpn, uint16_t asn, vmid_t vmid, bool hyp, bool secure, bool functional, bool ignore_asn, ExceptionLevel target_el, bool in_host, bool stage2, BaseMMU::Mode mode)
Lookup an entry in the TLB.
TLB * tlb
TLB that is initiating these table walks.
@ Draining
Draining buffers pending serialization/handover.
L1Descriptor l1Desc
Short-format descriptors.
Stage2Walk(TableWalker &_parent, uint8_t *_data, Event *_event, Addr vaddr, BaseMMU::Mode mode, MMU::ArmTranslationType tran_type)
statistics::Vector walksShortTerminatedAtLevel
PacketPtr createPacket(Addr desc_addr, int size, uint8_t *data, Request::Flags flag, Tick delay, Event *event)
BaseMMU::Mode mode
Save mode for use in delayed response.
bool pxn() const
Is privileged execution allowed on this mapping? (LPAE only)
#define panic(...)
This implements a cprintf based panic() function.
bool pending
If a timing translation is currently in progress.
void insertTableEntry(DescriptorBase &descriptor, bool longDescriptor)
statistics::Scalar squashedBefore