table_walker.cc
1/*
2 * Copyright (c) 2010, 2012-2019, 2021-2025 Arm Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37#include "arch/arm/table_walker.hh"
38
39#include <cassert>
40#include <memory>
41
42#include "arch/arm/faults.hh"
43#include "arch/arm/mmu.hh"
44#include "arch/arm/mpam.hh"
45#include "arch/arm/pagetable.hh"
46#include "arch/arm/system.hh"
47#include "arch/arm/tlb.hh"
48#include "base/compiler.hh"
49#include "cpu/base.hh"
50#include "cpu/thread_context.hh"
51#include "debug/Checkpoint.hh"
52#include "debug/Drain.hh"
53#include "debug/PageTableWalker.hh"
54#include "debug/TLB.hh"
55#include "debug/TLBVerbose.hh"
56#include "params/ArmTableWalker.hh"
57#include "sim/system.hh"
58
59namespace gem5
60{
61
62using namespace ArmISA;
63
64TableWalker::TableWalker(const Params &p)
65    : ClockedObject(p),
66      requestorId(p.sys->getRequestorId(this)),
67 port(new Port(*this)),
68 isStage2(p.is_stage2), tlb(NULL),
69 currState(NULL), pending(false),
70 numSquashable(p.num_squash_per_cycle),
71 release(nullptr),
72 stats(this),
73 pendingReqs(0),
75      doL1DescEvent([this]{ doL1DescriptorWrapper(); }, name()),
76      doL2DescEvent([this]{ doL2DescriptorWrapper(); }, name()),
77 doL0LongDescEvent([this]{ doL0LongDescriptorWrapper(); }, name()),
78 doL1LongDescEvent([this]{ doL1LongDescriptorWrapper(); }, name()),
79 doL2LongDescEvent([this]{ doL2LongDescriptorWrapper(); }, name()),
80 doL3LongDescEvent([this]{ doL3LongDescriptorWrapper(); }, name()),
81 LongDescEventByLevel { &doL0LongDescEvent, &doL1LongDescEvent,
82 &doL2LongDescEvent, &doL3LongDescEvent },
83 doProcessEvent([this]{ processWalkWrapper(); }, name()),
84 test(nullptr)
85{
86 sctlr = 0;
87
88 // Cache system-level properties
89 if (FullSystem) {
90 ArmSystem *arm_sys = dynamic_cast<ArmSystem *>(p.sys);
91 assert(arm_sys);
92 _physAddrRange = arm_sys->physAddrRange();
93 _haveLargeAsid64 = arm_sys->haveLargeAsid64();
94 } else {
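        // Syscall-emulation mode has no ArmSystem to query, so fall back to
        // a 48-bit physical address range and 8-bit ASIDs.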
95 _haveLargeAsid64 = false;
96 _physAddrRange = 48;
97 }
98
99}
100
105
106TableWalker::Port &
107TableWalker::getTableWalkerPort()
108{
109 return static_cast<Port&>(getPort("port"));
110}
111
112Port &
113TableWalker::getPort(const std::string &if_name, PortID idx)
114{
115 if (if_name == "port") {
116 return *port;
117 }
118 return ClockedObject::getPort(if_name, idx);
119}
120
121void
122TableWalker::setMmu(MMU *_mmu)
123{
124 mmu = _mmu;
125 release = mmu->release();
126}
127
128TableWalker::WalkerState::WalkerState() :
129    tc(nullptr), aarch64(false), regime(TranslationRegime::EL10),
130 physAddrRange(0), req(nullptr),
131 asid(0), vmid(0), transState(nullptr),
132 vaddr(0), vaddr_tainted(0),
133 sctlr(0), scr(0), cpsr(0), tcr(0),
134 htcr(0), hcr(0), vtcr(0),
135 isWrite(false), isFetch(false), ss(SecurityState::NonSecure),
136 isUncacheable(false), longDescData(std::nullopt),
137 hpd(false), sh(0), irgn(0), orgn(0), stage2Req(false),
138 stage2Tran(nullptr), timing(false), functional(false),
139 mode(BaseMMU::Read), tranType(MMU::NormalTran), l2Desc(l1Desc),
140 delayed(false), tableWalker(nullptr)
141{
142}
143
144TableWalker::Port::Port(TableWalker& _walker)
145    : QueuedRequestPort(_walker.name() + ".port", reqQueue, snoopRespQueue),
146 owner{_walker},
147 reqQueue(_walker, *this),
148 snoopRespQueue(_walker, *this)
149{
150}
151
152PacketPtr
153TableWalker::Port::createPacket(
154    const RequestPtr &req,
155 uint8_t *data, Tick delay,
156 Event *event)
157{
158 PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
159 pkt->dataStatic(data);
160
161 auto state = new TableWalkerState;
162 state->event = event;
163 state->delay = delay;
164
165 pkt->senderState = state;
166 return pkt;
167}
168
169void
170TableWalker::Port::sendFunctionalReq(
171    const RequestPtr &req, uint8_t *data)
172{
173 auto pkt = createPacket(req, data, 0, nullptr);
174
175 sendFunctional(pkt);
176
177 handleRespPacket(pkt);
178}
179
180void
181TableWalker::Port::sendAtomicReq(
182    const RequestPtr &req,
183 uint8_t *data, Tick delay)
184{
185 auto pkt = createPacket(req, data, delay, nullptr);
186
187 Tick lat = sendAtomic(pkt);
188
189 handleRespPacket(pkt, lat);
190}
191
192void
193TableWalker::Port::sendTimingReq(
194    const RequestPtr &req,
195 uint8_t *data, Tick delay,
196 Event *event)
197{
198 auto pkt = createPacket(req, data, delay, event);
199
200 schedTimingReq(pkt, curTick());
201}
202
203bool
204TableWalker::Port::recvTimingResp(PacketPtr pkt)
205{
206 // We shouldn't ever get a cacheable block in Modified state.
207 assert(pkt->req->isUncacheable() ||
208 !(pkt->cacheResponding() && !pkt->hasSharers()));
209
210 handleRespPacket(pkt);
211
212 return true;
213}
214
215void
216TableWalker::Port::handleRespPacket(PacketPtr pkt, Tick delay)
217{
218 // Should always see a response with a sender state.
219 assert(pkt->isResponse());
220
221 // Get the DMA sender state.
222 auto *state = dynamic_cast<TableWalkerState*>(pkt->senderState);
223 assert(state);
224
225 handleResp(state, pkt->getAddr(), pkt->req->getSize(), delay);
226
227 delete pkt;
228}
229
230void
231TableWalker::Port::handleResp(TableWalkerState *state, Addr addr,
232    Addr size, Tick delay)
233{
234 if (state->event) {
235 owner.schedule(state->event, curTick() + delay);
236 }
237 delete state;
238}
239
240void
241TableWalker::completeDrain()
242{
243    if (drainState() == DrainState::Draining &&
244        stateQueues[LookupLevel::L0].empty() &&
245 stateQueues[LookupLevel::L1].empty() &&
246 stateQueues[LookupLevel::L2].empty() &&
247 stateQueues[LookupLevel::L3].empty() &&
248 pendingQueue.empty()) {
249
250 DPRINTF(Drain, "TableWalker done draining, processing drain event\n");
251        signalDrainDone();
252    }
253}
254
255DrainState
256TableWalker::drain()
257{
258 bool state_queues_not_empty = false;
259
260 for (int i = 0; i < LookupLevel::Num_ArmLookupLevel; ++i) {
261 if (!stateQueues[i].empty()) {
262 state_queues_not_empty = true;
263 break;
264 }
265 }
266
267 if (state_queues_not_empty || pendingQueue.size()) {
268 DPRINTF(Drain, "TableWalker not drained\n");
269        return DrainState::Draining;
270    } else {
271 DPRINTF(Drain, "TableWalker free, no need to drain\n");
272 return DrainState::Drained;
273 }
274}
275
276void
277TableWalker::drainResume()
278{
279 if (params().sys->isTimingMode() && currState) {
280 delete currState;
281 currState = NULL;
283 }
284}
285
286bool
287TableWalker::uncacheableWalk()
288{
289 bool disable_cacheability = isStage2 ?
290 currState->hcr.cd :
291 currState->sctlr.c == 0;
292 return disable_cacheability || currState->isUncacheable;
293}
294
295Fault
296TableWalker::walk(const RequestPtr &_req, ThreadContext *_tc, uint16_t _asid,
297 vmid_t _vmid, MMU::Mode _mode,
298 MMU::Translation *_trans, bool _timing, bool _functional,
299 SecurityState ss, PASpace ipaspace,
301 bool _stage2Req, const TlbEntry *walk_entry)
302{
303 assert(!(_functional && _timing));
304 ++stats.walks;
305
306 WalkerState *savedCurrState = NULL;
307
308 if (!currState && !_functional) {
309        // For atomic mode, a new WalkerState instance should only be created
310        // once per TLB. For timing mode, a new instance is generated for every
311 // TLB miss.
312 DPRINTF(PageTableWalker, "creating new instance of WalkerState\n");
313
314 currState = new WalkerState();
315 currState->tableWalker = this;
316 } else if (_functional) {
317 // If we are mixing functional mode with timing (or even
318        // atomic), we need to be careful and clean up after
319        // ourselves so as not to risk getting into an inconsistent state.
320 DPRINTF(PageTableWalker,
321 "creating functional instance of WalkerState\n");
322 savedCurrState = currState;
323 currState = new WalkerState();
324 currState->tableWalker = this;
325 } else if (_timing) {
326 // This is a translation that was completed and then faulted again
327 // because some underlying parameters that affect the translation
328 // changed out from under us (e.g. asid). It will either be a
329        // misprediction, in which case nothing will happen, or we'll use
330        // this fault to re-execute the faulting instruction, which should
331        // clean everything up.
332 if (currState->vaddr_tainted == _req->getVaddr()) {
333 ++stats.squashedBefore;
334 return std::make_shared<ReExec>();
335 }
336 }
338
339 currState->startTime = curTick();
340 currState->tc = _tc;
341 currState->el =
344 tranType);
345
346 if (isStage2) {
348 currState->aarch64 = ELIs64(_tc, EL2);
349 currState->ipaSpace = ipaspace;
350 } else {
351 currState->regime =
353 currState->aarch64 =
354 ELIs64(_tc, translationEl(currState->regime));
355 }
356 currState->transState = _trans;
357 currState->req = _req;
358 if (walk_entry) {
359 currState->walkEntry = *walk_entry;
360 } else {
361 currState->walkEntry = TlbEntry();
362 }
363 currState->fault = NoFault;
364 currState->asid = _asid;
365 currState->vmid = _vmid;
366 currState->timing = _timing;
367 currState->functional = _functional;
368 currState->mode = _mode;
369 currState->tranType = tranType;
370 currState->ss = ss;
371 currState->secureLookup = currState->ss == SecurityState::Secure;
372 currState->physAddrRange = _physAddrRange;
373
376 currState->vaddr_tainted = currState->req->getVaddr();
377 if (currState->aarch64)
378 currState->vaddr = purifyTaggedAddr(currState->vaddr_tainted,
379 currState->tc, currState->el,
381 else
382 currState->vaddr = currState->vaddr_tainted;
383
384 if (currState->aarch64) {
385 currState->hcr = currState->tc->readMiscReg(MISCREG_HCR_EL2);
386 if (isStage2) {
387 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
388 if (currState->ss == SecurityState::Secure &&
389 currState->ipaSpace == PASpace::Secure) {
390 currState->vtcr =
391 currState->tc->readMiscReg(MISCREG_VSTCR_EL2);
392 } else {
393 currState->vtcr =
394 currState->tc->readMiscReg(MISCREG_VTCR_EL2);
395 }
396 } else switch (currState->regime) {
398 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
399 currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL1);
400 break;
403 assert(release->has(ArmExtension::VIRTUALIZATION));
404 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL2);
405 currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL2);
406 break;
408 assert(release->has(ArmExtension::SECURITY));
409 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL3);
410 currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL3);
411 break;
412 default:
413 panic("Invalid translation regime");
414 break;
415 }
416 } else {
417 currState->sctlr = currState->tc->readMiscReg(snsBankedIndex(
420 currState->ttbcr = currState->tc->readMiscReg(snsBankedIndex(
423 currState->htcr = currState->tc->readMiscReg(MISCREG_HTCR);
424 currState->hcr = currState->tc->readMiscReg(MISCREG_HCR);
425 currState->vtcr = currState->tc->readMiscReg(MISCREG_VTCR);
426 }
427 sctlr = currState->sctlr;
428
429 currState->isFetch = (currState->mode == BaseMMU::Execute);
430 currState->isWrite = (currState->mode == BaseMMU::Write);
431
432 stats.requestOrigin[REQUESTED][currState->isFetch]++;
433
434 currState->stage2Req = _stage2Req && !isStage2;
435
436 bool hyp = currState->el == EL2;
437 bool long_desc_format = currState->aarch64 || hyp || isStage2 ||
439
440 if (long_desc_format) {
441 // Helper variables used for hierarchical permissions
442 currState->longDescData = WalkerState::LongDescData();
443 currState->longDescData->rwTable = true;
444 currState->longDescData->userTable = true;
445 currState->longDescData->xnTable = false;
446 currState->longDescData->pxnTable = false;
447 ++stats.walksLongDescriptor;
448 } else {
449 currState->longDescData = std::nullopt;
450 ++stats.walksShortDescriptor;
451 }
452
453 if (currState->timing && (pending || pendingQueue.size())) {
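        // Only one walk is serviced at a time: later timing-mode requests are
        // parked in pendingQueue and picked up by processWalkWrapper() once
        // the walk in flight completes.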
454 pendingQueue.push_back(currState);
455 currState = NULL;
457 return NoFault;
458 } else {
459 if (currState->timing) {
460 pending = true;
462 }
463
464 Fault fault = NoFault;
465 if (currState->aarch64) {
466 fault = processWalkAArch64();
467 } else if (long_desc_format) {
468 fault = processWalkLPAE();
469 } else {
470 fault = processWalk();
471 }
472
473 // If this was a functional non-timing access restore state to
474 // how we found it.
475 if (currState->functional) {
476 delete currState;
477 currState = savedCurrState;
478 } else if (currState->timing) {
479 if (fault) {
480 pending = false;
481 nextWalk(currState->tc);
482 delete currState;
483 currState = NULL;
484 } else {
485 // Either we are using the long descriptor, which means we
486 // need to extract the queue index from longDesc, or we are
487 // using the short. In the latter we always start at L1
488 LookupLevel curr_lookup_level = long_desc_format ?
489 currState->longDesc.lookupLevel : LookupLevel::L1;
490
491 stashCurrState(curr_lookup_level);
492 }
493 } else if (fault) {
494 currState->tc = NULL;
495 currState->req = NULL;
496 }
497
498 return fault;
499 }
500}
501
502void
503TableWalker::processWalkWrapper()
504{
505 assert(!currState);
506 assert(pendingQueue.size());
508 currState = pendingQueue.front();
509
510 // Check if a previous walk filled this request already
511 // @TODO Should this always be the TLB or should we look in the stage2 TLB?
512 TlbEntry* te = mmu->lookup(currState->vaddr, currState->asid,
513 currState->vmid, currState->ss, true, false,
514 currState->regime, isStage2, currState->mode);
515
516 // Check if we still need to have a walk for this request. If the requesting
517 // instruction has been squashed, or a previous walk has filled the TLB with
518 // a match, we just want to get rid of the walk. The latter could happen
519 // when there are multiple outstanding misses to a single page and a
520 // previous request has been successfully translated.
521 if (!currState->transState->squashed() && (!te || te->partial)) {
522 // We've got a valid request, lets process it
523 pending = true;
524 pendingQueue.pop_front();
525
526 bool long_desc_format = currState->aarch64 || currState->el == EL2 ||
528
529 if (te && te->partial) {
530 currState->walkEntry = *te;
531 }
532 Fault fault;
533 if (currState->aarch64) {
534 fault = processWalkAArch64();
535 } else if (long_desc_format) {
536 fault = processWalkLPAE();
537 } else {
538 fault = processWalk();
539 }
540
541 if (fault != NoFault) {
542 pending = false;
543 nextWalk(currState->tc);
544
545 currState->transState->finish(fault, currState->req,
546 currState->tc, currState->mode);
547
548 delete currState;
549 currState = NULL;
550 } else {
551 LookupLevel curr_lookup_level = long_desc_format ?
552 currState->longDesc.lookupLevel : LookupLevel::L1;
553
554 stashCurrState(curr_lookup_level);
555 }
556 return;
557 }
558
559
560 // If the instruction that we were translating for has been
561 // squashed we shouldn't bother.
562 unsigned num_squashed = 0;
563 ThreadContext *tc = currState->tc;
564 while ((num_squashed < numSquashable) && currState &&
565 (currState->transState->squashed() ||
566 (te && !te->partial))) {
567 pendingQueue.pop_front();
568 num_squashed++;
569 stats.squashedBefore++;
570
571 DPRINTF(TLB, "Squashing table walk for address %#x\n",
572 currState->vaddr_tainted);
573
574 if (currState->transState->squashed()) {
575 // finish the translation which will delete the translation object
576 currState->transState->finish(
577 std::make_shared<UnimpFault>("Squashed Inst"),
578 currState->req, currState->tc, currState->mode);
579 } else {
580 // translate the request now that we know it will work
581 stats.walkServiceTime.sample(curTick() - currState->startTime);
582 mmu->translateTiming(currState->req, currState->tc,
583 currState->transState, currState->mode,
584 currState->tranType, isStage2);
585 }
586
587 // delete the current request
588 delete currState;
589
590        // peek at the next one
591 if (pendingQueue.size()) {
592 currState = pendingQueue.front();
593 te = mmu->lookup(currState->vaddr, currState->asid,
594 currState->vmid, currState->ss, true,
595 false, currState->regime, isStage2, currState->mode);
596 } else {
597 // Terminate the loop, nothing more to do
598 currState = NULL;
599 }
600 }
602
603 // if we still have pending translations, schedule more work
604 nextWalk(tc);
605 currState = NULL;
606}
607
608Fault
609TableWalker::processWalk()
610{
611 Addr ttbr = 0;
612
613 // For short descriptors, translation configs are held in
614 // TTBR1.
615 RegVal ttbr1 = currState->tc->readMiscReg(snsBankedIndex(
618
619 const auto irgn0_mask = 0x1;
620 const auto irgn1_mask = 0x40;
621 currState->isUncacheable = (ttbr1 & (irgn0_mask | irgn1_mask)) == 0;
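    // TTBR1 bits [6] and [0] hold the (split) IRGN field; with both clear
    // (IRGN == 0b00) the table walk itself is Inner Non-cacheable.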
622
623 // If translation isn't enabled, we shouldn't be here
624 assert(currState->sctlr.m || isStage2);
625 const bool is_atomic = currState->req->isAtomic();
626 const bool have_security = release->has(ArmExtension::SECURITY);
627
628 DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
629 currState->vaddr_tainted, currState->ttbcr, mbits(currState->vaddr, 31,
630 32 - currState->ttbcr.n));
631
632 stats.walkWaitTime.sample(curTick() - currState->startTime);
633
634 if (currState->ttbcr.n == 0 || !mbits(currState->vaddr, 31,
635 32 - currState->ttbcr.n)) {
636 DPRINTF(TLB, " - Selecting TTBR0\n");
637 // Check if table walk is allowed when Security Extensions are enabled
638 if (have_security && currState->ttbcr.pd0) {
639 if (currState->isFetch)
640 return std::make_shared<PrefetchAbort>(
641 currState->vaddr_tainted,
642 ArmFault::TranslationLL + LookupLevel::L1,
643 isStage2,
645 else
646 return std::make_shared<DataAbort>(
647 currState->vaddr_tainted,
649 is_atomic ? false : currState->isWrite,
650 ArmFault::TranslationLL + LookupLevel::L1, isStage2,
652 }
653 ttbr = currState->tc->readMiscReg(snsBankedIndex(
656 } else {
657 DPRINTF(TLB, " - Selecting TTBR1\n");
658 // Check if table walk is allowed when Security Extensions are enabled
659 if (have_security && currState->ttbcr.pd1) {
660 if (currState->isFetch)
661 return std::make_shared<PrefetchAbort>(
662 currState->vaddr_tainted,
663 ArmFault::TranslationLL + LookupLevel::L1,
664 isStage2,
666 else
667 return std::make_shared<DataAbort>(
668 currState->vaddr_tainted,
670 is_atomic ? false : currState->isWrite,
671 ArmFault::TranslationLL + LookupLevel::L1, isStage2,
673 }
674 ttbr = ttbr1;
675 currState->ttbcr.n = 0;
676 }
677
678 Addr l1desc_addr = mbits(ttbr, 31, 14 - currState->ttbcr.n) |
679 (bits(currState->vaddr, 31 - currState->ttbcr.n, 20) << 2);
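    // For example, with TTBCR.N == 0 the L1 table is 16KB aligned: TTBR bits
    // [31:14] give the table base, VA[31:20] picks one of 4096 entries, and
    // the "<< 2" accounts for the 4-byte descriptor size.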
680 DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr,
681 currState->ss == SecurityState::Secure ? "s" : "ns");
682
683    Request::Flags flag = Request::PT_WALK;
684    if (uncacheableWalk()) {
685        flag.set(Request::UNCACHEABLE);
686    }
687
688 if (currState->secureLookup) {
689 flag.set(Request::SECURE);
690 }
691
692    fetchDescriptor(
693        l1desc_addr, currState->l1Desc,
694 sizeof(uint32_t), flag, LookupLevel::L1,
697
698 return currState->fault;
699}
700
701Fault
702TableWalker::processWalkLPAE()
703{
704 Addr ttbr, ttbr0_max, ttbr1_min, desc_addr;
705 int tsz, n;
706 LookupLevel start_lookup_level = LookupLevel::L1;
707
708 DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n",
709 currState->vaddr_tainted, currState->ttbcr);
710
711 stats.walkWaitTime.sample(curTick() - currState->startTime);
712
714 if (currState->secureLookup)
715 flag.set(Request::SECURE);
716
717    // work out which base address register to use; if in hyp mode we always
718 // use HTTBR
719 if (isStage2) {
720 DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n");
721 ttbr = currState->tc->readMiscReg(MISCREG_VTTBR);
722 tsz = sext<4>(currState->vtcr.t0sz);
723 start_lookup_level = currState->vtcr.sl0 ?
724 LookupLevel::L1 : LookupLevel::L2;
725 currState->isUncacheable = currState->vtcr.irgn0 == 0;
726 } else if (currState->el == EL2) {
727 DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n");
728 ttbr = currState->tc->readMiscReg(MISCREG_HTTBR);
729 tsz = currState->htcr.t0sz;
730 currState->isUncacheable = currState->htcr.irgn0 == 0;
731 } else {
732 assert(longDescFormatInUse(currState->tc));
733
734 // Determine boundaries of TTBR0/1 regions
735 if (currState->ttbcr.t0sz)
736 ttbr0_max = (1ULL << (32 - currState->ttbcr.t0sz)) - 1;
737 else if (currState->ttbcr.t1sz)
738 ttbr0_max = (1ULL << 32) -
739 (1ULL << (32 - currState->ttbcr.t1sz)) - 1;
740 else
741 ttbr0_max = (1ULL << 32) - 1;
742 if (currState->ttbcr.t1sz)
743 ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz));
744 else
745 ttbr1_min = (1ULL << (32 - currState->ttbcr.t0sz));
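        // Worked example: T0SZ == 2 and T1SZ == 2 give TTBR0 the range
        // [0, 0x3FFFFFFF] and TTBR1 the range [0xC0000000, 0xFFFFFFFF]; a
        // virtual address falling in the hole between them takes the
        // translation fault generated in the final else branch below.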
746
747 const bool is_atomic = currState->req->isAtomic();
748
749 // The following code snippet selects the appropriate translation table base
750 // address (TTBR0 or TTBR1) and the appropriate starting lookup level
751 // depending on the address range supported by the translation table (ARM
752 // ARM issue C B3.6.4)
753 if (currState->vaddr <= ttbr0_max) {
754 DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n");
755 // Check if table walk is allowed
756 if (currState->ttbcr.epd0) {
757 if (currState->isFetch)
758 return std::make_shared<PrefetchAbort>(
759 currState->vaddr_tainted,
760 ArmFault::TranslationLL + LookupLevel::L1,
761 isStage2,
763 else
764 return std::make_shared<DataAbort>(
765 currState->vaddr_tainted,
767 is_atomic ? false : currState->isWrite,
768 ArmFault::TranslationLL + LookupLevel::L1,
769 isStage2,
771 }
772 ttbr = currState->tc->readMiscReg(snsBankedIndex(
775 tsz = currState->ttbcr.t0sz;
776 currState->isUncacheable = currState->ttbcr.irgn0 == 0;
777 if (ttbr0_max < (1ULL << 30)) // Upper limit < 1 GiB
778 start_lookup_level = LookupLevel::L2;
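            // Each long-descriptor L1 entry maps 1 GiB, so a region smaller
            // than that is covered by a single L1 entry and the walk can
            // begin directly at L2.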
779 } else if (currState->vaddr >= ttbr1_min) {
780 DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
781 // Check if table walk is allowed
782 if (currState->ttbcr.epd1) {
783 if (currState->isFetch)
784 return std::make_shared<PrefetchAbort>(
785 currState->vaddr_tainted,
786 ArmFault::TranslationLL + LookupLevel::L1,
787 isStage2,
789 else
790 return std::make_shared<DataAbort>(
791 currState->vaddr_tainted,
793 is_atomic ? false : currState->isWrite,
794 ArmFault::TranslationLL + LookupLevel::L1,
795 isStage2,
797 }
798 ttbr = currState->tc->readMiscReg(snsBankedIndex(
801 tsz = currState->ttbcr.t1sz;
802 currState->isUncacheable = currState->ttbcr.irgn1 == 0;
803 // Lower limit >= 3 GiB
804 if (ttbr1_min >= (1ULL << 31) + (1ULL << 30))
805 start_lookup_level = LookupLevel::L2;
806 } else {
807 // Out of boundaries -> translation fault
808 if (currState->isFetch)
809 return std::make_shared<PrefetchAbort>(
810 currState->vaddr_tainted,
811 ArmFault::TranslationLL + LookupLevel::L1,
812 isStage2,
814 else
815 return std::make_shared<DataAbort>(
816 currState->vaddr_tainted,
818 is_atomic ? false : currState->isWrite,
819 ArmFault::TranslationLL + LookupLevel::L1,
821 }
822
823 }
824
825 // Perform lookup (ARM ARM issue C B3.6.6)
826 if (start_lookup_level == LookupLevel::L1) {
827 n = 5 - tsz;
828 desc_addr = mbits(ttbr, 39, n) |
829 (bits(currState->vaddr, n + 26, 30) << 3);
830 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
831 desc_addr, currState->ss == SecurityState::Secure ?
832 "s" : "ns");
833 } else {
834 // Skip first-level lookup
835 n = (tsz >= 2 ? 14 - tsz : 12);
836 desc_addr = mbits(ttbr, 39, n) |
837 (bits(currState->vaddr, n + 17, 21) << 3);
838 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
839 desc_addr, currState->ss == SecurityState::Secure ?
840 "s" : "ns");
841 }
841    }
842
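    // With the 4KB granule used here, L2 and L3 each resolve 9 VA bits
    // (512 8-byte entries per table); e.g. for T0SZ == 0 the L1 table holds
    // four entries indexed by VA[31:30], while a walk that skips L1 starts
    // with up to 512 L2 entries indexed by VA[29:21].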
843    if (uncacheableWalk()) {
844        flag.set(Request::UNCACHEABLE);
845    }
846
847 currState->longDesc.lookupLevel = start_lookup_level;
848 currState->longDesc.aarch64 = false;
849 currState->longDesc.grainSize = Grain4KB;
850 currState->longDesc.isStage2 = isStage2;
851
852    fetchDescriptor(
853        desc_addr, currState->longDesc,
854 sizeof(uint64_t), flag, start_lookup_level,
855 LongDescEventByLevel[start_lookup_level],
857
858 return currState->fault;
859}
860
861Addr
862TableWalker::s1MinTxSz(GrainSize tg)
863{
864 // The effective maximum input size is 48 if ARMv8.2-LVA is not
865 // supported or if the translation granule that is in use is 4KB or
866    // 16KB in size. When ARMv8.2-LVA is supported, for the 64KB
867    // translation granule size only, the effective maximum input size
868    // is 52.
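    // Put differently, the minimum TxSZ returned here is 64 minus the
    // effective maximum input size: 16 for a 48-bit range, 12 for 52 bits.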
869 if (HaveExt(currState->tc, ArmExtension::FEAT_LVA) && tg == Grain64KB) {
870 return 12;
871 } else {
872 return 16;
873 }
874}
875
876Addr
877TableWalker::maxTxSz(GrainSize tg)
878{
879 if (HaveExt(currState->tc, ArmExtension::FEAT_TTST)) {
880 switch (tg) {
881 case Grain4KB: return 48;
882 case Grain16KB: return 48;
883 case Grain64KB: return 47;
884 default:
885 // If the value is programmed to either a reserved value or a size
886 // that has not been implemented, then the hardware will treat the
887 // field as if it has been programmed to an IMPLEMENTATION DEFINED
888 // choice
889 warn_once("Invalid grain size\n");
890 return 48;
891 }
892 }
893 return 39;
894}
895
896bool
898{
899 Addr min_txsz = s1MinTxSz(tg);
900 Addr max_txsz = maxTxSz(tg);
901
902 return tsz > max_txsz || tsz < min_txsz;
903}
904
905bool
906TableWalker::checkVAOutOfRange(Addr vaddr, int top_bit, int tsz, bool low_range)
907{
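    // For the lower VA range the bits above the TxSZ boundary must all be
    // zero; for the upper range they must all be ones (a sign-extended VA).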
908 return low_range ?
909 bits(currState->vaddr, top_bit, tsz) != 0x0 :
910 bits(currState->vaddr, top_bit, tsz) != mask(top_bit - tsz + 1);
911}
912
913bool
914TableWalker::checkAddrSizeFaultAArch64(Addr addr, int pa_range)
915{
916 return (pa_range != _physAddrRange &&
917 bits(addr, _physAddrRange - 1, pa_range));
918}
919
920Fault
921TableWalker::processWalkAArch64()
922{
923 assert(currState->aarch64);
924
925 DPRINTF(TLB, "Beginning table walk for address %#llx, TCR: %#llx\n",
926 currState->vaddr_tainted, currState->tcr);
927
928 stats.walkWaitTime.sample(curTick() - currState->startTime);
929
930 // Determine TTBR, table size, granule size and phys. address range
931 Addr ttbr = 0;
932 int tsz = 0, ps = 0;
933 GrainSize tg = Grain4KB; // grain size computed from tg* field
934 bool fault = false;
935
936 int top_bit = computeAddrTop(currState->tc,
937 bits(currState->vaddr, 55),
939 currState->tcr,
940 currState->el);
941
942 bool vaddr_fault = false;
943 switch (currState->regime) {
945 if (isStage2) {
946 if (currState->ss == SecurityState::Secure &&
947 currState->ipaSpace == PASpace::Secure) {
948 // Secure EL1&0 Secure IPA
949 DPRINTF(TLB, " - Selecting VSTTBR_EL2 (AArch64 stage 2)\n");
950 ttbr = currState->tc->readMiscReg(MISCREG_VSTTBR_EL2);
951 currState->secureLookup = !currState->vtcr.sw;
952 } else {
953 // Secure EL1&0 NonSecure IPA or NonSecure EL1&0
954 DPRINTF(TLB, " - Selecting VTTBR_EL2 (AArch64 stage 2)\n");
955 ttbr = currState->tc->readMiscReg(MISCREG_VTTBR_EL2);
956 currState->secureLookup = currState->ss == SecurityState::Secure ?
957 !currState->vtcr.nsw : // Secure EL1&0 NonSecure IPA
958 false; // NonSecure EL1&0
959 }
960 tsz = 64 - currState->vtcr.t0sz64;
961 tg = GrainMap_tg0[currState->vtcr.tg0];
962
963 ps = currState->vtcr.ps;
964 currState->sh = currState->vtcr.sh0;
965 currState->irgn = currState->vtcr.irgn0;
966 currState->orgn = currState->vtcr.orgn0;
967 } else {
968 switch (bits(currState->vaddr, top_bit)) {
969 case 0:
970 DPRINTF(TLB, " - Selecting TTBR0_EL1 (AArch64)\n");
971 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL1);
972 tsz = 64 - currState->tcr.t0sz;
973 tg = GrainMap_tg0[currState->tcr.tg0];
974 currState->hpd = currState->tcr.hpd0;
975 currState->sh = currState->tcr.sh0;
976 currState->irgn = currState->tcr.irgn0;
977 currState->orgn = currState->tcr.orgn0;
978 vaddr_fault = s1TxSzFault(tg, currState->tcr.t0sz) ||
979 checkVAOutOfRange(currState->vaddr, top_bit, tsz, true);
980
981 if (vaddr_fault || currState->tcr.epd0)
982 fault = true;
983 break;
984 case 0x1:
985 DPRINTF(TLB, " - Selecting TTBR1_EL1 (AArch64)\n");
986 ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL1);
987 tsz = 64 - currState->tcr.t1sz;
988 tg = GrainMap_tg1[currState->tcr.tg1];
989 currState->hpd = currState->tcr.hpd1;
990 currState->sh = currState->tcr.sh1;
991 currState->irgn = currState->tcr.irgn1;
992 currState->orgn = currState->tcr.orgn1;
993 vaddr_fault = s1TxSzFault(tg, currState->tcr.t1sz) ||
994 checkVAOutOfRange(currState->vaddr, top_bit, tsz, false);
995
996 if (vaddr_fault || currState->tcr.epd1)
997 fault = true;
998 break;
999 default:
1000 // top two bytes must be all 0s or all 1s, else invalid addr
1001 fault = true;
1002 }
1003 ps = currState->tcr.ips;
1004 }
1005 break;
1008 switch(bits(currState->vaddr, top_bit)) {
1009 case 0:
1010 DPRINTF(TLB, " - Selecting TTBR0_EL2 (AArch64)\n");
1011 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL2);
1012 tsz = 64 - currState->tcr.t0sz;
1013 tg = GrainMap_tg0[currState->tcr.tg0];
1014 currState->hpd = currState->hcr.e2h ?
1015 currState->tcr.hpd0 : currState->tcr.hpd;
1016 currState->sh = currState->tcr.sh0;
1017 currState->irgn = currState->tcr.irgn0;
1018 currState->orgn = currState->tcr.orgn0;
1019 vaddr_fault = s1TxSzFault(tg, currState->tcr.t0sz) ||
1020 checkVAOutOfRange(currState->vaddr, top_bit, tsz, true);
1021
1022 if (vaddr_fault || (currState->hcr.e2h && currState->tcr.epd0))
1023 fault = true;
1024 break;
1025
1026 case 0x1:
1027 DPRINTF(TLB, " - Selecting TTBR1_EL2 (AArch64)\n");
1028 ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL2);
1029 tsz = 64 - currState->tcr.t1sz;
1030 tg = GrainMap_tg1[currState->tcr.tg1];
1031 currState->hpd = currState->tcr.hpd1;
1032 currState->sh = currState->tcr.sh1;
1033 currState->irgn = currState->tcr.irgn1;
1034 currState->orgn = currState->tcr.orgn1;
1035 vaddr_fault = s1TxSzFault(tg, currState->tcr.t1sz) ||
1036 checkVAOutOfRange(currState->vaddr, top_bit, tsz, false);
1037
1038 if (vaddr_fault || !currState->hcr.e2h || currState->tcr.epd1)
1039 fault = true;
1040 break;
1041
1042 default:
1043 // invalid addr if top two bytes are not all 0s
1044 fault = true;
1045 }
1046 ps = currState->hcr.e2h ? currState->tcr.ips: currState->tcr.ps;
1047 break;
1049 switch(bits(currState->vaddr, top_bit)) {
1050 case 0:
1051 DPRINTF(TLB, " - Selecting TTBR0_EL3 (AArch64)\n");
1052 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL3);
1053 tsz = 64 - currState->tcr.t0sz;
1054 tg = GrainMap_tg0[currState->tcr.tg0];
1055 currState->hpd = currState->tcr.hpd;
1056 currState->sh = currState->tcr.sh0;
1057 currState->irgn = currState->tcr.irgn0;
1058 currState->orgn = currState->tcr.orgn0;
1059 vaddr_fault = s1TxSzFault(tg, currState->tcr.t0sz) ||
1060 checkVAOutOfRange(currState->vaddr, top_bit, tsz, true);
1061
1062 if (vaddr_fault)
1063 fault = true;
1064 break;
1065 default:
1066 // invalid addr if top two bytes are not all 0s
1067 fault = true;
1068 }
1069 ps = currState->tcr.ps;
1070 break;
1071 }
1072
1073 currState->isUncacheable = currState->irgn == 0 ||
1074 currState->orgn == 0;
1075
1076 const bool is_atomic = currState->req->isAtomic();
1077
1078 if (fault) {
1079 if (currState->isFetch) {
1080 return std::make_shared<PrefetchAbort>(
1081 currState->vaddr_tainted,
1082 ArmFault::TranslationLL + LookupLevel::L0, isStage2,
1084 } else {
1085 return std::make_shared<DataAbort>(
1086 currState->vaddr_tainted,
1088 is_atomic ? false : currState->isWrite,
1089 ArmFault::TranslationLL + LookupLevel::L0,
1091 }
1092 }
1093
1094 if (tg == ReservedGrain) {
1095 warn_once("Reserved granule size requested; gem5's IMPLEMENTATION "
1096 "DEFINED behavior takes this to mean 4KB granules\n");
1097 tg = Grain4KB;
1098 }
1099
1100 // Clamp to lower limit
1101 int pa_range = decodePhysAddrRange64(ps);
1102 if (pa_range > _physAddrRange) {
1103 currState->physAddrRange = _physAddrRange;
1104 } else {
1105 currState->physAddrRange = pa_range;
1106 }
1107
1108 auto [table_addr, desc_addr, start_lookup_level] = walkAddresses(
1109 ttbr, tg, tsz, pa_range);
1110
1111 // Determine physical address size and raise an Address Size Fault if
1112 // necessary
1113 if (checkAddrSizeFaultAArch64(table_addr, currState->physAddrRange)) {
1114 DPRINTF(TLB, "Address size fault before any lookup\n");
1115 if (currState->isFetch)
1116 return std::make_shared<PrefetchAbort>(
1117 currState->vaddr_tainted,
1118 ArmFault::AddressSizeLL + start_lookup_level,
1119 isStage2,
1121 else
1122 return std::make_shared<DataAbort>(
1123 currState->vaddr_tainted,
1125 is_atomic ? false : currState->isWrite,
1126 ArmFault::AddressSizeLL + start_lookup_level,
1127 isStage2,
1129 }
1130
1131    Request::Flags flag = Request::PT_WALK;
1132    if (uncacheableWalk()) {
1133        flag.set(Request::UNCACHEABLE);
1134    }
1135
1136 if (currState->secureLookup) {
1137 flag.set(Request::SECURE);
1138 }
1139
1140 currState->longDesc.lookupLevel = start_lookup_level;
1141 currState->longDesc.aarch64 = true;
1142 currState->longDesc.grainSize = tg;
1143 currState->longDesc.physAddrRange = _physAddrRange;
1144 currState->longDesc.isStage2 = isStage2;
1145
1146 assert(start_lookup_level < LookupLevel::Num_ArmLookupLevel);
1147
1148 fetchDescriptor(desc_addr, currState->longDesc,
1149 sizeof(uint64_t), flag, start_lookup_level,
1150 LongDescEventByLevel[start_lookup_level],
1152
1153 return currState->fault;
1154}
1155
1156std::tuple<Addr, Addr, TableWalker::LookupLevel>
1157TableWalker::walkAddresses(Addr ttbr, GrainSize tg, int tsz, int pa_range)
1158{
1159 const auto* ptops = getPageTableOps(tg);
1160
1161 LookupLevel first_level = LookupLevel::Num_ArmLookupLevel;
1162 Addr table_addr = 0;
1163 Addr desc_addr = 0;
1164
1165 if (currState->walkEntry.valid) {
1166 // WalkCache hit
1167 TlbEntry* entry = &currState->walkEntry;
1168 DPRINTF(PageTableWalker,
1169 "Walk Cache hit: va=%#x, level=%d, table address=%#x\n",
1170 currState->vaddr, entry->lookupLevel, entry->pfn);
1171
1172 if (currState->longDescData.has_value()) {
1173 currState->longDescData->xnTable = entry->xn;
1174 currState->longDescData->pxnTable = entry->pxn;
1175 currState->longDescData->rwTable = bits(entry->ap, 1);
1176 currState->longDescData->userTable = bits(entry->ap, 0);
1177 }
1178
1179 table_addr = entry->pfn;
1180 first_level = (LookupLevel)(entry->lookupLevel + 1);
1181 } else {
1182 // WalkCache miss
1183 first_level = isStage2 ?
1184 ptops->firstS2Level(currState->vtcr.sl0) :
1185 ptops->firstLevel(64 - tsz);
1186 panic_if(first_level == LookupLevel::Num_ArmLookupLevel,
1187 "Table walker couldn't find lookup level\n");
1188
1189 int stride = tg - 3;
1190 int base_addr_lo = 3 + tsz - stride * (3 - first_level) - tg;
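        // With a 4KB granule (tg == 12) each level resolves stride == 9 VA
        // bits; base_addr_lo is the lowest significant bit of the table base
        // held in the TTBR, e.g. 12 for a 48-bit input range starting at L0.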
1191
1192 if (pa_range == 52) {
1193 int z = (base_addr_lo < 6) ? 6 : base_addr_lo;
1194 table_addr = mbits(ttbr, 47, z);
1195 table_addr |= (bits(ttbr, 5, 2) << 48);
1196 } else {
1197 table_addr = mbits(ttbr, 47, base_addr_lo);
1198 }
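            // With a 52-bit PA (FEAT_LPA) the table base above bit 47 is not
            // held contiguously: TTBR bits [5:2] supply PA[51:48].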
1199 }
1200
1201 desc_addr = table_addr + ptops->index(currState->vaddr, first_level, tsz);
1202
1203 return std::make_tuple(table_addr, desc_addr, first_level);
1204}
1205
1206void
1207TableWalker::memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr,
1208    uint8_t texcb, bool s)
1209{
1210    // Note: tc and sctlr local variables are hiding the tc and sctlr class
1211 // variables
1212 DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s);
1213 te.shareable = false; // default value
1214 te.nonCacheable = false;
1215 te.outerShareable = false;
1216 if (sctlr.tre == 0 || ((sctlr.tre == 1) && (sctlr.m == 0))) {
1217 switch(texcb) {
1218    case 0: // Strongly-ordered
1219 te.nonCacheable = true;
1221 te.shareable = true;
1222 te.innerAttrs = 1;
1223 te.outerAttrs = 0;
1224 break;
1225 case 1: // Shareable Device
1226 te.nonCacheable = true;
1228 te.shareable = true;
1229 te.innerAttrs = 3;
1230 te.outerAttrs = 0;
1231 break;
1232 case 2: // Outer and Inner Write-Through, no Write-Allocate
1234 te.shareable = s;
1235 te.innerAttrs = 6;
1236 te.outerAttrs = bits(texcb, 1, 0);
1237 break;
1238 case 3: // Outer and Inner Write-Back, no Write-Allocate
1240 te.shareable = s;
1241 te.innerAttrs = 7;
1242 te.outerAttrs = bits(texcb, 1, 0);
1243 break;
1244 case 4: // Outer and Inner Non-cacheable
1245 te.nonCacheable = true;
1247 te.shareable = s;
1248 te.innerAttrs = 0;
1249 te.outerAttrs = bits(texcb, 1, 0);
1250 break;
1251 case 5: // Reserved
1252 panic("Reserved texcb value!\n");
1253 break;
1254 case 6: // Implementation Defined
1255 panic("Implementation-defined texcb value!\n");
1256 break;
1257 case 7: // Outer and Inner Write-Back, Write-Allocate
1259 te.shareable = s;
1260 te.innerAttrs = 5;
1261 te.outerAttrs = 1;
1262 break;
1263 case 8: // Non-shareable Device
1264 te.nonCacheable = true;
1266 te.shareable = false;
1267 te.innerAttrs = 3;
1268 te.outerAttrs = 0;
1269 break;
1270 case 9 ... 15: // Reserved
1271 panic("Reserved texcb value!\n");
1272 break;
1273 case 16 ... 31: // Cacheable Memory
1275 te.shareable = s;
1276 if (bits(texcb, 1,0) == 0 || bits(texcb, 3,2) == 0)
1277 te.nonCacheable = true;
1278 te.innerAttrs = bits(texcb, 1, 0);
1279 te.outerAttrs = bits(texcb, 3, 2);
1280 break;
1281 default:
1282 panic("More than 32 states for 5 bits?\n");
1283 }
1284 } else {
1285 assert(tc);
1286 PRRR prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR,
1288 NMRR nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR,
1290 DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr);
1291 uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0;
1292 switch(bits(texcb, 2,0)) {
1293 case 0:
1294 curr_tr = prrr.tr0;
1295 curr_ir = nmrr.ir0;
1296 curr_or = nmrr.or0;
1297 te.outerShareable = (prrr.nos0 == 0);
1298 break;
1299 case 1:
1300 curr_tr = prrr.tr1;
1301 curr_ir = nmrr.ir1;
1302 curr_or = nmrr.or1;
1303 te.outerShareable = (prrr.nos1 == 0);
1304 break;
1305 case 2:
1306 curr_tr = prrr.tr2;
1307 curr_ir = nmrr.ir2;
1308 curr_or = nmrr.or2;
1309 te.outerShareable = (prrr.nos2 == 0);
1310 break;
1311 case 3:
1312 curr_tr = prrr.tr3;
1313 curr_ir = nmrr.ir3;
1314 curr_or = nmrr.or3;
1315 te.outerShareable = (prrr.nos3 == 0);
1316 break;
1317 case 4:
1318 curr_tr = prrr.tr4;
1319 curr_ir = nmrr.ir4;
1320 curr_or = nmrr.or4;
1321 te.outerShareable = (prrr.nos4 == 0);
1322 break;
1323 case 5:
1324 curr_tr = prrr.tr5;
1325 curr_ir = nmrr.ir5;
1326 curr_or = nmrr.or5;
1327 te.outerShareable = (prrr.nos5 == 0);
1328 break;
1329 case 6:
1330 panic("Imp defined type\n");
1331 case 7:
1332 curr_tr = prrr.tr7;
1333 curr_ir = nmrr.ir7;
1334 curr_or = nmrr.or7;
1335 te.outerShareable = (prrr.nos7 == 0);
1336 break;
1337 }
1338
1339 switch(curr_tr) {
1340 case 0:
1341 DPRINTF(TLBVerbose, "StronglyOrdered\n");
1343 te.nonCacheable = true;
1344 te.innerAttrs = 1;
1345 te.outerAttrs = 0;
1346 te.shareable = true;
1347 break;
1348 case 1:
1349 DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n",
1350 prrr.ds1, prrr.ds0, s);
1352 te.nonCacheable = true;
1353 te.innerAttrs = 3;
1354 te.outerAttrs = 0;
1355 if (prrr.ds1 && s)
1356 te.shareable = true;
1357 if (prrr.ds0 && !s)
1358 te.shareable = true;
1359 break;
1360 case 2:
1361 DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n",
1362 prrr.ns1, prrr.ns0, s);
1364 if (prrr.ns1 && s)
1365 te.shareable = true;
1366 if (prrr.ns0 && !s)
1367 te.shareable = true;
1368 break;
1369 case 3:
1370 panic("Reserved type");
1371 }
1372
1373 if (te.mtype == TlbEntry::MemoryType::Normal){
1374 switch(curr_ir) {
1375 case 0:
1376 te.nonCacheable = true;
1377 te.innerAttrs = 0;
1378 break;
1379 case 1:
1380 te.innerAttrs = 5;
1381 break;
1382 case 2:
1383 te.innerAttrs = 6;
1384 break;
1385 case 3:
1386 te.innerAttrs = 7;
1387 break;
1388 }
1389
1390 switch(curr_or) {
1391 case 0:
1392 te.nonCacheable = true;
1393 te.outerAttrs = 0;
1394 break;
1395 case 1:
1396 te.outerAttrs = 1;
1397 break;
1398 case 2:
1399 te.outerAttrs = 2;
1400 break;
1401 case 3:
1402 te.outerAttrs = 3;
1403 break;
1404 }
1405 }
1406 }
1407 DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, "
1408 "outerAttrs: %d\n",
1409 te.shareable, te.innerAttrs, te.outerAttrs);
1410 te.setAttributes(false);
1411}
1412
1413void
1414TableWalker::memAttrsLPAE(ThreadContext *tc, TlbEntry &te,
1415    LongDescriptor &l_descriptor)
1416{
1417 assert(release->has(ArmExtension::LPAE));
1418
1419 uint8_t attr;
1420 uint8_t sh = l_descriptor.sh();
1421 // Different format and source of attributes if this is a stage 2
1422 // translation
1423 if (isStage2) {
1424 attr = l_descriptor.memAttr();
1425 uint8_t attr_3_2 = (attr >> 2) & 0x3;
1426 uint8_t attr_1_0 = attr & 0x3;
1427
1428 DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh);
1429
1430 if (attr_3_2 == 0) {
1431 te.mtype = attr_1_0 == 0 ? TlbEntry::MemoryType::StronglyOrdered
1433 te.outerAttrs = 0;
1434 te.innerAttrs = attr_1_0 == 0 ? 1 : 3;
1435 te.nonCacheable = true;
1436 } else {
1438 te.outerAttrs = attr_3_2 == 1 ? 0 :
1439 attr_3_2 == 2 ? 2 : 1;
1440 te.innerAttrs = attr_1_0 == 1 ? 0 :
1441 attr_1_0 == 2 ? 6 : 5;
1442 te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1);
1443 }
1444 } else {
1445 uint8_t attrIndx = l_descriptor.attrIndx();
1446
1447 // LPAE always uses remapping of memory attributes, irrespective of the
1448 // value of SCTLR.TRE
1449 MiscRegIndex reg = attrIndx & 0x4 ? MISCREG_MAIR1 : MISCREG_MAIR0;
1450 int reg_as_int = snsBankedIndex(reg, currState->tc,
1452 uint32_t mair = currState->tc->readMiscReg(reg_as_int);
1453 attr = (mair >> (8 * (attrIndx % 4))) & 0xff;
1454 uint8_t attr_7_4 = bits(attr, 7, 4);
1455 uint8_t attr_3_0 = bits(attr, 3, 0);
1456 DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx, sh, attr);
1457
1458 // Note: the memory subsystem only cares about the 'cacheable' memory
1459 // attribute. The other attributes are only used to fill the PAR register
1460        // accordingly, to provide the illusion of full support
1461 te.nonCacheable = false;
1462
1463 switch (attr_7_4) {
1464 case 0x0:
1465 // Strongly-ordered or Device memory
1466 if (attr_3_0 == 0x0)
1468 else if (attr_3_0 == 0x4)
1470 else
1471 panic("Unpredictable behavior\n");
1472 te.nonCacheable = true;
1473 te.outerAttrs = 0;
1474 break;
1475 case 0x4:
1476 // Normal memory, Outer Non-cacheable
1478 te.outerAttrs = 0;
1479 if (attr_3_0 == 0x4)
1480 // Inner Non-cacheable
1481 te.nonCacheable = true;
1482 else if (attr_3_0 < 0x8)
1483 panic("Unpredictable behavior\n");
1484 break;
1485 case 0x8:
1486 case 0x9:
1487 case 0xa:
1488 case 0xb:
1489 case 0xc:
1490 case 0xd:
1491 case 0xe:
1492 case 0xf:
1493 if (attr_7_4 & 0x4) {
1494 te.outerAttrs = (attr_7_4 & 1) ? 1 : 3;
1495 } else {
1496 te.outerAttrs = 0x2;
1497 }
1498 // Normal memory, Outer Cacheable
1500 if (attr_3_0 != 0x4 && attr_3_0 < 0x8)
1501 panic("Unpredictable behavior\n");
1502 break;
1503 default:
1504 panic("Unpredictable behavior\n");
1505 break;
1506 }
1507
1508 switch (attr_3_0) {
1509 case 0x0:
1510 te.innerAttrs = 0x1;
1511 break;
1512 case 0x4:
1513 te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0;
1514 break;
1515 case 0x8:
1516 case 0x9:
1517 case 0xA:
1518 case 0xB:
1519 te.innerAttrs = 6;
1520 break;
1521 case 0xC:
1522 case 0xD:
1523 case 0xE:
1524 case 0xF:
1525 te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7;
1526 break;
1527 default:
1528 panic("Unpredictable behavior\n");
1529 break;
1530 }
1531 }
1532
1533 te.outerShareable = sh == 2;
1534 te.shareable = (sh & 0x2) ? true : false;
1535 te.setAttributes(true);
1536 te.attributes |= (uint64_t) attr << 56;
1537}
1538
1539bool
1540TableWalker::uncacheableFromAttrs(uint8_t attrs)
1541{
1542 return !bits(attrs, 2) || // Write-through
1543 attrs == 0b0100; // NonCacheable
1544}
1545
1546void
1547TableWalker::memAttrsAArch64(ThreadContext *tc, TlbEntry &te,
1548    LongDescriptor &l_descriptor)
1549{
1550 uint8_t attr;
1551 uint8_t attr_hi;
1552 uint8_t attr_lo;
1553 uint8_t sh = l_descriptor.sh();
1554
1555 if (isStage2) {
1556 attr = l_descriptor.memAttr();
1557 uint8_t attr_hi = (attr >> 2) & 0x3;
1558 uint8_t attr_lo = attr & 0x3;
1559
1560 DPRINTF(TLBVerbose, "memAttrsAArch64 MemAttr:%#x sh:%#x\n", attr, sh);
1561
1562 if (attr_hi == 0) {
1563 te.mtype = attr_lo == 0 ? TlbEntry::MemoryType::StronglyOrdered
1565 te.outerAttrs = 0;
1566 te.innerAttrs = attr_lo == 0 ? 1 : 3;
1567 te.nonCacheable = true;
1568 } else {
1570 te.outerAttrs = attr_hi == 1 ? 0 :
1571 attr_hi == 2 ? 2 : 1;
1572 te.innerAttrs = attr_lo == 1 ? 0 :
1573 attr_lo == 2 ? 6 : 5;
1574            // Treat write-through memory as uncacheable; this is safe but
1575            // not optimal for performance.
1576 te.nonCacheable = (attr_hi == 1) || (attr_hi == 2) ||
1577 (attr_lo == 1) || (attr_lo == 2);
1578
1579            // To be used when merging stage 1 and stage 2 attributes
1580 te.xs = !l_descriptor.fnxs();
1581 }
1582 } else {
1583 uint8_t attrIndx = l_descriptor.attrIndx();
1584
1585 DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh);
1586
1587 // Select MAIR
1588 uint64_t mair;
1589 switch (currState->regime) {
1591 mair = tc->readMiscReg(MISCREG_MAIR_EL1);
1592 break;
1595 mair = tc->readMiscReg(MISCREG_MAIR_EL2);
1596 break;
1598 mair = tc->readMiscReg(MISCREG_MAIR_EL3);
1599 break;
1600 default:
1601 panic("Invalid exception level");
1602 break;
1603 }
1604
1605 // Select attributes
1606 attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
1607 attr_lo = bits(attr, 3, 0);
1608 attr_hi = bits(attr, 7, 4);
1609
1610        // Treat write-through memory as uncacheable; this is safe but
1611        // not optimal for performance.
1612 switch (attr) {
1613 case 0b00000000 ... 0b00001111: // Device Memory
1615 te.nonCacheable = true;
1616 te.xs = !bits(attr, 0);
1617 break;
1618 case 0b01000000: // Normal memory, Non-cacheable
1620 te.nonCacheable = true;
1621 te.xs = false;
1622 break;
1623 case 0b10100000: // Normal memory, Write-through
1625 te.nonCacheable = true;
1626 te.xs = false;
1627 break;
1628 default:
1630 te.nonCacheable = uncacheableFromAttrs(attr_hi) ||
1631 uncacheableFromAttrs(attr_lo);
1632 // XS is 0 only for write-back regions (cacheable)
1633 te.xs = te.nonCacheable;
1634 break;
1635 }
1636
1637 te.shareable = sh == 2;
1638 te.outerShareable = (sh & 0x2) ? true : false;
1639 // Attributes formatted according to the 64-bit PAR
1640 te.attributes = ((uint64_t) attr << 56) |
1641 (1 << 11) | // LPAE bit
1642 (te.ns << 9) | // NS bit
1643 (sh << 7);
1644 }
1645}
1646
1647void
1648TableWalker::memAttrsWalkAArch64(TlbEntry &te)
1649{
1651 if (uncacheableWalk()) {
1652 te.shareable = 3;
1653 te.outerAttrs = 0;
1654 te.innerAttrs = 0;
1655 te.nonCacheable = true;
1656 } else {
1657 te.shareable = currState->sh;
1658 te.outerAttrs = currState->orgn;
1659 te.innerAttrs = currState->irgn;
1660 te.nonCacheable = (te.outerAttrs == 0 || te.outerAttrs == 2) &&
1661 (te.innerAttrs == 0 || te.innerAttrs == 2);
1662 }
1663
1664 // XS is 0 only for write-back regions (cacheable)
1665 te.xs = te.nonCacheable;
1666}
1667
1668void
1669TableWalker::doL1Descriptor()
1670{
1671 if (currState->fault != NoFault) {
1672 return;
1673 }
1674
1675 currState->l1Desc.data = htog(currState->l1Desc.data,
1676 byteOrder(currState->tc));
1677
1678 DPRINTF(TLB, "L1 descriptor for %#x is %#x\n",
1679 currState->vaddr_tainted, currState->l1Desc.data);
1680 TlbEntry te;
1681
1682 const bool is_atomic = currState->req->isAtomic();
1683
1684 switch (currState->l1Desc.type()) {
1687 if (!currState->timing) {
1688 currState->tc = NULL;
1689 currState->req = NULL;
1690 }
1691 DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
1692 if (currState->isFetch)
1693 currState->fault =
1694 std::make_shared<PrefetchAbort>(
1695 currState->vaddr_tainted,
1696 ArmFault::TranslationLL + LookupLevel::L1,
1697 isStage2,
1699 else
1700 currState->fault =
1701 std::make_shared<DataAbort>(
1702 currState->vaddr_tainted,
1704 is_atomic ? false : currState->isWrite,
1705 ArmFault::TranslationLL + LookupLevel::L1, isStage2,
1707 return;
1709 if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) {
1714
1715 currState->fault = std::make_shared<DataAbort>(
1716 currState->vaddr_tainted,
1717 currState->l1Desc.domain(),
1718 is_atomic ? false : currState->isWrite,
1719 ArmFault::AccessFlagLL + LookupLevel::L1,
1720 isStage2,
1722 }
1723 if (currState->l1Desc.supersection()) {
1724 panic("Haven't implemented supersections\n");
1725 }
1726 insertTableEntry(currState->l1Desc, false);
1727 return;
1729 {
1730 Addr l2desc_addr;
1731 l2desc_addr = currState->l1Desc.l2Addr() |
1732 (bits(currState->vaddr, 19, 12) << 2);
1733 DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n",
1734 l2desc_addr, currState->ss == SecurityState::Secure ?
1735 "s" : "ns");
1736
1738
1739 if (currState->sctlr.c == 0 || currState->isUncacheable) {
1740                flag.set(Request::UNCACHEABLE);
1741            }
1742
1743 if (currState->secureLookup)
1744 flag.set(Request::SECURE);
1745
1746            fetchDescriptor(
1747                l2desc_addr, currState->l2Desc,
1748 sizeof(uint32_t), flag, LookupLevel::L2,
1751
1752 currState->delayed = currState->timing;
1753
1754 return;
1755 }
1756 default:
1757 panic("A new type in a 2 bit field?\n");
1758 }
1759}
1760
1761Fault
1762TableWalker::generateLongDescFault(ArmFault::FaultSource src)
1763{
1764 if (currState->isFetch) {
1765 return std::make_shared<PrefetchAbort>(
1766 currState->vaddr_tainted,
1767 src + currState->longDesc.lookupLevel,
1768 isStage2,
1770 } else {
1771 return std::make_shared<DataAbort>(
1772 currState->vaddr_tainted,
1774 currState->req->isAtomic() ? false : currState->isWrite,
1775 src + currState->longDesc.lookupLevel,
1776 isStage2,
1778 }
1779}
1780
1781void
1782TableWalker::doLongDescriptor()
1783{
1784 if (currState->fault != NoFault) {
1785 return;
1786 }
1787
1788 currState->longDesc.data = htog(currState->longDesc.data,
1789 byteOrder(currState->tc));
1790
1791 DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n",
1792 currState->longDesc.lookupLevel, currState->vaddr_tainted,
1793 currState->longDesc.data,
1794 currState->aarch64 ? "AArch64" : "long-desc.");
1795
1796 if ((currState->longDesc.type() == LongDescriptor::Block) ||
1797 (currState->longDesc.type() == LongDescriptor::Page)) {
1798 DPRINTF(PageTableWalker, "Analyzing L%d descriptor: %#llx, pxn: %d, "
1799 "xn: %d, ap: %d, piindex: %d, af: %d, type: %d\n",
1800 currState->longDesc.lookupLevel,
1801 currState->longDesc.data,
1802 currState->longDesc.pxn(),
1803 currState->longDesc.xn(),
1804 currState->longDesc.ap(),
1805 currState->longDesc.piindex(),
1806 currState->longDesc.af(),
1807 currState->longDesc.type());
1808 } else {
1809 DPRINTF(PageTableWalker, "Analyzing L%d descriptor: %#llx, type: %d\n",
1810 currState->longDesc.lookupLevel,
1811 currState->longDesc.data,
1812 currState->longDesc.type());
1813 }
1814
1815 TlbEntry te;
1816
1817 switch (currState->longDesc.type()) {
1819 DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
1820 currState->longDesc.lookupLevel,
1821 ArmFault::TranslationLL + currState->longDesc.lookupLevel);
1822
1824 if (!currState->timing) {
1825 currState->tc = NULL;
1826 currState->req = NULL;
1827 }
1828 return;
1829
1832 {
1833 auto fault_source = ArmFault::FaultSourceInvalid;
1834 // Check for address size fault
1835 if (checkAddrSizeFaultAArch64(currState->longDesc.paddr(),
1836 currState->physAddrRange)) {
1837
1838 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1839 currState->longDesc.lookupLevel);
1840 fault_source = ArmFault::AddressSizeLL;
1841
1842 // Check for access fault
1843 } else if (currState->longDesc.af() == 0) {
1844
1845 DPRINTF(TLB, "L%d descriptor causing Access Fault\n",
1846 currState->longDesc.lookupLevel);
1847 fault_source = ArmFault::AccessFlagLL;
1848 }
1849
1850 if (fault_source != ArmFault::FaultSourceInvalid) {
1851 currState->fault = generateLongDescFault(fault_source);
1852 } else {
1853 insertTableEntry(currState->longDesc, true);
1854 }
1855 }
1856 return;
1858 {
1859 // Set hierarchical permission flags
1860 if (!isStage2) {
1861 currState->secureLookup = currState->secureLookup &&
1862 currState->longDesc.secureTable();
1863 }
1864 currState->longDescData->rwTable =
1865 currState->longDescData->rwTable &&
1866 (currState->longDesc.rwTable() || currState->hpd);
1867 currState->longDescData->userTable =
1868 currState->longDescData->userTable &&
1869 (currState->longDesc.userTable() || currState->hpd);
1870 currState->longDescData->xnTable =
1871 currState->longDescData->xnTable ||
1872 (currState->longDesc.xnTable() && !currState->hpd);
1873 currState->longDescData->pxnTable =
1874 currState->longDescData->pxnTable ||
1875 (currState->longDesc.pxnTable() && !currState->hpd);
1876
1877 // Set up next level lookup
1878 Addr next_desc_addr = currState->longDesc.nextDescAddr(
1879 currState->vaddr);
1880
1881 DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
1882 currState->longDesc.lookupLevel,
1883 currState->longDesc.lookupLevel + 1,
1884 next_desc_addr,
1885 currState->secureLookup ? "s" : "ns");
1886
1887 // Check for address size fault
1888 if (currState->aarch64 && checkAddrSizeFaultAArch64(
1889 next_desc_addr, currState->physAddrRange)) {
1890 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1891 currState->longDesc.lookupLevel);
1892
1895 return;
1896 }
1897
1898 if (mmu->hasWalkCache()) {
1900 }
1901
1903 if (currState->secureLookup)
1904 flag.set(Request::SECURE);
1905
1906 if (currState->sctlr.c == 0 || currState->isUncacheable) {
1907                flag.set(Request::UNCACHEABLE);
1908            }
1909
1910 LookupLevel L = currState->longDesc.lookupLevel =
1911 (LookupLevel) (currState->longDesc.lookupLevel + 1);
1912 Event *event = NULL;
1913 switch (L) {
1914 case LookupLevel::L1:
1915 assert(currState->aarch64);
1916 [[fallthrough]];
1917 case LookupLevel::L2:
1918 case LookupLevel::L3:
1919 event = LongDescEventByLevel[L];
1920 break;
1921 default:
1922 panic("Wrong lookup level in table walk\n");
1923 break;
1924 }
1925
1926            fetchDescriptor(
1927                next_desc_addr, currState->longDesc,
1928 sizeof(uint64_t), flag, L, event,
1930
1931 currState->delayed = currState->timing;
1932 }
1933 return;
1934 default:
1935 panic("A new type in a 2 bit field?\n");
1936 }
1937}
1938
1939void
1941{
1942 if (currState->fault != NoFault) {
1943 return;
1944 }
1945
1946 currState->l2Desc.data = htog(currState->l2Desc.data,
1947 byteOrder(currState->tc));
1948
1949 DPRINTF(TLB, "L2 descriptor for %#x is %#x\n",
1950 currState->vaddr_tainted, currState->l2Desc.data);
1951 TlbEntry te;
1952
1953 const bool is_atomic = currState->req->isAtomic();
1954
1955 if (currState->l2Desc.invalid()) {
1956 DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
1957 if (!currState->timing) {
1958 currState->tc = NULL;
1959 currState->req = NULL;
1960 }
1961 if (currState->isFetch)
1962 currState->fault = std::make_shared<PrefetchAbort>(
1963 currState->vaddr_tainted,
1964 ArmFault::TranslationLL + LookupLevel::L2,
1965 isStage2,
1967 else
1968 currState->fault = std::make_shared<DataAbort>(
1969 currState->vaddr_tainted, currState->l1Desc.domain(),
1970 is_atomic ? false : currState->isWrite,
1971 ArmFault::TranslationLL + LookupLevel::L2,
1972 isStage2,
1974 return;
1975 }
1976
1977 if (currState->sctlr.afe && bits(currState->l2Desc.ap(), 0) == 0) {
1981 DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
1982 currState->sctlr.afe, currState->l2Desc.ap());
1983
1984 currState->fault = std::make_shared<DataAbort>(
1985 currState->vaddr_tainted,
1987 is_atomic ? false : currState->isWrite,
1988 ArmFault::AccessFlagLL + LookupLevel::L2, isStage2,
1990 }
1991
1992 insertTableEntry(currState->l2Desc, false);
1993}
1994
1995void
1996TableWalker::doL1DescriptorWrapper()
1997{
1998 currState = stateQueues[LookupLevel::L1].front();
1999 currState->delayed = false;
2000 // if there's a stage2 translation object we don't need it any more
2001 if (currState->stage2Tran) {
2002 delete currState->stage2Tran;
2003 currState->stage2Tran = NULL;
2004 }
2005
2006
2007 DPRINTF(PageTableWalker, "L1 Desc object host addr: %p\n",
2008 &currState->l1Desc.data);
2009 DPRINTF(PageTableWalker, "L1 Desc object data: %08x\n",
2010 currState->l1Desc.data);
2011
2012 DPRINTF(PageTableWalker, "calling doL1Descriptor for vaddr:%#x\n",
2013 currState->vaddr_tainted);
2015
2016 stateQueues[LookupLevel::L1].pop_front();
2017 // Check if fault was generated
2018 if (currState->fault != NoFault) {
2019 currState->transState->finish(currState->fault, currState->req,
2020 currState->tc, currState->mode);
2021 stats.walksShortTerminatedAtLevel[0]++;
2022
2023 pending = false;
2024 nextWalk(currState->tc);
2025
2026 currState->req = NULL;
2027 currState->tc = NULL;
2028 currState->delayed = false;
2029 delete currState;
2030 }
2031 else if (!currState->delayed) {
2032 // delay is not set so there is no L2 to do
2033 // Don't finish the translation if a stage 2 look up is underway
2034 stats.walkServiceTime.sample(curTick() - currState->startTime);
2035 DPRINTF(PageTableWalker, "calling translateTiming again\n");
2036
2037 mmu->translateTiming(currState->req, currState->tc,
2038 currState->transState, currState->mode,
2039 currState->tranType, isStage2);
2040
2041 stats.walksShortTerminatedAtLevel[0]++;
2042
2043 pending = false;
2044 nextWalk(currState->tc);
2045
2046 currState->req = NULL;
2047 currState->tc = NULL;
2048 currState->delayed = false;
2049 delete currState;
2050 } else {
2051 // need to do L2 descriptor
2052 stashCurrState(LookupLevel::L2);
2053 }
2054 currState = NULL;
2055}
2056
2057void
2058 TableWalker::doL2DescriptorWrapper()
2059 {
2060 currState = stateQueues[LookupLevel::L2].front();
2061 assert(currState->delayed);
2062 // if there's a stage2 translation object we don't need it any more
2063 if (currState->stage2Tran) {
2064 delete currState->stage2Tran;
2065 currState->stage2Tran = NULL;
2066 }
2067
2068 DPRINTF(PageTableWalker, "calling doL2Descriptor for vaddr:%#x\n",
2069 currState->vaddr_tainted);
2070 doL2Descriptor();
2071
2072 // Check if fault was generated
2073 if (currState->fault != NoFault) {
2074 currState->transState->finish(currState->fault, currState->req,
2075 currState->tc, currState->mode);
2076 stats.walksShortTerminatedAtLevel[1]++;
2077 } else {
2078 stats.walkServiceTime.sample(curTick() - currState->startTime);
2079 DPRINTF(PageTableWalker, "calling translateTiming again\n");
2080
2081 mmu->translateTiming(currState->req, currState->tc,
2082 currState->transState, currState->mode,
2083 currState->tranType, isStage2);
2084
2085 stats.walksShortTerminatedAtLevel[1]++;
2086 }
2087
2088
2089 stateQueues[LookupLevel::L2].pop_front();
2090 pending = false;
2091 nextWalk(currState->tc);
2092
2093 currState->req = NULL;
2094 currState->tc = NULL;
2095 currState->delayed = false;
2096
2097 delete currState;
2098 currState = NULL;
2099}
2100
2101 void
2102 TableWalker::doL0LongDescriptorWrapper()
2103 {
2104 doLongDescriptorWrapper(LookupLevel::L0);
2105 }
2106
2107 void
2108 TableWalker::doL1LongDescriptorWrapper()
2109 {
2110 doLongDescriptorWrapper(LookupLevel::L1);
2111 }
2112
2113 void
2114 TableWalker::doL2LongDescriptorWrapper()
2115 {
2116 doLongDescriptorWrapper(LookupLevel::L2);
2117 }
2118
2119 void
2120 TableWalker::doL3LongDescriptorWrapper()
2121 {
2122 doLongDescriptorWrapper(LookupLevel::L3);
2123 }
2124
2125void
2126 TableWalker::doLongDescriptorWrapper(LookupLevel curr_lookup_level)
2127 {
2128 currState = stateQueues[curr_lookup_level].front();
2129 assert(curr_lookup_level == currState->longDesc.lookupLevel);
2130 currState->delayed = false;
2131
2132 // if there's a stage2 translation object we don't need it any more
2133 if (currState->stage2Tran) {
2134 delete currState->stage2Tran;
2135 currState->stage2Tran = NULL;
2136 }
2137
2138 DPRINTF(PageTableWalker, "calling doLongDescriptor for vaddr:%#x\n",
2139 currState->vaddr_tainted);
2140 doLongDescriptor();
2141
2142 stateQueues[curr_lookup_level].pop_front();
2143
2144 if (currState->fault != NoFault) {
2145 // A fault was generated
2146 currState->transState->finish(currState->fault, currState->req,
2147 currState->tc, currState->mode);
2148
2149 pending = false;
2150 nextWalk(currState->tc);
2151
2152 currState->req = NULL;
2153 currState->tc = NULL;
2154 currState->delayed = false;
2155 delete currState;
2156 } else if (!currState->delayed) {
2157 // No additional lookups required
2158 DPRINTF(PageTableWalker, "calling translateTiming again\n");
2159 stats.walkServiceTime.sample(curTick() - currState->startTime);
2160
2161 mmu->translateTiming(currState->req, currState->tc,
2162 currState->transState, currState->mode,
2163 currState->tranType, isStage2);
2164
2165 stats.walksLongTerminatedAtLevel[(unsigned) curr_lookup_level]++;
2166
2167 pending = false;
2168 nextWalk(currState->tc);
2169
2170 currState->req = NULL;
2171 currState->tc = NULL;
2172 currState->delayed = false;
2173 delete currState;
2174 } else {
2175 if (curr_lookup_level >= LookupLevel::Num_ArmLookupLevel - 1)
2176 panic("Max. number of lookups already reached in table walk\n");
2177 // Need to perform additional lookups
2178 stashCurrState(currState->longDesc.lookupLevel);
2179 }
2180 currState = NULL;
2181}
2182
2183
2184void
2185 TableWalker::nextWalk(ThreadContext *tc)
2186 {
2187 if (pendingQueue.size())
2188 schedule(doProcessEvent, clockEdge(Cycles(1)));
2189 else
2190 completeDrain();
2191}
2192
2193void
2194 TableWalker::fetchDescriptor(Addr desc_addr,
2195 DescriptorBase &descriptor, int num_bytes,
2196 Request::Flags flags, LookupLevel lookup_level, Event *event,
2197 void (TableWalker::*doDescriptor)())
2198{
2199 uint8_t *data = descriptor.getRawPtr();
2200
2201 DPRINTF(PageTableWalker,
2202 "Fetching descriptor at address: 0x%x stage2Req: %d\n",
2203 desc_addr, currState->stage2Req);
2204
2205 // If this translation has a stage 2 then we know desc_addr is an IPA and
2206 // needs to be translated before we can access the page table. Do that
2207 // check here.
2208 if (currState->stage2Req) {
2209 Fault fault;
2210
2211 if (currState->timing) {
2212 auto *tran = new
2213 Stage2Walk(*this, data, event, currState->vaddr,
2214 currState->mode, currState->tranType);
2215 currState->stage2Tran = tran;
2216 readDataTimed(currState->tc, desc_addr, tran, num_bytes, flags);
2217 fault = tran->fault;
2218
2219 if (fault != NoFault) {
2220 currState->fault = fault;
2221 }
2222 } else {
2223 fault = readDataUntimed(currState->tc,
2224 currState->vaddr, desc_addr, data, num_bytes, flags,
2225 currState->mode,
2226 currState->tranType,
2227 currState->functional);
2228
2229 if (fault != NoFault) {
2230 currState->fault = fault;
2231 }
2232
2233 (this->*doDescriptor)();
2234 }
2235 } else {
2236 RequestPtr req = std::make_shared<Request>(
2237 desc_addr, num_bytes, flags, requestorId);
2238 req->taskId(context_switch_task_id::DMA);
2239
2240 mpamTagTableWalk(req);
2241
2242 Fault fault = testWalk(req, descriptor.domain(),
2243 lookup_level);
2244
2245 if (fault != NoFault) {
2246 currState->fault = fault;
2247 return;
2248 }
2249
2250 if (currState->timing) {
2251 port->sendTimingReq(req, data,
2252 currState->tc->getCpuPtr()->clockPeriod(), event);
2253
2254 } else if (!currState->functional) {
2255 port->sendAtomicReq(req, data,
2256 currState->tc->getCpuPtr()->clockPeriod());
2257
2258 (this->*doDescriptor)();
2259 } else {
2260 port->sendFunctionalReq(req, data);
2261 (this->*doDescriptor)();
2262 }
2263 }
2264}
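// fetchDescriptor() above is the single entry point for reading a descriptor.
// When stage2Req is set the descriptor address is an IPA, so it is first
// translated by the stage 2 MMU (via Stage2Walk/readDataTimed for timing walks,
// readDataUntimed otherwise). For a plain physical access it builds a PT_WALK
// request, runs the optional TLB test-harness check, and issues the read as a
// timing, atomic or functional access, calling the supplied doDescriptor
// handler directly on the non-timing paths.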
2265
2266void
2267 TableWalker::stashCurrState(int queue_idx)
2268 {
2269 DPRINTF(PageTableWalker, "Adding to walker fifo: "
2270 "queue size before adding: %d\n",
2271 stateQueues[queue_idx].size());
2272 stateQueues[queue_idx].push_back(currState);
2273 currState = NULL;
2274}
2275
2276void
2277 TableWalker::insertPartialTableEntry(LongDescriptor &descriptor)
2278 {
2279 const bool have_security = release->has(ArmExtension::SECURITY);
2280 TlbEntry te;
2281
2282 // Create and fill a new page table entry
2283 te.valid = true;
2284 te.longDescFormat = true;
2285 te.partial = true;
2286 // The entry is global if there is no address space identifier
2287 // to differentiate translation contexts
2288 te.global = !mmu->hasUnprivRegime(currState->regime);
2289 te.asid = currState->asid;
2290 te.vmid = currState->vmid;
2291 te.N = descriptor.offsetBits();
2292 te.tg = descriptor.grainSize;
2293 te.vpn = currState->vaddr >> te.N;
2294 te.size = (1ULL << te.N) - 1;
2295 te.pfn = descriptor.nextTableAddr();
2296 te.domain = descriptor.domain();
2297 te.lookupLevel = descriptor.lookupLevel;
2298 te.ns = !descriptor.secure(have_security, currState);
2299 te.ss = currState->ss;
2300 te.ipaSpace = currState->ipaSpace; // Used by stage2 entries only
2301 te.type = TypeTLB::unified;
2302
2303 te.regime = currState->regime;
2304
2305 te.xn = currState->longDescData->xnTable;
2306 te.pxn = currState->longDescData->pxnTable;
2307 te.ap = (currState->longDescData->rwTable << 1) |
2308 (currState->longDescData->userTable);
2309
2309
2310 memAttrsWalkAArch64(te);
2311
2312 // Debug output
2313 DPRINTF(TLB, descriptor.dbgHeader().c_str());
2314 DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
2315 te.N, te.pfn, te.size, te.global, te.valid);
2316 DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
2317 "vmid:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
2318 te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid,
2319 te.nonCacheable, te.ns);
2320 DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
2321 descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
2322 descriptor.getRawData());
2323
2324 // Insert the entry into the TLBs
2325 tlb->multiInsert(TlbEntry::KeyType(te), te);
2326}
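// insertPartialTableEntry() caches an intermediate (table) descriptor as a
// partial TLB entry: it records the next-level table address plus the
// hierarchical xn/pxn and rwTable/userTable bits gathered so far, so a later
// walk to a nearby address can resume from this level rather than from the top.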
2327
2328void
2329TableWalker::insertTableEntry(DescriptorBase &descriptor, bool long_descriptor)
2330{
2331 const bool have_security = release->has(ArmExtension::SECURITY);
2332 TlbEntry te;
2333
2334 // Create and fill a new page table entry
2335 te.valid = true;
2336 te.longDescFormat = long_descriptor;
2337 te.asid = currState->asid;
2338 te.vmid = currState->vmid;
2339 te.N = descriptor.offsetBits();
2340 te.vpn = currState->vaddr >> te.N;
2341 te.size = (1<<te.N) - 1;
2342 te.pfn = descriptor.pfn();
2343 te.domain = descriptor.domain();
2344 te.lookupLevel = descriptor.lookupLevel;
2345 te.ns = !descriptor.secure(have_security, currState);
2346 te.ss = currState->ss;
2347 te.ipaSpace = currState->ipaSpace; // Used by stage2 entries only
2348 te.xn = descriptor.xn();
2349 te.type = currState->mode == BaseMMU::Execute ?
2350 TypeTLB::instruction : TypeTLB::data;
2351
2352 te.regime = currState->regime;
2353
2354 stats.pageSizes[pageSizeNtoStatBin(te.N)]++;
2355 stats.requestOrigin[COMPLETED][currState->isFetch]++;
2356
2357 // ASID has no meaning for stage 2 TLB entries, so mark all stage 2 entries
2358 // as global
2359 te.global = descriptor.global(currState) || isStage2;
2360 if (long_descriptor) {
2361 LongDescriptor l_descriptor =
2362 dynamic_cast<LongDescriptor &>(descriptor);
2363
2364 te.tg = l_descriptor.grainSize;
2365 te.xn |= currState->longDescData->xnTable;
2366 te.pxn = currState->longDescData->pxnTable || l_descriptor.pxn();
2367 if (isStage2) {
2368 // this is actually the HAP field, but it's stored in the same bit
2369 // positions as the AP field in a stage 1 translation.
2370 te.hap = l_descriptor.ap();
2371 } else {
2372 te.ap = ((!currState->longDescData->rwTable ||
2373 descriptor.ap() >> 1) << 1) |
2374 (currState->longDescData->userTable && (descriptor.ap() & 0x1));
2375 // Add index of Indirect Permission.
2376 te.piindex = l_descriptor.piindex();
2377 }
2378 if (currState->aarch64)
2379 memAttrsAArch64(currState->tc, te, l_descriptor);
2380 else
2381 memAttrsLPAE(currState->tc, te, l_descriptor);
2382 } else {
2383 te.ap = descriptor.ap();
2384 memAttrs(currState->tc, te, currState->sctlr, descriptor.texcb(),
2385 descriptor.shareable());
2386 }
2387
2388 // Debug output
2389 DPRINTF(TLB, descriptor.dbgHeader().c_str());
2390 DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
2391 te.N, te.pfn, te.size, te.global, te.valid);
2392 DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d piindex:%d domain:%d asid:%d "
2393 "vmid:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
2394 te.ap, te.piindex,
2395 static_cast<uint8_t>(te.domain), te.asid, te.vmid,
2396 te.nonCacheable, te.ns);
2397 DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
2398 descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
2399 descriptor.getRawData());
2400
2401 // Insert the entry into the TLBs
2402 tlb->multiInsert(TlbEntry::KeyType(te), te);
2403 if (!currState->timing) {
2404 currState->tc = NULL;
2405 currState->req = NULL;
2406 }
2407}
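// For long-format leaf entries the permissions combine the hierarchical table
// bits accumulated during the walk (rwTable/userTable, and xnTable/pxnTable for
// execute-never) with the leaf descriptor's own AP/XN fields; for stage 2
// entries the same bit positions carry the HAP field instead. Short-format
// entries take AP and memory attributes (TEX/C/B, shareability) directly from
// the descriptor.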
2408
2409 LookupLevel
2410 TableWalker::toLookupLevel(uint8_t lookup_level_as_int)
2411{
2412 switch (lookup_level_as_int) {
2413 case LookupLevel::L1:
2414 return LookupLevel::L1;
2415 case LookupLevel::L2:
2416 return LookupLevel::L2;
2417 case LookupLevel::L3:
2418 return LookupLevel::L3;
2419 default:
2420 panic("Invalid lookup level conversion");
2421 }
2422}
2423
2424 /* This method keeps track of the table walker queue's residency, so it
2425  * needs to be called whenever requests start and complete. */
2426void
2427 TableWalker::pendingChange()
2428 {
2429 unsigned n = pendingQueue.size();
2430 if ((currState != NULL) && (currState != pendingQueue.front())) {
2431 ++n;
2432 }
2433
2434 if (n != pendingReqs) {
2435 Tick now = curTick();
2436 stats.pendingWalks.sample(pendingReqs, now - pendingChangeTick);
2437 pendingReqs = n;
2438 pendingChangeTick = now;
2439 }
2440}
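// pendingChange() performs time-weighted sampling: whenever the number of
// in-flight walks changes, the previous count is added to the pendingWalks
// distribution weighted by how long it was held, yielding a residency profile
// of the walker queue.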
2441
2442Fault
2443 TableWalker::testWalk(const RequestPtr &walk_req, DomainType domain,
2444 LookupLevel lookup_level)
2445{
2446 if (!test) {
2447 return NoFault;
2448 } else {
2449 return test->walkCheck(walk_req, currState->vaddr,
2451 currState->el != EL0,
2452 currState->mode, domain, lookup_level);
2453 }
2454}
2455
2456void
2457 TableWalker::setTestInterface(TlbTestInterface *ti)
2458 {
2459 test = ti;
2460 }
2461
2462uint8_t
2463 TableWalker::pageSizeNtoStatBin(uint8_t N)
2464 {
2465 /* for stats.pageSizes */
2466 switch(N) {
2467 case 12: return 0; // 4K
2468 case 14: return 1; // 16K (using 16K granule in v8-64)
2469 case 16: return 2; // 64K
2470 case 20: return 3; // 1M
2471 case 21: return 4; // 2M-LPAE
2472 case 24: return 5; // 16M
2473 case 25: return 6; // 32M (using 16K granule in v8-64)
2474 case 29: return 7; // 512M (using 64K granule in v8-64)
2475 case 30: return 8; // 1G-LPAE
2476 case 42: return 9; // 4TiB
2477 default:
2478 panic("unknown page size");
2479 return 255;
2480 }
2481}
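// pageSizeNtoStatBin() maps N, the width in bits of the page/block offset, to
// the bins of the pageSizes stat defined below: N=12 is 4KiB, N=21 is 2MiB,
// N=30 is 1GiB and N=42 is 4TiB.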
2482
2483Fault
2484 TableWalker::readDataUntimed(ThreadContext *tc, Addr vaddr, Addr desc_addr,
2485 uint8_t *data, int num_bytes, Request::Flags flags, BaseMMU::Mode mode,
2486 MMU::ArmTranslationType tran_type, bool functional)
2487{
2488 Fault fault;
2489
2490 // translate to physical address using the second stage MMU
2491 auto req = std::make_shared<Request>();
2492 req->setVirt(desc_addr, num_bytes, flags | Request::PT_WALK,
2493 requestorId, 0);
2494
2495 if (functional) {
2496 fault = mmu->translateFunctional(req, tc, BaseMMU::Read,
2497 tran_type, true);
2498 } else {
2499 fault = mmu->translateAtomic(req, tc, BaseMMU::Read,
2500 tran_type, true);
2501 }
2502
2503 // Now do the access.
2504 if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
2505 Packet pkt = Packet(req, MemCmd::ReadReq);
2506 pkt.dataStatic(data);
2507 if (functional) {
2508 port->sendFunctional(&pkt);
2509 } else {
2510 port->sendAtomic(&pkt);
2511 }
2512 assert(!pkt.isError());
2513 }
2514
2515 // If there was a fault, annotate it with the flag saying the fault occurred
2516 // while doing a translation for a stage 1 page table walk.
2517 if (fault != NoFault) {
2518 ArmFault *arm_fault = reinterpret_cast<ArmFault *>(fault.get());
2519 arm_fault->annotate(ArmFault::S1PTW, true);
2520 arm_fault->annotate(ArmFault::OVA, vaddr);
2521 }
2522 return fault;
2523}
2524
2525void
2526 TableWalker::mpamTagTableWalk(RequestPtr &req) const
2527 {
2528 mpam::tagRequest(currState->tc, req, currState->isFetch);
2529}
2530
2531void
2532 TableWalker::readDataTimed(ThreadContext *tc, Addr desc_addr,
2533 Stage2Walk *translation, int num_bytes,
2534 Request::Flags flags)
2535{
2536 // translate to physical address using the second stage MMU
2537 translation->setVirt(
2538 desc_addr, num_bytes, flags | Request::PT_WALK, requestorId);
2539 translation->translateTiming(tc);
2540}
2541
2542 TableWalker::Stage2Walk::Stage2Walk(TableWalker &_parent,
2543 uint8_t *_data, Event *_event, Addr vaddr, BaseMMU::Mode _mode,
2544 MMU::ArmTranslationType tran_type)
2545 : data(_data), numBytes(0), event(_event), parent(_parent),
2546 oVAddr(vaddr), mode(_mode), tranType(tran_type), fault(NoFault)
2547{
2548 req = std::make_shared<Request>();
2549}
2550
2551void
2552 TableWalker::Stage2Walk::finish(const Fault &_fault,
2553 const RequestPtr &req,
2554 ThreadContext *tc, BaseMMU::Mode mode)
2555 {
2556 fault = _fault;
2557
2558 // If there was a fault, annotate it with the flag saying the fault occurred
2559 // while doing a translation for a stage 1 page table walk.
2560 if (fault != NoFault) {
2561 ArmFault *arm_fault = reinterpret_cast<ArmFault *>(fault.get());
2562 arm_fault->annotate(ArmFault::S1PTW, true);
2563 arm_fault->annotate(ArmFault::OVA, oVAddr);
2564 }
2565
2566 if (_fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
2567 parent.getTableWalkerPort().sendTimingReq(req, data,
2568 tc->getCpuPtr()->clockPeriod(), event);
2569 } else {
2570 // We can't do the DMA access as there's been a problem, so tell the
2571 // event we're done
2572 event->process();
2573 }
2574}
2575
2576void
2577 TableWalker::Stage2Walk::translateTiming(ThreadContext *tc)
2578 {
2579 parent.mmu->translateTiming(req, tc, this, mode, tranType, true);
2580}
2581
2582 TableWalker::TableWalkerStats::TableWalkerStats(statistics::Group *parent)
2583 : statistics::Group(parent),
2584 ADD_STAT(walks, statistics::units::Count::get(),
2585 "Table walker walks requested"),
2586 ADD_STAT(walksShortDescriptor, statistics::units::Count::get(),
2587 "Table walker walks initiated with short descriptors"),
2588 ADD_STAT(walksLongDescriptor, statistics::units::Count::get(),
2589 "Table walker walks initiated with long descriptors"),
2590 ADD_STAT(walksShortTerminatedAtLevel, statistics::units::Count::get(),
2591 "Level at which table walker walks with short descriptors "
2592 "terminate"),
2593 ADD_STAT(walksLongTerminatedAtLevel, statistics::units::Count::get(),
2594 "Level at which table walker walks with long descriptors "
2595 "terminate"),
2596 ADD_STAT(squashedBefore, statistics::units::Count::get(),
2597 "Table walks squashed before starting"),
2598 ADD_STAT(squashedAfter, statistics::units::Count::get(),
2599 "Table walks squashed after completion"),
2600 ADD_STAT(walkWaitTime, statistics::units::Tick::get(),
2601 "Table walker wait (enqueue to first request) latency"),
2602 ADD_STAT(walkServiceTime, statistics::units::Tick::get(),
2603 "Table walker service (enqueue to completion) latency"),
2604 ADD_STAT(pendingWalks, statistics::units::Tick::get(),
2605 "Table walker pending requests distribution"),
2606 ADD_STAT(pageSizes, statistics::units::Count::get(),
2607 "Table walker page sizes translated"),
2608 ADD_STAT(requestOrigin, statistics::units::Count::get(),
2609 "Table walker requests started/completed, data/inst")
2610{
2611 walksShortDescriptor
2612 .flags(statistics::nozero);
2613
2614 walksLongDescriptor
2615 .flags(statistics::nozero);
2616
2617 walksShortTerminatedAtLevel
2618 .init(2)
2619 .flags(statistics::nozero);
2620
2621 walksShortTerminatedAtLevel.subname(0, "Level1");
2622 walksShortTerminatedAtLevel.subname(1, "Level2");
2623
2624 walksLongTerminatedAtLevel
2625 .init(4)
2626 .flags(statistics::nozero);
2627 walksLongTerminatedAtLevel.subname(0, "Level0");
2628 walksLongTerminatedAtLevel.subname(1, "Level1");
2629 walksLongTerminatedAtLevel.subname(2, "Level2");
2630 walksLongTerminatedAtLevel.subname(3, "Level3");
2631
2632 squashedBefore
2633 .flags(statistics::nozero);
2634
2635 squashedAfter
2636 .flags(statistics::nozero);
2637
2638 walkWaitTime
2639 .init(16)
2640 .flags(statistics::pdf | statistics::nozero | statistics::nonan);
2641
2642 walkServiceTime
2643 .init(16)
2644 .flags(statistics::pdf | statistics::nozero | statistics::nonan);
2645
2646 pendingWalks
2647 .init(16)
2648 .flags(statistics::pdf | statistics::dist | statistics::nozero |
2649 statistics::nonan);
2650
2651 pageSizes // see DDI 0487A D4-1661
2652 .init(10)
2653 .flags(statistics::total | statistics::pdf | statistics::dist |
2654 statistics::nozero);
2655 pageSizes.subname(0, "4KiB");
2656 pageSizes.subname(1, "16KiB");
2657 pageSizes.subname(2, "64KiB");
2658 pageSizes.subname(3, "1MiB");
2659 pageSizes.subname(4, "2MiB");
2660 pageSizes.subname(5, "16MiB");
2661 pageSizes.subname(6, "32MiB");
2662 pageSizes.subname(7, "512MiB");
2663 pageSizes.subname(8, "1GiB");
2664 pageSizes.subname(9, "4TiB");
2665
2666 requestOrigin
2667 .init(2,2) // Instruction/Data, requests/completed
2668 .flags(statistics::total);
2669 requestOrigin.subname(0,"Requested");
2670 requestOrigin.subname(1,"Completed");
2671 requestOrigin.ysubname(0,"Data");
2672 requestOrigin.ysubname(1,"Inst");
2673}
2674
2675} // namespace gem5