gem5 [DEVELOP-FOR-25.0]
table_walker.cc
1/*
2 * Copyright (c) 2010, 2012-2019, 2021-2025 Arm Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37#include "arch/arm/table_walker.hh"
38
39#include <cassert>
40#include <memory>
41
42#include "arch/arm/faults.hh"
43#include "arch/arm/mmu.hh"
44#include "arch/arm/mpam.hh"
45#include "arch/arm/pagetable.hh"
46#include "arch/arm/system.hh"
47#include "arch/arm/tlb.hh"
48#include "base/compiler.hh"
49#include "cpu/base.hh"
50#include "cpu/thread_context.hh"
51#include "debug/Checkpoint.hh"
52#include "debug/Drain.hh"
53#include "debug/PageTableWalker.hh"
54#include "debug/TLB.hh"
55#include "debug/TLBVerbose.hh"
56#include "sim/system.hh"
57
58namespace gem5
59{
60
61using namespace ArmISA;
62
63TableWalker::TableWalker(const Params &p)
64 : ClockedObject(p),
65 requestorId(p.sys->getRequestorId(this)),
66 port(new Port(*this)),
67 isStage2(p.is_stage2), tlb(NULL),
68 currState(NULL), pending(false),
69 numSquashable(p.num_squash_per_cycle),
70 release(nullptr),
71 stats(this),
72 pendingReqs(0),
73 pendingChangeTick(curTick()),
74 doL1DescEvent([this]{ doL1DescriptorWrapper(); }, name()),
75 doL2DescEvent([this]{ doL2DescriptorWrapper(); }, name()),
76 doL0LongDescEvent([this]{ doL0LongDescriptorWrapper(); }, name()),
77 doL1LongDescEvent([this]{ doL1LongDescriptorWrapper(); }, name()),
78 doL2LongDescEvent([this]{ doL2LongDescriptorWrapper(); }, name()),
79 doL3LongDescEvent([this]{ doL3LongDescriptorWrapper(); }, name()),
80 LongDescEventByLevel { &doL0LongDescEvent, &doL1LongDescEvent,
81 &doL2LongDescEvent, &doL3LongDescEvent },
82 doProcessEvent([this]{ processWalkWrapper(); }, name()),
83 test(nullptr)
84{
85 sctlr = 0;
86
87 // Cache system-level properties
88 if (FullSystem) {
89 ArmSystem *arm_sys = dynamic_cast<ArmSystem *>(p.sys);
90 assert(arm_sys);
91 _physAddrRange = arm_sys->physAddrRange();
92 _haveLargeAsid64 = arm_sys->haveLargeAsid64();
93 } else {
94 _haveLargeAsid64 = false;
95 _physAddrRange = 48;
96 }
97
98}
99
100TableWalker::~TableWalker()
101{
102 ;
103}
104
105TableWalker::Port &
106TableWalker::getTableWalkerPort()
107{
108 return static_cast<Port&>(getPort("port"));
109}
110
111Port &
112TableWalker::getPort(const std::string &if_name, PortID idx)
113{
114 if (if_name == "port") {
115 return *port;
116 }
117 return ClockedObject::getPort(if_name, idx);
118}
119
120void
121TableWalker::setMmu(MMU *_mmu)
122{
123 mmu = _mmu;
124 release = mmu->release();
125}
126
127TableWalker::WalkerState::WalkerState() :
128 tc(nullptr), aarch64(false), regime(TranslationRegime::EL10),
129 physAddrRange(0), req(nullptr),
130 asid(0), vmid(0), transState(nullptr),
131 vaddr(0), vaddr_tainted(0),
132 sctlr(0), scr(0), cpsr(0), tcr(0),
133 htcr(0), hcr(0), vtcr(0),
134 isWrite(false), isFetch(false), ss(SecurityState::NonSecure),
135 isUncacheable(false), longDescData(std::nullopt),
136 hpd(false), sh(0), irgn(0), orgn(0), stage2Req(false),
137 stage2Tran(nullptr), timing(false), functional(false),
138 mode(BaseMMU::Read), tranType(MMU::NormalTran), l2Desc(l1Desc),
139 delayed(false), tableWalker(nullptr)
140{
141}
142
143TableWalker::Port::Port(TableWalker& _walker)
144 : QueuedRequestPort(_walker.name() + ".port", reqQueue, snoopRespQueue),
145 owner{_walker},
146 reqQueue(_walker, *this),
147 snoopRespQueue(_walker, *this)
148{
149}
150
151PacketPtr
152TableWalker::Port::createPacket(
153 const RequestPtr &req,
154 uint8_t *data, Tick delay,
155 Event *event)
156{
157 PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
158 pkt->dataStatic(data);
159
160 auto state = new TableWalkerState;
161 state->event = event;
162 state->delay = delay;
163
164 pkt->senderState = state;
165 return pkt;
166}
167
168void
169TableWalker::Port::sendFunctionalReq(
170 const RequestPtr &req, uint8_t *data)
171{
172 auto pkt = createPacket(req, data, 0, nullptr);
173
174 sendFunctional(pkt);
175
176 handleRespPacket(pkt);
177}
178
179void
180TableWalker::Port::sendAtomicReq(
181 const RequestPtr &req,
182 uint8_t *data, Tick delay)
183{
184 auto pkt = createPacket(req, data, delay, nullptr);
185
186 Tick lat = sendAtomic(pkt);
187
188 handleRespPacket(pkt, lat);
189}
190
191void
192TableWalker::Port::sendTimingReq(
193 const RequestPtr &req,
194 uint8_t *data, Tick delay,
195 Event *event)
196{
197 auto pkt = createPacket(req, data, delay, event);
198
199 schedTimingReq(pkt, curTick());
200}
201
202bool
203TableWalker::Port::recvTimingResp(PacketPtr pkt)
204{
205 // We shouldn't ever get a cacheable block in Modified state.
206 assert(pkt->req->isUncacheable() ||
207 !(pkt->cacheResponding() && !pkt->hasSharers()));
208
209 handleRespPacket(pkt);
210
211 return true;
212}
213
214void
215TableWalker::Port::handleRespPacket(PacketPtr pkt, Tick delay)
216{
217 // Should always see a response with a sender state.
218 assert(pkt->isResponse());
219
220 // Get the DMA sender state.
221 auto *state = dynamic_cast<TableWalkerState*>(pkt->senderState);
222 assert(state);
223
224 handleResp(state, pkt->getAddr(), pkt->req->getSize(), delay);
225
226 delete pkt;
227}
228
229void
230TableWalker::Port::handleResp(TableWalkerState *state, Addr addr,
231 Addr size, Tick delay)
232{
233 if (state->event) {
234 owner.schedule(state->event, curTick() + delay);
235 }
236 delete state;
237}
238
239void
240TableWalker::completeDrain()
241{
242 if (drainState() == DrainState::Draining &&
243 stateQueues[LookupLevel::L0].empty() &&
244 stateQueues[LookupLevel::L1].empty() &&
245 stateQueues[LookupLevel::L2].empty() &&
246 stateQueues[LookupLevel::L3].empty() &&
247 pendingQueue.empty()) {
248
249 DPRINTF(Drain, "TableWalker done draining, processing drain event\n");
250 signalDrainDone();
251 }
252}
253
254DrainState
255TableWalker::drain()
256{
257 bool state_queues_not_empty = false;
258
259 for (int i = 0; i < LookupLevel::Num_ArmLookupLevel; ++i) {
260 if (!stateQueues[i].empty()) {
261 state_queues_not_empty = true;
262 break;
263 }
264 }
265
266 if (state_queues_not_empty || pendingQueue.size()) {
267 DPRINTF(Drain, "TableWalker not drained\n");
268 return DrainState::Draining;
269 } else {
270 DPRINTF(Drain, "TableWalker free, no need to drain\n");
271 return DrainState::Drained;
272 }
273}
274
275void
276TableWalker::drainResume()
277{
278 if (params().sys->isTimingMode() && currState) {
279 delete currState;
280 currState = NULL;
281 pending = false;
282 }
283}
284
285bool
286TableWalker::uncacheableWalk() const
287{
288 bool disable_cacheability = isStage2 ?
289 currState->hcr.cd :
290 currState->sctlr.c == 0;
291 return disable_cacheability || currState->isUncacheable;
292}
293
294Fault
295TableWalker::walk(const RequestPtr &_req, ThreadContext *_tc, uint16_t _asid,
296 vmid_t _vmid, MMU::Mode _mode,
297 MMU::Translation *_trans, bool _timing, bool _functional,
298 SecurityState ss, PASpace ipaspace,
299 MMU::ArmTranslationType tranType,
300 bool _stage2Req, const TlbEntry *walk_entry)
301{
302 assert(!(_functional && _timing));
303 ++stats.walks;
304
305 WalkerState *savedCurrState = NULL;
306
307 if (!currState && !_functional) {
308 // For atomic mode, a new WalkerState instance should be only created
309 // once per TLB. For timing mode, a new instance is generated for every
310 // TLB miss.
311 DPRINTF(PageTableWalker, "creating new instance of WalkerState\n");
312
313 currState = new WalkerState();
314 currState->tableWalker = this;
315 } else if (_functional) {
316 // If we are mixing functional mode with timing (or even
317 * atomic), we need to be careful and clean up after
318 // ourselves to not risk getting into an inconsistent state.
319 DPRINTF(PageTableWalker,
320 "creating functional instance of WalkerState\n");
321 savedCurrState = currState;
322 currState = new WalkerState();
323 currState->tableWalker = this;
324 } else if (_timing) {
325 // This is a translation that was completed and then faulted again
326 // because some underlying parameters that affect the translation
327 // changed out from under us (e.g. asid). It will either be a
328 // misprediction, in which case nothing will happen or we'll use
329 // this fault to re-execute the faulting instruction which should clean
330 // up everything.
331 if (currState->vaddr_tainted == _req->getVaddr()) {
332 ++stats.squashedBefore;
333 return std::make_shared<ReExec>();
334 }
335 }
336 pendingChange();
337
338 currState->startTime = curTick();
339 currState->tc = _tc;
340 currState->el =
341 MMU::tranTypeEL(_tc->readMiscReg(MISCREG_CPSR),
342 static_cast<SCR>(_tc->readMiscReg(MISCREG_SCR_EL3)),
343 tranType);
344
345 if (isStage2) {
346 currState->regime = TranslationRegime::EL10;
347 currState->aarch64 = ELIs64(_tc, EL2);
348 currState->ipaSpace = ipaspace;
349 } else {
350 currState->regime =
351 translationRegime(_tc, currState->el);
352 currState->aarch64 =
353 ELIs64(_tc, translationEl(currState->regime));
354 }
355 currState->transState = _trans;
356 currState->req = _req;
357 if (walk_entry) {
358 currState->walkEntry = *walk_entry;
359 } else {
360 currState->walkEntry = TlbEntry();
361 }
362 currState->fault = NoFault;
363 currState->asid = _asid;
364 currState->vmid = _vmid;
365 currState->timing = _timing;
366 currState->functional = _functional;
367 currState->mode = _mode;
368 currState->tranType = tranType;
369 currState->ss = ss;
370 currState->secureLookup = currState->ss == SecurityState::Secure;
371 currState->physAddrRange = _physAddrRange;
372
373 /** @todo These should be cached or grabbed from cached copies in
374 the TLB, all these miscreg reads are expensive */
375 currState->vaddr_tainted = currState->req->getVaddr();
376 if (currState->aarch64)
377 currState->vaddr = purifyTaggedAddr(currState->vaddr_tainted,
378 currState->tc, currState->el,
379 currState->mode==BaseMMU::Execute);
380 else
381 currState->vaddr = currState->vaddr_tainted;
382
383 if (currState->aarch64) {
384 currState->hcr = currState->tc->readMiscReg(MISCREG_HCR_EL2);
385 if (isStage2) {
386 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
387 if (currState->ss == SecurityState::Secure &&
388 currState->ipaSpace == PASpace::Secure) {
389 currState->vtcr =
390 currState->tc->readMiscReg(MISCREG_VSTCR_EL2);
391 } else {
392 currState->vtcr =
393 currState->tc->readMiscReg(MISCREG_VTCR_EL2);
394 }
395 } else switch (currState->regime) {
396 case TranslationRegime::EL10:
397 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
398 currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL1);
399 break;
400 case TranslationRegime::EL20:
401 case TranslationRegime::EL2:
402 assert(release->has(ArmExtension::VIRTUALIZATION));
403 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL2);
404 currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL2);
405 break;
406 case TranslationRegime::EL3:
407 assert(release->has(ArmExtension::SECURITY));
408 currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL3);
409 currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL3);
410 break;
411 default:
412 panic("Invalid translation regime");
413 break;
414 }
415 } else {
416 currState->sctlr = currState->tc->readMiscReg(snsBankedIndex(
417 MISCREG_SCTLR, currState->tc,
418 currState->ss == SecurityState::NonSecure));
419 currState->ttbcr = currState->tc->readMiscReg(snsBankedIndex(
420 MISCREG_TTBCR, currState->tc,
421 currState->ss == SecurityState::NonSecure));
422 currState->htcr = currState->tc->readMiscReg(MISCREG_HTCR);
423 currState->hcr = currState->tc->readMiscReg(MISCREG_HCR);
424 currState->vtcr = currState->tc->readMiscReg(MISCREG_VTCR);
425 }
426 sctlr = currState->sctlr;
427
428 currState->isFetch = (currState->mode == BaseMMU::Execute);
429 currState->isWrite = (currState->mode == BaseMMU::Write);
430
431 stats.requestOrigin[REQUESTED][currState->isFetch]++;
432
433 currState->stage2Req = _stage2Req && !isStage2;
434
435 bool hyp = currState->el == EL2;
436 bool long_desc_format = currState->aarch64 || hyp || isStage2 ||
437 longDescFormatInUse(currState->tc);
438
439 if (long_desc_format) {
440 // Helper variables used for hierarchical permissions
441 currState->longDescData = WalkerState::LongDescData();
442 currState->longDescData->rwTable = true;
443 currState->longDescData->userTable = true;
444 currState->longDescData->xnTable = false;
445 currState->longDescData->pxnTable = false;
446 ++stats.walksLongDescriptor;
447 } else {
448 currState->longDescData = std::nullopt;
449 ++stats.walksShortDescriptor;
450 }
451
452 if (currState->timing && (pending || pendingQueue.size())) {
453 pendingQueue.push_back(currState);
454 currState = NULL;
455 pendingChange();
456 return NoFault;
457 } else {
458 if (currState->timing) {
459 pending = true;
460 pendingChange();
461 }
462
463 Fault fault = NoFault;
464 if (currState->aarch64) {
465 fault = processWalkAArch64();
466 } else if (long_desc_format) {
467 fault = processWalkLPAE();
468 } else {
469 fault = processWalk();
470 }
471
472 // If this was a functional non-timing access restore state to
473 // how we found it.
474 if (currState->functional) {
475 delete currState;
476 currState = savedCurrState;
477 } else if (currState->timing) {
478 if (fault) {
479 pending = false;
480 nextWalk(currState->tc);
481 delete currState;
482 currState = NULL;
483 } else {
484 // Either we are using the long descriptor, which means we
485 // need to extract the queue index from longDesc, or we are
486 // using the short. In the latter we always start at L1
487 LookupLevel curr_lookup_level = long_desc_format ?
488 currState->longDesc.lookupLevel : LookupLevel::L1;
489
490 stashCurrState(curr_lookup_level);
491 }
492 } else if (fault) {
493 currState->tc = NULL;
494 currState->req = NULL;
495 }
496
497 return fault;
498 }
499}
500
501void
502TableWalker::processWalkWrapper()
503{
504 assert(!currState);
505 assert(pendingQueue.size());
506 pendingChangeTick = curTick();
507 currState = pendingQueue.front();
508
509 // Check if a previous walk filled this request already
510 // @TODO Should this always be the TLB or should we look in the stage2 TLB?
511 TlbEntry* te = mmu->lookup(currState->vaddr, currState->asid,
512 currState->vmid, currState->ss, true, false,
513 currState->regime, isStage2, currState->mode);
514
515 // Check if we still need to have a walk for this request. If the requesting
516 // instruction has been squashed, or a previous walk has filled the TLB with
517 // a match, we just want to get rid of the walk. The latter could happen
518 // when there are multiple outstanding misses to a single page and a
519 // previous request has been successfully translated.
520 if (!currState->transState->squashed() && (!te || te->partial)) {
521 // We've got a valid request, let's process it
522 pending = true;
523 pendingQueue.pop_front();
524
525 bool long_desc_format = currState->aarch64 || currState->el == EL2 ||
526 isStage2 || longDescFormatInUse(currState->tc);
527
528 if (te && te->partial) {
529 currState->walkEntry = *te;
530 }
531 Fault fault;
532 if (currState->aarch64) {
533 fault = processWalkAArch64();
534 } else if (long_desc_format) {
535 fault = processWalkLPAE();
536 } else {
537 fault = processWalk();
538 }
539
540 if (fault != NoFault) {
541 pending = false;
542 nextWalk(currState->tc);
543
544 currState->transState->finish(fault, currState->req,
545 currState->tc, currState->mode);
546
547 delete currState;
548 currState = NULL;
549 } else {
550 LookupLevel curr_lookup_level = long_desc_format ?
551 currState->longDesc.lookupLevel : LookupLevel::L1;
552
553 stashCurrState(curr_lookup_level);
554 }
555 return;
556 }
557
558
559 // If the instruction that we were translating for has been
560 // squashed we shouldn't bother.
561 unsigned num_squashed = 0;
562 ThreadContext *tc = currState->tc;
563 while ((num_squashed < numSquashable) && currState &&
564 (currState->transState->squashed() ||
565 (te && !te->partial))) {
566 pendingQueue.pop_front();
567 num_squashed++;
568 stats.squashedBefore++;
569
570 DPRINTF(TLB, "Squashing table walk for address %#x\n",
571 currState->vaddr_tainted);
572
573 if (currState->transState->squashed()) {
574 // finish the translation which will delete the translation object
575 currState->transState->finish(
576 std::make_shared<UnimpFault>("Squashed Inst"),
577 currState->req, currState->tc, currState->mode);
578 } else {
579 // translate the request now that we know it will work
580 stats.walkServiceTime.sample(curTick() - currState->startTime);
581 mmu->translateTiming(currState->req, currState->tc,
582 currState->transState, currState->mode,
583 currState->tranType, isStage2);
584 }
585
586 // delete the current request
587 delete currState;
588
589 // peek at the next one
590 if (pendingQueue.size()) {
591 currState = pendingQueue.front();
592 te = mmu->lookup(currState->vaddr, currState->asid,
593 currState->vmid, currState->ss, true,
594 false, currState->regime, isStage2, currState->mode);
595 } else {
596 // Terminate the loop, nothing more to do
597 currState = NULL;
598 }
599 }
600 pendingChange();
601
602 // if we still have pending translations, schedule more work
603 nextWalk(tc);
604 currState = NULL;
605}
606
607Fault
608TableWalker::processWalk()
609{
610 Addr ttbr = 0;
611
612 // For short descriptors, translation configs are held in
613 // TTBR1.
614 RegVal ttbr1 = currState->tc->readMiscReg(snsBankedIndex(
615 MISCREG_TTBR1, currState->tc,
616 currState->ss == SecurityState::NonSecure));
617
618 const auto irgn0_mask = 0x1;
619 const auto irgn1_mask = 0x40;
620 currState->isUncacheable = (ttbr1 & (irgn0_mask | irgn1_mask)) == 0;
621
622 // If translation isn't enabled, we shouldn't be here
623 assert(currState->sctlr.m || isStage2);
624 const bool is_atomic = currState->req->isAtomic();
625 const bool have_security = release->has(ArmExtension::SECURITY);
626
627 DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
628 currState->vaddr_tainted, currState->ttbcr, mbits(currState->vaddr, 31,
629 32 - currState->ttbcr.n));
630
631 stats.walkWaitTime.sample(curTick() - currState->startTime);
632
633 if (currState->ttbcr.n == 0 || !mbits(currState->vaddr, 31,
634 32 - currState->ttbcr.n)) {
635 DPRINTF(TLB, " - Selecting TTBR0\n");
636 // Check if table walk is allowed when Security Extensions are enabled
637 if (have_security && currState->ttbcr.pd0) {
638 if (currState->isFetch)
639 return std::make_shared<PrefetchAbort>(
640 currState->vaddr_tainted,
641 ArmFault::TranslationLL + LookupLevel::L1,
642 isStage2,
643 ArmFault::VmsaTran);
644 else
645 return std::make_shared<DataAbort>(
646 currState->vaddr_tainted,
647 TlbEntry::DomainType::NoAccess,
648 is_atomic ? false : currState->isWrite,
649 ArmFault::TranslationLL + LookupLevel::L1, isStage2,
650 ArmFault::VmsaTran);
651 }
652 ttbr = currState->tc->readMiscReg(snsBankedIndex(
653 MISCREG_TTBR0, currState->tc,
654 currState->ss == SecurityState::NonSecure));
655 } else {
656 DPRINTF(TLB, " - Selecting TTBR1\n");
657 // Check if table walk is allowed when Security Extensions are enabled
658 if (have_security && currState->ttbcr.pd1) {
659 if (currState->isFetch)
660 return std::make_shared<PrefetchAbort>(
661 currState->vaddr_tainted,
662 ArmFault::TranslationLL + LookupLevel::L1,
663 isStage2,
664 ArmFault::VmsaTran);
665 else
666 return std::make_shared<DataAbort>(
667 currState->vaddr_tainted,
668 TlbEntry::DomainType::NoAccess,
669 is_atomic ? false : currState->isWrite,
670 ArmFault::TranslationLL + LookupLevel::L1, isStage2,
671 ArmFault::VmsaTran);
672 }
673 ttbr = ttbr1;
674 currState->ttbcr.n = 0;
675 }
676
677 Addr l1desc_addr = mbits(ttbr, 31, 14 - currState->ttbcr.n) |
678 (bits(currState->vaddr, 31 - currState->ttbcr.n, 20) << 2);
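// Illustrative example (hypothetical register values, not from the
// source): with TTBCR.N == 0 the L1 table base is TTBR0[31:14] and the
// index is VA[31:20] scaled by the 4-byte descriptor size. For TTBR0 ==
// 0x80010000 and VA == 0x00302000, VA[31:20] == 0x003, so the L1
// descriptor is fetched from 0x80010000 | (0x003 << 2) == 0x8001000c.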
679 DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr,
680 currState->ss == SecurityState::Secure ? "s" : "ns");
681
682 Request::Flags flag = Request::PT_WALK;
683 if (uncacheableWalk()) {
684 flag.set(Request::UNCACHEABLE);
685 }
686
687 if (currState->secureLookup) {
688 flag.set(Request::SECURE);
689 }
690
691 fetchDescriptor(
692 l1desc_addr, currState->l1Desc,
693 sizeof(uint32_t), flag, LookupLevel::L1,
694 &doL1DescEvent,
695 &TableWalker::doL1Descriptor);
696
697 return currState->fault;
698}
699
700Fault
701TableWalker::processWalkLPAE()
702{
703 Addr ttbr, ttbr0_max, ttbr1_min, desc_addr;
704 int tsz, n;
705 LookupLevel start_lookup_level = LookupLevel::L1;
706
707 DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n",
708 currState->vaddr_tainted, currState->ttbcr);
709
710 stats.walkWaitTime.sample(curTick() - currState->startTime);
711
712 Request::Flags flag = Request::PT_WALK;
713 if (currState->secureLookup)
714 flag.set(Request::SECURE);
715
716 // work out which base address register to use, if in hyp mode we always
717 // use HTTBR
718 if (isStage2) {
719 DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n");
720 ttbr = currState->tc->readMiscReg(MISCREG_VTTBR);
721 tsz = sext<4>(currState->vtcr.t0sz);
722 start_lookup_level = currState->vtcr.sl0 ?
723 LookupLevel::L1 : LookupLevel::L2;
724 currState->isUncacheable = currState->vtcr.irgn0 == 0;
725 } else if (currState->el == EL2) {
726 DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n");
727 ttbr = currState->tc->readMiscReg(MISCREG_HTTBR);
728 tsz = currState->htcr.t0sz;
729 currState->isUncacheable = currState->htcr.irgn0 == 0;
730 } else {
731 assert(longDescFormatInUse(currState->tc));
732
733 // Determine boundaries of TTBR0/1 regions
734 if (currState->ttbcr.t0sz)
735 ttbr0_max = (1ULL << (32 - currState->ttbcr.t0sz)) - 1;
736 else if (currState->ttbcr.t1sz)
737 ttbr0_max = (1ULL << 32) -
738 (1ULL << (32 - currState->ttbcr.t1sz)) - 1;
739 else
740 ttbr0_max = (1ULL << 32) - 1;
741 if (currState->ttbcr.t1sz)
742 ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz));
743 else
744 ttbr1_min = (1ULL << (32 - currState->ttbcr.t0sz));
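// Illustrative boundary math (hypothetical TTBCR values): with
// t0sz == 2 and t1sz == 1, ttbr0_max == (1ULL << 30) - 1 == 0x3fffffff
// and ttbr1_min == 1ULL << 31 == 0x80000000, so VAs in
// [0x40000000, 0x7fffffff] select neither table and take the
// out-of-boundaries translation fault below.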
745
746 const bool is_atomic = currState->req->isAtomic();
747
748 // The following code snippet selects the appropriate translation table base
749 // address (TTBR0 or TTBR1) and the appropriate starting lookup level
750 // depending on the address range supported by the translation table (ARM
751 // ARM issue C B3.6.4)
752 if (currState->vaddr <= ttbr0_max) {
753 DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n");
754 // Check if table walk is allowed
755 if (currState->ttbcr.epd0) {
756 if (currState->isFetch)
757 return std::make_shared<PrefetchAbort>(
758 currState->vaddr_tainted,
759 ArmFault::TranslationLL + LookupLevel::L1,
760 isStage2,
761 ArmFault::LpaeTran);
762 else
763 return std::make_shared<DataAbort>(
764 currState->vaddr_tainted,
765 TlbEntry::DomainType::NoAccess,
766 is_atomic ? false : currState->isWrite,
767 ArmFault::TranslationLL + LookupLevel::L1,
768 isStage2,
769 ArmFault::LpaeTran);
770 }
771 ttbr = currState->tc->readMiscReg(snsBankedIndex(
772 MISCREG_TTBR0, currState->tc,
773 currState->ss == SecurityState::NonSecure));
774 tsz = currState->ttbcr.t0sz;
775 currState->isUncacheable = currState->ttbcr.irgn0 == 0;
776 if (ttbr0_max < (1ULL << 30)) // Upper limit < 1 GiB
777 start_lookup_level = LookupLevel::L2;
778 } else if (currState->vaddr >= ttbr1_min) {
779 DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
780 // Check if table walk is allowed
781 if (currState->ttbcr.epd1) {
782 if (currState->isFetch)
783 return std::make_shared<PrefetchAbort>(
784 currState->vaddr_tainted,
785 ArmFault::TranslationLL + LookupLevel::L1,
786 isStage2,
787 ArmFault::LpaeTran);
788 else
789 return std::make_shared<DataAbort>(
790 currState->vaddr_tainted,
791 TlbEntry::DomainType::NoAccess,
792 is_atomic ? false : currState->isWrite,
793 ArmFault::TranslationLL + LookupLevel::L1,
794 isStage2,
795 ArmFault::LpaeTran);
796 }
797 ttbr = currState->tc->readMiscReg(snsBankedIndex(
798 MISCREG_TTBR1, currState->tc,
799 currState->ss == SecurityState::NonSecure));
800 tsz = currState->ttbcr.t1sz;
801 currState->isUncacheable = currState->ttbcr.irgn1 == 0;
802 // Lower limit >= 3 GiB
803 if (ttbr1_min >= (1ULL << 31) + (1ULL << 30))
804 start_lookup_level = LookupLevel::L2;
805 } else {
806 // Out of boundaries -> translation fault
807 if (currState->isFetch)
808 return std::make_shared<PrefetchAbort>(
809 currState->vaddr_tainted,
810 ArmFault::TranslationLL + LookupLevel::L1,
811 isStage2,
812 ArmFault::LpaeTran);
813 else
814 return std::make_shared<DataAbort>(
815 currState->vaddr_tainted,
816 TlbEntry::DomainType::NoAccess,
817 is_atomic ? false : currState->isWrite,
818 ArmFault::TranslationLL + LookupLevel::L1,
819 isStage2, ArmFault::LpaeTran);
820 }
821
822 }
823
824 // Perform lookup (ARM ARM issue C B3.6.6)
825 if (start_lookup_level == LookupLevel::L1) {
826 n = 5 - tsz;
827 desc_addr = mbits(ttbr, 39, n) |
828 (bits(currState->vaddr, n + 26, 30) << 3);
829 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
830 desc_addr, currState->ss == SecurityState::Secure ?
831 "s" : "ns");
832 } else {
833 // Skip first-level lookup
834 n = (tsz >= 2 ? 14 - tsz : 12);
835 desc_addr = mbits(ttbr, 39, n) |
836 (bits(currState->vaddr, n + 17, 21) << 3);
837 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
838 desc_addr, currState->ss == SecurityState::Secure ?
839 "s" : "ns");
840 }
841
842 if (uncacheableWalk()) {
843 flag.set(Request::UNCACHEABLE);
844 }
845
846 currState->longDesc.lookupLevel = start_lookup_level;
847 currState->longDesc.aarch64 = false;
848 currState->longDesc.grainSize = Grain4KB;
849 currState->longDesc.isStage2 = isStage2;
850
851 fetchDescriptor(
852 desc_addr, currState->longDesc,
853 sizeof(uint64_t), flag, start_lookup_level,
854 LongDescEventByLevel[start_lookup_level],
855 &TableWalker::doLongDescriptor);
856
857 return currState->fault;
858}
859
860Addr
861TableWalker::s1MinTxSz(GrainSize tg) const
862{
863 // The effective maximum input size is 48 if ARMv8.2-LVA is not
864 // supported or if the translation granule that is in use is 4KB or
865 // 16KB in size. When ARMv8.2-LVA is supported, for the 64KB
866 // translation granule size only, the effective maximum input size is
867 // 52 (i.e. a minimum TxSZ of 12).
868 if (HaveExt(currState->tc, ArmExtension::FEAT_LVA) && tg == Grain64KB) {
869 return 12;
870 } else {
871 return 16;
872 }
873}
874
875Addr
876TableWalker::maxTxSz(GrainSize tg) const
877{
878 if (HaveExt(currState->tc, ArmExtension::FEAT_TTST)) {
879 switch (tg) {
880 case Grain4KB: return 48;
881 case Grain16KB: return 48;
882 case Grain64KB: return 47;
883 default:
884 // If the value is programmed to either a reserved value or a size
885 // that has not been implemented, then the hardware will treat the
886 // field as if it has been programmed to an IMPLEMENTATION DEFINED
887 // choice
888 warn_once("Invalid grain size\n");
889 return 48;
890 }
891 }
892 return 39;
893}
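// Since the input size is 64 - TxSZ bits, the default ceiling of 39
// corresponds to a minimum 25-bit (32 MiB) region, while FEAT_TTST's
// 48 (47 for 64KB grains) allows regions as small as 64 KiB. Example
// (hypothetical value): TCR.t0sz == 40 without FEAT_TTST exceeds
// maxTxSz() and is reported through s1TxSzFault() below.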
894
895bool
896TableWalker::s1TxSzFault(GrainSize tg, int tsz)
897{
898 Addr min_txsz = s1MinTxSz(tg);
899 Addr max_txsz = maxTxSz(tg);
900
901 return tsz > max_txsz || tsz < min_txsz;
902}
903
904bool
905TableWalker::checkVAOutOfRange(Addr vaddr, int top_bit, int tsz, bool low_range)
906{
907 return low_range ?
908 bits(currState->vaddr, top_bit, tsz) != 0x0 :
909 bits(currState->vaddr, top_bit, tsz) != mask(top_bit - tsz + 1);
910}
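// Illustrative check (hypothetical values): for a low-range (TTBR0)
// lookup with top_bit == 55 and a 39-bit input size (tsz == 39),
// VA[55:39] must be all zeros; for the high (TTBR1) half they must be
// all ones. VA 0x0000008000000000 has bit 39 set, so it fails the
// low-range check and the caller raises a translation fault.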
911
912bool
913TableWalker::checkAddrSizeFaultAArch64(Addr addr, int pa_range)
914{
915 return (pa_range != _physAddrRange &&
916 bits(addr, _physAddrRange - 1, pa_range));
917}
918
919Fault
920TableWalker::processWalkAArch64()
921{
922 assert(currState->aarch64);
923
924 DPRINTF(TLB, "Beginning table walk for address %#llx, TCR: %#llx\n",
925 currState->vaddr_tainted, currState->tcr);
926
927 stats.walkWaitTime.sample(curTick() - currState->startTime);
928
929 // Determine TTBR, table size, granule size and phys. address range
930 Addr ttbr = 0;
931 int tsz = 0, ps = 0;
932 GrainSize tg = Grain4KB; // grain size computed from tg* field
933 bool fault = false;
934
935 int top_bit = computeAddrTop(currState->tc,
936 bits(currState->vaddr, 55),
937 currState->mode==BaseMMU::Execute,
938 currState->tcr,
939 currState->el);
940
941 bool vaddr_fault = false;
942 switch (currState->regime) {
943 case TranslationRegime::EL10:
944 if (isStage2) {
945 if (currState->ss == SecurityState::Secure &&
946 currState->ipaSpace == PASpace::Secure) {
947 // Secure EL1&0 Secure IPA
948 DPRINTF(TLB, " - Selecting VSTTBR_EL2 (AArch64 stage 2)\n");
949 ttbr = currState->tc->readMiscReg(MISCREG_VSTTBR_EL2);
950 currState->secureLookup = !currState->vtcr.sw;
951 } else {
952 // Secure EL1&0 NonSecure IPA or NonSecure EL1&0
953 DPRINTF(TLB, " - Selecting VTTBR_EL2 (AArch64 stage 2)\n");
954 ttbr = currState->tc->readMiscReg(MISCREG_VTTBR_EL2);
955 currState->secureLookup = currState->ss == SecurityState::Secure ?
956 !currState->vtcr.nsw : // Secure EL1&0 NonSecure IPA
957 false; // NonSecure EL1&0
958 }
959 tsz = 64 - currState->vtcr.t0sz64;
960 tg = GrainMap_tg0[currState->vtcr.tg0];
961
962 ps = currState->vtcr.ps;
963 currState->sh = currState->vtcr.sh0;
964 currState->irgn = currState->vtcr.irgn0;
965 currState->orgn = currState->vtcr.orgn0;
966 } else {
967 switch (bits(currState->vaddr, top_bit)) {
968 case 0:
969 DPRINTF(TLB, " - Selecting TTBR0_EL1 (AArch64)\n");
970 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL1);
971 tsz = 64 - currState->tcr.t0sz;
972 tg = GrainMap_tg0[currState->tcr.tg0];
973 currState->hpd = currState->tcr.hpd0;
974 currState->sh = currState->tcr.sh0;
975 currState->irgn = currState->tcr.irgn0;
976 currState->orgn = currState->tcr.orgn0;
977 vaddr_fault = s1TxSzFault(tg, currState->tcr.t0sz) ||
978 checkVAOutOfRange(currState->vaddr, top_bit, tsz, true);
979
980 if (vaddr_fault || currState->tcr.epd0)
981 fault = true;
982 break;
983 case 0x1:
984 DPRINTF(TLB, " - Selecting TTBR1_EL1 (AArch64)\n");
985 ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL1);
986 tsz = 64 - currState->tcr.t1sz;
987 tg = GrainMap_tg1[currState->tcr.tg1];
988 currState->hpd = currState->tcr.hpd1;
989 currState->sh = currState->tcr.sh1;
990 currState->irgn = currState->tcr.irgn1;
991 currState->orgn = currState->tcr.orgn1;
992 vaddr_fault = s1TxSzFault(tg, currState->tcr.t1sz) ||
993 checkVAOutOfRange(currState->vaddr, top_bit, tsz, false);
994
995 if (vaddr_fault || currState->tcr.epd1)
996 fault = true;
997 break;
998 default:
999 // top two bytes must be all 0s or all 1s, else invalid addr
1000 fault = true;
1001 }
1002 ps = currState->tcr.ips;
1003 }
1004 break;
1005 case TranslationRegime::EL20:
1006 case TranslationRegime::EL2:
1007 switch(bits(currState->vaddr, top_bit)) {
1008 case 0:
1009 DPRINTF(TLB, " - Selecting TTBR0_EL2 (AArch64)\n");
1010 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL2);
1011 tsz = 64 - currState->tcr.t0sz;
1012 tg = GrainMap_tg0[currState->tcr.tg0];
1013 currState->hpd = currState->hcr.e2h ?
1014 currState->tcr.hpd0 : currState->tcr.hpd;
1015 currState->sh = currState->tcr.sh0;
1016 currState->irgn = currState->tcr.irgn0;
1017 currState->orgn = currState->tcr.orgn0;
1018 vaddr_fault = s1TxSzFault(tg, currState->tcr.t0sz) ||
1019 checkVAOutOfRange(currState->vaddr, top_bit, tsz, true);
1020
1021 if (vaddr_fault || (currState->hcr.e2h && currState->tcr.epd0))
1022 fault = true;
1023 break;
1024
1025 case 0x1:
1026 DPRINTF(TLB, " - Selecting TTBR1_EL2 (AArch64)\n");
1027 ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL2);
1028 tsz = 64 - currState->tcr.t1sz;
1029 tg = GrainMap_tg1[currState->tcr.tg1];
1030 currState->hpd = currState->tcr.hpd1;
1031 currState->sh = currState->tcr.sh1;
1032 currState->irgn = currState->tcr.irgn1;
1033 currState->orgn = currState->tcr.orgn1;
1034 vaddr_fault = s1TxSzFault(tg, currState->tcr.t1sz) ||
1035 checkVAOutOfRange(currState->vaddr, top_bit, tsz, false);
1036
1037 if (vaddr_fault || !currState->hcr.e2h || currState->tcr.epd1)
1038 fault = true;
1039 break;
1040
1041 default:
1042 // invalid addr if top two bytes are not all 0s
1043 fault = true;
1044 }
1045 ps = currState->hcr.e2h ? currState->tcr.ips: currState->tcr.ps;
1046 break;
1047 case TranslationRegime::EL3:
1048 switch(bits(currState->vaddr, top_bit)) {
1049 case 0:
1050 DPRINTF(TLB, " - Selecting TTBR0_EL3 (AArch64)\n");
1051 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL3);
1052 tsz = 64 - currState->tcr.t0sz;
1053 tg = GrainMap_tg0[currState->tcr.tg0];
1054 currState->hpd = currState->tcr.hpd;
1055 currState->sh = currState->tcr.sh0;
1056 currState->irgn = currState->tcr.irgn0;
1057 currState->orgn = currState->tcr.orgn0;
1058 vaddr_fault = s1TxSzFault(tg, currState->tcr.t0sz) ||
1059 checkVAOutOfRange(currState->vaddr, top_bit, tsz, true);
1060
1061 if (vaddr_fault)
1062 fault = true;
1063 break;
1064 default:
1065 // invalid addr if top two bytes are not all 0s
1066 fault = true;
1067 }
1068 ps = currState->tcr.ps;
1069 break;
1070 }
1071
1072 currState->isUncacheable = currState->irgn == 0 ||
1073 currState->orgn == 0;
1074
1075 const bool is_atomic = currState->req->isAtomic();
1076
1077 if (fault) {
1078 if (currState->isFetch) {
1079 return std::make_shared<PrefetchAbort>(
1080 currState->vaddr_tainted,
1081 ArmFault::TranslationLL + LookupLevel::L0, isStage2,
1082 ArmFault::LpaeTran);
1083 } else {
1084 return std::make_shared<DataAbort>(
1085 currState->vaddr_tainted,
1086 TlbEntry::DomainType::NoAccess,
1087 is_atomic ? false : currState->isWrite,
1088 ArmFault::TranslationLL + LookupLevel::L0,
1089 isStage2, ArmFault::LpaeTran);
1090 }
1091 }
1092
1093 if (tg == ReservedGrain) {
1094 warn_once("Reserved granule size requested; gem5's IMPLEMENTATION "
1095 "DEFINED behavior takes this to mean 4KB granules\n");
1096 tg = Grain4KB;
1097 }
1098
1099 // Clamp to lower limit
1100 int pa_range = decodePhysAddrRange64(ps);
1101 if (pa_range > _physAddrRange) {
1102 currState->physAddrRange = _physAddrRange;
1103 } else {
1104 currState->physAddrRange = pa_range;
1105 }
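// decodePhysAddrRange64() expands the architected PARange/IPS encoding
// (0b000=32, 0b001=36, 0b010=40, 0b011=42, 0b100=44, 0b101=48,
// 0b110=52 bits); the clamp above keeps a guest-programmed range from
// exceeding the platform's physical address width.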
1106
1107 auto [table_addr, desc_addr, start_lookup_level] = walkAddresses(
1108 ttbr, tg, tsz, pa_range);
1109
1110 // Determine physical address size and raise an Address Size Fault if
1111 // necessary
1112 if (checkAddrSizeFaultAArch64(table_addr, currState->physAddrRange)) {
1113 DPRINTF(TLB, "Address size fault before any lookup\n");
1114 if (currState->isFetch)
1115 return std::make_shared<PrefetchAbort>(
1116 currState->vaddr_tainted,
1117 ArmFault::AddressSizeLL + start_lookup_level,
1118 isStage2,
1120 else
1121 return std::make_shared<DataAbort>(
1122 currState->vaddr_tainted,
1124 is_atomic ? false : currState->isWrite,
1125 ArmFault::AddressSizeLL + start_lookup_level,
1126 isStage2,
1128 }
1129
1130 Request::Flags flag = Request::PT_WALK;
1131 if (uncacheableWalk()) {
1132 flag.set(Request::UNCACHEABLE);
1133 }
1134
1135 if (currState->secureLookup) {
1136 flag.set(Request::SECURE);
1137 }
1138
1139 currState->longDesc.lookupLevel = start_lookup_level;
1140 currState->longDesc.aarch64 = true;
1141 currState->longDesc.grainSize = tg;
1142 currState->longDesc.physAddrRange = _physAddrRange;
1143 currState->longDesc.isStage2 = isStage2;
1144
1145 fetchDescriptor(desc_addr, currState->longDesc,
1146 sizeof(uint64_t), flag, start_lookup_level,
1147 LongDescEventByLevel[start_lookup_level],
1148 &TableWalker::doLongDescriptor);
1149
1150 return currState->fault;
1151}
1152
1153std::tuple<Addr, Addr, TableWalker::LookupLevel>
1154TableWalker::walkAddresses(Addr ttbr, GrainSize tg, int tsz, int pa_range)
1155{
1156 const auto* ptops = getPageTableOps(tg);
1157
1158 LookupLevel first_level = LookupLevel::Num_ArmLookupLevel;
1159 Addr table_addr = 0;
1160 Addr desc_addr = 0;
1161
1162 if (currState->walkEntry.valid) {
1163 // WalkCache hit
1164 TlbEntry* entry = &currState->walkEntry;
1165 DPRINTF(PageTableWalker,
1166 "Walk Cache hit: va=%#x, level=%d, table address=%#x\n",
1167 currState->vaddr, entry->lookupLevel, entry->pfn);
1168
1169 if (currState->longDescData.has_value()) {
1170 currState->longDescData->xnTable = entry->xn;
1171 currState->longDescData->pxnTable = entry->pxn;
1172 currState->longDescData->rwTable = bits(entry->ap, 1);
1173 currState->longDescData->userTable = bits(entry->ap, 0);
1174 }
1175
1176 table_addr = entry->pfn;
1177 first_level = (LookupLevel)(entry->lookupLevel + 1);
1178 } else {
1179 // WalkCache miss
1180 first_level = isStage2 ?
1181 ptops->firstS2Level(currState->vtcr.sl0) :
1182 ptops->firstLevel(64 - tsz);
1183 panic_if(first_level == LookupLevel::Num_ArmLookupLevel,
1184 "Table walker couldn't find lookup level\n");
1185
1186 int stride = tg - 3;
1187 int base_addr_lo = 3 + tsz - stride * (3 - first_level) - tg;
1188
1189 if (pa_range == 52) {
1190 int z = (base_addr_lo < 6) ? 6 : base_addr_lo;
1191 table_addr = mbits(ttbr, 47, z);
1192 table_addr |= (bits(ttbr, 5, 2) << 48);
1193 } else {
1194 table_addr = mbits(ttbr, 47, base_addr_lo);
1195 }
1196 }
1197
1198 desc_addr = table_addr + ptops->index(currState->vaddr, first_level, tsz);
1199
1200 return std::make_tuple(table_addr, desc_addr, first_level);
1201}
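// Worked example (hypothetical configuration): 4KB grain (tg == 12, so
// stride == 9), tsz == 39 (TCR.t0sz == 25) and no walk-cache hit. The
// walk starts at L1 and base_addr_lo == 3 + 39 - 9 * (3 - 1) - 12 == 12,
// so table_addr == ttbr[47:12] and desc_addr == table_addr +
// (VA[38:30] << 3): one 8-byte L1 descriptor per 1 GiB region. On a
// walk-cache hit the walk instead resumes one level below the cached
// entry's lookupLevel, reusing its table address.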
1202
1203void
1204TableWalker::memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr,
1205 uint8_t texcb, bool s)
1206{
1207 // Note: tc and sctlr local variables are hiding tc and sctlr class
1208 // variables
1209 DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s);
1210 te.shareable = false; // default value
1211 te.nonCacheable = false;
1212 te.outerShareable = false;
1213 if (sctlr.tre == 0 || ((sctlr.tre == 1) && (sctlr.m == 0))) {
1214 switch(texcb) {
1215 case 0: // Strongly-ordered
1216 te.nonCacheable = true;
1217 te.mtype = TlbEntry::MemoryType::StronglyOrdered;
1218 te.shareable = true;
1219 te.innerAttrs = 1;
1220 te.outerAttrs = 0;
1221 break;
1222 case 1: // Shareable Device
1223 te.nonCacheable = true;
1224 te.mtype = TlbEntry::MemoryType::Device;
1225 te.shareable = true;
1226 te.innerAttrs = 3;
1227 te.outerAttrs = 0;
1228 break;
1229 case 2: // Outer and Inner Write-Through, no Write-Allocate
1230 te.mtype = TlbEntry::MemoryType::Normal;
1231 te.shareable = s;
1232 te.innerAttrs = 6;
1233 te.outerAttrs = bits(texcb, 1, 0);
1234 break;
1235 case 3: // Outer and Inner Write-Back, no Write-Allocate
1236 te.mtype = TlbEntry::MemoryType::Normal;
1237 te.shareable = s;
1238 te.innerAttrs = 7;
1239 te.outerAttrs = bits(texcb, 1, 0);
1240 break;
1241 case 4: // Outer and Inner Non-cacheable
1242 te.nonCacheable = true;
1243 te.mtype = TlbEntry::MemoryType::Normal;
1244 te.shareable = s;
1245 te.innerAttrs = 0;
1246 te.outerAttrs = bits(texcb, 1, 0);
1247 break;
1248 case 5: // Reserved
1249 panic("Reserved texcb value!\n");
1250 break;
1251 case 6: // Implementation Defined
1252 panic("Implementation-defined texcb value!\n");
1253 break;
1254 case 7: // Outer and Inner Write-Back, Write-Allocate
1255 te.mtype = TlbEntry::MemoryType::Normal;
1256 te.shareable = s;
1257 te.innerAttrs = 5;
1258 te.outerAttrs = 1;
1259 break;
1260 case 8: // Non-shareable Device
1261 te.nonCacheable = true;
1262 te.mtype = TlbEntry::MemoryType::Device;
1263 te.shareable = false;
1264 te.innerAttrs = 3;
1265 te.outerAttrs = 0;
1266 break;
1267 case 9 ... 15: // Reserved
1268 panic("Reserved texcb value!\n");
1269 break;
1270 case 16 ... 31: // Cacheable Memory
1271 te.mtype = TlbEntry::MemoryType::Normal;
1272 te.shareable = s;
1273 if (bits(texcb, 1,0) == 0 || bits(texcb, 3,2) == 0)
1274 te.nonCacheable = true;
1275 te.innerAttrs = bits(texcb, 1, 0);
1276 te.outerAttrs = bits(texcb, 3, 2);
1277 break;
1278 default:
1279 panic("More than 32 states for 5 bits?\n");
1280 }
1281 } else {
1282 assert(tc);
1283 PRRR prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR,
1284 tc, currState->ss == SecurityState::NonSecure));
1285 NMRR nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR,
1286 tc, currState->ss == SecurityState::NonSecure));
1287 DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr);
1288 uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0;
1289 switch(bits(texcb, 2,0)) {
1290 case 0:
1291 curr_tr = prrr.tr0;
1292 curr_ir = nmrr.ir0;
1293 curr_or = nmrr.or0;
1294 te.outerShareable = (prrr.nos0 == 0);
1295 break;
1296 case 1:
1297 curr_tr = prrr.tr1;
1298 curr_ir = nmrr.ir1;
1299 curr_or = nmrr.or1;
1300 te.outerShareable = (prrr.nos1 == 0);
1301 break;
1302 case 2:
1303 curr_tr = prrr.tr2;
1304 curr_ir = nmrr.ir2;
1305 curr_or = nmrr.or2;
1306 te.outerShareable = (prrr.nos2 == 0);
1307 break;
1308 case 3:
1309 curr_tr = prrr.tr3;
1310 curr_ir = nmrr.ir3;
1311 curr_or = nmrr.or3;
1312 te.outerShareable = (prrr.nos3 == 0);
1313 break;
1314 case 4:
1315 curr_tr = prrr.tr4;
1316 curr_ir = nmrr.ir4;
1317 curr_or = nmrr.or4;
1318 te.outerShareable = (prrr.nos4 == 0);
1319 break;
1320 case 5:
1321 curr_tr = prrr.tr5;
1322 curr_ir = nmrr.ir5;
1323 curr_or = nmrr.or5;
1324 te.outerShareable = (prrr.nos5 == 0);
1325 break;
1326 case 6:
1327 panic("Imp defined type\n");
1328 case 7:
1329 curr_tr = prrr.tr7;
1330 curr_ir = nmrr.ir7;
1331 curr_or = nmrr.or7;
1332 te.outerShareable = (prrr.nos7 == 0);
1333 break;
1334 }
1335
1336 switch(curr_tr) {
1337 case 0:
1338 DPRINTF(TLBVerbose, "StronglyOrdered\n");
1339 te.mtype = TlbEntry::MemoryType::StronglyOrdered;
1340 te.nonCacheable = true;
1341 te.innerAttrs = 1;
1342 te.outerAttrs = 0;
1343 te.shareable = true;
1344 break;
1345 case 1:
1346 DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n",
1347 prrr.ds1, prrr.ds0, s);
1348 te.mtype = TlbEntry::MemoryType::Device;
1349 te.nonCacheable = true;
1350 te.innerAttrs = 3;
1351 te.outerAttrs = 0;
1352 if (prrr.ds1 && s)
1353 te.shareable = true;
1354 if (prrr.ds0 && !s)
1355 te.shareable = true;
1356 break;
1357 case 2:
1358 DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n",
1359 prrr.ns1, prrr.ns0, s);
1360 te.mtype = TlbEntry::MemoryType::Normal;
1361 if (prrr.ns1 && s)
1362 te.shareable = true;
1363 if (prrr.ns0 && !s)
1364 te.shareable = true;
1365 break;
1366 case 3:
1367 panic("Reserved type");
1368 }
1369
1370 if (te.mtype == TlbEntry::MemoryType::Normal){
1371 switch(curr_ir) {
1372 case 0:
1373 te.nonCacheable = true;
1374 te.innerAttrs = 0;
1375 break;
1376 case 1:
1377 te.innerAttrs = 5;
1378 break;
1379 case 2:
1380 te.innerAttrs = 6;
1381 break;
1382 case 3:
1383 te.innerAttrs = 7;
1384 break;
1385 }
1386
1387 switch(curr_or) {
1388 case 0:
1389 te.nonCacheable = true;
1390 te.outerAttrs = 0;
1391 break;
1392 case 1:
1393 te.outerAttrs = 1;
1394 break;
1395 case 2:
1396 te.outerAttrs = 2;
1397 break;
1398 case 3:
1399 te.outerAttrs = 3;
1400 break;
1401 }
1402 }
1403 }
1404 DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, "
1405 "outerAttrs: %d\n",
1406 te.shareable, te.innerAttrs, te.outerAttrs);
1407 te.setAttributes(false);
1408}
1409
1410void
1411TableWalker::memAttrsLPAE(ThreadContext *tc, TlbEntry &te,
1412 LongDescriptor &l_descriptor)
1413{
1414 assert(release->has(ArmExtension::LPAE));
1415
1416 uint8_t attr;
1417 uint8_t sh = l_descriptor.sh();
1418 // Different format and source of attributes if this is a stage 2
1419 // translation
1420 if (isStage2) {
1421 attr = l_descriptor.memAttr();
1422 uint8_t attr_3_2 = (attr >> 2) & 0x3;
1423 uint8_t attr_1_0 = attr & 0x3;
1424
1425 DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh);
1426
1427 if (attr_3_2 == 0) {
1428 te.mtype = attr_1_0 == 0 ? TlbEntry::MemoryType::StronglyOrdered
1429 : TlbEntry::MemoryType::Device;
1430 te.outerAttrs = 0;
1431 te.innerAttrs = attr_1_0 == 0 ? 1 : 3;
1432 te.nonCacheable = true;
1433 } else {
1434 te.mtype = TlbEntry::MemoryType::Normal;
1435 te.outerAttrs = attr_3_2 == 1 ? 0 :
1436 attr_3_2 == 2 ? 2 : 1;
1437 te.innerAttrs = attr_1_0 == 1 ? 0 :
1438 attr_1_0 == 2 ? 6 : 5;
1439 te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1);
1440 }
1441 } else {
1442 uint8_t attrIndx = l_descriptor.attrIndx();
1443
1444 // LPAE always uses remapping of memory attributes, irrespective of the
1445 // value of SCTLR.TRE
1446 MiscRegIndex reg = attrIndx & 0x4 ? MISCREG_MAIR1 : MISCREG_MAIR0;
1447 int reg_as_int = snsBankedIndex(reg, currState->tc,
1448 currState->ss == SecurityState::NonSecure);
1449 uint32_t mair = currState->tc->readMiscReg(reg_as_int);
1450 attr = (mair >> (8 * (attrIndx % 4))) & 0xff;
1451 uint8_t attr_7_4 = bits(attr, 7, 4);
1452 uint8_t attr_3_0 = bits(attr, 3, 0);
1453 DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx, sh, attr);
1454
1455 // Note: the memory subsystem only cares about the 'cacheable' memory
1456 // attribute. The other attributes are only used to fill the PAR register
1457 // accordingly to provide the illusion of full support
1458 te.nonCacheable = false;
1459
1460 switch (attr_7_4) {
1461 case 0x0:
1462 // Strongly-ordered or Device memory
1463 if (attr_3_0 == 0x0)
1464 te.mtype = TlbEntry::MemoryType::StronglyOrdered;
1465 else if (attr_3_0 == 0x4)
1466 te.mtype = TlbEntry::MemoryType::Device;
1467 else
1468 panic("Unpredictable behavior\n");
1469 te.nonCacheable = true;
1470 te.outerAttrs = 0;
1471 break;
1472 case 0x4:
1473 // Normal memory, Outer Non-cacheable
1474 te.mtype = TlbEntry::MemoryType::Normal;
1475 te.outerAttrs = 0;
1476 if (attr_3_0 == 0x4)
1477 // Inner Non-cacheable
1478 te.nonCacheable = true;
1479 else if (attr_3_0 < 0x8)
1480 panic("Unpredictable behavior\n");
1481 break;
1482 case 0x8:
1483 case 0x9:
1484 case 0xa:
1485 case 0xb:
1486 case 0xc:
1487 case 0xd:
1488 case 0xe:
1489 case 0xf:
1490 if (attr_7_4 & 0x4) {
1491 te.outerAttrs = (attr_7_4 & 1) ? 1 : 3;
1492 } else {
1493 te.outerAttrs = 0x2;
1494 }
1495 // Normal memory, Outer Cacheable
1496 te.mtype = TlbEntry::MemoryType::Normal;
1497 if (attr_3_0 != 0x4 && attr_3_0 < 0x8)
1498 panic("Unpredictable behavior\n");
1499 break;
1500 default:
1501 panic("Unpredictable behavior\n");
1502 break;
1503 }
1504
1505 switch (attr_3_0) {
1506 case 0x0:
1507 te.innerAttrs = 0x1;
1508 break;
1509 case 0x4:
1510 te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0;
1511 break;
1512 case 0x8:
1513 case 0x9:
1514 case 0xA:
1515 case 0xB:
1516 te.innerAttrs = 6;
1517 break;
1518 case 0xC:
1519 case 0xD:
1520 case 0xE:
1521 case 0xF:
1522 te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7;
1523 break;
1524 default:
1525 panic("Unpredictable behavior\n");
1526 break;
1527 }
1528 }
1529
1530 te.outerShareable = sh == 2;
1531 te.shareable = (sh & 0x2) ? true : false;
1532 te.setAttributes(true);
1533 te.attributes |= (uint64_t) attr << 56;
1534}
1535
1536bool
1537TableWalker::uncacheableFromAttrs(uint8_t attrs)
1538{
1539 return !bits(attrs, 2) || // Write-through
1540 attrs == 0b0100; // NonCacheable
1541}
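// This follows the MAIR Normal-memory nibble encoding: bit 2 clear
// means Write-Through (0b00xx/0b10xx) and 0b0100 is Non-cacheable,
// both conservatively treated as uncacheable here. Example: 0b1111
// (Write-Back, R/W-allocate) is cacheable, while 0b1010
// (Write-Through) is not.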
1542
1543void
1544TableWalker::memAttrsAArch64(ThreadContext *tc, TlbEntry &te,
1545 LongDescriptor &l_descriptor)
1546{
1547 uint8_t attr;
1548 uint8_t attr_hi;
1549 uint8_t attr_lo;
1550 uint8_t sh = l_descriptor.sh();
1551
1552 if (isStage2) {
1553 attr = l_descriptor.memAttr();
1554 uint8_t attr_hi = (attr >> 2) & 0x3;
1555 uint8_t attr_lo = attr & 0x3;
1556
1557 DPRINTF(TLBVerbose, "memAttrsAArch64 MemAttr:%#x sh:%#x\n", attr, sh);
1558
1559 if (attr_hi == 0) {
1560 te.mtype = attr_lo == 0 ? TlbEntry::MemoryType::StronglyOrdered
1561 : TlbEntry::MemoryType::Device;
1562 te.outerAttrs = 0;
1563 te.innerAttrs = attr_lo == 0 ? 1 : 3;
1564 te.nonCacheable = true;
1565 } else {
1566 te.mtype = TlbEntry::MemoryType::Normal;
1567 te.outerAttrs = attr_hi == 1 ? 0 :
1568 attr_hi == 2 ? 2 : 1;
1569 te.innerAttrs = attr_lo == 1 ? 0 :
1570 attr_lo == 2 ? 6 : 5;
1571 // Treat write-through memory as uncacheable, this is safe
1572 // but for performance reasons not optimal.
1573 te.nonCacheable = (attr_hi == 1) || (attr_hi == 2) ||
1574 (attr_lo == 1) || (attr_lo == 2);
1575
1576 // To be used when merging stage 1 and stage 2 attributes
1577 te.xs = !l_descriptor.fnxs();
1578 }
1579 } else {
1580 uint8_t attrIndx = l_descriptor.attrIndx();
1581
1582 DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh);
1583
1584 // Select MAIR
1585 uint64_t mair;
1586 switch (currState->regime) {
1587 case TranslationRegime::EL10:
1588 mair = tc->readMiscReg(MISCREG_MAIR_EL1);
1589 break;
1590 case TranslationRegime::EL20:
1591 case TranslationRegime::EL2:
1592 mair = tc->readMiscReg(MISCREG_MAIR_EL2);
1593 break;
1594 case TranslationRegime::EL3:
1595 mair = tc->readMiscReg(MISCREG_MAIR_EL3);
1596 break;
1597 default:
1598 panic("Invalid exception level");
1599 break;
1600 }
1601
1602 // Select attributes
1603 attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
1604 attr_lo = bits(attr, 3, 0);
1605 attr_hi = bits(attr, 7, 4);
1606
1607 // Treat write-through memory as uncacheable, this is safe
1608 // but for performance reasons not optimal.
1609 switch (attr) {
1610 case 0b00000000 ... 0b00001111: // Device Memory
1611 te.mtype = TlbEntry::MemoryType::Device;
1612 te.nonCacheable = true;
1613 te.xs = !bits(attr, 0);
1614 break;
1615 case 0b01000000: // Normal memory, Non-cacheable
1616 te.mtype = TlbEntry::MemoryType::Normal;
1617 te.nonCacheable = true;
1618 te.xs = false;
1619 break;
1620 case 0b10100000: // Normal memory, Write-through
1621 te.mtype = TlbEntry::MemoryType::Normal;
1622 te.nonCacheable = true;
1623 te.xs = false;
1624 break;
1625 default:
1626 te.mtype = TlbEntry::MemoryType::Normal;
1627 te.nonCacheable = uncacheableFromAttrs(attr_hi) ||
1628 uncacheableFromAttrs(attr_lo);
1629 // XS is 0 only for write-back regions (cacheable)
1630 te.xs = te.nonCacheable;
1631 break;
1632 }
1633
1634 te.shareable = sh == 2;
1635 te.outerShareable = (sh & 0x2) ? true : false;
1636 // Attributes formatted according to the 64-bit PAR
1637 te.attributes = ((uint64_t) attr << 56) |
1638 (1 << 11) | // LPAE bit
1639 (te.ns << 9) | // NS bit
1640 (sh << 7);
1641 }
1642}
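// Illustrative MAIR selection (hypothetical value): with MAIR_EL1 ==
// 0x000000000000ff44 and AttrIndx == 1, attr == bits(mair, 15, 8) ==
// 0xff, i.e. Normal Write-Back memory, so the default case above leaves
// te.nonCacheable false and te.xs == 0.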
1643
1644void
1645TableWalker::memAttrsWalkAArch64(TlbEntry &te)
1646{
1647 te.mtype = TlbEntry::MemoryType::Normal;
1648 if (uncacheableWalk()) {
1649 te.shareable = 3;
1650 te.outerAttrs = 0;
1651 te.innerAttrs = 0;
1652 te.nonCacheable = true;
1653 } else {
1654 te.shareable = currState->sh;
1655 te.outerAttrs = currState->orgn;
1656 te.innerAttrs = currState->irgn;
1657 te.nonCacheable = (te.outerAttrs == 0 || te.outerAttrs == 2) &&
1658 (te.innerAttrs == 0 || te.innerAttrs == 2);
1659 }
1660
1661 // XS is 0 only for write-back regions (cacheable)
1662 te.xs = te.nonCacheable;
1663}
1664
1665void
1666TableWalker::doL1Descriptor()
1667{
1668 if (currState->fault != NoFault) {
1669 return;
1670 }
1671
1672 currState->l1Desc.data = htog(currState->l1Desc.data,
1673 byteOrder(currState->tc));
1674
1675 DPRINTF(TLB, "L1 descriptor for %#x is %#x\n",
1676 currState->vaddr_tainted, currState->l1Desc.data);
1677 TlbEntry te;
1678
1679 const bool is_atomic = currState->req->isAtomic();
1680
1681 switch (currState->l1Desc.type()) {
1682 case L1Descriptor::Ignore:
1683 case L1Descriptor::Reserved:
1684 if (!currState->timing) {
1685 currState->tc = NULL;
1686 currState->req = NULL;
1687 }
1688 DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
1689 if (currState->isFetch)
1690 currState->fault =
1691 std::make_shared<PrefetchAbort>(
1692 currState->vaddr_tainted,
1693 ArmFault::TranslationLL + LookupLevel::L1,
1694 isStage2,
1695 ArmFault::VmsaTran);
1696 else
1697 currState->fault =
1698 std::make_shared<DataAbort>(
1699 currState->vaddr_tainted,
1700 TlbEntry::DomainType::NoAccess,
1701 is_atomic ? false : currState->isWrite,
1702 ArmFault::TranslationLL + LookupLevel::L1, isStage2,
1703 ArmFault::VmsaTran);
1704 return;
1705 case L1Descriptor::Section:
1706 if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) {
1707 /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is
1708 * enabled if set, do l1.Desc.setAp0() instead of generating
1709 * AccessFlag0
1710 */
1711
1712 currState->fault = std::make_shared<DataAbort>(
1713 currState->vaddr_tainted,
1714 currState->l1Desc.domain(),
1715 is_atomic ? false : currState->isWrite,
1716 ArmFault::AccessFlagLL + LookupLevel::L1,
1717 isStage2,
1718 ArmFault::VmsaTran);
1719 }
1720 if (currState->l1Desc.supersection()) {
1721 panic("Haven't implemented supersections\n");
1722 }
1723 insertTableEntry(currState->l1Desc, false);
1724 return;
1725 case L1Descriptor::PageTable:
1726 {
1727 Addr l2desc_addr;
1728 l2desc_addr = currState->l1Desc.l2Addr() |
1729 (bits(currState->vaddr, 19, 12) << 2);
1730 DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n",
1731 l2desc_addr, currState->ss == SecurityState::Secure ?
1732 "s" : "ns");
1733
1734 Request::Flags flag = Request::PT_WALK;
1735
1736 if (currState->sctlr.c == 0 || currState->isUncacheable) {
1737 flag.set(Request::UNCACHEABLE);
1738 }
1739
1740 if (currState->secureLookup)
1741 flag.set(Request::SECURE);
1742
1743 fetchDescriptor(
1744 l2desc_addr, currState->l2Desc,
1745 sizeof(uint32_t), flag, LookupLevel::L2,
1746 &doL2DescEvent,
1747 &TableWalker::doL2Descriptor);
1748
1749 currState->delayed = currState->timing;
1750
1751 return;
1752 }
1753 default:
1754 panic("A new type in a 2 bit field?\n");
1755 }
1756}
1757
1758Fault
1759TableWalker::generateLongDescFault(ArmFault::FaultSource src)
1760{
1761 if (currState->isFetch) {
1762 return std::make_shared<PrefetchAbort>(
1763 currState->vaddr_tainted,
1764 src + currState->longDesc.lookupLevel,
1765 isStage2,
1766 ArmFault::LpaeTran);
1767 } else {
1768 return std::make_shared<DataAbort>(
1769 currState->vaddr_tainted,
1770 TlbEntry::DomainType::NoAccess,
1771 currState->req->isAtomic() ? false : currState->isWrite,
1772 src + currState->longDesc.lookupLevel,
1773 isStage2,
1774 ArmFault::LpaeTran);
1775 }
1776}
1777
1778void
1779TableWalker::doLongDescriptor()
1780{
1781 if (currState->fault != NoFault) {
1782 return;
1783 }
1784
1785 currState->longDesc.data = htog(currState->longDesc.data,
1786 byteOrder(currState->tc));
1787
1788 DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n",
1789 currState->longDesc.lookupLevel, currState->vaddr_tainted,
1790 currState->longDesc.data,
1791 currState->aarch64 ? "AArch64" : "long-desc.");
1792
1793 if ((currState->longDesc.type() == LongDescriptor::Block) ||
1794 (currState->longDesc.type() == LongDescriptor::Page)) {
1795 DPRINTF(PageTableWalker, "Analyzing L%d descriptor: %#llx, pxn: %d, "
1796 "xn: %d, ap: %d, piindex: %d, af: %d, type: %d\n",
1797 currState->longDesc.lookupLevel,
1798 currState->longDesc.data,
1799 currState->longDesc.pxn(),
1800 currState->longDesc.xn(),
1801 currState->longDesc.ap(),
1802 currState->longDesc.piindex(),
1803 currState->longDesc.af(),
1804 currState->longDesc.type());
1805 } else {
1806 DPRINTF(PageTableWalker, "Analyzing L%d descriptor: %#llx, type: %d\n",
1807 currState->longDesc.lookupLevel,
1808 currState->longDesc.data,
1809 currState->longDesc.type());
1810 }
1811
1812 TlbEntry te;
1813
1814 switch (currState->longDesc.type()) {
1815 case LongDescriptor::Invalid:
1816 DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
1817 currState->longDesc.lookupLevel,
1818 ArmFault::TranslationLL + currState->longDesc.lookupLevel);
1819
1819
1820 currState->fault = generateLongDescFault(ArmFault::TranslationLL);
1821 if (!currState->timing) {
1822 currState->tc = NULL;
1823 currState->req = NULL;
1824 }
1825 return;
1826
1827 case LongDescriptor::Block:
1828 case LongDescriptor::Page:
1829 {
1830 auto fault_source = ArmFault::FaultSourceInvalid;
1831 // Check for address size fault
1832 if (checkAddrSizeFaultAArch64(currState->longDesc.paddr(),
1833 currState->physAddrRange)) {
1834
1835 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1836 currState->longDesc.lookupLevel);
1837 fault_source = ArmFault::AddressSizeLL;
1838
1839 // Check for access fault
1840 } else if (currState->longDesc.af() == 0) {
1841
1842 DPRINTF(TLB, "L%d descriptor causing Access Fault\n",
1843 currState->longDesc.lookupLevel);
1844 fault_source = ArmFault::AccessFlagLL;
1845 }
1846
1847 if (fault_source != ArmFault::FaultSourceInvalid) {
1848 currState->fault = generateLongDescFault(fault_source);
1849 } else {
1850 insertTableEntry(currState->longDesc, true);
1851 }
1852 }
1853 return;
1854 case LongDescriptor::Table:
1855 {
1856 // Set hierarchical permission flags
1857 if (!isStage2) {
1858 currState->secureLookup = currState->secureLookup &&
1859 currState->longDesc.secureTable();
1860 }
1861 currState->longDescData->rwTable =
1862 currState->longDescData->rwTable &&
1863 (currState->longDesc.rwTable() || currState->hpd);
1864 currState->longDescData->userTable =
1865 currState->longDescData->userTable &&
1866 (currState->longDesc.userTable() || currState->hpd);
1867 currState->longDescData->xnTable =
1868 currState->longDescData->xnTable ||
1869 (currState->longDesc.xnTable() && !currState->hpd);
1870 currState->longDescData->pxnTable =
1871 currState->longDescData->pxnTable ||
1872 (currState->longDesc.pxnTable() && !currState->hpd);
1873
1874 // Set up next level lookup
1875 Addr next_desc_addr = currState->longDesc.nextDescAddr(
1876 currState->vaddr);
1877
1878 DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
1879 currState->longDesc.lookupLevel,
1880 currState->longDesc.lookupLevel + 1,
1881 next_desc_addr,
1882 currState->secureLookup ? "s" : "ns");
1883
1884 // Check for address size fault
1885 if (currState->aarch64 && checkAddrSizeFaultAArch64(
1886 next_desc_addr, currState->physAddrRange)) {
1887 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1888 currState->longDesc.lookupLevel);
1889
1890 currState->fault = generateLongDescFault(
1891 ArmFault::AddressSizeLL);
1892 return;
1893 }
1894
1895 if (mmu->hasWalkCache()) {
1896 insertPartialTableEntry(currState->longDesc);
1897 }
1898
1899 Request::Flags flag = Request::PT_WALK;
1900 if (currState->secureLookup)
1901 flag.set(Request::SECURE);
1902
1903 if (currState->sctlr.c == 0 || currState->isUncacheable) {
1904 flag.set(Request::UNCACHEABLE);
1905 }
1906
1907 LookupLevel L = currState->longDesc.lookupLevel =
1908 (LookupLevel) (currState->longDesc.lookupLevel + 1);
1909 Event *event = NULL;
1910 switch (L) {
1911 case LookupLevel::L1:
1912 assert(currState->aarch64);
1913 case LookupLevel::L2:
1914 case LookupLevel::L3:
1915 event = LongDescEventByLevel[L];
1916 break;
1917 default:
1918 panic("Wrong lookup level in table walk\n");
1919 break;
1920 }
1921
1922 fetchDescriptor(
1923 next_desc_addr, currState->longDesc,
1924 sizeof(uint64_t), flag, L, event,
1925 &TableWalker::doLongDescriptor);
1926
1927 currState->delayed = currState->timing;
1928 }
1929 return;
1930 default:
1931 panic("A new type in a 2 bit field?\n");
1932 }
1933}
1934
1935void
1936TableWalker::doL2Descriptor()
1937{
1938 if (currState->fault != NoFault) {
1939 return;
1940 }
1941
1942 currState->l2Desc.data = htog(currState->l2Desc.data,
1943 byteOrder(currState->tc));
1944
1945 DPRINTF(TLB, "L2 descriptor for %#x is %#x\n",
1946 currState->vaddr_tainted, currState->l2Desc.data);
1947 TlbEntry te;
1948
1949 const bool is_atomic = currState->req->isAtomic();
1950
1951 if (currState->l2Desc.invalid()) {
1952 DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
1953 if (!currState->timing) {
1954 currState->tc = NULL;
1955 currState->req = NULL;
1956 }
1957 if (currState->isFetch)
1958 currState->fault = std::make_shared<PrefetchAbort>(
1959 currState->vaddr_tainted,
1960 ArmFault::TranslationLL + LookupLevel::L2,
1961 isStage2,
1963 else
1964 currState->fault = std::make_shared<DataAbort>(
1965 currState->vaddr_tainted, currState->l1Desc.domain(),
1966 is_atomic ? false : currState->isWrite,
1967 ArmFault::TranslationLL + LookupLevel::L2,
1968 isStage2,
1970 return;
1971 }
1972
1973 if (currState->sctlr.afe && bits(currState->l2Desc.ap(), 0) == 0) {
1974 /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is
1975 * enabled if set, do l2.Desc.setAp0() instead of generating
1976 * AccessFlag0 */
1977 DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
1978 currState->sctlr.afe, currState->l2Desc.ap());
1979
1980 currState->fault = std::make_shared<DataAbort>(
1981 currState->vaddr_tainted,
1982 TlbEntry::DomainType::NoAccess,
1983 is_atomic ? false : currState->isWrite,
1984 ArmFault::AccessFlagLL + LookupLevel::L2, isStage2,
1985 ArmFault::VmsaTran);
1986 }
1987
1988 insertTableEntry(currState->l2Desc, false);
1989}
1990
1991void
1992TableWalker::doL1DescriptorWrapper()
1993{
1994 currState = stateQueues[LookupLevel::L1].front();
1995 currState->delayed = false;
1996 // if there's a stage2 translation object we don't need it any more
1997 if (currState->stage2Tran) {
1998 delete currState->stage2Tran;
1999 currState->stage2Tran = NULL;
2000 }
2001
2002
2003 DPRINTF(PageTableWalker, "L1 Desc object host addr: %p\n",
2004 &currState->l1Desc.data);
2005 DPRINTF(PageTableWalker, "L1 Desc object data: %08x\n",
2006 currState->l1Desc.data);
2007
2008 DPRINTF(PageTableWalker, "calling doL1Descriptor for vaddr:%#x\n",
2009 currState->vaddr_tainted);
2010 doL1Descriptor();
2011
2012 stateQueues[LookupLevel::L1].pop_front();
2013 // Check if fault was generated
2014 if (currState->fault != NoFault) {
2015 currState->transState->finish(currState->fault, currState->req,
2016 currState->tc, currState->mode);
2017 stats.walksShortTerminatedAtLevel[0]++;
2018
2019 pending = false;
2020 nextWalk(currState->tc);
2021
2022 currState->req = NULL;
2023 currState->tc = NULL;
2024 currState->delayed = false;
2025 delete currState;
2026 }
2027 else if (!currState->delayed) {
2028 // delay is not set so there is no L2 to do
2029 // Don't finish the translation if a stage 2 look up is underway
2030 stats.walkServiceTime.sample(curTick() - currState->startTime);
2031 DPRINTF(PageTableWalker, "calling translateTiming again\n");
2032
2033 mmu->translateTiming(currState->req, currState->tc,
2034 currState->transState, currState->mode,
2035 currState->tranType, isStage2);
2036
2037 stats.walksShortTerminatedAtLevel[0]++;
2038
2039 pending = false;
2040 nextWalk(currState->tc);
2041
2042 currState->req = NULL;
2043 currState->tc = NULL;
2044 currState->delayed = false;
2045 delete currState;
2046 } else {
2047 // need to do L2 descriptor
2048 stashCurrState(LookupLevel::L2);
2049 }
2050 currState = NULL;
2051}
2052
2053 void
2054 TableWalker::doL2DescriptorWrapper()
2055 {
2056 currState = stateQueues[LookupLevel::L2].front();
2057 assert(currState->delayed);
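// unlike the L1 case, an L2 walk is only ever reached as the delayed
// continuation of a timing-mode L1 walk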
2058 // if there's a stage2 translation object we don't need it any more
2059 if (currState->stage2Tran) {
2060 delete currState->stage2Tran;
2061 currState->stage2Tran = NULL;
2062 }
2063
2064 DPRINTF(PageTableWalker, "calling doL2Descriptor for vaddr:%#x\n",
2065 currState->vaddr_tainted);
2066 doL2Descriptor();
2067
2068 // Check if fault was generated
2069 if (currState->fault != NoFault) {
2070 currState->transState->finish(currState->fault, currState->req,
2071 currState->tc, currState->mode);
2072 stats.walksShortTerminatedAtLevel[1]++;
2073 } else {
2074 stats.walkServiceTime.sample(curTick() - currState->startTime);
2075 DPRINTF(PageTableWalker, "calling translateTiming again\n");
2076
2077 mmu->translateTiming(currState->req, currState->tc,
2078 currState->transState, currState->mode,
2079 currState->tranType, isStage2);
2080
2081 stats.walksShortTerminatedAtLevel[1]++;
2082 }
2083
2084
2085 stateQueues[LookupLevel::L2].pop_front();
2086 pending = false;
2087 nextWalk(currState->tc);
2088
2089 currState->req = NULL;
2090 currState->tc = NULL;
2091 currState->delayed = false;
2092
2093 delete currState;
2094 currState = NULL;
2095}
2096
2097 void
2098 TableWalker::doL0LongDescriptorWrapper()
2099 {
2100 doLongDescriptorWrapper(LookupLevel::L0);
2101 }
2102
2103 void
2104 TableWalker::doL1LongDescriptorWrapper()
2105 {
2106 doLongDescriptorWrapper(LookupLevel::L1);
2107 }
2108
2109 void
2110 TableWalker::doL2LongDescriptorWrapper()
2111 {
2112 doLongDescriptorWrapper(LookupLevel::L2);
2113 }
2114
2115 void
2116 TableWalker::doL3LongDescriptorWrapper()
2117 {
2118 doLongDescriptorWrapper(LookupLevel::L3);
2119 }
2120
2121 void
2122 TableWalker::doLongDescriptorWrapper(LookupLevel curr_lookup_level)
2123 {
2124 currState = stateQueues[curr_lookup_level].front();
2125 assert(curr_lookup_level == currState->longDesc.lookupLevel);
2126 currState->delayed = false;
2127
2128 // if there's a stage2 translation object we don't need it any more
2129 if (currState->stage2Tran) {
2130 delete currState->stage2Tran;
2131 currState->stage2Tran = NULL;
2132 }
2133
2134 DPRINTF(PageTableWalker, "calling doLongDescriptor for vaddr:%#x\n",
2135 currState->vaddr_tainted);
2136 doLongDescriptor();
2137
2138 stateQueues[curr_lookup_level].pop_front();
2139
2140 if (currState->fault != NoFault) {
2141 // A fault was generated
2142 currState->transState->finish(currState->fault, currState->req,
2143 currState->tc, currState->mode);
2144
2145 pending = false;
2146 nextWalk(currState->tc);
2147
2148 currState->req = NULL;
2149 currState->tc = NULL;
2150 currState->delayed = false;
2151 delete currState;
2152 } else if (!currState->delayed) {
2153 // No additional lookups required
2154 DPRINTF(PageTableWalker, "calling translateTiming again\n");
2155 stats.walkServiceTime.sample(curTick() - currState->startTime);
2156
2157 mmu->translateTiming(currState->req, currState->tc,
2158 currState->transState, currState->mode,
2159 currState->tranType, isStage2);
2160
2161 stats.walksLongTerminatedAtLevel[(unsigned) curr_lookup_level]++;
2162
2163 pending = false;
2164 nextWalk(currState->tc);
2165
2166 currState->req = NULL;
2167 currState->tc = NULL;
2168 currState->delayed = false;
2169 delete currState;
2170 } else {
2171 if (curr_lookup_level >= LookupLevel::Num_ArmLookupLevel - 1)
2172 panic("Max. number of lookups already reached in table walk\n");
2173 // Need to perform additional lookups
2174 stashCurrState(currState->longDesc.lookupLevel);
2175 }
2176 currState = NULL;
2177}
2178
2179
2180 void
2181 TableWalker::nextWalk(ThreadContext *tc)
2182 {
2183 if (pendingQueue.size())
2184 schedule(doProcessEvent, clockEdge(Cycles(1)));
2185 else
2186 completeDrain();
2187}
2188
2189 void
2190 TableWalker::fetchDescriptor(Addr desc_addr,
2191 DescriptorBase &descriptor, int num_bytes,
2192 Request::Flags flags, LookupLevel lookup_level, Event *event,
2193 void (TableWalker::*doDescriptor)())
2194{
2195 uint8_t *data = descriptor.getRawPtr();
2196
2197 DPRINTF(PageTableWalker,
2198 "Fetching descriptor at address: 0x%x stage2Req: %d\n",
2199 desc_addr, currState->stage2Req);
2200
2201 // If this translation has a stage 2 then we know desc_addr is an IPA and
2202 // needs to be translated before we can access the page table. Do that
2203 // check here.
2204 if (currState->stage2Req) {
2205 Fault fault;
2206
2207 if (currState->timing) {
2208 auto *tran = new
2209 Stage2Walk(*this, data, event, currState->vaddr,
2210 currState->mode, currState->tranType);
2211 currState->stage2Tran = tran;
2212 readDataTimed(currState->tc, desc_addr, tran, num_bytes, flags);
2213 fault = tran->fault;
2214
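// a timing stage 2 walk may fault synchronously, in which case the
// fault is already visible here and is recorded on the walker state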
2215 if (fault != NoFault) {
2216 currState->fault = fault;
2217 }
2218 } else {
2219 fault = readDataUntimed(currState->tc,
2220 currState->vaddr, desc_addr, data, num_bytes, flags,
2221 currState->mode,
2222 currState->tranType,
2223 currState->functional);
2224
2225 if (fault != NoFault) {
2226 currState->fault = fault;
2227 }
2228
2229 (this->*doDescriptor)();
2230 }
2231 } else {
2232 RequestPtr req = std::make_shared<Request>(
2233 desc_addr, num_bytes, flags, requestorId);
2234 req->taskId(context_switch_task_id::DMA);
2235
2236 mpamTagTableWalk(req);
2237
2238 Fault fault = testWalk(req, descriptor.domain(),
2239 lookup_level);
2240
2241 if (fault != NoFault) {
2242 currState->fault = fault;
2243 return;
2244 }
2245
2246 if (currState->timing) {
2247 port->sendTimingReq(req, data,
2248 currState->tc->getCpuPtr()->clockPeriod(), event);
2249
2250 } else if (!currState->functional) {
2251 port->sendAtomicReq(req, data,
2252 currState->tc->getCpuPtr()->clockPeriod());
2253
2254 (this->*doDescriptor)();
2255 } else {
2256 port->sendFunctionalReq(req, data);
2257 (this->*doDescriptor)();
2258 }
2259 }
2260}
2261
2262 void
2263 TableWalker::stashCurrState(int queue_idx)
2264 {
2265 DPRINTF(PageTableWalker, "Adding to walker fifo: "
2266 "queue size before adding: %d\n",
2267 stateQueues[queue_idx].size());
2268 stateQueues[queue_idx].push_back(currState);
2269 currState = NULL;
2270}
2271
2272 void
2273 TableWalker::insertPartialTableEntry(LongDescriptor &descriptor)
2274 {
2275 const bool have_security = release->has(ArmExtension::SECURITY);
2276 TlbEntry te;
2277
2278 // Create and fill a new page table entry
2279 te.valid = true;
2280 te.longDescFormat = true;
2281 te.partial = true;
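// a partial entry caches an intermediate table address produced during
// the walk (i.e. a walk-cache entry) rather than a final translation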
2282 // The entry is global if there is no address space identifier
2283 // to differentiate translation contexts
2284 te.global = !mmu->hasUnprivRegime(currState->regime);
2285 te.asid = currState->asid;
2286 te.vmid = currState->vmid;
2287 te.N = descriptor.offsetBits();
2288 te.tg = descriptor.grainSize;
2289 te.vpn = currState->vaddr >> te.N;
2290 te.size = (1ULL << te.N) - 1;
2291 te.pfn = descriptor.nextTableAddr();
2292 te.domain = descriptor.domain();
2293 te.lookupLevel = descriptor.lookupLevel;
2294 te.ns = !descriptor.secure(have_security, currState);
2295 te.ss = currState->ss;
2296 te.ipaSpace = currState->ipaSpace; // Used by stage2 entries only
2297 te.type = TypeTLB::unified;
2298
2299 te.regime = currState->regime;
2300
2301 te.xn = currState->longDescData->xnTable;
2302 te.pxn = currState->longDescData->pxnTable;
2303 te.ap = (currState->longDescData->rwTable << 1) |
2304 (currState->longDescData->userTable);
2305
2306 memAttrsWalkAArch64(te);
2307
2308 // Debug output
2309 DPRINTF(TLB, descriptor.dbgHeader().c_str());
2310 DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
2311 te.N, te.pfn, te.size, te.global, te.valid);
2312 DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
2313 "vmid:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
2314 te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid,
2315 te.nonCacheable, te.ns);
2316 DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
2317 descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
2318 descriptor.getRawData());
2319
2320 // Insert the entry into the TLBs
2321 tlb->multiInsert(TlbEntry::KeyType(te), te);
2322}
2323
2324void
2325TableWalker::insertTableEntry(DescriptorBase &descriptor, bool long_descriptor)
2326{
2327 const bool have_security = release->has(ArmExtension::SECURITY);
2328 TlbEntry te;
2329
2330 // Create and fill a new page table entry
2331 te.valid = true;
2332 te.longDescFormat = long_descriptor;
2333 te.asid = currState->asid;
2334 te.vmid = currState->vmid;
2335 te.N = descriptor.offsetBits();
2336 te.vpn = currState->vaddr >> te.N;
2337 te.size = (1<<te.N) - 1;
2338 te.pfn = descriptor.pfn();
2339 te.domain = descriptor.domain();
2340 te.lookupLevel = descriptor.lookupLevel;
2341 te.ns = !descriptor.secure(have_security, currState);
2342 te.ss = currState->ss;
2343 te.ipaSpace = currState->ipaSpace; // Used by stage2 entries only
2344 te.xn = descriptor.xn();
2345 te.type = currState->mode == BaseMMU::Execute ?
2346 TypeTLB::instruction : TypeTLB::data;
2347
2348 te.regime = currState->regime;
2349
2350 stats.pageSizes[pageSizeNtoStatBin(te.N)]++;
2351 stats.requestOrigin[COMPLETED][currState->isFetch]++;
2352
2353 // ASID has no meaning for stage 2 TLB entries, so mark all stage 2 entries
2354 // as global
2355 te.global = descriptor.global(currState) || isStage2;
2356 if (long_descriptor) {
2357 LongDescriptor l_descriptor =
2358 dynamic_cast<LongDescriptor &>(descriptor);
2359
2360 te.tg = l_descriptor.grainSize;
2361 te.xn |= currState->longDescData->xnTable;
2362 te.pxn = currState->longDescData->pxnTable || l_descriptor.pxn();
2363 if (isStage2) {
2364 // this is actually the HAP field, but it's stored in the same bit
2365 // positions as the AP field in a stage 1 translation.
2366 te.hap = l_descriptor.ap();
2367 } else {
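// combine the leaf AP bits with the hierarchical rwTable/userTable
// bits collected during the walk; table descriptors can only ever
// reduce the permissions granted by the leaf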
2368 te.ap = ((!currState->longDescData->rwTable ||
2369 descriptor.ap() >> 1) << 1) |
2370 (currState->longDescData->userTable && (descriptor.ap() & 0x1));
2371 // Add index of Indirect Permission.
2372 te.piindex = l_descriptor.piindex();
2373 }
2374 if (currState->aarch64)
2375 memAttrsAArch64(currState->tc, te, l_descriptor);
2376 else
2377 memAttrsLPAE(currState->tc, te, l_descriptor);
2378 } else {
2379 te.ap = descriptor.ap();
2380 memAttrs(currState->tc, te, currState->sctlr, descriptor.texcb(),
2381 descriptor.shareable());
2382 }
2383
2384 // Debug output
2385 DPRINTF(TLB, descriptor.dbgHeader().c_str());
2386 DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
2387 te.N, te.pfn, te.size, te.global, te.valid);
2388 DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d piindex:%d domain:%d asid:%d "
2389 "vmid:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
2390 te.ap, te.piindex,
2391 static_cast<uint8_t>(te.domain), te.asid, te.vmid,
2392 te.nonCacheable, te.ns);
2393 DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
2394 descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
2395 descriptor.getRawData());
2396
2397 // Insert the entry into the TLBs
2398 tlb->multiInsert(TlbEntry::KeyType(te), te);
2399 if (!currState->timing) {
2400 currState->tc = NULL;
2401 currState->req = NULL;
2402 }
2403}
2404
2405 LookupLevel
2406 TableWalker::toLookupLevel(uint8_t lookup_level_as_int)
2407{
2408 switch (lookup_level_as_int) {
2409 case LookupLevel::L1:
2410 return LookupLevel::L1;
2411 case LookupLevel::L2:
2412 return LookupLevel::L2;
2413 case LookupLevel::L3:
2414 return LookupLevel::L3;
2415 default:
2416 panic("Invalid lookup level conversion");
2417 }
2418}
2419
2420/* this method keeps track of the table walker queue's residency, so
2421 * needs to be called whenever requests start and complete. */
2422 void
2423 TableWalker::pendingChange()
2424 {
2425 unsigned n = pendingQueue.size();
2426 if ((currState != NULL) && (currState != pendingQueue.front())) {
2427 ++n;
2428 }
2429
2430 if (n != pendingReqs) {
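// sample the depth we are leaving, weighted by how long it was held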
2431 Tick now = curTick();
2432 stats.pendingWalks.sample(pendingReqs, now - pendingChangeTick);
2433 pendingReqs = n;
2434 pendingChangeTick = now;
2435 }
2436}
2437
2438 Fault
2439 TableWalker::testWalk(const RequestPtr &walk_req, DomainType domain,
2440 LookupLevel lookup_level)
2441{
2442 if (!test) {
2443 return NoFault;
2444 } else {
2445 return test->walkCheck(walk_req, currState->vaddr,
2447 currState->el != EL0,
2448 currState->mode, domain, lookup_level);
2449 }
2450}
2451
2452 void
2453 TableWalker::setTestInterface(TlbTestInterface *ti)
2454 {
2455 test = ti;
2456 }
2457
2458 uint8_t
2459 TableWalker::pageSizeNtoStatBin(uint8_t N)
2460 {
2461 /* for stats.pageSizes */
2462 switch(N) {
2463 case 12: return 0; // 4K
2464 case 14: return 1; // 16K (using 16K granule in v8-64)
2465 case 16: return 2; // 64K
2466 case 20: return 3; // 1M
2467 case 21: return 4; // 2M-LPAE
2468 case 24: return 5; // 16M
2469 case 25: return 6; // 32M (using 16K granule in v8-64)
2470 case 29: return 7; // 512M (using 64K granule in v8-64)
2471 case 30: return 8; // 1G-LPAE
2472 case 42: return 9; // 4TiB (using 64K granule in v8-64)
2473 default:
2474 panic("unknown page size");
2475 return 255;
2476 }
2477}
2478
2479 Fault
2480 TableWalker::readDataUntimed(ThreadContext *tc, Addr vaddr, Addr desc_addr,
2481 uint8_t *data, int num_bytes, Request::Flags flags, BaseMMU::Mode mode,
2482 MMU::ArmTranslationType tran_type, bool functional)
2483{
2484 Fault fault;
2485
2486 // translate to physical address using the second stage MMU
2487 auto req = std::make_shared<Request>();
2488 req->setVirt(desc_addr, num_bytes, flags | Request::PT_WALK,
2489 requestorId, 0);
2490
2491 if (functional) {
2492 fault = mmu->translateFunctional(req, tc, BaseMMU::Read,
2493 tran_type, true);
2494 } else {
2495 fault = mmu->translateAtomic(req, tc, BaseMMU::Read,
2496 tran_type, true);
2497 }
2498
2499 // Now do the access.
2500 if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
2501 Packet pkt = Packet(req, MemCmd::ReadReq);
2502 pkt.dataStatic(data);
2503 if (functional) {
2504 port->sendFunctional(&pkt);
2505 } else {
2506 port->sendAtomic(&pkt);
2507 }
2508 assert(!pkt.isError());
2509 }
2510
2511 // If there was a fault annotate it with the flag saying the fault occurred
2512 // while doing a translation for a stage 1 page table walk.
2513 if (fault != NoFault) {
2514 ArmFault *arm_fault = reinterpret_cast<ArmFault *>(fault.get());
2515 arm_fault->annotate(ArmFault::S1PTW, true);
2516 arm_fault->annotate(ArmFault::OVA, vaddr);
2517 }
2518 return fault;
2519}
2520
2521 void
2522 TableWalker::mpamTagTableWalk(RequestPtr &req) const
2523 {
2524 mpam::tagRequest(currState->tc, req, currState->isFetch);
2525}
2526
2527 void
2528 TableWalker::readDataTimed(ThreadContext *tc, Addr desc_addr,
2529 Stage2Walk *translation, int num_bytes,
2530 Request::Flags flags)
2531{
2532 // translate to physical address using the second stage MMU
2533 translation->setVirt(
2534 desc_addr, num_bytes, flags | Request::PT_WALK, requestorId);
2535 translation->translateTiming(tc);
2536}
2537
2538 TableWalker::Stage2Walk::Stage2Walk(TableWalker &_parent,
2539 uint8_t *_data, Event *_event, Addr vaddr, BaseMMU::Mode _mode,
2540 MMU::ArmTranslationType tran_type)
2541 : data(_data), numBytes(0), event(_event), parent(_parent),
2542 oVAddr(vaddr), mode(_mode), tranType(tran_type), fault(NoFault)
2543{
2544 req = std::make_shared<Request>();
2545}
2546
2547 void
2548 TableWalker::Stage2Walk::finish(const Fault &_fault,
2549 const RequestPtr &req,
2550 ThreadContext *tc, BaseMMU::Mode mode)
2551 {
2552 fault = _fault;
2553
2554 // If there was a fault annotate it with the flag saying the fault occurred
2555 // while doing a translation for a stage 1 page table walk.
2556 if (fault != NoFault) {
2557 ArmFault *arm_fault = reinterpret_cast<ArmFault *>(fault.get());
2558 arm_fault->annotate(ArmFault::S1PTW, true);
2559 arm_fault->annotate(ArmFault::OVA, oVAddr);
2560 }
2561
2562 if (_fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
2563 parent.getTableWalkerPort().sendTimingReq(req, data,
2564 tc->getCpuPtr()->clockPeriod(), event);
2565 } else {
2566 // We can't do the DMA access as there's been a problem, so tell the
2567 // event we're done
2568 event->process();
2569 }
2570}
2571
2572 void
2573 TableWalker::Stage2Walk::translateTiming(ThreadContext *tc)
2574 {
2575 parent.mmu->translateTiming(req, tc, this, mode, tranType, true);
2576}
2577
2578 TableWalker::TableWalkerStats::TableWalkerStats(statistics::Group *parent)
2579 : statistics::Group(parent),
2580 ADD_STAT(walks, statistics::units::Count::get(),
2581 "Table walker walks requested"),
2583 "Table walker walks initiated with short descriptors"),
2585 "Table walker walks initiated with long descriptors"),
2587 "Level at which table walker walks with short descriptors "
2588 "terminate"),
2590 "Level at which table walker walks with long descriptors "
2591 "terminate"),
2592 ADD_STAT(squashedBefore, statistics::units::Count::get(),
2593 "Table walks squashed before starting"),
2594 ADD_STAT(squashedAfter, statistics::units::Count::get(),
2595 "Table walks squashed after completion"),
2597 "Table walker wait (enqueue to first request) latency"),
2599 "Table walker service (enqueue to completion) latency"),
2601 "Table walker pending requests distribution"),
2602 ADD_STAT(pageSizes, statistics::units::Count::get(),
2603 "Table walker page sizes translated"),
2604 ADD_STAT(requestOrigin, statistics::units::Count::get(),
2605 "Table walker requests started/completed, data/inst")
2606{
2608 .flags(statistics::nozero);
2609
2610 walksLongDescriptor
2611 .flags(statistics::nozero);
2612
2613 walksShortTerminatedAtLevel
2614 .init(2)
2615 .flags(statistics::nozero);
2616
2617 walksShortTerminatedAtLevel.subname(0, "Level1");
2618 walksShortTerminatedAtLevel.subname(1, "Level2");
2619
2620 walksLongTerminatedAtLevel
2621 .init(4)
2622 .flags(statistics::nozero);
2623 walksLongTerminatedAtLevel.subname(0, "Level0");
2624 walksLongTerminatedAtLevel.subname(1, "Level1");
2625 walksLongTerminatedAtLevel.subname(2, "Level2");
2626 walksLongTerminatedAtLevel.subname(3, "Level3");
2627
2628 squashedBefore
2629 .flags(statistics::nozero);
2630
2631 squashedAfter
2632 .flags(statistics::nozero);
2633
2634 walkWaitTime
2635 .init(16)
2636 .flags(statistics::pdf | statistics::nozero | statistics::nonan);
2637
2638 walkServiceTime
2639 .init(16)
2640 .flags(statistics::pdf | statistics::nozero | statistics::nonan);
2641
2642 pendingWalks
2643 .init(16)
2644 .flags(statistics::pdf | statistics::dist | statistics::nozero |
2645 statistics::nonan);
2646
2647 pageSizes // see DDI 0487A D4-1661
2648 .init(10)
2649 .flags(statistics::total | statistics::pdf | statistics::dist |
2650 statistics::nozero);
2651 pageSizes.subname(0, "4KiB");
2652 pageSizes.subname(1, "16KiB");
2653 pageSizes.subname(2, "64KiB");
2654 pageSizes.subname(3, "1MiB");
2655 pageSizes.subname(4, "2MiB");
2656 pageSizes.subname(5, "16MiB");
2657 pageSizes.subname(6, "32MiB");
2658 pageSizes.subname(7, "512MiB");
2659 pageSizes.subname(8, "1GiB");
2660 pageSizes.subname(9, "4TiB");
2661
2662 requestOrigin
2663 .init(2,2) // Instruction/Data, requests/completed
2664 .flags(statistics::total);
2665 requestOrigin.subname(0,"Requested");
2666 requestOrigin.subname(1,"Completed");
2667 requestOrigin.ysubname(0,"Data");
2668 requestOrigin.ysubname(1,"Inst");
2669}
2670
2671} // namespace gem5