/*
 * Copyright (c) 2010, 2012-2019, 2021-2024 Arm Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "arch/arm/table_walker.hh"

#include <cassert>
#include <memory>

#include "arch/arm/faults.hh"
#include "arch/arm/mmu.hh"
#include "arch/arm/mpam.hh"
#include "arch/arm/pagetable.hh"
#include "arch/arm/system.hh"
#include "arch/arm/tlb.hh"
#include "base/compiler.hh"
#include "cpu/base.hh"
#include "cpu/thread_context.hh"
#include "debug/Checkpoint.hh"
#include "debug/Drain.hh"
#include "debug/PageTableWalker.hh"
#include "debug/TLB.hh"
#include "debug/TLBVerbose.hh"
#include "sim/system.hh"

namespace gem5
{

using namespace ArmISA;

TableWalker::TableWalker(const Params &p)
    : ClockedObject(p),
      requestorId(p.sys->getRequestorId(this)),
      port(new Port(*this)),
      isStage2(p.is_stage2), tlb(NULL),
      currState(NULL), pending(false),
      numSquashable(p.num_squash_per_cycle),
      release(nullptr),
      stats(this),
      pendingReqs(0),
      pendingChangeTick(curTick()),
      doL1DescEvent([this]{ doL1DescriptorWrapper(); }, name()),
      doL2DescEvent([this]{ doL2DescriptorWrapper(); }, name()),
      doL0LongDescEvent([this]{ doL0LongDescriptorWrapper(); }, name()),
      doL1LongDescEvent([this]{ doL1LongDescriptorWrapper(); }, name()),
      doL2LongDescEvent([this]{ doL2LongDescriptorWrapper(); }, name()),
      doL3LongDescEvent([this]{ doL3LongDescriptorWrapper(); }, name()),
      LongDescEventByLevel { &doL0LongDescEvent, &doL1LongDescEvent,
                             &doL2LongDescEvent, &doL3LongDescEvent },
      doProcessEvent([this]{ processWalkWrapper(); }, name()),
      test(nullptr)
{
    sctlr = 0;

    // Cache system-level properties
    if (FullSystem) {
        ArmSystem *arm_sys = dynamic_cast<ArmSystem *>(p.sys);
        assert(arm_sys);
        _physAddrRange = arm_sys->physAddrRange();
        _haveLargeAsid64 = arm_sys->haveLargeAsid64();
    } else {
        _haveLargeAsid64 = false;
        _physAddrRange = 48;
    }
}

TableWalker::~TableWalker()
{
    ;
}

TableWalker::Port &
TableWalker::getTableWalkerPort()
{
    return static_cast<Port&>(getPort("port"));
}

Port &
TableWalker::getPort(const std::string &if_name, PortID idx)
{
    if (if_name == "port") {
        return *port;
    }
    return ClockedObject::getPort(if_name, idx);
}

void
TableWalker::setMmu(MMU *_mmu)
{
    mmu = _mmu;
    release = mmu->release();
}

TableWalker::WalkerState::WalkerState() :
    tc(nullptr), aarch64(false), regime(TranslationRegime::EL10),
    physAddrRange(0), req(nullptr),
    asid(0), vmid(0), transState(nullptr),
    vaddr(0), vaddr_tainted(0),
    sctlr(0), scr(0), cpsr(0), tcr(0),
    htcr(0), hcr(0), vtcr(0),
    isWrite(false), isFetch(false), ss(SecurityState::NonSecure),
    isUncacheable(false), longDescData(std::nullopt),
    hpd(false), sh(0), irgn(0), orgn(0), stage2Req(false),
    stage2Tran(nullptr), timing(false), functional(false),
    mode(BaseMMU::Read), tranType(MMU::NormalTran), l2Desc(l1Desc),
    delayed(false), tableWalker(nullptr)
{
}

TableWalker::Port::Port(TableWalker& _walker)
  : QueuedRequestPort(_walker.name() + ".port", reqQueue, snoopRespQueue),
    owner{_walker},
    reqQueue(_walker, *this),
    snoopRespQueue(_walker, *this)
{
}

PacketPtr
TableWalker::Port::createPacket(
    const RequestPtr &req,
    uint8_t *data, Tick delay,
    Event *event)
{
    PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
    pkt->dataStatic(data);

    auto state = new TableWalkerState;
    state->event = event;
    state->delay = delay;

    pkt->senderState = state;
    return pkt;
}

void
TableWalker::Port::sendFunctionalReq(
    const RequestPtr &req, uint8_t *data)
{
    auto pkt = createPacket(req, data, 0, nullptr);

    sendFunctional(pkt);

    handleRespPacket(pkt);
}

void
TableWalker::Port::sendAtomicReq(
    const RequestPtr &req,
    uint8_t *data, Tick delay)
{
    auto pkt = createPacket(req, data, delay, nullptr);

    Tick lat = sendAtomic(pkt);

    handleRespPacket(pkt, lat);
}

void
TableWalker::Port::sendTimingReq(
    const RequestPtr &req,
    uint8_t *data, Tick delay,
    Event *event)
{
    auto pkt = createPacket(req, data, delay, event);

    schedTimingReq(pkt, curTick());
}

bool
TableWalker::Port::recvTimingResp(PacketPtr pkt)
{
    // We shouldn't ever get a cacheable block in Modified state.
    assert(pkt->req->isUncacheable() ||
           !(pkt->cacheResponding() && !pkt->hasSharers()));

    handleRespPacket(pkt);

    return true;
}

void
TableWalker::Port::handleRespPacket(PacketPtr pkt, Tick delay)
{
    // Should always see a response with a sender state.
    assert(pkt->isResponse());

    // Get the DMA sender state.
    auto *state = dynamic_cast<TableWalkerState*>(pkt->senderState);
    assert(state);

    handleResp(state, pkt->getAddr(), pkt->req->getSize(), delay);

    delete pkt;
}

void
TableWalker::Port::handleResp(TableWalkerState *state, Addr addr,
                              Addr size, Tick delay)
{
    if (state->event) {
        owner.schedule(state->event, curTick() + delay);
    }
    delete state;
}

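// Drain bookkeeping note: the walker only counts as drained once all four
// long-descriptor state queues and the pending queue are empty. Each queue
// that empties ends up calling completeDrain(), which re-checks the condition
// below and signals the DrainManager when it finally holds.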
void
TableWalker::completeDrain()
{
    if (drainState() == DrainState::Draining &&
        stateQueues[LookupLevel::L0].empty() &&
        stateQueues[LookupLevel::L1].empty() &&
        stateQueues[LookupLevel::L2].empty() &&
        stateQueues[LookupLevel::L3].empty() &&
        pendingQueue.empty()) {

        DPRINTF(Drain, "TableWalker done draining, processing drain event\n");
        signalDrainDone();
    }
}

DrainState
TableWalker::drain()
{
    bool state_queues_not_empty = false;

    for (int i = 0; i < LookupLevel::Num_ArmLookupLevel; ++i) {
        if (!stateQueues[i].empty()) {
            state_queues_not_empty = true;
            break;
        }
    }

    if (state_queues_not_empty || pendingQueue.size()) {
        DPRINTF(Drain, "TableWalker not drained\n");
        return DrainState::Draining;
    } else {
        DPRINTF(Drain, "TableWalker free, no need to drain\n");
        return DrainState::Drained;
    }
}

void
TableWalker::drainResume()
{
    if (params().sys->isTimingMode() && currState) {
        delete currState;
        currState = NULL;
        pendingChange();
    }
}

bool
TableWalker::uncacheableWalk() const
{
    bool disable_cacheability = isStage2 ?
        currState->hcr.cd :
        currState->sctlr.c == 0;
    return disable_cacheability || currState->isUncacheable;
}

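// walk() is the MMU's single entry point for starting a table walk in all
// three access modes: functional walks run on a scratch WalkerState that is
// restored afterwards, atomic walks complete before walk() returns, and
// timing walks are queued and complete later through port events, delivering
// their result via _trans->finish().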
Fault
TableWalker::walk(const RequestPtr &_req, ThreadContext *_tc, uint16_t _asid,
                  vmid_t _vmid, MMU::Mode _mode,
                  MMU::Translation *_trans, bool _timing, bool _functional,
                  SecurityState ss, PASpace ipaspace,
                  MMU::ArmTranslationType tranType,
                  bool _stage2Req, const TlbEntry *walk_entry)
{
    assert(!(_functional && _timing));
    ++stats.walks;

    WalkerState *savedCurrState = NULL;

    if (!currState && !_functional) {
        // For atomic mode, a new WalkerState instance should be only created
        // once per TLB. For timing mode, a new instance is generated for every
        // TLB miss.
        DPRINTF(PageTableWalker, "creating new instance of WalkerState\n");

        currState = new WalkerState();
        currState->tableWalker = this;
    } else if (_functional) {
        // If we are mixing functional mode with timing (or even
        // atomic), we need to be careful and clean up after
        // ourselves to not risk getting into an inconsistent state.
        DPRINTF(PageTableWalker,
                "creating functional instance of WalkerState\n");
        savedCurrState = currState;
        currState = new WalkerState();
        currState->tableWalker = this;
    } else if (_timing) {
        // This is a translation that was completed and then faulted again
        // because some underlying parameters that affect the translation
        // changed out from under us (e.g. asid). It will either be a
        // misprediction, in which case nothing will happen or we'll use
        // this fault to re-execute the faulting instruction which should clean
        // up everything.
        if (currState->vaddr_tainted == _req->getVaddr()) {
            ++stats.squashedBefore;
            return std::make_shared<ReExec>();
        }
    }
    pendingChange();

    currState->startTime = curTick();
    currState->tc = _tc;
    currState->el =
        MMU::tranTypeEL(_tc->readMiscReg(MISCREG_CPSR),
                        static_cast<SCR>(_tc->readMiscReg(MISCREG_SCR_EL3)),
                        tranType);

    if (isStage2) {
        currState->regime = TranslationRegime::EL10;
        currState->aarch64 = ELIs64(_tc, EL2);
        currState->ipaSpace = ipaspace;
    } else {
        currState->regime =
            translationRegime(_tc, currState->el);
        currState->aarch64 =
            ELIs64(_tc, translationEl(currState->regime));
    }
    currState->transState = _trans;
    currState->req = _req;
    if (walk_entry) {
        currState->walkEntry = *walk_entry;
    } else {
        currState->walkEntry = TlbEntry();
    }

    currState->asid = _asid;
    currState->vmid = _vmid;
    currState->timing = _timing;
    currState->functional = _functional;
    currState->mode = _mode;
    currState->tranType = tranType;
    currState->ss = ss;
    currState->secureLookup = currState->ss == SecurityState::Secure;
    currState->physAddrRange = _physAddrRange;

    /** @todo These should be cached or grabbed from cached copies in
     the TLB, all these miscreg reads are expensive */
    currState->vaddr_tainted = currState->req->getVaddr();
    if (currState->aarch64)
        currState->vaddr = purifyTaggedAddr(currState->vaddr_tainted,
                                            currState->tc, currState->el,
                                            currState->mode ==
                                            BaseMMU::Execute);
    else
        currState->vaddr = currState->vaddr_tainted;

    if (currState->aarch64) {
        currState->hcr = currState->tc->readMiscReg(MISCREG_HCR_EL2);
        if (isStage2) {
            currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
            if (currState->ipaSpace == PASpace::Secure) {
                currState->vtcr =
                    currState->tc->readMiscReg(MISCREG_VSTCR_EL2);
            } else {
                currState->vtcr =
                    currState->tc->readMiscReg(MISCREG_VTCR_EL2);
            }
        } else switch (currState->regime) {
          case TranslationRegime::EL10:
            currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
            currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL1);
            break;
          case TranslationRegime::EL20:
          case TranslationRegime::EL2:
            assert(release->has(ArmExtension::VIRTUALIZATION));
            currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL2);
            currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL2);
            break;
          case TranslationRegime::EL3:
            assert(release->has(ArmExtension::SECURITY));
            currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL3);
            currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL3);
            break;
          default:
            panic("Invalid translation regime");
            break;
        }
    } else {
        currState->sctlr = currState->tc->readMiscReg(snsBankedIndex(
            MISCREG_SCTLR, currState->tc,
            currState->ss == SecurityState::NonSecure));
        currState->ttbcr = currState->tc->readMiscReg(snsBankedIndex(
            MISCREG_TTBCR, currState->tc,
            currState->ss == SecurityState::NonSecure));
        currState->htcr = currState->tc->readMiscReg(MISCREG_HTCR);
        currState->hcr = currState->tc->readMiscReg(MISCREG_HCR);
        currState->vtcr = currState->tc->readMiscReg(MISCREG_VTCR);
    }
    sctlr = currState->sctlr;

    currState->isFetch = (currState->mode == BaseMMU::Execute);
    currState->isWrite = (currState->mode == BaseMMU::Write);

    stats.requestOrigin[REQUESTED][currState->isFetch]++;

    currState->stage2Req = _stage2Req && !isStage2;

    bool hyp = currState->el == EL2;
    bool long_desc_format = currState->aarch64 || hyp || isStage2 ||
                            longDescFormatInUse(currState->tc);

    if (long_desc_format) {
        // Helper variables used for hierarchical permissions
        currState->longDescData.emplace();
        currState->longDescData->rwTable = true;
        currState->longDescData->userTable = true;
        currState->longDescData->xnTable = false;
        currState->longDescData->pxnTable = false;
        stats.walksLongDescriptor++;
    } else {
        currState->longDescData = std::nullopt;
        stats.walksShortDescriptor++;
    }

    if (currState->timing && (pending || pendingQueue.size())) {
        pendingQueue.push_back(currState);
        currState = NULL;
        pendingChange();
        return NoFault;
    } else {
        if (currState->timing) {
            pending = true;
            pendingChange();
        }

        Fault fault = NoFault;
        if (currState->aarch64) {
            fault = processWalkAArch64();
        } else if (long_desc_format) {
            fault = processWalkLPAE();
        } else {
            fault = processWalk();
        }

        // If this was a functional non-timing access restore state to
        // how we found it.
        if (currState->functional) {
            delete currState;
            currState = savedCurrState;
        } else if (currState->timing) {
            if (fault) {
                pending = false;
                pendingChange();
                delete currState;
                currState = NULL;
            } else {
                // Either we are using the long descriptor, which means we
                // need to extract the queue index from longDesc, or we are
                // using the short. In the latter we always start at L1
                LookupLevel curr_lookup_level = long_desc_format ?
                    currState->longDesc.lookupLevel : LookupLevel::L1;

                stashCurrState(curr_lookup_level);
            }
        } else if (fault) {
            currState->tc = NULL;
            currState->req = NULL;
        }

        return fault;
    }
}

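// Queued timing walks are serviced here, one WalkerState at a time. Before
// starting a walk the address is looked up in the TLB again: an earlier walk
// may already have translated this page, or the requesting instruction may
// have been squashed, in which case the walk is dropped (up to numSquashable
// entries per invocation).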
void
TableWalker::processWalkWrapper()
{
    assert(!currState);
    assert(pendingQueue.size());
    pendingChange();
    currState = pendingQueue.front();

    // Check if a previous walk filled this request already
    // @TODO Should this always be the TLB or should we look in the stage2 TLB?
    TlbEntry* te = tlb->lookup(currState->vaddr, currState->asid,
        currState->vmid, currState->ss, true, false,
        currState->regime, isStage2, currState->mode);

    // Check if we still need to have a walk for this request. If the requesting
    // instruction has been squashed, or a previous walk has filled the TLB with
    // a match, we just want to get rid of the walk. The latter could happen
    // when there are multiple outstanding misses to a single page and a
    // previous request has been successfully translated.
    if (!currState->transState->squashed() && (!te || te->partial)) {
        // We've got a valid request, let's process it
        pending = true;
        pendingQueue.pop_front();

        bool long_desc_format = currState->aarch64 || currState->el == EL2 ||
            isStage2 || longDescFormatInUse(currState->tc);

        if (te && te->partial) {
            currState->walkEntry = *te;
        }
        Fault fault;
        if (currState->aarch64) {
            fault = processWalkAArch64();
        } else if (long_desc_format) {
            fault = processWalkLPAE();
        } else {
            fault = processWalk();
        }

        if (fault != NoFault) {
            pending = false;
            pendingChange();

            currState->transState->finish(fault, currState->req,
                                          currState->tc, currState->mode);

            delete currState;
            currState = NULL;
        } else {
            LookupLevel curr_lookup_level = long_desc_format ?
                currState->longDesc.lookupLevel : LookupLevel::L1;

            stashCurrState(curr_lookup_level);
        }
        return;
    }


    // If the instruction that we were translating for has been
    // squashed we shouldn't bother.
    unsigned num_squashed = 0;
    ThreadContext *tc = currState->tc;
    while ((num_squashed < numSquashable) && currState &&
           (currState->transState->squashed() ||
            (te && !te->partial))) {
        pendingQueue.pop_front();
        num_squashed++;
        stats.squashedBefore++;

        DPRINTF(TLB, "Squashing table walk for address %#x\n",
                currState->vaddr_tainted);

        if (currState->transState->squashed()) {
            // finish the translation which will delete the translation object
            currState->transState->finish(
                std::make_shared<UnimpFault>("Squashed Inst"),
                currState->req, currState->tc, currState->mode);
        } else {
            // translate the request now that we know it will work
            stats.walkServiceTime.sample(curTick() - currState->startTime);
            mmu->translateTiming(currState->req, currState->tc,
                                 currState->transState, currState->mode,
                                 currState->tranType, isStage2);
        }

        // delete the current request
        delete currState;

        // peek at the next one
        if (pendingQueue.size()) {
            currState = pendingQueue.front();
            te = tlb->lookup(currState->vaddr, currState->asid,
                currState->vmid, currState->ss, true,
                false, currState->regime, isStage2, currState->mode);
        } else {
            // Terminate the loop, nothing more to do
            currState = NULL;
        }
    }
    pendingChange();

    // if we still have pending translations, schedule more work
    nextWalk(tc);
    currState = NULL;
}

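// Short-descriptor (VMSAv7) walk. Worked example for the first-level fetch
// below, assuming TTBCR.N = 0: l1desc_addr = TTBR[31:14] | (VA[31:20] << 2),
// so TTBR = 0x80004000 and VA = 0x00401000 yield
// 0x80004000 | (0x004 << 2) = 0x80004010.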
Fault
TableWalker::processWalk()
{
    Addr ttbr = 0;

    // For short descriptors, translation configs are held in
    // TTBR1.
    RegVal ttbr1 = currState->tc->readMiscReg(snsBankedIndex(
        MISCREG_TTBR1, currState->tc,
        currState->ss == SecurityState::NonSecure));

    const auto irgn0_mask = 0x1;
    const auto irgn1_mask = 0x40;
    currState->isUncacheable = (ttbr1 & (irgn0_mask | irgn1_mask)) == 0;

    // If translation isn't enabled, we shouldn't be here
    assert(currState->sctlr.m || isStage2);
    const bool is_atomic = currState->req->isAtomic();
    const bool have_security = release->has(ArmExtension::SECURITY);

    DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
            currState->vaddr_tainted, currState->ttbcr,
            mbits(currState->vaddr, 31, 32 - currState->ttbcr.n));

    if (currState->ttbcr.n == 0 || !mbits(currState->vaddr, 31,
                                          32 - currState->ttbcr.n)) {
        DPRINTF(TLB, " - Selecting TTBR0\n");
        // Check if table walk is allowed when Security Extensions are enabled
        if (have_security && currState->ttbcr.pd0) {
            if (currState->isFetch)
                return std::make_shared<PrefetchAbort>(
                    currState->vaddr_tainted,
                    ArmFault::TranslationLL + LookupLevel::L1,
                    isStage2,
                    ArmFault::VmsaTran);
            else
                return std::make_shared<DataAbort>(
                    currState->vaddr_tainted,
                    TlbEntry::DomainType::NoAccess,
                    is_atomic ? false : currState->isWrite,
                    ArmFault::TranslationLL + LookupLevel::L1, isStage2,
                    ArmFault::VmsaTran);
        }
        ttbr = currState->tc->readMiscReg(snsBankedIndex(
            MISCREG_TTBR0, currState->tc,
            currState->ss == SecurityState::NonSecure));
    } else {
        DPRINTF(TLB, " - Selecting TTBR1\n");
        // Check if table walk is allowed when Security Extensions are enabled
        if (have_security && currState->ttbcr.pd1) {
            if (currState->isFetch)
                return std::make_shared<PrefetchAbort>(
                    currState->vaddr_tainted,
                    ArmFault::TranslationLL + LookupLevel::L1,
                    isStage2,
                    ArmFault::VmsaTran);
            else
                return std::make_shared<DataAbort>(
                    currState->vaddr_tainted,
                    TlbEntry::DomainType::NoAccess,
                    is_atomic ? false : currState->isWrite,
                    ArmFault::TranslationLL + LookupLevel::L1, isStage2,
                    ArmFault::VmsaTran);
        }
        ttbr = ttbr1;
        currState->ttbcr.n = 0;
    }

    Addr l1desc_addr = mbits(ttbr, 31, 14 - currState->ttbcr.n) |
        (bits(currState->vaddr, 31 - currState->ttbcr.n, 20) << 2);
    DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr,
            currState->ss == SecurityState::Secure ? "s" : "ns");

    Request::Flags flag = Request::PT_WALK;
    if (uncacheableWalk()) {
        flag.set(Request::UNCACHEABLE);
    }

    if (currState->secureLookup) {
        flag.set(Request::SECURE);
    }

    fetchDescriptor(
        l1desc_addr, currState->l1Desc,
        sizeof(uint32_t), flag, LookupLevel::L1,
        &doL1DescEvent,
        &TableWalker::doL1Descriptor);

    return currState->fault;
}

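// LPAE (long-descriptor) walk. Region-selection example: with TTBCR.T0SZ = 1
// and T1SZ = 1, ttbr0_max = 2^31 - 1 and ttbr1_min = 2^31, so TTBR0 covers
// the low 2 GiB and TTBR1 the upper 2 GiB; a VA that falls in neither region
// raises a translation fault.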
Fault
TableWalker::processWalkLPAE()
{
    Addr ttbr, ttbr0_max, ttbr1_min, desc_addr;
    int tsz, n;
    LookupLevel start_lookup_level = LookupLevel::L1;

    DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n",
            currState->vaddr_tainted, currState->ttbcr);

    Request::Flags flag = Request::PT_WALK;
    if (currState->secureLookup)
        flag.set(Request::SECURE);

    // work out which base address register to use, if in hyp mode we always
    // use HTTBR
    if (isStage2) {
        DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n");
        ttbr = currState->tc->readMiscReg(MISCREG_VTTBR);
        tsz = sext<4>(currState->vtcr.t0sz);
        start_lookup_level = currState->vtcr.sl0 ?
            LookupLevel::L1 : LookupLevel::L2;
        currState->isUncacheable = currState->vtcr.irgn0 == 0;
    } else if (currState->el == EL2) {
        DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n");
        ttbr = currState->tc->readMiscReg(MISCREG_HTTBR);
        tsz = currState->htcr.t0sz;
        currState->isUncacheable = currState->htcr.irgn0 == 0;
    } else {
        assert(longDescFormatInUse(currState->tc));

        // Determine boundaries of TTBR0/1 regions
        if (currState->ttbcr.t0sz)
            ttbr0_max = (1ULL << (32 - currState->ttbcr.t0sz)) - 1;
        else if (currState->ttbcr.t1sz)
            ttbr0_max = (1ULL << 32) -
                (1ULL << (32 - currState->ttbcr.t1sz)) - 1;
        else
            ttbr0_max = (1ULL << 32) - 1;
        if (currState->ttbcr.t1sz)
            ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz));
        else
            ttbr1_min = (1ULL << (32 - currState->ttbcr.t0sz));

        const bool is_atomic = currState->req->isAtomic();

        // The following code snippet selects the appropriate translation table base
        // address (TTBR0 or TTBR1) and the appropriate starting lookup level
        // depending on the address range supported by the translation table (ARM
        // ARM issue C B3.6.4)
        if (currState->vaddr <= ttbr0_max) {
            DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n");
            // Check if table walk is allowed
            if (currState->ttbcr.epd0) {
                if (currState->isFetch)
                    return std::make_shared<PrefetchAbort>(
                        currState->vaddr_tainted,
                        ArmFault::TranslationLL + LookupLevel::L1,
                        isStage2,
                        ArmFault::LpaeTran);
                else
                    return std::make_shared<DataAbort>(
                        currState->vaddr_tainted,
                        TlbEntry::DomainType::NoAccess,
                        is_atomic ? false : currState->isWrite,
                        ArmFault::TranslationLL + LookupLevel::L1,
                        isStage2,
                        ArmFault::LpaeTran);
            }
            ttbr = currState->tc->readMiscReg(snsBankedIndex(
                MISCREG_TTBR0, currState->tc,
                currState->ss == SecurityState::NonSecure));
            tsz = currState->ttbcr.t0sz;
            currState->isUncacheable = currState->ttbcr.irgn0 == 0;
            if (ttbr0_max < (1ULL << 30))  // Upper limit < 1 GiB
                start_lookup_level = LookupLevel::L2;
        } else if (currState->vaddr >= ttbr1_min) {
            DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
            // Check if table walk is allowed
            if (currState->ttbcr.epd1) {
                if (currState->isFetch)
                    return std::make_shared<PrefetchAbort>(
                        currState->vaddr_tainted,
                        ArmFault::TranslationLL + LookupLevel::L1,
                        isStage2,
                        ArmFault::LpaeTran);
                else
                    return std::make_shared<DataAbort>(
                        currState->vaddr_tainted,
                        TlbEntry::DomainType::NoAccess,
                        is_atomic ? false : currState->isWrite,
                        ArmFault::TranslationLL + LookupLevel::L1,
                        isStage2,
                        ArmFault::LpaeTran);
            }
            ttbr = currState->tc->readMiscReg(snsBankedIndex(
                MISCREG_TTBR1, currState->tc,
                currState->ss == SecurityState::NonSecure));
            tsz = currState->ttbcr.t1sz;
            currState->isUncacheable = currState->ttbcr.irgn1 == 0;
            // Lower limit >= 3 GiB
            if (ttbr1_min >= (1ULL << 31) + (1ULL << 30))
                start_lookup_level = LookupLevel::L2;
        } else {
            // Out of boundaries -> translation fault
            if (currState->isFetch)
                return std::make_shared<PrefetchAbort>(
                    currState->vaddr_tainted,
                    ArmFault::TranslationLL + LookupLevel::L1,
                    isStage2,
                    ArmFault::LpaeTran);
            else
                return std::make_shared<DataAbort>(
                    currState->vaddr_tainted,
                    TlbEntry::DomainType::NoAccess,
                    is_atomic ? false : currState->isWrite,
                    ArmFault::TranslationLL + LookupLevel::L1,
                    isStage2, ArmFault::LpaeTran);
        }

    }

    // Perform lookup (ARM ARM issue C B3.6.6)
    if (start_lookup_level == LookupLevel::L1) {
        n = 5 - tsz;
        desc_addr = mbits(ttbr, 39, n) |
            (bits(currState->vaddr, n + 26, 30) << 3);
        DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
                desc_addr, currState->ss == SecurityState::Secure ?
                "s" : "ns");
    } else {
        // Skip first-level lookup
        n = (tsz >= 2 ? 14 - tsz : 12);
        desc_addr = mbits(ttbr, 39, n) |
            (bits(currState->vaddr, n + 17, 21) << 3);
        DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
                desc_addr, currState->ss == SecurityState::Secure ?
                "s" : "ns");
    }

    if (uncacheableWalk()) {
        flag.set(Request::UNCACHEABLE);
    }

    currState->longDesc.lookupLevel = start_lookup_level;
    currState->longDesc.aarch64 = false;
    currState->longDesc.grainSize = Grain4KB;

    fetchDescriptor(
        desc_addr, currState->longDesc,
        sizeof(uint64_t), flag, start_lookup_level,
        LongDescEventByLevel[start_lookup_level],
        &TableWalker::doLongDescriptor);

    return currState->fault;
}

Addr
TableWalker::s1MinTxSz(GrainSize tg) const
{
    // The effective maximum input size is 48 bits (minimum TxSZ of 16) if
    // ARMv8.2-LVA is not supported or if the translation granule in use is
    // 4KB or 16KB in size. When ARMv8.2-LVA is supported, for the 64KB
    // translation granule size only, the effective minimum value of TxSZ
    // is 12 (a maximum input size of 52 bits).
    if (HaveExt(currState->tc, ArmExtension::FEAT_LVA) && tg == Grain64KB) {
        return 12;
    } else {
        return 16;
    }
}

Addr
TableWalker::maxTxSz(GrainSize tg) const
{
    if (HaveExt(currState->tc, ArmExtension::FEAT_TTST)) {
        switch (tg) {
          case Grain4KB: return 48;
          case Grain16KB: return 48;
          case Grain64KB: return 47;
          default:
            panic("Invalid grain size\n");
        }
    }
    return 39;
}

bool
TableWalker::s1TxSzFault(GrainSize tg, int tsz)
{
    Addr min_txsz = s1MinTxSz(tg);
    Addr max_txsz = maxTxSz(tg);

    return tsz > max_txsz || tsz < min_txsz;
}

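// Input-size check example: with a 4 KiB granule and neither FEAT_TTST nor
// FEAT_LVA, TxSZ must lie in [16, 39]; a T0SZ of 12 (a 52-bit region) would
// fault in s1TxSzFault() unless FEAT_LVA and the 64 KiB granule are both in
// use.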
bool
TableWalker::checkVAOutOfRange(Addr vaddr, int top_bit, int tsz, bool low_range)
{
    return low_range ?
        bits(currState->vaddr, top_bit, tsz) != 0x0 :
        bits(currState->vaddr, top_bit, tsz) != mask(top_bit - tsz + 1);
}

bool
TableWalker::checkAddrSizeFaultAArch64(Addr addr, int pa_range)
{
    return (pa_range != _physAddrRange &&
            bits(addr, _physAddrRange - 1, pa_range));
}

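// Address-size check example: if PS decodes to a 40-bit PA range on a build
// with _physAddrRange = 48, any table or output address with a bit set in
// [47:40] makes checkAddrSizeFaultAArch64() return true, and the walk raises
// an Address Size fault before issuing the access.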
Fault
TableWalker::processWalkAArch64()
{
    assert(currState->aarch64);

    DPRINTF(TLB, "Beginning table walk for address %#llx, TCR: %#llx\n",
            currState->vaddr_tainted, currState->tcr);

    // Determine TTBR, table size, granule size and phys. address range
    Addr ttbr = 0;
    int tsz = 0, ps = 0;
    GrainSize tg = Grain4KB; // grain size computed from tg* field
    bool fault = false;

    int top_bit = computeAddrTop(currState->tc,
                                 bits(currState->vaddr, 55),
                                 currState->mode == BaseMMU::Execute,
                                 currState->tcr,
                                 currState->el);

    bool vaddr_fault = false;
    switch (currState->regime) {
      case TranslationRegime::EL10:
        if (isStage2) {
            if (currState->ipaSpace == PASpace::Secure) {
                // Secure EL1&0 Secure IPA
                DPRINTF(TLB, " - Selecting VSTTBR_EL2 (AArch64 stage 2)\n");
                ttbr = currState->tc->readMiscReg(MISCREG_VSTTBR_EL2);
            } else {
                // Secure EL1&0 NonSecure IPA or NonSecure EL1&0
                DPRINTF(TLB, " - Selecting VTTBR_EL2 (AArch64 stage 2)\n");
                ttbr = currState->tc->readMiscReg(MISCREG_VTTBR_EL2);
                currState->secureLookup =
                    currState->ss == SecurityState::Secure ?
                    !currState->vtcr.nsw : // Secure EL1&0 NonSecure IPA
                    false; // NonSecure EL1&0
            }
            tsz = 64 - currState->vtcr.t0sz64;
            tg = GrainMap_tg0[currState->vtcr.tg0];

            ps = currState->vtcr.ps;
            currState->sh = currState->vtcr.sh0;
            currState->irgn = currState->vtcr.irgn0;
            currState->orgn = currState->vtcr.orgn0;
        } else {
            switch (bits(currState->vaddr, top_bit)) {
              case 0:
                DPRINTF(TLB, " - Selecting TTBR0_EL1 (AArch64)\n");
                ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL1);
                tsz = 64 - currState->tcr.t0sz;
                tg = GrainMap_tg0[currState->tcr.tg0];
                currState->hpd = currState->tcr.hpd0;
                currState->sh = currState->tcr.sh0;
                currState->irgn = currState->tcr.irgn0;
                currState->orgn = currState->tcr.orgn0;
                vaddr_fault = s1TxSzFault(tg, currState->tcr.t0sz) ||
                    checkVAOutOfRange(currState->vaddr, top_bit, tsz, true);

                if (vaddr_fault || currState->tcr.epd0)
                    fault = true;
                break;
              case 0x1:
                DPRINTF(TLB, " - Selecting TTBR1_EL1 (AArch64)\n");
                ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL1);
                tsz = 64 - currState->tcr.t1sz;
                tg = GrainMap_tg1[currState->tcr.tg1];
                currState->hpd = currState->tcr.hpd1;
                currState->sh = currState->tcr.sh1;
                currState->irgn = currState->tcr.irgn1;
                currState->orgn = currState->tcr.orgn1;
                vaddr_fault = s1TxSzFault(tg, currState->tcr.t1sz) ||
                    checkVAOutOfRange(currState->vaddr, top_bit, tsz, false);

                if (vaddr_fault || currState->tcr.epd1)
                    fault = true;
                break;
              default:
                // top two bytes must be all 0s or all 1s, else invalid addr
                fault = true;
            }
            ps = currState->tcr.ips;
        }
        break;
      case TranslationRegime::EL20:
      case TranslationRegime::EL2:
        switch (bits(currState->vaddr, top_bit)) {
          case 0:
            DPRINTF(TLB, " - Selecting TTBR0_EL2 (AArch64)\n");
            ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL2);
            tsz = 64 - currState->tcr.t0sz;
            tg = GrainMap_tg0[currState->tcr.tg0];
            currState->hpd = currState->hcr.e2h ?
                currState->tcr.hpd0 : currState->tcr.hpd;
            currState->sh = currState->tcr.sh0;
            currState->irgn = currState->tcr.irgn0;
            currState->orgn = currState->tcr.orgn0;
            vaddr_fault = s1TxSzFault(tg, currState->tcr.t0sz) ||
                checkVAOutOfRange(currState->vaddr, top_bit, tsz, true);

            if (vaddr_fault || (currState->hcr.e2h && currState->tcr.epd0))
                fault = true;
            break;

          case 0x1:
            DPRINTF(TLB, " - Selecting TTBR1_EL2 (AArch64)\n");
            ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL2);
            tsz = 64 - currState->tcr.t1sz;
            tg = GrainMap_tg1[currState->tcr.tg1];
            currState->hpd = currState->tcr.hpd1;
            currState->sh = currState->tcr.sh1;
            currState->irgn = currState->tcr.irgn1;
            currState->orgn = currState->tcr.orgn1;
            vaddr_fault = s1TxSzFault(tg, currState->tcr.t1sz) ||
                checkVAOutOfRange(currState->vaddr, top_bit, tsz, false);

            if (vaddr_fault || !currState->hcr.e2h || currState->tcr.epd1)
                fault = true;
            break;

          default:
            // invalid addr if top two bytes are not all 0s
            fault = true;
        }
        ps = currState->hcr.e2h ? currState->tcr.ips : currState->tcr.ps;
        break;
      case TranslationRegime::EL3:
        switch (bits(currState->vaddr, top_bit)) {
          case 0:
            DPRINTF(TLB, " - Selecting TTBR0_EL3 (AArch64)\n");
            ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL3);
            tsz = 64 - currState->tcr.t0sz;
            tg = GrainMap_tg0[currState->tcr.tg0];
            currState->hpd = currState->tcr.hpd;
            currState->sh = currState->tcr.sh0;
            currState->irgn = currState->tcr.irgn0;
            currState->orgn = currState->tcr.orgn0;
            vaddr_fault = s1TxSzFault(tg, currState->tcr.t0sz) ||
                checkVAOutOfRange(currState->vaddr, top_bit, tsz, true);

            if (vaddr_fault)
                fault = true;
            break;
          default:
            // invalid addr if top two bytes are not all 0s
            fault = true;
        }
        ps = currState->tcr.ps;
        break;
    }

    currState->isUncacheable = currState->irgn == 0 ||
                               currState->orgn == 0;

    const bool is_atomic = currState->req->isAtomic();

    if (fault) {
        if (currState->isFetch) {
            return std::make_shared<PrefetchAbort>(
                currState->vaddr_tainted,
                ArmFault::TranslationLL + LookupLevel::L0, isStage2,
                ArmFault::LpaeTran);
        } else {
            return std::make_shared<DataAbort>(
                currState->vaddr_tainted,
                TlbEntry::DomainType::NoAccess,
                is_atomic ? false : currState->isWrite,
                ArmFault::TranslationLL + LookupLevel::L0,
                isStage2, ArmFault::LpaeTran);
        }
    }

    if (tg == ReservedGrain) {
        warn_once("Reserved granule size requested; gem5's IMPLEMENTATION "
                  "DEFINED behavior takes this to mean 4KB granules\n");
        tg = Grain4KB;
    }

    // Clamp to lower limit
    int pa_range = decodePhysAddrRange64(ps);
    if (pa_range > _physAddrRange) {
        currState->physAddrRange = _physAddrRange;
    } else {
        currState->physAddrRange = pa_range;
    }

    auto [table_addr, desc_addr, start_lookup_level] = walkAddresses(
        ttbr, tg, tsz, pa_range);

    // Determine physical address size and raise an Address Size Fault if
    // necessary
    if (checkAddrSizeFaultAArch64(table_addr, currState->physAddrRange)) {
        DPRINTF(TLB, "Address size fault before any lookup\n");
        if (currState->isFetch)
            return std::make_shared<PrefetchAbort>(
                currState->vaddr_tainted,
                ArmFault::AddressSizeLL + start_lookup_level,
                isStage2,
                ArmFault::LpaeTran);
        else
            return std::make_shared<DataAbort>(
                currState->vaddr_tainted,
                TlbEntry::DomainType::NoAccess,
                is_atomic ? false : currState->isWrite,
                ArmFault::AddressSizeLL + start_lookup_level,
                isStage2,
                ArmFault::LpaeTran);
    }

    Request::Flags flag = Request::PT_WALK;
    if (uncacheableWalk()) {
        flag.set(Request::UNCACHEABLE);
    }

    if (currState->secureLookup) {
        flag.set(Request::SECURE);
    }

    currState->longDesc.lookupLevel = start_lookup_level;
    currState->longDesc.aarch64 = true;
    currState->longDesc.grainSize = tg;
    currState->longDesc.physAddrRange = _physAddrRange;

    fetchDescriptor(desc_addr, currState->longDesc,
                    sizeof(uint64_t), flag, start_lookup_level,
                    LongDescEventByLevel[start_lookup_level],
                    &TableWalker::doLongDescriptor);

    return currState->fault;
}

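// Descriptor address math: with a 4 KiB granule each level resolves
// stride = 9 VA bits (tg = 12, so stride = tg - 3, and 2^9 eight-byte
// descriptors fill one 4 KiB table). For a 39-bit VA space (tsz = 39,
// i.e. T0SZ = 25) the walk starts at L1 and
// desc_addr = table_addr + (VA[38:30] << 3).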
std::tuple<Addr, Addr, TableWalker::LookupLevel>
TableWalker::walkAddresses(Addr ttbr, GrainSize tg, int tsz, int pa_range)
{
    const auto* ptops = getPageTableOps(tg);

    LookupLevel first_level = LookupLevel::Num_ArmLookupLevel;
    Addr table_addr = 0;
    Addr desc_addr = 0;

    if (currState->walkEntry.valid) {
        // WalkCache hit
        TlbEntry* entry = &currState->walkEntry;
        DPRINTF(PageTableWalker,
                "Walk Cache hit: va=%#x, level=%d, table address=%#x\n",
                currState->vaddr, entry->lookupLevel, entry->pfn);

        if (currState->longDescData.has_value()) {
            currState->longDescData->xnTable = entry->xn;
            currState->longDescData->pxnTable = entry->pxn;
            currState->longDescData->rwTable = bits(entry->ap, 1);
            currState->longDescData->userTable = bits(entry->ap, 0);
        }

        table_addr = entry->pfn;
        first_level = (LookupLevel)(entry->lookupLevel + 1);
    } else {
        // WalkCache miss
        first_level = isStage2 ?
            ptops->firstS2Level(currState->vtcr.sl0) :
            ptops->firstLevel(64 - tsz);
        panic_if(first_level == LookupLevel::Num_ArmLookupLevel,
                 "Table walker couldn't find lookup level\n");

        int stride = tg - 3;
        int base_addr_lo = 3 + tsz - stride * (3 - first_level) - tg;

        if (pa_range == 52) {
            int z = (base_addr_lo < 6) ? 6 : base_addr_lo;
            table_addr = mbits(ttbr, 47, z);
            table_addr |= (bits(ttbr, 5, 2) << 48);
        } else {
            table_addr = mbits(ttbr, 47, base_addr_lo);
        }
    }

    desc_addr = table_addr + ptops->index(currState->vaddr, first_level, tsz);

    return std::make_tuple(table_addr, desc_addr, first_level);
}

void
TableWalker::memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr,
                      uint8_t texcb, bool s)
{
    // Note: tc and sctlr local variables are hiding tc and sctlr class
    // variables
    DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s);
    te.shareable = false; // default value
    te.nonCacheable = false;
    te.outerShareable = false;
    if (sctlr.tre == 0 || ((sctlr.tre == 1) && (sctlr.m == 0))) {
        switch(texcb) {
          case 0: // Strongly-ordered
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            te.shareable = true;
            te.innerAttrs = 1;
            te.outerAttrs = 0;
            break;
          case 1: // Shareable Device
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Device;
            te.shareable = true;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            break;
          case 2: // Outer and Inner Write-Through, no Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 6;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 3: // Outer and Inner Write-Back, no Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 7;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 4: // Outer and Inner Non-cacheable
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 0;
            te.outerAttrs = bits(texcb, 1, 0);
            break;
          case 5: // Reserved
            panic("Reserved texcb value!\n");
            break;
          case 6: // Implementation Defined
            panic("Implementation-defined texcb value!\n");
            break;
          case 7: // Outer and Inner Write-Back, Write-Allocate
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            te.innerAttrs = 5;
            te.outerAttrs = 1;
            break;
          case 8: // Non-shareable Device
            te.nonCacheable = true;
            te.mtype = TlbEntry::MemoryType::Device;
            te.shareable = false;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            break;
          case 9 ... 15: // Reserved
            panic("Reserved texcb value!\n");
            break;
          case 16 ... 31: // Cacheable Memory
            te.mtype = TlbEntry::MemoryType::Normal;
            te.shareable = s;
            if (bits(texcb, 1,0) == 0 || bits(texcb, 3,2) == 0)
                te.nonCacheable = true;
            te.innerAttrs = bits(texcb, 1, 0);
            te.outerAttrs = bits(texcb, 3, 2);
            break;
          default:
            panic("More than 32 states for 5 bits?\n");
        }
    } else {
        assert(tc);
        PRRR prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR,
                        currState->tc,
                        currState->ss == SecurityState::NonSecure));
        NMRR nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR,
                        currState->tc,
                        currState->ss == SecurityState::NonSecure));
        DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr);
        uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0;
        switch(bits(texcb, 2,0)) {
          case 0:
            curr_tr = prrr.tr0;
            curr_ir = nmrr.ir0;
            curr_or = nmrr.or0;
            te.outerShareable = (prrr.nos0 == 0);
            break;
          case 1:
            curr_tr = prrr.tr1;
            curr_ir = nmrr.ir1;
            curr_or = nmrr.or1;
            te.outerShareable = (prrr.nos1 == 0);
            break;
          case 2:
            curr_tr = prrr.tr2;
            curr_ir = nmrr.ir2;
            curr_or = nmrr.or2;
            te.outerShareable = (prrr.nos2 == 0);
            break;
          case 3:
            curr_tr = prrr.tr3;
            curr_ir = nmrr.ir3;
            curr_or = nmrr.or3;
            te.outerShareable = (prrr.nos3 == 0);
            break;
          case 4:
            curr_tr = prrr.tr4;
            curr_ir = nmrr.ir4;
            curr_or = nmrr.or4;
            te.outerShareable = (prrr.nos4 == 0);
            break;
          case 5:
            curr_tr = prrr.tr5;
            curr_ir = nmrr.ir5;
            curr_or = nmrr.or5;
            te.outerShareable = (prrr.nos5 == 0);
            break;
          case 6:
            panic("Imp defined type\n");
          case 7:
            curr_tr = prrr.tr7;
            curr_ir = nmrr.ir7;
            curr_or = nmrr.or7;
            te.outerShareable = (prrr.nos7 == 0);
            break;
        }

        switch(curr_tr) {
          case 0:
            DPRINTF(TLBVerbose, "StronglyOrdered\n");
            te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            te.nonCacheable = true;
            te.innerAttrs = 1;
            te.outerAttrs = 0;
            te.shareable = true;
            break;
          case 1:
            DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n",
                    prrr.ds1, prrr.ds0, s);
            te.mtype = TlbEntry::MemoryType::Device;
            te.nonCacheable = true;
            te.innerAttrs = 3;
            te.outerAttrs = 0;
            if (prrr.ds1 && s)
                te.shareable = true;
            if (prrr.ds0 && !s)
                te.shareable = true;
            break;
          case 2:
            DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n",
                    prrr.ns1, prrr.ns0, s);
            te.mtype = TlbEntry::MemoryType::Normal;
            if (prrr.ns1 && s)
                te.shareable = true;
            if (prrr.ns0 && !s)
                te.shareable = true;
            break;
          case 3:
            panic("Reserved type");
        }

        if (te.mtype == TlbEntry::MemoryType::Normal){
            switch(curr_ir) {
              case 0:
                te.nonCacheable = true;
                te.innerAttrs = 0;
                break;
              case 1:
                te.innerAttrs = 5;
                break;
              case 2:
                te.innerAttrs = 6;
                break;
              case 3:
                te.innerAttrs = 7;
                break;
            }

            switch(curr_or) {
              case 0:
                te.nonCacheable = true;
                te.outerAttrs = 0;
                break;
              case 1:
                te.outerAttrs = 1;
                break;
              case 2:
                te.outerAttrs = 2;
                break;
              case 3:
                te.outerAttrs = 3;
                break;
            }
        }
    }
    DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, "
            "outerAttrs: %d\n",
            te.shareable, te.innerAttrs, te.outerAttrs);
    te.setAttributes(false);
}

void
TableWalker::memAttrsLPAE(ThreadContext *tc, TlbEntry &te,
                          LongDescriptor &l_descriptor)
{
    assert(release->has(ArmExtension::LPAE));

    uint8_t attr;
    uint8_t sh = l_descriptor.sh();
    // Different format and source of attributes if this is a stage 2
    // translation
    if (isStage2) {
        attr = l_descriptor.memAttr();
        uint8_t attr_3_2 = (attr >> 2) & 0x3;
        uint8_t attr_1_0 = attr & 0x3;

        DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh);

        if (attr_3_2 == 0) {
            te.mtype = attr_1_0 == 0 ? TlbEntry::MemoryType::StronglyOrdered
                                     : TlbEntry::MemoryType::Device;
            te.outerAttrs = 0;
            te.innerAttrs = attr_1_0 == 0 ? 1 : 3;
            te.nonCacheable = true;
        } else {
            te.mtype = TlbEntry::MemoryType::Normal;
            te.outerAttrs = attr_3_2 == 1 ? 0 :
                            attr_3_2 == 2 ? 2 : 1;
            te.innerAttrs = attr_1_0 == 1 ? 0 :
                            attr_1_0 == 2 ? 6 : 5;
            te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1);
        }
    } else {
        uint8_t attrIndx = l_descriptor.attrIndx();

        // LPAE always uses remapping of memory attributes, irrespective of the
        // value of SCTLR.TRE
        MiscRegIndex reg = attrIndx & 0x4 ? MISCREG_MAIR1 : MISCREG_MAIR0;
        int reg_as_int = snsBankedIndex(reg, currState->tc,
                                        currState->ss ==
                                        SecurityState::NonSecure);
        uint32_t mair = currState->tc->readMiscReg(reg_as_int);
        attr = (mair >> (8 * (attrIndx % 4))) & 0xff;
        uint8_t attr_7_4 = bits(attr, 7, 4);
        uint8_t attr_3_0 = bits(attr, 3, 0);
        DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n",
                attrIndx, sh, attr);

        // Note: the memory subsystem only cares about the 'cacheable' memory
        // attribute. The other attributes are only used to fill the PAR
        // register accordingly to provide the illusion of full support
        te.nonCacheable = false;

        switch (attr_7_4) {
          case 0x0:
            // Strongly-ordered or Device memory
            if (attr_3_0 == 0x0)
                te.mtype = TlbEntry::MemoryType::StronglyOrdered;
            else if (attr_3_0 == 0x4)
                te.mtype = TlbEntry::MemoryType::Device;
            else
                panic("Unpredictable behavior\n");
            te.nonCacheable = true;
            te.outerAttrs = 0;
            break;
          case 0x4:
            // Normal memory, Outer Non-cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            te.outerAttrs = 0;
            if (attr_3_0 == 0x4)
                // Inner Non-cacheable
                te.nonCacheable = true;
            else if (attr_3_0 < 0x8)
                panic("Unpredictable behavior\n");
            break;
          case 0x8:
          case 0x9:
          case 0xa:
          case 0xb:
          case 0xc:
          case 0xd:
          case 0xe:
          case 0xf:
            if (attr_7_4 & 0x4) {
                te.outerAttrs = (attr_7_4 & 1) ? 1 : 3;
            } else {
                te.outerAttrs = 0x2;
            }
            // Normal memory, Outer Cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            if (attr_3_0 != 0x4 && attr_3_0 < 0x8)
                panic("Unpredictable behavior\n");
            break;
          default:
            panic("Unpredictable behavior\n");
            break;
        }

        switch (attr_3_0) {
          case 0x0:
            te.innerAttrs = 0x1;
            break;
          case 0x4:
            te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0;
            break;
          case 0x8:
          case 0x9:
          case 0xA:
          case 0xB:
            te.innerAttrs = 6;
            break;
          case 0xC:
          case 0xD:
          case 0xE:
          case 0xF:
            te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7;
            break;
          default:
            panic("Unpredictable behavior\n");
            break;
        }
    }

    te.outerShareable = sh == 2;
    te.shareable = (sh & 0x2) ? true : false;
    te.setAttributes(true);
    te.attributes |= (uint64_t) attr << 56;
}

bool
TableWalker::uncacheableFromAttrs(uint8_t attrs)
{
    return !bits(attrs, 2) || // Write-through
        attrs == 0b0100;      // NonCacheable
}

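// Attribute nibble examples: encodings with bit 2 clear (the Write-through
// encodings 0b00xx and 0b10xx) and 0b0100 (Normal Non-cacheable) are treated
// as uncacheable by the walker; only the Write-back encodings keep their
// cacheability.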
void
TableWalker::memAttrsAArch64(ThreadContext *tc, TlbEntry &te,
                             LongDescriptor &l_descriptor)
{
    uint8_t attr;
    uint8_t attr_hi;
    uint8_t attr_lo;
    uint8_t sh = l_descriptor.sh();

    if (isStage2) {
        attr = l_descriptor.memAttr();
        uint8_t attr_hi = (attr >> 2) & 0x3;
        uint8_t attr_lo = attr & 0x3;

        DPRINTF(TLBVerbose, "memAttrsAArch64 MemAttr:%#x sh:%#x\n", attr, sh);

        if (attr_hi == 0) {
            te.mtype = attr_lo == 0 ? TlbEntry::MemoryType::StronglyOrdered
                                    : TlbEntry::MemoryType::Device;
            te.outerAttrs = 0;
            te.innerAttrs = attr_lo == 0 ? 1 : 3;
            te.nonCacheable = true;
        } else {
            te.mtype = TlbEntry::MemoryType::Normal;
            te.outerAttrs = attr_hi == 1 ? 0 :
                            attr_hi == 2 ? 2 : 1;
            te.innerAttrs = attr_lo == 1 ? 0 :
                            attr_lo == 2 ? 6 : 5;
            // Treat write-through memory as uncacheable, this is safe
            // but for performance reasons not optimal.
            te.nonCacheable = (attr_hi == 1) || (attr_hi == 2) ||
                (attr_lo == 1) || (attr_lo == 2);

            // To be used when merging stage 1 and stage 2 attributes
            te.xs = !l_descriptor.fnxs();
        }
    } else {
        uint8_t attrIndx = l_descriptor.attrIndx();

        DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n",
                attrIndx, sh);

        // Select MAIR
        uint64_t mair;
        switch (currState->regime) {
          case TranslationRegime::EL10:
            mair = tc->readMiscReg(MISCREG_MAIR_EL1);
            break;
          case TranslationRegime::EL20:
          case TranslationRegime::EL2:
            mair = tc->readMiscReg(MISCREG_MAIR_EL2);
            break;
          case TranslationRegime::EL3:
            mair = tc->readMiscReg(MISCREG_MAIR_EL3);
            break;
          default:
            panic("Invalid exception level");
            break;
        }

        // Select attributes
        attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
        attr_lo = bits(attr, 3, 0);
        attr_hi = bits(attr, 7, 4);

        // Treat write-through memory as uncacheable, this is safe
        // but for performance reasons not optimal.
        switch (attr) {
          case 0b00000000 ... 0b00001111: // Device Memory
            te.mtype = TlbEntry::MemoryType::Device;
            te.nonCacheable = true;
            te.xs = !bits(attr, 0);
            break;
          case 0b01000000: // Normal memory, Non-cacheable
            te.mtype = TlbEntry::MemoryType::Normal;
            te.nonCacheable = true;
            te.xs = false;
            break;
          case 0b10100000: // Normal memory, Write-through
            te.mtype = TlbEntry::MemoryType::Normal;
            te.nonCacheable = true;
            te.xs = false;
            break;
          default:
            te.mtype = TlbEntry::MemoryType::Normal;
            te.nonCacheable = uncacheableFromAttrs(attr_hi) ||
                              uncacheableFromAttrs(attr_lo);
            // XS is 0 only for write-back regions (cacheable)
            te.xs = te.nonCacheable;
            break;
        }

        te.shareable = sh == 2;
        te.outerShareable = (sh & 0x2) ? true : false;
        // Attributes formatted according to the 64-bit PAR
        te.attributes = ((uint64_t) attr << 56) |
            (1 << 11) |    // LPAE bit
            (te.ns << 9) | // NS bit
            (sh << 7);
    }
}

void
TableWalker::memAttrsWalkAArch64(TlbEntry &te)
{
    te.mtype = TlbEntry::MemoryType::Normal;
    if (uncacheableWalk()) {
        te.shareable = 3;
        te.outerAttrs = 0;
        te.innerAttrs = 0;
        te.nonCacheable = true;
    } else {
        te.shareable = currState->sh;
        te.outerAttrs = currState->orgn;
        te.innerAttrs = currState->irgn;
        te.nonCacheable = (te.outerAttrs == 0 || te.outerAttrs == 2) &&
            (te.innerAttrs == 0 || te.innerAttrs == 2);
    }

    // XS is 0 only for write-back regions (cacheable)
    te.xs = te.nonCacheable;
}

void
TableWalker::doL1Descriptor()
{
    if (currState->fault != NoFault) {
        return;
    }

    currState->l1Desc.data = htog(currState->l1Desc.data,
                                  byteOrder(currState->tc));
    DPRINTF(TLB, "L1 descriptor for %#x is %#x\n",
            currState->vaddr_tainted, currState->l1Desc.data);
    TlbEntry te;

    const bool is_atomic = currState->req->isAtomic();

    switch (currState->l1Desc.type()) {
      case L1Descriptor::Ignore:
      case L1Descriptor::Reserved:
        if (!currState->timing) {
            currState->tc = NULL;
            currState->req = NULL;
        }
        DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
        if (currState->isFetch)
            currState->fault =
                std::make_shared<PrefetchAbort>(
                    currState->vaddr_tainted,
                    ArmFault::TranslationLL + LookupLevel::L1,
                    isStage2,
                    ArmFault::VmsaTran);
        else
            currState->fault =
                std::make_shared<DataAbort>(
                    currState->vaddr_tainted,
                    TlbEntry::DomainType::NoAccess,
                    is_atomic ? false : currState->isWrite,
                    ArmFault::TranslationLL + LookupLevel::L1, isStage2,
                    ArmFault::VmsaTran);
        return;
      case L1Descriptor::Section:
        if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) {
            /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is
              * enabled if set, do l1.Desc.setAp0() instead of generating
              * AccessFlag0
              */
            currState->fault = std::make_shared<DataAbort>(
                currState->vaddr_tainted,
                currState->l1Desc.domain(),
                is_atomic ? false : currState->isWrite,
                ArmFault::AccessFlagLL + LookupLevel::L1,
                isStage2,
                ArmFault::VmsaTran);
        }
        if (currState->l1Desc.supersection()) {
            panic("Haven't implemented supersections\n");
        }
        insertTableEntry(currState->l1Desc, false);
        return;
      case L1Descriptor::PageTable:
        {
            Addr l2desc_addr;
            l2desc_addr = currState->l1Desc.l2Addr() |
                (bits(currState->vaddr, 19, 12) << 2);
            DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n",
                    l2desc_addr, currState->ss == SecurityState::Secure ?
                    "s" : "ns");

            Request::Flags flag = Request::PT_WALK;
            if (currState->sctlr.c == 0 || currState->isUncacheable) {
                flag.set(Request::UNCACHEABLE);
            }

            if (currState->secureLookup)
                flag.set(Request::SECURE);

            fetchDescriptor(
                l2desc_addr, currState->l2Desc,
                sizeof(uint32_t), flag, LookupLevel::L2,
                &doL2DescEvent,
                &TableWalker::doL2Descriptor);

            currState->delayed = true;

            return;
        }
      default:
        panic("A new type in a 2 bit field?\n");
    }
}

Fault
TableWalker::generateLongDescFault(ArmFault::FaultSource src)
{
    if (currState->isFetch) {
        return std::make_shared<PrefetchAbort>(
            currState->vaddr_tainted,
            src + currState->longDesc.lookupLevel,
            isStage2,
            ArmFault::LpaeTran);
    } else {
        return std::make_shared<DataAbort>(
            currState->vaddr_tainted,
            TlbEntry::DomainType::NoAccess,
            currState->req->isAtomic() ? false : currState->isWrite,
            src + currState->longDesc.lookupLevel,
            isStage2,
            ArmFault::LpaeTran);
    }
}

void
TableWalker::doLongDescriptor()
{
    if (currState->fault != NoFault) {
        return;
    }

    currState->longDesc.data = htog(currState->longDesc.data,
                                    byteOrder(currState->tc));
    DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n",
            currState->longDesc.lookupLevel, currState->vaddr_tainted,
            currState->longDesc.data,
            currState->aarch64 ? "AArch64" : "long-desc.");

    if (currState->longDesc.type() == LongDescriptor::Block ||
        currState->longDesc.type() == LongDescriptor::Page) {
        DPRINTF(PageTableWalker, "Analyzing L%d descriptor: %#llx, pxn: %d, "
                "xn: %d, ap: %d, af: %d, type: %d\n",
                currState->longDesc.lookupLevel,
                currState->longDesc.data,
                currState->longDesc.pxn(),
                currState->longDesc.xn(),
                currState->longDesc.ap(),
                currState->longDesc.af(),
                currState->longDesc.type());
    } else {
        DPRINTF(PageTableWalker, "Analyzing L%d descriptor: %#llx, type: %d\n",
                currState->longDesc.lookupLevel,
                currState->longDesc.data,
                currState->longDesc.type());
    }

    TlbEntry te;

    switch (currState->longDesc.type()) {
      case LongDescriptor::Invalid:
        DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
                currState->longDesc.lookupLevel,
                ArmFault::TranslationLL + currState->longDesc.lookupLevel);

        currState->fault = generateLongDescFault(ArmFault::TranslationLL);
        if (!currState->timing) {
            currState->tc = NULL;
            currState->req = NULL;
        }
        return;

      case LongDescriptor::Block:
      case LongDescriptor::Page:
        {
            auto fault_source = ArmFault::FaultSourceInvalid;
            // Check for address size fault
            if (checkAddrSizeFaultAArch64(currState->longDesc.paddr(),
                                          currState->physAddrRange)) {

                DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
                        currState->longDesc.lookupLevel);
                fault_source = ArmFault::AddressSizeLL;

            // Check for access fault
            } else if (currState->longDesc.af() == 0) {

                DPRINTF(TLB, "L%d descriptor causing Access Fault\n",
                        currState->longDesc.lookupLevel);
                fault_source = ArmFault::AccessFlagLL;
            }

            if (fault_source != ArmFault::FaultSourceInvalid) {
                currState->fault = generateLongDescFault(fault_source);
            } else {
                insertTableEntry(currState->longDesc, true);
            }
        }
        return;
      case LongDescriptor::Table:
        {
            // Set hierarchical permission flags
            if (!isStage2) {
                currState->secureLookup = currState->secureLookup &&
                    currState->longDesc.secureTable();
            }
            currState->longDescData->rwTable =
                currState->longDescData->rwTable &&
                (currState->longDesc.rwTable() || currState->hpd);
            currState->longDescData->userTable =
                currState->longDescData->userTable &&
                (currState->longDesc.userTable() || currState->hpd);
            currState->longDescData->xnTable =
                currState->longDescData->xnTable ||
                (currState->longDesc.xnTable() && !currState->hpd);
            currState->longDescData->pxnTable =
                currState->longDescData->pxnTable ||
                (currState->longDesc.pxnTable() && !currState->hpd);

            // Set up next level lookup
            Addr next_desc_addr = currState->longDesc.nextDescAddr(
                currState->vaddr);

            DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
                    currState->longDesc.lookupLevel,
                    currState->longDesc.lookupLevel + 1,
                    next_desc_addr,
                    currState->secureLookup ? "s" : "ns");

            // Check for address size fault
            if (checkAddrSizeFaultAArch64(
                    next_desc_addr, currState->physAddrRange)) {
                DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
                        currState->longDesc.lookupLevel);

                currState->fault = generateLongDescFault(
                    ArmFault::AddressSizeLL);
                return;
            }

            if (mmu->hasWalkCache()) {
                insertPartialTableEntry(currState->longDesc);
            }

            Request::Flags flag = Request::PT_WALK;
            if (currState->secureLookup)
                flag.set(Request::SECURE);

            if (currState->sctlr.c == 0 || currState->isUncacheable) {
                flag.set(Request::UNCACHEABLE);
            }

            LookupLevel L = currState->longDesc.lookupLevel =
                (LookupLevel) (currState->longDesc.lookupLevel + 1);
            Event *event = NULL;
            switch (L) {
              case LookupLevel::L1:
                assert(currState->aarch64);
              case LookupLevel::L2:
              case LookupLevel::L3:
                event = LongDescEventByLevel[L];
                break;
              default:
                panic("Wrong lookup level in table walk\n");
                break;
            }

            fetchDescriptor(
                next_desc_addr, currState->longDesc,
                sizeof(uint64_t), flag, L, event,
                &TableWalker::doLongDescriptor);

            currState->delayed = true;
        }
        return;
      default:
        panic("A new type in a 2 bit field?\n");
    }
}

void
TableWalker::doL2Descriptor()
{
    if (currState->fault != NoFault) {
        return;
    }

    currState->l2Desc.data = htog(currState->l2Desc.data,
                                  byteOrder(currState->tc));
    DPRINTF(TLB, "L2 descriptor for %#x is %#x\n",
            currState->vaddr_tainted, currState->l2Desc.data);
    TlbEntry te;

    const bool is_atomic = currState->req->isAtomic();

    if (currState->l2Desc.invalid()) {
        DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
        if (!currState->timing) {
            currState->tc = NULL;
            currState->req = NULL;
        }
        if (currState->isFetch)
            currState->fault = std::make_shared<PrefetchAbort>(
                currState->vaddr_tainted,
                ArmFault::TranslationLL + LookupLevel::L2,
                isStage2,
                ArmFault::VmsaTran);
        else
            currState->fault = std::make_shared<DataAbort>(
                currState->vaddr_tainted, TlbEntry::DomainType::NoAccess,
                is_atomic ? false : currState->isWrite,
                ArmFault::TranslationLL + LookupLevel::L2,
                isStage2,
                ArmFault::VmsaTran);
        return;
    }

    if (currState->sctlr.afe && bits(currState->l2Desc.ap(), 0) == 0) {
        /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is
          * enabled if set, do l2.Desc.setAp0() instead of generating
          * AccessFlag0
          */
        DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
                currState->sctlr.afe, currState->l2Desc.ap());

        currState->fault = std::make_shared<DataAbort>(
            currState->vaddr_tainted,
            TlbEntry::DomainType::NoAccess,
            is_atomic ? false : currState->isWrite,
            ArmFault::AccessFlagLL + LookupLevel::L2, isStage2,
            ArmFault::VmsaTran);
    }

    insertTableEntry(currState->l2Desc, false);
}

void
TableWalker::doL1DescriptorWrapper()
{
    currState = stateQueues[LookupLevel::L1].front();
    currState->delayed = false;
    // if there's a stage2 translation object we don't need it any more
    if (currState->stage2Tran) {
        delete currState->stage2Tran;
        currState->stage2Tran = NULL;
    }


    DPRINTF(PageTableWalker, "L1 Desc object host addr: %p\n",
            &currState->l1Desc.data);
    DPRINTF(PageTableWalker, "L1 Desc object data: %08x\n",
            currState->l1Desc.data);

    DPRINTF(PageTableWalker, "calling doL1Descriptor for vaddr:%#x\n",
            currState->vaddr_tainted);
    doL1Descriptor();

    stateQueues[LookupLevel::L1].pop_front();
    // Check if fault was generated
    if (currState->fault != NoFault) {
        currState->transState->finish(currState->fault, currState->req,
                                      currState->tc, currState->mode);
        stats.walksShortTerminatedAtLevel[0]++;

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    }
    else if (!currState->delayed) {
        // delay is not set so there is no L2 to do
        // Don't finish the translation if a stage 2 look up is underway
        stats.walkServiceTime.sample(curTick() - currState->startTime);
        DPRINTF(PageTableWalker, "calling translateTiming again\n");

        mmu->translateTiming(currState->req, currState->tc,
                             currState->transState, currState->mode,
                             currState->tranType, isStage2);

        stats.walksShortTerminatedAtLevel[0]++;

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    } else {
        // need to do L2 descriptor
        stashCurrState(LookupLevel::L2);
    }
    currState = NULL;
}

void
TableWalker::doL2DescriptorWrapper()
{
    currState = stateQueues[LookupLevel::L2].front();
    assert(currState->delayed);
    // if there's a stage2 translation object we don't need it any more
    if (currState->stage2Tran) {
        delete currState->stage2Tran;
        currState->stage2Tran = NULL;
    }

    DPRINTF(PageTableWalker, "calling doL2Descriptor for vaddr:%#x\n",
            currState->vaddr_tainted);
    doL2Descriptor();

    // Check if fault was generated
    if (currState->fault != NoFault) {
        currState->transState->finish(currState->fault, currState->req,
                                      currState->tc, currState->mode);
        stats.walksShortTerminatedAtLevel[1]++;
    } else {
        stats.walkServiceTime.sample(curTick() - currState->startTime);
        DPRINTF(PageTableWalker, "calling translateTiming again\n");

        mmu->translateTiming(currState->req, currState->tc,
                             currState->transState, currState->mode,
                             currState->tranType, isStage2);

        stats.walksShortTerminatedAtLevel[1]++;
    }


    stateQueues[LookupLevel::L2].pop_front();
    pending = false;
    nextWalk(currState->tc);

    currState->req = NULL;
    currState->tc = NULL;
    currState->delayed = false;

    delete currState;
    currState = NULL;
}

void
TableWalker::doL0LongDescriptorWrapper()
{
    doLongDescriptorWrapper(LookupLevel::L0);
}

void
TableWalker::doL1LongDescriptorWrapper()
{
    doLongDescriptorWrapper(LookupLevel::L1);
}

void
TableWalker::doL2LongDescriptorWrapper()
{
    doLongDescriptorWrapper(LookupLevel::L2);
}

void
TableWalker::doL3LongDescriptorWrapper()
{
    doLongDescriptorWrapper(LookupLevel::L3);
}

void
TableWalker::doLongDescriptorWrapper(LookupLevel curr_lookup_level)
{
    currState = stateQueues[curr_lookup_level].front();
    assert(curr_lookup_level == currState->longDesc.lookupLevel);
    currState->delayed = false;

    // if there's a stage2 translation object we don't need it any more
    if (currState->stage2Tran) {
        delete currState->stage2Tran;
        currState->stage2Tran = NULL;
    }

    DPRINTF(PageTableWalker, "calling doLongDescriptor for vaddr:%#x\n",
            currState->vaddr_tainted);
    doLongDescriptor();

    stateQueues[curr_lookup_level].pop_front();

    if (currState->fault != NoFault) {
        // A fault was generated
        currState->transState->finish(currState->fault, currState->req,
                                      currState->tc, currState->mode);

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    } else if (!currState->delayed) {
        // No additional lookups required
        DPRINTF(PageTableWalker, "calling translateTiming again\n");
        stats.walkServiceTime.sample(curTick() - currState->startTime);

        mmu->translateTiming(currState->req, currState->tc,
                             currState->transState, currState->mode,
                             currState->tranType, isStage2);

        stats.walksLongTerminatedAtLevel[(unsigned) curr_lookup_level]++;

        pending = false;
        nextWalk(currState->tc);

        currState->req = NULL;
        currState->tc = NULL;
        currState->delayed = false;
        delete currState;
    } else {
        if (curr_lookup_level >= LookupLevel::Num_ArmLookupLevel - 1)
            panic("Max. number of lookups already reached in table walk\n");
        // Need to perform additional lookups
        stashCurrState(currState->longDesc.lookupLevel);
    }
    currState = NULL;
}


void
TableWalker::nextWalk(ThreadContext *tc)
{
    if (pendingQueue.size())
        schedule(doProcessEvent, clockEdge(Cycles(1)));
    else
        completeDrain();
}

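// fetchDescriptor() is the single funnel for descriptor memory accesses: if
// the walk has a stage 2, the descriptor IPA is translated first; the access
// itself is then issued functionally, atomically, or as a timing request,
// and doDescriptor() runs inline on the non-timing paths or from the
// completion event for timing walks.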
void
TableWalker::fetchDescriptor(Addr desc_addr,
    DescriptorBase &descriptor, int num_bytes,
    Request::Flags flags, LookupLevel lookup_level, Event *event,
    void (TableWalker::*doDescriptor)())
{
    uint8_t *data = descriptor.getRawPtr();

    DPRINTF(PageTableWalker,
            "Fetching descriptor at address: 0x%x stage2Req: %d\n",
            desc_addr, currState->stage2Req);

    // If this translation has a stage 2 then we know desc_addr is an IPA and
    // needs to be translated before we can access the page table. Do that
    // check here.
    if (currState->stage2Req) {
        Fault fault;

        if (currState->timing) {
            auto *tran = new
                Stage2Walk(*this, data, event, currState->vaddr,
                           currState->mode, currState->tranType);
            currState->stage2Tran = tran;
            readDataTimed(currState->tc, desc_addr, tran, num_bytes, flags);
            fault = tran->fault;

            if (fault != NoFault) {
                currState->fault = fault;
            }
        } else {
            fault = readDataUntimed(currState->tc,
                currState->vaddr, desc_addr, data, num_bytes, flags,
                currState->mode,
                currState->tranType,
                currState->functional);

            if (fault != NoFault) {
                currState->fault = fault;
            }

            (this->*doDescriptor)();
        }
    } else {
        RequestPtr req = std::make_shared<Request>(
            desc_addr, num_bytes, flags, requestorId);
        req->taskId(context_switch_task_id::DMA);

        mpamTagTableWalk(req);

        Fault fault = testWalk(req, descriptor.domain(),
                               lookup_level);

        if (fault != NoFault) {
            currState->fault = fault;
            return;
        }

        if (currState->timing) {
            port->sendTimingReq(req, data,
                                currState->tc->getCpuPtr()->clockPeriod(),
                                event);

        } else if (!currState->functional) {
            port->sendAtomicReq(req, data,
                                currState->tc->getCpuPtr()->clockPeriod());

            (this->*doDescriptor)();
        } else {
            port->sendFunctionalReq(req, data);
            (this->*doDescriptor)();
        }
    }
}

void
TableWalker::stashCurrState(int queue_idx)
{
    DPRINTF(PageTableWalker, "Adding to walker fifo: "
            "queue size before adding: %d\n",
            stateQueues[queue_idx].size());
    stateQueues[queue_idx].push_back(currState);
    currState = NULL;
}

void
TableWalker::insertPartialTableEntry(LongDescriptor &descriptor)
{
    const bool have_security = release->has(ArmExtension::SECURITY);
    TlbEntry te;

    // Create and fill a new page table entry
    te.valid = true;
    te.longDescFormat = true;
    te.partial = true;
    // The entry is global if there is no address space identifier
    // to differentiate translation contexts
    te.global = !mmu->hasUnprivRegime(currState->regime);
    te.asid = currState->asid;
    te.vmid = currState->vmid;
    te.N = descriptor.offsetBits();
    te.tg = descriptor.grainSize;
    te.vpn = currState->vaddr >> te.N;
    te.size = (1ULL << te.N) - 1;
    te.pfn = descriptor.nextTableAddr();
    te.domain = descriptor.domain();
    te.lookupLevel = descriptor.lookupLevel;
    te.ns = !descriptor.secure(have_security, currState);
    te.ss = currState->ss;
    te.ipaSpace = currState->ipaSpace; // Used by stage2 entries only
    te.type = TypeTLB::unified;

    te.regime = currState->regime;

    te.xn = currState->longDescData->xnTable;
    te.pxn = currState->longDescData->pxnTable;
    te.ap = (currState->longDescData->rwTable << 1) |
        (currState->longDescData->userTable);

    memAttrsWalkAArch64(te);

    // Debug output
    DPRINTF(TLB, descriptor.dbgHeader().c_str());
    DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
            te.N, te.pfn, te.size, te.global, te.valid);
    DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
            "vmid:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
            te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid,
            te.nonCacheable, te.ns);
    DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
            descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
            descriptor.getRawData());

    // Insert the entry into the TLBs
    tlb->multiInsert(te);
}

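// Partial entries (te.partial above) cache an intermediate table address in
// the walk cache; walkAddresses() consumes them on later walks to the same
// region so already-traversed levels can be skipped. Full leaf entries are
// built below in insertTableEntry().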
2318void
2319TableWalker::insertTableEntry(DescriptorBase &descriptor, bool long_descriptor)
2320{
2321 const bool have_security = release->has(ArmExtension::SECURITY);
2322 TlbEntry te;
2323
2324 // Create and fill a new page table entry
2325 te.valid = true;
2326 te.longDescFormat = long_descriptor;
2327 te.asid = currState->asid;
2328 te.vmid = currState->vmid;
2329 te.N = descriptor.offsetBits();
2330 te.vpn = currState->vaddr >> te.N;
2331 te.size = (1<<te.N) - 1;
2332 te.pfn = descriptor.pfn();
2333 te.domain = descriptor.domain();
2334 te.lookupLevel = descriptor.lookupLevel;
2335 te.ns = !descriptor.secure(have_security, currState);
2336 te.ss = currState->ss;
2337 te.ipaSpace = currState->ipaSpace; // Used by stage2 entries only
2338 te.xn = descriptor.xn();
2339 te.type = currState->mode == BaseMMU::Execute ?
2340 TypeTLB::instruction : TypeTLB::data;
2341
2342 te.regime = currState->regime;
2343
2346
2347 // ASID has no meaning for stage 2 TLB entries, so mark all stage 2 entries
2348 // as global
2349 te.global = descriptor.global(currState) || isStage2;
2350 if (long_descriptor) {
2351 LongDescriptor l_descriptor =
2352 dynamic_cast<LongDescriptor &>(descriptor);
2353
2354 te.tg = l_descriptor.grainSize;
2355 te.xn |= currState->longDescData->xnTable;
2356 te.pxn = currState->longDescData->pxnTable || l_descriptor.pxn();
2357 if (isStage2) {
2358 // this is actually the HAP field, but its stored in the same bit
2359 // possitions as the AP field in a stage 1 translation.
2360 te.hap = l_descriptor.ap();
2361 } else {
2362 te.ap = ((!currState->longDescData->rwTable ||
2363 descriptor.ap() >> 1) << 1) |
2364 (currState->longDescData->userTable && (descriptor.ap() & 0x1));
2365 }
2366 if (currState->aarch64)
2367 memAttrsAArch64(currState->tc, te, l_descriptor);
2368 else
2369 memAttrsLPAE(currState->tc, te, l_descriptor);
2370 } else {
2371 te.ap = descriptor.ap();
2372 memAttrs(currState->tc, te, currState->sctlr, descriptor.texcb(),
2373 descriptor.shareable());
2374 }
2375
2376 // Debug output
2377 DPRINTF(TLB, descriptor.dbgHeader().c_str());
2378 DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
2379 te.N, te.pfn, te.size, te.global, te.valid);
2380 DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
2381 "vmid:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
2382 te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid,
2383 te.nonCacheable, te.ns);
2384 DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
2385 descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
2386 descriptor.getRawData());
2387
2388 // Insert the entry into the TLBs
2390 if (!currState->timing) {
2391 currState->tc = NULL;
2392 currState->req = NULL;
2393 }
2394}
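The stage 1 AP combine above folds the hierarchical APTable restrictions accumulated during the walk (rwTable, userTable) into the leaf descriptor's two AP bits: a cleared rwTable forces the read-only bit, and a cleared userTable strips EL0 access. A self-contained sketch of that expression with plain bools and ints (combineStage1Ap is a hypothetical helper, not gem5 code):

#include <cassert>
#include <cstdint>

// bit 1 of the result = read-only, bit 0 = user-accessible.
static uint8_t
combineStage1Ap(bool rw_table, bool user_table, uint8_t leaf_ap)
{
    return ((!rw_table || (leaf_ap >> 1)) << 1) |
        (user_table && (leaf_ap & 0x1));
}

int
main()
{
    // Unrestricted tables: the leaf AP value passes through.
    assert(combineStage1Ap(true, true, 0b01) == 0b01);
    // rwTable clear: the entry is forced read-only.
    assert(combineStage1Ap(false, true, 0b01) == 0b11);
    // userTable clear: the entry loses EL0 access.
    assert(combineStage1Ap(true, false, 0b01) == 0b00);
    return 0;
}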
2395
2396TableWalker::LookupLevel
2397TableWalker::toLookupLevel(uint8_t lookup_level_as_int)
2398{
2399 switch (lookup_level_as_int) {
2400 case LookupLevel::L1:
2401 return LookupLevel::L1;
2402 case LookupLevel::L2:
2403 return LookupLevel::L2;
2404 case LookupLevel::L3:
2405 return LookupLevel::L3;
2406 default:
2407 panic("Invalid lookup level conversion");
2408 }
2409}
2410
2411/* this method keeps track of the table walker queue's residency, so
2412 * it needs to be called whenever requests start and complete. */
2413void
2414TableWalker::pendingChange()
2415{
2416 unsigned n = pendingQueue.size();
2417 if ((currState != NULL) && (currState != pendingQueue.front())) {
2418 ++n;
2419 }
2420
2421 if (n != pendingReqs) {
2422 Tick now = curTick();
2423        stats.pendingWalks.sample(pendingReqs, now - pendingChangeTick);
2424        pendingReqs = n;
2425 pendingChangeTick = now;
2426 }
2427}
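pendingChange() records the previous queue depth weighted by how long it was held (now - pendingChangeTick), which is what turns stats.pendingWalks into a residency distribution rather than a simple event count. A self-contained sketch of the same time-weighted bookkeeping, with a std::map standing in for gem5's statistics::Distribution (OccupancyDist is illustrative only):

#include <cstdint>
#include <iostream>
#include <map>

struct OccupancyDist
{
    std::map<unsigned, uint64_t> ticksAt;  // depth -> ticks spent there
    unsigned depth = 0;                    // last recorded queue depth
    uint64_t lastChange = 0;               // tick of the last change

    void update(unsigned new_depth, uint64_t now)
    {
        if (new_depth == depth)
            return;                          // nothing to record
        ticksAt[depth] += now - lastChange;  // weight old depth by duration
        depth = new_depth;
        lastChange = now;
    }
};

int
main()
{
    OccupancyDist d;
    d.update(1, 100);  // depth 0 held for 100 ticks
    d.update(3, 250);  // depth 1 held for 150 ticks
    d.update(0, 400);  // depth 3 held for 150 ticks
    for (const auto &[depth, ticks] : d.ticksAt)
        std::cout << "depth " << depth << ": " << ticks << " ticks\n";
    return 0;
}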
2428
2429Fault
2430TableWalker::testWalk(const RequestPtr &walk_req, DomainType domain,
2431    LookupLevel lookup_level)
2432{
2433 if (!test) {
2434 return NoFault;
2435 } else {
2436 return test->walkCheck(walk_req, currState->vaddr,
2438 currState->el != EL0,
2439 currState->mode, domain, lookup_level);
2440 }
2441}
2442
2443void
2444TableWalker::setTestInterface(TlbTestInterface *ti)
2445{
2446    test = ti;
2447}
2448
2449uint8_t
2450TableWalker::pageSizeNtoStatBin(uint8_t N)
2451{
2452 /* for stats.pageSizes */
2453 switch(N) {
2454 case 12: return 0; // 4K
2455 case 14: return 1; // 16K (using 16K granule in v8-64)
2456 case 16: return 2; // 64K
2457 case 20: return 3; // 1M
2458 case 21: return 4; // 2M-LPAE
2459 case 24: return 5; // 16M
2460 case 25: return 6; // 32M (using 16K granule in v8-64)
2461 case 29: return 7; // 512M (using 64K granule in v8-64)
2462 case 30: return 8; // 1G-LPAE
2463    case 42: return 9; // 4TB (using 64K granule in v8-64)
2464 default:
2465 panic("unknown page size");
2466 return 255;
2467 }
2468}
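Each bin above is keyed by N, the width of the page offset, so the page it denotes is exactly 1ULL << N bytes (N=12 is 4 KiB, N=21 is 2 MiB, N=42 is 4 TiB, matching the subnames given to stats.pageSizes further down). A quick standalone print of that mapping:

#include <cstdint>
#include <cstdio>

int
main()
{
    // The offset widths accepted by the switch above.
    const int widths[] = { 12, 14, 16, 20, 21, 24, 25, 29, 30, 42 };
    for (int n : widths) {
        const uint64_t bytes = 1ULL << n;
        std::printf("N=%2d -> %12llu KiB\n", n,
                    static_cast<unsigned long long>(bytes >> 10));
    }
    return 0;
}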
2469
2470Fault
2471TableWalker::readDataUntimed(ThreadContext *tc, Addr vaddr, Addr desc_addr,
2472    uint8_t *data, int num_bytes, Request::Flags flags, BaseMMU::Mode mode,
2473 MMU::ArmTranslationType tran_type, bool functional)
2474{
2475 Fault fault;
2476
2477 // translate to physical address using the second stage MMU
2478 auto req = std::make_shared<Request>();
2479 req->setVirt(desc_addr, num_bytes, flags | Request::PT_WALK,
2480 requestorId, 0);
2481
2482 if (functional) {
2483 fault = mmu->translateFunctional(req, tc, BaseMMU::Read,
2484 tran_type, true);
2485 } else {
2486 fault = mmu->translateAtomic(req, tc, BaseMMU::Read,
2487 tran_type, true);
2488 }
2489
2490 // Now do the access.
2491 if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
2492 Packet pkt = Packet(req, MemCmd::ReadReq);
2493 pkt.dataStatic(data);
2494 if (functional) {
2495 port->sendFunctional(&pkt);
2496 } else {
2497 port->sendAtomic(&pkt);
2498 }
2499 assert(!pkt.isError());
2500 }
2501
2502    // If there was a fault, annotate it with the flag saying the fault
2503    // occurred while doing a translation for a stage 1 page table walk.
2504 if (fault != NoFault) {
2505 ArmFault *arm_fault = reinterpret_cast<ArmFault *>(fault.get());
2506 arm_fault->annotate(ArmFault::S1PTW, true);
2507 arm_fault->annotate(ArmFault::OVA, vaddr);
2508 }
2509 return fault;
2510}
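readDataUntimed() is a translate-then-access sequence: the descriptor address is first run through the stage 2 MMU (functionally or atomically), and the memory read is issued only if the translation produced no fault and the request was not flagged NO_ACCESS. A compact sketch of that control flow, with std::function placeholders standing in for the MMU and the port (all names here are hypothetical):

#include <cstdint>
#include <functional>
#include <optional>

// The translate step returns an error string on fault, nothing on success.
using Translate = std::function<std::optional<const char *>(uint64_t va)>;
using PortRead = std::function<void(uint64_t va, uint8_t *buf, int len)>;

static std::optional<const char *>
readDescriptor(uint64_t va, uint8_t *buf, int len,
               const Translate &translate, const PortRead &read)
{
    if (auto fault = translate(va))
        return fault;    // caller annotates this as an S1PTW fault
    read(va, buf, len);  // access only after a clean translation
    return std::nullopt;
}

int
main()
{
    uint8_t desc[8] = {};
    auto fault = readDescriptor(
        0x1000, desc, sizeof(desc),
        [](uint64_t) { return std::optional<const char *>{}; },
        [](uint64_t, uint8_t *buf, int len) {
            for (int i = 0; i < len; i++)
                buf[i] = 0xAB;  // stand-in for the real memory contents
        });
    return fault ? 1 : 0;
}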
2511
2512void
2513TableWalker::mpamTagTableWalk(RequestPtr &req) const
2514{
2515    mpam::tagRequest(currState->tc, req, currState->isFetch);
2516}
2517
2518void
2519TableWalker::readDataTimed(ThreadContext *tc, Addr desc_addr,
2520    Stage2Walk *translation, int num_bytes,
2521    Request::Flags flags)
2522{
2523 // translate to physical address using the second stage MMU
2524 translation->setVirt(
2525 desc_addr, num_bytes, flags | Request::PT_WALK, requestorId);
2526 translation->translateTiming(tc);
2527}
2528
2529TableWalker::Stage2Walk::Stage2Walk(TableWalker &_parent,
2530    uint8_t *_data, Event *_event, Addr vaddr, BaseMMU::Mode _mode,
2531 MMU::ArmTranslationType tran_type)
2532 : data(_data), numBytes(0), event(_event), parent(_parent),
2533 oVAddr(vaddr), mode(_mode), tranType(tran_type), fault(NoFault)
2534{
2535 req = std::make_shared<Request>();
2536}
2537
2538void
2539TableWalker::Stage2Walk::finish(const Fault &_fault,
2540    const RequestPtr &req,
2541    ThreadContext *tc, BaseMMU::Mode mode)
2542{
2543 fault = _fault;
2544
2545    // If there was a fault, annotate it with the flag saying the fault
2546    // occurred while doing a translation for a stage 1 page table walk.
2547 if (fault != NoFault) {
2548 ArmFault *arm_fault = reinterpret_cast<ArmFault *>(fault.get());
2549 arm_fault->annotate(ArmFault::S1PTW, true);
2550 arm_fault->annotate(ArmFault::OVA, oVAddr);
2551 }
2552
2553 if (_fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
2554 parent.getTableWalkerPort().sendTimingReq(req, data,
2555 tc->getCpuPtr()->clockPeriod(), event);
2556 } else {
2557 // We can't do the DMA access as there's been a problem, so tell the
2558 // event we're done
2559 event->process();
2560 }
2561}
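Stage2Walk::finish() is the completion callback for the stage 2 translation of a descriptor address: on a fault it annotates the fault and fires the walker's event immediately so the walk can unwind, otherwise it forwards the now-translated request to the port and lets the response wake the walker later. A self-contained sketch of that either-or structure (Event and port replaced by std::function placeholders):

#include <functional>
#include <iostream>

// Whichever branch is taken, the waiting walker must be woken exactly once.
static void
onTranslationDone(bool faulted,
                  const std::function<void()> &send_request,
                  const std::function<void()> &wake_walker)
{
    if (!faulted) {
        send_request();  // the port wakes the walker when data returns
    } else {
        wake_walker();   // no access possible; unwind the walk now
    }
}

int
main()
{
    onTranslationDone(true, [] {}, [] { std::cout << "walk unwound\n"; });
    return 0;
}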
2562
2563void
2564TableWalker::Stage2Walk::translateTiming(ThreadContext *tc)
2565{
2566 parent.mmu->translateTiming(req, tc, this, mode, tranType, true);
2567}
2568
2569TableWalker::TableWalkerStats::TableWalkerStats(statistics::Group *parent)
2570    : statistics::Group(parent),
2571 ADD_STAT(walks, statistics::units::Count::get(),
2572 "Table walker walks requested"),
2573 ADD_STAT(walksShortDescriptor, statistics::units::Count::get(),
2574 "Table walker walks initiated with short descriptors"),
2575 ADD_STAT(walksLongDescriptor, statistics::units::Count::get(),
2576 "Table walker walks initiated with long descriptors"),
2577 ADD_STAT(walksShortTerminatedAtLevel, statistics::units::Count::get(),
2578 "Level at which table walker walks with short descriptors "
2579 "terminate"),
2580 ADD_STAT(walksLongTerminatedAtLevel, statistics::units::Count::get(),
2581 "Level at which table walker walks with long descriptors "
2582 "terminate"),
2583 ADD_STAT(squashedBefore, statistics::units::Count::get(),
2584 "Table walks squashed before starting"),
2585 ADD_STAT(squashedAfter, statistics::units::Count::get(),
2586 "Table walks squashed after completion"),
2587 ADD_STAT(walkWaitTime, statistics::units::Tick::get(),
2588 "Table walker wait (enqueue to first request) latency"),
2589 ADD_STAT(walkServiceTime, statistics::units::Tick::get(),
2590 "Table walker service (enqueue to completion) latency"),
2591 ADD_STAT(pendingWalks, statistics::units::Tick::get(),
2592 "Table walker pending requests distribution"),
2593 ADD_STAT(pageSizes, statistics::units::Count::get(),
2594 "Table walker page sizes translated"),
2595 ADD_STAT(requestOrigin, statistics::units::Count::get(),
2596 "Table walker requests started/completed, data/inst")
2597{
2598    walksShortDescriptor
2599        .flags(statistics::nozero);
2600
2601    walksLongDescriptor
2602        .flags(statistics::nozero);
2603
2604    walksShortTerminatedAtLevel
2605        .init(2)
2606        .flags(statistics::nozero);
2607
2608    walksShortTerminatedAtLevel.subname(0, "Level1");
2609    walksShortTerminatedAtLevel.subname(1, "Level2");
2610
2611    walksLongTerminatedAtLevel
2612        .init(4)
2613        .flags(statistics::nozero);
2614    walksLongTerminatedAtLevel.subname(0, "Level0");
2615    walksLongTerminatedAtLevel.subname(1, "Level1");
2616    walksLongTerminatedAtLevel.subname(2, "Level2");
2617    walksLongTerminatedAtLevel.subname(3, "Level3");
2618
2619    squashedBefore
2620        .flags(statistics::nozero);
2621
2622    squashedAfter
2623        .flags(statistics::nozero);
2624
2625    walkWaitTime
2626        .init(16)
2627        .flags(statistics::pdf | statistics::nozero | statistics::nonan);
2628
2629    walkServiceTime
2630        .init(16)
2631        .flags(statistics::pdf | statistics::nozero | statistics::nonan);
2632
2633    pendingWalks
2634        .init(16)
2635        .flags(statistics::pdf | statistics::dist | statistics::nozero |
2636               statistics::nonan);
2637
2638    pageSizes // see DDI 0487A D4-1661
2639        .init(10)
2640        .flags(statistics::total | statistics::pdf | statistics::dist |
2641               statistics::nozero);
2642    pageSizes.subname(0, "4KiB");
2643    pageSizes.subname(1, "16KiB");
2644    pageSizes.subname(2, "64KiB");
2645    pageSizes.subname(3, "1MiB");
2646    pageSizes.subname(4, "2MiB");
2647    pageSizes.subname(5, "16MiB");
2648    pageSizes.subname(6, "32MiB");
2649    pageSizes.subname(7, "512MiB");
2650    pageSizes.subname(8, "1GiB");
2651    pageSizes.subname(9, "4TiB");
2652
2653    requestOrigin
2654        .init(2,2) // Instruction/Data, requests/completed
2655        .flags(statistics::total);
2656    requestOrigin.subname(0,"Requested");
2657    requestOrigin.subname(1,"Completed");
2658    requestOrigin.ysubname(0,"Data");
2659    requestOrigin.ysubname(1,"Inst");
2660}
2661
2662} // namespace gem5