table_walker.cc
1/*
2 * Copyright (c) 2010, 2012-2019, 2021-2024 Arm Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37#include "arch/arm/table_walker.hh"
38
39#include <cassert>
40#include <memory>
41
42#include "arch/arm/faults.hh"
43#include "arch/arm/mmu.hh"
44#include "arch/arm/mpam.hh"
45#include "arch/arm/pagetable.hh"
46#include "arch/arm/system.hh"
47#include "arch/arm/tlb.hh"
48#include "base/compiler.hh"
49#include "cpu/base.hh"
50#include "cpu/thread_context.hh"
51#include "debug/Checkpoint.hh"
52#include "debug/Drain.hh"
53#include "debug/PageTableWalker.hh"
54#include "debug/TLB.hh"
55#include "debug/TLBVerbose.hh"
56#include "sim/system.hh"
57
58namespace gem5
59{
60
61using namespace ArmISA;
62
63TableWalker::TableWalker(const Params &p)
64 : ClockedObject(p),
65 requestorId(p.sys->getRequestorId(this)),
66 port(new Port(*this)),
67 isStage2(p.is_stage2), tlb(NULL),
68 currState(NULL), pending(false),
69 numSquashable(p.num_squash_per_cycle),
70 release(nullptr),
71 stats(this),
72 pendingReqs(0),
73 pendingChangeTick(curTick()),
74 doL1DescEvent([this]{ doL1DescriptorWrapper(); }, name()),
75 doL2DescEvent([this]{ doL2DescriptorWrapper(); }, name()),
76 doL0LongDescEvent([this]{ doL0LongDescriptorWrapper(); }, name()),
77 doL1LongDescEvent([this]{ doL1LongDescriptorWrapper(); }, name()),
78 doL2LongDescEvent([this]{ doL2LongDescriptorWrapper(); }, name()),
79 doL3LongDescEvent([this]{ doL3LongDescriptorWrapper(); }, name()),
80 LongDescEventByLevel { &doL0LongDescEvent, &doL1LongDescEvent,
81 &doL2LongDescEvent, &doL3LongDescEvent },
82 doProcessEvent([this]{ processWalkWrapper(); }, name()),
83 test(nullptr)
84{
85 sctlr = 0;
86
87 // Cache system-level properties
88 if (FullSystem) {
89 ArmSystem *arm_sys = dynamic_cast<ArmSystem *>(p.sys);
90 assert(arm_sys);
91 _physAddrRange = arm_sys->physAddrRange();
92 _haveLargeAsid64 = arm_sys->haveLargeAsid64();
93 } else {
94 _haveLargeAsid64 = false;
95 _physAddrRange = 48;
96 }
97
98}
99
104
105TableWalker::Port &
106TableWalker::getTableWalkerPort()
107{
108 return static_cast<Port&>(getPort("port"));
109}
110
111Port &
112TableWalker::getPort(const std::string &if_name, PortID idx)
113{
114 if (if_name == "port") {
115 return *port;
116 }
117 return ClockedObject::getPort(if_name, idx);
118}
119
120void
121TableWalker::setMmu(MMU *_mmu)
122{
123 mmu = _mmu;
124 release = mmu->release();
125}
126
127TableWalker::WalkerState::WalkerState() :
128 tc(nullptr), aarch64(false), regime(TranslationRegime::EL10),
129 physAddrRange(0), req(nullptr),
130 asid(0), vmid(0), transState(nullptr),
131 vaddr(0), vaddr_tainted(0),
132 sctlr(0), scr(0), cpsr(0), tcr(0),
133 htcr(0), hcr(0), vtcr(0),
134 isWrite(false), isFetch(false), isSecure(false),
135 isUncacheable(false), longDescData(std::nullopt),
136 hpd(false), sh(0), irgn(0), orgn(0), stage2Req(false),
137 stage2Tran(nullptr), timing(false), functional(false),
138 mode(BaseMMU::Read), tranType(MMU::NormalTran), l2Desc(l1Desc),
139 delayed(false), tableWalker(nullptr)
140{
141}
142
143TableWalker::Port::Port(TableWalker &_walker)
144 : QueuedRequestPort(_walker.name() + ".port", reqQueue, snoopRespQueue),
145 owner{_walker},
146 reqQueue(_walker, *this),
147 snoopRespQueue(_walker, *this)
148{
149}
150
151PacketPtr
152TableWalker::Port::createPacket(
153 const RequestPtr &req,
154 uint8_t *data, Tick delay,
155 Event *event)
156{
157 PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
158 pkt->dataStatic(data);
159
160 auto state = new TableWalkerState;
161 state->event = event;
162 state->delay = delay;
163
164 pkt->senderState = state;
165 return pkt;
166}
167
168void
169TableWalker::Port::sendFunctionalReq(
170 const RequestPtr &req, uint8_t *data)
171{
172 auto pkt = createPacket(req, data, 0, nullptr);
173
174 sendFunctional(pkt);
175
176 handleRespPacket(pkt);
177}
178
179void
180TableWalker::Port::sendAtomicReq(
181 const RequestPtr &req,
182 uint8_t *data, Tick delay)
183{
184 auto pkt = createPacket(req, data, delay, nullptr);
185
186 Tick lat = sendAtomic(pkt);
187
188 handleRespPacket(pkt, lat);
189}
190
191void
192TableWalker::Port::sendTimingReq(
193 const RequestPtr &req,
194 uint8_t *data, Tick delay,
195 Event *event)
196{
197 auto pkt = createPacket(req, data, delay, event);
198
199 schedTimingReq(pkt, curTick());
200}
201
202bool
203TableWalker::Port::recvTimingResp(PacketPtr pkt)
204{
205 // We shouldn't ever get a cacheable block in Modified state.
206 assert(pkt->req->isUncacheable() ||
207 !(pkt->cacheResponding() && !pkt->hasSharers()));
208
209 handleRespPacket(pkt);
210
211 return true;
212}
213
214void
215TableWalker::Port::handleRespPacket(PacketPtr pkt, Tick delay)
216{
217 // Should always see a response with a sender state.
218 assert(pkt->isResponse());
219
220 // Get the DMA sender state.
221 auto *state = dynamic_cast<TableWalkerState*>(pkt->senderState);
222 assert(state);
223
224 handleResp(state, pkt->getAddr(), pkt->req->getSize(), delay);
225
226 delete pkt;
227}
228
229void
230TableWalker::Port::handleResp(TableWalkerState *state, Addr addr,
231 Addr size, Tick delay)
232{
233 if (state->event) {
234 owner.schedule(state->event, curTick() + delay);
235 }
236 delete state;
237}
238
239void
240TableWalker::completeDrain()
241{
242 if (drainState() == DrainState::Draining &&
243 stateQueues[LookupLevel::L0].empty() &&
244 stateQueues[LookupLevel::L1].empty() &&
245 stateQueues[LookupLevel::L2].empty() &&
246 stateQueues[LookupLevel::L3].empty() &&
247 pendingQueue.empty()) {
248
249 DPRINTF(Drain, "TableWalker done draining, processing drain event\n");
250 signalDrainDone();
251 }
252}
253
254DrainState
255TableWalker::drain()
256{
257 bool state_queues_not_empty = false;
258
259 for (int i = 0; i < LookupLevel::Num_ArmLookupLevel; ++i) {
260 if (!stateQueues[i].empty()) {
261 state_queues_not_empty = true;
262 break;
263 }
264 }
265
266 if (state_queues_not_empty || pendingQueue.size()) {
267 DPRINTF(Drain, "TableWalker not drained\n");
268 return DrainState::Draining;
269 } else {
270 DPRINTF(Drain, "TableWalker free, no need to drain\n");
271 return DrainState::Drained;
272 }
273}
274
275void
276TableWalker::drainResume()
277{
278 if (params().sys->isTimingMode() && currState) {
279 delete currState;
280 currState = NULL;
281 pendingChange();
282 }
283}
284
285bool
286TableWalker::uncacheableWalk() const
287{
288 bool disable_cacheability = isStage2 ?
289 currState->hcr.cd :
290 currState->sctlr.c == 0;
291 return disable_cacheability || currState->isUncacheable;
292}
293
294Fault
295TableWalker::walk(const RequestPtr &_req, ThreadContext *_tc, uint16_t _asid,
296 vmid_t _vmid, MMU::Mode _mode,
297 MMU::Translation *_trans, bool _timing, bool _functional,
298 bool secure, MMU::ArmTranslationType tranType,
299 bool _stage2Req, const TlbEntry *walk_entry)
300{
301 assert(!(_functional && _timing));
302 ++stats.walks;
303
304 WalkerState *savedCurrState = NULL;
305
306 if (!currState && !_functional) {
307 // For atomic mode, a new WalkerState instance should only be created
308 // once per TLB. For timing mode, a new instance is generated for every
309 // TLB miss.
310 DPRINTF(PageTableWalker, "creating new instance of WalkerState\n");
311
312 currState = new WalkerState();
313 currState->tableWalker = this;
314 } else if (_functional) {
315 // If we are mixing functional mode with timing (or even
316 // atomic), we need to be careful and clean up after
317 // ourselves to not risk getting into an inconsistent state.
318 DPRINTF(PageTableWalker,
319 "creating functional instance of WalkerState\n");
320 savedCurrState = currState;
321 currState = new WalkerState();
322 currState->tableWalker = this;
323 } else if (_timing) {
324 // This is a translation that was completed and then faulted again
325 // because some underlying parameters that affect the translation
326 // changed out from under us (e.g. asid). It will either be a
327 // misprediction, in which case nothing will happen, or we'll use
328 // this fault to re-execute the faulting instruction, which should clean
329 // up everything.
330 if (currState->vaddr_tainted == _req->getVaddr()) {
332 return std::make_shared<ReExec>();
333 }
334 }
336
338 currState->tc = _tc;
339 currState->el =
342 tranType);
343
344 if (isStage2) {
346 currState->aarch64 = ELIs64(_tc, EL2);
347 } else {
352 }
353 currState->transState = _trans;
354 currState->req = _req;
355 if (walk_entry) {
356 currState->walkEntry = *walk_entry;
357 } else {
359 }
361 currState->asid = _asid;
362 currState->vmid = _vmid;
363 currState->timing = _timing;
364 currState->functional = _functional;
365 currState->mode = _mode;
366 currState->tranType = tranType;
367 currState->isSecure = secure;
368 currState->secureLookup = secure;
370
373 currState->vaddr_tainted = currState->req->getVaddr();
374 if (currState->aarch64)
378 else
380
381 if (currState->aarch64) {
383 if (isStage2) {
385 if (currState->secureLookup) {
386 currState->vtcr =
388 } else {
389 currState->vtcr =
391 }
392 } else switch (currState->regime) {
396 break;
399 assert(release->has(ArmExtension::VIRTUALIZATION));
402 break;
404 assert(release->has(ArmExtension::SECURITY));
407 break;
408 default:
409 panic("Invalid translation regime");
410 break;
411 }
412 } else {
420 }
422
425
427
428 currState->stage2Req = _stage2Req && !isStage2;
429
430 bool hyp = currState->el == EL2;
431 bool long_desc_format = currState->aarch64 || hyp || isStage2 ||
433
434 if (long_desc_format) {
435 // Helper variables used for hierarchical permissions
437 currState->longDescData->rwTable = true;
438 currState->longDescData->userTable = true;
439 currState->longDescData->xnTable = false;
440 currState->longDescData->pxnTable = false;
442 } else {
443 currState->longDescData = std::nullopt;
445 }
446
447 if (currState->timing && (pending || pendingQueue.size())) {
448 pendingQueue.push_back(currState);
449 currState = NULL;
450 pendingChange();
451 return NoFault;
452 } else {
453 if (currState->timing) {
454 pending = true;
455 pendingChange();
456 }
457
458 Fault fault = NoFault;
459 if (currState->aarch64) {
460 fault = processWalkAArch64();
461 } else if (long_desc_format) {
462 fault = processWalkLPAE();
463 } else {
464 fault = processWalk();
465 }
466
467 // If this was a functional non-timing access restore state to
468 // how we found it.
469 if (currState->functional) {
470 delete currState;
471 currState = savedCurrState;
472 } else if (currState->timing) {
473 if (fault) {
474 pending = false;
475 nextWalk(currState->tc);
476 delete currState;
477 currState = NULL;
478 } else {
479 // Either we are using the long descriptor, which means we
480 // need to extract the queue index from longDesc, or we are
481 // using the short. In the latter we always start at L1
482 LookupLevel curr_lookup_level = long_desc_format ?
483 currState->longDesc.lookupLevel : LookupLevel::L1;
484
485 stashCurrState(curr_lookup_level);
486 }
487 } else if (fault) {
488 currState->tc = NULL;
489 currState->req = NULL;
490 }
491
492 return fault;
493 }
494}
495
496void
497TableWalker::processWalkWrapper()
498{
499 assert(!currState);
500 assert(pendingQueue.size());
502 currState = pendingQueue.front();
503
504 // Check if a previous walk filled this request already
505 // @TODO Should this always be the TLB or should we look in the stage2 TLB?
507 currState->vmid, currState->isSecure, true, false,
509
510 // Check if we still need to have a walk for this request. If the requesting
511 // instruction has been squashed, or a previous walk has filled the TLB with
512 // a match, we just want to get rid of the walk. The latter could happen
513 // when there are multiple outstanding misses to a single page and a
514 // previous request has been successfully translated.
515 if (!currState->transState->squashed() && (!te || te->partial)) {
516 // We've got a valid request, let's process it
517 pending = true;
518 pendingQueue.pop_front();
519
520 bool long_desc_format = currState->aarch64 || currState->el == EL2 ||
522
523 if (te && te->partial) {
524 currState->walkEntry = *te;
525 }
526 Fault fault;
527 if (currState->aarch64) {
528 fault = processWalkAArch64();
529 } else if (long_desc_format) {
530 fault = processWalkLPAE();
531 } else {
532 fault = processWalk();
533 }
534
535 if (fault != NoFault) {
536 pending = false;
537 pendingChange();
538
541
542 delete currState;
543 currState = NULL;
544 } else {
545 LookupLevel curr_lookup_level = long_desc_format ?
546 currState->longDesc.lookupLevel : LookupLevel::L1;
547
548 stashCurrState(curr_lookup_level);
549 }
550 return;
551 }
552
553
554 // If the instruction that we were translating for has been
555 // squashed, we shouldn't bother.
556 unsigned num_squashed = 0;
557 ThreadContext *tc = currState->tc;
558 while ((num_squashed < numSquashable) && currState &&
559 (currState->transState->squashed() ||
560 (te && !te->partial))) {
561 pendingQueue.pop_front();
562 num_squashed++;
564
565 DPRINTF(TLB, "Squashing table walk for address %#x\n",
566 currState->vaddr_tainted);
567
568 if (currState->transState->squashed()) {
569 // finish the translation which will delete the translation object
570 currState->transState->finish(
571 std::make_shared<UnimpFault>("Squashed Inst"),
572 currState->req, currState->tc, currState->mode);
573 } else {
574 // translate the request now that we know it will work
579 }
580
581 // delete the current request
582 delete currState;
583
584 // peek at the next one
585 if (pendingQueue.size()) {
586 currState = pendingQueue.front();
590 } else {
591 // Terminate the loop, nothing more to do
592 currState = NULL;
593 }
594 }
596
597 // if we still have pending translations, schedule more work
598 nextWalk(tc);
599 currState = NULL;
600}
601
602Fault
603TableWalker::processWalk()
604{
605 Addr ttbr = 0;
606
607 // For short descriptors, the walk cacheability configuration
608 // (the TTBR IRGN bits) is taken from TTBR1.
611
612 const auto irgn0_mask = 0x1;
613 const auto irgn1_mask = 0x40;
614 currState->isUncacheable = (ttbr1 & (irgn0_mask | irgn1_mask)) == 0;
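 // The TTBR IRGN field is split across bits 6 and 0 (Multiprocessing
 // Extensions encoding), hence the two masks above; IRGN == 0 means
 // Inner Non-cacheable, so the walk itself bypasses the caches.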
615
616 // If translation isn't enabled, we shouldn't be here
617 assert(currState->sctlr.m || isStage2);
618 const bool is_atomic = currState->req->isAtomic();
619 const bool have_security = release->has(ArmExtension::SECURITY);
620
621 DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
623 32 - currState->ttbcr.n));
624
626
627 if (currState->ttbcr.n == 0 || !mbits(currState->vaddr, 31,
628 32 - currState->ttbcr.n)) {
629 DPRINTF(TLB, " - Selecting TTBR0\n");
630 // Check if table walk is allowed when Security Extensions are enabled
631 if (have_security && currState->ttbcr.pd0) {
632 if (currState->isFetch)
633 return std::make_shared<PrefetchAbort>(
635 ArmFault::TranslationLL + LookupLevel::L1,
636 isStage2,
638 else
639 return std::make_shared<DataAbort>(
642 is_atomic ? false : currState->isWrite,
643 ArmFault::TranslationLL + LookupLevel::L1, isStage2,
645 }
648 } else {
649 DPRINTF(TLB, " - Selecting TTBR1\n");
650 // Check if table walk is allowed when Security Extensions are enabled
651 if (have_security && currState->ttbcr.pd1) {
652 if (currState->isFetch)
653 return std::make_shared<PrefetchAbort>(
655 ArmFault::TranslationLL + LookupLevel::L1,
656 isStage2,
658 else
659 return std::make_shared<DataAbort>(
662 is_atomic ? false : currState->isWrite,
663 ArmFault::TranslationLL + LookupLevel::L1, isStage2,
665 }
666 ttbr = ttbr1;
667 currState->ttbcr.n = 0;
668 }
669
670 Addr l1desc_addr = mbits(ttbr, 31, 14 - currState->ttbcr.n) |
671 (bits(currState->vaddr, 31 - currState->ttbcr.n, 20) << 2);
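 // The short-descriptor L1 table base comes from TTBR[31:14-N] and the
 // index from VA[31-N:20]; entries are 4 bytes, hence the shift by 2 to
 // form a byte address.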
672 DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr,
673 currState->isSecure ? "s" : "ns");
674
675 Request::Flags flag = Request::PT_WALK;
676 if (uncacheableWalk()) {
677 flag.set(Request::UNCACHEABLE);
678 }
679
680 if (currState->secureLookup) {
681 flag.set(Request::SECURE);
682 }
683
684 fetchDescriptor(
685 l1desc_addr, currState->l1Desc,
686 sizeof(uint32_t), flag, LookupLevel::L1,
687 &doL1DescEvent,
688 &TableWalker::doL1Descriptor);
689
690 return currState->fault;
691}
692
693Fault
694TableWalker::processWalkLPAE()
695{
696 Addr ttbr, ttbr0_max, ttbr1_min, desc_addr;
697 int tsz, n;
698 LookupLevel start_lookup_level = LookupLevel::L1;
699
700 DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n",
702
704
706 if (currState->secureLookup)
707 flag.set(Request::SECURE);
708
709 // Work out which base address register to use; if in hyp mode we
710 // always use HTTBR.
711 if (isStage2) {
712 DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n");
713 ttbr = currState->tc->readMiscReg(MISCREG_VTTBR);
714 tsz = sext<4>(currState->vtcr.t0sz);
715 start_lookup_level = currState->vtcr.sl0 ?
716 LookupLevel::L1 : LookupLevel::L2;
717 currState->isUncacheable = currState->vtcr.irgn0 == 0;
718 } else if (currState->el == EL2) {
719 DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n");
720 ttbr = currState->tc->readMiscReg(MISCREG_HTTBR);
721 tsz = currState->htcr.t0sz;
722 currState->isUncacheable = currState->htcr.irgn0 == 0;
723 } else {
725
726 // Determine boundaries of TTBR0/1 regions
727 if (currState->ttbcr.t0sz)
728 ttbr0_max = (1ULL << (32 - currState->ttbcr.t0sz)) - 1;
729 else if (currState->ttbcr.t1sz)
730 ttbr0_max = (1ULL << 32) -
731 (1ULL << (32 - currState->ttbcr.t1sz)) - 1;
732 else
733 ttbr0_max = (1ULL << 32) - 1;
734 if (currState->ttbcr.t1sz)
735 ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz));
736 else
737 ttbr1_min = (1ULL << (32 - currState->ttbcr.t0sz));
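 // In short: TTBR0 covers VAs up to 2^(32-T0SZ) - 1 and TTBR1 covers
 // VAs from 2^32 - 2^(32-T1SZ) upward; an address falling between the
 // two regions raises a translation fault below.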
738
739 const bool is_atomic = currState->req->isAtomic();
740
741 // The following code snippet selects the appropriate translation table base
742 // address (TTBR0 or TTBR1) and the appropriate starting lookup level
743 // depending on the address range supported by the translation table (ARM
744 // ARM issue C B3.6.4)
745 if (currState->vaddr <= ttbr0_max) {
746 DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n");
747 // Check if table walk is allowed
748 if (currState->ttbcr.epd0) {
749 if (currState->isFetch)
750 return std::make_shared<PrefetchAbort>(
752 ArmFault::TranslationLL + LookupLevel::L1,
753 isStage2,
755 else
756 return std::make_shared<DataAbort>(
759 is_atomic ? false : currState->isWrite,
760 ArmFault::TranslationLL + LookupLevel::L1,
761 isStage2,
763 }
766 tsz = currState->ttbcr.t0sz;
767 currState->isUncacheable = currState->ttbcr.irgn0 == 0;
768 if (ttbr0_max < (1ULL << 30)) // Upper limit < 1 GiB
769 start_lookup_level = LookupLevel::L2;
770 } else if (currState->vaddr >= ttbr1_min) {
771 DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
772 // Check if table walk is allowed
773 if (currState->ttbcr.epd1) {
774 if (currState->isFetch)
775 return std::make_shared<PrefetchAbort>(
777 ArmFault::TranslationLL + LookupLevel::L1,
778 isStage2,
780 else
781 return std::make_shared<DataAbort>(
784 is_atomic ? false : currState->isWrite,
785 ArmFault::TranslationLL + LookupLevel::L1,
786 isStage2,
788 }
791 tsz = currState->ttbcr.t1sz;
792 currState->isUncacheable = currState->ttbcr.irgn1 == 0;
793 // Lower limit >= 3 GiB
794 if (ttbr1_min >= (1ULL << 31) + (1ULL << 30))
795 start_lookup_level = LookupLevel::L2;
796 } else {
797 // Out of boundaries -> translation fault
798 if (currState->isFetch)
799 return std::make_shared<PrefetchAbort>(
801 ArmFault::TranslationLL + LookupLevel::L1,
802 isStage2,
804 else
805 return std::make_shared<DataAbort>(
808 is_atomic ? false : currState->isWrite,
809 ArmFault::TranslationLL + LookupLevel::L1,
811 }
812
813 }
814
815 // Perform lookup (ARM ARM issue C B3.6.6)
816 if (start_lookup_level == LookupLevel::L1) {
817 n = 5 - tsz;
818 desc_addr = mbits(ttbr, 39, n) |
819 (bits(currState->vaddr, n + 26, 30) << 3);
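 // Long-format entries are 8 bytes, hence the shift by 3; VA bits
 // [n+26:30] supply the first-level table index.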
820 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
821 desc_addr, currState->isSecure ? "s" : "ns");
822 } else {
823 // Skip first-level lookup
824 n = (tsz >= 2 ? 14 - tsz : 12);
825 desc_addr = mbits(ttbr, 39, n) |
826 (bits(currState->vaddr, n + 17, 21) << 3);
827 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
828 desc_addr, currState->isSecure ? "s" : "ns");
829 }
830
831 if (uncacheableWalk()) {
833 }
834
835 currState->longDesc.lookupLevel = start_lookup_level;
836 currState->longDesc.aarch64 = false;
838
840 desc_addr, currState->longDesc,
841 sizeof(uint64_t), flag, start_lookup_level,
842 LongDescEventByLevel[start_lookup_level],
844
845 return currState->fault;
846}
847
848bool
850 GrainSize tg, int tsz, bool low_range)
851{
852 // The effective maximum input size is 48 if ARMv8.2-LVA is not
853 // supported or if the translation granule that is in use is 4KB or
854 // 16KB in size. When ARMv8.2-LVA is supported, for the 64KB
855 // translation granule size only, the effective maximum input size
856 // is 52.
857 const bool have_lva = HaveExt(currState->tc, ArmExtension::FEAT_LVA);
858 int in_max = (have_lva && tg == Grain64KB) ? 52 : 48;
859 int in_min = 64 - (tg == Grain64KB ? 47 : 48);
860
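 // The VA passes the size check only if every bit above the input size
 // is 0 for the low (TTBR0) range or 1 for the high (TTBR1) range.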
861 return tsz > in_max || tsz < in_min || (low_range ?
862 bits(currState->vaddr, top_bit, tsz) != 0x0 :
863 bits(currState->vaddr, top_bit, tsz) != mask(top_bit - tsz + 1));
864}
865
866bool
868{
869 return (pa_range != _physAddrRange &&
870 bits(addr, _physAddrRange - 1, pa_range));
871}
872
873Fault
874TableWalker::processWalkAArch64()
875{
876 assert(currState->aarch64);
877
878 DPRINTF(TLB, "Beginning table walk for address %#llx, TCR: %#llx\n",
880
882
883 // Determine TTBR, table size, granule size and phys. address range
884 Addr ttbr = 0;
885 int tsz = 0, ps = 0;
886 GrainSize tg = Grain4KB; // grain size computed from tg* field
887 bool fault = false;
888
889 int top_bit = computeAddrTop(currState->tc,
890 bits(currState->vaddr, 55),
892 currState->tcr,
893 currState->el);
894
895 bool vaddr_fault = false;
896 switch (currState->regime) {
897 case TranslationRegime::EL10:
898 if (isStage2) {
899 if (currState->secureLookup) {
900 DPRINTF(TLB, " - Selecting VSTTBR_EL2 (AArch64 stage 2)\n");
901 ttbr = currState->tc->readMiscReg(MISCREG_VSTTBR_EL2);
902 } else {
903 DPRINTF(TLB, " - Selecting VTTBR_EL2 (AArch64 stage 2)\n");
904 ttbr = currState->tc->readMiscReg(MISCREG_VTTBR_EL2);
905 }
906 tsz = 64 - currState->vtcr.t0sz64;
907 tg = GrainMap_tg0[currState->vtcr.tg0];
908
909 ps = currState->vtcr.ps;
910 currState->sh = currState->vtcr.sh0;
911 currState->irgn = currState->vtcr.irgn0;
912 currState->orgn = currState->vtcr.orgn0;
913 } else {
914 switch (bits(currState->vaddr, top_bit)) {
915 case 0:
916 DPRINTF(TLB, " - Selecting TTBR0_EL1 (AArch64)\n");
917 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL1);
918 tsz = 64 - currState->tcr.t0sz;
919 tg = GrainMap_tg0[currState->tcr.tg0];
920 currState->hpd = currState->tcr.hpd0;
921 currState->sh = currState->tcr.sh0;
922 currState->irgn = currState->tcr.irgn0;
923 currState->orgn = currState->tcr.orgn0;
925 top_bit, tg, tsz, true);
926
927 if (vaddr_fault || currState->tcr.epd0)
928 fault = true;
929 break;
930 case 0x1:
931 DPRINTF(TLB, " - Selecting TTBR1_EL1 (AArch64)\n");
932 ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL1);
933 tsz = 64 - currState->tcr.t1sz;
934 tg = GrainMap_tg1[currState->tcr.tg1];
935 currState->hpd = currState->tcr.hpd1;
936 currState->sh = currState->tcr.sh1;
937 currState->irgn = currState->tcr.irgn1;
938 currState->orgn = currState->tcr.orgn1;
940 top_bit, tg, tsz, false);
941
942 if (vaddr_fault || currState->tcr.epd1)
943 fault = true;
944 break;
945 default:
946 // top two bits must be all 0s or all 1s, else invalid addr
947 fault = true;
948 }
949 ps = currState->tcr.ips;
950 }
951 break;
952 case TranslationRegime::EL20:
953 case TranslationRegime::EL2:
954 switch(bits(currState->vaddr, top_bit)) {
955 case 0:
956 DPRINTF(TLB, " - Selecting TTBR0_EL2 (AArch64)\n");
957 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL2);
958 tsz = 64 - currState->tcr.t0sz;
959 tg = GrainMap_tg0[currState->tcr.tg0];
960 currState->hpd = currState->hcr.e2h ?
961 currState->tcr.hpd0 : currState->tcr.hpd;
962 currState->sh = currState->tcr.sh0;
963 currState->irgn = currState->tcr.irgn0;
964 currState->orgn = currState->tcr.orgn0;
966 top_bit, tg, tsz, true);
967
968 if (vaddr_fault || (currState->hcr.e2h && currState->tcr.epd0))
969 fault = true;
970 break;
971
972 case 0x1:
973 DPRINTF(TLB, " - Selecting TTBR1_EL2 (AArch64)\n");
974 ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL2);
975 tsz = 64 - currState->tcr.t1sz;
976 tg = GrainMap_tg1[currState->tcr.tg1];
977 currState->hpd = currState->tcr.hpd1;
978 currState->sh = currState->tcr.sh1;
979 currState->irgn = currState->tcr.irgn1;
980 currState->orgn = currState->tcr.orgn1;
982 top_bit, tg, tsz, false);
983
984 if (vaddr_fault || !currState->hcr.e2h || currState->tcr.epd1)
985 fault = true;
986 break;
987
988 default:
989 // invalid addr if top two bits are not all 0s
990 fault = true;
991 }
992 ps = currState->hcr.e2h ? currState->tcr.ips: currState->tcr.ps;
993 break;
994 case TranslationRegime::EL3:
995 switch(bits(currState->vaddr, top_bit)) {
996 case 0:
997 DPRINTF(TLB, " - Selecting TTBR0_EL3 (AArch64)\n");
998 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL3);
999 tsz = 64 - currState->tcr.t0sz;
1000 tg = GrainMap_tg0[currState->tcr.tg0];
1001 currState->hpd = currState->tcr.hpd;
1002 currState->sh = currState->tcr.sh0;
1003 currState->irgn = currState->tcr.irgn0;
1004 currState->orgn = currState->tcr.orgn0;
1006 top_bit, tg, tsz, true);
1007
1008 if (vaddr_fault)
1009 fault = true;
1010 break;
1011 default:
1012 // invalid addr if top two bits are not all 0s
1013 fault = true;
1014 }
1015 ps = currState->tcr.ps;
1016 break;
1017 }
1018
1020 currState->orgn == 0;
1021
1022 const bool is_atomic = currState->req->isAtomic();
1023
1024 if (fault) {
1025 if (currState->isFetch) {
1026 return std::make_shared<PrefetchAbort>(
1028 ArmFault::TranslationLL + LookupLevel::L0, isStage2,
1030 } else {
1031 return std::make_shared<DataAbort>(
1034 is_atomic ? false : currState->isWrite,
1035 ArmFault::TranslationLL + LookupLevel::L0,
1037 }
1038 }
1039
1040 if (tg == ReservedGrain) {
1041 warn_once("Reserved granule size requested; gem5's IMPLEMENTATION "
1042 "DEFINED behavior takes this to mean 4KB granules\n");
1043 tg = Grain4KB;
1044 }
1045
1046 // Clamp the PA range to the smaller of the configured and supported values
1047 int pa_range = decodePhysAddrRange64(ps);
1048 if (pa_range > _physAddrRange) {
1049 currState->physAddrRange = _physAddrRange;
1050 } else {
1051 currState->physAddrRange = pa_range;
1052 }
1053
1054 auto [table_addr, desc_addr, start_lookup_level] = walkAddresses(
1055 ttbr, tg, tsz, pa_range);
1056
1057 // Determine physical address size and raise an Address Size Fault if
1058 // necessary
1060 DPRINTF(TLB, "Address size fault before any lookup\n");
1061 if (currState->isFetch)
1062 return std::make_shared<PrefetchAbort>(
1064 ArmFault::AddressSizeLL + start_lookup_level,
1065 isStage2,
1067 else
1068 return std::make_shared<DataAbort>(
1071 is_atomic ? false : currState->isWrite,
1072 ArmFault::AddressSizeLL + start_lookup_level,
1073 isStage2,
1075 }
1076
1077 Request::Flags flag = Request::PT_WALK;
1078 if (uncacheableWalk()) {
1079 flag.set(Request::UNCACHEABLE);
1080 }
1081
1082 if (currState->secureLookup) {
1083 flag.set(Request::SECURE);
1084 }
1085
1086 currState->longDesc.lookupLevel = start_lookup_level;
1087 currState->longDesc.aarch64 = true;
1088 currState->longDesc.grainSize = tg;
1090
1091 fetchDescriptor(desc_addr, currState->longDesc,
1092 sizeof(uint64_t), flag, start_lookup_level,
1093 LongDescEventByLevel[start_lookup_level],
1094 &TableWalker::doLongDescriptor);
1095
1096 return currState->fault;
1097}
1098
1099std::tuple<Addr, Addr, TableWalker::LookupLevel>
1100TableWalker::walkAddresses(Addr ttbr, GrainSize tg, int tsz, int pa_range)
1101{
1102 const auto* ptops = getPageTableOps(tg);
1103
1104 LookupLevel first_level = LookupLevel::Num_ArmLookupLevel;
1105 Addr table_addr = 0;
1106 Addr desc_addr = 0;
1107
1108 if (currState->walkEntry.valid) {
1109 // WalkCache hit
1110 TlbEntry* entry = &currState->walkEntry;
1111 DPRINTF(PageTableWalker,
1112 "Walk Cache hit: va=%#x, level=%d, table address=%#x\n",
1113 currState->vaddr, entry->lookupLevel, entry->pfn);
1114
1115 if (currState->longDescData.has_value()) {
1116 currState->longDescData->xnTable = entry->xn;
1117 currState->longDescData->pxnTable = entry->pxn;
1118 currState->longDescData->rwTable = bits(entry->ap, 1);
1119 currState->longDescData->userTable = bits(entry->ap, 0);
1120 }
1121
1122 table_addr = entry->pfn;
1123 first_level = (LookupLevel)(entry->lookupLevel + 1);
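 // The cached entry holds the table address for its own level, so the
 // walk resumes one level further down.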
1124 } else {
1125 // WalkCache miss
1126 first_level = isStage2 ?
1127 ptops->firstS2Level(currState->vtcr.sl0) :
1128 ptops->firstLevel(64 - tsz);
1129 panic_if(first_level == LookupLevel::Num_ArmLookupLevel,
1130 "Table walker couldn't find lookup level\n");
1131
1132 int stride = tg - 3;
1133 int base_addr_lo = 3 + tsz - stride * (3 - first_level) - tg;
1134
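 // Each level consumes (tg - 3) VA bits, since a granule holds 2^(tg-3)
 // eight-byte descriptors. With a 52-bit PA (FEAT_LPA), bits [51:48] of
 // the table base live in TTBR bits [5:2], and the base must be at least
 // 64-byte aligned, hence the lower bound of 6 on the base address bit.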
1135 if (pa_range == 52) {
1136 int z = (base_addr_lo < 6) ? 6 : base_addr_lo;
1137 table_addr = mbits(ttbr, 47, z);
1138 table_addr |= (bits(ttbr, 5, 2) << 48);
1139 } else {
1140 table_addr = mbits(ttbr, 47, base_addr_lo);
1141 }
1142 }
1143
1144 desc_addr = table_addr + ptops->index(currState->vaddr, first_level, tsz);
1145
1146 return std::make_tuple(table_addr, desc_addr, first_level);
1147}
1148
1149void
1150TableWalker::memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr,
1151 uint8_t texcb, bool s)
1152{
1153 // Note: the tc and sctlr locals are hiding the tc and sctlr class
1154 // members
1155 DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s);
1156 te.shareable = false; // default value
1157 te.nonCacheable = false;
1158 te.outerShareable = false;
1159 if (sctlr.tre == 0 || ((sctlr.tre == 1) && (sctlr.m == 0))) {
1160 switch(texcb) {
1161 case 0: // Strongly-ordered
1162 te.nonCacheable = true;
1163 te.mtype = TlbEntry::MemoryType::StronglyOrdered;
1164 te.shareable = true;
1165 te.innerAttrs = 1;
1166 te.outerAttrs = 0;
1167 break;
1168 case 1: // Shareable Device
1169 te.nonCacheable = true;
1170 te.mtype = TlbEntry::MemoryType::Device;
1171 te.shareable = true;
1172 te.innerAttrs = 3;
1173 te.outerAttrs = 0;
1174 break;
1175 case 2: // Outer and Inner Write-Through, no Write-Allocate
1176 te.mtype = TlbEntry::MemoryType::Normal;
1177 te.shareable = s;
1178 te.innerAttrs = 6;
1179 te.outerAttrs = bits(texcb, 1, 0);
1180 break;
1181 case 3: // Outer and Inner Write-Back, no Write-Allocate
1182 te.mtype = TlbEntry::MemoryType::Normal;
1183 te.shareable = s;
1184 te.innerAttrs = 7;
1185 te.outerAttrs = bits(texcb, 1, 0);
1186 break;
1187 case 4: // Outer and Inner Non-cacheable
1188 te.nonCacheable = true;
1189 te.mtype = TlbEntry::MemoryType::Normal;
1190 te.shareable = s;
1191 te.innerAttrs = 0;
1192 te.outerAttrs = bits(texcb, 1, 0);
1193 break;
1194 case 5: // Reserved
1195 panic("Reserved texcb value!\n");
1196 break;
1197 case 6: // Implementation Defined
1198 panic("Implementation-defined texcb value!\n");
1199 break;
1200 case 7: // Outer and Inner Write-Back, Write-Allocate
1201 te.mtype = TlbEntry::MemoryType::Normal;
1202 te.shareable = s;
1203 te.innerAttrs = 5;
1204 te.outerAttrs = 1;
1205 break;
1206 case 8: // Non-shareable Device
1207 te.nonCacheable = true;
1208 te.mtype = TlbEntry::MemoryType::Device;
1209 te.shareable = false;
1210 te.innerAttrs = 3;
1211 te.outerAttrs = 0;
1212 break;
1213 case 9 ... 15: // Reserved
1214 panic("Reserved texcb value!\n");
1215 break;
1216 case 16 ... 31: // Cacheable Memory
1217 te.mtype = TlbEntry::MemoryType::Normal;
1218 te.shareable = s;
1219 if (bits(texcb, 1,0) == 0 || bits(texcb, 3,2) == 0)
1220 te.nonCacheable = true;
1221 te.innerAttrs = bits(texcb, 1, 0);
1222 te.outerAttrs = bits(texcb, 3, 2);
1223 break;
1224 default:
1225 panic("More than 32 states for 5 bits?\n");
1226 }
1227 } else {
1228 assert(tc);
1229 PRRR prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR,
1230 tc, !currState->isSecure));
1231 NMRR nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR,
1232 tc, !currState->isSecure));
1233 DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr);
1234 uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0;
1235 switch(bits(texcb, 2,0)) {
1236 case 0:
1237 curr_tr = prrr.tr0;
1238 curr_ir = nmrr.ir0;
1239 curr_or = nmrr.or0;
1240 te.outerShareable = (prrr.nos0 == 0);
1241 break;
1242 case 1:
1243 curr_tr = prrr.tr1;
1244 curr_ir = nmrr.ir1;
1245 curr_or = nmrr.or1;
1246 te.outerShareable = (prrr.nos1 == 0);
1247 break;
1248 case 2:
1249 curr_tr = prrr.tr2;
1250 curr_ir = nmrr.ir2;
1251 curr_or = nmrr.or2;
1252 te.outerShareable = (prrr.nos2 == 0);
1253 break;
1254 case 3:
1255 curr_tr = prrr.tr3;
1256 curr_ir = nmrr.ir3;
1257 curr_or = nmrr.or3;
1258 te.outerShareable = (prrr.nos3 == 0);
1259 break;
1260 case 4:
1261 curr_tr = prrr.tr4;
1262 curr_ir = nmrr.ir4;
1263 curr_or = nmrr.or4;
1264 te.outerShareable = (prrr.nos4 == 0);
1265 break;
1266 case 5:
1267 curr_tr = prrr.tr5;
1268 curr_ir = nmrr.ir5;
1269 curr_or = nmrr.or5;
1270 te.outerShareable = (prrr.nos5 == 0);
1271 break;
1272 case 6:
1273 panic("Imp defined type\n");
1274 case 7:
1275 curr_tr = prrr.tr7;
1276 curr_ir = nmrr.ir7;
1277 curr_or = nmrr.or7;
1278 te.outerShareable = (prrr.nos7 == 0);
1279 break;
1280 }
1281
1282 switch(curr_tr) {
1283 case 0:
1284 DPRINTF(TLBVerbose, "StronglyOrdered\n");
1285 te.mtype = TlbEntry::MemoryType::StronglyOrdered;
1286 te.nonCacheable = true;
1287 te.innerAttrs = 1;
1288 te.outerAttrs = 0;
1289 te.shareable = true;
1290 break;
1291 case 1:
1292 DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n",
1293 prrr.ds1, prrr.ds0, s);
1294 te.mtype = TlbEntry::MemoryType::Device;
1295 te.nonCacheable = true;
1296 te.innerAttrs = 3;
1297 te.outerAttrs = 0;
1298 if (prrr.ds1 && s)
1299 te.shareable = true;
1300 if (prrr.ds0 && !s)
1301 te.shareable = true;
1302 break;
1303 case 2:
1304 DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n",
1305 prrr.ns1, prrr.ns0, s);
1306 te.mtype = TlbEntry::MemoryType::Normal;
1307 if (prrr.ns1 && s)
1308 te.shareable = true;
1309 if (prrr.ns0 && !s)
1310 te.shareable = true;
1311 break;
1312 case 3:
1313 panic("Reserved type");
1314 }
1315
1316 if (te.mtype == TlbEntry::MemoryType::Normal){
1317 switch(curr_ir) {
1318 case 0:
1319 te.nonCacheable = true;
1320 te.innerAttrs = 0;
1321 break;
1322 case 1:
1323 te.innerAttrs = 5;
1324 break;
1325 case 2:
1326 te.innerAttrs = 6;
1327 break;
1328 case 3:
1329 te.innerAttrs = 7;
1330 break;
1331 }
1332
1333 switch(curr_or) {
1334 case 0:
1335 te.nonCacheable = true;
1336 te.outerAttrs = 0;
1337 break;
1338 case 1:
1339 te.outerAttrs = 1;
1340 break;
1341 case 2:
1342 te.outerAttrs = 2;
1343 break;
1344 case 3:
1345 te.outerAttrs = 3;
1346 break;
1347 }
1348 }
1349 }
1350 DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, "
1351 "outerAttrs: %d\n",
1352 te.shareable, te.innerAttrs, te.outerAttrs);
1353 te.setAttributes(false);
1354}
1355
1356void
1358 LongDescriptor &l_descriptor)
1359{
1360 assert(release->has(ArmExtension::LPAE));
1361
1362 uint8_t attr;
1363 uint8_t sh = l_descriptor.sh();
1364 // Different format and source of attributes if this is a stage 2
1365 // translation
1366 if (isStage2) {
1367 attr = l_descriptor.memAttr();
1368 uint8_t attr_3_2 = (attr >> 2) & 0x3;
1369 uint8_t attr_1_0 = attr & 0x3;
1370
1371 DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh);
1372
1373 if (attr_3_2 == 0) {
1374 te.mtype = attr_1_0 == 0 ? TlbEntry::MemoryType::StronglyOrdered
1375 : TlbEntry::MemoryType::Device;
1376 te.outerAttrs = 0;
1377 te.innerAttrs = attr_1_0 == 0 ? 1 : 3;
1378 te.nonCacheable = true;
1379 } else {
1380 te.mtype = TlbEntry::MemoryType::Normal;
1381 te.outerAttrs = attr_3_2 == 1 ? 0 :
1382 attr_3_2 == 2 ? 2 : 1;
1383 te.innerAttrs = attr_1_0 == 1 ? 0 :
1384 attr_1_0 == 2 ? 6 : 5;
1385 te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1);
1386 }
1387 } else {
1388 uint8_t attrIndx = l_descriptor.attrIndx();
1389
1390 // LPAE always uses remapping of memory attributes, irrespective of the
1391 // value of SCTLR.TRE
1392 MiscRegIndex reg = attrIndx & 0x4 ? MISCREG_MAIR1 : MISCREG_MAIR0;
1393 int reg_as_int = snsBankedIndex(reg, currState->tc,
1394 !currState->isSecure);
1395 uint32_t mair = currState->tc->readMiscReg(reg_as_int);
1396 attr = (mair >> (8 * (attrIndx % 4))) & 0xff;
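 // AttrIndx selects one byte of the MAIR0/MAIR1 pair: MAIR0 holds
 // attribute indexes 0-3 and MAIR1 holds indexes 4-7.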
1397 uint8_t attr_7_4 = bits(attr, 7, 4);
1398 uint8_t attr_3_0 = bits(attr, 3, 0);
1399 DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx, sh, attr);
1400
1401 // Note: the memory subsystem only cares about the 'cacheable' memory
1402 // attribute. The other attributes are only used to fill the PAR
1403 // register, providing the illusion of full support
1404 te.nonCacheable = false;
1405
1406 switch (attr_7_4) {
1407 case 0x0:
1408 // Strongly-ordered or Device memory
1409 if (attr_3_0 == 0x0)
1410 te.mtype = TlbEntry::MemoryType::StronglyOrdered;
1411 else if (attr_3_0 == 0x4)
1412 te.mtype = TlbEntry::MemoryType::Device;
1413 else
1414 panic("Unpredictable behavior\n");
1415 te.nonCacheable = true;
1416 te.outerAttrs = 0;
1417 break;
1418 case 0x4:
1419 // Normal memory, Outer Non-cacheable
1420 te.mtype = TlbEntry::MemoryType::Normal;
1421 te.outerAttrs = 0;
1422 if (attr_3_0 == 0x4)
1423 // Inner Non-cacheable
1424 te.nonCacheable = true;
1425 else if (attr_3_0 < 0x8)
1426 panic("Unpredictable behavior\n");
1427 break;
1428 case 0x8:
1429 case 0x9:
1430 case 0xa:
1431 case 0xb:
1432 case 0xc:
1433 case 0xd:
1434 case 0xe:
1435 case 0xf:
1436 if (attr_7_4 & 0x4) {
1437 te.outerAttrs = (attr_7_4 & 1) ? 1 : 3;
1438 } else {
1439 te.outerAttrs = 0x2;
1440 }
1441 // Normal memory, Outer Cacheable
1442 te.mtype = TlbEntry::MemoryType::Normal;
1443 if (attr_3_0 != 0x4 && attr_3_0 < 0x8)
1444 panic("Unpredictable behavior\n");
1445 break;
1446 default:
1447 panic("Unpredictable behavior\n");
1448 break;
1449 }
1450
1451 switch (attr_3_0) {
1452 case 0x0:
1453 te.innerAttrs = 0x1;
1454 break;
1455 case 0x4:
1456 te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0;
1457 break;
1458 case 0x8:
1459 case 0x9:
1460 case 0xA:
1461 case 0xB:
1462 te.innerAttrs = 6;
1463 break;
1464 case 0xC:
1465 case 0xD:
1466 case 0xE:
1467 case 0xF:
1468 te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7;
1469 break;
1470 default:
1471 panic("Unpredictable behavior\n");
1472 break;
1473 }
1474 }
1475
1476 te.outerShareable = sh == 2;
1477 te.shareable = (sh & 0x2) ? true : false;
1478 te.setAttributes(true);
1479 te.attributes |= (uint64_t) attr << 56;
1480}
1481
1482void
1484 LongDescriptor &l_descriptor)
1485{
1486 uint8_t attr;
1487 uint8_t attr_hi;
1488 uint8_t attr_lo;
1489 uint8_t sh = l_descriptor.sh();
1490
1491 if (isStage2) {
1492 attr = l_descriptor.memAttr();
1493 uint8_t attr_hi = (attr >> 2) & 0x3;
1494 uint8_t attr_lo = attr & 0x3;
1495
1496 DPRINTF(TLBVerbose, "memAttrsAArch64 MemAttr:%#x sh:%#x\n", attr, sh);
1497
1498 if (attr_hi == 0) {
1499 te.mtype = attr_lo == 0 ? TlbEntry::MemoryType::StronglyOrdered
1500 : TlbEntry::MemoryType::Device;
1501 te.outerAttrs = 0;
1502 te.innerAttrs = attr_lo == 0 ? 1 : 3;
1503 te.nonCacheable = true;
1504 } else {
1505 te.mtype = TlbEntry::MemoryType::Normal;
1506 te.outerAttrs = attr_hi == 1 ? 0 :
1507 attr_hi == 2 ? 2 : 1;
1508 te.innerAttrs = attr_lo == 1 ? 0 :
1509 attr_lo == 2 ? 6 : 5;
1510 // Treat write-through memory as uncacheable; this is safe
1511 // but not optimal for performance.
1512 te.nonCacheable = (attr_hi == 1) || (attr_hi == 2) ||
1513 (attr_lo == 1) || (attr_lo == 2);
1514 }
1515 } else {
1516 uint8_t attrIndx = l_descriptor.attrIndx();
1517
1518 DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh);
1519
1520 // Select MAIR
1521 uint64_t mair;
1522 switch (currState->regime) {
1523 case TranslationRegime::EL10:
1524 mair = tc->readMiscReg(MISCREG_MAIR_EL1);
1525 break;
1526 case TranslationRegime::EL20:
1527 case TranslationRegime::EL2:
1528 mair = tc->readMiscReg(MISCREG_MAIR_EL2);
1529 break;
1530 case TranslationRegime::EL3:
1531 mair = tc->readMiscReg(MISCREG_MAIR_EL3);
1532 break;
1533 default:
1534 panic("Invalid exception level");
1535 break;
1536 }
1537
1538 // Select attributes
1539 attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
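 // MAIR_ELx packs eight 8-bit attribute fields; AttrIndx selects the
 // byte that applies to this translation.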
1540 attr_lo = bits(attr, 3, 0);
1541 attr_hi = bits(attr, 7, 4);
1542
1543 // Memory type
1544 te.mtype = attr_hi == 0 ? TlbEntry::MemoryType::Device : TlbEntry::MemoryType::Normal;
1545
1546 // Cacheability
1547 te.nonCacheable = false;
1548 if (te.mtype == TlbEntry::MemoryType::Device) { // Device memory
1549 te.nonCacheable = true;
1550 }
1551 // Treat write-through memory as uncacheable; this is safe
1552 // but not optimal for performance.
1553 switch (attr_hi) {
1554 case 0x1 ... 0x3: // Normal Memory, Outer Write-through transient
1555 case 0x4: // Normal memory, Outer Non-cacheable
1556 case 0x8 ... 0xb: // Normal Memory, Outer Write-through non-transient
1557 te.nonCacheable = true;
1558 }
1559 switch (attr_lo) {
1560 case 0x1 ... 0x3: // Normal Memory, Inner Write-through transient
1561 case 0x9 ... 0xb: // Normal Memory, Inner Write-through non-transient
1562 warn_if(!attr_hi, "Unpredictable behavior");
1563 [[fallthrough]];
1564 case 0x4: // Device-nGnRE memory or
1565 // Normal memory, Inner Non-cacheable
1566 case 0x8: // Device-nGRE memory or
1567 // Normal memory, Inner Write-through non-transient
1568 te.nonCacheable = true;
1569 }
1570
1571 te.shareable = sh == 2;
1572 te.outerShareable = (sh & 0x2) ? true : false;
1573 // Attributes formatted according to the 64-bit PAR
1574 te.attributes = ((uint64_t) attr << 56) |
1575 (1 << 11) | // LPAE bit
1576 (te.ns << 9) | // NS bit
1577 (sh << 7);
1578 }
1579}
1580
1581void
1582TableWalker::memAttrsWalkAArch64(TlbEntry &te)
1583{
1585 if (uncacheableWalk()) {
1586 te.shareable = 3;
1587 te.outerAttrs = 0;
1588 te.innerAttrs = 0;
1589 te.nonCacheable = true;
1590 } else {
1591 te.shareable = currState->sh;
1592 te.outerAttrs = currState->orgn;
1593 te.innerAttrs = currState->irgn;
1594 te.nonCacheable = (te.outerAttrs == 0 || te.outerAttrs == 2) &&
1595 (te.innerAttrs == 0 || te.innerAttrs == 2);
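 // IRGN/ORGN encodings 0 (Non-cacheable) and 2 (Write-Through) both end
 // up non-cacheable here: the walk is treated as cacheable only if
 // either the inner or the outer region is Write-Back.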
1596 }
1597}
1598
1599void
1600TableWalker::doL1Descriptor()
1601{
1602 if (currState->fault != NoFault) {
1603 return;
1604 }
1605
1608
1609 DPRINTF(TLB, "L1 descriptor for %#x is %#x\n",
1611 TlbEntry te;
1612
1613 const bool is_atomic = currState->req->isAtomic();
1614
1615 switch (currState->l1Desc.type()) {
1616 case L1Descriptor::Ignore:
1617 case L1Descriptor::Reserved:
1618 if (!currState->timing) {
1619 currState->tc = NULL;
1620 currState->req = NULL;
1621 }
1622 DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
1623 if (currState->isFetch)
1624 currState->fault =
1625 std::make_shared<PrefetchAbort>(
1627 ArmFault::TranslationLL + LookupLevel::L1,
1628 isStage2,
1630 else
1631 currState->fault =
1632 std::make_shared<DataAbort>(
1635 is_atomic ? false : currState->isWrite,
1636 ArmFault::TranslationLL + LookupLevel::L1, isStage2,
1638 return;
1639 case L1Descriptor::Section:
1640 if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) {
1646 currState->fault = std::make_shared<DataAbort>(
1649 is_atomic ? false : currState->isWrite,
1650 ArmFault::AccessFlagLL + LookupLevel::L1,
1651 isStage2,
1653 }
1654 if (currState->l1Desc.supersection()) {
1655 panic("Haven't implemented supersections\n");
1656 }
1657 insertTableEntry(currState->l1Desc, false);
1658 return;
1659 case L1Descriptor::PageTable:
1660 {
1661 Addr l2desc_addr;
1662 l2desc_addr = currState->l1Desc.l2Addr() |
1663 (bits(currState->vaddr, 19, 12) << 2);
1664 DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n",
1665 l2desc_addr, currState->isSecure ? "s" : "ns");
1666
1667 Request::Flags flag = Request::PT_WALK;
1668
1669 if (currState->sctlr.c == 0 || currState->isUncacheable) {
1670 flag.set(Request::UNCACHEABLE);
1671 }
1672
1673 if (currState->secureLookup)
1674 flag.set(Request::SECURE);
1675
1676 fetchDescriptor(
1677 l2desc_addr, currState->l2Desc,
1678 sizeof(uint32_t), flag, LookupLevel::L2,
1679 &doL2DescEvent,
1680 &TableWalker::doL2Descriptor);
1681
1683
1684 return;
1685 }
1686 default:
1687 panic("A new type in a 2 bit field?\n");
1688 }
1689}
1690
1691Fault
1692TableWalker::generateLongDescFault(ArmFault::FaultSource src)
1693{
1694 if (currState->isFetch) {
1695 return std::make_shared<PrefetchAbort>(
1698 isStage2,
1700 } else {
1701 return std::make_shared<DataAbort>(
1704 currState->req->isAtomic() ? false : currState->isWrite,
1705 src + currState->longDesc.lookupLevel,
1706 isStage2,
1707 ArmFault::LpaeTran);
1708 }
1709}
1710
1711void
1712TableWalker::doLongDescriptor()
1713{
1714 if (currState->fault != NoFault) {
1715 return;
1716 }
1717
1720
1721 DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n",
1724 currState->aarch64 ? "AArch64" : "long-desc.");
1725
1728 DPRINTF(PageTableWalker, "Analyzing L%d descriptor: %#llx, pxn: %d, "
1729 "xn: %d, ap: %d, af: %d, type: %d\n",
1737 } else {
1738 DPRINTF(PageTableWalker, "Analyzing L%d descriptor: %#llx, type: %d\n",
1742 }
1743
1744 TlbEntry te;
1745
1746 switch (currState->longDesc.type()) {
1747 case LongDescriptor::Invalid:
1748 DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
1751
1753 if (!currState->timing) {
1754 currState->tc = NULL;
1755 currState->req = NULL;
1756 }
1757 return;
1758
1761 {
1762 auto fault_source = ArmFault::FaultSourceInvalid;
1763 // Check for address size fault
1766
1767 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1769 fault_source = ArmFault::AddressSizeLL;
1770
1771 // Check for access fault
1772 } else if (currState->longDesc.af() == 0) {
1773
1774 DPRINTF(TLB, "L%d descriptor causing Access Fault\n",
1776 fault_source = ArmFault::AccessFlagLL;
1777 }
1778
1779 if (fault_source != ArmFault::FaultSourceInvalid) {
1780 currState->fault = generateLongDescFault(fault_source);
1781 } else {
1782 insertTableEntry(currState->longDesc, true);
1783 }
1784 }
1785 return;
1786 case LongDescriptor::Table:
1787 {
1788 // Set hierarchical permission flags
1791 currState->longDescData->rwTable =
1792 currState->longDescData->rwTable &&
1794 currState->longDescData->userTable =
1795 currState->longDescData->userTable &&
1797 currState->longDescData->xnTable =
1798 currState->longDescData->xnTable ||
1800 currState->longDescData->pxnTable =
1801 currState->longDescData->pxnTable ||
1803
1804 // Set up next level lookup
1805 Addr next_desc_addr = currState->longDesc.nextDescAddr(
1806 currState->vaddr);
1807
1808 DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
1811 next_desc_addr,
1812 currState->secureLookup ? "s" : "ns");
1813
1814 // Check for address size fault
1816 next_desc_addr, currState->physAddrRange)) {
1817 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1819
1822 return;
1823 }
1824
1825 if (mmu->hasWalkCache()) {
1826 insertPartialTableEntry(currState->longDesc);
1827 }
1828
1829 Request::Flags flag = Request::PT_WALK;
1830 if (currState->secureLookup)
1831 flag.set(Request::SECURE);
1832
1833 if (currState->sctlr.c == 0 || currState->isUncacheable) {
1834 flag.set(Request::UNCACHEABLE);
1835 }
1836
1839 Event *event = NULL;
1840 switch (L) {
1841 case LookupLevel::L1:
1842 assert(currState->aarch64);
1843 case LookupLevel::L2:
1844 case LookupLevel::L3:
1845 event = LongDescEventByLevel[L];
1846 break;
1847 default:
1848 panic("Wrong lookup level in table walk\n");
1849 break;
1850 }
1851
1852 fetchDescriptor(
1853 next_desc_addr, currState->longDesc,
1854 sizeof(uint64_t), flag, L, event,
1855 &TableWalker::doLongDescriptor);
1856
1858 }
1859 return;
1860 default:
1861 panic("A new type in a 2 bit field?\n");
1862 }
1863}
1864
1865void
1866TableWalker::doL2Descriptor()
1867{
1868 if (currState->fault != NoFault) {
1869 return;
1870 }
1871
1874
1875 DPRINTF(TLB, "L2 descriptor for %#x is %#x\n",
1877 TlbEntry te;
1878
1879 const bool is_atomic = currState->req->isAtomic();
1880
1881 if (currState->l2Desc.invalid()) {
1882 DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
1883 if (!currState->timing) {
1884 currState->tc = NULL;
1885 currState->req = NULL;
1886 }
1887 if (currState->isFetch)
1888 currState->fault = std::make_shared<PrefetchAbort>(
1890 ArmFault::TranslationLL + LookupLevel::L2,
1891 isStage2,
1893 else
1894 currState->fault = std::make_shared<DataAbort>(
1896 is_atomic ? false : currState->isWrite,
1897 ArmFault::TranslationLL + LookupLevel::L2,
1898 isStage2,
1900 return;
1901 }
1902
1903 if (currState->sctlr.afe && bits(currState->l2Desc.ap(), 0) == 0) {
1907 DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
1908 currState->sctlr.afe, currState->l2Desc.ap());
1909
1910 currState->fault = std::make_shared<DataAbort>(
1913 is_atomic ? false : currState->isWrite,
1914 ArmFault::AccessFlagLL + LookupLevel::L2, isStage2,
1916 }
1917
1918 insertTableEntry(currState->l2Desc, false);
1919}
1920
1921void
1922TableWalker::doL1DescriptorWrapper()
1923{
1924 currState = stateQueues[LookupLevel::L1].front();
1925 currState->delayed = false;
1926 // if there's a stage2 translation object we don't need it any more
1927 if (currState->stage2Tran) {
1928 delete currState->stage2Tran;
1929 currState->stage2Tran = NULL;
1930 }
1931
1932
1933 DPRINTF(PageTableWalker, "L1 Desc object host addr: %p\n",
1935 DPRINTF(PageTableWalker, "L1 Desc object data: %08x\n",
1937
1938 DPRINTF(PageTableWalker, "calling doL1Descriptor for vaddr:%#x\n",
1939 currState->vaddr_tainted);
1940 doL1Descriptor();
1941
1942 stateQueues[LookupLevel::L1].pop_front();
1943 // Check if fault was generated
1944 if (currState->fault != NoFault) {
1948
1949 pending = false;
1950 pendingChange();
1951
1952 currState->req = NULL;
1953 currState->tc = NULL;
1954 currState->delayed = false;
1955 delete currState;
1956 }
1957 else if (!currState->delayed) {
1958 // delay is not set so there is no L2 to do
1959 // Don't finish the translation if a stage 2 look up is underway
1961 DPRINTF(PageTableWalker, "calling translateTiming again\n");
1962
1966
1968
1969 pending = false;
1970 pendingChange();
1971
1972 currState->req = NULL;
1973 currState->tc = NULL;
1974 currState->delayed = false;
1975 delete currState;
1976 } else {
1977 // need to do L2 descriptor
1978 stashCurrState(LookupLevel::L2);
1979 }
1980 currState = NULL;
1981}
1982
1983void
1984TableWalker::doL2DescriptorWrapper()
1985{
1986 currState = stateQueues[LookupLevel::L2].front();
1987 assert(currState->delayed);
1988 // if there's a stage2 translation object we don't need it any more
1989 if (currState->stage2Tran) {
1990 delete currState->stage2Tran;
1991 currState->stage2Tran = NULL;
1992 }
1993
1994 DPRINTF(PageTableWalker, "calling doL2Descriptor for vaddr:%#x\n",
1995 currState->vaddr_tainted);
1996 doL2Descriptor();
1997
1998 // Check if fault was generated
1999 if (currState->fault != NoFault) {
2003 } else {
2005 DPRINTF(PageTableWalker, "calling translateTiming again\n");
2006
2010
2012 }
2013
2014
2015 stateQueues[LookupLevel::L2].pop_front();
2016 pending = false;
2017 pendingChange();
2018
2019 currState->req = NULL;
2020 currState->tc = NULL;
2021 currState->delayed = false;
2022
2023 delete currState;
2024 currState = NULL;
2025}
2026
2027void
2028TableWalker::doL0LongDescriptorWrapper()
2029{
2030 doLongDescriptorWrapper(LookupLevel::L0);
2031}
2032
2033void
2034TableWalker::doL1LongDescriptorWrapper()
2035{
2036 doLongDescriptorWrapper(LookupLevel::L1);
2037}
2038
2039void
2040TableWalker::doL2LongDescriptorWrapper()
2041{
2042 doLongDescriptorWrapper(LookupLevel::L2);
2043}
2044
2045void
2046TableWalker::doL3LongDescriptorWrapper()
2047{
2048 doLongDescriptorWrapper(LookupLevel::L3);
2049}
2050
2051void
2052TableWalker::doLongDescriptorWrapper(LookupLevel curr_lookup_level)
2053{
2054 currState = stateQueues[curr_lookup_level].front();
2055 assert(curr_lookup_level == currState->longDesc.lookupLevel);
2056 currState->delayed = false;
2057
2058 // if there's a stage2 translation object we don't need it any more
2059 if (currState->stage2Tran) {
2060 delete currState->stage2Tran;
2061 currState->stage2Tran = NULL;
2062 }
2063
2064 DPRINTF(PageTableWalker, "calling doLongDescriptor for vaddr:%#x\n",
2065 currState->vaddr_tainted);
2066 doLongDescriptor();
2067
2068 stateQueues[curr_lookup_level].pop_front();
2069
2070 if (currState->fault != NoFault) {
2071 // A fault was generated
2074
2075 pending = false;
2076 pendingChange();
2077
2078 currState->req = NULL;
2079 currState->tc = NULL;
2080 currState->delayed = false;
2081 delete currState;
2082 } else if (!currState->delayed) {
2083 // No additional lookups required
2084 DPRINTF(PageTableWalker, "calling translateTiming again\n");
2086
2090
2091 stats.walksLongTerminatedAtLevel[(unsigned) curr_lookup_level]++;
2092
2093 pending = false;
2094 pendingChange();
2095
2096 currState->req = NULL;
2097 currState->tc = NULL;
2098 currState->delayed = false;
2099 delete currState;
2100 } else {
2101 if (curr_lookup_level >= LookupLevel::Num_ArmLookupLevel - 1)
2102 panic("Max. number of lookups already reached in table walk\n");
2103 // Need to perform additional lookups
2105 }
2106 currState = NULL;
2107}
2108
2109
2110void
2111TableWalker::nextWalk(ThreadContext *tc)
2112{
2113 if (pendingQueue.size())
2114 schedule(doProcessEvent, clockEdge(Cycles(1)));
2115 else
2116 completeDrain();
2117}
2118
2119void
2120TableWalker::fetchDescriptor(Addr desc_addr,
2121 DescriptorBase &descriptor, int num_bytes,
2122 Request::Flags flags, LookupLevel lookup_level, Event *event,
2123 void (TableWalker::*doDescriptor)())
2124{
2125 uint8_t *data = descriptor.getRawPtr();
2126
2127 DPRINTF(PageTableWalker,
2128 "Fetching descriptor at address: 0x%x stage2Req: %d\n",
2129 desc_addr, currState->stage2Req);
2130
2131 // If this translation has a stage 2 then we know desc_addr is an IPA and
2132 // needs to be translated before we can access the page table. Do that
2133 // check here.
2134 if (currState->stage2Req) {
2135 Fault fault;
2136
2137 if (currState->timing) {
2138 auto *tran = new
2139 Stage2Walk(*this, data, event, currState->vaddr,
2140 currState->mode, currState->tranType);
2141 currState->stage2Tran = tran;
2142 readDataTimed(currState->tc, desc_addr, tran, num_bytes, flags);
2143 fault = tran->fault;
2144
2145 if (fault != NoFault) {
2146 currState->fault = fault;
2147 }
2148 } else {
2149 fault = readDataUntimed(currState->tc,
2150 currState->vaddr, desc_addr, data, num_bytes, flags,
2151 currState->mode,
2152 currState->tranType,
2153 currState->functional);
2154
2155 if (fault != NoFault) {
2156 currState->fault = fault;
2157 }
2158
2159 (this->*doDescriptor)();
2160 }
2161 } else {
2162 RequestPtr req = std::make_shared<Request>(
2163 desc_addr, num_bytes, flags, requestorId);
2164 req->taskId(context_switch_task_id::DMA);
2165
2166 mpamTagTableWalk(req);
2167
2168 Fault fault = testWalk(req, descriptor.domain(),
2169 lookup_level);
2170
2171 if (fault != NoFault) {
2172 currState->fault = fault;
2173 return;
2174 }
2175
2176 if (currState->timing) {
2177 port->sendTimingReq(req, data,
2178 currState->tc->getCpuPtr()->clockPeriod(), event);
2179
2180 } else if (!currState->functional) {
2181 port->sendAtomicReq(req, data,
2182 currState->tc->getCpuPtr()->clockPeriod());
2183
2184 (this->*doDescriptor)();
2185 } else {
2186 port->sendFunctionalReq(req, data);
2187 (this->*doDescriptor)();
2188 }
2189 }
2190}
2191
2192void
2193TableWalker::stashCurrState(int queue_idx)
2194{
2195 DPRINTF(PageTableWalker, "Adding to walker fifo: "
2196 "queue size before adding: %d\n",
2197 stateQueues[queue_idx].size());
2198 stateQueues[queue_idx].push_back(currState);
2199 currState = NULL;
2200}
2201
2202void
2203TableWalker::insertPartialTableEntry(LongDescriptor &descriptor)
2204{
2205 const bool have_security = release->has(ArmExtension::SECURITY);
2206 TlbEntry te;
2207
2208 // Create and fill a new page table entry
2209 te.valid = true;
2210 te.longDescFormat = true;
2211 te.partial = true;
2212 // The entry is global if there is no address space identifier
2213 // to differentiate translation contexts
2214 te.global = !mmu->hasUnprivRegime(currState->regime);
2215 te.asid = currState->asid;
2216 te.vmid = currState->vmid;
2217 te.N = descriptor.offsetBits();
2218 te.tg = descriptor.grainSize;
2219 te.vpn = currState->vaddr >> te.N;
2220 te.size = (1ULL << te.N) - 1;
2221 te.pfn = descriptor.nextTableAddr();
2222 te.domain = descriptor.domain();
2223 te.lookupLevel = descriptor.lookupLevel;
2224 te.ns = !descriptor.secure(have_security, currState);
2225 te.nstid = !currState->isSecure;
2226 te.type = TypeTLB::unified;
2227
2228 te.regime = currState->regime;
2229
2230 te.xn = currState->longDescData->xnTable;
2231 te.pxn = currState->longDescData->pxnTable;
2232 te.ap = (currState->longDescData->rwTable << 1) |
2233 (currState->longDescData->userTable);
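 // Fold the accumulated hierarchical table permissions into an AP-style
 // field: bit 1 = writable, bit 0 = accessible from user (EL0) code.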
2234
2235 memAttrsWalkAArch64(te);
2236
2237 // Debug output
2238 DPRINTF(TLB, descriptor.dbgHeader().c_str());
2239 DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
2240 te.N, te.pfn, te.size, te.global, te.valid);
2241 DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
2242 "vmid:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
2243 te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid,
2244 te.nonCacheable, te.ns);
2245 DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
2246 descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
2247 descriptor.getRawData());
2248
2249 // Insert the entry into the TLBs
2250 tlb->multiInsert(te);
2251}
2252
2253void
2254TableWalker::insertTableEntry(DescriptorBase &descriptor, bool long_descriptor)
2255{
2256 const bool have_security = release->has(ArmExtension::SECURITY);
2257 TlbEntry te;
2258
2259 // Create and fill a new page table entry
2260 te.valid = true;
2261 te.longDescFormat = long_descriptor;
2262 te.asid = currState->asid;
2263 te.vmid = currState->vmid;
2264 te.N = descriptor.offsetBits();
2265 te.vpn = currState->vaddr >> te.N;
2266 te.size = (1<<te.N) - 1;
2267 te.pfn = descriptor.pfn();
2268 te.domain = descriptor.domain();
2269 te.lookupLevel = descriptor.lookupLevel;
2270 te.ns = !descriptor.secure(have_security, currState);
2271 te.nstid = !currState->isSecure;
2272 te.xn = descriptor.xn();
2273 te.type = currState->mode == BaseMMU::Execute ?
2274 TypeTLB::instruction : TypeTLB::data;
2275
2276 te.regime = currState->regime;
2277
2280
2281 // ASID has no meaning for stage 2 TLB entries, so mark all stage 2 entries
2282 // as global
2283 te.global = descriptor.global(currState) || isStage2;
2284 if (long_descriptor) {
2285 LongDescriptor l_descriptor =
2286 dynamic_cast<LongDescriptor &>(descriptor);
2287
2288 te.tg = l_descriptor.grainSize;
2289 te.xn |= currState->longDescData->xnTable;
2290 te.pxn = currState->longDescData->pxnTable || l_descriptor.pxn();
2291 if (isStage2) {
2292 // this is actually the HAP field, but it's stored in the same bit
2293 // positions as the AP field in a stage 1 translation.
2294 te.hap = l_descriptor.ap();
2295 } else {
2296 te.ap = ((!currState->longDescData->rwTable ||
2297 descriptor.ap() >> 1) << 1) |
2298 (currState->longDescData->userTable && (descriptor.ap() & 0x1));
2299 }
2300 if (currState->aarch64)
2301 memAttrsAArch64(currState->tc, te, l_descriptor);
2302 else
2303 memAttrsLPAE(currState->tc, te, l_descriptor);
2304 } else {
2305 te.ap = descriptor.ap();
2306 memAttrs(currState->tc, te, currState->sctlr, descriptor.texcb(),
2307 descriptor.shareable());
2308 }
2309
2310 // Debug output
2311 DPRINTF(TLB, descriptor.dbgHeader().c_str());
2312 DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
2313 te.N, te.pfn, te.size, te.global, te.valid);
2314 DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
2315 "vmid:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
2316 te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid,
2317 te.nonCacheable, te.ns);
2318 DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
2319 descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
2320 descriptor.getRawData());
2321
2322 // Insert the entry into the TLBs
2323 tlb->multiInsert(te);
2324 if (!currState->timing) {
2325 currState->tc = NULL;
2326 currState->req = NULL;
2327 }
2328}
2329
2330TableWalker::LookupLevel
2331TableWalker::toLookupLevel(uint8_t lookup_level_as_int)
2332{
2333 switch (lookup_level_as_int) {
2334 case LookupLevel::L1:
2335 return LookupLevel::L1;
2336 case LookupLevel::L2:
2337 return LookupLevel::L2;
2338 case LookupLevel::L3:
2339 return LookupLevel::L3;
2340 default:
2341 panic("Invalid lookup level conversion");
2342 }
2343}
2344
2345/* this method keeps track of the table walker queue's residency, so it
2346 * needs to be called whenever requests start and complete. */
2347void
2349{
2350 unsigned n = pendingQueue.size();
2351 if ((currState != NULL) && (currState != pendingQueue.front())) {
2352 ++n;
2353 }
2354
2355 if (n != pendingReqs) {
2356 Tick now = curTick();
2357 stats.pendingWalks.sample(pendingReqs, now - pendingChangeTick);
2358 pendingReqs = n;
2359 pendingChangeTick = now;
2360 }
2361}
2362
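// pendingChange() implements time-weighted sampling: whenever the walker's
// queue depth changes, the ticks elapsed since the previous change are
// credited to the old depth, so the pendingWalks histogram records how long
// the walker sat at each occupancy rather than how often it changed. The
// same idea as a standalone sketch (illustrative types, not the gem5
// statistics classes):

#include <cstdint>
#include <map>

struct OccupancySketch
{
    uint64_t lastChange = 0;                 // tick of the last depth change
    unsigned depth = 0;                      // current queue depth
    std::map<unsigned, uint64_t> residency;  // ticks accumulated per depth

    void change(unsigned new_depth, uint64_t now)
    {
        if (new_depth != depth) {
            residency[depth] += now - lastChange;  // credit the old depth
            depth = new_depth;
            lastChange = now;
        }
    }
};
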
2363Fault
2364TableWalker::testWalk(const RequestPtr &walk_req, TlbEntry::DomainType domain,
2365 LookupLevel lookup_level)
2366{
2367 if (!test) {
2368 return NoFault;
2369 } else {
2370 return test->walkCheck(walk_req, currState->vaddr, currState->isSecure,
2371 currState->el != EL0,
2372 currState->mode, domain, lookup_level);
2373 }
2374}
2375
2376void
2377TableWalker::setTestInterface(TlbTestInterface *ti)
2378{
2379 test = ti;
2380}
2381
2382uint8_t
2383TableWalker::pageSizeNtoStatBin(uint8_t N)
2384{
2385 /* for stats.pageSizes */
2386 switch(N) {
2387 case 12: return 0; // 4K
2388 case 14: return 1; // 16K (using 16K granule in v8-64)
2389 case 16: return 2; // 64K
2390 case 20: return 3; // 1M
2391 case 21: return 4; // 2M-LPAE
2392 case 24: return 5; // 16M
2393 case 25: return 6; // 32M (using 16K granule in v8-64)
2394 case 29: return 7; // 512M (using 64K granule in v8-64)
2395 case 30: return 8; // 1G-LPAE
2396 case 42: return 9; // 4TB (using 64K granule in v8-64)
2397 default:
2398 panic("unknown page size");
2399 return 255;
2400 }
2401}
2402
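// The bins above are keyed by N, the width of the page offset in bits, so
// the page or block size is simply 1 << N bytes: N=12 is 4KiB, N=21 is the
// 2MiB LPAE block, N=42 the 4TiB block. A self-contained check of that
// mapping:

#include <cstdio>

int main()
{
    const unsigned ns[] = {12, 14, 16, 20, 21, 24, 25, 29, 30, 42};
    for (unsigned n : ns)
        std::printf("N=%2u -> %llu KiB\n", n, (1ULL << n) >> 10);
    return 0;
}
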
2403Fault
2404TableWalker::readDataUntimed(ThreadContext *tc, Addr vaddr, Addr desc_addr,
2405 uint8_t *data, int num_bytes, Request::Flags flags, BaseMMU::Mode mode,
2406 MMU::ArmTranslationType tran_type, bool functional)
2407{
2408 Fault fault;
2409
2410 // translate to physical address using the second stage MMU
2411 auto req = std::make_shared<Request>();
2412 req->setVirt(desc_addr, num_bytes, flags | Request::PT_WALK,
2413 requestorId, 0);
2414
2415 if (functional) {
2416 fault = mmu->translateFunctional(req, tc, BaseMMU::Read,
2417 tran_type, true);
2418 } else {
2419 fault = mmu->translateAtomic(req, tc, BaseMMU::Read,
2420 tran_type, true);
2421 }
2422
2423 // Now do the access.
2424 if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
2425 Packet pkt = Packet(req, MemCmd::ReadReq);
2426 pkt.dataStatic(data);
2427 if (functional) {
2428 port->sendFunctional(&pkt);
2429 } else {
2430 port->sendAtomic(&pkt);
2431 }
2432 assert(!pkt.isError());
2433 }
2434
2435 // If there was a fault, annotate it with the flag saying the fault
2436 // occurred while doing a translation for a stage 1 page table walk.
2437 if (fault != NoFault) {
2438 ArmFault *arm_fault = reinterpret_cast<ArmFault *>(fault.get());
2439 arm_fault->annotate(ArmFault::S1PTW, true);
2440 arm_fault->annotate(ArmFault::OVA, vaddr);
2441 }
2442 return fault;
2443}
2444
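// readDataUntimed() performs the whole descriptor fetch inline: translate
// the descriptor's address (functionally or atomically), issue a blocking
// read through the port, and tag any fault as S1PTW so the resulting
// exception is reported against the stage 1 walk. The sequence, condensed
// into a standalone sketch with the gem5 types abstracted away (all names
// here are illustrative):

#include <cstdint>
#include <functional>

// Returns false if the translation "faulted"; the caller is then expected
// to annotate the fault, as the code above does with S1PTW and OVA.
static bool
fetchDescriptorUntimed(uint64_t desc_vaddr, uint8_t *buf, int num_bytes,
    const std::function<bool(uint64_t, uint64_t &)> &translate,
    const std::function<void(uint64_t, uint8_t *, int)> &access)
{
    uint64_t desc_paddr = 0;
    if (!translate(desc_vaddr, desc_paddr))
        return false;                    // translation fault
    access(desc_paddr, buf, num_bytes);  // blocking memory read
    return true;
}
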
2445void
2446TableWalker::mpamTagTableWalk(RequestPtr &req) const
2447{
2448 mpam::tagRequest(currState->tc, req, currState->isFetch);
2449}
2450
2451void
2452TableWalker::readDataTimed(ThreadContext *tc, Addr desc_addr,
2453 Stage2Walk *translation, int num_bytes,
2454 Request::Flags flags)
2455{
2456 // translate to physical address using the second stage MMU
2457 translation->setVirt(
2458 desc_addr, num_bytes, flags | Request::PT_WALK, requestorId);
2459 translation->translateTiming(tc);
2460}
2461
2462TableWalker::Stage2Walk::Stage2Walk(TableWalker &_parent,
2463 uint8_t *_data, Event *_event, Addr vaddr, BaseMMU::Mode _mode,
2464 MMU::ArmTranslationType tran_type)
2465 : data(_data), numBytes(0), event(_event), parent(_parent),
2466 oVAddr(vaddr), mode(_mode), tranType(tran_type), fault(NoFault)
2467{
2468 req = std::make_shared<Request>();
2469}
2470
2471void
2472TableWalker::Stage2Walk::finish(const Fault &_fault,
2473 const RequestPtr &req,
2474 ThreadContext *tc, BaseMMU::Mode mode)
2475{
2476 fault = _fault;
2477
2478 // If there was a fault, annotate it with the flag saying the fault
2479 // occurred while doing a translation for a stage 1 page table walk.
2480 if (fault != NoFault) {
2481 ArmFault *arm_fault = reinterpret_cast<ArmFault *>(fault.get());
2482 arm_fault->annotate(ArmFault::S1PTW, true);
2483 arm_fault->annotate(ArmFault::OVA, oVAddr);
2484 }
2485
2486 if (_fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
2487 parent.getTableWalkerPort().sendTimingReq(req, data,
2488 tc->getCpuPtr()->clockPeriod(), event);
2489 } else {
2490 // We can't do the DMA access as there's been a problem, so tell the
2491 // event we're done
2492 event->process();
2493 }
2494}
2495
2496void
2497TableWalker::Stage2Walk::translateTiming(ThreadContext *tc)
2498{
2499 parent.mmu->translateTiming(req, tc, this, mode, tranType, true);
2500}
2501
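// Stage2Walk ties the two halves of a timing descriptor fetch together:
// translateTiming() hands the descriptor address to the stage 2 MMU, and
// finish() is the continuation that either launches the actual memory read
// or, on a fault, fires the completion event at once so the walk unwinds.
// The control flow restated as a standalone sketch (illustrative types, not
// the gem5 API):

#include <functional>

struct WalkContinuation
{
    std::function<void()> issueRead;  // forward the descriptor read to memory
    std::function<void()> complete;   // finish the walk immediately

    // Invoked when the stage 2 translation of the descriptor address returns.
    void onTranslated(bool faulted)
    {
        if (!faulted)
            issueRead();   // the walk resumes from the memory response event
        else
            complete();    // no access possible; unwind carrying the fault
    }
};
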
2502TableWalker::TableWalkerStats::TableWalkerStats(statistics::Group *parent)
2503 : statistics::Group(parent),
2504 ADD_STAT(walks, statistics::units::Count::get(),
2505 "Table walker walks requested"),
2506 ADD_STAT(walksShortDescriptor, statistics::units::Count::get(),
2507 "Table walker walks initiated with short descriptors"),
2508 ADD_STAT(walksLongDescriptor, statistics::units::Count::get(),
2509 "Table walker walks initiated with long descriptors"),
2510 ADD_STAT(walksShortTerminatedAtLevel, statistics::units::Count::get(),
2511 "Level at which table walker walks with short descriptors "
2512 "terminate"),
2513 ADD_STAT(walksLongTerminatedAtLevel, statistics::units::Count::get(),
2514 "Level at which table walker walks with long descriptors "
2515 "terminate"),
2516 ADD_STAT(squashedBefore, statistics::units::Count::get(),
2517 "Table walks squashed before starting"),
2518 ADD_STAT(squashedAfter, statistics::units::Count::get(),
2519 "Table walks squashed after completion"),
2520 ADD_STAT(walkWaitTime, statistics::units::Tick::get(),
2521 "Table walker wait (enqueue to first request) latency"),
2522 ADD_STAT(walkServiceTime, statistics::units::Tick::get(),
2523 "Table walker service (enqueue to completion) latency"),
2524 ADD_STAT(pendingWalks, statistics::units::Tick::get(),
2525 "Table walker pending requests distribution"),
2526 ADD_STAT(pageSizes, statistics::units::Count::get(),
2527 "Table walker page sizes translated"),
2528 ADD_STAT(requestOrigin, statistics::units::Count::get(),
2529 "Table walker requests started/completed, data/inst")
2530{
2531 walksShortDescriptor
2532 .flags(statistics::nozero);
2533
2534 walksLongDescriptor
2535 .flags(statistics::nozero);
2536
2537 walksShortTerminatedAtLevel
2538 .init(2)
2539 .flags(statistics::nozero);
2540
2541 walksShortTerminatedAtLevel.subname(0, "Level1");
2542 walksShortTerminatedAtLevel.subname(1, "Level2");
2543
2544 walksLongTerminatedAtLevel
2545 .init(4)
2546 .flags(statistics::nozero);
2547 walksLongTerminatedAtLevel.subname(0, "Level0");
2548 walksLongTerminatedAtLevel.subname(1, "Level1");
2549 walksLongTerminatedAtLevel.subname(2, "Level2");
2550 walksLongTerminatedAtLevel.subname(3, "Level3");
2551
2552 squashedBefore
2553 .flags(statistics::nozero);
2554
2555 squashedAfter
2556 .flags(statistics::nozero);
2557
2558 walkWaitTime
2559 .init(16)
2560 .flags(statistics::pdf | statistics::nozero | statistics::nonan);
2561
2562 walkServiceTime
2563 .init(16)
2564 .flags(statistics::pdf | statistics::nozero | statistics::nonan);
2565
2566 pendingWalks
2567 .init(16)
2568 .flags(statistics::pdf | statistics::dist | statistics::nozero |
2569 statistics::nonan);
2570
2571 pageSizes // see DDI 0487A D4-1661
2572 .init(10)
2573 .flags(statistics::total | statistics::pdf | statistics::dist |
2574 statistics::nozero);
2575 pageSizes.subname(0, "4KiB");
2576 pageSizes.subname(1, "16KiB");
2577 pageSizes.subname(2, "64KiB");
2578 pageSizes.subname(3, "1MiB");
2579 pageSizes.subname(4, "2MiB");
2580 pageSizes.subname(5, "16MiB");
2581 pageSizes.subname(6, "32MiB");
2582 pageSizes.subname(7, "512MiB");
2583 pageSizes.subname(8, "1GiB");
2584 pageSizes.subname(9, "4TiB");
2585
2586 requestOrigin
2587 .init(2,2) // Instruction/Data, requests/completed
2588 .flags(statistics::total);
2589 requestOrigin.subname(0,"Requested");
2590 requestOrigin.subname(1,"Completed");
2591 requestOrigin.ysubname(0,"Data");
2592 requestOrigin.ysubname(1,"Inst");
2593}
2594
2595} // namespace gem5