table_walker.cc
1/*
2 * Copyright (c) 2010, 2012-2019, 2021-2022 Arm Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37#include "arch/arm/table_walker.hh"
38
39#include <cassert>
40#include <memory>
41
42#include "arch/arm/faults.hh"
43#include "arch/arm/mmu.hh"
44#include "arch/arm/pagetable.hh"
45#include "arch/arm/system.hh"
46#include "arch/arm/tlb.hh"
47#include "base/compiler.hh"
48#include "cpu/base.hh"
49#include "cpu/thread_context.hh"
50#include "debug/Checkpoint.hh"
51#include "debug/Drain.hh"
52#include "debug/PageTableWalker.hh"
53#include "debug/TLB.hh"
54#include "debug/TLBVerbose.hh"
55#include "sim/system.hh"
56
57namespace gem5
58{
59
60using namespace ArmISA;
61
62TableWalker::TableWalker(const Params &p)
63 : ClockedObject(p),
64 requestorId(p.sys->getRequestorId(this)),
65 port(new Port(*this, requestorId)),
66 isStage2(p.is_stage2), tlb(NULL),
67 currState(NULL), pending(false),
68 numSquashable(p.num_squash_per_cycle),
69 release(nullptr),
70 stats(this),
71 pendingReqs(0),
72 pendingChangeTick(curTick()),
73 doL1DescEvent([this]{ doL1DescriptorWrapper(); }, name()),
74 doL2DescEvent([this]{ doL2DescriptorWrapper(); }, name()),
75 doL0LongDescEvent([this]{ doL0LongDescriptorWrapper(); }, name()),
76 doL1LongDescEvent([this]{ doL1LongDescriptorWrapper(); }, name()),
77 doL2LongDescEvent([this]{ doL2LongDescriptorWrapper(); }, name()),
78 doL3LongDescEvent([this]{ doL3LongDescriptorWrapper(); }, name()),
79 LongDescEventByLevel { &doL0LongDescEvent, &doL1LongDescEvent,
80 &doL2LongDescEvent, &doL3LongDescEvent },
81 doProcessEvent([this]{ processWalkWrapper(); }, name())
82{
83 sctlr = 0;
84
85 // Cache system-level properties
86 if (FullSystem) {
87 ArmSystem *arm_sys = dynamic_cast<ArmSystem *>(p.sys);
88 assert(arm_sys);
89 _physAddrRange = arm_sys->physAddrRange();
90 _haveLargeAsid64 = arm_sys->haveLargeAsid64();
91 } else {
92 _haveLargeAsid64 = false;
93 _physAddrRange = 48;
94 }
95
96}
97
98TableWalker::~TableWalker()
99{
100 ;
101}
102
103TableWalker::Port &
104TableWalker::getTableWalkerPort()
105{
106 return static_cast<Port&>(getPort("port"));
107}
108
109Port &
110TableWalker::getPort(const std::string &if_name, PortID idx)
111{
112 if (if_name == "port") {
113 return *port;
114 }
115 return ClockedObject::getPort(if_name, idx);
116}
117
118void
119TableWalker::setMmu(MMU *_mmu)
120{
121 mmu = _mmu;
122 release = mmu->release();
123}
124
125TableWalker::WalkerState::WalkerState() :
126 tc(nullptr), aarch64(false), el(EL0), physAddrRange(0), req(nullptr),
127 asid(0), vmid(0), isHyp(false), transState(nullptr),
128 vaddr(0), vaddr_tainted(0),
129 sctlr(0), scr(0), cpsr(0), tcr(0),
130 htcr(0), hcr(0), vtcr(0),
131 isWrite(false), isFetch(false), isSecure(false),
132 isUncacheable(false),
133 secureLookup(false), rwTable(false), userTable(false), xnTable(false),
134 pxnTable(false), hpd(false), stage2Req(false),
135 stage2Tran(nullptr), timing(false), functional(false),
136 mode(BaseMMU::Read), tranType(MMU::NormalTran), l2Desc(l1Desc),
137 delayed(false), tableWalker(nullptr)
138{
139}
140
141TableWalker::Port::Port(TableWalker &_walker, RequestorID id)
142 : QueuedRequestPort(_walker.name() + ".port", reqQueue, snoopRespQueue),
143 owner{_walker},
144 reqQueue(_walker, *this),
145 snoopRespQueue(_walker, *this),
146 requestorId(id)
147{
148}
149
150PacketPtr
151TableWalker::Port::createPacket(
152 Addr desc_addr, int size,
153 uint8_t *data, Request::Flags flags, Tick delay,
154 Event *event)
155{
156 RequestPtr req = std::make_shared<Request>(
157 desc_addr, size, flags, requestorId);
158 req->taskId(context_switch_task_id::DMA);
159
160 PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
161 pkt->dataStatic(data);
162
163 auto state = new TableWalkerState;
164 state->event = event;
165 state->delay = delay;
166
167 pkt->senderState = state;
168 return pkt;
169}
170
171void
172TableWalker::Port::sendFunctionalReq(
173 Addr desc_addr, int size,
174 uint8_t *data, Request::Flags flags)
175{
176 auto pkt = createPacket(desc_addr, size, data, flags, 0, nullptr);
177
178 sendFunctional(pkt);
179
180 handleRespPacket(pkt);
181}
182
183void
184TableWalker::Port::sendAtomicReq(
185 Addr desc_addr, int size,
186 uint8_t *data, Request::Flags flags, Tick delay)
187{
188 auto pkt = createPacket(desc_addr, size, data, flags, delay, nullptr);
189
190 Tick lat = sendAtomic(pkt);
191
192 handleRespPacket(pkt, lat);
193}
194
195void
196TableWalker::Port::sendTimingReq(
197 Addr desc_addr, int size,
198 uint8_t *data, Request::Flags flags, Tick delay,
199 Event *event)
200{
201 auto pkt = createPacket(desc_addr, size, data, flags, delay, event);
202
203 schedTimingReq(pkt, curTick());
204}
205
206bool
207TableWalker::Port::recvTimingResp(PacketPtr pkt)
208{
209 // We shouldn't ever get a cacheable block in Modified state.
210 assert(pkt->req->isUncacheable() ||
211 !(pkt->cacheResponding() && !pkt->hasSharers()));
212
213 handleRespPacket(pkt);
214
215 return true;
216}
217
218void
219TableWalker::Port::handleRespPacket(PacketPtr pkt, Tick delay)
220{
221 // Should always see a response with a sender state.
222 assert(pkt->isResponse());
223
224 // Get the DMA sender state.
225 auto *state = dynamic_cast<TableWalkerState*>(pkt->senderState);
226 assert(state);
227
228 handleResp(state, pkt->getAddr(), pkt->req->getSize(), delay);
229
230 delete pkt;
231}
232
233void
234TableWalker::Port::handleResp(TableWalkerState *state, Addr addr,
235 Addr size, Tick delay)
236{
237 if (state->event) {
238 owner.schedule(state->event, curTick() + delay);
239 }
240 delete state;
241}
242
243void
244TableWalker::completeDrain()
245{
246 if (drainState() == DrainState::Draining &&
247 stateQueues[LookupLevel::L0].empty() &&
248 stateQueues[LookupLevel::L1].empty() &&
249 stateQueues[LookupLevel::L2].empty() &&
250 stateQueues[LookupLevel::L3].empty() &&
251 pendingQueue.empty()) {
252
253 DPRINTF(Drain, "TableWalker done draining, processing drain event\n");
254 signalDrainDone();
255 }
256}
257
258DrainState
259TableWalker::drain()
260{
261 bool state_queues_not_empty = false;
262
263 for (int i = 0; i < LookupLevel::Num_ArmLookupLevel; ++i) {
264 if (!stateQueues[i].empty()) {
265 state_queues_not_empty = true;
266 break;
267 }
268 }
269
270 if (state_queues_not_empty || pendingQueue.size()) {
271 DPRINTF(Drain, "TableWalker not drained\n");
272 return DrainState::Draining;
273 } else {
274 DPRINTF(Drain, "TableWalker free, no need to drain\n");
275 return DrainState::Drained;
276 }
277}
278
279void
280TableWalker::drainResume()
281{
282 if (params().sys->isTimingMode() && currState) {
283 delete currState;
284 currState = NULL;
285 pendingChange();
286 }
287}
288
289Fault
290TableWalker::walk(const RequestPtr &_req, ThreadContext *_tc, uint16_t _asid,
291 vmid_t _vmid, bool _isHyp, MMU::Mode _mode,
292 MMU::Translation *_trans, bool _timing, bool _functional,
293 bool secure, MMU::ArmTranslationType tranType,
294 bool _stage2Req, const TlbEntry *walk_entry)
295{
296 assert(!(_functional && _timing));
297 ++stats.walks;
298
299 WalkerState *savedCurrState = NULL;
300
301 if (!currState && !_functional) {
302 // For atomic mode, a new WalkerState instance should only be created
303 // once per TLB. For timing mode, a new instance is generated for every
304 // TLB miss.
305 DPRINTF(PageTableWalker, "creating new instance of WalkerState\n");
306
307 currState = new WalkerState();
308 currState->tableWalker = this;
309 } else if (_functional) {
310 // If we are mixing functional mode with timing (or even
311 // atomic), we need to be careful and clean up after
312 // ourselves to not risk getting into an inconsistent state.
313 DPRINTF(PageTableWalker,
314 "creating functional instance of WalkerState\n");
315 savedCurrState = currState;
316 currState = new WalkerState();
317 currState->tableWalker = this;
318 } else if (_timing) {
319 // This is a translation that was completed and then faulted again
320 // because some underlying parameters that affect the translation
321 // changed out from under us (e.g. asid). It will either be a
322 // misprediction, in which case nothing will happen, or we'll use
323 // this fault to re-execute the faulting instruction which should clean
324 // up everything.
325 if (currState->vaddr_tainted == _req->getVaddr()) {
327 return std::make_shared<ReExec>();
328 }
329 }
331
333 currState->tc = _tc;
334 // ARM DDI 0487A.f (ARMv8 ARM) pg J8-5672
335 // aarch32/translation/translation/AArch32.TranslateAddress dictates
336 // even AArch32 EL0 will use AArch64 translation if EL1 is in AArch64.
337 if (isStage2) {
338 currState->el = EL1;
339 currState->aarch64 = ELIs64(_tc, EL2);
340 } else {
341 currState->el =
342 MMU::tranTypeEL(_tc->readMiscReg(MISCREG_CPSR), tranType);
343 currState->aarch64 =
344 ELIs64(_tc, currState->el == EL0 ? EL1 : currState->el);
345 }
346 currState->transState = _trans;
347 currState->req = _req;
348 if (walk_entry) {
349 currState->walkEntry = *walk_entry;
350 } else {
351 currState->walkEntry = TlbEntry();
352 }
354 currState->asid = _asid;
355 currState->vmid = _vmid;
356 currState->isHyp = _isHyp;
357 currState->timing = _timing;
358 currState->functional = _functional;
359 currState->mode = _mode;
360 currState->tranType = tranType;
361 currState->isSecure = secure;
363
366 currState->vaddr_tainted = currState->req->getVaddr();
367 if (currState->aarch64)
368 currState->vaddr = purifyTaggedAddr(currState->vaddr_tainted,
369 currState->tc, currState->el,
370 currState->mode==BaseMMU::Execute);
371 else
372 currState->vaddr = currState->vaddr_tainted;
373
374 if (currState->aarch64) {
376 if (isStage2) {
378 if (currState->secureLookup) {
379 currState->vtcr =
381 } else {
382 currState->vtcr =
384 }
385 } else switch (currState->el) {
386 case EL0:
387 if (HaveExt(currState->tc, ArmExtension::FEAT_VHE) &&
388 currState->hcr.tge == 1 && currState->hcr.e2h == 1) {
391 } else {
394 }
395 break;
396 case EL1:
399 break;
400 case EL2:
401 assert(release->has(ArmExtension::VIRTUALIZATION));
404 break;
405 case EL3:
406 assert(release->has(ArmExtension::SECURITY));
409 break;
410 default:
411 panic("Invalid exception level");
412 break;
413 }
414 } else {
422 }
424
425 currState->isFetch = (currState->mode == BaseMMU::Execute);
426 currState->isWrite = (currState->mode == BaseMMU::Write);
427
429
430 currState->stage2Req = _stage2Req && !isStage2;
431
432 bool long_desc_format = currState->aarch64 || _isHyp || isStage2 ||
433 longDescFormatInUse(currState->tc);
434
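// In other words, the short-descriptor (ARMv7 VMSA) format is only used
// for AArch32 stage 1 walks outside Hyp mode with TTBCR.EAE == 0;
// everything else uses the long-descriptor format.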
435 if (long_desc_format) {
436 // Helper variables used for hierarchical permissions
438 currState->rwTable = true;
439 currState->userTable = true;
440 currState->xnTable = false;
441 currState->pxnTable = false;
442
443 stats.walksLongDescriptor++;
444 } else {
445 stats.walksShortDescriptor++;
446 }
447
448 if (!currState->timing) {
449 Fault fault = NoFault;
450 if (currState->aarch64)
451 fault = processWalkAArch64();
452 else if (long_desc_format)
453 fault = processWalkLPAE();
454 else
455 fault = processWalk();
456
457 // If this was a functional non-timing access restore state to
458 // how we found it.
459 if (currState->functional) {
460 delete currState;
461 currState = savedCurrState;
462 }
463 return fault;
464 }
465
466 if (pending || pendingQueue.size()) {
467 pendingQueue.push_back(currState);
468 currState = NULL;
469 pendingChange();
470 } else {
471 pending = true;
472 pendingChange();
473 if (currState->aarch64)
474 return processWalkAArch64();
475 else if (long_desc_format)
476 return processWalkLPAE();
477 else
478 return processWalk();
479 }
480
481 return NoFault;
482}
483
484void
485TableWalker::processWalkWrapper()
486{
487 assert(!currState);
488 assert(pendingQueue.size());
489 pendingChange();
490 currState = pendingQueue.front();
491
492 // Check if a previous walk filled this request already
493 // @TODO Should this always be the TLB or should we look in the stage2 TLB?
496 currState->el, false, isStage2, currState->mode);
497
498 // Check if we still need to have a walk for this request. If the requesting
499 // instruction has been squashed, or a previous walk has filled the TLB with
500 // a match, we just want to get rid of the walk. The latter could happen
501 // when there are multiple outstanding misses to a single page and a
502 // previous request has been successfully translated.
503 if (!currState->transState->squashed() && (!te || te->partial)) {
504 // We've got a valid request, let's process it
505 pending = true;
506 pendingQueue.pop_front();
507 // Keep currState in case one of the processWalk... calls NULLs it
508
509 if (te && te->partial) {
510 currState->walkEntry = *te;
511 }
512 WalkerState *curr_state_copy = currState;
513 Fault f;
514 if (currState->aarch64)
515 f = processWalkAArch64();
516 else if (longDescFormatInUse(currState->tc) ||
517 currState->isHyp || isStage2)
518 f = processWalkLPAE();
519 else
520 f = processWalk();
521
522 if (f != NoFault) {
523 curr_state_copy->transState->finish(f, curr_state_copy->req,
524 curr_state_copy->tc, curr_state_copy->mode);
525
526 delete curr_state_copy;
527 }
528 return;
529 }
530
531
532 // If the instruction that we were translating for has been
533 // squashed we shouldn't bother.
534 unsigned num_squashed = 0;
535 ThreadContext *tc = currState->tc;
536 while ((num_squashed < numSquashable) && currState &&
537 (currState->transState->squashed() ||
538 (te && !te->partial))) {
539 pendingQueue.pop_front();
540 num_squashed++;
541 stats.squashedBefore++;
542
543 DPRINTF(TLB, "Squashing table walk for address %#x\n",
544 currState->vaddr_tainted);
545
546 if (currState->transState->squashed()) {
547 // finish the translation which will delete the translation object
548 currState->transState->finish(
549 std::make_shared<UnimpFault>("Squashed Inst"),
550 currState->req, currState->tc, currState->mode);
551 } else {
552 // translate the request now that we know it will work
555 mmu->translateTiming(currState->req, currState->tc,
556 currState->transState, currState->mode);
557 }
558
559 // delete the current request
560 delete currState;
561
562 // peek at the next one
563 if (pendingQueue.size()) {
564 currState = pendingQueue.front();
567 false, currState->el, false, isStage2, currState->mode);
568 } else {
569 // Terminate the loop, nothing more to do
570 currState = NULL;
571 }
572 }
573 pendingChange();
574
575 // if we still have pending translations, schedule more work
576 nextWalk(tc);
577 currState = NULL;
578}
579
580Fault
581TableWalker::processWalk()
582{
583 Addr ttbr = 0;
584
585 // For short descriptors, translation configs are held in
586 // TTBR1.
587 RegVal ttbr1 = currState->tc->readMiscReg(snsBankedIndex(
588 MISCREG_TTBR1, currState->tc, !currState->isSecure));
589
590 const auto irgn0_mask = 0x1;
591 const auto irgn1_mask = 0x40;
592 currState->isUncacheable = (ttbr1 & (irgn0_mask | irgn1_mask)) == 0;
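// Both IRGN bits clear encode Normal Inner Non-cacheable walks, so in
// that case descriptor fetches must bypass the caches.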
593
594 // If translation isn't enabled, we shouldn't be here
595 assert(currState->sctlr.m || isStage2);
596 const bool is_atomic = currState->req->isAtomic();
597 const bool have_security = release->has(ArmExtension::SECURITY);
598
599 DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
600 currState->vaddr_tainted, currState->ttbcr, mbits(currState->vaddr, 31,
601 32 - currState->ttbcr.n));
602
604
605 if (currState->ttbcr.n == 0 || !mbits(currState->vaddr, 31,
606 32 - currState->ttbcr.n)) {
607 DPRINTF(TLB, " - Selecting TTBR0\n");
608 // Check if table walk is allowed when Security Extensions are enabled
609 if (have_security && currState->ttbcr.pd0) {
610 if (currState->isFetch)
611 return std::make_shared<PrefetchAbort>(
613 ArmFault::TranslationLL + LookupLevel::L1,
614 isStage2,
616 else
617 return std::make_shared<DataAbort>(
620 is_atomic ? false : currState->isWrite,
621 ArmFault::TranslationLL + LookupLevel::L1, isStage2,
623 }
624 ttbr = currState->tc->readMiscReg(snsBankedIndex(
625 MISCREG_TTBR0, currState->tc, !currState->isSecure));
626 } else {
627 DPRINTF(TLB, " - Selecting TTBR1\n");
628 // Check if table walk is allowed when Security Extensions are enabled
629 if (have_security && currState->ttbcr.pd1) {
630 if (currState->isFetch)
631 return std::make_shared<PrefetchAbort>(
633 ArmFault::TranslationLL + LookupLevel::L1,
634 isStage2,
636 else
637 return std::make_shared<DataAbort>(
640 is_atomic ? false : currState->isWrite,
641 ArmFault::TranslationLL + LookupLevel::L1, isStage2,
643 }
644 ttbr = ttbr1;
645 currState->ttbcr.n = 0;
646 }
647
648 Addr l1desc_addr = mbits(ttbr, 31, 14 - currState->ttbcr.n) |
649 (bits(currState->vaddr, 31 - currState->ttbcr.n, 20) << 2);
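// Worked example (illustrative values, not from the source): with
// TTBCR.n == 0, ttbr == 0x80004000 and vaddr == 0x12345678 this yields
// mbits(0x80004000, 31, 14) | (bits(0x12345678, 31, 20) << 2)
// == 0x80004000 | (0x123 << 2) == 0x8000448c.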
650 DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr,
651 currState->isSecure ? "s" : "ns");
652
653 // Trickbox address check
654 Fault f;
655 f = testWalk(l1desc_addr, sizeof(uint32_t),
656 TlbEntry::DomainType::NoAccess, LookupLevel::L1, isStage2);
657 if (f) {
658 DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
659 if (currState->timing) {
660 pending = false;
661 nextWalk(currState->tc);
662 currState = NULL;
663 } else {
664 currState->tc = NULL;
665 currState->req = NULL;
666 }
667 return f;
668 }
669
670 Request::Flags flag = Request::PT_WALK;
671 if (currState->sctlr.c == 0 || currState->isUncacheable) {
672 flag.set(Request::UNCACHEABLE);
673 }
674
675 if (currState->isSecure) {
676 flag.set(Request::SECURE);
677 }
678
679 bool delayed;
680 delayed = fetchDescriptor(l1desc_addr, (uint8_t*)&currState->l1Desc.data,
681 sizeof(uint32_t), flag, LookupLevel::L1,
682 &doL1DescEvent,
683 &TableWalker::doL1Descriptor);
684 if (!delayed) {
685 f = currState->fault;
686 }
687
688 return f;
689}
690
691Fault
692TableWalker::processWalkLPAE()
693{
694 Addr ttbr, ttbr0_max, ttbr1_min, desc_addr;
695 int tsz, n;
696 LookupLevel start_lookup_level = LookupLevel::L1;
697
698 DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n",
699 currState->vaddr_tainted, currState->ttbcr);
700
702
703 Request::Flags flag = Request::PT_WALK;
704 if (currState->isSecure)
705 flag.set(Request::SECURE);
706
707 // work out which base address register to use, if in hyp mode we always
708 // use HTTBR
709 if (isStage2) {
710 DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n");
712 tsz = sext<4>(currState->vtcr.t0sz);
713 start_lookup_level = currState->vtcr.sl0 ?
714 LookupLevel::L1 : LookupLevel::L2;
715 currState->isUncacheable = currState->vtcr.irgn0 == 0;
716 } else if (currState->isHyp) {
717 DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n");
719 tsz = currState->htcr.t0sz;
720 currState->isUncacheable = currState->htcr.irgn0 == 0;
721 } else {
723
724 // Determine boundaries of TTBR0/1 regions
725 if (currState->ttbcr.t0sz)
726 ttbr0_max = (1ULL << (32 - currState->ttbcr.t0sz)) - 1;
727 else if (currState->ttbcr.t1sz)
728 ttbr0_max = (1ULL << 32) -
729 (1ULL << (32 - currState->ttbcr.t1sz)) - 1;
730 else
731 ttbr0_max = (1ULL << 32) - 1;
732 if (currState->ttbcr.t1sz)
733 ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz));
734 else
735 ttbr1_min = (1ULL << (32 - currState->ttbcr.t0sz));
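// Example of the resulting split (illustrative values): t0sz == 1 and
// t1sz == 1 give ttbr0_max == 0x7fffffff and ttbr1_min == 0x80000000;
// with t0sz == t1sz == 0, TTBR0 spans the entire 32-bit space and TTBR1
// is never selected.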
736
737 const bool is_atomic = currState->req->isAtomic();
738
739 // The following code snippet selects the appropriate translation table base
740 // address (TTBR0 or TTBR1) and the appropriate starting lookup level
741 // depending on the address range supported by the translation table (ARM
742 // ARM issue C B3.6.4)
743 if (currState->vaddr <= ttbr0_max) {
744 DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n");
745 // Check if table walk is allowed
746 if (currState->ttbcr.epd0) {
747 if (currState->isFetch)
748 return std::make_shared<PrefetchAbort>(
750 ArmFault::TranslationLL + LookupLevel::L1,
751 isStage2,
753 else
754 return std::make_shared<DataAbort>(
757 is_atomic ? false : currState->isWrite,
758 ArmFault::TranslationLL + LookupLevel::L1,
759 isStage2,
761 }
764 tsz = currState->ttbcr.t0sz;
765 currState->isUncacheable = currState->ttbcr.irgn0 == 0;
766 if (ttbr0_max < (1ULL << 30)) // Upper limit < 1 GiB
767 start_lookup_level = LookupLevel::L2;
768 } else if (currState->vaddr >= ttbr1_min) {
769 DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
770 // Check if table walk is allowed
771 if (currState->ttbcr.epd1) {
772 if (currState->isFetch)
773 return std::make_shared<PrefetchAbort>(
775 ArmFault::TranslationLL + LookupLevel::L1,
776 isStage2,
778 else
779 return std::make_shared<DataAbort>(
782 is_atomic ? false : currState->isWrite,
783 ArmFault::TranslationLL + LookupLevel::L1,
784 isStage2,
786 }
789 tsz = currState->ttbcr.t1sz;
790 currState->isUncacheable = currState->ttbcr.irgn1 == 0;
791 // Lower limit >= 3 GiB
792 if (ttbr1_min >= (1ULL << 31) + (1ULL << 30))
793 start_lookup_level = LookupLevel::L2;
794 } else {
795 // Out of boundaries -> translation fault
796 if (currState->isFetch)
797 return std::make_shared<PrefetchAbort>(
799 ArmFault::TranslationLL + LookupLevel::L1,
800 isStage2,
802 else
803 return std::make_shared<DataAbort>(
806 is_atomic ? false : currState->isWrite,
807 ArmFault::TranslationLL + LookupLevel::L1,
809 }
810
811 }
812
813 // Perform lookup (ARM ARM issue C B3.6.6)
814 if (start_lookup_level == LookupLevel::L1) {
815 n = 5 - tsz;
816 desc_addr = mbits(ttbr, 39, n) |
817 (bits(currState->vaddr, n + 26, 30) << 3);
818 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
819 desc_addr, currState->isSecure ? "s" : "ns");
820 } else {
821 // Skip first-level lookup
822 n = (tsz >= 2 ? 14 - tsz : 12);
823 desc_addr = mbits(ttbr, 39, n) |
824 (bits(currState->vaddr, n + 17, 21) << 3);
825 DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
826 desc_addr, currState->isSecure ? "s" : "ns");
827 }
828
829 // Trickbox address check
830 Fault f = testWalk(desc_addr, sizeof(uint64_t),
831 TlbEntry::DomainType::NoAccess, start_lookup_level,
832 isStage2);
833 if (f) {
834 DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
835 if (currState->timing) {
836 pending = false;
837 nextWalk(currState->tc);
838 currState = NULL;
839 } else {
840 currState->tc = NULL;
841 currState->req = NULL;
842 }
843 return f;
844 }
845
846 if (currState->sctlr.c == 0 || currState->isUncacheable) {
847 flag.set(Request::UNCACHEABLE);
848 }
849
850 currState->longDesc.lookupLevel = start_lookup_level;
851 currState->longDesc.aarch64 = false;
852 currState->longDesc.grainSize = Grain4KB;
853
854 bool delayed = fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
855 sizeof(uint64_t), flag, start_lookup_level,
856 LongDescEventByLevel[start_lookup_level],
857 &TableWalker::doLongDescriptor);
858 if (!delayed) {
859 f = currState->fault;
860 }
861
862 return f;
863}
864
865bool
866TableWalker::checkVAddrSizeFaultAArch64(int top_bit,
867 GrainSize tg, int tsz, bool low_range)
868{
869 // The effective maximum input size is 48 if ARMv8.2-LVA is not
870 // supported or if the translation granule that is in use is 4KB or
871 // 16KB in size. When ARMv8.2-LVA is supported, for the 64KB
872 // translation granule size only, the effective maximum input size
873 // is 52.
874 const bool have_lva = HaveExt(currState->tc, ArmExtension::FEAT_LVA);
875 int in_max = (have_lva && tg == Grain64KB) ? 52 : 48;
876 int in_min = 64 - (tg == Grain64KB ? 47 : 48);
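// For example: with a 4KB granule and no LVA, the input size tsz must
// lie in [in_min, in_max] == [16, 48]; a 64KB granule relaxes the
// minimum to 17 and, with FEAT_LVA, raises the maximum to 52.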
877
878 return tsz > in_max || tsz < in_min || (low_range ?
879 bits(currState->vaddr, top_bit, tsz) != 0x0 :
880 bits(currState->vaddr, top_bit, tsz) != mask(top_bit - tsz + 1));
881}
882
883bool
884TableWalker::checkAddrSizeFaultAArch64(Addr addr, int pa_range)
885{
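// e.g. (illustrative values): with pa_range == 40 and
// _physAddrRange == 48, any output address with a bit set in [47:40]
// is reported as an address size fault.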
886 return (pa_range != _physAddrRange &&
887 bits(addr, _physAddrRange - 1, pa_range));
888}
889
890Fault
891TableWalker::processWalkAArch64()
892{
893 assert(currState->aarch64);
894
895 DPRINTF(TLB, "Beginning table walk for address %#llx, TCR: %#llx\n",
896 currState->vaddr_tainted, currState->tcr);
897
899
900 // Determine TTBR, table size, granule size and phys. address range
901 Addr ttbr = 0;
902 int tsz = 0, ps = 0;
903 GrainSize tg = Grain4KB; // grain size computed from tg* field
904 bool fault = false;
905
906 int top_bit = computeAddrTop(currState->tc,
907 bits(currState->vaddr, 55),
909 currState->tcr,
910 currState->el);
911
912 bool vaddr_fault = false;
913 switch (currState->el) {
914 case EL0:
915 {
916 Addr ttbr0;
917 Addr ttbr1;
918 if (HaveExt(currState->tc, ArmExtension::FEAT_VHE) &&
919 currState->hcr.tge == 1 && currState->hcr.e2h == 1) {
920 // VHE code for EL2&0 regime
921 ttbr0 = currState->tc->readMiscReg(MISCREG_TTBR0_EL2);
922 ttbr1 = currState->tc->readMiscReg(MISCREG_TTBR1_EL2);
923 } else {
924 ttbr0 = currState->tc->readMiscReg(MISCREG_TTBR0_EL1);
925 ttbr1 = currState->tc->readMiscReg(MISCREG_TTBR1_EL1);
926 }
927 switch (bits(currState->vaddr, 63,48)) {
928 case 0:
929 DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
930 ttbr = ttbr0;
931 tsz = 64 - currState->tcr.t0sz;
932 tg = GrainMap_tg0[currState->tcr.tg0];
933 currState->hpd = currState->tcr.hpd0;
934 currState->isUncacheable = currState->tcr.irgn0 == 0;
935 vaddr_fault = checkVAddrSizeFaultAArch64(
936 top_bit, tg, tsz, true);
937
938 if (vaddr_fault || currState->tcr.epd0)
939 fault = true;
940 break;
941 case 0xffff:
942 DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
943 ttbr = ttbr1;
944 tsz = 64 - currState->tcr.t1sz;
945 tg = GrainMap_tg1[currState->tcr.tg1];
946 currState->hpd = currState->tcr.hpd1;
947 currState->isUncacheable = currState->tcr.irgn1 == 0;
948 vaddr_fault = checkVAddrSizeFaultAArch64(
949 top_bit, tg, tsz, false);
950
951 if (vaddr_fault || currState->tcr.epd1)
952 fault = true;
953 break;
954 default:
955 // top two bytes must be all 0s or all 1s, else invalid addr
956 fault = true;
957 }
958 ps = currState->tcr.ips;
959 }
960 break;
961 case EL1:
962 if (isStage2) {
963 if (currState->secureLookup) {
964 DPRINTF(TLB, " - Selecting VSTTBR_EL2 (AArch64 stage 2)\n");
965 ttbr = currState->tc->readMiscReg(MISCREG_VSTTBR_EL2);
966 } else {
967 DPRINTF(TLB, " - Selecting VTTBR_EL2 (AArch64 stage 2)\n");
968 ttbr = currState->tc->readMiscReg(MISCREG_VTTBR_EL2);
969 }
970 tsz = 64 - currState->vtcr.t0sz64;
971 tg = GrainMap_tg0[currState->vtcr.tg0];
972
973 ps = currState->vtcr.ps;
974 currState->isUncacheable = currState->vtcr.irgn0 == 0;
975 } else {
976 switch (bits(currState->vaddr, top_bit)) {
977 case 0:
978 DPRINTF(TLB, " - Selecting TTBR0_EL1 (AArch64)\n");
979 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL1);
980 tsz = 64 - currState->tcr.t0sz;
981 tg = GrainMap_tg0[currState->tcr.tg0];
982 currState->hpd = currState->tcr.hpd0;
983 currState->isUncacheable = currState->tcr.irgn0 == 0;
984 vaddr_fault = checkVAddrSizeFaultAArch64(
985 top_bit, tg, tsz, true);
986
987 if (vaddr_fault || currState->tcr.epd0)
988 fault = true;
989 break;
990 case 0x1:
991 DPRINTF(TLB, " - Selecting TTBR1_EL1 (AArch64)\n");
992 ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL1);
993 tsz = 64 - currState->tcr.t1sz;
994 tg = GrainMap_tg1[currState->tcr.tg1];
995 currState->hpd = currState->tcr.hpd1;
996 currState->isUncacheable = currState->tcr.irgn1 == 0;
997 vaddr_fault = checkVAddrSizeFaultAArch64(
998 top_bit, tg, tsz, false);
999
1000 if (vaddr_fault || currState->tcr.epd1)
1001 fault = true;
1002 break;
1003 default:
1004 // top two bytes must be all 0s or all 1s, else invalid addr
1005 fault = true;
1006 }
1007 ps = currState->tcr.ips;
1008 }
1009 break;
1010 case EL2:
1011 switch(bits(currState->vaddr, top_bit)) {
1012 case 0:
1013 DPRINTF(TLB, " - Selecting TTBR0_EL2 (AArch64)\n");
1014 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL2);
1015 tsz = 64 - currState->tcr.t0sz;
1016 tg = GrainMap_tg0[currState->tcr.tg0];
1017 currState->hpd = currState->hcr.e2h ?
1018 currState->tcr.hpd0 : currState->tcr.hpd;
1019 currState->isUncacheable = currState->tcr.irgn0 == 0;
1020 vaddr_fault = checkVAddrSizeFaultAArch64(
1021 top_bit, tg, tsz, true);
1022
1023 if (vaddr_fault || (currState->hcr.e2h && currState->tcr.epd0))
1024 fault = true;
1025 break;
1026
1027 case 0x1:
1028 DPRINTF(TLB, " - Selecting TTBR1_EL2 (AArch64)\n");
1029 ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL2);
1030 tsz = 64 - currState->tcr.t1sz;
1031 tg = GrainMap_tg1[currState->tcr.tg1];
1032 currState->hpd = currState->tcr.hpd1;
1033 currState->isUncacheable = currState->tcr.irgn1 == 0;
1034 vaddr_fault = checkVAddrSizeFaultAArch64(
1035 top_bit, tg, tsz, false);
1036
1037 if (vaddr_fault || !currState->hcr.e2h || currState->tcr.epd1)
1038 fault = true;
1039 break;
1040
1041 default:
1042 // invalid addr if top two bytes are not all 0s
1043 fault = true;
1044 }
1045 ps = currState->hcr.e2h ? currState->tcr.ips: currState->tcr.ps;
1046 break;
1047 case EL3:
1048 switch(bits(currState->vaddr, top_bit)) {
1049 case 0:
1050 DPRINTF(TLB, " - Selecting TTBR0_EL3 (AArch64)\n");
1051 ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL3);
1052 tsz = 64 - currState->tcr.t0sz;
1053 tg = GrainMap_tg0[currState->tcr.tg0];
1054 currState->hpd = currState->tcr.hpd;
1055 currState->isUncacheable = currState->tcr.irgn0 == 0;
1056 vaddr_fault = checkVAddrSizeFaultAArch64(
1057 top_bit, tg, tsz, true);
1058
1059 if (vaddr_fault)
1060 fault = true;
1061 break;
1062 default:
1063 // invalid addr if top two bytes are not all 0s
1064 fault = true;
1065 }
1066 ps = currState->tcr.ps;
1067 break;
1068 }
1069
1070 const bool is_atomic = currState->req->isAtomic();
1071
1072 if (fault) {
1073 Fault f;
1074 if (currState->isFetch)
1075 f = std::make_shared<PrefetchAbort>(
1077 ArmFault::TranslationLL + LookupLevel::L0, isStage2,
1079 else
1080 f = std::make_shared<DataAbort>(
1083 is_atomic ? false : currState->isWrite,
1084 ArmFault::TranslationLL + LookupLevel::L0,
1086
1087 if (currState->timing) {
1088 pending = false;
1089 nextWalk(currState->tc);
1090 currState = NULL;
1091 } else {
1092 currState->tc = NULL;
1093 currState->req = NULL;
1094 }
1095 return f;
1096
1097 }
1098
1099 if (tg == ReservedGrain) {
1100 warn_once("Reserved granule size requested; gem5's IMPLEMENTATION "
1101 "DEFINED behavior takes this to mean 4KB granules\n");
1102 tg = Grain4KB;
1103 }
1104
1105 // Clamp to lower limit
1106 int pa_range = decodePhysAddrRange64(ps);
1107 if (pa_range > _physAddrRange) {
1108 currState->physAddrRange = _physAddrRange;
1109 } else {
1110 currState->physAddrRange = pa_range;
1111 }
1112
1113 auto [table_addr, desc_addr, start_lookup_level] = walkAddresses(
1114 ttbr, tg, tsz, pa_range);
1115
1116 // Determine physical address size and raise an Address Size Fault if
1117 // necessary
1119 DPRINTF(TLB, "Address size fault before any lookup\n");
1120 Fault f;
1121 if (currState->isFetch)
1122 f = std::make_shared<PrefetchAbort>(
1124 ArmFault::AddressSizeLL + start_lookup_level,
1125 isStage2,
1127 else
1128 f = std::make_shared<DataAbort>(
1131 is_atomic ? false : currState->isWrite,
1132 ArmFault::AddressSizeLL + start_lookup_level,
1133 isStage2,
1135
1136
1137 if (currState->timing) {
1138 pending = false;
1139 nextWalk(currState->tc);
1140 currState = NULL;
1141 } else {
1142 currState->tc = NULL;
1143 currState->req = NULL;
1144 }
1145 return f;
1146
1147 }
1148
1149 // Trickbox address check
1150 Fault f = testWalk(desc_addr, sizeof(uint64_t),
1151 TlbEntry::DomainType::NoAccess, start_lookup_level, isStage2);
1152 if (f) {
1153 DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
1154 if (currState->timing) {
1155 pending = false;
1156 nextWalk(currState->tc);
1157 currState = NULL;
1158 } else {
1159 currState->tc = NULL;
1160 currState->req = NULL;
1161 }
1162 return f;
1163 }
1164
1165 Request::Flags flag = Request::PT_WALK;
1166 if (currState->sctlr.c == 0 || currState->isUncacheable) {
1167 flag.set(Request::UNCACHEABLE);
1168 }
1169
1170 if (currState->isSecure) {
1171 flag.set(Request::SECURE);
1172 }
1173
1174 currState->longDesc.lookupLevel = start_lookup_level;
1175 currState->longDesc.aarch64 = true;
1178
1179 if (currState->timing) {
1180 fetchDescriptor(desc_addr, (uint8_t*) &currState->longDesc.data,
1181 sizeof(uint64_t), flag, start_lookup_level,
1182 LongDescEventByLevel[start_lookup_level], NULL);
1183 } else {
1184 fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
1185 sizeof(uint64_t), flag, -1, NULL,
1186 &TableWalker::doLongDescriptor);
1187 f = currState->fault;
1188 }
1189
1190 return f;
1191}
1192
1193std::tuple<Addr, Addr, TableWalker::LookupLevel>
1194TableWalker::walkAddresses(Addr ttbr, GrainSize tg, int tsz, int pa_range)
1195{
1196 const auto* ptops = getPageTableOps(tg);
1197
1198 LookupLevel first_level = LookupLevel::Num_ArmLookupLevel;
1199 Addr table_addr = 0;
1200 Addr desc_addr = 0;
1201
1202 if (currState->walkEntry.valid) {
1203 // WalkCache hit
1204 TlbEntry* entry = &currState->walkEntry;
1205 DPRINTF(PageTableWalker,
1206 "Walk Cache hit: va=%#x, level=%d, table address=%#x\n",
1207 currState->vaddr, entry->lookupLevel, entry->pfn);
1208
1209 currState->xnTable = entry->xn;
1210 currState->pxnTable = entry->pxn;
1211 currState->rwTable = bits(entry->ap, 1);
1212 currState->userTable = bits(entry->ap, 0);
1213
1214 table_addr = entry->pfn;
1215 first_level = (LookupLevel)(entry->lookupLevel + 1);
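// The cached entry holds the table address of the level it was created
// at, so the walk resumes one level further down.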
1216 } else {
1217 // WalkCache miss
1218 first_level = isStage2 ?
1219 ptops->firstS2Level(currState->vtcr.sl0) :
1220 ptops->firstLevel(64 - tsz);
1221 panic_if(first_level == LookupLevel::Num_ArmLookupLevel,
1222 "Table walker couldn't find lookup level\n");
1223
1224 int stride = tg - 3;
1225 int base_addr_lo = 3 + tsz - stride * (3 - first_level) - tg;
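// Worked example (illustrative values): a 4KB granule gives tg == 12
// and stride == 9; with tsz == 48 and first_level == L0,
// base_addr_lo == 3 + 48 - 9 * 3 - 12 == 12, so the table base is
// mbits(ttbr, 47, 12).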
1226
1227 if (pa_range == 52) {
1228 int z = (base_addr_lo < 6) ? 6 : base_addr_lo;
1229 table_addr = mbits(ttbr, 47, z);
1230 table_addr |= (bits(ttbr, 5, 2) << 48);
1231 } else {
1232 table_addr = mbits(ttbr, 47, base_addr_lo);
1233 }
1234 }
1235
1236 desc_addr = table_addr + ptops->index(currState->vaddr, first_level, tsz);
1237
1238 return std::make_tuple(table_addr, desc_addr, first_level);
1239}
1240
1241void
1242TableWalker::memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr,
1243 uint8_t texcb, bool s)
1244{
1245 // Note: tc and sctlr local variables are hiding tc and sctlr class
1246 // variables
1247 DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s);
1248 te.shareable = false; // default value
1249 te.nonCacheable = false;
1250 te.outerShareable = false;
1251 if (sctlr.tre == 0 || ((sctlr.tre == 1) && (sctlr.m == 0))) {
1252 switch(texcb) {
1253 case 0: // Strongly-ordered
1254 te.nonCacheable = true;
1256 te.shareable = true;
1257 te.innerAttrs = 1;
1258 te.outerAttrs = 0;
1259 break;
1260 case 1: // Shareable Device
1261 te.nonCacheable = true;
1263 te.shareable = true;
1264 te.innerAttrs = 3;
1265 te.outerAttrs = 0;
1266 break;
1267 case 2: // Outer and Inner Write-Through, no Write-Allocate
1269 te.shareable = s;
1270 te.innerAttrs = 6;
1271 te.outerAttrs = bits(texcb, 1, 0);
1272 break;
1273 case 3: // Outer and Inner Write-Back, no Write-Allocate
1275 te.shareable = s;
1276 te.innerAttrs = 7;
1277 te.outerAttrs = bits(texcb, 1, 0);
1278 break;
1279 case 4: // Outer and Inner Non-cacheable
1280 te.nonCacheable = true;
1282 te.shareable = s;
1283 te.innerAttrs = 0;
1284 te.outerAttrs = bits(texcb, 1, 0);
1285 break;
1286 case 5: // Reserved
1287 panic("Reserved texcb value!\n");
1288 break;
1289 case 6: // Implementation Defined
1290 panic("Implementation-defined texcb value!\n");
1291 break;
1292 case 7: // Outer and Inner Write-Back, Write-Allocate
1294 te.shareable = s;
1295 te.innerAttrs = 5;
1296 te.outerAttrs = 1;
1297 break;
1298 case 8: // Non-shareable Device
1299 te.nonCacheable = true;
1301 te.shareable = false;
1302 te.innerAttrs = 3;
1303 te.outerAttrs = 0;
1304 break;
1305 case 9 ... 15: // Reserved
1306 panic("Reserved texcb value!\n");
1307 break;
1308 case 16 ... 31: // Cacheable Memory
1310 te.shareable = s;
1311 if (bits(texcb, 1,0) == 0 || bits(texcb, 3,2) == 0)
1312 te.nonCacheable = true;
1313 te.innerAttrs = bits(texcb, 1, 0);
1314 te.outerAttrs = bits(texcb, 3, 2);
1315 break;
1316 default:
1317 panic("More than 32 states for 5 bits?\n");
1318 }
1319 } else {
1320 assert(tc);
1321 PRRR prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR,
1323 NMRR nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR,
1325 DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr);
1326 uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0;
1327 switch(bits(texcb, 2,0)) {
1328 case 0:
1329 curr_tr = prrr.tr0;
1330 curr_ir = nmrr.ir0;
1331 curr_or = nmrr.or0;
1332 te.outerShareable = (prrr.nos0 == 0);
1333 break;
1334 case 1:
1335 curr_tr = prrr.tr1;
1336 curr_ir = nmrr.ir1;
1337 curr_or = nmrr.or1;
1338 te.outerShareable = (prrr.nos1 == 0);
1339 break;
1340 case 2:
1341 curr_tr = prrr.tr2;
1342 curr_ir = nmrr.ir2;
1343 curr_or = nmrr.or2;
1344 te.outerShareable = (prrr.nos2 == 0);
1345 break;
1346 case 3:
1347 curr_tr = prrr.tr3;
1348 curr_ir = nmrr.ir3;
1349 curr_or = nmrr.or3;
1350 te.outerShareable = (prrr.nos3 == 0);
1351 break;
1352 case 4:
1353 curr_tr = prrr.tr4;
1354 curr_ir = nmrr.ir4;
1355 curr_or = nmrr.or4;
1356 te.outerShareable = (prrr.nos4 == 0);
1357 break;
1358 case 5:
1359 curr_tr = prrr.tr5;
1360 curr_ir = nmrr.ir5;
1361 curr_or = nmrr.or5;
1362 te.outerShareable = (prrr.nos5 == 0);
1363 break;
1364 case 6:
1365 panic("Imp defined type\n");
1366 case 7:
1367 curr_tr = prrr.tr7;
1368 curr_ir = nmrr.ir7;
1369 curr_or = nmrr.or7;
1370 te.outerShareable = (prrr.nos7 == 0);
1371 break;
1372 }
1373
1374 switch(curr_tr) {
1375 case 0:
1376 DPRINTF(TLBVerbose, "StronglyOrdered\n");
1378 te.nonCacheable = true;
1379 te.innerAttrs = 1;
1380 te.outerAttrs = 0;
1381 te.shareable = true;
1382 break;
1383 case 1:
1384 DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n",
1385 prrr.ds1, prrr.ds0, s);
1387 te.nonCacheable = true;
1388 te.innerAttrs = 3;
1389 te.outerAttrs = 0;
1390 if (prrr.ds1 && s)
1391 te.shareable = true;
1392 if (prrr.ds0 && !s)
1393 te.shareable = true;
1394 break;
1395 case 2:
1396 DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n",
1397 prrr.ns1, prrr.ns0, s);
1399 if (prrr.ns1 && s)
1400 te.shareable = true;
1401 if (prrr.ns0 && !s)
1402 te.shareable = true;
1403 break;
1404 case 3:
1405 panic("Reserved type");
1406 }
1407
1408 if (te.mtype == TlbEntry::MemoryType::Normal){
1409 switch(curr_ir) {
1410 case 0:
1411 te.nonCacheable = true;
1412 te.innerAttrs = 0;
1413 break;
1414 case 1:
1415 te.innerAttrs = 5;
1416 break;
1417 case 2:
1418 te.innerAttrs = 6;
1419 break;
1420 case 3:
1421 te.innerAttrs = 7;
1422 break;
1423 }
1424
1425 switch(curr_or) {
1426 case 0:
1427 te.nonCacheable = true;
1428 te.outerAttrs = 0;
1429 break;
1430 case 1:
1431 te.outerAttrs = 1;
1432 break;
1433 case 2:
1434 te.outerAttrs = 2;
1435 break;
1436 case 3:
1437 te.outerAttrs = 3;
1438 break;
1439 }
1440 }
1441 }
1442 DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, "
1443 "outerAttrs: %d\n",
1444 te.shareable, te.innerAttrs, te.outerAttrs);
1445 te.setAttributes(false);
1446}
1447
1448void
1449TableWalker::memAttrsLPAE(ThreadContext *tc, TlbEntry &te,
1450 LongDescriptor &l_descriptor)
1451{
1452 assert(release->has(ArmExtension::LPAE));
1453
1454 uint8_t attr;
1455 uint8_t sh = l_descriptor.sh();
1456 // Different format and source of attributes if this is a stage 2
1457 // translation
1458 if (isStage2) {
1459 attr = l_descriptor.memAttr();
1460 uint8_t attr_3_2 = (attr >> 2) & 0x3;
1461 uint8_t attr_1_0 = attr & 0x3;
1462
1463 DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh);
1464
1465 if (attr_3_2 == 0) {
1466 te.mtype = attr_1_0 == 0 ? TlbEntry::MemoryType::StronglyOrdered
1468 te.outerAttrs = 0;
1469 te.innerAttrs = attr_1_0 == 0 ? 1 : 3;
1470 te.nonCacheable = true;
1471 } else {
1473 te.outerAttrs = attr_3_2 == 1 ? 0 :
1474 attr_3_2 == 2 ? 2 : 1;
1475 te.innerAttrs = attr_1_0 == 1 ? 0 :
1476 attr_1_0 == 2 ? 6 : 5;
1477 te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1);
1478 }
1479 } else {
1480 uint8_t attrIndx = l_descriptor.attrIndx();
1481
1482 // LPAE always uses remapping of memory attributes, irrespective of the
1483 // value of SCTLR.TRE
1484 MiscRegIndex reg = attrIndx & 0x4 ? MISCREG_MAIR1 : MISCREG_MAIR0;
1485 int reg_as_int = snsBankedIndex(reg, currState->tc,
1487 uint32_t mair = currState->tc->readMiscReg(reg_as_int);
1488 attr = (mair >> (8 * (attrIndx % 4))) & 0xff;
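// e.g. (illustrative): attrIndx == 5 selects MAIR1 and byte 1 of that
// register, i.e. attr == mair[15:8].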
1489 uint8_t attr_7_4 = bits(attr, 7, 4);
1490 uint8_t attr_3_0 = bits(attr, 3, 0);
1491 DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx, sh, attr);
1492
1493 // Note: the memory subsystem only cares about the 'cacheable' memory
1494 // attribute. The other attributes are only used to fill the PAR register
1495 // accordingly, to provide the illusion of full support
1496 te.nonCacheable = false;
1497
1498 switch (attr_7_4) {
1499 case 0x0:
1500 // Strongly-ordered or Device memory
1501 if (attr_3_0 == 0x0)
1503 else if (attr_3_0 == 0x4)
1505 else
1506 panic("Unpredictable behavior\n");
1507 te.nonCacheable = true;
1508 te.outerAttrs = 0;
1509 break;
1510 case 0x4:
1511 // Normal memory, Outer Non-cacheable
1513 te.outerAttrs = 0;
1514 if (attr_3_0 == 0x4)
1515 // Inner Non-cacheable
1516 te.nonCacheable = true;
1517 else if (attr_3_0 < 0x8)
1518 panic("Unpredictable behavior\n");
1519 break;
1520 case 0x8:
1521 case 0x9:
1522 case 0xa:
1523 case 0xb:
1524 case 0xc:
1525 case 0xd:
1526 case 0xe:
1527 case 0xf:
1528 if (attr_7_4 & 0x4) {
1529 te.outerAttrs = (attr_7_4 & 1) ? 1 : 3;
1530 } else {
1531 te.outerAttrs = 0x2;
1532 }
1533 // Normal memory, Outer Cacheable
1535 if (attr_3_0 != 0x4 && attr_3_0 < 0x8)
1536 panic("Unpredictable behavior\n");
1537 break;
1538 default:
1539 panic("Unpredictable behavior\n");
1540 break;
1541 }
1542
1543 switch (attr_3_0) {
1544 case 0x0:
1545 te.innerAttrs = 0x1;
1546 break;
1547 case 0x4:
1548 te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0;
1549 break;
1550 case 0x8:
1551 case 0x9:
1552 case 0xA:
1553 case 0xB:
1554 te.innerAttrs = 6;
1555 break;
1556 case 0xC:
1557 case 0xD:
1558 case 0xE:
1559 case 0xF:
1560 te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7;
1561 break;
1562 default:
1563 panic("Unpredictable behavior\n");
1564 break;
1565 }
1566 }
1567
1568 te.outerShareable = sh == 2;
1569 te.shareable = (sh & 0x2) ? true : false;
1570 te.setAttributes(true);
1571 te.attributes |= (uint64_t) attr << 56;
1572}
1573
1574void
1575TableWalker::memAttrsAArch64(ThreadContext *tc, TlbEntry &te,
1576 LongDescriptor &l_descriptor)
1577{
1578 uint8_t attr;
1579 uint8_t attr_hi;
1580 uint8_t attr_lo;
1581 uint8_t sh = l_descriptor.sh();
1582
1583 if (isStage2) {
1584 attr = l_descriptor.memAttr();
1585 uint8_t attr_hi = (attr >> 2) & 0x3;
1586 uint8_t attr_lo = attr & 0x3;
1587
1588 DPRINTF(TLBVerbose, "memAttrsAArch64 MemAttr:%#x sh:%#x\n", attr, sh);
1589
1590 if (attr_hi == 0) {
1591 te.mtype = attr_lo == 0 ? TlbEntry::MemoryType::StronglyOrdered
1593 te.outerAttrs = 0;
1594 te.innerAttrs = attr_lo == 0 ? 1 : 3;
1595 te.nonCacheable = true;
1596 } else {
1598 te.outerAttrs = attr_hi == 1 ? 0 :
1599 attr_hi == 2 ? 2 : 1;
1600 te.innerAttrs = attr_lo == 1 ? 0 :
1601 attr_lo == 2 ? 6 : 5;
1602 // Treat write-through memory as uncacheable; this is safe,
1603 // but not optimal for performance.
1604 te.nonCacheable = (attr_hi == 1) || (attr_hi == 2) ||
1605 (attr_lo == 1) || (attr_lo == 2);
1606 }
1607 } else {
1608 uint8_t attrIndx = l_descriptor.attrIndx();
1609
1610 DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh);
1612
1613 // Select MAIR
1614 uint64_t mair;
1615 switch (regime) {
1616 case EL0:
1617 case EL1:
1618 mair = tc->readMiscReg(MISCREG_MAIR_EL1);
1619 break;
1620 case EL2:
1621 mair = tc->readMiscReg(MISCREG_MAIR_EL2);
1622 break;
1623 case EL3:
1624 mair = tc->readMiscReg(MISCREG_MAIR_EL3);
1625 break;
1626 default:
1627 panic("Invalid exception level");
1628 break;
1629 }
1630
1631 // Select attributes
1632 attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
1633 attr_lo = bits(attr, 3, 0);
1634 attr_hi = bits(attr, 7, 4);
1635
1636 // Memory type
1638
1639 // Cacheability
1640 te.nonCacheable = false;
1641 if (te.mtype == TlbEntry::MemoryType::Device) { // Device memory
1642 te.nonCacheable = true;
1643 }
1644 // Treat write-through memory as uncacheable; this is safe,
1645 // but not optimal for performance.
1646 switch (attr_hi) {
1647 case 0x1 ... 0x3: // Normal Memory, Outer Write-through transient
1648 case 0x4: // Normal memory, Outer Non-cacheable
1649 case 0x8 ... 0xb: // Normal Memory, Outer Write-through non-transient
1650 te.nonCacheable = true;
1651 }
1652 switch (attr_lo) {
1653 case 0x1 ... 0x3: // Normal Memory, Inner Write-through transient
1654 case 0x9 ... 0xb: // Normal Memory, Inner Write-through non-transient
1655 warn_if(!attr_hi, "Unpredictable behavior");
1656 [[fallthrough]];
1657 case 0x4: // Device-nGnRE memory or
1658 // Normal memory, Inner Non-cacheable
1659 case 0x8: // Device-nGRE memory or
1660 // Normal memory, Inner Write-through non-transient
1661 te.nonCacheable = true;
1662 }
1663
1664 te.shareable = sh == 2;
1665 te.outerShareable = (sh & 0x2) ? true : false;
1666 // Attributes formatted according to the 64-bit PAR
1667 te.attributes = ((uint64_t) attr << 56) |
1668 (1 << 11) | // LPAE bit
1669 (te.ns << 9) | // NS bit
1670 (sh << 7);
1671 }
1672}
1673
1674void
1675TableWalker::doL1Descriptor()
1676{
1677 if (currState->fault != NoFault) {
1678 return;
1679 }
1680
1683
1684 DPRINTF(TLB, "L1 descriptor for %#x is %#x\n",
1685 currState->vaddr_tainted, currState->l1Desc.data);
1686 TlbEntry te;
1687
1688 const bool is_atomic = currState->req->isAtomic();
1689
1690 switch (currState->l1Desc.type()) {
1691 case L1Descriptor::Ignore:
1692 case L1Descriptor::Reserved:
1693 if (!currState->timing) {
1694 currState->tc = NULL;
1695 currState->req = NULL;
1696 }
1697 DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
1698 if (currState->isFetch)
1699 currState->fault =
1700 std::make_shared<PrefetchAbort>(
1702 ArmFault::TranslationLL + LookupLevel::L1,
1703 isStage2,
1705 else
1706 currState->fault =
1707 std::make_shared<DataAbort>(
1710 is_atomic ? false : currState->isWrite,
1711 ArmFault::TranslationLL + LookupLevel::L1, isStage2,
1713 return;
1714 case L1Descriptor::Section:
1715 if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) {
1721 currState->fault = std::make_shared<DataAbort>(
1724 is_atomic ? false : currState->isWrite,
1725 ArmFault::AccessFlagLL + LookupLevel::L1,
1726 isStage2,
1728 }
1729 if (currState->l1Desc.supersection()) {
1730 panic("Haven't implemented supersections\n");
1731 }
1732 insertTableEntry(currState->l1Desc, false);
1733 return;
1734 case L1Descriptor::PageTable:
1735 {
1736 Addr l2desc_addr;
1737 l2desc_addr = currState->l1Desc.l2Addr() |
1738 (bits(currState->vaddr, 19, 12) << 2);
1739 DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n",
1740 l2desc_addr, currState->isSecure ? "s" : "ns");
1741
1742 // Trickbox address check
1743 currState->fault = testWalk(l2desc_addr, sizeof(uint32_t),
1745 LookupLevel::L2, isStage2);
1746
1747 if (currState->fault) {
1748 if (!currState->timing) {
1749 currState->tc = NULL;
1750 currState->req = NULL;
1751 }
1752 return;
1753 }
1754
1755 Request::Flags flag = Request::PT_WALK;
1756
1757 if (currState->sctlr.c == 0 || currState->isUncacheable) {
1758 flag.set(Request::UNCACHEABLE);
1759 }
1760
1761 if (currState->isSecure)
1762 flag.set(Request::SECURE);
1763
1764 bool delayed;
1765 delayed = fetchDescriptor(l2desc_addr,
1766 (uint8_t*)&currState->l2Desc.data,
1767 sizeof(uint32_t), flag, -1, &doL2DescEvent,
1768 &TableWalker::doL2Descriptor);
1769 if (delayed) {
1770 currState->delayed = true;
1771 }
1772
1773 return;
1774 }
1775 default:
1776 panic("A new type in a 2 bit field?\n");
1777 }
1778}
1779
1780Fault
1781TableWalker::generateLongDescFault(ArmFault::FaultSource src)
1782{
1783 if (currState->isFetch) {
1784 return std::make_shared<PrefetchAbort>(
1787 isStage2,
1789 } else {
1790 return std::make_shared<DataAbort>(
1793 currState->req->isAtomic() ? false : currState->isWrite,
1795 isStage2,
1797 }
1798}
1799
1800void
1801TableWalker::doLongDescriptor()
1802{
1803 if (currState->fault != NoFault) {
1804 return;
1805 }
1806
1809
1810 DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n",
1813 currState->aarch64 ? "AArch64" : "long-desc.");
1814
1817 DPRINTF(PageTableWalker, "Analyzing L%d descriptor: %#llx, pxn: %d, "
1818 "xn: %d, ap: %d, af: %d, type: %d\n",
1826 } else {
1827 DPRINTF(PageTableWalker, "Analyzing L%d descriptor: %#llx, type: %d\n",
1831 }
1832
1833 TlbEntry te;
1834
1835 switch (currState->longDesc.type()) {
1836 case LongDescriptor::Invalid:
1837 DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
1840
1842 if (!currState->timing) {
1843 currState->tc = NULL;
1844 currState->req = NULL;
1845 }
1846 return;
1847
1848 case LongDescriptor::Block:
1849 case LongDescriptor::Page:
1850 {
1851 auto fault_source = ArmFault::FaultSourceInvalid;
1852 // Check for address size fault
1855
1856 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1858 fault_source = ArmFault::AddressSizeLL;
1859
1860 // Check for access fault
1861 } else if (currState->longDesc.af() == 0) {
1862
1863 DPRINTF(TLB, "L%d descriptor causing Access Fault\n",
1865 fault_source = ArmFault::AccessFlagLL;
1866 }
1867
1868 if (fault_source != ArmFault::FaultSourceInvalid) {
1869 currState->fault = generateLongDescFault(fault_source);
1870 } else {
1871 insertTableEntry(currState->longDesc, true);
1872 }
1873 }
1874 return;
1875 case LongDescriptor::Table:
1876 {
1877 // Set hierarchical permission flags
1888
1889 // Set up next level lookup
1890 Addr next_desc_addr = currState->longDesc.nextDescAddr(
1891 currState->vaddr);
1892
1893 DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
1896 next_desc_addr,
1897 currState->secureLookup ? "s" : "ns");
1898
1899 // Check for address size fault
1901 next_desc_addr, currState->physAddrRange)) {
1902 DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1904
1907 return;
1908 }
1909
1910 // Trickbox address check
1912 next_desc_addr, sizeof(uint64_t), TlbEntry::DomainType::Client,
1914
1915 if (currState->fault) {
1916 if (!currState->timing) {
1917 currState->tc = NULL;
1918 currState->req = NULL;
1919 }
1920 return;
1921 }
1922
1923 if (mmu->hasWalkCache()) {
1924 insertPartialTableEntry(currState->longDesc);
1925 }
1926
1927
1928 Request::Flags flag = Request::PT_WALK;
1929 if (currState->secureLookup)
1930 flag.set(Request::SECURE);
1931
1932 if (currState->sctlr.c == 0 || currState->isUncacheable) {
1933 flag.set(Request::UNCACHEABLE);
1934 }
1935
1936 LookupLevel L = currState->longDesc.lookupLevel =
1937 (LookupLevel) (currState->longDesc.lookupLevel + 1);
1938 Event *event = NULL;
1939 switch (L) {
1940 case LookupLevel::L1:
1941 assert(currState->aarch64);
1942 case LookupLevel::L2:
1943 case LookupLevel::L3:
1944 event = LongDescEventByLevel[L];
1945 break;
1946 default:
1947 panic("Wrong lookup level in table walk\n");
1948 break;
1949 }
1950
1951 bool delayed;
1952 delayed = fetchDescriptor(next_desc_addr, (uint8_t*)&currState->longDesc.data,
1953 sizeof(uint64_t), flag, -1, event,
1954 &TableWalker::doLongDescriptor);
1955 if (delayed) {
1956 currState->delayed = true;
1957 }
1958 }
1959 return;
1960 default:
1961 panic("A new type in a 2 bit field?\n");
1962 }
1963}
1964
1965void
1966TableWalker::doL2Descriptor()
1967{
1968 if (currState->fault != NoFault) {
1969 return;
1970 }
1971
1974
1975 DPRINTF(TLB, "L2 descriptor for %#x is %#x\n",
1976 currState->vaddr_tainted, currState->l2Desc.data);
1977 TlbEntry te;
1978
1979 const bool is_atomic = currState->req->isAtomic();
1980
1981 if (currState->l2Desc.invalid()) {
1982 DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
1983 if (!currState->timing) {
1984 currState->tc = NULL;
1985 currState->req = NULL;
1986 }
1987 if (currState->isFetch)
1988 currState->fault = std::make_shared<PrefetchAbort>(
1990 ArmFault::TranslationLL + LookupLevel::L2,
1991 isStage2,
1993 else
1994 currState->fault = std::make_shared<DataAbort>(
1996 is_atomic ? false : currState->isWrite,
1997 ArmFault::TranslationLL + LookupLevel::L2,
1998 isStage2,
2000 return;
2001 }
2002
2003 if (currState->sctlr.afe && bits(currState->l2Desc.ap(), 0) == 0) {
2007 DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
2008 currState->sctlr.afe, currState->l2Desc.ap());
2009
2010 currState->fault = std::make_shared<DataAbort>(
2013 is_atomic ? false : currState->isWrite,
2014 ArmFault::AccessFlagLL + LookupLevel::L2, isStage2,
2016 }
2017
2018 insertTableEntry(currState->l2Desc, false);
2019}
2020
2021void
2022TableWalker::doL1DescriptorWrapper()
2023{
2024 currState = stateQueues[LookupLevel::L1].front();
2025 currState->delayed = false;
2026 // if there's a stage2 translation object we don't need it any more
2027 if (currState->stage2Tran) {
2028 delete currState->stage2Tran;
2029 currState->stage2Tran = NULL;
2030 }
2031
2032
2033 DPRINTF(PageTableWalker, "L1 Desc object host addr: %p\n",
2034 &currState->l1Desc.data);
2035 DPRINTF(PageTableWalker, "L1 Desc object data: %08x\n",
2036 currState->l1Desc.data);
2037
2038 DPRINTF(PageTableWalker, "calling doL1Descriptor for vaddr:%#x\n",
2039 currState->vaddr_tainted);
2040 doL1Descriptor();
2041
2042 stateQueues[LookupLevel::L1].pop_front();
2043 // Check if fault was generated
2044 if (currState->fault != NoFault) {
2048
2049 pending = false;
2051
2052 currState->req = NULL;
2053 currState->tc = NULL;
2054 currState->delayed = false;
2055 delete currState;
2056 }
2057 else if (!currState->delayed) {
2058 // delay is not set so there is no L2 to do
2059 // Don't finish the translation if a stage 2 look up is underway
2061 DPRINTF(PageTableWalker, "calling translateTiming again\n");
2062
2066
2068
2069 pending = false;
2071
2072 currState->req = NULL;
2073 currState->tc = NULL;
2074 currState->delayed = false;
2075 delete currState;
2076 } else {
2077 // need to do L2 descriptor
2078 stateQueues[LookupLevel::L2].push_back(currState);
2079 }
2080 currState = NULL;
2081}
2082
2083void
2084TableWalker::doL2DescriptorWrapper()
2085{
2086 currState = stateQueues[LookupLevel::L2].front();
2087 assert(currState->delayed);
2088 // if there's a stage2 translation object we don't need it any more
2089 if (currState->stage2Tran) {
2090 delete currState->stage2Tran;
2091 currState->stage2Tran = NULL;
2092 }
2093
2094 DPRINTF(PageTableWalker, "calling doL2Descriptor for vaddr:%#x\n",
2095 currState->vaddr_tainted);
2096 doL2Descriptor();
2097
2098 // Check if fault was generated
2099 if (currState->fault != NoFault) {
2103 } else {
2105 DPRINTF(PageTableWalker, "calling translateTiming again\n");
2106
2110
2112 }
2113
2114
2115 stateQueues[LookupLevel::L2].pop_front();
2116 pending = false;
2118
2119 currState->req = NULL;
2120 currState->tc = NULL;
2121 currState->delayed = false;
2122
2123 delete currState;
2124 currState = NULL;
2125}
2126
2127void
2128TableWalker::doL0LongDescriptorWrapper()
2129{
2130 doLongDescriptorWrapper(LookupLevel::L0);
2131}
2132
2133void
2134TableWalker::doL1LongDescriptorWrapper()
2135{
2136 doLongDescriptorWrapper(LookupLevel::L1);
2137}
2138
2139void
2140TableWalker::doL2LongDescriptorWrapper()
2141{
2142 doLongDescriptorWrapper(LookupLevel::L2);
2143}
2144
2145void
2146TableWalker::doL3LongDescriptorWrapper()
2147{
2148 doLongDescriptorWrapper(LookupLevel::L3);
2149}
2150
2151void
2152TableWalker::doLongDescriptorWrapper(LookupLevel curr_lookup_level)
2153{
2154 currState = stateQueues[curr_lookup_level].front();
2155 assert(curr_lookup_level == currState->longDesc.lookupLevel);
2156 currState->delayed = false;
2157
2158 // if there's a stage2 translation object we don't need it any more
2159 if (currState->stage2Tran) {
2160 delete currState->stage2Tran;
2161 currState->stage2Tran = NULL;
2162 }
2163
2164 DPRINTF(PageTableWalker, "calling doLongDescriptor for vaddr:%#x\n",
2165 currState->vaddr_tainted);
2166 doLongDescriptor();
2167
2168 stateQueues[curr_lookup_level].pop_front();
2169
2170 if (currState->fault != NoFault) {
2171 // A fault was generated
2174
2175 pending = false;
2177
2178 currState->req = NULL;
2179 currState->tc = NULL;
2180 currState->delayed = false;
2181 delete currState;
2182 } else if (!currState->delayed) {
2183 // No additional lookups required
2184 DPRINTF(PageTableWalker, "calling translateTiming again\n");
2186
2190
2191 stats.walksLongTerminatedAtLevel[(unsigned) curr_lookup_level]++;
2192
2193 pending = false;
2195
2196 currState->req = NULL;
2197 currState->tc = NULL;
2198 currState->delayed = false;
2199 delete currState;
2200 } else {
2201 if (curr_lookup_level >= LookupLevel::Num_ArmLookupLevel - 1)
2202 panic("Max. number of lookups already reached in table walk\n");
2203 // Need to perform additional lookups
2205 }
2206 currState = NULL;
2207}
2208
2209
2210void
2211TableWalker::nextWalk(ThreadContext *tc)
2212{
2213 if (pendingQueue.size())
2214 schedule(doProcessEvent, clockEdge(Cycles(1)));
2215 else
2216 completeDrain();
2217}
2218
2219bool
2220TableWalker::fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes,
2221 Request::Flags flags, int queueIndex, Event *event,
2222 void (TableWalker::*doDescriptor)())
2223{
2224 bool isTiming = currState->timing;
2225
2226 DPRINTF(PageTableWalker,
2227 "Fetching descriptor at address: 0x%x stage2Req: %d\n",
2228 descAddr, currState->stage2Req);
2229
2230 // If this translation has a stage 2 then we know descAddr is an IPA and
2231 // needs to be translated before we can access the page table. Do that
2232 // check here.
2233 if (currState->stage2Req) {
2234 Fault fault;
2235
2236 if (isTiming) {
2237 auto *tran = new
2240 currState->stage2Tran = tran;
2241 readDataTimed(currState->tc, descAddr, tran, numBytes, flags);
2242 fault = tran->fault;
2243 } else {
2244 fault = readDataUntimed(currState->tc,
2245 currState->vaddr, descAddr, data, numBytes, flags,
2246 currState->mode,
2249 }
2250
2251 if (fault != NoFault) {
2252 currState->fault = fault;
2253 }
2254 if (isTiming) {
2255 if (queueIndex >= 0) {
2256 DPRINTF(PageTableWalker, "Adding to walker fifo: "
2257 "queue size before adding: %d\n",
2258 stateQueues[queueIndex].size());
2259 stateQueues[queueIndex].push_back(currState);
2260 currState = NULL;
2261 }
2262 } else {
2263 (this->*doDescriptor)();
2264 }
2265 } else {
2266 if (isTiming) {
2267 port->sendTimingReq(descAddr, numBytes, data, flags,
2268 currState->tc->getCpuPtr()->clockPeriod(), event);
2269
2270 if (queueIndex >= 0) {
2271 DPRINTF(PageTableWalker, "Adding to walker fifo: "
2272 "queue size before adding: %d\n",
2273 stateQueues[queueIndex].size());
2274 stateQueues[queueIndex].push_back(currState);
2275 currState = NULL;
2276 }
2277 } else if (!currState->functional) {
2278 port->sendAtomicReq(descAddr, numBytes, data, flags,
2279 currState->tc->getCpuPtr()->clockPeriod());
2280
2281 (this->*doDescriptor)();
2282 } else {
2283 port->sendFunctionalReq(descAddr, numBytes, data, flags);
2284 (this->*doDescriptor)();
2285 }
2286 }
2287 return (isTiming);
2288}
2289
2290void
2291TableWalker::insertPartialTableEntry(LongDescriptor &descriptor)
2292{
2293 const bool have_security = release->has(ArmExtension::SECURITY);
2294 TlbEntry te;
2295
2296 // Create and fill a new page table entry
2297 te.valid = true;
2298 te.longDescFormat = true;
2299 te.partial = true;
2300 // The entry is global if there is no address space identifier
2301 // to differentiate translation contexts
2302 te.global = !mmu->hasUnprivRegime(
2303 currState->el, currState->hcr.e2h);
2304 te.isHyp = currState->isHyp;
2305 te.asid = currState->asid;
2306 te.vmid = currState->vmid;
2307 te.N = descriptor.offsetBits();
2308 te.vpn = currState->vaddr >> te.N;
2309 te.size = (1ULL << te.N) - 1;
2310 te.pfn = descriptor.nextTableAddr();
2311 te.domain = descriptor.domain();
2312 te.lookupLevel = descriptor.lookupLevel;
2313 te.ns = !descriptor.secure(have_security, currState);
2314 te.nstid = !currState->isSecure;
2315 te.type = TypeTLB::unified;
2316
2317 if (currState->aarch64)
2318 te.el = currState->el;
2319 else
2320 te.el = EL1;
2321
2322 te.xn = currState->xnTable;
2323 te.pxn = currState->pxnTable;
2324 te.ap = (currState->rwTable << 1) | (currState->userTable);
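// The two hierarchical table bits are folded into an AP-style encoding:
// bit 1 <- rwTable (writeable), bit 0 <- userTable (user accessible).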
2325
2326 // Debug output
2327 DPRINTF(TLB, descriptor.dbgHeader().c_str());
2328 DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
2329 te.N, te.pfn, te.size, te.global, te.valid);
2330 DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
2331 "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
2332 te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp,
2333 te.nonCacheable, te.ns);
2334 DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
2335 descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
2336 descriptor.getRawData());
2337
2338 // Insert the entry into the TLBs
2339 tlb->multiInsert(te);
2340}
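// Note: a partial entry caches an intermediate table address rather than a
// final page mapping: te.partial is set and te.pfn holds nextTableAddr(), so
// a later walk that hits this entry can resume from descriptor.lookupLevel
// instead of restarting at the top-level table.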
2341
2342void
2343TableWalker::insertTableEntry(DescriptorBase &descriptor, bool long_descriptor)
2344{
2345 const bool have_security = release->has(ArmExtension::SECURITY);
2346 TlbEntry te;
2347
2348 // Create and fill a new page table entry
2349 te.valid = true;
2350 te.longDescFormat = long_descriptor;
2351 te.isHyp = currState->isHyp;
2352 te.asid = currState->asid;
2353 te.vmid = currState->vmid;
2354 te.N = descriptor.offsetBits();
2355 te.vpn = currState->vaddr >> te.N;
2356    te.size = (1ULL << te.N) - 1;
2357 te.pfn = descriptor.pfn();
2358 te.domain = descriptor.domain();
2359 te.lookupLevel = descriptor.lookupLevel;
2360 te.ns = !descriptor.secure(have_security, currState);
2361 te.nstid = !currState->isSecure;
2362 te.xn = descriptor.xn();
2363 te.type = currState->mode == BaseMMU::Execute ?
2364 TypeTLB::instruction : TypeTLB::data;
2365
2366 if (currState->aarch64)
2367 te.el = currState->el;
2368 else
2369 te.el = EL1;
2370
2371    stats.pageSizes[pageSizeNtoStatBin(te.N)]++;
2372    stats.requestOrigin[COMPLETED][currState->isFetch]++;
2373
2374 // ASID has no meaning for stage 2 TLB entries, so mark all stage 2 entries
2375 // as global
2376 te.global = descriptor.global(currState) || isStage2;
2377 if (long_descriptor) {
2378 LongDescriptor l_descriptor =
2379 dynamic_cast<LongDescriptor &>(descriptor);
2380
2381 te.xn |= currState->xnTable;
2382 te.pxn = currState->pxnTable || l_descriptor.pxn();
2383 if (isStage2) {
2384            // this is actually the HAP field, but it is stored in the
2385            // same bit positions as the AP field in a stage 1 translation.
2386 te.hap = l_descriptor.ap();
2387 } else {
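            // Fold the hierarchical (table) permissions into the block
            // descriptor's AP bits: a clear rwTable forces the read-only
            // bit, and the EL0-access bit is kept only if userTable also
            // permits unprivileged access.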
2388 te.ap = ((!currState->rwTable || descriptor.ap() >> 1) << 1) |
2389 (currState->userTable && (descriptor.ap() & 0x1));
2390 }
2391 if (currState->aarch64)
2392 memAttrsAArch64(currState->tc, te, l_descriptor);
2393 else
2394 memAttrsLPAE(currState->tc, te, l_descriptor);
2395 } else {
2396 te.ap = descriptor.ap();
2397 memAttrs(currState->tc, te, currState->sctlr, descriptor.texcb(),
2398 descriptor.shareable());
2399 }
2400
2401 // Debug output
2402 DPRINTF(TLB, descriptor.dbgHeader().c_str());
2403 DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
2404 te.N, te.pfn, te.size, te.global, te.valid);
2405 DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
2406 "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
2407 te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp,
2408 te.nonCacheable, te.ns);
2409 DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
2410 descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
2411 descriptor.getRawData());
2412
2413 // Insert the entry into the TLBs
2414 tlb->multiInsert(te);
2415 if (!currState->timing) {
2416 currState->tc = NULL;
2417 currState->req = NULL;
2418 }
2419}
2420
2421TableWalker::LookupLevel
2422TableWalker::toLookupLevel(uint8_t lookup_level_as_int)
2423{
2424 switch (lookup_level_as_int) {
2425 case LookupLevel::L1:
2426 return LookupLevel::L1;
2427 case LookupLevel::L2:
2428 return LookupLevel::L2;
2429 case LookupLevel::L3:
2430 return LookupLevel::L3;
2431 default:
2432 panic("Invalid lookup level conversion");
2433 }
2434}
2435
2436/* This method keeps track of the table walker queue's residency, so it
2437 * needs to be called whenever requests start and complete. */
2438void
2439TableWalker::pendingChange()
2440{
2441 unsigned n = pendingQueue.size();
2442 if ((currState != NULL) && (currState != pendingQueue.front())) {
2443 ++n;
2444 }
2445
2446 if (n != pendingReqs) {
2447        Tick now = curTick();
2448        stats.pendingWalks.sample(pendingReqs, now - pendingChangeTick);
2449        pendingReqs = n;
2450 pendingChangeTick = now;
2451 }
2452}
2453
2454Fault
2455TableWalker::testWalk(Addr pa, Addr size, TlbEntry::DomainType domain,
2456    LookupLevel lookup_level, bool stage2)
2457{
2458 return mmu->testWalk(pa, size, currState->vaddr, currState->isSecure,
2459 currState->mode, domain, lookup_level, stage2);
2460}
2461
2462
2463uint8_t
2464TableWalker::pageSizeNtoStatBin(uint8_t N)
2465{
2466 /* for stats.pageSizes */
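    /* N is the bit width of the page offset, so each case maps a 2^N-byte
     * page (4 KiB up to 4 TiB) onto one bin of the pageSizes histogram. */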
2467 switch(N) {
2468 case 12: return 0; // 4K
2469 case 14: return 1; // 16K (using 16K granule in v8-64)
2470 case 16: return 2; // 64K
2471 case 20: return 3; // 1M
2472 case 21: return 4; // 2M-LPAE
2473 case 24: return 5; // 16M
2474 case 25: return 6; // 32M (using 16K granule in v8-64)
2475 case 29: return 7; // 512M (using 64K granule in v8-64)
2476 case 30: return 8; // 1G-LPAE
2477      case 42: return 9; // 4TiB
2478 default:
2479 panic("unknown page size");
2480 return 255;
2481 }
2482}
2483
2484Fault
2485TableWalker::readDataUntimed(ThreadContext *tc, Addr vaddr, Addr desc_addr,
2486    uint8_t *data, int num_bytes, Request::Flags flags, BaseMMU::Mode mode,
2487 MMU::ArmTranslationType tran_type, bool functional)
2488{
2489 Fault fault;
2490
2491 // translate to physical address using the second stage MMU
2492 auto req = std::make_shared<Request>();
2493 req->setVirt(desc_addr, num_bytes, flags | Request::PT_WALK,
2494 requestorId, 0);
2495
2496 if (functional) {
2497 fault = mmu->translateFunctional(req, tc, BaseMMU::Read,
2498 tran_type, true);
2499 } else {
2500 fault = mmu->translateAtomic(req, tc, BaseMMU::Read,
2501 tran_type, true);
2502 }
2503
2504 // Now do the access.
2505 if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
2506 Packet pkt = Packet(req, MemCmd::ReadReq);
2507 pkt.dataStatic(data);
2508 if (functional) {
2509 port->sendFunctional(&pkt);
2510 } else {
2511 port->sendAtomic(&pkt);
2512 }
2513 assert(!pkt.isError());
2514 }
2515
2516    // If there was a fault, annotate it with the flag saying the fault
2517    // occurred while doing a translation for a stage 1 page table walk.
2518 if (fault != NoFault) {
2519 ArmFault *arm_fault = reinterpret_cast<ArmFault *>(fault.get());
2520 arm_fault->annotate(ArmFault::S1PTW, true);
2521 arm_fault->annotate(ArmFault::OVA, vaddr);
2522 }
2523 return fault;
2524}
2525
2526void
2527TableWalker::readDataTimed(ThreadContext *tc, Addr desc_addr,
2528    Stage2Walk *translation, int num_bytes,
2529    Request::Flags flags)
2530{
2531 // translate to physical address using the second stage MMU
2532 translation->setVirt(
2533 desc_addr, num_bytes, flags | Request::PT_WALK, requestorId);
2534 translation->translateTiming(tc);
2535}
2536
2537TableWalker::Stage2Walk::Stage2Walk(TableWalker &_parent,
2538    uint8_t *_data, Event *_event, Addr vaddr, BaseMMU::Mode _mode,
2539 MMU::ArmTranslationType tran_type)
2540 : data(_data), numBytes(0), event(_event), parent(_parent),
2541 oVAddr(vaddr), mode(_mode), tranType(tran_type), fault(NoFault)
2542{
2543 req = std::make_shared<Request>();
2544}
2545
2546void
2547TableWalker::Stage2Walk::finish(const Fault &_fault,
2548    const RequestPtr &req,
2549    ThreadContext *tc, BaseMMU::Mode mode)
2550{
2551 fault = _fault;
2552
2553    // If there was a fault, annotate it with the flag saying the fault
2554    // occurred while doing a translation for a stage 1 page table walk.
2555 if (fault != NoFault) {
2556 ArmFault *arm_fault = reinterpret_cast<ArmFault *>(fault.get());
2557 arm_fault->annotate(ArmFault::S1PTW, true);
2558 arm_fault->annotate(ArmFault::OVA, oVAddr);
2559 }
2560
2561 if (_fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
2562 parent.getTableWalkerPort().sendTimingReq(
2563 req->getPaddr(), numBytes, data, req->getFlags(),
2564 tc->getCpuPtr()->clockPeriod(), event);
2565 } else {
2566 // We can't do the DMA access as there's been a problem, so tell the
2567 // event we're done
2568 event->process();
2569 }
2570}
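// When the stage 2 translation of the descriptor address faults, the fetch is
// skipped and the waiting event is processed immediately; the walker then
// picks the fault up from this object's fault member instead of receiving
// descriptor data.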
2571
2572void
2573TableWalker::Stage2Walk::translateTiming(ThreadContext *tc)
2574{
2575 parent.mmu->translateTiming(req, tc, this, mode, tranType, true);
2576}
2577
2578TableWalker::TableWalkerStats::TableWalkerStats(statistics::Group *parent)
2579    : statistics::Group(parent),
2580 ADD_STAT(walks, statistics::units::Count::get(),
2581 "Table walker walks requested"),
2582 ADD_STAT(walksShortDescriptor, statistics::units::Count::get(),
2583 "Table walker walks initiated with short descriptors"),
2584 ADD_STAT(walksLongDescriptor, statistics::units::Count::get(),
2585 "Table walker walks initiated with long descriptors"),
2586 ADD_STAT(walksShortTerminatedAtLevel, statistics::units::Count::get(),
2587 "Level at which table walker walks with short descriptors "
2588 "terminate"),
2589 ADD_STAT(walksLongTerminatedAtLevel, statistics::units::Count::get(),
2590 "Level at which table walker walks with long descriptors "
2591 "terminate"),
2592 ADD_STAT(squashedBefore, statistics::units::Count::get(),
2593 "Table walks squashed before starting"),
2594 ADD_STAT(squashedAfter, statistics::units::Count::get(),
2595 "Table walks squashed after completion"),
2596 ADD_STAT(walkWaitTime, statistics::units::Tick::get(),
2597 "Table walker wait (enqueue to first request) latency"),
2598 ADD_STAT(walkServiceTime, statistics::units::Tick::get(),
2599 "Table walker service (enqueue to completion) latency"),
2600 ADD_STAT(pendingWalks, statistics::units::Tick::get(),
2601 "Table walker pending requests distribution"),
2602 ADD_STAT(pageSizes, statistics::units::Count::get(),
2603 "Table walker page sizes translated"),
2604 ADD_STAT(requestOrigin, statistics::units::Count::get(),
2605 "Table walker requests started/completed, data/inst")
2606{
2607    walksShortDescriptor
2608        .flags(statistics::nozero);
2609
2610    walksLongDescriptor
2611        .flags(statistics::nozero);
2612
2613    walksShortTerminatedAtLevel
2614        .init(2)
2615        .flags(statistics::nozero);
2616
2617    walksShortTerminatedAtLevel.subname(0, "Level1");
2618    walksShortTerminatedAtLevel.subname(1, "Level2");
2619
2620    walksLongTerminatedAtLevel
2621        .init(4)
2622        .flags(statistics::nozero);
2623    walksLongTerminatedAtLevel.subname(0, "Level0");
2624    walksLongTerminatedAtLevel.subname(1, "Level1");
2625    walksLongTerminatedAtLevel.subname(2, "Level2");
2626    walksLongTerminatedAtLevel.subname(3, "Level3");
2627
2628    squashedBefore
2629        .flags(statistics::nozero);
2630
2631    squashedAfter
2632        .flags(statistics::nozero);
2633
2634    walkWaitTime
2635        .init(16)
2636        .flags(statistics::pdf | statistics::nozero | statistics::nonan);
2637
2638    walkServiceTime
2639        .init(16)
2640        .flags(statistics::pdf | statistics::nozero | statistics::nonan);
2641
2642    pendingWalks
2643        .init(16)
2644        .flags(statistics::pdf | statistics::dist | statistics::nozero |
2645               statistics::nonan);
2646
2647 pageSizes // see DDI 0487A D4-1661
2648        .init(10)
2649        .flags(statistics::total | statistics::pdf | statistics::dist |
2650               statistics::nozero);
2651    pageSizes.subname(0, "4KiB");
2652 pageSizes.subname(1, "16KiB");
2653 pageSizes.subname(2, "64KiB");
2654 pageSizes.subname(3, "1MiB");
2655 pageSizes.subname(4, "2MiB");
2656 pageSizes.subname(5, "16MiB");
2657 pageSizes.subname(6, "32MiB");
2658 pageSizes.subname(7, "512MiB");
2659 pageSizes.subname(8, "1GiB");
2660 pageSizes.subname(9, "4TiB");
2661
2662    requestOrigin
2663        .init(2,2) // Instruction/Data, requests/completed
2664        .flags(statistics::total);
2665    requestOrigin.subname(0,"Requested");
2666 requestOrigin.subname(1,"Completed");
2667 requestOrigin.ysubname(0,"Data");
2668 requestOrigin.ysubname(1,"Inst");
2669}
2670
2671} // namespace gem5