gem5 [DEVELOP-FOR-25.1]
Loading...
Searching...
No Matches
mmu.cc
Go to the documentation of this file.
1/*
2 * Copyright (c) 2010-2013, 2016-2024 Arm Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2001-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 */
40
41#include "arch/arm/mmu.hh"
42
43#include "arch/arm/isa.hh"
44#include "arch/arm/mpam.hh"
45#include "arch/arm/reg_abi.hh"
48#include "arch/arm/tlb.hh"
49#include "arch/arm/tlbi_op.hh"
50#include "debug/MMU.hh"
51#include "mem/packet_access.hh"
52#include "params/ArmMMU.hh"
53#include "sim/process.hh"
54#include "sim/pseudo_inst.hh"
55
56namespace gem5
57{
58
59using namespace ArmISA;
60
// MMU constructor: wires the stage-2 TLBs and the four table walkers from
// the Python-configured params, then caches system-wide properties
// (physical address range, ISA release) used throughout translation.
// NOTE(review): this listing is an HTML scrape; original lines 68 and 79
// were dropped — recover them from upstream gem5 before building.
61MMU::MMU(const ArmMMUParams &p)
62 : BaseMMU(p),
63 itbStage2(p.stage2_itb), dtbStage2(p.stage2_dtb),
64 itbWalker(p.itb_walker), dtbWalker(p.dtb_walker),
65 itbStage2Walker(p.stage2_itb_walker),
66 dtbStage2Walker(p.stage2_dtb_walker),
67 test(nullptr),
// NOTE(review): original line 68 missing here (scrape gap).
69 s1State(this, false), s2State(this, true),
70 _attr(0),
71 _release(nullptr),
72 _hasWalkCache(false),
73 stats(this)
74{
75 // Cache system-level properties
76 if (FullSystem) {
// FS mode: properties come from the ArmSystem object.
77 ArmSystem *arm_sys = dynamic_cast<ArmSystem *>(p.sys);
78 assert(arm_sys);
// NOTE(review): original line 79 missing here (scrape gap).
80 physAddrRange = arm_sys->physAddrRange();
81
82 _release = arm_sys->releaseFS();
83 } else {
// SE mode: no ArmSystem to query, so use fixed defaults.
84 haveLargeAsid64 = false;
85 physAddrRange = 48;
86
87 _release = p.release_se;
88 }
89
// Address range reserved for m5 pseudo-instruction accesses.
90 m5opRange = p.sys->m5opRange();
91}
92
// Post-construction wiring: point every table walker back at this MMU and
// attach the stage-2 walkers to the stage-2 TLBs.
// NOTE(review): scrape gaps — original line 94 (the function name line,
// presumably MMU::init()) and lines 104-105, 107, 109 (likely the stage-1
// walker hookup and the BaseMMU init call) are missing; confirm against
// upstream gem5.
93void
95{
96 itbWalker->setMmu(this);
97 dtbWalker->setMmu(this);
98 itbStage2Walker->setMmu(this);
99 dtbStage2Walker->setMmu(this);
100
101 itbStage2->setTableWalker(itbStage2Walker);
102 dtbStage2->setTableWalker(dtbStage2Walker);
103
// NOTE(review): original lines 104-105 missing here (scrape gap).
106
// NOTE(review): original line 107 missing here (scrape gap).
108
// NOTE(review): original line 109 missing here (scrape gap).
110}
111
// Returns true if any attached TLB (instruction, data or unified) is
// configured as a walk cache (caches intermediate table-walk results).
// NOTE(review): original line 113 (the function name line, presumably
// MMU::hasWalkCache()) was dropped by the scrape.
112bool
114{
115 for (auto tlb : instruction) {
116 if (static_cast<TLB*>(tlb)->walkCache())
117 return true;
118 }
119 for (auto tlb : data) {
120 if (static_cast<TLB*>(tlb)->walkCache())
121 return true;
122 }
123 for (auto tlb : unified) {
124 if (static_cast<TLB*>(tlb)->walkCache())
125 return true;
126 }
127
128 return false;
129}
130
// Invalidates the cached misc-register state for both translation stages
// so it is re-read on the next translation.
// NOTE(review): original line 132 (the function name line — presumably
// MMU::drainResume(), which re-syncs state after a drain) was dropped by
// the scrape; confirm against upstream gem5.
131void
133{
134 s1State.miscRegValid = false;
135 s2State.miscRegValid = false;
136}
137
// Accessor returning the data-side TLB downcast to the Arm TLB type.
// NOTE(review): original lines 138-139 (return type and name, presumably
// ArmISA::TLB * MMU::getDTBPtr() const) were dropped by the scrape.
140{
141 return static_cast<ArmISA::TLB *>(dtb);
142}
143
// Accessor returning the instruction-side TLB downcast to the Arm TLB type.
// NOTE(review): original lines 143-145 (return type and name, presumably
// ArmISA::TLB * MMU::getITBPtr() const) were dropped by the scrape.
146{
147 return static_cast<ArmISA::TLB *>(itb);
148}
149
150TLB *
151MMU::getTlb(BaseMMU::Mode mode, bool stage2) const
152{
153 if (mode == BaseMMU::Execute) {
154 if (stage2)
155 return itbStage2;
156 else
157 return getITBPtr();
158 } else {
159 if (stage2)
160 return dtbStage2;
161 else
162 return getDTBPtr();
163 }
164}
165
// Selects the table walker matching the access side (instruction vs data)
// and translation stage, mirroring the TLB selection in getTlb().
// NOTE(review): original lines 166-167 (return type and name, presumably
// TableWalker * MMU::getTableWalker(BaseMMU::Mode, bool) const) were
// dropped by the scrape.
168{
169 if (mode == BaseMMU::Execute) {
170 if (stage2)
171 return itbStage2Walker;
172 else
173 return itbWalker;
174 } else {
175 if (stage2)
176 return dtbStage2Walker;
177 else
178 return dtbWalker;
179 }
180}
181
// Functional (side-effect-free) VA->PA translation: refreshes the cached
// misc-register state, builds a TLB lookup key from it, and probes the
// TLB hierarchy. Returns false on a miss; on a hit writes the physical
// address to pa and returns true. No faults are raised and no walk is
// started (lookup_data.functional = true).
// NOTE(review): original line 183 (the name/parameter line, presumably
// MMU::translateFunctional(ThreadContext *tc, Addr va, Addr &pa)) was
// dropped by the scrape.
182bool
184{
185 CachedState& state = updateMiscReg(tc, NormalTran, false);
186
// Probe the D-side TLB; directToStage2 selects the stage-2 TLB when
// stage-1 translation is bypassed.
187 auto tlb = getTlb(BaseMMU::Read, state.directToStage2);
188
189 TlbEntry::KeyType lookup_data;
190
191 lookup_data.va = va;
192 lookup_data.asn = state.asid;
193 lookup_data.ignoreAsn = false;
194 lookup_data.vmid = state.vmid;
195 lookup_data.ss = state.securityState;
196 lookup_data.functional = true;
197 lookup_data.targetRegime = state.currRegime;
198 lookup_data.mode = BaseMMU::Read;
199
200 TlbEntry *e = tlb->multiLookup(lookup_data);
201
202 if (!e)
203 return false;
204 pa = e->pAddr(va);
205 return true;
206}
207
// Invalidates the cached stage-1 misc-register snapshot and flushes the
// memoized computeAddrTop caches for both stages, forcing re-evaluation
// on the next translation.
// NOTE(review): original line 209 (the function name line, presumably
// MMU::invalidateMiscReg()) was dropped by the scrape.
208void
210{
211 s1State.miscRegValid = false;
212 s1State.computeAddrTop.flush();
213 s2State.computeAddrTop.flush();
214}
215
216void
217MMU::flush(const TLBIOp &tlbi_op)
218{
219 if (tlbi_op.stage1Flush()) {
220 flushStage1(tlbi_op);
221 }
222
223 if (tlbi_op.stage2Flush()) {
224 flushStage2(tlbi_op);
225 }
226}
227
// Applies a TLB invalidation operation to every stage-1 TLB: the
// instruction-side, data-side, and unified TLBs.
// NOTE(review): original line 229 (the name line, presumably
// MMU::flushStage1(const TLBIOp &tlbi_op)) was dropped by the scrape.
228void
230{
231 for (auto tlb : instruction) {
232 static_cast<TLB*>(tlb)->flush(tlbi_op);
233 }
234 for (auto tlb : data) {
235 static_cast<TLB*>(tlb)->flush(tlbi_op);
236 }
237 for (auto tlb : unified) {
238 static_cast<TLB*>(tlb)->flush(tlbi_op);
239 }
240}
241
// Applies a TLB invalidation operation to both stage-2 TLBs.
// NOTE(review): original line 243 (the name line, presumably
// MMU::flushStage2(const TLBIOp &tlbi_op)) was dropped by the scrape.
242void
244{
245 itbStage2->flush(tlbi_op);
246 dtbStage2->flush(tlbi_op);
247}
248
249void
250MMU::iflush(const TLBIOp &tlbi_op)
251{
252 for (auto tlb : instruction) {
253 static_cast<TLB*>(tlb)->flush(tlbi_op);
254 }
255 for (auto tlb : unified) {
256 static_cast<TLB*>(tlb)->flush(tlbi_op);
257 }
258}
259
260void
261MMU::dflush(const TLBIOp &tlbi_op)
262{
263 for (auto tlb : data) {
264 static_cast<TLB*>(tlb)->flush(tlbi_op);
265 }
266 for (auto tlb : unified) {
267 static_cast<TLB*>(tlb)->flush(tlbi_op);
268 }
269}
270
// Flushes every entry from the stage-2 TLBs (and, per upstream, the base
// class flushes the stage-1 TLBs).
// NOTE(review): scrape gaps — original line 272 (the name line, presumably
// MMU::flushAll()) and line 274 (presumably the BaseMMU::flushAll() call)
// are missing; confirm against upstream gem5.
271void
273{
275 itbStage2->flushAll();
276 dtbStage2->flushAll();
277}
278
279
// Final step of a translation: tags the request for MPAM, gives the
// (optional) memory tester a chance to inject an address fault, and if
// none fires, finalizes the physical address (m5op detection etc.).
// NOTE(review): scrape gaps — original lines 281-282 (the name/parameter
// lines, presumably MMU::testAndFinalize(const RequestPtr&, ThreadContext*,
// Mode, ...)) and line 287 are missing; confirm against upstream gem5.
280Fault
283 TlbEntry* te, CachedState &state) const
284{
285 // If we don't have a valid tlb entry it means virtual memory
286 // is not enabled
// NOTE(review): original line 287 missing here (scrape gap).
288
289 mpam::tagRequest(tc, req, mode == Execute);
290
291 // Check for a tester generated address fault
292 Fault fault = testTranslation(req, mode, domain, state);
293 if (fault != NoFault) {
294 return fault;
295 } else {
296 // Now that we checked no fault has been generated in the
297 // translation process, we can finalize the physical address
298 return finalizePhysical(req, tc, mode);
299 }
300}
301
// Checks whether the translated physical address falls in the m5op
// pseudo-instruction range; if so, installs a local accessor so the access
// executes the decoded pseudo-instruction instead of touching memory.
// Reads return the pseudo-instruction result via the packet.
// NOTE(review): scrape gaps — original line 303 (the name/parameter line)
// and lines 316/318 (the two pseudo-instruction invocations for AArch64
// and AArch32 register ABIs) are missing; confirm against upstream gem5.
302Fault
304 ThreadContext *tc, Mode mode) const
305{
306 const Addr paddr = req->getPaddr();
307
308 if (m5opRange.contains(paddr)) {
// Decode the m5op function number from the address offset.
309 uint8_t func;
310 pseudo_inst::decodeAddrOffset(paddr - m5opRange.start(), func);
311 req->setLocalAccessor(
312 [func, mode](ThreadContext *tc, PacketPtr pkt) -> Cycles
313 {
314 uint64_t ret;
315 if (inAArch64(tc))
// NOTE(review): original line 316 missing here (scrape gap).
317 else
// NOTE(review): original line 318 missing here (scrape gap).
319
320 if (mode == Read)
321 pkt->setLE(ret);
322
323 return Cycles(1);
324 }
325 );
326 }
327
328 return NoFault;
329}
330
331
// Syscall-emulation-mode translation: strips address tags (AArch64),
// enforces alignment, then maps the VA through the process's emulation
// page table. No hardware table walk is performed in SE mode.
// NOTE(review): scrape gaps — original lines 333 (name/parameter line),
// 337, and 358-359 (trailing DataAbort constructor arguments) are missing;
// confirm against upstream gem5.
332Fault
334 Translation *translation, bool &delay, bool timing,
335 CachedState &state)
336{
// NOTE(review): original line 337 missing here (scrape gap).
338 Addr vaddr_tainted = req->getVaddr();
339 Addr vaddr = 0;
340 if (state.aarch64) {
// Strip the tag bits from the VA per TCR configuration.
341 vaddr = purifyTaggedAddr(vaddr_tainted, tc, state.exceptionLevel,
342 static_cast<TCR>(state.ttbcr), mode==Execute, state);
343 } else {
344 vaddr = vaddr_tainted;
345 }
346 Request::Flags flags = req->getFlags();
347
348 bool is_fetch = (mode == Execute);
349 bool is_write = (mode == Write);
350
351 if (!is_fetch) {
// Alignment is checked when strict alignment is enabled (sctlr.a)
// or the access does not permit unaligned addresses.
352 if (state.sctlr.a || !(flags & AllowUnaligned)) {
353 if (vaddr & mask(flags & AlignmentMask)) {
354 // LPAE is always disabled in SE mode
355 return std::make_shared<DataAbort>(
356 vaddr_tainted,
357 DomainType::NoAccess, is_write,
// NOTE(review): original lines 358-359 missing here (scrape gap).
360 }
361 }
362 }
363
// Look up the VA in the process's emulation page table.
364 Process *p = tc->getProcessPtr();
365 if (const auto pte = p->pTable->lookup(vaddr); !pte) {
366 return std::make_shared<GenericPageTableFault>(vaddr_tainted);
367 } else {
368 req->setPaddr(pte->paddr + p->pTable->pageOffset(vaddr));
369
370 if (pte->flags & EmulationPageTable::Uncacheable)
371 req->setFlags(Request::UNCACHEABLE);
372
373 return finalizePhysical(req, tc, mode);
374 }
375}
376
// Returns the address with tag bits stripped when running AArch64 (per the
// current TCR configuration), or the address unchanged for AArch32.
// NOTE(review): original line 378 (the name/parameter line, presumably
// MMU::getValidAddr(Addr vaddr, ThreadContext *tc, Mode mode)) was dropped
// by the scrape.
377Addr
379{
380 auto& state = updateMiscReg(tc, NormalTran, false);
381 Addr purified_vaddr = 0;
382 if (state.aarch64) {
383 purified_vaddr = purifyTaggedAddr(vaddr, tc, state.exceptionLevel,
384 static_cast<TCR>(state.ttbcr), mode==Execute, state);
385 } else {
386 purified_vaddr = vaddr;
387 }
388 return purified_vaddr;
389}
390
// Convenience overload: dispatches the AArch32 permission check to the
// cached state of the requested translation stage.
// NOTE(review): original line 392 (the name/parameter line, presumably
// MMU::checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode,)
// was dropped by the scrape.
391Fault
393 bool stage2)
394{
395 return checkPermissions(te, req, mode, stage2 ? s2State : s1State);
396}
397
// AArch32 permission check for a completed translation: applies (in order)
// the cache-maintenance exemption, HCR.PTW device-walk check, device
// alignment check, I/O prefetch check, short-descriptor domain check, the
// AP[2:0] access-permission decode, and finally the stage-2 HAP and
// execute-never (XN/PXN/WXN/SIF) checks. Returns NoFault or the matching
// Prefetch/Data abort.
// NOTE(review): scrape gaps — original lines 399 (name/parameter line),
// 416, 438, 450, and 553 are missing; confirm against upstream gem5.
398Fault
400 CachedState &state)
401{
402 // a data cache maintenance instruction that operates by MVA does
403 // not generate a Data Abort exeception due to a Permission fault
404 if (req->isCacheMaintenance()) {
405 return NoFault;
406 }
407
408 Addr vaddr = req->getVaddr(); // 32-bit don't have to purify
409 Request::Flags flags = req->getFlags();
410 bool is_fetch = (mode == Execute);
411 bool is_write = (mode == Write);
412 bool is_priv = state.isPriv && !(flags & UserMode);
413
414 // Get the translation type from the actuall table entry
415 TranMethod tran_method = te->longDescFormat ?
// NOTE(review): original line 416 missing here (scrape gap).
417
418 // If this is the second stage of translation and the request is for a
419 // stage 1 page table walk then we need to check the HCR.PTW bit. This
420 // allows us to generate a fault if the request targets an area marked
421 // as a device or strongly ordered.
422 if (state.isStage2 && req->isPTWalk() && state.hcr.ptw &&
423 (te->mtype != TlbEntry::MemoryType::Normal)) {
424 return std::make_shared<DataAbort>(
425 vaddr, te->domain, is_write,
426 ArmFault::PermissionLL + te->lookupLevel,
427 state.isStage2, tran_method);
428 }
429
430 // Generate an alignment fault for unaligned data accesses to device or
431 // strongly ordered memory
432 if (!is_fetch) {
433 if (te->mtype != TlbEntry::MemoryType::Normal) {
434 if (vaddr & mask(flags & AlignmentMask)) {
435 stats.alignFaults++;
436 return std::make_shared<DataAbort>(
437 vaddr, DomainType::NoAccess, is_write,
// NOTE(review): original line 438 missing here (scrape gap).
439 tran_method);
440 }
441 }
442 }
443
444 if (te->nonCacheable) {
445 // Prevent prefetching from I/O devices.
446 if (req->isPrefetch()) {
447 // Here we can safely use the fault status for the short
448 // desc. format in all cases
449 return std::make_shared<PrefetchAbort>(
// NOTE(review): original line 450 missing here (scrape gap).
451 state.isStage2, tran_method);
452 }
453 }
454
// Short-descriptor format only: DACR domain check.
455 if (!te->longDescFormat) {
456 switch ((state.dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
457 case 0:
458 stats.domainFaults++;
459 DPRINTF(MMU, "MMU Fault: Data abort on domain. DACR: %#x"
460 " domain: %#x write:%d\n", state.dacr,
461 static_cast<uint8_t>(te->domain), is_write);
462 if (is_fetch) {
463 // Use PC value instead of vaddr because vaddr might
464 // be aligned to cache line and should not be the
465 // address reported in FAR
466 return std::make_shared<PrefetchAbort>(
467 req->getPC(),
468 ArmFault::DomainLL + te->lookupLevel,
469 state.isStage2, tran_method);
470 } else
471 return std::make_shared<DataAbort>(
472 vaddr, te->domain, is_write,
473 ArmFault::DomainLL + te->lookupLevel,
474 state.isStage2, tran_method);
475 case 1:
476 // Continue with permissions check
477 break;
478 case 2:
479 panic("UNPRED domain\n");
480 case 3:
// Manager domain: access is allowed without permission checks.
481 return NoFault;
482 }
483 }
484
485 // The 'ap' variable is AP[2:0] or {AP[2,1],1b'0}, i.e. always three bits
486 uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
487 uint8_t hap = te->hap;
488
489 if (state.sctlr.afe == 1 || te->longDescFormat)
490 ap |= 1;
491
492 bool abt;
493 bool isWritable = true;
494 // If this is a stage 2 access (eg for reading stage 1 page table entries)
495 // then don't perform the AP permissions check, we stil do the HAP check
496 // below.
497 if (state.isStage2) {
498 abt = false;
499 } else {
500 switch (ap) {
501 case 0:
502 DPRINTF(MMU, "Access permissions 0, checking rs:%#x\n",
503 (int)state.sctlr.rs);
504 if (!state.sctlr.xp) {
// Legacy (pre-VMSAv6) behaviour controlled by SCTLR.RS.
505 switch ((int)state.sctlr.rs) {
506 case 2:
507 abt = is_write;
508 break;
509 case 1:
510 abt = is_write || !is_priv;
511 break;
512 case 0:
513 case 3:
514 default:
515 abt = true;
516 break;
517 }
518 } else {
519 abt = true;
520 }
521 break;
522 case 1:
523 abt = !is_priv;
524 break;
525 case 2:
526 abt = !is_priv && is_write;
527 isWritable = is_priv;
528 break;
529 case 3:
530 abt = false;
531 break;
532 case 4:
533 panic("UNPRED premissions\n");
534 case 5:
535 abt = !is_priv || is_write;
536 isWritable = false;
537 break;
538 case 6:
539 case 7:
540 abt = is_write;
541 isWritable = false;
542 break;
543 default:
544 panic("Unknown permissions %#x\n", ap);
545 }
546 }
547
// Stage-2 hypervisor access permission (HAP) and execute-never checks.
548 bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
549 bool xn = te->xn || (isWritable && state.sctlr.wxn) ||
550 (ap == 3 && state.sctlr.uwxn && is_priv);
551 if (is_fetch && (abt || xn ||
552 (te->longDescFormat && te->pxn && is_priv) ||
// NOTE(review): original line 553 missing here (scrape gap).
554 te->ns && state.scr.sif))) {
555 stats.permsFaults++;
556 DPRINTF(MMU, "MMU Fault: Prefetch abort on permission check. AP:%d "
557 "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d \n",
558 ap, is_priv, is_write, te->ns,
559 state.scr.sif, state.sctlr.afe);
560 // Use PC value instead of vaddr because vaddr might be aligned to
561 // cache line and should not be the address reported in FAR
562 return std::make_shared<PrefetchAbort>(
563 req->getPC(),
564 ArmFault::PermissionLL + te->lookupLevel,
565 state.isStage2, tran_method);
566 } else if (abt | hapAbt) {
567 stats.permsFaults++;
568 DPRINTF(MMU, "MMU Fault: Data abort on permission check. AP:%d priv:%d"
569 " write:%d\n", ap, is_priv, is_write);
570 return std::make_shared<DataAbort>(
571 vaddr, te->domain, is_write,
572 ArmFault::PermissionLL + te->lookupLevel,
573 state.isStage2 | !abt, tran_method);
574 }
575 return NoFault;
576}
577
// Convenience overload: dispatches the AArch64 permission check to the
// cached state of the requested translation stage.
// NOTE(review): original line 579 (the name/parameter line, presumably
// MMU::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,)
// was dropped by the scrape.
578Fault
580 ThreadContext *tc, bool stage2)
581{
582 return checkPermissions64(te, req, mode, tc, stage2 ? s2State : s1State);
583}
584
// AArch64 permission check: applies the cache-clean exemption, HCR.PTW
// device-walk check, device alignment check and I/O prefetch check, then
// delegates to s2PermBits64 (stage 2, HAP bits) or s1PermBits64 (stage 1,
// direct or indirect permissions) and raises the matching abort if access
// is not granted.
// NOTE(review): scrape gaps — original lines 586 (name/parameter line),
// 618, 629-632, 644-645, 674-675 and 684 (mostly trailing fault-constructor
// arguments) are missing; confirm against upstream gem5.
585Fault
587 ThreadContext *tc, CachedState &state)
588{
589 assert(state.aarch64);
590
591 // A data cache maintenance instruction that operates by VA does
592 // not generate a Permission fault unless:
593 // * It is a data cache invalidate (dc ivac) which requires write
594 // permissions to the VA, or
595 // * It is executed from EL0
596 if (req->isCacheClean() && state.exceptionLevel != EL0 && !state.isStage2) {
597 return NoFault;
598 }
599
600 Addr vaddr_tainted = req->getVaddr();
601 Request::Flags flags = req->getFlags();
602 bool is_fetch = (mode == Execute);
603 // Cache clean operations require read permissions to the specified VA
604 bool is_write = !req->isCacheClean() && mode == Write;
605 bool is_atomic = req->isAtomic();
606
607 updateMiscReg(tc, state.curTranType, state.isStage2);
608
609 // If this is the second stage of translation and the request is for a
610 // stage 1 page table walk then we need to check the HCR.PTW bit. This
611 // allows us to generate a fault if the request targets an area marked
612 // as a device or strongly ordered.
613 if (state.isStage2 && req->isPTWalk() && state.hcr.ptw &&
614 (te->mtype != TlbEntry::MemoryType::Normal)) {
615 return std::make_shared<DataAbort>(
616 vaddr_tainted, te->domain, is_write,
617 ArmFault::PermissionLL + te->lookupLevel,
// NOTE(review): original line 618 missing here (scrape gap).
619 }
620
621 // Generate an alignment fault for unaligned accesses to device or
622 // strongly ordered memory
623 if (!is_fetch) {
624 if (te->mtype != TlbEntry::MemoryType::Normal) {
625 if (vaddr_tainted & mask(flags & AlignmentMask)) {
626 stats.alignFaults++;
627 return std::make_shared<DataAbort>(
628 vaddr_tainted,
// NOTE(review): original line 629 missing here (scrape gap).
630 is_atomic ? false : is_write,
// NOTE(review): original lines 631-632 missing here (scrape gap).
633 }
634 }
635 }
636
637 if (te->nonCacheable) {
638 // Prevent prefetching from I/O devices.
639 if (req->isPrefetch()) {
640 // Here we can safely use the fault status for the short
641 // desc. format in all cases
642 return std::make_shared<PrefetchAbort>(
643 vaddr_tainted,
// NOTE(review): original lines 644-645 missing here (scrape gap).
646 }
647 }
648
649 bool grant = false;
650 // grant_read is used for faults from an atomic instruction that
651 // both reads and writes from a memory location. From a ISS point
652 // of view they count as read if a read to that address would have
653 // generated the fault; they count as writes otherwise
654 bool grant_read = true;
655
656 if (state.isStage2) {
657 std::tie(grant, grant_read) = s2PermBits64(te, req, mode, tc, state,
658 (!is_write && !is_fetch), is_write, is_fetch);
659 } else {
660 std::tie(grant, grant_read) = s1PermBits64(te, req, mode, tc, state,
661 (!is_write && !is_fetch), is_write, is_fetch);
662 }
663
664 if (!grant) {
665 if (is_fetch) {
666 stats.permsFaults++;
667 DPRINTF(MMU, "MMU Fault: Prefetch abort on permission check. "
668 "ns:%d scr.sif:%d sctlr.afe: %d\n",
669 te->ns, state.scr.sif, state.sctlr.afe);
670 // Use PC value instead of vaddr because vaddr might be aligned to
671 // cache line and should not be the address reported in FAR
672 return std::make_shared<PrefetchAbort>(
673 req->getPC(),
674 ArmFault::PermissionLL + te->lookupLevel,
// NOTE(review): original line 675 missing here (scrape gap).
676 } else {
677 stats.permsFaults++;
678 DPRINTF(MMU, "MMU Fault: Data abort on permission check."
679 " ns:%d\n", te->ns);
680 return std::make_shared<DataAbort>(
681 vaddr_tainted, te->domain,
682 (is_atomic && !grant_read) ? false : is_write,
683 ArmFault::PermissionLL + te->lookupLevel,
// NOTE(review): original line 684 missing here (scrape gap).
685 }
686 }
687
688 return NoFault;
689}
690
// Stage-2 permission decode: grants access from the hypervisor access
// permission (HAP) bits of the entry, forcing execute-never when SCR.SIF
// forbids secure instruction fetch from non-secure memory. Returns
// {grant, grant_read} as used by checkPermissions64.
// NOTE(review): scrape gaps — original lines 691-692 (return type and
// name, presumably std::pair<bool, bool> MMU::s2PermBits64(TlbEntry*,
// const RequestPtr&, Mode,) and line 708 are missing; confirm upstream.
693 ThreadContext *tc, CachedState &state, bool r, bool w, bool x)
694{
695 assert(ArmSystem::haveEL(tc, EL2) && state.exceptionLevel != EL2);
696
697 // In stage 2 we use the hypervisor access permission bits.
698 // The following permissions are described in ARM DDI 0487A.f
699 // D4-1802
700 bool grant = false;
701 bool grant_read = te->hap & 0b01;
702 bool grant_write = te->hap & 0b10;
703
704 uint8_t xn = te->xn;
705 uint8_t pxn = te->pxn;
706
707 if (ArmSystem::haveEL(tc, EL3) &&
// NOTE(review): original line 708 missing here (scrape gap).
709 te->ns && state.scr.sif) {
710 xn = true;
711 }
712
713 DPRINTF(MMU,
714 "Checking S2 permissions: hap:%d, xn:%d, pxn:%d, r:%d, "
715 "w:%d, x:%d\n", te->hap, xn, pxn, r, w, x);
716
717 if (x) {
718 grant = !xn;
719 } else if (req->isAtomic()) {
// Atomics read and write; either permission lets the access proceed
// here, with the precise fault type resolved by the caller.
720 grant = grant_read || grant_write;
721 } else if (w) {
722 grant = grant_write;
723 } else if (r) {
724 grant = grant_read;
725 } else {
726 panic("Invalid Operation\n");
727 }
728
729 return std::make_pair(grant, grant_read);
730}
731
// Stage-1 indirect permission decode (FEAT_S1PIE): selects a 4-bit
// permission encoding from PIR_ELx/PIRE0_ELx using the entry's permission
// indirection index, decodes it into read/write/execute grants for the
// privileged and (where the regime has one) unprivileged views, applies
// PAN and WXN, and masks execution per SCR.SIF for secure accesses to
// non-secure memory. Returns {read, write, exec} grants.
// NOTE(review): original line 733 (the name/parameter continuation line)
// was dropped by the scrape; confirm against upstream gem5.
732std::tuple<bool, bool, bool>
734 ThreadContext *tc, CachedState &state,
735 bool r, bool w, bool x)
736{
737 const bool is_priv = state.isPriv && !(req->getFlags() & UserMode);
738 TranslationRegime regime = !is_priv ? TranslationRegime::EL10 :
739 state.currRegime;
740
741 bool wxn = false;
742 uint8_t xn = te->xn;
743 uint8_t pxn = te->pxn;
744
745 // Read PIR corresponding to target configuration.
746 uint8_t piindex = te->piindex;
747 uint8_t ppi = bits(state.pir, 4 * piindex + 3, 4 * piindex);
748 uint8_t upi = bits(state.pire0, 4 * piindex + 3, 4 * piindex);
749
750 DPRINTF(MMU, "Checking S1 indirect permissions: "
751 "piindex:%d, ppi:%d, xn:%d, pxn:%d, r:%d, "
752 "w:%d, x:%d, is_priv: %d, wxn: %d\n", piindex, ppi,
753 xn, pxn, r, w, x, is_priv, wxn);
754
755 // Indirect permission check.
756 // Decode indirect permissions
757 bool pr = false;
758 bool pw = false;
759 bool px = false;
760 bool p_wxn = ppi == 0b0110;
761 switch (ppi) {
762 // No access
763 case 0b0000: pr = 0; pw = 0; px = 0; /* pgcs = 0; */ break;
764 // Privileged read
765 case 0b0001: pr = 1; pw = 0; px = 0; /* pgcs = 0; */ break;
766 // Privileged execute
767 case 0b0010: pr = 0; pw = 0; px = 1; /* pgcs = 0; */ break;
768 // Privileged read and execute
769 case 0b0011: pr = 1; pw = 0; px = 1; /* pgcs = 0; */ break;
770 // Reserved
771 case 0b0100: pr = 0; pw = 0; px = 0; /* pgcs = 0; */ break;
772 // Privileged read and write
773 case 0b0101: pr = 1; pw = 1; px = 0; /* pgcs = 0; */ break;
774 // Privileged read, write and execute
775 case 0b0110: pr = 1; pw = 1; px = 1; /* pgcs = 0; */ break;
776 // Privileged read, write and execute
777 case 0b0111: pr = 1; pw = 1; px = 1; /* pgcs = 0; */ break;
778 // Privileged read
779 case 0b1000: pr = 1; pw = 0; px = 0; /* pgcs = 0; */ break;
780 // Privileged read and gcs
781 case 0b1001: pr = 1; pw = 0; px = 0; /* pgcs = 1; */ break;
782 // Privileged read and execute
783 case 0b1010: pr = 1; pw = 0; px = 1; /* pgcs = 0; */ break;
784 // Reserved
785 case 0b1011: pr = 0; pw = 0; px = 0; /* pgcs = 0; */ break;
786 // Privileged read and write
787 case 0b1100: pr = 1; pw = 1; px = 0; /* pgcs = 0; */ break;
788 // Reserved
789 case 0b1101: pr = 0; pw = 0; px = 0; /* pgcs = 0; */ break;
790 // Privileged read, write and execute
791 case 0b1110: pr = 1; pw = 1; px = 1; /* pgcs = 0; */ break;
792 // Reserved
793 case 0b1111: pr = 0; pw = 0; px = 0; /* pgcs = 0; */ break;
794 }
795
796 bool grant_read;
797 bool grant_write;
798 bool grant_exec;
799 if (hasUnprivRegime(regime)) {
// Regimes with an EL0 view also decode the unprivileged encoding.
800 bool ur = false;
801 bool uw = false;
802 bool ux = false;
803 bool u_wxn = upi == 0b0110;
804 switch (upi) {
805 // No access
806 case 0b0000: ur = 0; uw = 0; ux = 0; /* ugcs = 0; */ break;
807 // Unprivileged read
808 case 0b0001: ur = 1; uw = 0; ux = 0; /* ugcs = 0; */ break;
809 // Unprivileged execute
810 case 0b0010: ur = 0; uw = 0; ux = 1; /* ugcs = 0; */ break;
811 // Unprivileged read and execute
812 case 0b0011: ur = 1; uw = 0; ux = 1; /* ugcs = 0; */ break;
813 // Reserved
814 case 0b0100: ur = 0; uw = 0; ux = 0; /* ugcs = 0; */ break;
815 // Unprivileged read and write
816 case 0b0101: ur = 1; uw = 1; ux = 0; /* ugcs = 0; */ break;
817 // Unprivileged read, write and execute
818 case 0b0110: ur = 1; uw = 1; ux = 1; /* ugcs = 0; */ break;
819 // Unprivileged read, write and execute
820 case 0b0111: ur = 1; uw = 1; ux = 1; /* ugcs = 0; */ break;
821 // Unprivileged read
822 case 0b1000: ur = 1; uw = 0; ux = 0; /* ugcs = 0; */ break;
823 // Unprivileged read and gcs
824 case 0b1001: ur = 1; uw = 0; ux = 0; /* ugcs = 1; */ break;
825 // Unprivileged read and execute
826 case 0b1010: ur = 1; uw = 0; ux = 1; /* ugcs = 0; */ break;
827 // Reserved
828 case 0b1011: ur = 0; uw = 0; ux = 0; /* ugcs = 0; */ break;
829 // Unprivileged read and write
830 case 0b1100: ur = 1; uw = 1; ux = 0; /* ugcs = 0; */ break;
831 // Reserved
832 case 0b1101: ur = 0; uw = 0; ux = 0; /* ugcs = 0; */ break;
833 // Unprivileged read,write and execute
834 case 0b1110: ur = 1; uw = 1; ux = 1; /* ugcs = 0; */ break;
835 // Reserved
836 case 0b1111: ur = 0; uw = 0; ux = 0; /* ugcs = 0; */ break;
837 }
838
839 // PAN does not affect CMOs other than DC ZVA
840 bool pan_access = !req->isCacheMaintenance() ||
841 req->getFlags() & Request::CACHE_BLOCK_ZERO;
842
// With PAN set, privileged accesses to any EL0-accessible page
// (upi != 0) lose their read/write grants.
843 if (_release->has(ArmExtension::FEAT_PAN) && pan_access) {
844 if (state.cpsr.pan && upi != 0) {
845 pr = false;
846 pw = false;
847 }
848 }
849
850 grant_read = is_priv ? pr : ur;
851 grant_write = is_priv ? pw : uw;
852 grant_exec = is_priv ? px : ux;
853 wxn = is_priv ? p_wxn : u_wxn;
854 } else {
855 grant_read = pr;
856 grant_write = pw;
857 grant_exec = px;
858 wxn = p_wxn;
859 }
860
861 // Do not allow execution from writable location
862 // if wxn is set
863 grant_exec = grant_exec && !(wxn && grant_write);
864
865 if (ArmSystem::haveEL(tc, EL3) &&
866 state.securityState == SecurityState::Secure && te->ns) {
867 grant_exec = grant_exec && !state.scr.sif;
868 }
869
870 return std::make_tuple(grant_read, grant_write, grant_exec);
871}
872
// Stage-1 direct (classic AP[2:1]) permission decode: derives read/write
// grants for the privileged and unprivileged views from the 2-bit AP
// field, applies XN/PXN, PAN, WXN, and masks execution per SCR.SIF for
// secure accesses to non-secure memory. Returns {read, write, exec}.
// NOTE(review): original line 874 (the name/parameter line) was dropped
// by the scrape; confirm against upstream gem5.
873std::tuple<bool, bool, bool>
875 ThreadContext *tc, CachedState &state,
876 bool r, bool w, bool x)
877{
878 const uint8_t ap = te->ap & 0b11; // 2-bit access protection field
879 const bool is_priv = state.isPriv && !(req->getFlags() & UserMode);
880
881 bool wxn = state.sctlr.wxn;
882 uint8_t xn = te->xn;
883 uint8_t pxn = te->pxn;
884
885 DPRINTF(MMU, "Checking S1 direct permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
886 "w:%d, x:%d, is_priv: %d, wxn: %d\n", ap, xn,
887 pxn, r, w, x, is_priv, wxn);
888
889 bool grant_read;
890 bool grant_write;
891 bool grant_exec;
892
893 TranslationRegime regime = !is_priv ? TranslationRegime::EL10 :
894 state.currRegime;
895 // Traditional (direct AP-based) permission check.
896 if (hasUnprivRegime(regime)) {
897 bool pr = false;
898 bool pw = false;
899 bool ur = false;
900 bool uw = false;
901 // Apply leaf permissions
902 switch (ap) {
903 case 0b00: // Privileged access
904 pr = 1; pw = 1; ur = 0; uw = 0;
905 break;
906 case 0b01: // No effect
907 pr = 1; pw = 1; ur = 1; uw = 1;
908 break;
909 case 0b10: // Read-only, privileged access
910 pr = 1; pw = 0; ur = 0; uw = 0;
911 break;
912 case 0b11: // Read-only
913 pr = 1; pw = 0; ur = 1; uw = 0;
914 break;
915 }
916
917 // Locations writable by unprivileged cannot be executed by
918 // privileged
919 const bool px = !(pxn || uw);
920 const bool ux = !xn;
921
922 // PAN does not affect CMOs other than DC ZVA
923 bool pan_access = !req->isCacheMaintenance() ||
924 req->getFlags() & Request::CACHE_BLOCK_ZERO;
925
926 if (_release->has(ArmExtension::FEAT_PAN) && pan_access) {
// PAN removes privileged read/write to EL0-accessible pages.
927 bool pan = state.cpsr.pan && (ur || uw);
928 pr = pr && !pan;
929 pw = pw && !pan;
930 }
931
932 grant_read = is_priv ? pr : ur;
933 grant_write = is_priv ? pw : uw;
934 grant_exec = is_priv ? px : ux;
935 } else {
// Single-privilege regimes: only AP[1] (read-only bit) matters.
936 switch (bits(ap, 1)) {
937 case 0b0: // No effect
938 grant_read = 1; grant_write = 1;
939 break;
940 case 0b1: // Read-Only
941 grant_read = 1; grant_write = 0;
942 break;
943 }
944 grant_exec = !xn;
945 }
946
947 // Do not allow execution from writable location
948 // if wxn is set
949 grant_exec = grant_exec && !(wxn && grant_write);
950
951 if (ArmSystem::haveEL(tc, EL3) &&
952 state.securityState == SecurityState::Secure && te->ns) {
953 grant_exec = grant_exec && !state.scr.sif;
954 }
955
956 return std::make_tuple(grant_read, grant_write, grant_exec);
957}
958
// Stage-1 permission front-end: picks the indirect (FEAT_S1PIE, when
// state.pie is set) or direct decode, then collapses the three grants
// into a single grant for the requested access kind. Atomics require
// both read and write. Returns {grant, grant_read}.
// NOTE(review): original lines 959-960 (return type and name, presumably
// std::pair<bool, bool> MMU::s1PermBits64(TlbEntry*, const RequestPtr&,
// Mode,) were dropped by the scrape.
961 ThreadContext *tc, CachedState &state,
962 bool r, bool w, bool x)
963{
964 bool grant = false;
965 bool grant_read = true, grant_write = true, grant_exec = true;
966
967 // Use indirect permissions (FEAT_S1PIE) when enabled, otherwise
967 // the classic direct AP-based decode.
968 if (state.pie) {
969 std::tie(grant_read, grant_write, grant_exec) =
970 s1IndirectPermBits64(te, req, mode, tc, state, r, w, x);
971 } else {
972 std::tie(grant_read, grant_write, grant_exec) =
973 s1DirectPermBits64(te, req, mode, tc, state, r, w, x);
974 }
975
976 if (x) {
977 grant = grant_exec;
978 } else if (req->isAtomic()) {
979 grant = grant_read && grant_write;
980 } else if (w) {
981 grant = grant_write;
982 } else {
983 grant = grant_read;
984 }
985
986 return std::make_pair(grant, grant_read);
987}
988
// Returns true when the given translation regime has an unprivileged
// (EL0) view, i.e. distinct privileged/unprivileged permissions apply.
// NOTE(review): scrape gaps — original line 990 (the name line, presumably
// MMU::hasUnprivRegime(TranslationRegime regime)) and lines 993-994 (the
// case labels, presumably TranslationRegime::EL10 / EL20) are missing;
// confirm against upstream gem5.
989bool
991{
992 switch (regime) {
995 return true;
996 default:
997 return false;
998 }
999}
1000
// Strips the pointer-tag bits from a VA: bit 55 selects the TTBR0/TTBR1
// half, the memoized computeAddrTop yields the top meaningful bit for
// that half, and maskTaggedAddr sign-extends/zeroes the bits above it.
// NOTE(review): original lines 1002-1003 (the name/parameter lines) were
// dropped by the scrape; confirm against upstream gem5.
1001Addr
1004 TCR tcr, bool is_inst, CachedState& state)
1005{
1006 const bool selbit = bits(vaddr_tainted, 55);
1007
1008 // Call the memoized version of computeAddrTop
1009 const auto topbit = state.computeAddrTop(tc, selbit, is_inst, tcr, el);
1010
1011 return maskTaggedAddr(vaddr_tainted, tc, el, topbit);
1012}
1013
// Translation with the MMU disabled: the PA equals the VA. Sets the
// security attribute from the processor's security state, performs the
// AArch64 out-of-range address-size check, derives cacheability/ordering
// from SCTLR/PRRR/NMRR, synthesizes a temporary TLB entry to publish the
// memory attributes, and finishes via testAndFinalize.
// NOTE(review): scrape gaps — original lines 1015 (name/parameter line),
// 1024, 1037-1038, 1041, 1043-1044, 1067 and 1073 are missing; confirm
// against upstream gem5.
1014Fault
1016 ArmTranslationType tran_type, Addr vaddr, bool long_desc_format,
1017 CachedState &state)
1018{
1019 bool is_fetch = (mode == Execute);
1020 bool is_atomic = req->isAtomic();
1021 req->setPaddr(vaddr);
1022 // When the MMU is off the security attribute corresponds to the
1023 // security state of the processor
// NOTE(review): original line 1024 (the condition line) missing here.
1025 req->setFlags(Request::SECURE);
1026 else
1027 req->clearFlags(Request::SECURE);
1028 if (state.aarch64) {
// Fault if any address bits above the PA range are set.
1029 bool selbit = bits(vaddr, 55);
1030 TCR tcr1 = tc->readMiscReg(MISCREG_TCR_EL1);
1031 int topbit = computeAddrTop(tc, selbit, is_fetch, tcr1, currEL(tc));
1032 int addr_sz = bits(vaddr, topbit, physAddrRange);
1033 if (addr_sz != 0){
1034 Fault f;
1035 if (is_fetch)
1036 f = std::make_shared<PrefetchAbort>(vaddr,
// NOTE(review): original lines 1037-1038 missing here (scrape gap).
1039 else
1040 f = std::make_shared<DataAbort>( vaddr,
// NOTE(review): original line 1041 missing here (scrape gap).
1042 is_atomic ? false : mode==Write,
// NOTE(review): original lines 1043-1044 missing here (scrape gap).
1045 return f;
1046 }
1047 }
1048
1049 // @todo: double check this (ARM ARM issue C B3.2.1)
1050 if (long_desc_format || state.sctlr.tre == 0 || state.nmrr.ir0 == 0 ||
1051 state.nmrr.or0 == 0 || state.prrr.tr0 != 0x2) {
1052 if (!req->isCacheMaintenance()) {
1053 req->setFlags(Request::UNCACHEABLE);
1054 }
1055 req->setFlags(Request::STRICT_ORDER);
1056 }
1057
1058 // Set memory attributes
1059 bool in_secure_state = state.securityState == SecurityState::Secure;
1060 TlbEntry temp_te;
1061 temp_te.ns = !in_secure_state;
// HCR.DC forces Normal memory at EL1/EL0, but is ignored when
// E2H+TGE route those ELs to the EL2 regime.
1062 bool dc = (HaveExt(tc, ArmExtension::FEAT_VHE) &&
1063 state.hcr.e2h == 1 && state.hcr.tge == 1) ? 0: state.hcr.dc;
1064 bool i_cacheability = state.sctlr.i && !state.sctlr.m;
1065 if (state.isStage2 || !dc || state.exceptionLevel == EL2) {
1066 temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
// NOTE(review): original line 1067 (the else-arm of the ternary)
// missing here (scrape gap).
1068 temp_te.innerAttrs = i_cacheability? 0x2: 0x0;
1069 temp_te.outerAttrs = i_cacheability? 0x2: 0x0;
1070 temp_te.shareable = true;
1071 temp_te.outerShareable = true;
1072 } else {
// NOTE(review): original line 1073 (temp_te.mtype assignment)
// missing here (scrape gap).
1074 temp_te.innerAttrs = 0x3;
1075 temp_te.outerAttrs = 0x3;
1076 temp_te.shareable = false;
1077 temp_te.outerShareable = false;
1078 }
1079 temp_te.setAttributes(long_desc_format);
1080 DPRINTF(MMU, "(No MMU) setting memory attributes: shareable: "
1081 "%d, innerAttrs: %d, outerAttrs: %d, stage2: %d\n",
1082 temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
1083 state.isStage2);
1084 setAttr(temp_te.attributes);
1085
1086 return testAndFinalize(req, tc, mode, nullptr, state);
1087}
1088
// Translation with the MMU enabled: obtains the (possibly merged
// stage-1+2) TLB entry via getResultTe, and — once the translation is
// complete — copies the entry's memory attributes onto the request, sets
// the PA and security flag, enforces device-memory alignment, and runs
// the tester/finalization step. Sets delay when a timing walk is pending.
// NOTE(review): scrape gaps — original lines 1090 (name/parameter line),
// 1109 (presumably the `if (isCompleteTranslation(te)) {` opener whose
// closing brace is at line 1151) and 1145 are missing; confirm upstream.
1089Fault
1091 Translation *translation, bool &delay, bool timing,
1092 bool functional, Addr vaddr,
1093 TranMethod tran_method, CachedState &state)
1094{
1095 TlbEntry *te = NULL;
1096 bool is_fetch = (mode == Execute);
1097 TlbEntry mergeTe;
1098
1099 Request::Flags flags = req->getFlags();
1100 Addr vaddr_tainted = req->getVaddr();
1101
1102 Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
1103 functional, &mergeTe, state);
1104 // only proceed if we have a valid table entry
1105 if (!isCompleteTranslation(te) && (fault == NoFault)) delay = true;
1106
1107 // If we have the table entry transfer some of the attributes to the
1108 // request that triggered the translation
// NOTE(review): original line 1109 missing here (scrape gap) — likely
// the guard `if (isCompleteTranslation(te)) {` matching line 1151.
1110 // Set memory attributes
1111 DPRINTF(MMU,
1112 "Setting memory attributes: shareable: %d, innerAttrs: %d, "
1113 "outerAttrs: %d, mtype: %d, stage2: %d\n",
1114 te->shareable, te->innerAttrs, te->outerAttrs,
1115 static_cast<uint8_t>(te->mtype), state.isStage2);
1116 setAttr(te->attributes);
1117
1118 if (te->nonCacheable && !req->isCacheMaintenance())
1119 req->setFlags(Request::UNCACHEABLE);
1120
1121 // Require requests to be ordered if the request goes to
1122 // strongly ordered or device memory (i.e., anything other
1123 // than normal memory requires strict order).
1124 if (te->mtype != TlbEntry::MemoryType::Normal)
1125 req->setFlags(Request::STRICT_ORDER);
1126
1127 Addr pa = te->pAddr(vaddr);
1128 req->setPaddr(pa);
1129
1130 if (state.securityState == SecurityState::Secure && !te->ns) {
1131 req->setFlags(Request::SECURE);
1132 } else {
1133 req->clearFlags(Request::SECURE);
1134 }
1135 if (!is_fetch && fault == NoFault &&
1136 (vaddr & mask(flags & AlignmentMask)) &&
1137 (te->mtype != TlbEntry::MemoryType::Normal)) {
1138 // Unaligned accesses to Device memory should always cause an
1139 // abort regardless of sctlr.a
1140 stats.alignFaults++;
1141 bool is_write = (mode == Write);
1142 return std::make_shared<DataAbort>(
1143 vaddr_tainted,
1144 DomainType::NoAccess, is_write,
// NOTE(review): original line 1145 missing here (scrape gap).
1146 tran_method);
1147 }
1148
1149 if (fault == NoFault)
1150 fault = testAndFinalize(req, tc, mode, te, state);
1151 }
1152
1153 return fault;
1154}
1155
/**
 * Full-system translation entry point. Sanitises the virtual address
 * (tag removal on AArch64), applies cacheability/alignment checks,
 * then dispatches to translateMmuOff() or translateMmuOn() depending
 * on whether the relevant translation stage is enabled.
 *
 * NOTE(review): this rendering dropped several original lines
 * (the function-name line and parts of a few statements); surviving
 * code is preserved verbatim below.
 */
Fault
    Translation *translation, bool &delay, bool timing,
    ArmTranslationType tran_type, bool functional,
    CachedState &state)
{
    // No such thing as a functional timing access
    assert(!(timing && functional));

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (state.aarch64) {
        // Strip the address tag (top-byte-ignore) before translating.
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, state.exceptionLevel,
            static_cast<TCR>(state.ttbcr), mode==Execute, state);
    } else {
        vaddr = vaddr_tainted;
    }
    Request::Flags flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool long_desc_format = state.aarch64 || longDescFormatInUse(tc);
    // Fault-reporting method tracks the descriptor format in use.
    // NOTE(review): the ?: continuation of this initializer is not
    // visible in this rendering — confirm Lpae vs Vmsa selection.
    TranMethod tran_method = long_desc_format ?

    DPRINTF(MMU,
            "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
            state.isPriv, flags & UserMode,
            tran_type & S1S2NsTran);

    DPRINTF(MMU, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
            "flags %#lx tranType 0x%x\n", vaddr_tainted, mode,
            state.isStage2, state.scr, state.sctlr, flags, tran_type);

    // With SCTLR.I/SCTLR.C clear, the corresponding accesses must be
    // treated as uncacheable and strictly ordered.
    if (!state.isStage2) {
        if ((req->isInstFetch() && (!state.sctlr.i)) ||
            ((!req->isInstFetch()) && (!state.sctlr.c))){
            if (!req->isCacheMaintenance()) {
                req->setFlags(Request::UNCACHEABLE);
            }
            req->setFlags(Request::STRICT_ORDER);
        }
    }
    if (!is_fetch) {
        // Alignment check: fault if SCTLR.A is set or the access does
        // not permit unaligned addresses.
        if (state.sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                stats.alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    DomainType::NoAccess, is_write,
                    tran_method);
            }
        }
    }

    // Effective HCR_EL2.VM: forced off when E2H+TGE route EL0/EL1 to
    // EL2, forced on when HCR_EL2.DC mandates stage-2 behaviour.
    bool vm = state.hcr.vm;
    if (HaveExt(tc, ArmExtension::FEAT_VHE) &&
        state.hcr.e2h == 1 && state.hcr.tge == 1)
        vm = 0;
    else if (state.hcr.dc == 1)
        vm = 1;

    Fault fault = NoFault;
    // If guest MMU is off or hcr.vm=0 go straight to stage2
    if ((state.isStage2 && !vm) || (!state.isStage2 && !state.sctlr.m)) {
        fault = translateMmuOff(tc, req, mode, tran_type, vaddr,
                                long_desc_format, state);
    } else {
        DPRINTF(MMU, "Translating %s=%#x context=%d\n",
                state.isStage2 ? "IPA" : "VA", vaddr_tainted, state.asid);
        // Translation enabled
        fault = translateMmuOn(tc, req, mode, translation, delay, timing,
                               functional, vaddr, tran_method, state);
    }

    // Check for Debug Exceptions
    // NOTE(review): the line fetching the SelfDebug pointer ('sd') is
    // not visible in this rendering.

    if (sd->enabled() && fault == NoFault) {
        fault = sd->testDebug(tc, req, mode);
    }

    return fault;
}
1242
1243Fault
1245 ArmTranslationType tran_type)
1246{
1247 return translateAtomic(req, tc, mode, tran_type, false);
1248}
1249
1250Fault
1252 ArmTranslationType tran_type, bool stage2)
1253{
1254 auto& state = updateMiscReg(tc, tran_type, stage2);
1255
1256 bool delay = false;
1257 Fault fault;
1258 if (FullSystem)
1259 fault = translateFs(req, tc, mode, NULL, delay, false,
1260 tran_type, false, state);
1261 else
1262 fault = translateSe(req, tc, mode, NULL, delay, false, state);
1263 assert(!delay);
1264 return fault;
1265}
1266
1267Fault
1269{
1270 return translateFunctional(req, tc, mode, NormalTran, false);
1271}
1272
1273Fault
1275 ArmTranslationType tran_type)
1276{
1277 return translateFunctional(req, tc, mode, tran_type, false);
1278}
1279
1280Fault
1282 ArmTranslationType tran_type, bool stage2)
1283{
1284 auto& state = updateMiscReg(tc, tran_type, stage2);
1285
1286 bool delay = false;
1287 Fault fault;
1288 if (FullSystem)
1289 fault = translateFs(req, tc, mode, NULL, delay, false,
1290 tran_type, true, state);
1291 else
1292 fault = translateSe(req, tc, mode, NULL, delay, false, state);
1293 assert(!delay);
1294 return fault;
1295}
1296
1297void
1299 Translation *translation, Mode mode, ArmTranslationType tran_type,
1300 bool stage2)
1301{
1302 auto& state = updateMiscReg(tc, tran_type, stage2);
1303
1304 assert(translation);
1305
1306 translateComplete(req, tc, translation, mode, tran_type,
1307 stage2, state);
1308}
1309
1310Fault
1312 Translation *translation, Mode mode, ArmTranslationType tran_type,
1313 bool call_from_s2)
1314{
1315 return translateComplete(req, tc, translation, mode, tran_type,
1316 call_from_s2, s1State);
1317}
1318
1319Fault
1321 Translation *translation, Mode mode, ArmTranslationType tran_type,
1322 bool call_from_s2, CachedState &state)
1323{
1324 bool delay = false;
1325 Fault fault;
1326 if (FullSystem)
1327 fault = translateFs(req, tc, mode, translation, delay, true, tran_type,
1328 false, state);
1329 else
1330 fault = translateSe(req, tc, mode, translation, delay, true, state);
1331
1332 DPRINTF(MMU, "Translation returning delay=%d fault=%d\n", delay,
1333 fault != NoFault);
1334 // If we have a translation, and we're not in the middle of doing a stage
1335 // 2 translation tell the translation that we've either finished or its
1336 // going to take a while. By not doing this when we're in the middle of a
1337 // stage 2 translation we prevent marking the translation as delayed twice,
1338 // one when the translation starts and again when the stage 1 translation
1339 // completes.
1340
1341 if (translation && (call_from_s2 || !state.stage2Req || req->hasPaddr() ||
1342 fault != NoFault)) {
1343 if (!delay)
1344 translation->finish(fault, req, tc, mode);
1345 else
1346 translation->markDelayed();
1347 }
1348 return fault;
1349}
1350
1351vmid_t
1353{
1354 AA64MMFR1 mmfr1 = tc->readMiscReg(MISCREG_ID_AA64MMFR1_EL1);
1355 VTCR_t vtcr = tc->readMiscReg(MISCREG_VTCR_EL2);
1356 vmid_t vmid = 0;
1357
1358 switch (mmfr1.vmidbits) {
1359 case 0b0000:
1360 // 8 bits
1361 vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48);
1362 break;
1363 case 0b0010:
1364 if (vtcr.vs && ELIs64(tc, EL2)) {
1365 // 16 bits
1366 vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 63, 48);
1367 } else {
1368 // 8 bits
1369 vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48);
1370 }
1371 break;
1372 default:
1373 panic("Reserved ID_AA64MMFR1_EL1.VMIDBits value: %#x",
1374 mmfr1.vmidbits);
1375 }
1376
1377 return vmid;
1378}
1379
1382 ArmTranslationType tran_type, bool stage2)
1383{
1384 // check if the regs have changed, or the translation mode is different.
1385 // NOTE: the tran type doesn't affect stage 2 TLB's as they only handle
1386 // one type of translation anyway
1387
1388 auto& state = stage2 ? s2State : s1State;
1389 if (state.miscRegValid && miscRegContext == tc->contextId() &&
1390 ((tran_type == state.curTranType) || stage2)) {
1391
1392 } else {
1393 DPRINTF(MMU, "MMU variables changed!\n");
1394 state.updateMiscReg(tc, tran_type);
1395
1396 itbStage2->setVMID(state.vmid);
1397 dtbStage2->setVMID(state.vmid);
1398
1399 for (auto tlb : instruction) {
1400 static_cast<TLB*>(tlb)->setVMID(state.vmid);
1401 }
1402 for (auto tlb : data) {
1403 static_cast<TLB*>(tlb)->setVMID(state.vmid);
1404 }
1405 for (auto tlb : unified) {
1406 static_cast<TLB*>(tlb)->setVMID(state.vmid);
1407 }
1408
1409 miscRegContext = tc->contextId();
1410 }
1411
1412 if (state.directToStage2) {
1413 s2State.updateMiscReg(tc, tran_type);
1414 return s2State;
1415 } else {
1416 return state;
1417 }
1418}
1419
// Refresh this CachedState's copies of the translation-controlling
// system registers (SCTLR, TCR/TTBCR, TTBRx, HCR, SCR, CPSR, ...) and
// derive asid, vmid, exceptionLevel, stage2Req/stage2DescReq and
// directToStage2 for the selected translation type.
// NOTE(review): a large number of this function's lines are not
// visible in this rendering (register reads, case labels, and several
// statement heads were dropped); the surviving code is preserved
// verbatim and commented only where it is self-evident.
void
    ArmTranslationType tran_type)
{

    // Dependencies: SCR/SCR_EL3, CPSR
        !(tran_type & HypMode) && !(tran_type & S1S2NsTran) ?

    exceptionLevel = tranTypeEL(cpsr, scr, tran_type);
    aarch64 = isStage2 ?
        ELIs64(tc, EL2) :

    if (aarch64) { // AArch64
        // determine EL we need to translate in
        switch (currRegime) {
        {
            if (mmu->release()->has(ArmExtension::FEAT_S1PIE)) {
                pie = tcr2.pie;
            }
            // ASID comes from TTBR1 or TTBR0 depending on TTBCR.A1;
            // 16-bit ASIDs are used when supported and TCR.AS is set.
            uint64_t ttbr_asid = ttbcr.a1 ?
            asid = bits(ttbr_asid,
                        (mmu->haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
        }
        break;
        {
            // VHE code for EL2&0 regime
            if (mmu->release()->has(ArmExtension::FEAT_S1PIE)) {
                pie = tcr2.pie;
            }
            uint64_t ttbr_asid = ttbcr.a1 ?
            asid = bits(ttbr_asid,
                        (mmu->haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
        }
        break;
            if (mmu->release()->has(ArmExtension::FEAT_S1PIE)) {
                pie = tcr2.pie;
            }
            // EL2/EL3 regimes are not ASID-tagged.
            asid = -1;
            break;
            if (mmu->release()->has(ArmExtension::FEAT_S1PIE)) {
                pie = static_cast<TCR>(ttbcr).pie;
            }
            asid = -1;
            break;
        }

        if (mmu->release()->has(ArmExtension::VIRTUALIZATION)) {
            vmid = getVMID(tc);
            // Effective HCR_EL2.VM: off when E2H+TGE route EL0 to EL2.
            bool vm = hcr.vm;
            if (HaveExt(tc, ArmExtension::FEAT_VHE) &&
                hcr.e2h == 1 && hcr.tge ==1) {
                vm = 0;
            }

            if (hcr.e2h == 1 && (exceptionLevel == EL2
                                 || (hcr.tge ==1 && exceptionLevel == EL0))) {
                // EL2&0 regime: no stage-2 translation applies.
                directToStage2 = false;
                stage2Req = false;
                stage2DescReq = false;
            } else {
                // Work out if we should skip the first stage of translation and go
                // directly to stage 2. This value is cached so we don't have to
                // compute it for every translation.
                const bool el2_enabled = EL2Enabled(tc);
                stage2Req = isStage2 ||
                    (vm && exceptionLevel < EL2 && el2_enabled &&
                     !(tran_type & S1CTran) &&
                     !(tran_type & S1E1Tran)); // <--- FIX THIS HACK
                    (vm && exceptionLevel < EL2 && el2_enabled);
            }
        } else {
            vmid = 0;
            directToStage2 = false;
            stage2Req = false;
            stage2DescReq = false;
        }
    } else { // AArch32
        isPriv = cpsr.mode != MODE_USER;
        if (longDescFormatInUse(tc)) {
            // LPAE: ASID lives in TTBR[55:48].
            uint64_t ttbr_asid = tc->readMiscReg(
            asid = bits(ttbr_asid, 55, 48);
        } else { // Short-descriptor translation table format in use
            CONTEXTIDR context_id = tc->readMiscReg(snsBankedIndex(
            asid = context_id.asid;
        }

        if (mmu->release()->has(ArmExtension::VIRTUALIZATION)) {
            vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
            if (exceptionLevel == EL2) {
            }
            // Work out if we should skip the first stage of translation and go
            // directly to stage 2. This value is cached so we don't have to
            // compute it for every translation.
            const bool el2_enabled = EL2Enabled(tc);
            stage2Req = isStage2 ||
                (hcr.vm && exceptionLevel < EL2 && el2_enabled &&
                 !(tran_type & S1CTran));
                (hcr.vm && exceptionLevel < EL2 && el2_enabled);
        } else {
            vmid = 0;
            stage2Req = false;
            directToStage2 = false;
            stage2DescReq = false;
        }
    }
    // Mark the cached copies valid for the context/type we captured.
    miscRegValid = true;
    curTranType = tran_type;
}
1580
1582MMU::tranTypeEL(CPSR cpsr, SCR scr, ArmTranslationType type)
1583{
1584 switch (type) {
1585 case S1E0Tran:
1586 case S12E0Tran:
1587 return EL0;
1588
1589 case S1E1Tran:
1590 case S12E1Tran:
1591 case S1S2NsTran:
1592 return EL1;
1593
1594 case S1E2Tran:
1595 case HypMode:
1596 return EL2;
1597
1598 case S1E3Tran:
1599 return EL3;
1600
1601 case S1CTran:
1602 return currEL(cpsr) == EL3 && scr.ns == 0 ?
1603 EL3 : EL1;
1604
1605 case NormalTran:
1606 return currEL(cpsr);
1607
1608 default:
1609 panic("Unknown translation mode!\n");
1610 }
1611}
1612
1613Fault
1615 Translation *translation, bool timing, bool functional,
1616 SecurityState ss, PASpace ipaspace, ArmTranslationType tran_type,
1617 bool stage2)
1618{
1619 return getTE(te, req, tc, mode, translation, timing, functional,
1620 ss, ipaspace, tran_type, stage2 ? s2State : s1State);
1621}
1622
1623TlbEntry*
1625 bool functional, bool ignore_asn, TranslationRegime regime,
1626 bool stage2, BaseMMU::Mode mode)
1627{
1628 TLB *tlb = getTlb(mode, stage2);
1629
1630 TlbEntry::KeyType lookup_data;
1631
1632 lookup_data.va = va;
1633 lookup_data.asn = asid;
1634 lookup_data.ignoreAsn = ignore_asn;
1635 lookup_data.vmid = vmid;
1636 lookup_data.ss = ss;
1637 lookup_data.functional = functional;
1638 lookup_data.targetRegime = regime;
1639 lookup_data.mode = mode;
1640
1641 return tlb->multiLookup(lookup_data);
1642}
1643
1644Fault
1646 Translation *translation, bool timing, bool functional,
1647 SecurityState ss, PASpace ipaspace, ArmTranslationType tran_type,
1648 CachedState& state)
1649{
1650 // In a 2-stage system, the IPA->PA translation can be started via this
1651 // call so make sure the miscRegs are correct.
1652 if (state.isStage2) {
1653 updateMiscReg(tc, tran_type, true);
1654 }
1655
1656 Addr vaddr_tainted = req->getVaddr();
1657 Addr vaddr = 0;
1658 TranslationRegime regime = state.currRegime;
1659
1660 if (state.aarch64) {
1661 vaddr = purifyTaggedAddr(vaddr_tainted, tc, state.exceptionLevel,
1662 static_cast<TCR>(state.ttbcr), mode==Execute, state);
1663 } else {
1664 vaddr = vaddr_tainted;
1665 }
1666
1667 *te = lookup(vaddr, state.asid, state.vmid, ss, false,
1668 false, regime, state.isStage2, mode);
1669
1670 if (!isCompleteTranslation(*te)) {
1671 if (req->isPrefetch()) {
1672 // if the request is a prefetch don't attempt to fill the TLB or go
1673 // any further with the memory access (here we can safely use the
1674 // fault status for the short desc. format in all cases)
1675 stats.prefetchFaults++;
1676 return std::make_shared<PrefetchAbort>(
1677 vaddr_tainted, ArmFault::PrefetchTLBMiss, state.isStage2);
1678 }
1679
1680 // start translation table walk, pass variables rather than
1681 // re-retreaving in table walker for speed
1682 DPRINTF(MMU,
1683 "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
1684 vaddr_tainted, state.asid, state.vmid);
1685
1686 Fault fault;
1687 fault = getTableWalker(mode, state.isStage2)->walk(
1688 req, tc, state.asid, state.vmid, mode,
1689 translation, timing, functional, ss,
1690 ipaspace, tran_type, state.stage2DescReq, *te);
1691
1692 // for timing mode, return and wait for table walk,
1693 if (timing || fault != NoFault) {
1694 return fault;
1695 }
1696
1697 *te = lookup(vaddr, state.asid, state.vmid, ss,
1698 true, false, regime, state.isStage2, mode);
1699 assert(*te);
1700 }
1701 return NoFault;
1702}
1703
/**
 * Produce the final TLB entry for a translation, combining stage-1
 * and (when required) stage-2 results and applying permission checks.
 *
 * NOTE(review): this rendering dropped the function-name line and two
 * interior lines (the ?: continuation choosing the IPA space and one
 * argument line of the stage-1 getTE call); surviving code is
 * preserved verbatim.
 */
Fault
    ThreadContext *tc, Mode mode,
    Translation *translation, bool timing, bool functional,
    TlbEntry *mergeTe, CachedState &state)
{
    Fault fault;

    if (state.isStage2) {
        // Select the IPA space based on the current security state.
        PASpace ipaspace = state.securityState == SecurityState::Secure ?

        // We are already in the stage 2 TLB. Grab the table entry for stage
        // 2 only. We are here because stage 1 translation is disabled.
        TlbEntry *s2_te = nullptr;
        // Get the stage 2 table entry
        fault = getTE(&s2_te, req, tc, mode, translation, timing, functional,
                      state.securityState, ipaspace,
                      state.curTranType, state);
        // Check permissions of stage 2
        if (isCompleteTranslation(s2_te) && (fault == NoFault)) {
            if (state.aarch64)
                fault = checkPermissions64(s2_te, req, mode, tc, state);
            else
                fault = checkPermissions(s2_te, req, mode, state);
        }
        *te = s2_te;
        return fault;
    }

    TlbEntry *s1_te = nullptr;

    Addr vaddr_tainted = req->getVaddr();

    // Get the stage 1 table entry
    fault = getTE(&s1_te, req, tc, mode, translation, timing, functional,
                  state.curTranType, state);
    // only proceed if we have a valid table entry
    if (isCompleteTranslation(s1_te) && (fault == NoFault)) {
        // Check stage 1 permissions before checking stage 2
        if (state.aarch64)
            fault = checkPermissions64(s1_te, req, mode, tc, state);
        else
            fault = checkPermissions(s1_te, req, mode, state);
        // Note: bitwise '&' on two bool-valued operands — acts as a
        // logical AND here.
        if (state.stage2Req & (fault == NoFault)) {
            // Stage 2 is required: start an IPA->PA lookup and merge
            // the attributes of both stages into mergeTe.
            Stage2LookUp *s2_lookup = new Stage2LookUp(this, *s1_te,
                req, translation, mode, timing, functional,
                state.securityState, state.curTranType);
            fault = s2_lookup->getTe(tc, mergeTe);
            if (s2_lookup->isComplete()) {
                *te = mergeTe;
                // We've finished with the lookup so delete it
                delete s2_lookup;
            } else {
                // The lookup hasn't completed, so we can't delete it now. We
                // get round this by asking the object to self delete when the
                // translation is complete.
                s2_lookup->setSelfDelete();
            }
        } else {
            // This case deals with an S1 hit (or bypass), followed by
            // an S2 hit-but-perms issue
            if (state.isStage2) {
                DPRINTF(MMU, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
                        vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0,
                        fault);
                if (fault != NoFault) {
                    // Re-tag the fault as a stage-2 fault on a stage-1
                    // walk, with the original VA.
                    auto arm_fault = reinterpret_cast<ArmFault*>(fault.get());
                    arm_fault->annotate(ArmFault::S1PTW, false);
                    arm_fault->annotate(ArmFault::OVA, vaddr_tainted);
                }
            }
            *te = s1_te;
        }
    }
    return fault;
}
1782
1783bool
1785{
1786 return entry && !entry->partial;
1787}
1788
1789void
1791{
1792 BaseMMU::takeOverFrom(old_mmu);
1793
1794 auto *ommu = dynamic_cast<MMU*>(old_mmu);
1795 assert(ommu);
1796
1797 _attr = ommu->_attr;
1798
1799 s1State = ommu->s1State;
1800 s2State = ommu->s2State;
1801}
1802
1803void
1805{
1806 if (!_ti) {
1807 test = nullptr;
1808 } else {
1809 TlbTestInterface *ti(dynamic_cast<TlbTestInterface *>(_ti));
1810 fatal_if(!ti, "%s is not a valid ARM TLB tester\n", _ti->name());
1811 test = ti;
1812 itbWalker->setTestInterface(test);
1813 dtbWalker->setTestInterface(test);
1814 itbStage2Walker->setTestInterface(test);
1815 dtbStage2Walker->setTestInterface(test);
1816 }
1817}
1818
1819Fault
1821 DomainType domain, CachedState &state) const
1822{
1823 if (!test || !req->hasSize() || req->getSize() == 0 ||
1824 req->isCacheMaintenance()) {
1825 return NoFault;
1826 } else {
1827 return test->translationCheck(req, state.isPriv, mode, domain);
1828 }
1829}
1830
1832 : statistics::Group(parent),
1833 ADD_STAT(alignFaults, statistics::units::Count::get(),
1834 "Number of MMU faults due to alignment restrictions"),
1835 ADD_STAT(prefetchFaults, statistics::units::Count::get(),
1836 "Number of MMU faults due to prefetch"),
1837 ADD_STAT(domainFaults, statistics::units::Count::get(),
1838 "Number of MMU faults due to domain restrictions"),
1839 ADD_STAT(permsFaults, statistics::units::Count::get(),
1840 "Number of MMU faults due to permissions restrictions")
1841{
1842}
1843
1844} // namespace gem5
#define DPRINTF(x,...)
Definition trace.hh:209
virtual void annotate(AnnotationIDs id, uint64_t val)
Definition faults.hh:231
SelfDebug * getSelfDebug() const
Definition isa.hh:182
Fault translateMmuOn(ThreadContext *tc, const RequestPtr &req, Mode mode, Translation *translation, bool &delay, bool timing, bool functional, Addr vaddr, TranMethod tran_method, CachedState &state)
Definition mmu.cc:1090
std::tuple< bool, bool, bool > s1IndirectPermBits64(TlbEntry *te, const RequestPtr &req, Mode mode, ThreadContext *tc, CachedState &state, bool r, bool w, bool x)
Definition mmu.cc:733
ArmISA::TLB * getITBPtr() const
Definition mmu.cc:145
void setAttr(uint64_t attr)
Accessor functions for memory attributes for last accessed TLB entry.
Definition mmu.hh:323
void flushStage2(const TLBIOp &tlbi_op)
Definition mmu.cc:243
static bool hasUnprivRegime(TranslationRegime regime)
Definition mmu.cc:990
Fault translateSe(const RequestPtr &req, ThreadContext *tc, Mode mode, Translation *translation, bool &delay, bool timing, CachedState &state)
Definition mmu.cc:333
void drainResume() override
Resume execution after a successful drain.
Definition mmu.cc:132
uint64_t _attr
Definition mmu.hh:434
ContextID miscRegContext
Definition mmu.hh:428
Fault translateComplete(const RequestPtr &req, ThreadContext *tc, Translation *translation, Mode mode, ArmTranslationType tran_type, bool call_from_s2)
Definition mmu.cc:1311
TLB * getTlb(BaseMMU::Mode mode, bool stage2) const
Definition mmu.cc:151
Fault translateMmuOff(ThreadContext *tc, const RequestPtr &req, Mode mode, ArmTranslationType tran_type, Addr vaddr, bool long_desc_format, CachedState &state)
Definition mmu.cc:1015
Fault testTranslation(const RequestPtr &req, Mode mode, DomainType domain, CachedState &state) const
Definition mmu.cc:1820
TLB * dtbStage2
Definition mmu.hh:78
void init() override
Called at init time, this method is traversing the TLB hierarchy and pupulating the instruction/data/...
Definition mmu.cc:94
CachedState & updateMiscReg(ThreadContext *tc, ArmTranslationType tran_type, bool stage2)
Definition mmu.cc:1381
bool isCompleteTranslation(TlbEntry *te) const
Definition mmu.cc:1784
void invalidateMiscReg()
Definition mmu.cc:209
bool haveLargeAsid64
Definition mmu.hh:438
Fault finalizePhysical(const RequestPtr &req, ThreadContext *tc, Mode mode) const override
Definition mmu.cc:303
bool _hasWalkCache
Definition mmu.hh:443
std::pair< bool, bool > s1PermBits64(TlbEntry *te, const RequestPtr &req, Mode mode, ThreadContext *tc, CachedState &state, bool r, bool w, bool x)
Definition mmu.cc:960
Fault getResultTe(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode, Translation *translation, bool timing, bool functional, TlbEntry *mergeTe, CachedState &state)
Definition mmu.cc:1705
void flushStage1(const TLBIOp &tlbi_op)
Definition mmu.cc:229
static ExceptionLevel tranTypeEL(CPSR cpsr, SCR scr, ArmTranslationType type)
Determine the EL to use for the purpose of a translation given a specific translation type.
Definition mmu.cc:1582
std::tuple< bool, bool, bool > s1DirectPermBits64(TlbEntry *te, const RequestPtr &req, Mode mode, ThreadContext *tc, CachedState &state, bool r, bool w, bool x)
Definition mmu.cc:874
uint8_t physAddrRange
Definition mmu.hh:439
ArmISA::TLB * getDTBPtr() const
Definition mmu.cc:139
bool checkWalkCache() const
Definition mmu.cc:113
TlbTestInterface * test
Definition mmu.hh:407
void flushAll() override
Definition mmu.cc:272
void translateTiming(const RequestPtr &req, ThreadContext *tc, Translation *translation, Mode mode) override
Definition mmu.hh:260
TLB * itbStage2
Definition mmu.hh:77
void setTestInterface(SimObject *ti)
Definition mmu.cc:1804
TableWalker * itbStage2Walker
Definition mmu.hh:82
Addr getValidAddr(Addr vaddr, ThreadContext *tc, Mode mode) override
Definition mmu.cc:378
AddrRange m5opRange
Definition mmu.hh:441
TableWalker * dtbStage2Walker
Definition mmu.hh:83
TableWalker * dtbWalker
Definition mmu.hh:81
Fault getTE(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode, Translation *translation, bool timing, bool functional, SecurityState ss, PASpace ipaspace, ArmTranslationType tran_type, bool stage2)
Definition mmu.cc:1614
void dflush(const TLBIOp &tlbi_op)
Definition mmu.cc:261
Fault checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode, ThreadContext *tc, bool stage2)
Definition mmu.cc:579
Fault translateFs(const RequestPtr &req, ThreadContext *tc, Mode mode, Translation *translation, bool &delay, bool timing, ArmTranslationType tran_type, bool functional, CachedState &state)
Definition mmu.cc:1157
TlbEntry * lookup(Addr vpn, uint16_t asn, vmid_t vmid, SecurityState ss, bool functional, bool ignore_asn, TranslationRegime target_regime, bool stage2, BaseMMU::Mode mode)
Lookup an entry in the TLB.
Definition mmu.cc:1624
TableWalker * itbWalker
Definition mmu.hh:80
const ArmRelease * _release
Definition mmu.hh:437
gem5::ArmISA::MMU::Stats stats
std::pair< bool, bool > s2PermBits64(TlbEntry *te, const RequestPtr &req, Mode mode, ThreadContext *tc, CachedState &state, bool r, bool w, bool x)
Definition mmu.cc:692
CachedState s1State
Definition mmu.hh:431
void flush(const TLBIOp &tlbi_op)
Definition mmu.cc:217
Fault checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode, bool stage2)
Definition mmu.cc:392
MMU(const ArmMMUParams &p)
Definition mmu.cc:61
TableWalker * getTableWalker(BaseMMU::Mode mode, bool stage2) const
Definition mmu.cc:167
void takeOverFrom(BaseMMU *old_mmu) override
Definition mmu.cc:1790
Fault testAndFinalize(const RequestPtr &req, ThreadContext *tc, Mode mode, TlbEntry *te, CachedState &state) const
Definition mmu.cc:281
Addr purifyTaggedAddr(Addr vaddr_tainted, ThreadContext *tc, ExceptionLevel el, TCR tcr, bool is_inst, CachedState &state)
Definition mmu.cc:1002
CachedState s2State
Definition mmu.hh:431
void iflush(const TLBIOp &tlbi_op)
Definition mmu.cc:250
Fault translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode) override
Definition mmu.hh:249
TranslationGenPtr translateFunctional(Addr start, Addr size, ThreadContext *tc, Mode mode, Request::Flags flags) override
Returns a translation generator for a region of virtual addresses, instead of directly translating a ...
Definition mmu.hh:87
Fault getTe(ThreadContext *tc, TlbEntry *destTe)
virtual bool stage1Flush() const
Return true if the TLBI op needs to flush stage1 entries, Defaulting to true in the TLBIOp abstract c...
Definition tlbi_op.hh:95
virtual bool stage2Flush() const
Return true if the TLBI op needs to flush stage2 entries, Defaulting to false in the TLBIOp abstract ...
Definition tlbi_op.hh:106
void setTableWalker(TableWalker *table_walker)
Definition tlb.cc:143
Fault walk(const RequestPtr &req, ThreadContext *tc, uint16_t asid, vmid_t _vmid, BaseMMU::Mode mode, BaseMMU::Translation *_trans, bool timing, bool functional, SecurityState ss, PASpace ipaspace, MMU::ArmTranslationType tran_type, bool stage2, const TlbEntry *walk_entry)
uint8_t physAddrRange() const
Returns the supported physical address range in bits.
Definition system.hh:221
bool haveLargeAsid64() const
Returns true if ASID is 16 bits in AArch64 (ARMv8)
Definition system.hh:207
static bool haveEL(ThreadContext *tc, ArmISA::ExceptionLevel el)
Return true if the system implements a specific exception level.
Definition system.cc:133
const ArmRelease * releaseFS() const
Definition system.hh:157
virtual void finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc, BaseMMU::Mode mode)=0
virtual void markDelayed()=0
Signal that the translation has been delayed due to a hw page table walk.
std::set< BaseTLB * > instruction
It is possible from the MMU to traverse the entire hierarchy of TLBs, starting from the DTB and ITB (...
Definition mmu.hh:185
void init() override
Called at init time, this method is traversing the TLB hierarchy and pupulating the instruction/data/...
Definition mmu.cc:53
virtual void flushAll()
Definition mmu.cc:81
BaseTLB * itb
Definition mmu.hh:163
virtual void takeOverFrom(BaseMMU *old_mmu)
Definition mmu.cc:172
std::set< BaseTLB * > data
Definition mmu.hh:186
std::set< BaseTLB * > unified
Definition mmu.hh:187
BaseMMU(const Params &p)
Definition mmu.hh:90
BaseTLB * dtb
Definition mmu.hh:162
Cycles is a wrapper class for representing cycle counts, i.e.
Definition types.hh:79
virtual std::string name() const
Definition named.hh:60
void setLE(T v)
Set the value in the data pointer to v as little endian.
@ SECURE
The request targets the secure memory space.
Definition request.hh:186
@ STRICT_ORDER
The request is required to be strictly ordered by CPU models and is non-speculative.
Definition request.hh:135
@ UNCACHEABLE
The request is to an uncacheable address.
Definition request.hh:125
@ CACHE_BLOCK_ZERO
This is a write that is targeted and zeroing an entire cache block.
Definition request.hh:143
gem5::Flags< FlagsType > Flags
Definition request.hh:102
ThreadContext is the external interface to all thread state for anything outside of the CPU.
virtual RegVal readMiscReg(RegIndex misc_reg)=0
virtual Process * getProcessPtr()=0
virtual ContextID contextId() const =0
Statistics container.
Definition group.hh:93
STL pair class.
Definition stl.hh:58
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Definition group.hh:75
constexpr T bits(T val, unsigned first, unsigned last)
Extract the bitfield from position 'first' to 'last' (inclusive) from 'val' and right justify it.
Definition bitfield.hh:79
#define panic(...)
This implements a cprintf based panic() function.
Definition logging.hh:220
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condi...
Definition logging.hh:268
SimObject(const Params &p)
Definition sim_object.cc:58
void tagRequest(ThreadContext *tc, const RequestPtr &req, bool ind)
Tag a memory request with MPAM information.
Definition mpam.cc:240
Bitfield< 12 > dc
Bitfield< 30 > te
Bitfield< 3, 0 > mask
Definition pcstate.hh:63
Bitfield< 4, 0 > mode
Definition misc_types.hh:74
Bitfield< 19 > wxn
ExceptionLevel currEL(const ThreadContext *tc)
Returns the current Exception Level (EL) of the provided ThreadContext.
Definition utility.cc:134
Bitfield< 31, 0 > uw
Definition int.hh:63
bool isSecure(ThreadContext *tc)
Definition utility.cc:74
Bitfield< 22 > pan
Definition misc_types.hh:59
Addr maskTaggedAddr(Addr addr, ThreadContext *tc, ExceptionLevel el, int topbit)
Definition utility.cc:459
bool longDescFormatInUse(ThreadContext *tc)
Definition utility.cc:141
Bitfield< 7, 4 > domain
bool ELIs64(ThreadContext *tc, ExceptionLevel el)
Definition utility.cc:277
Bitfield< 9 > e
Definition misc_types.hh:65
TranslationRegime translationRegime(ThreadContext *tc, ExceptionLevel el)
Definition utility.cc:1410
bool EL2Enabled(ThreadContext *tc)
Definition utility.cc:268
ExceptionLevel translationEl(TranslationRegime regime)
Definition utility.cc:1429
SecurityState
Security State.
Definition types.hh:273
int computeAddrTop(ThreadContext *tc, bool selbit, bool is_instr, TCR tcr, ExceptionLevel el)
Definition utility.cc:413
Bitfield< 0 > vm
@ MISCREG_PIR_EL2
Definition misc.hh:1193
@ MISCREG_SCTLR_EL2
Definition misc.hh:616
@ MISCREG_PIRE0_EL1
Definition misc.hh:1189
@ MISCREG_TCR_EL2
Definition misc.hh:641
@ MISCREG_PIR_EL1
Definition misc.hh:1192
@ MISCREG_SCTLR
Definition misc.hh:253
@ MISCREG_TTBCR
Definition misc.hh:278
@ MISCREG_SCR_EL3
Definition misc.hh:628
@ MISCREG_TCR_EL3
Definition misc.hh:648
@ MISCREG_TCR_EL1
Definition misc.hh:636
@ MISCREG_PIR_EL3
Definition misc.hh:1194
@ MISCREG_SCTLR_EL1
Definition misc.hh:609
@ MISCREG_PRRR
Definition misc.hh:399
@ MISCREG_ID_AA64MMFR1_EL1
Definition misc.hh:600
@ MISCREG_CPSR
Definition misc.hh:79
@ MISCREG_NMRR
Definition misc.hh:405
@ MISCREG_TTBR1_EL1
Definition misc.hh:634
@ MISCREG_TCR2_EL2
Definition misc.hh:642
@ MISCREG_CONTEXTIDR
Definition misc.hh:429
@ MISCREG_TTBR1_EL2
Definition misc.hh:904
@ MISCREG_HCR_EL2
Definition misc.hh:619
@ MISCREG_TTBR1
Definition misc.hh:275
@ MISCREG_VTCR_EL2
Definition misc.hh:644
@ MISCREG_VTTBR
Definition misc.hh:478
@ MISCREG_PIRE0_EL2
Definition misc.hh:1190
@ MISCREG_TCR2_EL1
Definition misc.hh:638
@ MISCREG_TTBR0
Definition misc.hh:272
@ MISCREG_DACR
Definition misc.hh:283
@ MISCREG_TTBR0_EL2
Definition misc.hh:640
@ MISCREG_HSCTLR
Definition misc.hh:264
@ MISCREG_TTBR0_EL1
Definition misc.hh:632
@ MISCREG_SCTLR_EL3
Definition misc.hh:625
@ MISCREG_VTTBR_EL2
Definition misc.hh:643
Bitfield< 4 > sd
Bitfield< 6 > f
Definition misc_types.hh:68
Bitfield< 3, 2 > el
Definition misc_types.hh:73
uint16_t vmid_t
Definition types.hh:57
int snsBankedIndex(MiscRegIndex reg, ThreadContext *tc)
Definition misc.cc:686
bool inAArch64(ThreadContext *tc)
Definition utility.cc:127
PASpace
Physical Address Space.
Definition types.hh:280
bool HaveExt(ThreadContext *tc, ArmExtension ext)
Returns true if the provided ThreadContext supports the ArmExtension passed as a second argument.
Definition utility.cc:232
Bitfield< 21 > ss
Definition misc_types.hh:60
Bitfield< 39, 12 > pa
Bitfield< 8 > va
Bitfield< 59, 56 > tlb
Bitfield< 23 > px
Bitfield< 5 > ux
Bitfield< 0 > p
Bitfield< 30 > ti
Bitfield< 0 > w
Bitfield< 14 > pr
Definition misc.hh:116
Bitfield< 3 > x
Definition pagetable.hh:78
static void decodeAddrOffset(Addr offset, uint8_t &func)
bool pseudoInst(ThreadContext *tc, uint8_t func, uint64_t &result)
Units for Stats.
Definition units.hh:113
Copyright (c) 2024 Arm Limited All rights reserved.
Definition binary32.hh:36
std::shared_ptr< FaultBase > Fault
Definition types.hh:249
std::shared_ptr< Request > RequestPtr
Definition request.hh:94
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition types.hh:147
bool FullSystem
The FullSystem variable can be used to determine the current mode of simulation.
Definition root.cc:220
Packet * PacketPtr
constexpr decltype(nullptr) NoFault
Definition types.hh:253
ExceptionLevel exceptionLevel
Definition mmu.hh:182
SecurityState securityState
Definition mmu.hh:187
void updateMiscReg(ThreadContext *tc, ArmTranslationType tran_type)
Definition mmu.cc:1421
vmid_t getVMID(ThreadContext *tc) const
Returns the current VMID (information stored in the VTTBR_EL2 register)
Definition mmu.cc:1352
Memoizer< int, ThreadContext *, bool, bool, TCR, ExceptionLevel > computeAddrTop
Definition mmu.hh:216
ArmTranslationType curTranType
Definition mmu.hh:200
TranslationRegime currRegime
Definition mmu.hh:183
Stats(statistics::Group *parent)
Definition mmu.cc:1831
statistics::Scalar permsFaults
Definition mmu.hh:452
statistics::Scalar alignFaults
Definition mmu.hh:449
statistics::Scalar prefetchFaults
Definition mmu.hh:450
statistics::Scalar domainFaults
Definition mmu.hh:451
TranslationRegime targetRegime
Definition pagetable.hh:201
void setAttributes(bool lpae)
Definition pagetable.hh:501
TLBTypes::KeyType KeyType
Definition pagetable.hh:238
The file contains the definition of a set of TLB Invalidate Instructions.

Generated on Mon Oct 27 2025 04:12:55 for gem5 by doxygen 1.14.0