// mmu.cc - gem5 [DEVELOP-FOR-25.0]
/*
 * Copyright (c) 2010-2013, 2016-2024 Arm Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "arch/arm/mmu.hh"

#include "arch/arm/isa.hh"
#include "arch/arm/mpam.hh"
#include "arch/arm/reg_abi.hh"
#include "arch/arm/stage2_lookup.hh"
#include "arch/arm/table_walker.hh"
#include "arch/arm/tlb.hh"
#include "arch/arm/tlbi_op.hh"
#include "debug/MMU.hh"
#include "mem/packet_access.hh"
#include "sim/pseudo_inst.hh"
#include "sim/process.hh"

namespace gem5
{

using namespace ArmISA;

MMU::MMU(const ArmMMUParams &p)
  : BaseMMU(p),
    itbStage2(p.stage2_itb), dtbStage2(p.stage2_dtb),
    itbWalker(p.itb_walker), dtbWalker(p.dtb_walker),
    itbStage2Walker(p.stage2_itb_walker),
    dtbStage2Walker(p.stage2_dtb_walker),
    test(nullptr),
    miscRegContext(0),
    s1State(this, false), s2State(this, true),
    _attr(0),
    _release(nullptr),
    _hasWalkCache(false),
    stats(this)
{
    // Cache system-level properties
    if (FullSystem) {
        ArmSystem *arm_sys = dynamic_cast<ArmSystem *>(p.sys);
        assert(arm_sys);
        haveLargeAsid64 = arm_sys->haveLargeAsid64();
        physAddrRange = arm_sys->physAddrRange();

        _release = arm_sys->releaseFS();
    } else {
        haveLargeAsid64 = false;
        physAddrRange = 48;

        _release = p.release_se;
    }

    m5opRange = p.sys->m5opRange();
}

void
MMU::init()
{
    itbWalker->setMmu(this);
    dtbWalker->setMmu(this);
    itbStage2Walker->setMmu(this);
    dtbStage2Walker->setMmu(this);

    itbStage2->setTableWalker(itbStage2Walker);
    dtbStage2->setTableWalker(dtbStage2Walker);

    getITBPtr()->setTableWalker(itbWalker);
    getDTBPtr()->setTableWalker(dtbWalker);

    BaseMMU::init();

    _hasWalkCache = checkWalkCache();
}

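// Returns true if any TLB in the instruction/data/unified hierarchy can
// act as a walk cache, i.e. hold partial translations produced by the
// table walker.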
bool
MMU::checkWalkCache() const
{
    for (auto tlb : instruction) {
        if (static_cast<TLB*>(tlb)->walkCache())
            return true;
    }
    for (auto tlb : data) {
        if (static_cast<TLB*>(tlb)->walkCache())
            return true;
    }
    for (auto tlb : unified) {
        if (static_cast<TLB*>(tlb)->walkCache())
            return true;
    }

    return false;
}

void
MMU::drainResume()
{
    s1State.miscRegValid = false;
    s2State.miscRegValid = false;
}

ArmISA::TLB *
MMU::getDTBPtr() const
{
    return static_cast<ArmISA::TLB *>(dtb);
}

ArmISA::TLB *
MMU::getITBPtr() const
{
    return static_cast<ArmISA::TLB *>(itb);
}

TLB *
MMU::getTlb(BaseMMU::Mode mode, bool stage2) const
{
    if (mode == BaseMMU::Execute) {
        if (stage2)
            return itbStage2;
        else
            return getITBPtr();
    } else {
        if (stage2)
            return dtbStage2;
        else
            return getDTBPtr();
    }
}

TableWalker *
MMU::getTableWalker(BaseMMU::Mode mode, bool stage2) const
{
    if (mode == BaseMMU::Execute) {
        if (stage2)
            return itbStage2Walker;
        else
            return itbWalker;
    } else {
        if (stage2)
            return dtbStage2Walker;
        else
            return dtbWalker;
    }
}

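// Functional VA-to-PA probe: consult the TLBs only (no table walk, no
// side effects) and report whether a valid mapping exists for the address.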
bool
MMU::translateFunctional(ThreadContext *tc, Addr va, Addr &pa)
{
    CachedState& state = updateMiscReg(tc, NormalTran, false);

    auto tlb = getTlb(BaseMMU::Read, state.directToStage2);

    TlbEntry::KeyType lookup_data;

    lookup_data.va = va;
    lookup_data.asn = state.asid;
    lookup_data.ignoreAsn = false;
    lookup_data.vmid = state.vmid;
    lookup_data.ss = state.securityState;
    lookup_data.functional = true;
    lookup_data.targetRegime = state.currRegime;
    lookup_data.mode = BaseMMU::Read;

    TlbEntry *e = tlb->multiLookup(lookup_data);

    if (!e)
        return false;
    pa = e->pAddr(va);
    return true;
}

void
MMU::invalidateMiscReg()
{
    s1State.miscRegValid = false;
    s1State.computeAddrTop.flush();
    s2State.computeAddrTop.flush();
}

void
MMU::flush(const TLBIOp &tlbi_op)
{
    if (tlbi_op.stage1Flush()) {
        flushStage1(tlbi_op);
    }

    if (tlbi_op.stage2Flush()) {
        flushStage2(tlbi_op);
    }
}

void
MMU::flushStage1(const TLBIOp &tlbi_op)
{
    for (auto tlb : instruction) {
        static_cast<TLB*>(tlb)->flush(tlbi_op);
    }
    for (auto tlb : data) {
        static_cast<TLB*>(tlb)->flush(tlbi_op);
    }
    for (auto tlb : unified) {
        static_cast<TLB*>(tlb)->flush(tlbi_op);
    }
}

void
MMU::flushStage2(const TLBIOp &tlbi_op)
{
    itbStage2->flush(tlbi_op);
    dtbStage2->flush(tlbi_op);
}

void
MMU::iflush(const TLBIOp &tlbi_op)
{
    for (auto tlb : instruction) {
        static_cast<TLB*>(tlb)->flush(tlbi_op);
    }
    for (auto tlb : unified) {
        static_cast<TLB*>(tlb)->flush(tlbi_op);
    }
}

void
MMU::dflush(const TLBIOp &tlbi_op)
{
    for (auto tlb : data) {
        static_cast<TLB*>(tlb)->flush(tlbi_op);
    }
    for (auto tlb : unified) {
        static_cast<TLB*>(tlb)->flush(tlbi_op);
    }
}

void
MMU::flushAll()
{
    BaseMMU::flushAll();
    itbStage2->flushAll();
    dtbStage2->flushAll();
}

Fault
MMU::testAndFinalize(const RequestPtr &req, ThreadContext *tc,
                     Mode mode, TlbEntry* te, CachedState &state) const
{
    // If we don't have a valid tlb entry it means virtual memory
    // is not enabled
    auto domain = te ? te->domain : DomainType::NoAccess;

    mpam::tagRequest(tc, req, mode == Execute);

    // Check for a tester generated address fault
    Fault fault = testTranslation(req, mode, domain, state);
    if (fault != NoFault) {
        return fault;
    } else {
        // Now that we checked no fault has been generated in the
        // translation process, we can finalize the physical address
        return finalizePhysical(req, tc, mode);
    }
}

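// Physical addresses that fall inside the memory-mapped m5ops range are
// not sent to memory: a local accessor is attached to the request which
// runs the decoded gem5 pseudo-instruction instead.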
Fault
MMU::finalizePhysical(const RequestPtr &req,
                      ThreadContext *tc, Mode mode) const
{
    const Addr paddr = req->getPaddr();

    if (m5opRange.contains(paddr)) {
        uint8_t func;
        pseudo_inst::decodeAddrOffset(paddr - m5opRange.start(), func);
        req->setLocalAccessor(
            [func, mode](ThreadContext *tc, PacketPtr pkt) -> Cycles
            {
                uint64_t ret;
                if (inAArch64(tc))
                    pseudo_inst::pseudoInst<RegABI64>(tc, func, ret);
                else
                    pseudo_inst::pseudoInst<RegABI32>(tc, func, ret);

                if (mode == Read)
                    pkt->setLE(ret);

                return Cycles(1);
            }
        );
    }

    return NoFault;
}

Fault
MMU::translateSe(const RequestPtr &req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing,
                 CachedState &state)
{
    updateMiscReg(tc, NormalTran, state.isStage2);
    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (state.aarch64) {
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, state.exceptionLevel,
            static_cast<TCR>(state.ttbcr), mode==Execute, state);
    } else {
        vaddr = vaddr_tainted;
    }
    Request::Flags flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);

    if (!is_fetch) {
        if (state.sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                // LPAE is always disabled in SE mode
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, state.isStage2,
                    TranMethod::VmsaTran);
            }
        }
    }

    Process *p = tc->getProcessPtr();
    if (const auto pte = p->pTable->lookup(vaddr); !pte) {
        return std::make_shared<GenericPageTableFault>(vaddr_tainted);
    } else {
        req->setPaddr(pte->paddr + p->pTable->pageOffset(vaddr));

        if (pte->flags & EmulationPageTable::Uncacheable)
            req->setFlags(Request::UNCACHEABLE);

        return finalizePhysical(req, tc, mode);
    }
}

Addr
MMU::getValidAddr(Addr vaddr, ThreadContext *tc, Mode mode)
{
    auto& state = updateMiscReg(tc, NormalTran, false);
    Addr purified_vaddr = 0;
    if (state.aarch64) {
        purified_vaddr = purifyTaggedAddr(vaddr, tc, state.exceptionLevel,
            static_cast<TCR>(state.ttbcr), mode==Execute, state);
    } else {
        purified_vaddr = vaddr;
    }
    return purified_vaddr;
}

Fault
MMU::checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode,
                      bool stage2)
{
    return checkPermissions(te, req, mode, stage2 ? s2State : s1State);
}

Fault
MMU::checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode,
                      CachedState &state)
{
    // a data cache maintenance instruction that operates by MVA does
    // not generate a Data Abort exception due to a Permission fault
    if (req->isCacheMaintenance()) {
        return NoFault;
    }

    Addr vaddr = req->getVaddr(); // 32-bit don't have to purify
    Request::Flags flags = req->getFlags();
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool is_priv = state.isPriv && !(flags & UserMode);

    // Get the translation method from the actual table entry format
    TranMethod tran_method = te->longDescFormat ?
        TranMethod::LpaeTran : TranMethod::VmsaTran;

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (state.isStage2 && req->isPTWalk() && state.hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return std::make_shared<DataAbort>(
            vaddr, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            state.isStage2, tran_method);
    }

    // Generate an alignment fault for unaligned data accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                stats.alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr, DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, state.isStage2,
                    tran_method);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return std::make_shared<PrefetchAbort>(
                vaddr, ArmFault::PrefetchUncacheable,
                state.isStage2, tran_method);
        }
    }

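    // Short-descriptor format: apply the Domain Access Control Register.
    // Each 2-bit DACR field selects: 0 = no access (domain fault),
    // 1 = client (check page permissions), 2 = reserved (UNPREDICTABLE),
    // 3 = manager (permissions are not checked).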
    if (!te->longDescFormat) {
        switch ((state.dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
          case 0:
            stats.domainFaults++;
            DPRINTF(MMU, "MMU Fault: Data abort on domain. DACR: %#x"
                    " domain: %#x write:%d\n", state.dacr,
                    static_cast<uint8_t>(te->domain), is_write);
            if (is_fetch) {
                // Use PC value instead of vaddr because vaddr might
                // be aligned to cache line and should not be the
                // address reported in FAR
                return std::make_shared<PrefetchAbort>(
                    req->getPC(),
                    ArmFault::DomainLL + te->lookupLevel,
                    state.isStage2, tran_method);
            } else
                return std::make_shared<DataAbort>(
                    vaddr, te->domain, is_write,
                    ArmFault::DomainLL + te->lookupLevel,
                    state.isStage2, tran_method);
          case 1:
            // Continue with permissions check
            break;
          case 2:
            panic("UNPRED domain\n");
          case 3:
            return NoFault;
        }
    }

    // The 'ap' variable is AP[2:0] or {AP[2,1],1b'0}, i.e. always three bits
    uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
    uint8_t hap = te->hap;

    if (state.sctlr.afe == 1 || te->longDescFormat)
        ap |= 1;

    bool abt;
    bool isWritable = true;
    // If this is a stage 2 access (eg for reading stage 1 page table entries)
    // then don't perform the AP permissions check, we still do the HAP check
    // below.
    if (state.isStage2) {
        abt = false;
    } else {
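        // AP[2:0] decode (SCTLR.AFE folds the access flag into AP[0]):
        // 0b001 privileged RW, 0b010 privileged RW / user RO, 0b011 RW at
        // any privilege, 0b101 privileged RO, 0b110/0b111 RO at any
        // privilege; 0b000 falls back to the legacy SCTLR.XP/RS controls
        // and 0b100 is UNPREDICTABLE.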
        switch (ap) {
          case 0:
            DPRINTF(MMU, "Access permissions 0, checking rs:%#x\n",
                    (int)state.sctlr.rs);
            if (!state.sctlr.xp) {
                switch ((int)state.sctlr.rs) {
                  case 2:
                    abt = is_write;
                    break;
                  case 1:
                    abt = is_write || !is_priv;
                    break;
                  case 0:
                  case 3:
                  default:
                    abt = true;
                    break;
                }
            } else {
                abt = true;
            }
            break;
          case 1:
            abt = !is_priv;
            break;
          case 2:
            abt = !is_priv && is_write;
            isWritable = is_priv;
            break;
          case 3:
            abt = false;
            break;
          case 4:
            panic("UNPRED permissions\n");
          case 5:
            abt = !is_priv || is_write;
            isWritable = false;
            break;
          case 6:
          case 7:
            abt = is_write;
            isWritable = false;
            break;
          default:
            panic("Unknown permissions %#x\n", ap);
        }
    }

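    // WXN: if SCTLR.WXN is set, writable locations are treated as
    // execute-never; SCTLR.UWXN likewise makes user-writable (AP == 0b011)
    // locations execute-never for privileged execution.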
    bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
    bool xn = te->xn || (isWritable && state.sctlr.wxn) ||
        (ap == 3 && state.sctlr.uwxn && is_priv);
    if (is_fetch && (abt || xn ||
                     (te->longDescFormat && te->pxn && is_priv) ||
                     (state.securityState == SecurityState::Secure &&
                      te->ns && state.scr.sif))) {
        stats.permsFaults++;
        DPRINTF(MMU, "MMU Fault: Prefetch abort on permission check. AP:%d "
                "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d \n",
                ap, is_priv, is_write, te->ns,
                state.scr.sif, state.sctlr.afe);
        // Use PC value instead of vaddr because vaddr might be aligned to
        // cache line and should not be the address reported in FAR
        return std::make_shared<PrefetchAbort>(
            req->getPC(),
            ArmFault::PermissionLL + te->lookupLevel,
            state.isStage2, tran_method);
    } else if (abt | hapAbt) {
        stats.permsFaults++;
        DPRINTF(MMU, "MMU Fault: Data abort on permission check. AP:%d priv:%d"
                " write:%d\n", ap, is_priv, is_write);
        return std::make_shared<DataAbort>(
            vaddr, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            state.isStage2 | !abt, tran_method);
    }
    return NoFault;
}

Fault
MMU::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
                        ThreadContext *tc, bool stage2)
{
    return checkPermissions64(te, req, mode, tc, stage2 ? s2State : s1State);
}

Fault
MMU::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
                        ThreadContext *tc, CachedState &state)
{
    assert(state.aarch64);

    // A data cache maintenance instruction that operates by VA does
    // not generate a Permission fault unless:
    // * It is a data cache invalidate (dc ivac) which requires write
    //   permissions to the VA, or
    // * It is executed from EL0
    if (req->isCacheClean() && state.exceptionLevel != EL0 && !state.isStage2) {
        return NoFault;
    }

    Addr vaddr_tainted = req->getVaddr();
    Request::Flags flags = req->getFlags();
    bool is_fetch = (mode == Execute);
    // Cache clean operations require read permissions to the specified VA
    bool is_write = !req->isCacheClean() && mode == Write;
    bool is_atomic = req->isAtomic();

    updateMiscReg(tc, state.curTranType, state.isStage2);

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (state.isStage2 && req->isPTWalk() && state.hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return std::make_shared<DataAbort>(
            vaddr_tainted, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            state.isStage2, TranMethod::LpaeTran);
    }

    // Generate an alignment fault for unaligned accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr_tainted & mask(flags & AlignmentMask)) {
                stats.alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    DomainType::NoAccess,
                    is_atomic ? false : is_write,
                    ArmFault::AlignmentFault, state.isStage2,
                    TranMethod::LpaeTran);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return std::make_shared<PrefetchAbort>(
                vaddr_tainted,
                ArmFault::PrefetchUncacheable,
                state.isStage2, TranMethod::LpaeTran);
        }
    }

    bool grant = false;
    // grant_read is used for faults from an atomic instruction that
    // both reads and writes from a memory location. From an ISS point
    // of view they count as read if a read to that address would have
    // generated the fault; they count as writes otherwise
    bool grant_read = true;

    if (state.isStage2) {
        std::tie(grant, grant_read) = s2PermBits64(te, req, mode, tc, state,
            (!is_write && !is_fetch), is_write, is_fetch);
    } else {
        std::tie(grant, grant_read) = s1PermBits64(te, req, mode, tc, state,
            (!is_write && !is_fetch), is_write, is_fetch);
    }

    if (!grant) {
        if (is_fetch) {
            stats.permsFaults++;
            DPRINTF(MMU, "MMU Fault: Prefetch abort on permission check. "
                    "ns:%d scr.sif:%d sctlr.afe: %d\n",
                    te->ns, state.scr.sif, state.sctlr.afe);
            // Use PC value instead of vaddr because vaddr might be aligned to
            // cache line and should not be the address reported in FAR
            return std::make_shared<PrefetchAbort>(
                req->getPC(),
                ArmFault::PermissionLL + te->lookupLevel,
                state.isStage2, TranMethod::LpaeTran);
        } else {
            stats.permsFaults++;
            DPRINTF(MMU, "MMU Fault: Data abort on permission check."
                    " ns:%d\n", te->ns);
            return std::make_shared<DataAbort>(
                vaddr_tainted, te->domain,
                (is_atomic && !grant_read) ? false : is_write,
                ArmFault::PermissionLL + te->lookupLevel,
                state.isStage2, TranMethod::LpaeTran);
        }
    }

    return NoFault;
}

std::pair<bool, bool>
MMU::s2PermBits64(TlbEntry *te, const RequestPtr &req, Mode mode,
                  ThreadContext *tc, CachedState &state,
                  bool r, bool w, bool x)
{
    assert(ArmSystem::haveEL(tc, EL2) && state.exceptionLevel != EL2);

    // In stage 2 we use the hypervisor access permission bits.
    // The following permissions are described in ARM DDI 0487A.f
    // D4-1802
    bool grant = false;
    bool grant_read = te->hap & 0b01;
    bool grant_write = te->hap & 0b10;

    uint8_t xn = te->xn;
    uint8_t pxn = te->pxn;

    if (ArmSystem::haveEL(tc, EL3) &&
        state.securityState == SecurityState::Secure &&
        te->ns && state.scr.sif) {
        xn = true;
    }

    DPRINTF(MMU,
            "Checking S2 permissions: hap:%d, xn:%d, pxn:%d, r:%d, "
            "w:%d, x:%d\n", te->hap, xn, pxn, r, w, x);

    if (x) {
        grant = !xn;
    } else if (req->isAtomic()) {
        grant = grant_read || grant_write;
    } else if (w) {
        grant = grant_write;
    } else if (r) {
        grant = grant_read;
    } else {
        panic("Invalid Operation\n");
    }

    return std::make_pair(grant, grant_read);
}

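// FEAT_S1PIE stage 1 "indirect" permissions: the descriptor carries a
// 4-bit permission-indirection index which selects a nibble of the cached
// PIR (privileged) and PIRE0 (unprivileged) register values; that nibble
// encodes the effective read/write/execute permissions decoded below.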
std::tuple<bool, bool, bool>
MMU::s1IndirectPermBits64(TlbEntry *te, const RequestPtr &req, Mode mode,
                          ThreadContext *tc, CachedState &state,
                          bool r, bool w, bool x)
{
    const bool is_priv = state.isPriv && !(req->getFlags() & UserMode);
    TranslationRegime regime = !is_priv ? TranslationRegime::EL10 :
        state.currRegime;

    bool wxn = false;
    uint8_t xn = te->xn;
    uint8_t pxn = te->pxn;

    // Read PIR corresponding to the target configuration.
    uint8_t piindex = te->piindex;
    uint8_t ppi = bits(state.pir, 4 * piindex + 3, 4 * piindex);
    uint8_t upi = bits(state.pire0, 4 * piindex + 3, 4 * piindex);

    DPRINTF(MMU, "Checking S1 indirect permissions: "
            "piindex:%d, ppi:%d, xn:%d, pxn:%d, r:%d, "
            "w:%d, x:%d, is_priv: %d, wxn: %d\n", piindex, ppi,
            xn, pxn, r, w, x, is_priv, wxn);

    // Indirect permission check.
    // Decode indirect permissions
    bool pr = false;
    bool pw = false;
    bool px = false;
    bool p_wxn = ppi == 0b0110;
    switch (ppi) {
      // No access
      case 0b0000: pr = 0; pw = 0; px = 0; /* pgcs = 0; */ break;
      // Privileged read
      case 0b0001: pr = 1; pw = 0; px = 0; /* pgcs = 0; */ break;
      // Privileged execute
      case 0b0010: pr = 0; pw = 0; px = 1; /* pgcs = 0; */ break;
      // Privileged read and execute
      case 0b0011: pr = 1; pw = 0; px = 1; /* pgcs = 0; */ break;
      // Reserved
      case 0b0100: pr = 0; pw = 0; px = 0; /* pgcs = 0; */ break;
      // Privileged read and write
      case 0b0101: pr = 1; pw = 1; px = 0; /* pgcs = 0; */ break;
      // Privileged read, write and execute
      case 0b0110: pr = 1; pw = 1; px = 1; /* pgcs = 0; */ break;
      // Privileged read, write and execute
      case 0b0111: pr = 1; pw = 1; px = 1; /* pgcs = 0; */ break;
      // Privileged read
      case 0b1000: pr = 1; pw = 0; px = 0; /* pgcs = 0; */ break;
      // Privileged read and gcs
      case 0b1001: pr = 1; pw = 0; px = 0; /* pgcs = 1; */ break;
      // Privileged read and execute
      case 0b1010: pr = 1; pw = 0; px = 1; /* pgcs = 0; */ break;
      // Reserved
      case 0b1011: pr = 0; pw = 0; px = 0; /* pgcs = 0; */ break;
      // Privileged read and write
      case 0b1100: pr = 1; pw = 1; px = 0; /* pgcs = 0; */ break;
      // Reserved
      case 0b1101: pr = 0; pw = 0; px = 0; /* pgcs = 0; */ break;
      // Privileged read, write and execute
      case 0b1110: pr = 1; pw = 1; px = 1; /* pgcs = 0; */ break;
      // Reserved
      case 0b1111: pr = 0; pw = 0; px = 0; /* pgcs = 0; */ break;
    }

    bool grant_read;
    bool grant_write;
    bool grant_exec;
    if (hasUnprivRegime(regime)) {
        bool ur = false;
        bool uw = false;
        bool ux = false;
        bool u_wxn = upi == 0b0110;
        switch (upi) {
          // No access
          case 0b0000: ur = 0; uw = 0; ux = 0; /* ugcs = 0; */ break;
          // Unprivileged read
          case 0b0001: ur = 1; uw = 0; ux = 0; /* ugcs = 0; */ break;
          // Unprivileged execute
          case 0b0010: ur = 0; uw = 0; ux = 1; /* ugcs = 0; */ break;
          // Unprivileged read and execute
          case 0b0011: ur = 1; uw = 0; ux = 1; /* ugcs = 0; */ break;
          // Reserved
          case 0b0100: ur = 0; uw = 0; ux = 0; /* ugcs = 0; */ break;
          // Unprivileged read and write
          case 0b0101: ur = 1; uw = 1; ux = 0; /* ugcs = 0; */ break;
          // Unprivileged read, write and execute
          case 0b0110: ur = 1; uw = 1; ux = 1; /* ugcs = 0; */ break;
          // Unprivileged read, write and execute
          case 0b0111: ur = 1; uw = 1; ux = 1; /* ugcs = 0; */ break;
          // Unprivileged read
          case 0b1000: ur = 1; uw = 0; ux = 0; /* ugcs = 0; */ break;
          // Unprivileged read and gcs
          case 0b1001: ur = 1; uw = 0; ux = 0; /* ugcs = 1; */ break;
          // Unprivileged read and execute
          case 0b1010: ur = 1; uw = 0; ux = 1; /* ugcs = 0; */ break;
          // Reserved
          case 0b1011: ur = 0; uw = 0; ux = 0; /* ugcs = 0; */ break;
          // Unprivileged read and write
          case 0b1100: ur = 1; uw = 1; ux = 0; /* ugcs = 0; */ break;
          // Reserved
          case 0b1101: ur = 0; uw = 0; ux = 0; /* ugcs = 0; */ break;
          // Unprivileged read, write and execute
          case 0b1110: ur = 1; uw = 1; ux = 1; /* ugcs = 0; */ break;
          // Reserved
          case 0b1111: ur = 0; uw = 0; ux = 0; /* ugcs = 0; */ break;
        }

        // PAN does not affect CMOs other than DC ZVA
        bool pan_access = !req->isCacheMaintenance() ||
            req->getFlags() & Request::CACHE_BLOCK_ZERO;

        if (_release->has(ArmExtension::FEAT_PAN) && pan_access) {
            if (state.cpsr.pan && upi != 0) {
                pr = false;
                pw = false;
            }
        }

        grant_read = is_priv ? pr : ur;
        grant_write = is_priv ? pw : uw;
        grant_exec = is_priv ? px : ux;
        wxn = is_priv ? p_wxn : u_wxn;
    } else {
        grant_read = pr;
        grant_write = pw;
        grant_exec = px;
        wxn = p_wxn;
    }

    // Do not allow execution from a writable location
    // if wxn is set
    grant_exec = grant_exec && !(wxn && grant_write);

    if (ArmSystem::haveEL(tc, EL3) &&
        state.securityState == SecurityState::Secure && te->ns) {
        grant_exec = grant_exec && !state.scr.sif;
    }

    return std::make_tuple(grant_read, grant_write, grant_exec);
}

std::tuple<bool, bool, bool>
MMU::s1DirectPermBits64(TlbEntry *te, const RequestPtr &req, Mode mode,
                        ThreadContext *tc, CachedState &state,
                        bool r, bool w, bool x)
{
    const uint8_t ap = te->ap & 0b11; // 2-bit access protection field
    const bool is_priv = state.isPriv && !(req->getFlags() & UserMode);

    bool wxn = state.sctlr.wxn;
    uint8_t xn = te->xn;
    uint8_t pxn = te->pxn;

    DPRINTF(MMU, "Checking S1 direct permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
            "w:%d, x:%d, is_priv: %d, wxn: %d\n", ap, xn,
            pxn, r, w, x, is_priv, wxn);

    bool grant_read;
    bool grant_write;
    bool grant_exec;

    TranslationRegime regime = !is_priv ? TranslationRegime::EL10 :
        state.currRegime;
    // Traditional, direct permission check.
    if (hasUnprivRegime(regime)) {
        bool pr = false;
        bool pw = false;
        bool ur = false;
        bool uw = false;
        // Apply leaf permissions
        switch (ap) {
          case 0b00: // Privileged access
            pr = 1; pw = 1; ur = 0; uw = 0;
            break;
          case 0b01: // No effect
            pr = 1; pw = 1; ur = 1; uw = 1;
            break;
          case 0b10: // Read-only, privileged access
            pr = 1; pw = 0; ur = 0; uw = 0;
            break;
          case 0b11: // Read-only
            pr = 1; pw = 0; ur = 1; uw = 0;
            break;
        }

        // Locations writable by unprivileged code cannot be executed by
        // privileged code
        const bool px = !(pxn || uw);
        const bool ux = !xn;

        // PAN does not affect CMOs other than DC ZVA
        bool pan_access = !req->isCacheMaintenance() ||
            req->getFlags() & Request::CACHE_BLOCK_ZERO;

        if (_release->has(ArmExtension::FEAT_PAN) && pan_access) {
            bool pan = state.cpsr.pan && (ur || uw);
            pr = pr && !pan;
            pw = pw && !pan;
        }

        grant_read = is_priv ? pr : ur;
        grant_write = is_priv ? pw : uw;
        grant_exec = is_priv ? px : ux;
    } else {
        switch (bits(ap, 1)) {
          case 0b0: // No effect
            grant_read = 1; grant_write = 1;
            break;
          case 0b1: // Read-Only
            grant_read = 1; grant_write = 0;
            break;
        }
        grant_exec = !xn;
    }

    // Do not allow execution from a writable location
    // if wxn is set
    grant_exec = grant_exec && !(wxn && grant_write);

    if (ArmSystem::haveEL(tc, EL3) &&
        state.securityState == SecurityState::Secure && te->ns) {
        grant_exec = grant_exec && !state.scr.sif;
    }

    return std::make_tuple(grant_read, grant_write, grant_exec);
}

std::pair<bool, bool>
MMU::s1PermBits64(TlbEntry *te, const RequestPtr &req, Mode mode,
                  ThreadContext *tc, CachedState &state,
                  bool r, bool w, bool x)
{
    bool grant = false;
    bool grant_read = true, grant_write = true, grant_exec = true;

    // Check whether the indirect permission scheme (FEAT_S1PIE) is enabled.
    if (state.pie) {
        std::tie(grant_read, grant_write, grant_exec) =
            s1IndirectPermBits64(te, req, mode, tc, state, r, w, x);
    } else {
        std::tie(grant_read, grant_write, grant_exec) =
            s1DirectPermBits64(te, req, mode, tc, state, r, w, x);
    }

    if (x) {
        grant = grant_exec;
    } else if (req->isAtomic()) {
        grant = grant_read && grant_write;
    } else if (w) {
        grant = grant_write;
    } else {
        grant = grant_read;
    }

    return std::make_pair(grant, grant_read);
}

bool
MMU::hasUnprivRegime(TranslationRegime regime)
{
    switch (regime) {
      case TranslationRegime::EL10:
      case TranslationRegime::EL20:
        return true;
      default:
        return false;
    }
}

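// Remove the tag from a tagged 64-bit virtual address: the position of
// the top address bit depends on the TBI/TCR configuration, so it is
// obtained through the memoized computeAddrTop helper before masking.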
Addr
MMU::purifyTaggedAddr(Addr vaddr_tainted, ThreadContext *tc,
                      ExceptionLevel el, TCR tcr, bool is_inst,
                      CachedState& state)
{
    const bool selbit = bits(vaddr_tainted, 55);

    // Call the memoized version of computeAddrTop
    const auto topbit = state.computeAddrTop(tc, selbit, is_inst, tcr, el);

    return maskTaggedAddr(vaddr_tainted, tc, el, topbit);
}

Fault
MMU::translateMmuOff(ThreadContext *tc, const RequestPtr &req, Mode mode,
                     ArmTranslationType tran_type, Addr vaddr,
                     bool long_desc_format, CachedState &state)
{
    bool is_fetch = (mode == Execute);
    bool is_atomic = req->isAtomic();
    req->setPaddr(vaddr);
    // When the MMU is off the security attribute corresponds to the
    // security state of the processor
    if (state.securityState == SecurityState::Secure)
        req->setFlags(Request::SECURE);
    else
        req->clearFlags(Request::SECURE);
    if (state.aarch64) {
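        // Even with translation disabled, AArch64 checks that the
        // flat-mapped address fits in the implemented PA range; any set
        // bits above the top address bit raise an Address Size fault.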
        bool selbit = bits(vaddr, 55);
        TCR tcr1 = tc->readMiscReg(MISCREG_TCR_EL1);
        int topbit = computeAddrTop(tc, selbit, is_fetch, tcr1, currEL(tc));
        int addr_sz = bits(vaddr, topbit, physAddrRange);
        if (addr_sz != 0) {
            Fault f;
            if (is_fetch)
                f = std::make_shared<PrefetchAbort>(vaddr,
                    ArmFault::AddressSizeLL, state.isStage2,
                    TranMethod::LpaeTran);
            else
                f = std::make_shared<DataAbort>(vaddr,
                    DomainType::NoAccess,
                    is_atomic ? false : mode==Write,
                    ArmFault::AddressSizeLL, state.isStage2,
                    TranMethod::LpaeTran);
            return f;
        }
    }

    // @todo: double check this (ARM ARM issue C B3.2.1)
    if (long_desc_format || state.sctlr.tre == 0 || state.nmrr.ir0 == 0 ||
        state.nmrr.or0 == 0 || state.prrr.tr0 != 0x2) {
        if (!req->isCacheMaintenance()) {
            req->setFlags(Request::UNCACHEABLE);
        }
        req->setFlags(Request::STRICT_ORDER);
    }

    // Set memory attributes
    bool in_secure_state = state.securityState == SecurityState::Secure;
    TlbEntry temp_te;
    temp_te.ns = !in_secure_state;
    bool dc = (HaveExt(tc, ArmExtension::FEAT_VHE) &&
               state.hcr.e2h == 1 && state.hcr.tge == 1) ? 0 : state.hcr.dc;
    bool i_cacheability = state.sctlr.i && !state.sctlr.m;
    if (state.isStage2 || !dc || state.exceptionLevel == EL2) {
        temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
                                 : TlbEntry::MemoryType::StronglyOrdered;
        temp_te.innerAttrs = i_cacheability ? 0x2 : 0x0;
        temp_te.outerAttrs = i_cacheability ? 0x2 : 0x0;
        temp_te.shareable = true;
        temp_te.outerShareable = true;
    } else {
        temp_te.mtype = TlbEntry::MemoryType::Normal;
        temp_te.innerAttrs = 0x3;
        temp_te.outerAttrs = 0x3;
        temp_te.shareable = false;
        temp_te.outerShareable = false;
    }
    temp_te.setAttributes(long_desc_format);
    DPRINTF(MMU, "(No MMU) setting memory attributes: shareable: "
            "%d, innerAttrs: %d, outerAttrs: %d, stage2: %d\n",
            temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
            state.isStage2);
    setAttr(temp_te.attributes);

    return testAndFinalize(req, tc, mode, nullptr, state);
}

Fault
MMU::translateMmuOn(ThreadContext *tc, const RequestPtr &req, Mode mode,
                    Translation *translation, bool &delay, bool timing,
                    bool functional, Addr vaddr,
                    TranMethod tran_method, CachedState &state)
{
    TlbEntry *te = NULL;
    bool is_fetch = (mode == Execute);
    TlbEntry mergeTe;

    Request::Flags flags = req->getFlags();
    Addr vaddr_tainted = req->getVaddr();

    Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
                              functional, &mergeTe, state);
    // only proceed if we have a valid table entry
    if (!isCompleteTranslation(te) && (fault == NoFault)) delay = true;

    // If we have the table entry transfer some of the attributes to the
    // request that triggered the translation
    if (isCompleteTranslation(te)) {
        // Set memory attributes
        DPRINTF(MMU,
                "Setting memory attributes: shareable: %d, innerAttrs: %d, "
                "outerAttrs: %d, mtype: %d, stage2: %d\n",
                te->shareable, te->innerAttrs, te->outerAttrs,
                static_cast<uint8_t>(te->mtype), state.isStage2);
        setAttr(te->attributes);

        if (te->nonCacheable && !req->isCacheMaintenance())
            req->setFlags(Request::UNCACHEABLE);

        // Require requests to be ordered if the request goes to
        // strongly ordered or device memory (i.e., anything other
        // than normal memory requires strict order).
        if (te->mtype != TlbEntry::MemoryType::Normal)
            req->setFlags(Request::STRICT_ORDER);

        Addr pa = te->pAddr(vaddr);
        req->setPaddr(pa);

        if (state.securityState == SecurityState::Secure && !te->ns) {
            req->setFlags(Request::SECURE);
        } else {
            req->clearFlags(Request::SECURE);
        }
        if (!is_fetch && fault == NoFault &&
            (vaddr & mask(flags & AlignmentMask)) &&
            (te->mtype != TlbEntry::MemoryType::Normal)) {
            // Unaligned accesses to Device memory should always cause an
            // abort regardless of sctlr.a
            stats.alignFaults++;
            bool is_write = (mode == Write);
            return std::make_shared<DataAbort>(
                vaddr_tainted,
                DomainType::NoAccess, is_write,
                ArmFault::AlignmentFault, state.isStage2,
                tran_method);
        }

        if (fault == NoFault)
            fault = testAndFinalize(req, tc, mode, te, state);
    }

    return fault;
}

Fault
MMU::translateFs(const RequestPtr &req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing,
                 ArmTranslationType tran_type, bool functional,
                 CachedState &state)
{
    // No such thing as a functional timing access
    assert(!(timing && functional));

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (state.aarch64) {
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, state.exceptionLevel,
            static_cast<TCR>(state.ttbcr), mode==Execute, state);
    } else {
        vaddr = vaddr_tainted;
    }
    Request::Flags flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool long_desc_format = state.aarch64 || longDescFormatInUse(tc);
    TranMethod tran_method = long_desc_format ?
        TranMethod::LpaeTran : TranMethod::VmsaTran;

    DPRINTF(MMU,
            "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
            state.isPriv, flags & UserMode,
            state.securityState == SecurityState::Secure,
            tran_type & S1S2NsTran);

    DPRINTF(MMU, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
            "flags %#lx tranType 0x%x\n", vaddr_tainted, mode,
            state.isStage2, state.scr, state.sctlr, flags, tran_type);

    if (!state.isStage2) {
        if ((req->isInstFetch() && (!state.sctlr.i)) ||
            ((!req->isInstFetch()) && (!state.sctlr.c))) {
            if (!req->isCacheMaintenance()) {
                req->setFlags(Request::UNCACHEABLE);
            }
            req->setFlags(Request::STRICT_ORDER);
        }
    }
    if (!is_fetch) {
        if (state.sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                stats.alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, state.isStage2,
                    tran_method);
            }
        }
    }

    bool vm = state.hcr.vm;
    if (HaveExt(tc, ArmExtension::FEAT_VHE) &&
        state.hcr.e2h == 1 && state.hcr.tge == 1)
        vm = 0;
    else if (state.hcr.dc == 1)
        vm = 1;

    Fault fault = NoFault;
    // If guest MMU is off or hcr.vm=0 go straight to stage2
    if ((state.isStage2 && !vm) || (!state.isStage2 && !state.sctlr.m)) {
        fault = translateMmuOff(tc, req, mode, tran_type, vaddr,
                                long_desc_format, state);
    } else {
        DPRINTF(MMU, "Translating %s=%#x context=%d\n",
                state.isStage2 ? "IPA" : "VA", vaddr_tainted, state.asid);
        // Translation enabled
        fault = translateMmuOn(tc, req, mode, translation, delay, timing,
                               functional, vaddr, tran_method, state);
    }

    // Check for Debug Exceptions
    SelfDebug *sd = ArmISA::ISA::getSelfDebug(tc);

    if (sd->enabled() && fault == NoFault) {
        fault = sd->testDebug(tc, req, mode);
    }

    return fault;
}

Fault
MMU::translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode,
                     ArmTranslationType tran_type)
{
    return translateAtomic(req, tc, mode, tran_type, false);
}

Fault
MMU::translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode,
                     ArmTranslationType tran_type, bool stage2)
{
    auto& state = updateMiscReg(tc, tran_type, stage2);

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false,
                            tran_type, false, state);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false, state);
    assert(!delay);
    return fault;
}

Fault
MMU::translateFunctional(const RequestPtr &req, ThreadContext *tc, Mode mode)
{
    return translateFunctional(req, tc, mode, NormalTran, false);
}

Fault
MMU::translateFunctional(const RequestPtr &req, ThreadContext *tc, Mode mode,
                         ArmTranslationType tran_type)
{
    return translateFunctional(req, tc, mode, tran_type, false);
}

Fault
MMU::translateFunctional(const RequestPtr &req, ThreadContext *tc, Mode mode,
                         ArmTranslationType tran_type, bool stage2)
{
    auto& state = updateMiscReg(tc, tran_type, stage2);

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false,
                            tran_type, true, state);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false, state);
    assert(!delay);
    return fault;
}

void
MMU::translateTiming(const RequestPtr &req, ThreadContext *tc,
                     Translation *translation, Mode mode,
                     ArmTranslationType tran_type, bool stage2)
{
    auto& state = updateMiscReg(tc, tran_type, stage2);

    assert(translation);

    translateComplete(req, tc, translation, mode, tran_type,
                      stage2, state);
}

Fault
MMU::translateComplete(const RequestPtr &req, ThreadContext *tc,
                       Translation *translation, Mode mode,
                       ArmTranslationType tran_type, bool call_from_s2)
{
    return translateComplete(req, tc, translation, mode, tran_type,
                             call_from_s2, s1State);
}

Fault
MMU::translateComplete(const RequestPtr &req, ThreadContext *tc,
                       Translation *translation, Mode mode,
                       ArmTranslationType tran_type, bool call_from_s2,
                       CachedState &state)
{
    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, translation, delay, true, tran_type,
                            false, state);
    else
        fault = translateSe(req, tc, mode, translation, delay, true, state);

    DPRINTF(MMU, "Translation returning delay=%d fault=%d\n", delay,
            fault != NoFault);
    // If we have a translation, and we're not in the middle of doing a stage
    // 2 translation tell the translation that we've either finished or it's
    // going to take a while. By not doing this when we're in the middle of a
    // stage 2 translation we prevent marking the translation as delayed twice,
    // once when the translation starts and again when the stage 1 translation
    // completes.

    if (translation && (call_from_s2 || !state.stage2Req || req->hasPaddr() ||
        fault != NoFault)) {
        if (!delay)
            translation->finish(fault, req, tc, mode);
        else
            translation->markDelayed();
    }
    return fault;
}

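// Read the current VMID from VTTBR_EL2. The VMID is 8 bits wide
// (VTTBR_EL2[55:48]) unless 16-bit VMIDs are implemented
// (ID_AA64MMFR1_EL1.VMIDBits == 0b0010) and selected through VTCR_EL2.VS
// while EL2 is using AArch64, in which case VTTBR_EL2[63:48] is used.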
vmid_t
MMU::CachedState::getVMID(ThreadContext *tc) const
{
    AA64MMFR1 mmfr1 = tc->readMiscReg(MISCREG_ID_AA64MMFR1_EL1);
    VTCR_t vtcr = tc->readMiscReg(MISCREG_VTCR_EL2);
    vmid_t vmid = 0;

    switch (mmfr1.vmidbits) {
      case 0b0000:
        // 8 bits
        vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48);
        break;
      case 0b0010:
        if (vtcr.vs && ELIs64(tc, EL2)) {
            // 16 bits
            vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 63, 48);
        } else {
            // 8 bits
            vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48);
        }
        break;
      default:
        panic("Reserved ID_AA64MMFR1_EL1.VMIDBits value: %#x",
              mmfr1.vmidbits);
    }

    return vmid;
}

MMU::CachedState&
MMU::updateMiscReg(ThreadContext *tc,
                   ArmTranslationType tran_type, bool stage2)
{
    // check if the regs have changed, or the translation mode is different.
    // NOTE: the tran type doesn't affect stage 2 TLB's as they only handle
    // one type of translation anyway

    auto& state = stage2 ? s2State : s1State;
    if (state.miscRegValid && miscRegContext == tc->contextId() &&
        ((tran_type == state.curTranType) || stage2)) {

    } else {
        DPRINTF(MMU, "MMU variables changed!\n");
        state.updateMiscReg(tc, tran_type);

        itbStage2->setVMID(state.vmid);
        dtbStage2->setVMID(state.vmid);

        for (auto tlb : instruction) {
            static_cast<TLB*>(tlb)->setVMID(state.vmid);
        }
        for (auto tlb : data) {
            static_cast<TLB*>(tlb)->setVMID(state.vmid);
        }
        for (auto tlb : unified) {
            static_cast<TLB*>(tlb)->setVMID(state.vmid);
        }

        miscRegContext = tc->contextId();
    }

    if (state.directToStage2) {
        s2State.updateMiscReg(tc, tran_type);
        return s2State;
    } else {
        return state;
    }
}

void
MMU::CachedState::updateMiscReg(ThreadContext *tc,
                                ArmTranslationType tran_type)
{
    cpsr = tc->readMiscReg(MISCREG_CPSR);
    scr = tc->readMiscReg(MISCREG_SCR_EL3);
    hcr = tc->readMiscReg(MISCREG_HCR_EL2);

    // Dependencies: SCR/SCR_EL3, CPSR
    securityState = ArmISA::isSecure(tc) &&
        !(tran_type & HypMode) && !(tran_type & S1S2NsTran) ?
        SecurityState::Secure : SecurityState::NonSecure;

    exceptionLevel = tranTypeEL(cpsr, scr, tran_type);
    currRegime = translationRegime(tc, exceptionLevel);
    aarch64 = isStage2 ?
        ELIs64(tc, EL2) :
        ELIs64(tc, translationEl(currRegime));

    if (aarch64) {  // AArch64
        // determine EL we need to translate in
        switch (currRegime) {
          case TranslationRegime::EL10:
            {
                sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
                ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
                if (mmu->release()->has(ArmExtension::FEAT_S1PIE)) {
                    pir = tc->readMiscReg(MISCREG_PIR_EL1);
                    pire0 = tc->readMiscReg(MISCREG_PIRE0_EL1);
                    TCR2 tcr2 = tc->readMiscReg(MISCREG_TCR2_EL1);
                    pie = tcr2.pie;
                }
                uint64_t ttbr_asid = ttbcr.a1 ?
                    tc->readMiscReg(MISCREG_TTBR1_EL1) :
                    tc->readMiscReg(MISCREG_TTBR0_EL1);
                asid = bits(ttbr_asid,
                            (mmu->haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
            }
            break;
          case TranslationRegime::EL20:
            {
                // VHE code for EL2&0 regime
                sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
                ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
                if (mmu->release()->has(ArmExtension::FEAT_S1PIE)) {
                    pir = tc->readMiscReg(MISCREG_PIR_EL2);
                    pire0 = tc->readMiscReg(MISCREG_PIRE0_EL2);
                    TCR2 tcr2 = tc->readMiscReg(MISCREG_TCR2_EL2);
                    pie = tcr2.pie;
                }
                uint64_t ttbr_asid = ttbcr.a1 ?
                    tc->readMiscReg(MISCREG_TTBR1_EL2) :
                    tc->readMiscReg(MISCREG_TTBR0_EL2);
                asid = bits(ttbr_asid,
                            (mmu->haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
            }
            break;
          case TranslationRegime::EL2:
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
            if (mmu->release()->has(ArmExtension::FEAT_S1PIE)) {
                pir = tc->readMiscReg(MISCREG_PIR_EL2);
                TCR2 tcr2 = tc->readMiscReg(MISCREG_TCR2_EL2);
                pie = tcr2.pie;
            }
            asid = -1;
            break;
          case TranslationRegime::EL3:
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL3);
            if (mmu->release()->has(ArmExtension::FEAT_S1PIE)) {
                pir = tc->readMiscReg(MISCREG_PIR_EL3);
                pie = static_cast<TCR>(ttbcr).pie;
            }
            asid = -1;
            break;
        }

        isPriv = exceptionLevel != EL0;
        if (mmu->release()->has(ArmExtension::VIRTUALIZATION)) {
            vmid = getVMID(tc);
            bool vm = hcr.vm;
            if (HaveExt(tc, ArmExtension::FEAT_VHE) &&
                hcr.e2h == 1 && hcr.tge == 1) {
                vm = 0;
            }

            if (hcr.e2h == 1 && (exceptionLevel == EL2
                                 || (hcr.tge == 1 && exceptionLevel == EL0))) {
                directToStage2 = false;
                stage2Req = false;
                stage2DescReq = false;
            } else {
                // Work out if we should skip the first stage of translation and go
                // directly to stage 2. This value is cached so we don't have to
                // compute it for every translation.
                const bool el2_enabled = EL2Enabled(tc);
                stage2Req = isStage2 ||
                            (vm && exceptionLevel < EL2 && el2_enabled &&
                             !(tran_type & S1CTran) &&
                             !(tran_type & S1E1Tran)); // <--- FIX THIS HACK
                stage2DescReq = isStage2 ||
                                (vm && exceptionLevel < EL2 && el2_enabled);
                directToStage2 = stage2Req && !sctlr.m;
            }
        } else {
            vmid = 0;
            directToStage2 = false;
            stage2Req = false;
            stage2DescReq = false;
        }
    } else {  // AArch32
        sctlr = tc->readMiscReg(snsBankedIndex(MISCREG_SCTLR, tc,
                                               !isSecure(tc)));
        ttbcr = tc->readMiscReg(snsBankedIndex(MISCREG_TTBCR, tc,
                                               !isSecure(tc)));
        isPriv = cpsr.mode != MODE_USER;
        if (longDescFormatInUse(tc)) {
            uint64_t ttbr_asid = tc->readMiscReg(
                snsBankedIndex(ttbcr.a1 ? MISCREG_TTBR1 :
                               MISCREG_TTBR0, tc, !isSecure(tc)));
            asid = bits(ttbr_asid, 55, 48);
        } else { // Short-descriptor translation table format in use
            CONTEXTIDR context_id = tc->readMiscReg(snsBankedIndex(
                MISCREG_CONTEXTIDR, tc, !isSecure(tc)));
            asid = context_id.asid;
        }
        prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR, tc,
                                              !isSecure(tc)));
        nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR, tc,
                                              !isSecure(tc)));
        dacr = tc->readMiscReg(snsBankedIndex(MISCREG_DACR, tc,
                                              !isSecure(tc)));

        if (mmu->release()->has(ArmExtension::VIRTUALIZATION)) {
            vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
            if (exceptionLevel == EL2) {
                dacr = 0xffffffff;
            }
            // Work out if we should skip the first stage of translation and go
            // directly to stage 2. This value is cached so we don't have to
            // compute it for every translation.
            const bool el2_enabled = EL2Enabled(tc);
            stage2Req = isStage2 ||
                        (hcr.vm && exceptionLevel < EL2 && el2_enabled &&
                         !(tran_type & S1CTran));
            stage2DescReq = isStage2 ||
                            (hcr.vm && exceptionLevel < EL2 && el2_enabled);
            directToStage2 = stage2Req && !sctlr.m;
        } else {
            vmid = 0;
            stage2Req = false;
            directToStage2 = false;
            stage2DescReq = false;
        }
    }
    miscRegValid = true;
    curTranType = tran_type;
}

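// Determine the EL to use for the purpose of a translation, given a
// specific translation type. If the type doesn't pin the translation to
// a particular EL, the current EL from the CPSR is used.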
ExceptionLevel
MMU::tranTypeEL(CPSR cpsr, SCR scr, ArmTranslationType type)
{
    switch (type) {
      case S1E0Tran:
      case S12E0Tran:
        return EL0;

      case S1E1Tran:
      case S12E1Tran:
      case S1S2NsTran:
        return EL1;

      case S1E2Tran:
      case HypMode:
        return EL2;

      case S1E3Tran:
        return EL3;

      case S1CTran:
        return currEL(cpsr) == EL3 && scr.ns == 0 ?
            EL3 : EL1;

      case NormalTran:
        return currEL(cpsr);

      default:
        panic("Unknown translation mode!\n");
    }
}

Fault
MMU::getTE(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode,
           Translation *translation, bool timing, bool functional,
           SecurityState ss, PASpace ipaspace, ArmTranslationType tran_type,
           bool stage2)
{
    return getTE(te, req, tc, mode, translation, timing, functional,
                 ss, ipaspace, tran_type, stage2 ? s2State : s1State);
}

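// Lookup an entry in the selected TLB (stage 1 or stage 2, instruction or
// data side, per mode). A functional lookup probes the TLB without
// updating replacement state or statistics.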
TlbEntry*
MMU::lookup(Addr va, uint16_t asid, vmid_t vmid, SecurityState ss,
            bool functional, bool ignore_asn, TranslationRegime regime,
            bool stage2, BaseMMU::Mode mode)
{
    TLB *tlb = getTlb(mode, stage2);

    TlbEntry::KeyType lookup_data;

    lookup_data.va = va;
    lookup_data.asn = asid;
    lookup_data.ignoreAsn = ignore_asn;
    lookup_data.vmid = vmid;
    lookup_data.ss = ss;
    lookup_data.functional = functional;
    lookup_data.targetRegime = regime;
    lookup_data.mode = mode;

    return tlb->multiLookup(lookup_data);
}

Fault
MMU::getTE(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode,
           Translation *translation, bool timing, bool functional,
           SecurityState ss, PASpace ipaspace, ArmTranslationType tran_type,
           CachedState& state)
{
    // In a 2-stage system, the IPA->PA translation can be started via this
    // call so make sure the miscRegs are correct.
    if (state.isStage2) {
        updateMiscReg(tc, tran_type, true);
    }

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    TranslationRegime regime = state.currRegime;

    if (state.aarch64) {
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, state.exceptionLevel,
            static_cast<TCR>(state.ttbcr), mode==Execute, state);
    } else {
        vaddr = vaddr_tainted;
    }

    *te = lookup(vaddr, state.asid, state.vmid, ss, false,
                 false, regime, state.isStage2, mode);

    if (!isCompleteTranslation(*te)) {
        if (req->isPrefetch()) {
            // if the request is a prefetch don't attempt to fill the TLB or go
            // any further with the memory access (here we can safely use the
            // fault status for the short desc. format in all cases)
            stats.prefetchFaults++;
            return std::make_shared<PrefetchAbort>(
                vaddr_tainted, ArmFault::PrefetchTLBMiss, state.isStage2);
        }

        // start translation table walk, pass variables rather than
        // re-retrieving them in the table walker, for speed
        DPRINTF(MMU,
                "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
                vaddr_tainted, state.asid, state.vmid);

        Fault fault;
        fault = getTableWalker(mode, state.isStage2)->walk(
            req, tc, state.asid, state.vmid, mode,
            translation, timing, functional, ss,
            ipaspace, tran_type, state.stage2DescReq, *te);

        // for timing mode, return and wait for table walk,
        if (timing || fault != NoFault) {
            return fault;
        }

        *te = lookup(vaddr, state.asid, state.vmid, ss,
                     true, false, regime, state.isStage2, mode);
        assert(*te);
    }
    return NoFault;
}

Fault
MMU::getResultTe(TlbEntry **te, const RequestPtr &req,
                 ThreadContext *tc, Mode mode,
                 Translation *translation, bool timing, bool functional,
                 TlbEntry *mergeTe, CachedState &state)
{
    Fault fault;

    if (state.isStage2) {
        PASpace ipaspace = state.securityState == SecurityState::Secure ?
            PASpace::Secure : PASpace::NonSecure;

        // We are already in the stage 2 TLB. Grab the table entry for stage
        // 2 only. We are here because stage 1 translation is disabled.
        TlbEntry *s2_te = nullptr;
        // Get the stage 2 table entry
        fault = getTE(&s2_te, req, tc, mode, translation, timing, functional,
                      state.securityState, ipaspace,
                      state.curTranType, state);
        // Check permissions of stage 2
        if (isCompleteTranslation(s2_te) && (fault == NoFault)) {
            if (state.aarch64)
                fault = checkPermissions64(s2_te, req, mode, tc, state);
            else
                fault = checkPermissions(s2_te, req, mode, state);
        }
        *te = s2_te;
        return fault;
    }

    TlbEntry *s1_te = nullptr;

    Addr vaddr_tainted = req->getVaddr();

    // Get the stage 1 table entry
    fault = getTE(&s1_te, req, tc, mode, translation, timing, functional,
                  state.securityState, PASpace::NonSecure,
                  state.curTranType, state);
    // only proceed if we have a valid table entry
    if (isCompleteTranslation(s1_te) && (fault == NoFault)) {
        // Check stage 1 permissions before checking stage 2
        if (state.aarch64)
            fault = checkPermissions64(s1_te, req, mode, tc, state);
        else
            fault = checkPermissions(s1_te, req, mode, state);
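        // A stage 2 translation is also required: look up the stage 2
        // mapping of the stage 1 output address and merge the permissions
        // and attributes of the two entries.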
        if (state.stage2Req && (fault == NoFault)) {
            Stage2LookUp *s2_lookup = new Stage2LookUp(this, *s1_te,
                req, translation, mode, timing, functional,
                state.securityState, state.curTranType);
            fault = s2_lookup->getTe(tc, mergeTe);
            if (s2_lookup->isComplete()) {
                *te = mergeTe;
                // We've finished with the lookup so delete it
                delete s2_lookup;
            } else {
                // The lookup hasn't completed, so we can't delete it now. We
                // get round this by asking the object to self delete when the
                // translation is complete.
                s2_lookup->setSelfDelete();
            }
        } else {
            // This case deals with an S1 hit (or bypass), followed by
            // an S2 hit-but-perms issue
            if (state.isStage2) {
                DPRINTF(MMU, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
                        vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0,
                        fault);
                if (fault != NoFault) {
                    auto arm_fault = reinterpret_cast<ArmFault*>(fault.get());
                    arm_fault->annotate(ArmFault::S1PTW, false);
                    arm_fault->annotate(ArmFault::OVA, vaddr_tainted);
                }
            }
            *te = s1_te;
        }
    }
    return fault;
}

bool
MMU::isCompleteTranslation(TlbEntry *entry) const
{
    return entry && !entry->partial;
}

void
MMU::takeOverFrom(BaseMMU *old_mmu)
{
    BaseMMU::takeOverFrom(old_mmu);

    auto *ommu = dynamic_cast<MMU*>(old_mmu);
    assert(ommu);

    _attr = ommu->_attr;

    s1State = ommu->s1State;
    s2State = ommu->s2State;
}

void
MMU::setTestInterface(SimObject *_ti)
{
    if (!_ti) {
        test = nullptr;
    } else {
        TlbTestInterface *ti(dynamic_cast<TlbTestInterface *>(_ti));
        fatal_if(!ti, "%s is not a valid ARM TLB tester\n", _ti->name());
        test = ti;
        itbWalker->setTestInterface(test);
        dtbWalker->setTestInterface(test);
        itbStage2Walker->setTestInterface(test);
        dtbStage2Walker->setTestInterface(test);
    }
}

Fault
MMU::testTranslation(const RequestPtr &req, Mode mode,
                     DomainType domain, CachedState &state) const
{
    if (!test || !req->hasSize() || req->getSize() == 0 ||
        req->isCacheMaintenance()) {
        return NoFault;
    } else {
        return test->translationCheck(req, state.isPriv, mode, domain);
    }
}

MMU::Stats::Stats(statistics::Group *parent)
  : statistics::Group(parent),
    ADD_STAT(alignFaults, statistics::units::Count::get(),
             "Number of MMU faults due to alignment restrictions"),
    ADD_STAT(prefetchFaults, statistics::units::Count::get(),
             "Number of MMU faults due to prefetch"),
    ADD_STAT(domainFaults, statistics::units::Count::get(),
             "Number of MMU faults due to domain restrictions"),
    ADD_STAT(permsFaults, statistics::units::Count::get(),
             "Number of MMU faults due to permissions restrictions")
{
}

} // namespace gem5
#define DPRINTF(x,...)
Definition trace.hh:209
virtual void annotate(AnnotationIDs id, uint64_t val)
Definition faults.hh:231
SelfDebug * getSelfDebug() const
Definition isa.hh:182
Fault translateMmuOn(ThreadContext *tc, const RequestPtr &req, Mode mode, Translation *translation, bool &delay, bool timing, bool functional, Addr vaddr, TranMethod tran_method, CachedState &state)
Definition mmu.cc:1089
std::tuple< bool, bool, bool > s1IndirectPermBits64(TlbEntry *te, const RequestPtr &req, Mode mode, ThreadContext *tc, CachedState &state, bool r, bool w, bool x)
Definition mmu.cc:732
ArmISA::TLB * getITBPtr() const
Definition mmu.cc:144
void setAttr(uint64_t attr)
Accessor functions for memory attributes for last accessed TLB entry.
Definition mmu.hh:322
void flushStage2(const TLBIOp &tlbi_op)
Definition mmu.cc:242
static bool hasUnprivRegime(TranslationRegime regime)
Definition mmu.cc:989
Fault translateSe(const RequestPtr &req, ThreadContext *tc, Mode mode, Translation *translation, bool &delay, bool timing, CachedState &state)
Definition mmu.cc:332
void drainResume() override
Resume execution after a successful drain.
Definition mmu.cc:131
uint64_t _attr
Definition mmu.hh:433
ContextID miscRegContext
Definition mmu.hh:427
Fault translateComplete(const RequestPtr &req, ThreadContext *tc, Translation *translation, Mode mode, ArmTranslationType tran_type, bool call_from_s2)
Definition mmu.cc:1310
TLB * getTlb(BaseMMU::Mode mode, bool stage2) const
Definition mmu.cc:150
Fault translateMmuOff(ThreadContext *tc, const RequestPtr &req, Mode mode, ArmTranslationType tran_type, Addr vaddr, bool long_desc_format, CachedState &state)
Definition mmu.cc:1014
Fault testTranslation(const RequestPtr &req, Mode mode, DomainType domain, CachedState &state) const
Definition mmu.cc:1819
TLB * dtbStage2
Definition mmu.hh:77
void init() override
Called at init time, this method is traversing the TLB hierarchy and pupulating the instruction/data/...
Definition mmu.cc:93
CachedState & updateMiscReg(ThreadContext *tc, ArmTranslationType tran_type, bool stage2)
Definition mmu.cc:1380
bool isCompleteTranslation(TlbEntry *te) const
Definition mmu.cc:1783
void invalidateMiscReg()
Definition mmu.cc:208
bool haveLargeAsid64
Definition mmu.hh:437
Fault finalizePhysical(const RequestPtr &req, ThreadContext *tc, Mode mode) const override
Definition mmu.cc:302
bool _hasWalkCache
Definition mmu.hh:442
std::pair< bool, bool > s1PermBits64(TlbEntry *te, const RequestPtr &req, Mode mode, ThreadContext *tc, CachedState &state, bool r, bool w, bool x)
Definition mmu.cc:959
Fault getResultTe(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode, Translation *translation, bool timing, bool functional, TlbEntry *mergeTe, CachedState &state)
Definition mmu.cc:1704
void flushStage1(const TLBIOp &tlbi_op)
Definition mmu.cc:228
static ExceptionLevel tranTypeEL(CPSR cpsr, SCR scr, ArmTranslationType type)
Determine the EL to use for the purpose of a translation given a specific translation type.
Definition mmu.cc:1581
std::tuple< bool, bool, bool > s1DirectPermBits64(TlbEntry *te, const RequestPtr &req, Mode mode, ThreadContext *tc, CachedState &state, bool r, bool w, bool x)
Definition mmu.cc:873
uint8_t physAddrRange
Definition mmu.hh:438
ArmISA::TLB * getDTBPtr() const
Definition mmu.cc:138
bool checkWalkCache() const
Definition mmu.cc:112
TlbTestInterface * test
Definition mmu.hh:406
void flushAll() override
Definition mmu.cc:271
void translateTiming(const RequestPtr &req, ThreadContext *tc, Translation *translation, Mode mode) override
Definition mmu.hh:259
TLB * itbStage2
Definition mmu.hh:76
void setTestInterface(SimObject *ti)
Definition mmu.cc:1803
TableWalker * itbStage2Walker
Definition mmu.hh:81
Addr getValidAddr(Addr vaddr, ThreadContext *tc, Mode mode) override
Definition mmu.cc:377
AddrRange m5opRange
Definition mmu.hh:440
TableWalker * dtbStage2Walker
Definition mmu.hh:82
TableWalker * dtbWalker
Definition mmu.hh:80
Fault getTE(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode, Translation *translation, bool timing, bool functional, SecurityState ss, PASpace ipaspace, ArmTranslationType tran_type, bool stage2)
Definition mmu.cc:1613
void dflush(const TLBIOp &tlbi_op)
Definition mmu.cc:260
Fault checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode, ThreadContext *tc, bool stage2)
Definition mmu.cc:578
Fault translateFs(const RequestPtr &req, ThreadContext *tc, Mode mode, Translation *translation, bool &delay, bool timing, ArmTranslationType tran_type, bool functional, CachedState &state)
Definition mmu.cc:1156
TlbEntry * lookup(Addr vpn, uint16_t asn, vmid_t vmid, SecurityState ss, bool functional, bool ignore_asn, TranslationRegime target_regime, bool stage2, BaseMMU::Mode mode)
Lookup an entry in the TLB.
Definition mmu.cc:1623
TableWalker * itbWalker
Definition mmu.hh:79
const ArmRelease * _release
Definition mmu.hh:436
gem5::ArmISA::MMU::Stats stats
std::pair< bool, bool > s2PermBits64(TlbEntry *te, const RequestPtr &req, Mode mode, ThreadContext *tc, CachedState &state, bool r, bool w, bool x)
Definition mmu.cc:691
CachedState s1State
Definition mmu.hh:430
void flush(const TLBIOp &tlbi_op)
Definition mmu.cc:216
Fault checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode, bool stage2)
Definition mmu.cc:391
MMU(const ArmMMUParams &p)
Definition mmu.cc:60
TableWalker * getTableWalker(BaseMMU::Mode mode, bool stage2) const
Definition mmu.cc:166
void takeOverFrom(BaseMMU *old_mmu) override
Definition mmu.cc:1789
Fault testAndFinalize(const RequestPtr &req, ThreadContext *tc, Mode mode, TlbEntry *te, CachedState &state) const
Definition mmu.cc:280
Addr purifyTaggedAddr(Addr vaddr_tainted, ThreadContext *tc, ExceptionLevel el, TCR tcr, bool is_inst, CachedState &state)
Definition mmu.cc:1001
CachedState s2State
Definition mmu.hh:430
void iflush(const TLBIOp &tlbi_op)
Definition mmu.cc:249
Fault translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode) override
Definition mmu.hh:248
TranslationGenPtr translateFunctional(Addr start, Addr size, ThreadContext *tc, Mode mode, Request::Flags flags) override
Returns a translation generator for a region of virtual addresses, instead of directly translating a ...
Definition mmu.hh:86
Fault getTe(ThreadContext *tc, TlbEntry *destTe)
virtual bool stage1Flush() const
Return true if the TLBI op needs to flush stage1 entries, Defaulting to true in the TLBIOp abstract c...
Definition tlbi_op.hh:95
virtual bool stage2Flush() const
Return true if the TLBI op needs to flush stage2 entries, Defaulting to false in the TLBIOp abstract ...
Definition tlbi_op.hh:106
void setTableWalker(TableWalker *table_walker)
Definition tlb.cc:143
Fault walk(const RequestPtr &req, ThreadContext *tc, uint16_t asid, vmid_t _vmid, BaseMMU::Mode mode, BaseMMU::Translation *_trans, bool timing, bool functional, SecurityState ss, PASpace ipaspace, MMU::ArmTranslationType tran_type, bool stage2, const TlbEntry *walk_entry)
uint8_t physAddrRange() const
Returns the supported physical address range in bits.
Definition system.hh:220
bool haveLargeAsid64() const
Returns true if the ASID is 16 bits in AArch64 (ARMv8).
Definition system.hh:206
static bool haveEL(ThreadContext *tc, ArmISA::ExceptionLevel el)
Return true if the system implements a specific exception level.
Definition system.cc:132
const ArmRelease * releaseFS() const
Definition system.hh:156
virtual void finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc, BaseMMU::Mode mode)=0
virtual void markDelayed()=0
Signal that the translation has been delayed due to a hardware page table walk.
std::set< BaseTLB * > instruction
From the MMU it is possible to traverse the entire hierarchy of TLBs, starting from the DTB and ITB (...
Definition mmu.hh:185
void init() override
Called at init time, this method traverses the TLB hierarchy, populating the instruction/data/...
Definition mmu.cc:53
virtual void flushAll()
Definition mmu.cc:81
BaseTLB * itb
Definition mmu.hh:163
virtual void takeOverFrom(BaseMMU *old_mmu)
Definition mmu.cc:172
std::set< BaseTLB * > data
Definition mmu.hh:186
std::set< BaseTLB * > unified
Definition mmu.hh:187
BaseMMU(const Params &p)
Definition mmu.hh:90
BaseTLB * dtb
Definition mmu.hh:162
Cycles is a wrapper class for representing cycle counts, i.e. a relative difference between two points in time, expressed in clock cycles.
Definition types.hh:79
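To make the wrapper-class idea concrete, here is a minimal sketch of a strongly typed cycle count. This is illustrative only; gem5's actual Cycles class (base/types.hh) is richer.

    #include <cstdint>

    // A thin wrapper that keeps cycle counts distinct from plain
    // integers at the type level while still converting explicitly.
    class CycleCount
    {
      public:
        explicit constexpr CycleCount(uint64_t c) : count(c) {}
        constexpr operator uint64_t() const { return count; }
        constexpr CycleCount operator+(CycleCount o) const
        { return CycleCount(count + o.count); }

      private:
        uint64_t count;
    };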
virtual std::string name() const
Definition named.hh:60
void setLE(T v)
Set the value in the data pointer to v as little endian.
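A hedged sketch of what a little-endian store helper must do on any host byte order; this is a hypothetical standalone function, not the gem5 implementation.

    #include <cstdint>

    // Write a 32-bit value least-significant byte first, regardless of
    // the host's native byte order.
    void storeLE32(uint8_t *dst, uint32_t v)
    {
        for (int i = 0; i < 4; i++)
            dst[i] = static_cast<uint8_t>(v >> (8 * i));
    }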
@ SECURE
The request targets the secure memory space.
Definition request.hh:186
@ STRICT_ORDER
The request is required to be strictly ordered by CPU models and is non-speculative.
Definition request.hh:135
@ UNCACHEABLE
The request is to an uncacheable address.
Definition request.hh:125
@ CACHE_BLOCK_ZERO
This is a write that targets and zeroes an entire cache block.
Definition request.hh:143
gem5::Flags< FlagsType > Flags
Definition request.hh:102
ThreadContext is the external interface to all thread state for anything outside of the CPU.
virtual RegVal readMiscReg(RegIndex misc_reg)=0
virtual Process * getProcessPtr()=0
virtual ContextID contextId() const =0
Statistics container.
Definition group.hh:93
STL pair class.
Definition stl.hh:58
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Definition group.hh:75
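A typical usage pattern, sketched from how gem5 statistics groups are commonly initialized; the unit and description string here are illustrative, and the macro's argument shape can differ between gem5 versions.

    MMU::Stats::Stats(statistics::Group *parent)
        : statistics::Group(parent),
          // ADD_STAT expands to the stat's constructor arguments:
          // name, unit, and description.
          ADD_STAT(alignFaults, statistics::units::Count::get(),
                   "Number of MMU faults due to alignment restrictions")
    {
    }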
constexpr T bits(T val, unsigned first, unsigned last)
Extract the bitfield from position 'first' to 'last' (inclusive) from 'val' and right justify it.
Definition bitfield.hh:79
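A standalone sketch of the documented semantics, with first as the high bit and last as the low bit; gem5's own implementation lives in base/bitfield.hh.

    #include <cstdint>

    // Extract bits [first:last] (inclusive) from val and right justify.
    constexpr uint64_t
    bitsSketch(uint64_t val, unsigned first, unsigned last)
    {
        const unsigned nbits = first - last + 1;
        const uint64_t mask = nbits >= 64 ? ~0ULL : (1ULL << nbits) - 1;
        return (val >> last) & mask;
    }

    static_assert(bitsSketch(0xABCD, 15, 12) == 0xA, "top nibble");
    static_assert(bitsSketch(0xABCD, 3, 0) == 0xD, "bottom nibble");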
#define panic(...)
This implements a cprintf-based panic() function.
Definition logging.hh:220
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condition is true.
Definition logging.hh:268
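An illustrative call site, assuming gem5's cprintf-style formatting; the condition and message are made up for the example.

    // Abort the simulation with a message if the condition holds.
    fatal_if(physAddrRange > 52,
             "Unsupported physical address range: %d bits", physAddrRange);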
SimObject(const Params &p)
Definition sim_object.cc:58
void tagRequest(ThreadContext *tc, const RequestPtr &req, bool ind)
Tag a memory request with MPAM information.
Definition mpam.cc:240
Bitfield< 12 > dc
Bitfield< 30 > te
Bitfield< 3, 0 > mask
Definition pcstate.hh:63
Bitfield< 4, 0 > mode
Definition misc_types.hh:74
Bitfield< 19 > wxn
ExceptionLevel currEL(const ThreadContext *tc)
Returns the current Exception Level (EL) of the provided ThreadContext.
Definition utility.cc:134
Bitfield< 31, 0 > uw
Definition int.hh:63
bool isSecure(ThreadContext *tc)
Definition utility.cc:74
Bitfield< 22 > pan
Definition misc_types.hh:59
Addr maskTaggedAddr(Addr addr, ThreadContext *tc, ExceptionLevel el, int topbit)
Definition utility.cc:459
bool longDescFormatInUse(ThreadContext *tc)
Definition utility.cc:141
Bitfield< 7, 4 > domain
bool ELIs64(ThreadContext *tc, ExceptionLevel el)
Definition utility.cc:277
Bitfield< 9 > e
Definition misc_types.hh:65
TranslationRegime translationRegime(ThreadContext *tc, ExceptionLevel el)
Definition utility.cc:1380
bool EL2Enabled(ThreadContext *tc)
Definition utility.cc:268
ExceptionLevel translationEl(TranslationRegime regime)
Definition utility.cc:1399
SecurityState
Security State.
Definition types.hh:273
int computeAddrTop(ThreadContext *tc, bool selbit, bool is_instr, TCR tcr, ExceptionLevel el)
Definition utility.cc:413
Bitfield< 0 > vm
@ MISCREG_PIR_EL2
Definition misc.hh:1193
@ MISCREG_SCTLR_EL2
Definition misc.hh:616
@ MISCREG_PIRE0_EL1
Definition misc.hh:1189
@ MISCREG_TCR_EL2
Definition misc.hh:641
@ MISCREG_PIR_EL1
Definition misc.hh:1192
@ MISCREG_SCTLR
Definition misc.hh:253
@ MISCREG_TTBCR
Definition misc.hh:278
@ MISCREG_SCR_EL3
Definition misc.hh:628
@ MISCREG_TCR_EL3
Definition misc.hh:648
@ MISCREG_TCR_EL1
Definition misc.hh:636
@ MISCREG_PIR_EL3
Definition misc.hh:1194
@ MISCREG_SCTLR_EL1
Definition misc.hh:609
@ MISCREG_PRRR
Definition misc.hh:399
@ MISCREG_ID_AA64MMFR1_EL1
Definition misc.hh:600
@ MISCREG_CPSR
Definition misc.hh:79
@ MISCREG_NMRR
Definition misc.hh:405
@ MISCREG_TTBR1_EL1
Definition misc.hh:634
@ MISCREG_TCR2_EL2
Definition misc.hh:642
@ MISCREG_CONTEXTIDR
Definition misc.hh:429
@ MISCREG_TTBR1_EL2
Definition misc.hh:904
@ MISCREG_HCR_EL2
Definition misc.hh:619
@ MISCREG_TTBR1
Definition misc.hh:275
@ MISCREG_VTCR_EL2
Definition misc.hh:644
@ MISCREG_VTTBR
Definition misc.hh:478
@ MISCREG_PIRE0_EL2
Definition misc.hh:1190
@ MISCREG_TCR2_EL1
Definition misc.hh:638
@ MISCREG_TTBR0
Definition misc.hh:272
@ MISCREG_DACR
Definition misc.hh:283
@ MISCREG_TTBR0_EL2
Definition misc.hh:640
@ MISCREG_HSCTLR
Definition misc.hh:264
@ MISCREG_TTBR0_EL1
Definition misc.hh:632
@ MISCREG_SCTLR_EL3
Definition misc.hh:625
@ MISCREG_VTTBR_EL2
Definition misc.hh:643
Bitfield< 4 > sd
Bitfield< 6 > f
Definition misc_types.hh:68
Bitfield< 3, 2 > el
Definition misc_types.hh:73
uint16_t vmid_t
Definition types.hh:57
int snsBankedIndex(MiscRegIndex reg, ThreadContext *tc)
Definition misc.cc:686
bool inAArch64(ThreadContext *tc)
Definition utility.cc:127
PASpace
Physical Address Space.
Definition types.hh:280
bool HaveExt(ThreadContext *tc, ArmExtension ext)
Returns true if the provided ThreadContext supports the ArmExtension passed as a second argument.
Definition utility.cc:232
Bitfield< 21 > ss
Definition misc_types.hh:60
Bitfield< 39, 12 > pa
Bitfield< 8 > va
Bitfield< 59, 56 > tlb
Bitfield< 23 > px
Bitfield< 5 > ux
Bitfield< 0 > p
Bitfield< 30 > ti
Bitfield< 0 > w
Bitfield< 14 > pr
Definition misc.hh:116
Bitfield< 3 > x
Definition pagetable.hh:76
static void decodeAddrOffset(Addr offset, uint8_t &func)
bool pseudoInst(ThreadContext *tc, uint8_t func, uint64_t &result)
Units for Stats.
Definition units.hh:113
Copyright (c) 2024 Arm Limited All rights reserved.
Definition binary32.hh:36
std::shared_ptr< FaultBase > Fault
Definition types.hh:249
std::shared_ptr< Request > RequestPtr
Definition request.hh:94
uint64_t Addr
Address type. This will probably be moved somewhere else in the near future.
Definition types.hh:147
bool FullSystem
The FullSystem variable can be used to determine the current mode of simulation.
Definition root.cc:220
Packet * PacketPtr
constexpr decltype(nullptr) NoFault
Definition types.hh:253
ExceptionLevel exceptionLevel
Definition mmu.hh:181
SecurityState securityState
Definition mmu.hh:186
void updateMiscReg(ThreadContext *tc, ArmTranslationType tran_type)
Definition mmu.cc:1420
vmid_t getVMID(ThreadContext *tc) const
Returns the current VMID (information stored in the VTTBR_EL2 register).
Definition mmu.cc:1351
Memoizer< int, ThreadContext *, bool, bool, TCR, ExceptionLevel > computeAddrTop
Definition mmu.hh:215
ArmTranslationType curTranType
Definition mmu.hh:199
TranslationRegime currRegime
Definition mmu.hh:182
Stats(statistics::Group *parent)
Definition mmu.cc:1830
statistics::Scalar permsFaults
Definition mmu.hh:451
statistics::Scalar alignFaults
Definition mmu.hh:448
statistics::Scalar prefetchFaults
Definition mmu.hh:449
statistics::Scalar domainFaults
Definition mmu.hh:450
TranslationRegime targetRegime
Definition pagetable.hh:199
void setAttributes(bool lpae)
Definition pagetable.hh:499
TLBTypes::KeyType KeyType
Definition pagetable.hh:236
The file contains the definition of a set of TLB Invalidate Instructions.
