mmu.cc
/*
 * Copyright (c) 2010-2013, 2016-2024 Arm Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "arch/arm/mmu.hh"

#include "arch/arm/isa.hh"
#include "arch/arm/mpam.hh"
#include "arch/arm/reg_abi.hh"
#include "arch/arm/stage2_lookup.hh"
#include "arch/arm/table_walker.hh"
#include "arch/arm/tlb.hh"
#include "arch/arm/tlbi_op.hh"
#include "debug/MMU.hh"
#include "mem/packet_access.hh"
#include "sim/pseudo_inst.hh"
#include "sim/process.hh"

namespace gem5
{

using namespace ArmISA;

MMU::MMU(const ArmMMUParams &p)
    : BaseMMU(p),
      itbStage2(p.stage2_itb), dtbStage2(p.stage2_dtb),
      itbWalker(p.itb_walker), dtbWalker(p.dtb_walker),
      itbStage2Walker(p.stage2_itb_walker),
      dtbStage2Walker(p.stage2_dtb_walker),
      test(nullptr),
      miscRegContext(0),
      s1State(this, false), s2State(this, true),
      _attr(0),
      _release(nullptr),
      _hasWalkCache(false),
      stats(this)
{
    // Cache system-level properties
    if (FullSystem) {
        ArmSystem *arm_sys = dynamic_cast<ArmSystem *>(p.sys);
        assert(arm_sys);
        haveLargeAsid64 = arm_sys->haveLargeAsid64();
        physAddrRange = arm_sys->physAddrRange();

        _release = arm_sys->releaseFS();
    } else {
        haveLargeAsid64 = false;
        physAddrRange = 48;

        _release = p.release_se;
    }

    m5opRange = p.sys->m5opRange();
}

void
MMU::init()
{
    itbStage2->setTableWalker(itbStage2Walker);
    dtbStage2->setTableWalker(dtbStage2Walker);

    getITBPtr()->setTableWalker(itbWalker);
    getDTBPtr()->setTableWalker(dtbWalker);

    BaseMMU::init();

    _hasWalkCache = checkWalkCache();
}

bool
MMU::checkWalkCache() const
{
    for (auto tlb : instruction) {
        if (static_cast<TLB*>(tlb)->walkCache())
            return true;
    }
    for (auto tlb : data) {
        if (static_cast<TLB*>(tlb)->walkCache())
            return true;
    }
    for (auto tlb : unified) {
        if (static_cast<TLB*>(tlb)->walkCache())
            return true;
    }

    return false;
}

void
MMU::drainResume()
{
    s1State.miscRegValid = false;
    s2State.miscRegValid = false;
}

ArmISA::TLB *
MMU::getDTBPtr() const
{
    return static_cast<ArmISA::TLB *>(dtb);
}

ArmISA::TLB *
MMU::getITBPtr() const
{
    return static_cast<ArmISA::TLB *>(itb);
}

TLB *
MMU::getTlb(BaseMMU::Mode mode, bool stage2) const
{
    if (mode == BaseMMU::Execute) {
        if (stage2)
            return itbStage2;
        else
            return getITBPtr();
    } else {
        if (stage2)
            return dtbStage2;
        else
            return getDTBPtr();
    }
}

TableWalker *
MMU::getTableWalker(BaseMMU::Mode mode, bool stage2) const
{
    if (mode == BaseMMU::Execute) {
        if (stage2)
            return itbStage2Walker;
        else
            return itbWalker;
    } else {
        if (stage2)
            return dtbStage2Walker;
        else
            return dtbWalker;
    }
}

bool
MMU::translateFunctional(ThreadContext *tc, Addr va, Addr &pa)
{
    auto& state = updateMiscReg(tc, NormalTran, false);

    auto tlb = getTlb(BaseMMU::Read, state.directToStage2);

    TlbEntry::KeyType lookup_data;

    lookup_data.va = va;
    lookup_data.asn = state.asid;
    lookup_data.ignoreAsn = false;
    lookup_data.vmid = state.vmid;
    lookup_data.ss = state.securityState;
    lookup_data.functional = true;
    lookup_data.targetRegime = state.currRegime;
    lookup_data.mode = BaseMMU::Read;

    TlbEntry *e = tlb->multiLookup(lookup_data);

    if (!e)
        return false;
    pa = e->pAddr(va);
    return true;
}
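
// Usage sketch (hypothetical caller, not part of this file): probing a
// VA -> PA mapping without side effects, e.g. from a debug hook:
//
//     Addr pa;
//     if (mmu->translateFunctional(tc, vaddr, pa))
//         DPRINTF(MMU, "VA %#x -> PA %#x\n", vaddr, pa);
//
// A false return only means the mapping is not currently cached in the
// TLB hierarchy; this path never starts a table walk.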

void
MMU::invalidateMiscReg()
{
    s1State.miscRegValid = false;
    s1State.computeAddrTop.flush();
    s2State.computeAddrTop.flush();
}

void
MMU::flush(const TLBIOp &tlbi_op)
{
    if (tlbi_op.stage1Flush()) {
        flushStage1(tlbi_op);
    }

    if (tlbi_op.stage2Flush()) {
        flushStage2(tlbi_op);
    }
}

void
MMU::flushStage1(const TLBIOp &tlbi_op)
{
    for (auto tlb : instruction) {
        static_cast<TLB*>(tlb)->flush(tlbi_op);
    }
    for (auto tlb : data) {
        static_cast<TLB*>(tlb)->flush(tlbi_op);
    }
    for (auto tlb : unified) {
        static_cast<TLB*>(tlb)->flush(tlbi_op);
    }
}

void
MMU::flushStage2(const TLBIOp &tlbi_op)
{
    itbStage2->flush(tlbi_op);
    dtbStage2->flush(tlbi_op);
}

void
MMU::iflush(const TLBIOp &tlbi_op)
{
    for (auto tlb : instruction) {
        static_cast<TLB*>(tlb)->flush(tlbi_op);
    }
    for (auto tlb : unified) {
        static_cast<TLB*>(tlb)->flush(tlbi_op);
    }
}

void
MMU::dflush(const TLBIOp &tlbi_op)
{
    for (auto tlb : data) {
        static_cast<TLB*>(tlb)->flush(tlbi_op);
    }
    for (auto tlb : unified) {
        static_cast<TLB*>(tlb)->flush(tlbi_op);
    }
}

void
MMU::flushAll()
{
    BaseMMU::flushAll();
    itbStage2->flushAll();
    dtbStage2->flushAll();
}

Fault
MMU::testAndFinalize(const RequestPtr &req, ThreadContext *tc,
                     Mode mode, TlbEntry* te, CachedState &state) const
{
    // If we don't have a valid tlb entry it means virtual memory
    // is not enabled
    auto domain = te ? te->domain : DomainType::NoAccess;

    mpam::tagRequest(tc, req, mode == Execute);

    // Check for a tester generated address fault
    Fault fault = testTranslation(req, mode, domain, state);
    if (fault != NoFault) {
        return fault;
    } else {
        // Now that we checked no fault has been generated in the
        // translation process, we can finalize the physical address
        return finalizePhysical(req, tc, mode);
    }
}

Fault
MMU::finalizePhysical(const RequestPtr &req,
                      ThreadContext *tc, Mode mode) const
{
    const Addr paddr = req->getPaddr();

    if (m5opRange.contains(paddr)) {
        uint8_t func;
        pseudo_inst::decodeAddrOffset(paddr - m5opRange.start(), func);
        req->setLocalAccessor(
            [func, mode](ThreadContext *tc, PacketPtr pkt) -> Cycles
            {
                uint64_t ret;
                if (inAArch64(tc))
                    pseudo_inst::pseudoInst<RegABI64>(tc, func, ret);
                else
                    pseudo_inst::pseudoInst<RegABI32>(tc, func, ret);

                if (mode == Read)
                    pkt->setLE(ret);

                return Cycles(1);
            }
        );
    }

    return NoFault;
}
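
// Usage sketch (illustrative, not from the original source): a guest
// memory access landing inside m5opRange executes a pseudo instruction
// instead of a real memory access. decodeAddrOffset() recovers the m5
// function number from the offset into the range, so a load from, say,
// m5opRange.start() plus the encoded offset of the "exit" op runs that
// op through the local accessor and, for reads, returns its result in
// the loaded register.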


Fault
MMU::translateSe(const RequestPtr &req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing,
                 CachedState &state)
{
    updateMiscReg(tc, NormalTran, state.isStage2);
    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (state.aarch64) {
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, state.exceptionLevel,
                                 static_cast<TCR>(state.ttbcr),
                                 mode == Execute, state);
    } else {
        vaddr = vaddr_tainted;
    }
    Request::Flags flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);

    if (!is_fetch) {
        if (state.sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                // LPAE is always disabled in SE mode
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault,
                    state.isStage2, TranMethod::VmsaTran);
            }
        }
    }

    Process *p = tc->getProcessPtr();
    if (const auto pte = p->pTable->lookup(vaddr); !pte) {
        return std::make_shared<GenericPageTableFault>(vaddr_tainted);
    } else {
        req->setPaddr(pte->paddr + p->pTable->pageOffset(vaddr));

        if (pte->flags & EmulationPageTable::Uncacheable)
            req->setFlags(Request::UNCACHEABLE);

        return finalizePhysical(req, tc, mode);
    }
}

Addr
MMU::getValidAddr(Addr vaddr, ThreadContext *tc, Mode mode)
{
    auto& state = updateMiscReg(tc, NormalTran, false);
    Addr purified_vaddr = 0;
    if (state.aarch64) {
        purified_vaddr = purifyTaggedAddr(vaddr, tc, state.exceptionLevel,
                                          static_cast<TCR>(state.ttbcr),
                                          mode == Execute, state);
    } else {
        purified_vaddr = vaddr;
    }
    return purified_vaddr;
}

Fault
MMU::checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode,
                      bool stage2)
{
    return checkPermissions(te, req, mode, stage2 ? s2State : s1State);
}

Fault
MMU::checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode,
                      CachedState &state)
{
    // A data cache maintenance instruction that operates by MVA does
    // not generate a Data Abort exception due to a Permission fault
    if (req->isCacheMaintenance()) {
        return NoFault;
    }

    Addr vaddr = req->getVaddr(); // 32-bit: no need to purify the address
    Request::Flags flags = req->getFlags();
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool is_priv = state.isPriv && !(flags & UserMode);

    // Get the translation type from the actual table entry
    TranMethod tran_method = te->longDescFormat ?
        TranMethod::LpaeTran : TranMethod::VmsaTran;

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (state.isStage2 && req->isPTWalk() && state.hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return std::make_shared<DataAbort>(
            vaddr, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            state.isStage2, tran_method);
    }

    // Generate an alignment fault for unaligned data accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                stats.alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr, DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, state.isStage2,
                    tran_method);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return std::make_shared<PrefetchAbort>(
                vaddr, ArmFault::PrefetchUncacheable,
                state.isStage2, tran_method);
        }
    }

    if (!te->longDescFormat) {
        switch ((state.dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
          case 0:
            stats.domainFaults++;
            DPRINTF(MMU, "MMU Fault: Data abort on domain. DACR: %#x"
                    " domain: %#x write:%d\n", state.dacr,
                    static_cast<uint8_t>(te->domain), is_write);
            if (is_fetch) {
                // Use PC value instead of vaddr because vaddr might
                // be aligned to cache line and should not be the
                // address reported in FAR
                return std::make_shared<PrefetchAbort>(
                    req->getPC(),
                    ArmFault::DomainLL + te->lookupLevel,
                    state.isStage2, tran_method);
            } else
                return std::make_shared<DataAbort>(
                    vaddr, te->domain, is_write,
                    ArmFault::DomainLL + te->lookupLevel,
                    state.isStage2, tran_method);
          case 1:
            // Continue with permissions check
            break;
          case 2:
            panic("UNPRED domain\n");
          case 3:
            return NoFault;
        }
    }

    // The 'ap' variable is AP[2:0] or {AP[2,1],1b'0}, i.e. always three bits
    uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
    uint8_t hap = te->hap;

    if (state.sctlr.afe == 1 || te->longDescFormat)
        ap |= 1;

    bool abt;
    bool isWritable = true;
    // If this is a stage 2 access (eg for reading stage 1 page table entries)
    // then don't perform the AP permissions check, we still do the HAP check
    // below.
    if (state.isStage2) {
        abt = false;
    } else {
        switch (ap) {
          case 0:
            DPRINTF(MMU, "Access permissions 0, checking rs:%#x\n",
                    (int)state.sctlr.rs);
            if (!state.sctlr.xp) {
                switch ((int)state.sctlr.rs) {
                  case 2:
                    abt = is_write;
                    break;
                  case 1:
                    abt = is_write || !is_priv;
                    break;
                  case 0:
                  case 3:
                  default:
                    abt = true;
                    break;
                }
            } else {
                abt = true;
            }
            break;
          case 1:
            abt = !is_priv;
            break;
          case 2:
            abt = !is_priv && is_write;
            isWritable = is_priv;
            break;
          case 3:
            abt = false;
            break;
          case 4:
            panic("UNPRED permissions\n");
          case 5:
            abt = !is_priv || is_write;
            isWritable = false;
            break;
          case 6:
          case 7:
            abt = is_write;
            isWritable = false;
            break;
          default:
            panic("Unknown permissions %#x\n", ap);
        }
    }

    bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
    bool xn = te->xn || (isWritable && state.sctlr.wxn) ||
        (ap == 3 && state.sctlr.uwxn && is_priv);
    if (is_fetch && (abt || xn ||
                     (te->longDescFormat && te->pxn && is_priv) ||
                     (state.securityState == SecurityState::Secure &&
                      te->ns && state.scr.sif))) {
        stats.permsFaults++;
        DPRINTF(MMU, "MMU Fault: Prefetch abort on permission check. AP:%d "
                "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d\n",
                ap, is_priv, is_write, te->ns,
                state.scr.sif, state.sctlr.afe);
        // Use PC value instead of vaddr because vaddr might be aligned to
        // cache line and should not be the address reported in FAR
        return std::make_shared<PrefetchAbort>(
            req->getPC(),
            ArmFault::PermissionLL + te->lookupLevel,
            state.isStage2, tran_method);
    } else if (abt | hapAbt) {
        stats.permsFaults++;
        DPRINTF(MMU, "MMU Fault: Data abort on permission check. AP:%d priv:%d"
                " write:%d\n", ap, is_priv, is_write);
        return std::make_shared<DataAbort>(
            vaddr, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            state.isStage2 | !abt, tran_method);
    }
    return NoFault;
}
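
// Worked example (illustrative, not from the original source): in the
// short-descriptor format a page in domain 5 is checked against DACR bits
// 11:10. A DACR of 0x55555555 gives 0b01 ("client") for every domain, so
// the access falls through to the AP check above; a field value of 0b11
// ("manager") returns NoFault without any AP check, and 0b00 raises a
// Domain fault.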

Fault
MMU::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
                        ThreadContext *tc, bool stage2)
{
    return checkPermissions64(te, req, mode, tc, stage2 ? s2State : s1State);
}

Fault
MMU::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
                        ThreadContext *tc, CachedState &state)
{
    assert(state.aarch64);

    // A data cache maintenance instruction that operates by VA does
    // not generate a Permission fault unless:
    // * It is a data cache invalidate (dc ivac) which requires write
    //   permissions to the VA, or
    // * It is executed from EL0
    if (req->isCacheClean() && state.exceptionLevel != EL0 &&
        !state.isStage2) {
        return NoFault;
    }

    Addr vaddr_tainted = req->getVaddr();
    Request::Flags flags = req->getFlags();
    bool is_fetch = (mode == Execute);
    // Cache clean operations require read permissions to the specified VA
    bool is_write = !req->isCacheClean() && mode == Write;
    bool is_atomic = req->isAtomic();

    updateMiscReg(tc, state.curTranType, state.isStage2);

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (state.isStage2 && req->isPTWalk() && state.hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return std::make_shared<DataAbort>(
            vaddr_tainted, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            state.isStage2, TranMethod::LpaeTran);
    }

    // Generate an alignment fault for unaligned accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr_tainted & mask(flags & AlignmentMask)) {
                stats.alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    DomainType::NoAccess,
                    is_atomic ? false : is_write,
                    ArmFault::AlignmentFault,
                    state.isStage2, TranMethod::LpaeTran);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return std::make_shared<PrefetchAbort>(
                vaddr_tainted,
                ArmFault::PrefetchUncacheable,
                state.isStage2, TranMethod::LpaeTran);
        }
    }

    bool grant = false;
    // grant_read is used for faults from an atomic instruction that
    // both reads and writes from a memory location. From an ISS point
    // of view they count as read if a read to that address would have
    // generated the fault; they count as writes otherwise
    bool grant_read = true;

    if (state.isStage2) {
        std::tie(grant, grant_read) = s2PermBits64(te, req, mode, tc, state,
            (!is_write && !is_fetch), is_write, is_fetch);
    } else {
        std::tie(grant, grant_read) = s1PermBits64(te, req, mode, tc, state,
            (!is_write && !is_fetch), is_write, is_fetch);
    }

    if (!grant) {
        if (is_fetch) {
            stats.permsFaults++;
            DPRINTF(MMU, "MMU Fault: Prefetch abort on permission check. "
                    "ns:%d scr.sif:%d sctlr.afe: %d\n",
                    te->ns, state.scr.sif, state.sctlr.afe);
            // Use PC value instead of vaddr because vaddr might be aligned to
            // cache line and should not be the address reported in FAR
            return std::make_shared<PrefetchAbort>(
                req->getPC(),
                ArmFault::PermissionLL + te->lookupLevel,
                state.isStage2, TranMethod::LpaeTran);
        } else {
            stats.permsFaults++;
            DPRINTF(MMU, "MMU Fault: Data abort on permission check. "
                    "ns:%d\n", te->ns);
            return std::make_shared<DataAbort>(
                vaddr_tainted, te->domain,
                (is_atomic && !grant_read) ? false : is_write,
                ArmFault::PermissionLL + te->lookupLevel,
                state.isStage2, TranMethod::LpaeTran);
        }
    }

    return NoFault;
}

std::pair<bool, bool>
MMU::s2PermBits64(TlbEntry *te, const RequestPtr &req, Mode mode,
                  ThreadContext *tc, CachedState &state, bool r, bool w,
                  bool x)
{
    assert(ArmSystem::haveEL(tc, EL2) && state.exceptionLevel != EL2);

    // In stage 2 we use the hypervisor access permission bits.
    // The following permissions are described in ARM DDI 0487A.f
    // D4-1802
    bool grant = false;
    bool grant_read = te->hap & 0b01;
    bool grant_write = te->hap & 0b10;

    uint8_t xn = te->xn;
    uint8_t pxn = te->pxn;

    if (ArmSystem::haveEL(tc, EL3) &&
        state.securityState == SecurityState::Secure &&
        te->ns && state.scr.sif) {
        xn = true;
    }

    DPRINTF(MMU,
            "Checking S2 permissions: hap:%d, xn:%d, pxn:%d, r:%d, "
            "w:%d, x:%d\n", te->hap, xn, pxn, r, w, x);

    if (x) {
        grant = !xn;
    } else if (req->isAtomic()) {
        grant = grant_read || grant_write;
    } else if (w) {
        grant = grant_write;
    } else if (r) {
        grant = grant_read;
    } else {
        panic("Invalid Operation\n");
    }

    return std::make_pair(grant, grant_read);
}
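
// Worked example (illustrative, not from the original source): a stage 2
// descriptor with HAP == 0b01 grants reads but not writes, so a guest
// store to that IPA fails the check (grant == false) while grant_read
// stays true, and the resulting Data Abort is reported as a write in the
// ISS.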

std::pair<bool, bool>
MMU::s1PermBits64(TlbEntry *te, const RequestPtr &req, Mode mode,
                  ThreadContext *tc, CachedState &state, bool r, bool w,
                  bool x)
{
    bool grant = false, grant_read = true, grant_write = true,
         grant_exec = true;

    const uint8_t ap = te->ap & 0b11; // 2-bit access protection field
    const bool is_priv = state.isPriv && !(req->getFlags() & UserMode);

    bool wxn = state.sctlr.wxn;
    uint8_t xn = te->xn;
    uint8_t pxn = te->pxn;

    DPRINTF(MMU, "Checking S1 permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
            "w:%d, x:%d, is_priv: %d, wxn: %d\n", ap, xn,
            pxn, r, w, x, is_priv, wxn);

    if (faultPAN(tc, ap, req, mode, is_priv, state)) {
        return std::make_pair(false, false);
    }

    TranslationRegime regime =
        !is_priv ? TranslationRegime::EL10 : state.currRegime;
    if (hasUnprivRegime(regime)) {
        bool pr = false;
        bool pw = false;
        bool ur = false;
        bool uw = false;
        // Apply leaf permissions
        switch (ap) {
          case 0b00: // Privileged access
            pr = 1; pw = 1; ur = 0; uw = 0;
            break;
          case 0b01: // No effect
            pr = 1; pw = 1; ur = 1; uw = 1;
            break;
          case 0b10: // Read-only, privileged access
            pr = 1; pw = 0; ur = 0; uw = 0;
            break;
          case 0b11: // Read-only
            pr = 1; pw = 0; ur = 1; uw = 0;
            break;
        }

        // Locations writable by unprivileged cannot be executed by privileged
        const bool px = !(pxn || uw);
        const bool ux = !xn;

        grant_read = is_priv ? pr : ur;
        grant_write = is_priv ? pw : uw;
        grant_exec = is_priv ? px : ux;
    } else {
        switch (bits(ap, 1)) {
          case 0b0: // No effect
            grant_read = 1; grant_write = 1;
            break;
          case 0b1: // Read-Only
            grant_read = 1; grant_write = 0;
            break;
        }
        grant_exec = !xn;
    }

    // Do not allow execution from writable location
    // if wxn is set
    grant_exec = grant_exec && !(wxn && grant_write);

    if (ArmSystem::haveEL(tc, EL3) &&
        state.securityState == SecurityState::Secure && te->ns) {
        grant_exec = grant_exec && !state.scr.sif;
    }

    if (x) {
        grant = grant_exec;
    } else if (req->isAtomic()) {
        grant = grant_read && grant_write;
    } else if (w) {
        grant = grant_write;
    } else {
        grant = grant_read;
    }

    return std::make_pair(grant, grant_read);
}

bool
MMU::hasUnprivRegime(TranslationRegime regime)
{
    switch (regime) {
      case TranslationRegime::EL10:
      case TranslationRegime::EL20:
        return true;
      default:
        return false;
    }
}

bool
MMU::faultPAN(ThreadContext *tc, uint8_t ap, const RequestPtr &req, Mode mode,
              const bool is_priv, CachedState &state)
{
    bool exception = false;
    switch (state.exceptionLevel) {
      case EL0:
        break;
      case EL1:
        if (checkPAN(tc, ap, req, mode, is_priv, state)) {
            exception = true;
        }
        break;
      case EL2:
        if (state.hcr.e2h && checkPAN(tc, ap, req, mode, is_priv, state)) {
            exception = true;
        }
        break;
      case EL3:
        break;
    }

    return exception;
}

bool
MMU::checkPAN(ThreadContext *tc, uint8_t ap, const RequestPtr &req, Mode mode,
              const bool is_priv, CachedState &state)
{
    // The PAN bit has no effect on:
    // 1) Instruction accesses.
    // 2) Data Cache instructions other than DC ZVA
    // 3) Address translation instructions, other than ATS1E1RP and
    //    ATS1E1WP when ARMv8.2-ATS1E1 is implemented. (Unimplemented in
    //    gem5)
    // 4) Instructions to be treated as unprivileged, unless
    //    HCR_EL2.{E2H, TGE} == {1, 0}
    if (_release->has(ArmExtension::FEAT_PAN) && state.cpsr.pan &&
        (ap & 0x1) && mode != BaseMMU::Execute) {

        if (req->isCacheMaintenance() &&
            !(req->getFlags() & Request::CACHE_BLOCK_ZERO)) {
            // Cache maintenance other than DC ZVA
            return false;
        } else if (!is_priv && !(state.hcr.e2h && !state.hcr.tge)) {
            // Treated as unprivileged unless HCR_EL2.{E2H, TGE} == {1, 0}
            return false;
        }
        return true;
    }

    return false;
}
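
// Worked example (illustrative, not from the original source): at EL1 with
// PSTATE.PAN set, a privileged load from a page that is EL0-accessible
// (ap & 0x1) fails here and s1PermBits64() returns {false, false} before
// the regular AP/XN checks; the same access with PAN clear is governed
// only by the AP bits.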

Addr
MMU::purifyTaggedAddr(Addr vaddr_tainted, ThreadContext *tc,
                      ExceptionLevel el, TCR tcr, bool is_inst,
                      CachedState& state)
{
    const bool selbit = bits(vaddr_tainted, 55);

    // Call the memoized version of computeAddrTop
    const auto topbit = state.computeAddrTop(tc, selbit, is_inst, tcr, el);

    return maskTaggedAddr(vaddr_tainted, tc, el, topbit);
}
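
// Worked example (illustrative, not from the original source): with
// top-byte-ignore enabled for the TTBR0 half of the address space, a
// tagged user pointer such as 0x5a00007f12345678 has selbit = bit 55 = 0,
// computeAddrTop() returns bit 55 as the top bit, and maskTaggedAddr()
// strips the 0x5a tag so the TLB and table walker see 0x0000007f12345678.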

Fault
MMU::translateMmuOff(ThreadContext *tc, const RequestPtr &req, Mode mode,
                     ArmTranslationType tran_type, Addr vaddr,
                     bool long_desc_format, CachedState &state)
{
    bool is_fetch = (mode == Execute);
    bool is_atomic = req->isAtomic();
    req->setPaddr(vaddr);
    // When the MMU is off the security attribute corresponds to the
    // security state of the processor
    if (state.securityState == SecurityState::Secure)
        req->setFlags(Request::SECURE);
    else
        req->clearFlags(Request::SECURE);
    if (state.aarch64) {
        bool selbit = bits(vaddr, 55);
        TCR tcr1 = tc->readMiscReg(MISCREG_TCR_EL1);
        int topbit = computeAddrTop(tc, selbit, is_fetch, tcr1, currEL(tc));
        int addr_sz = bits(vaddr, topbit, physAddrRange);
        if (addr_sz != 0) {
            Fault f;
            if (is_fetch)
                f = std::make_shared<PrefetchAbort>(vaddr,
                    ArmFault::AddressSizeLL, state.isStage2,
                    TranMethod::UnknownTran);
            else
                f = std::make_shared<DataAbort>(vaddr,
                    DomainType::NoAccess,
                    is_atomic ? false : mode == Write,
                    ArmFault::AddressSizeLL, state.isStage2,
                    TranMethod::UnknownTran);
            return f;
        }
    }

    // @todo: double check this (ARM ARM issue C B3.2.1)
    if (long_desc_format || state.sctlr.tre == 0 || state.nmrr.ir0 == 0 ||
        state.nmrr.or0 == 0 || state.prrr.tr0 != 0x2) {
        if (!req->isCacheMaintenance()) {
            req->setFlags(Request::UNCACHEABLE);
        }
        req->setFlags(Request::STRICT_ORDER);
    }

    // Set memory attributes
    bool in_secure_state = state.securityState == SecurityState::Secure;
    TlbEntry temp_te;
    temp_te.ns = !in_secure_state;
    bool dc = (HaveExt(tc, ArmExtension::FEAT_VHE) &&
               state.hcr.e2h == 1 && state.hcr.tge == 1) ? 0 : state.hcr.dc;
    bool i_cacheability = state.sctlr.i && !state.sctlr.m;
    if (state.isStage2 || !dc || state.exceptionLevel == EL2) {
        temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
                                 : TlbEntry::MemoryType::StronglyOrdered;
        temp_te.innerAttrs = i_cacheability ? 0x2 : 0x0;
        temp_te.outerAttrs = i_cacheability ? 0x2 : 0x0;
        temp_te.shareable = true;
        temp_te.outerShareable = true;
    } else {
        temp_te.mtype = TlbEntry::MemoryType::Normal;
        temp_te.innerAttrs = 0x3;
        temp_te.outerAttrs = 0x3;
        temp_te.shareable = false;
        temp_te.outerShareable = false;
    }
    temp_te.setAttributes(long_desc_format);
    DPRINTF(MMU, "(No MMU) setting memory attributes: shareable: "
            "%d, innerAttrs: %d, outerAttrs: %d, stage2: %d\n",
            temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
            state.isStage2);
    setAttr(temp_te.attributes);

    return testAndFinalize(req, tc, mode, nullptr, state);
}
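
// Worked example (illustrative, not from the original source): with the
// MMU off and physAddrRange == 48, an AArch64 access to
// 0x0001000000000000 (bit 48 set) has bits(vaddr, topbit, physAddrRange)
// != 0 and takes an Address Size fault, while an address that fits in 48
// bits is simply treated as PA == VA with the attributes set above.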

Fault
MMU::translateMmuOn(ThreadContext *tc, const RequestPtr &req, Mode mode,
                    Translation *translation, bool &delay, bool timing,
                    bool functional, Addr vaddr,
                    TranMethod tran_method, CachedState &state)
{
    TlbEntry *te = NULL;
    bool is_fetch = (mode == Execute);
    TlbEntry mergeTe;

    Request::Flags flags = req->getFlags();
    Addr vaddr_tainted = req->getVaddr();

    Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
                              functional, &mergeTe, state);
    // only proceed if we have a valid table entry
    if (!isCompleteTranslation(te) && (fault == NoFault)) delay = true;

    // If we have the table entry transfer some of the attributes to the
    // request that triggered the translation
    if (isCompleteTranslation(te)) {
        // Set memory attributes
        DPRINTF(MMU,
                "Setting memory attributes: shareable: %d, innerAttrs: %d, "
                "outerAttrs: %d, mtype: %d, stage2: %d\n",
                te->shareable, te->innerAttrs, te->outerAttrs,
                static_cast<uint8_t>(te->mtype), state.isStage2);
        setAttr(te->attributes);

        if (te->nonCacheable && !req->isCacheMaintenance())
            req->setFlags(Request::UNCACHEABLE);

        // Require requests to be ordered if the request goes to
        // strongly ordered or device memory (i.e., anything other
        // than normal memory requires strict order).
        if (te->mtype != TlbEntry::MemoryType::Normal)
            req->setFlags(Request::STRICT_ORDER);

        Addr pa = te->pAddr(vaddr);
        req->setPaddr(pa);

        if (state.securityState == SecurityState::Secure && !te->ns) {
            req->setFlags(Request::SECURE);
        } else {
            req->clearFlags(Request::SECURE);
        }
        if (!is_fetch && fault == NoFault &&
            (vaddr & mask(flags & AlignmentMask)) &&
            (te->mtype != TlbEntry::MemoryType::Normal)) {
            // Unaligned accesses to Device memory should always cause an
            // abort regardless of sctlr.a
            stats.alignFaults++;
            bool is_write = (mode == Write);
            return std::make_shared<DataAbort>(
                vaddr_tainted,
                DomainType::NoAccess, is_write,
                ArmFault::AlignmentFault, state.isStage2,
                tran_method);
        }

        if (fault == NoFault)
            fault = testAndFinalize(req, tc, mode, te, state);
    }

    return fault;
}

Fault
MMU::translateFs(const RequestPtr &req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing,
                 ArmTranslationType tran_type, bool functional,
                 CachedState &state)
{
    // No such thing as a functional timing access
    assert(!(timing && functional));

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (state.aarch64) {
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, state.exceptionLevel,
                                 static_cast<TCR>(state.ttbcr),
                                 mode == Execute, state);
    } else {
        vaddr = vaddr_tainted;
    }
    Request::Flags flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool long_desc_format = state.aarch64 || longDescFormatInUse(tc);
    TranMethod tran_method = long_desc_format ?
        TranMethod::LpaeTran : TranMethod::VmsaTran;

    DPRINTF(MMU,
            "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
            state.isPriv, flags & UserMode,
            state.securityState == SecurityState::Secure,
            tran_type & S1S2NsTran);

    DPRINTF(MMU, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
            "flags %#lx tranType 0x%x\n", vaddr_tainted, mode,
            state.isStage2, state.scr, state.sctlr, flags, tran_type);

    if (!state.isStage2) {
        if ((req->isInstFetch() && (!state.sctlr.i)) ||
            ((!req->isInstFetch()) && (!state.sctlr.c))) {
            if (!req->isCacheMaintenance()) {
                req->setFlags(Request::UNCACHEABLE);
            }
            req->setFlags(Request::STRICT_ORDER);
        }
    }
    if (!is_fetch) {
        if (state.sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                stats.alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, state.isStage2,
                    tran_method);
            }
        }
    }

    bool vm = state.hcr.vm;
    if (HaveExt(tc, ArmExtension::FEAT_VHE) &&
        state.hcr.e2h == 1 && state.hcr.tge == 1)
        vm = 0;
    else if (state.hcr.dc == 1)
        vm = 1;

    Fault fault = NoFault;
    // If guest MMU is off or hcr.vm=0 go straight to stage2
    if ((state.isStage2 && !vm) || (!state.isStage2 && !state.sctlr.m)) {
        fault = translateMmuOff(tc, req, mode, tran_type, vaddr,
                                long_desc_format, state);
    } else {
        DPRINTF(MMU, "Translating %s=%#x context=%d\n",
                state.isStage2 ? "IPA" : "VA", vaddr_tainted, state.asid);
        // Translation enabled
        fault = translateMmuOn(tc, req, mode, translation, delay, timing,
                               functional, vaddr, tran_method, state);
    }

    // Check for Debug Exceptions
    SelfDebug *sd = ArmISA::ISA::getSelfDebug(tc);

    if (sd->enabled() && fault == NoFault) {
        fault = sd->testDebug(tc, req, mode);
    }

    return fault;
}

Fault
MMU::translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode,
                     ArmTranslationType tran_type)
{
    return translateAtomic(req, tc, mode, tran_type, false);
}

Fault
MMU::translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode,
                     ArmTranslationType tran_type, bool stage2)
{
    auto& state = updateMiscReg(tc, tran_type, stage2);

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false,
                            tran_type, false, state);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false, state);
    assert(!delay);
    return fault;
}

Fault
MMU::translateFunctional(const RequestPtr &req, ThreadContext *tc, Mode mode)
{
    return translateFunctional(req, tc, mode, NormalTran, false);
}

Fault
MMU::translateFunctional(const RequestPtr &req, ThreadContext *tc, Mode mode,
                         ArmTranslationType tran_type)
{
    return translateFunctional(req, tc, mode, tran_type, false);
}

Fault
MMU::translateFunctional(const RequestPtr &req, ThreadContext *tc, Mode mode,
                         ArmTranslationType tran_type, bool stage2)
{
    auto& state = updateMiscReg(tc, tran_type, stage2);

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false,
                            tran_type, true, state);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false, state);
    assert(!delay);
    return fault;
}

void
MMU::translateTiming(const RequestPtr &req, ThreadContext *tc,
                     Translation *translation, Mode mode,
                     ArmTranslationType tran_type, bool stage2)
{
    auto& state = updateMiscReg(tc, tran_type, stage2);

    assert(translation);

    translateComplete(req, tc, translation, mode, tran_type,
                      stage2, state);
}

Fault
MMU::translateComplete(const RequestPtr &req, ThreadContext *tc,
                       Translation *translation, Mode mode,
                       ArmTranslationType tran_type, bool call_from_s2)
{
    return translateComplete(req, tc, translation, mode, tran_type,
                             call_from_s2, s1State);
}

Fault
MMU::translateComplete(const RequestPtr &req, ThreadContext *tc,
                       Translation *translation, Mode mode,
                       ArmTranslationType tran_type, bool call_from_s2,
                       CachedState &state)
{
    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, translation, delay, true,
                            tran_type, false, state);
    else
        fault = translateSe(req, tc, mode, translation, delay, true, state);

    DPRINTF(MMU, "Translation returning delay=%d fault=%d\n", delay,
            fault != NoFault);
    // If we have a translation, and we're not in the middle of doing a stage
    // 2 translation tell the translation that we've either finished or it's
    // going to take a while. By not doing this when we're in the middle of a
    // stage 2 translation we prevent marking the translation as delayed
    // twice, once when the translation starts and again when the stage 1
    // translation completes.

    if (translation && (call_from_s2 || !state.stage2Req || req->hasPaddr() ||
        fault != NoFault)) {
        if (!delay)
            translation->finish(fault, req, tc, mode);
        else
            translation->markDelayed();
    }
    return fault;
}

vmid_t
MMU::getVMID(ThreadContext *tc) const
{
    AA64MMFR1 mmfr1 = tc->readMiscReg(MISCREG_ID_AA64MMFR1_EL1);
    VTCR_t vtcr = tc->readMiscReg(MISCREG_VTCR_EL2);
    vmid_t vmid = 0;

    switch (mmfr1.vmidbits) {
      case 0b0000:
        // 8 bits
        vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48);
        break;
      case 0b0010:
        if (vtcr.vs && ELIs64(tc, EL2)) {
            // 16 bits
            vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 63, 48);
        } else {
            // 8 bits
            vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48);
        }
        break;
      default:
        panic("Reserved ID_AA64MMFR1_EL1.VMIDBits value: %#x",
              mmfr1.vmidbits);
    }

    return vmid;
}
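
// Worked example (illustrative, not from the original source): with
// ID_AA64MMFR1_EL1.VMIDBits == 0b0010, VTCR_EL2.VS == 1 and EL2 running
// AArch64, a VTTBR_EL2 value of 0xbeef000000001000 yields
// vmid = bits(63, 48) = 0xbeef; in the 8-bit configuration the same
// register yields bits(55, 48) = 0xef.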

MMU::CachedState&
MMU::updateMiscReg(ThreadContext *tc,
                   ArmTranslationType tran_type, bool stage2)
{
    // check if the regs have changed, or the translation mode is different.
    // NOTE: the tran type doesn't affect stage 2 TLBs as they only handle
    // one type of translation anyway

    auto& state = stage2 ? s2State : s1State;
    if (state.miscRegValid && miscRegContext == tc->contextId() &&
        ((tran_type == state.curTranType) || stage2)) {

    } else {
        DPRINTF(MMU, "MMU variables changed!\n");
        state.updateMiscReg(tc, tran_type);

        itbStage2->setVMID(state.vmid);
        dtbStage2->setVMID(state.vmid);

        for (auto tlb : instruction) {
            static_cast<TLB*>(tlb)->setVMID(state.vmid);
        }
        for (auto tlb : data) {
            static_cast<TLB*>(tlb)->setVMID(state.vmid);
        }
        for (auto tlb : unified) {
            static_cast<TLB*>(tlb)->setVMID(state.vmid);
        }

        miscRegContext = tc->contextId();
    }

    if (state.directToStage2) {
        s2State.updateMiscReg(tc, tran_type);
        return s2State;
    } else {
        return state;
    }
}

void
MMU::CachedState::updateMiscReg(ThreadContext *tc,
                                ArmTranslationType tran_type)
{
    cpsr = tc->readMiscReg(MISCREG_CPSR);
    hcr = tc->readMiscReg(MISCREG_HCR_EL2);
    scr = tc->readMiscReg(MISCREG_SCR_EL3);

    // Dependencies: SCR/SCR_EL3, CPSR
    securityState = ArmISA::isSecure(tc) &&
        !(tran_type & HypMode) && !(tran_type & S1S2NsTran) ?
            SecurityState::Secure : SecurityState::NonSecure;

    exceptionLevel = tranTypeEL(cpsr, scr, tran_type);
    currRegime = translationRegime(tc, exceptionLevel);
    aarch64 = isStage2 ?
        ELIs64(tc, EL2) :
        ELIs64(tc, translationEl(currRegime));

    if (aarch64) {  // AArch64
        // determine EL we need to translate in
        switch (currRegime) {
          case TranslationRegime::EL10:
            {
                sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
                ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
                uint64_t ttbr_asid = ttbcr.a1 ?
                    tc->readMiscReg(MISCREG_TTBR1_EL1) :
                    tc->readMiscReg(MISCREG_TTBR0_EL1);
                asid = bits(ttbr_asid,
                            (mmu->haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
            }
            break;
          case TranslationRegime::EL20:
            {
                // VHE code for EL2&0 regime
                sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
                ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
                uint64_t ttbr_asid = ttbcr.a1 ?
                    tc->readMiscReg(MISCREG_TTBR1_EL2) :
                    tc->readMiscReg(MISCREG_TTBR0_EL2);
                asid = bits(ttbr_asid,
                            (mmu->haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
            }
            break;
          case TranslationRegime::EL2:
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
            asid = -1;
            break;
          case TranslationRegime::EL3:
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL3);
            asid = -1;
            break;
        }

        isPriv = exceptionLevel != EL0;
        if (mmu->release()->has(ArmExtension::VIRTUALIZATION)) {
            vmid = getVMID(tc);
            bool vm = hcr.vm;
            if (HaveExt(tc, ArmExtension::FEAT_VHE) &&
                hcr.e2h == 1 && hcr.tge == 1) {
                vm = 0;
            }

            if (hcr.e2h == 1 && (exceptionLevel == EL2
                                 || (hcr.tge == 1 && exceptionLevel == EL0))) {
                directToStage2 = false;
                stage2Req = false;
                stage2DescReq = false;
            } else {
                // Work out if we should skip the first stage of translation
                // and go directly to stage 2. This value is cached so we
                // don't have to compute it for every translation.
                const bool el2_enabled = EL2Enabled(tc);
                stage2Req = isStage2 ||
                    (vm && exceptionLevel < EL2 && el2_enabled &&
                     !(tran_type & S1CTran) &&
                     !(tran_type & S1E1Tran)); // <--- FIX THIS HACK
                stage2DescReq = isStage2 ||
                    (vm && exceptionLevel < EL2 && el2_enabled);
                directToStage2 = !isStage2 && stage2Req && !sctlr.m;
            }
        } else {
            vmid = 0;
            directToStage2 = false;
            stage2Req = false;
            stage2DescReq = false;
        }
    } else {  // AArch32
        sctlr = tc->readMiscReg(snsBankedIndex(MISCREG_SCTLR, tc,
            securityState == SecurityState::NonSecure));
        ttbcr = tc->readMiscReg(snsBankedIndex(MISCREG_TTBCR, tc,
            securityState == SecurityState::NonSecure));
        isPriv = cpsr.mode != MODE_USER;
        if (longDescFormatInUse(tc)) {
            uint64_t ttbr_asid = tc->readMiscReg(
                snsBankedIndex(ttbcr.a1 ? MISCREG_TTBR1 : MISCREG_TTBR0,
                               tc,
                               securityState == SecurityState::NonSecure));
            asid = bits(ttbr_asid, 55, 48);
        } else { // Short-descriptor translation table format in use
            CONTEXTIDR context_id = tc->readMiscReg(snsBankedIndex(
                MISCREG_CONTEXTIDR, tc,
                securityState == SecurityState::NonSecure));
            asid = context_id.asid;
        }
        prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR, tc,
            securityState == SecurityState::NonSecure));
        nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR, tc,
            securityState == SecurityState::NonSecure));
        dacr = tc->readMiscReg(snsBankedIndex(MISCREG_DACR, tc,
            securityState == SecurityState::NonSecure));

        if (mmu->release()->has(ArmExtension::VIRTUALIZATION)) {
            vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
            if (exceptionLevel == EL2) {
                sctlr = tc->readMiscReg(MISCREG_HSCTLR);
            }
            // Work out if we should skip the first stage of translation and
            // go directly to stage 2. This value is cached so we don't have
            // to compute it for every translation.
            const bool el2_enabled = EL2Enabled(tc);
            stage2Req = isStage2 ||
                (hcr.vm && exceptionLevel < EL2 && el2_enabled &&
                 !(tran_type & S1CTran));
            stage2DescReq = isStage2 ||
                (hcr.vm && exceptionLevel < EL2 && el2_enabled);
            directToStage2 = !isStage2 && stage2Req && !sctlr.m;
        } else {
            vmid = 0;
            stage2Req = false;
            directToStage2 = false;
            stage2DescReq = false;
        }
    }
    miscRegValid = true;
    curTranType = tran_type;
}

ExceptionLevel
MMU::tranTypeEL(CPSR cpsr, SCR scr, ArmTranslationType type)
{
    switch (type) {
      case S1E0Tran:
      case S12E0Tran:
        return EL0;

      case S1E1Tran:
      case S12E1Tran:
      case S1S2NsTran:
        return EL1;

      case S1E2Tran:
      case HypMode:
        return EL2;

      case S1E3Tran:
        return EL3;

      case S1CTran:
        return currEL(cpsr) == EL3 && scr.ns == 0 ?
            EL3 : EL1;

      case NormalTran:
        return currEL(cpsr);

      default:
        panic("Unknown translation mode!\n");
    }
}
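
// Worked example (illustrative, not from the original source): an explicit
// AT S1E2R address translation instruction uses S1E2Tran, so the walk is
// pinned to EL2 regardless of the EL that issued it, while NormalTran
// simply follows currEL(cpsr).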

Fault
MMU::getTE(TlbEntry **te, const RequestPtr &req, ThreadContext *tc,
           Mode mode, Translation *translation, bool timing, bool functional,
           SecurityState ss, PASpace ipaspace, ArmTranslationType tran_type,
           bool stage2)
{
    return getTE(te, req, tc, mode, translation, timing, functional,
                 ss, ipaspace, tran_type, stage2 ? s2State : s1State);
}

TlbEntry*
MMU::lookup(Addr va, uint16_t asid, vmid_t vmid, SecurityState ss,
            bool functional, bool ignore_asn, TranslationRegime regime,
            bool stage2, BaseMMU::Mode mode)
{
    TLB *tlb = getTlb(mode, stage2);

    TlbEntry::KeyType lookup_data;

    lookup_data.va = va;
    lookup_data.asn = asid;
    lookup_data.ignoreAsn = ignore_asn;
    lookup_data.vmid = vmid;
    lookup_data.ss = ss;
    lookup_data.functional = functional;
    lookup_data.targetRegime = regime;
    lookup_data.mode = mode;

    return tlb->multiLookup(lookup_data);
}

Fault
MMU::getTE(TlbEntry **te, const RequestPtr &req, ThreadContext *tc,
           Mode mode, Translation *translation, bool timing, bool functional,
           SecurityState ss, PASpace ipaspace, ArmTranslationType tran_type,
           CachedState &state)
{
    // In a 2-stage system, the IPA->PA translation can be started via this
    // call so make sure the miscRegs are correct.
    if (state.isStage2) {
        updateMiscReg(tc, tran_type, true);
    }

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    TranslationRegime regime = state.currRegime;

    if (state.aarch64) {
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, state.exceptionLevel,
                                 static_cast<TCR>(state.ttbcr),
                                 mode == Execute, state);
    } else {
        vaddr = vaddr_tainted;
    }

    *te = lookup(vaddr, state.asid, state.vmid, ss, false,
                 false, regime, state.isStage2, mode);

    if (!isCompleteTranslation(*te)) {
        if (req->isPrefetch()) {
            // if the request is a prefetch don't attempt to fill the TLB or
            // go any further with the memory access (here we can safely use
            // the fault status for the short desc. format in all cases)
            stats.prefetchFaults++;
            return std::make_shared<PrefetchAbort>(
                vaddr_tainted, ArmFault::PrefetchTLBMiss, state.isStage2);
        }

        // start translation table walk, pass variables rather than
        // re-retrieving in table walker for speed
        DPRINTF(MMU,
                "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
                vaddr_tainted, state.asid, state.vmid);

        Fault fault;
        fault = getTableWalker(mode, state.isStage2)->walk(
            req, tc, state.asid, state.vmid, mode,
            translation, timing, functional, ss,
            ipaspace, tran_type, state.stage2DescReq, *te);

        // for timing mode, return and wait for table walk,
        if (timing || fault != NoFault) {
            return fault;
        }

        *te = lookup(vaddr, state.asid, state.vmid, ss,
                     true, false, regime, state.isStage2, mode);
        assert(*te);
    }
    return NoFault;
}

Fault
MMU::getResultTe(TlbEntry **te, const RequestPtr &req,
                 ThreadContext *tc, Mode mode,
                 Translation *translation, bool timing, bool functional,
                 TlbEntry *mergeTe, CachedState &state)
{
    Fault fault;

    if (state.isStage2) {
        PASpace ipaspace = state.securityState == SecurityState::Secure ?
            PASpace::Secure : PASpace::NonSecure;

        // We are already in the stage 2 TLB. Grab the table entry for stage
        // 2 only. We are here because stage 1 translation is disabled.
        TlbEntry *s2_te = nullptr;
        // Get the stage 2 table entry
        fault = getTE(&s2_te, req, tc, mode, translation, timing, functional,
                      state.securityState, ipaspace,
                      state.curTranType, state);
        // Check permissions of stage 2
        if (isCompleteTranslation(s2_te) && (fault == NoFault)) {
            if (state.aarch64)
                fault = checkPermissions64(s2_te, req, mode, tc, state);
            else
                fault = checkPermissions(s2_te, req, mode, state);
        }
        *te = s2_te;
        return fault;
    }

    TlbEntry *s1_te = nullptr;

    Addr vaddr_tainted = req->getVaddr();

    // Get the stage 1 table entry
    fault = getTE(&s1_te, req, tc, mode, translation, timing, functional,
                  state.securityState, PASpace::NonSecure,
                  state.curTranType, state);
    // only proceed if we have a valid table entry
    if (isCompleteTranslation(s1_te) && (fault == NoFault)) {
        // Check stage 1 permissions before checking stage 2
        if (state.aarch64)
            fault = checkPermissions64(s1_te, req, mode, tc, state);
        else
            fault = checkPermissions(s1_te, req, mode, state);
        if (state.stage2Req & (fault == NoFault)) {
            Stage2LookUp *s2_lookup = new Stage2LookUp(this, *s1_te,
                req, translation, mode, timing, functional,
                state.securityState, state.curTranType);
            fault = s2_lookup->getTe(tc, mergeTe);
            if (s2_lookup->isComplete()) {
                *te = mergeTe;
                // We've finished with the lookup so delete it
                delete s2_lookup;
            } else {
                // The lookup hasn't completed, so we can't delete it now. We
                // get round this by asking the object to self delete when the
                // translation is complete.
                s2_lookup->setSelfDelete();
            }
        } else {
            // This case deals with an S1 hit (or bypass), followed by
            // an S2 hit-but-perms issue
            if (state.isStage2) {
                DPRINTF(MMU, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
                        vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0,
                        fault);
                if (fault != NoFault) {
                    auto arm_fault = reinterpret_cast<ArmFault*>(fault.get());
                    arm_fault->annotate(ArmFault::S1PTW, false);
                    arm_fault->annotate(ArmFault::OVA, vaddr_tainted);
                }
            }
            *te = s1_te;
        }
    }
    return fault;
}

bool
MMU::isCompleteTranslation(TlbEntry *entry) const
{
    return entry && !entry->partial;
}

void
MMU::takeOverFrom(BaseMMU *old_mmu)
{
    BaseMMU::takeOverFrom(old_mmu);

    auto *ommu = dynamic_cast<MMU*>(old_mmu);
    assert(ommu);

    _attr = ommu->_attr;

    s1State = ommu->s1State;
    s2State = ommu->s2State;
}

void
MMU::setTestInterface(SimObject *_ti)
{
    if (!_ti) {
        test = nullptr;
    } else {
        TlbTestInterface *ti(dynamic_cast<TlbTestInterface *>(_ti));
        fatal_if(!ti, "%s is not a valid ARM TLB tester\n", _ti->name());
        test = ti;
        itbWalker->setTestInterface(ti);
        dtbWalker->setTestInterface(ti);
        itbStage2Walker->setTestInterface(ti);
        dtbStage2Walker->setTestInterface(ti);
    }
}

Fault
MMU::testTranslation(const RequestPtr &req, Mode mode,
                     DomainType domain, CachedState &state) const
{
    if (!test || !req->hasSize() || req->getSize() == 0 ||
        req->isCacheMaintenance()) {
        return NoFault;
    } else {
        return test->translationCheck(req, state.isPriv, mode, domain);
    }
}

MMU::Stats::Stats(statistics::Group *parent)
    : statistics::Group(parent),
      ADD_STAT(alignFaults, statistics::units::Count::get(),
               "Number of MMU faults due to alignment restrictions"),
      ADD_STAT(prefetchFaults, statistics::units::Count::get(),
               "Number of MMU faults due to prefetch"),
      ADD_STAT(domainFaults, statistics::units::Count::get(),
               "Number of MMU faults due to domain restrictions"),
      ADD_STAT(permsFaults, statistics::units::Count::get(),
               "Number of MMU faults due to permissions restrictions")
{
}

} // namespace gem5