mmu.cc
/*
 * Copyright (c) 2010-2013, 2016-2024 Arm Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2001-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "arch/arm/mmu.hh"

#include "arch/arm/isa.hh"
#include "arch/arm/mpam.hh"
#include "arch/arm/reg_abi.hh"
#include "arch/arm/stage2_lookup.hh"
#include "arch/arm/table_walker.hh"
#include "arch/arm/tlbi_op.hh"
#include "debug/TLB.hh"
#include "debug/TLBVerbose.hh"
#include "mem/packet_access.hh"
#include "sim/pseudo_inst.hh"
#include "sim/process.hh"

namespace gem5
{

using namespace ArmISA;

MMU::MMU(const ArmMMUParams &p)
  : BaseMMU(p),
    itbStage2(p.stage2_itb), dtbStage2(p.stage2_dtb),
    itbWalker(p.itb_walker), dtbWalker(p.dtb_walker),
    itbStage2Walker(p.stage2_itb_walker),
    dtbStage2Walker(p.stage2_dtb_walker),
    test(nullptr),
    miscRegContext(0),
    s1State(this, false), s2State(this, true),
    _attr(0),
    _release(nullptr),
    _hasWalkCache(false),
    stats(this)
{
    // Cache system-level properties
    if (FullSystem) {
        ArmSystem *arm_sys = dynamic_cast<ArmSystem *>(p.sys);
        assert(arm_sys);
        haveLargeAsid64 = arm_sys->haveLargeAsid64();
        physAddrRange = arm_sys->physAddrRange();

        _release = arm_sys->releaseFS();
    } else {
        haveLargeAsid64 = false;
        physAddrRange = 48;

        _release = p.release_se;
    }

    m5opRange = p.sys->m5opRange();
}

void
MMU::init()
{
    itbWalker->setMmu(this);
    dtbWalker->setMmu(this);
    itbStage2Walker->setMmu(this);
    dtbStage2Walker->setMmu(this);

    itbStage2->setTableWalker(itbStage2Walker);
    dtbStage2->setTableWalker(dtbStage2Walker);

    getITBPtr()->setTableWalker(itbWalker);
    getDTBPtr()->setTableWalker(dtbWalker);

    BaseMMU::init();

    _hasWalkCache = checkWalkCache();
}

bool
MMU::checkWalkCache() const
{
    for (auto tlb : instruction) {
        if (static_cast<TLB*>(tlb)->walkCache())
            return true;
    }
    for (auto tlb : data) {
        if (static_cast<TLB*>(tlb)->walkCache())
            return true;
    }
    for (auto tlb : unified) {
        if (static_cast<TLB*>(tlb)->walkCache())
            return true;
    }

    return false;
}

void
MMU::drainResume()
{
    s1State.miscRegValid = false;
    s2State.miscRegValid = false;
}

TLB *
MMU::getTlb(BaseMMU::Mode mode, bool stage2) const
{
    if (mode == BaseMMU::Execute) {
        if (stage2)
            return itbStage2;
        else
            return getITBPtr();
    } else {
        if (stage2)
            return dtbStage2;
        else
            return getDTBPtr();
    }
}

TableWalker *
MMU::getTableWalker(BaseMMU::Mode mode, bool stage2) const
{
    if (mode == BaseMMU::Execute) {
        if (stage2)
            return itbStage2Walker;
        else
            return itbWalker;
    } else {
        if (stage2)
            return dtbStage2Walker;
        else
            return dtbWalker;
    }
}

bool
MMU::translateFunctional(ThreadContext *tc, Addr va, Addr &pa)
{
    CachedState& state = updateMiscReg(tc, NormalTran, false);

    auto tlb = getTlb(BaseMMU::Read, state.directToStage2);

    TlbEntry::Lookup lookup_data;

    lookup_data.va = va;
    lookup_data.asn = state.asid;
    lookup_data.ignoreAsn = false;
    lookup_data.vmid = state.vmid;
    lookup_data.secure = state.isSecure;
    lookup_data.functional = true;
    lookup_data.targetRegime = state.currRegime;
    lookup_data.mode = BaseMMU::Read;

    TlbEntry *e = tlb->multiLookup(lookup_data);

    if (!e)
        return false;
    pa = e->pAddr(va);
    return true;
}
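
// A minimal usage sketch for the functional probe above: it consults the
// TLB hierarchy only (no table walk is started and no state is modified),
// so a miss simply returns false and leaves 'pa' untouched.
//
//     Addr pa;
//     if (mmu->translateFunctional(tc, vaddr, pa))
//         DPRINTF(TLB, "VA %#x hits in the TLB as PA %#x\n", vaddr, pa);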

void
MMU::invalidateMiscReg()
{
    s1State.miscRegValid = false;
    s1State.computeAddrTop.flush();
    s2State.computeAddrTop.flush();
}

Fault
MMU::testAndFinalize(const RequestPtr &req, ThreadContext *tc,
                     Mode mode, TlbEntry* te, CachedState &state) const
{
    // If we don't have a valid tlb entry it means virtual memory
    // is not enabled
    TlbEntry::DomainType domain =
        te ? te->domain : TlbEntry::DomainType::NoAccess;

    mpam::tagRequest(tc, req, mode == Execute);

    // Check for a tester generated address fault
    Fault fault = testTranslation(req, mode, domain, state);
    if (fault != NoFault) {
        return fault;
    } else {
        // Now that we checked no fault has been generated in the
        // translation process, we can finalize the physical address
        return finalizePhysical(req, tc, mode);
    }
}

Fault
MMU::finalizePhysical(const RequestPtr &req,
                      ThreadContext *tc, Mode mode) const
{
    const Addr paddr = req->getPaddr();

    if (m5opRange.contains(paddr)) {
        uint8_t func;
        pseudo_inst::decodeAddrOffset(paddr - m5opRange.start(), func);
        req->setLocalAccessor(
            [func, mode](ThreadContext *tc, PacketPtr pkt) -> Cycles
            {
                uint64_t ret;
                if (inAArch64(tc))
                    pseudo_inst::pseudoInst<RegABI64>(tc, func, ret);
                else
                    pseudo_inst::pseudoInst<RegABI32>(tc, func, ret);

                if (mode == Read)
                    pkt->setLE(ret);

                return Cycles(1);
            }
        );
    }

    return NoFault;
}
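
// The accessor installed above services m5op loads/stores locally instead
// of sending them to memory: the pseudo-instruction number is decoded from
// the offset into m5opRange and executed when the access happens. As an
// illustration of guest-side use (the exact offset encoding is owned by
// pseudo_inst::decodeAddrOffset, so treat the shift below as an
// assumption):
//
//     volatile uint64_t *op = (uint64_t *)(m5op_base + (func << 8));
//     uint64_t result = *op;  // load runs pseudo-op 'func', returns result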

Fault
MMU::translateSe(const RequestPtr &req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing,
                 CachedState &state)
{
    updateMiscReg(tc, NormalTran, state.isStage2);
    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (state.aarch64) {
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, state.exceptionLevel,
                                 static_cast<TCR>(state.ttbcr),
                                 mode == Execute, state);
    } else {
        vaddr = vaddr_tainted;
    }
    Request::Flags flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);

    if (!is_fetch) {
        if (state.sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                // LPAE is always disabled in SE mode
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, state.isStage2,
                    ArmFault::VmsaTran);
            }
        }
    }

    Process *p = tc->getProcessPtr();
    if (const auto pte = p->pTable->lookup(vaddr); !pte) {
        return std::make_shared<GenericPageTableFault>(vaddr_tainted);
    } else {
        req->setPaddr(pte->paddr + p->pTable->pageOffset(vaddr));

        if (pte->flags & EmulationPageTable::Uncacheable)
            req->setFlags(Request::UNCACHEABLE);

        return finalizePhysical(req, tc, mode);
    }
}

Fault
MMU::checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode,
                      bool stage2)
{
    return checkPermissions(te, req, mode, stage2 ? s2State : s1State);
}

Fault
MMU::checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode,
                      CachedState &state)
{
    // a data cache maintenance instruction that operates by MVA does
    // not generate a Data Abort exception due to a Permission fault
    if (req->isCacheMaintenance()) {
        return NoFault;
    }

    Addr vaddr = req->getVaddr(); // 32-bit don't have to purify
    Request::Flags flags = req->getFlags();
    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool is_priv = state.isPriv && !(flags & UserMode);

    // Get the translation type from the actual table entry
    ArmFault::TranMethod tranMethod = te->longDescFormat ?
        ArmFault::LpaeTran : ArmFault::VmsaTran;

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (state.isStage2 && req->isPTWalk() && state.hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return std::make_shared<DataAbort>(
            vaddr, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            state.isStage2, tranMethod);
    }

    // Generate an alignment fault for unaligned data accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                stats.alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr, TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, state.isStage2,
                    tranMethod);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return std::make_shared<PrefetchAbort>(
                vaddr, ArmFault::PrefetchUncacheable,
                state.isStage2, tranMethod);
        }
    }

    if (!te->longDescFormat) {
        switch ((state.dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
          case 0:
            stats.domainFaults++;
            DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
                    " domain: %#x write:%d\n", state.dacr,
                    static_cast<uint8_t>(te->domain), is_write);
            if (is_fetch) {
                // Use PC value instead of vaddr because vaddr might
                // be aligned to cache line and should not be the
                // address reported in FAR
                return std::make_shared<PrefetchAbort>(
                    req->getPC(),
                    ArmFault::DomainLL + te->lookupLevel,
                    state.isStage2, tranMethod);
            } else
                return std::make_shared<DataAbort>(
                    vaddr, te->domain, is_write,
                    ArmFault::DomainLL + te->lookupLevel,
                    state.isStage2, tranMethod);
          case 1:
            // Continue with permissions check
            break;
          case 2:
            panic("UNPRED domain\n");
          case 3:
            return NoFault;
        }
    }

    // The 'ap' variable is AP[2:0] or {AP[2,1],1b'0}, i.e. always three bits
    uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
    uint8_t hap = te->hap;

    if (state.sctlr.afe == 1 || te->longDescFormat)
        ap |= 1;

    bool abt;
    bool isWritable = true;
    // If this is a stage 2 access (eg for reading stage 1 page table entries)
    // then don't perform the AP permissions check, we still do the HAP check
    // below.
    if (state.isStage2) {
        abt = false;
    } else {
        switch (ap) {
          case 0:
            DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
                    (int)state.sctlr.rs);
            if (!state.sctlr.xp) {
                switch ((int)state.sctlr.rs) {
                  case 2:
                    abt = is_write;
                    break;
                  case 1:
                    abt = is_write || !is_priv;
                    break;
                  case 0:
                  case 3:
                  default:
                    abt = true;
                    break;
                }
            } else {
                abt = true;
            }
            break;
          case 1:
            abt = !is_priv;
            break;
          case 2:
            abt = !is_priv && is_write;
            isWritable = is_priv;
            break;
          case 3:
            abt = false;
            break;
          case 4:
            panic("UNPRED permissions\n");
          case 5:
            abt = !is_priv || is_write;
            isWritable = false;
            break;
          case 6:
          case 7:
            abt = is_write;
            isWritable = false;
            break;
          default:
            panic("Unknown permissions %#x\n", ap);
        }
    }

    bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
    bool xn = te->xn || (isWritable && state.sctlr.wxn) ||
        (ap == 3 && state.sctlr.uwxn && is_priv);
    if (is_fetch && (abt || xn ||
                     (te->longDescFormat && te->pxn && is_priv) ||
                     (state.isSecure && te->ns && state.scr.sif))) {
        stats.permsFaults++;
        DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
                "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d\n",
                ap, is_priv, is_write, te->ns,
                state.scr.sif, state.sctlr.afe);
        // Use PC value instead of vaddr because vaddr might be aligned to
        // cache line and should not be the address reported in FAR
        return std::make_shared<PrefetchAbort>(
            req->getPC(),
            ArmFault::PermissionLL + te->lookupLevel,
            state.isStage2, tranMethod);
    } else if (abt | hapAbt) {
        stats.permsFaults++;
        DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
                "priv:%d write:%d\n", ap, is_priv, is_write);
        return std::make_shared<DataAbort>(
            vaddr, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            state.isStage2 | !abt, tranMethod);
    }
    return NoFault;
}
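
// For reference, the short-descriptor AP[2:0] encodings enforced by the
// switch above (with SCTLR.AFE == 0) are roughly:
//
//     AP        privileged    unprivileged
//     0b001         RW            none
//     0b010         RW            RO
//     0b011         RW            RW
//     0b101         RO            none
//     0b110/111     RO            RO
//
// This is a summary of the code above, not an authoritative table; see the
// ARMv7-A ARM (DDI 0406) for the full semantics, including the legacy
// SCTLR.RS cases behind AP == 0.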

Fault
MMU::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
                        ThreadContext *tc, bool stage2)
{
    return checkPermissions64(te, req, mode, tc, stage2 ? s2State : s1State);
}

Fault
MMU::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
                        ThreadContext *tc, CachedState &state)
{
    assert(state.aarch64);

    // A data cache maintenance instruction that operates by VA does
    // not generate a Permission fault unless:
    // * It is a data cache invalidate (dc ivac) which requires write
    //   permissions to the VA, or
    // * It is executed from EL0
    if (req->isCacheClean() && state.exceptionLevel != EL0 &&
        !state.isStage2) {
        return NoFault;
    }

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, state.exceptionLevel,
                                  static_cast<TCR>(state.ttbcr),
                                  mode == Execute, state);

    Request::Flags flags = req->getFlags();
    bool is_fetch = (mode == Execute);
    // Cache clean operations require read permissions to the specified VA
    bool is_write = !req->isCacheClean() && mode == Write;
    bool is_atomic = req->isAtomic();

    updateMiscReg(tc, state.curTranType, state.isStage2);

    // If this is the second stage of translation and the request is for a
    // stage 1 page table walk then we need to check the HCR.PTW bit. This
    // allows us to generate a fault if the request targets an area marked
    // as a device or strongly ordered.
    if (state.isStage2 && req->isPTWalk() && state.hcr.ptw &&
        (te->mtype != TlbEntry::MemoryType::Normal)) {
        return std::make_shared<DataAbort>(
            vaddr_tainted, te->domain, is_write,
            ArmFault::PermissionLL + te->lookupLevel,
            state.isStage2, ArmFault::LpaeTran);
    }

    // Generate an alignment fault for unaligned accesses to device or
    // strongly ordered memory
    if (!is_fetch) {
        if (te->mtype != TlbEntry::MemoryType::Normal) {
            if (vaddr & mask(flags & AlignmentMask)) {
                stats.alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess,
                    is_atomic ? false : is_write,
                    ArmFault::AlignmentFault, state.isStage2,
                    ArmFault::LpaeTran);
            }
        }
    }

    if (te->nonCacheable) {
        // Prevent prefetching from I/O devices.
        if (req->isPrefetch()) {
            // Here we can safely use the fault status for the short
            // desc. format in all cases
            return std::make_shared<PrefetchAbort>(
                vaddr_tainted,
                ArmFault::PrefetchUncacheable,
                state.isStage2, ArmFault::LpaeTran);
        }
    }

    bool grant = false;
    // grant_read is used for faults from an atomic instruction that
    // both reads and writes from a memory location. From an ISS point
    // of view they count as read if a read to that address would have
    // generated the fault; they count as writes otherwise
    bool grant_read = true;

    if (state.isStage2) {
        std::tie(grant, grant_read) = s2PermBits64(te, req, mode, tc, state,
            (!is_write && !is_fetch), is_write, is_fetch);
    } else {
        std::tie(grant, grant_read) = s1PermBits64(te, req, mode, tc, state,
            (!is_write && !is_fetch), is_write, is_fetch);
    }

    if (!grant) {
        if (is_fetch) {
            stats.permsFaults++;
            DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
                    "ns:%d scr.sif:%d sctlr.afe: %d\n",
                    te->ns, state.scr.sif, state.sctlr.afe);
            // Use PC value instead of vaddr because vaddr might be aligned to
            // cache line and should not be the address reported in FAR
            return std::make_shared<PrefetchAbort>(
                req->getPC(),
                ArmFault::PermissionLL + te->lookupLevel,
                state.isStage2, ArmFault::LpaeTran);
        } else {
            stats.permsFaults++;
            DPRINTF(TLB, "TLB Fault: Data abort on permission check. "
                    "ns:%d\n", te->ns);
            return std::make_shared<DataAbort>(
                vaddr_tainted, te->domain,
                (is_atomic && !grant_read) ? false : is_write,
                ArmFault::PermissionLL + te->lookupLevel,
                state.isStage2, ArmFault::LpaeTran);
        }
    }

    return NoFault;
}

std::pair<bool, bool>
MMU::s2PermBits64(TlbEntry *te, const RequestPtr &req, Mode mode,
                  ThreadContext *tc, CachedState &state,
                  bool r, bool w, bool x)
{
    assert(ArmSystem::haveEL(tc, EL2) && state.exceptionLevel != EL2);

    // In stage 2 we use the hypervisor access permission bits.
    // The following permissions are described in ARM DDI 0487A.f
    // D4-1802
    bool grant = false;
    bool grant_read = te->hap & 0b01;
    bool grant_write = te->hap & 0b10;

    uint8_t xn = te->xn;
    uint8_t pxn = te->pxn;

    if (ArmSystem::haveEL(tc, EL3) && state.isSecure &&
        te->ns && state.scr.sif) {
        xn = true;
    }

    DPRINTF(TLBVerbose,
            "Checking S2 permissions: hap:%d, xn:%d, pxn:%d, r:%d, "
            "w:%d, x:%d\n", te->hap, xn, pxn, r, w, x);

    if (x) {
        grant = grant_read && !xn;
    } else if (req->isAtomic()) {
        grant = grant_read || grant_write;
    } else if (w) {
        grant = grant_write;
    } else if (r) {
        grant = grant_read;
    } else {
        panic("Invalid Operation\n");
    }

    return std::make_pair(grant, grant_read);
}
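
// Stage 2 grants come straight from the HAP field: HAP[0] permits reads,
// HAP[1] permits writes, and execution additionally requires XN == 0
// (SCR.SIF can force XN for non-secure entries reached from Secure state).
// For example, a stage 2 entry with hap == 0b01 lets a guest load complete
// but faults the corresponding store.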

std::pair<bool, bool>
MMU::s1PermBits64(TlbEntry *te, const RequestPtr &req, Mode mode,
                  ThreadContext *tc, CachedState &state,
                  bool r, bool w, bool x)
{
    bool grant = false, grant_read = true, grant_write = true,
         grant_exec = true;

    const uint8_t ap = te->ap & 0b11; // 2-bit access protection field
    const bool is_priv = state.isPriv && !(req->getFlags() & UserMode);

    bool wxn = state.sctlr.wxn;
    uint8_t xn = te->xn;
    uint8_t pxn = te->pxn;

    DPRINTF(TLBVerbose, "Checking S1 permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
            "w:%d, x:%d, is_priv: %d, wxn: %d\n", ap, xn,
            pxn, r, w, x, is_priv, wxn);

    if (faultPAN(tc, ap, req, mode, is_priv, state)) {
        return std::make_pair(false, false);
    }

    TranslationRegime regime =
        !is_priv ? TranslationRegime::EL10 : state.currRegime;
    if (hasUnprivRegime(regime)) {
        bool pr = false;
        bool pw = false;
        bool ur = false;
        bool uw = false;
        // Apply leaf permissions
        switch (ap) {
          case 0b00: // Privileged access
            pr = 1; pw = 1; ur = 0; uw = 0;
            break;
          case 0b01: // No effect
            pr = 1; pw = 1; ur = 1; uw = 1;
            break;
          case 0b10: // Read-only, privileged access
            pr = 1; pw = 0; ur = 0; uw = 0;
            break;
          case 0b11: // Read-only
            pr = 1; pw = 0; ur = 1; uw = 0;
            break;
        }

        // Locations writable by unprivileged cannot be executed by privileged
        const bool px = !(pxn || uw);
        const bool ux = !xn;

        grant_read = is_priv ? pr : ur;
        grant_write = is_priv ? pw : uw;
        grant_exec = is_priv ? px : ux;
    } else {
        switch (bits(ap, 1)) {
          case 0b0: // No effect
            grant_read = 1; grant_write = 1;
            break;
          case 0b1: // Read-Only
            grant_read = 1; grant_write = 0;
            break;
        }
        grant_exec = !xn;
    }

    // Do not allow execution from writable location
    // if wxn is set
    grant_exec = grant_exec && !(wxn && grant_write);

    if (ArmSystem::haveEL(tc, EL3) && state.isSecure && te->ns) {
        grant_exec = grant_exec && !state.scr.sif;
    }

    if (x) {
        grant = grant_exec;
    } else if (req->isAtomic()) {
        grant = grant_read && grant_write;
    } else if (w) {
        grant = grant_write;
    } else {
        grant = grant_read;
    }

    return std::make_pair(grant, grant_read);
}
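
// The stage 1 AArch64 leaf AP[2:1] encodings applied above, for regimes
// with an unprivileged counterpart (EL1&0, EL2&0):
//
//     AP      privileged    unprivileged
//     0b00        RW            none
//     0b01        RW            RW
//     0b10        RO            none
//     0b11        RO            RO
//
// Executability is then derived from XN/PXN, with the extra rules that
// memory writable at EL0 is never privileged-executable and that
// SCTLR.WXN removes execute permission from any writable location.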

bool
MMU::hasUnprivRegime(TranslationRegime regime)
{
    switch (regime) {
      case TranslationRegime::EL10:
      case TranslationRegime::EL20:
        return true;
      default:
        return false;
    }
}

bool
MMU::faultPAN(ThreadContext *tc, uint8_t ap, const RequestPtr &req, Mode mode,
              const bool is_priv, CachedState &state)
{
    bool exception = false;
    switch (state.exceptionLevel) {
      case EL0:
        break;
      case EL1:
        if (checkPAN(tc, ap, req, mode, is_priv, state)) {
            exception = true;
        }
        break;
      case EL2:
        if (state.hcr.e2h && checkPAN(tc, ap, req, mode, is_priv, state)) {
            exception = true;
        }
        break;
      case EL3:
        break;
    }

    return exception;
}

bool
MMU::checkPAN(ThreadContext *tc, uint8_t ap, const RequestPtr &req, Mode mode,
              const bool is_priv, CachedState &state)
{
    // The PAN bit has no effect on:
    // 1) Instruction accesses.
    // 2) Data Cache instructions other than DC ZVA
    // 3) Address translation instructions, other than ATS1E1RP and
    //    ATS1E1WP when ARMv8.2-ATS1E1 is implemented. (Unimplemented in
    //    gem5)
    // 4) Instructions to be treated as unprivileged, unless
    //    HCR_EL2.{E2H, TGE} == {1, 0}
    if (HaveExt(tc, ArmExtension::FEAT_PAN) && state.cpsr.pan && (ap & 0x1) &&
        mode != BaseMMU::Execute) {

        if (req->isCacheMaintenance() &&
            !(req->getFlags() & Request::CACHE_BLOCK_ZERO)) {
            // Cache maintenance other than DC ZVA
            return false;
        } else if (!is_priv && !(state.hcr.e2h && !state.hcr.tge)) {
            // Treated as unprivileged unless HCR_EL2.{E2H, TGE} == {1, 0}
            return false;
        }
        return true;
    }

    return false;
}
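
// Example of the rule above: at EL1 with PSTATE.PAN set, a privileged load
// from a page marked EL0-accessible (ap & 0x1) takes a permission fault,
// while an instruction fetch or a cache-maintenance operation other than
// DC ZVA to the same page is unaffected.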

Addr
MMU::purifyTaggedAddr(Addr vaddr_tainted, ThreadContext *tc,
                      ExceptionLevel el, TCR tcr, bool is_inst,
                      CachedState& state)
{
    const bool selbit = bits(vaddr_tainted, 55);

    // Call the memoized version of computeAddrTop
    const auto topbit = state.computeAddrTop(tc, selbit, is_inst, tcr, el);

    return maskTaggedAddr(vaddr_tainted, tc, el, topbit);
}
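
// Example: with top-byte-ignore enabled on the TTBR0 side
// (TCR_ELx.TBI0 == 1), a tagged pointer such as 0x5a00'0000'1234'5678
// (bit 55 clear) is purified to 0x0000'0000'1234'5678 before any lookup.
// When bit 55 is set the address belongs to the TTBR1 half and the bits
// above the computed top bit are sign-extended to ones instead.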

Fault
MMU::translateMmuOff(ThreadContext *tc, const RequestPtr &req, Mode mode,
                     ArmTranslationType tran_type, Addr vaddr,
                     bool long_desc_format, CachedState &state)
{
    bool is_fetch = (mode == Execute);
    bool is_atomic = req->isAtomic();
    req->setPaddr(vaddr);
    // When the MMU is off the security attribute corresponds to the
    // security state of the processor
    if (state.isSecure)
        req->setFlags(Request::SECURE);
    else
        req->clearFlags(Request::SECURE);
    if (state.aarch64) {
        bool selbit = bits(vaddr, 55);
        TCR tcr1 = tc->readMiscReg(MISCREG_TCR_EL1);
        int topbit = computeAddrTop(tc, selbit, is_fetch, tcr1, currEL(tc));
        int addr_sz = bits(vaddr, topbit, physAddrRange);
        if (addr_sz != 0) {
            Fault f;
            if (is_fetch)
                f = std::make_shared<PrefetchAbort>(vaddr,
                    ArmFault::AddressSizeLL, state.isStage2,
                    ArmFault::UnknownTran);
            else
                f = std::make_shared<DataAbort>(vaddr,
                    TlbEntry::DomainType::NoAccess,
                    is_atomic ? false : mode == Write,
                    ArmFault::AddressSizeLL, state.isStage2,
                    ArmFault::UnknownTran);
            return f;
        }
    }

    // @todo: double check this (ARM ARM issue C B3.2.1)
    if (long_desc_format || state.sctlr.tre == 0 || state.nmrr.ir0 == 0 ||
        state.nmrr.or0 == 0 || state.prrr.tr0 != 0x2) {
        if (!req->isCacheMaintenance()) {
            req->setFlags(Request::UNCACHEABLE);
        }
        req->setFlags(Request::STRICT_ORDER);
    }

    // Set memory attributes
    TlbEntry temp_te;
    temp_te.ns = !state.isSecure;
    bool dc = (HaveExt(tc, ArmExtension::FEAT_VHE) &&
               state.hcr.e2h == 1 && state.hcr.tge == 1) ? 0 : state.hcr.dc;
    bool i_cacheability = state.sctlr.i && !state.sctlr.m;
    if (state.isStage2 || !dc || state.exceptionLevel == EL2) {
        temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
                                 : TlbEntry::MemoryType::StronglyOrdered;
        temp_te.innerAttrs = i_cacheability ? 0x2 : 0x0;
        temp_te.outerAttrs = i_cacheability ? 0x2 : 0x0;
        temp_te.shareable = true;
        temp_te.outerShareable = true;
    } else {
        temp_te.mtype = TlbEntry::MemoryType::Normal;
        temp_te.innerAttrs = 0x3;
        temp_te.outerAttrs = 0x3;
        temp_te.shareable = false;
        temp_te.outerShareable = false;
    }
    temp_te.setAttributes(long_desc_format);
    DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: "
            "%d, innerAttrs: %d, outerAttrs: %d, stage2: %d\n",
            temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
            state.isStage2);
    setAttr(temp_te.attributes);

    return testAndFinalize(req, tc, mode, nullptr, state);
}

Fault
MMU::translateMmuOn(ThreadContext *tc, const RequestPtr &req, Mode mode,
                    Translation *translation, bool &delay, bool timing,
                    bool functional, Addr vaddr,
                    ArmFault::TranMethod tranMethod, CachedState &state)
{
    TlbEntry *te = NULL;
    bool is_fetch = (mode == Execute);
    TlbEntry mergeTe;

    Request::Flags flags = req->getFlags();
    Addr vaddr_tainted = req->getVaddr();

    Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
                              functional, &mergeTe, state);
    // only proceed if we have a valid table entry
    if (!isCompleteTranslation(te) && (fault == NoFault)) delay = true;

    // If we have the table entry transfer some of the attributes to the
    // request that triggered the translation
    if (isCompleteTranslation(te)) {
        // Set memory attributes
        DPRINTF(TLBVerbose,
                "Setting memory attributes: shareable: %d, innerAttrs: %d, "
                "outerAttrs: %d, mtype: %d, stage2: %d\n",
                te->shareable, te->innerAttrs, te->outerAttrs,
                static_cast<uint8_t>(te->mtype), state.isStage2);
        setAttr(te->attributes);

        if (te->nonCacheable && !req->isCacheMaintenance())
            req->setFlags(Request::UNCACHEABLE);

        // Require requests to be ordered if the request goes to
        // strongly ordered or device memory (i.e., anything other
        // than normal memory requires strict order).
        if (te->mtype != TlbEntry::MemoryType::Normal)
            req->setFlags(Request::STRICT_ORDER);

        Addr pa = te->pAddr(vaddr);
        req->setPaddr(pa);

        if (state.isSecure && !te->ns) {
            req->setFlags(Request::SECURE);
        } else {
            req->clearFlags(Request::SECURE);
        }
        if (!is_fetch && fault == NoFault &&
            (vaddr & mask(flags & AlignmentMask)) &&
            (te->mtype != TlbEntry::MemoryType::Normal)) {
            // Unaligned accesses to Device memory should always cause an
            // abort regardless of sctlr.a
            stats.alignFaults++;
            bool is_write = (mode == Write);
            return std::make_shared<DataAbort>(
                vaddr_tainted,
                TlbEntry::DomainType::NoAccess, is_write,
                ArmFault::AlignmentFault, state.isStage2,
                tranMethod);
        }

        if (fault == NoFault)
            fault = testAndFinalize(req, tc, mode, te, state);
    }

    return fault;
}

Fault
MMU::translateFs(const RequestPtr &req, ThreadContext *tc, Mode mode,
                 Translation *translation, bool &delay, bool timing,
                 ArmTranslationType tran_type, bool functional,
                 CachedState &state)
{
    // No such thing as a functional timing access
    assert(!(timing && functional));

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    if (state.aarch64) {
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, state.exceptionLevel,
                                 static_cast<TCR>(state.ttbcr),
                                 mode == Execute, state);
    } else {
        vaddr = vaddr_tainted;
    }
    Request::Flags flags = req->getFlags();

    bool is_fetch = (mode == Execute);
    bool is_write = (mode == Write);
    bool long_desc_format = state.aarch64 || longDescFormatInUse(tc);
    ArmFault::TranMethod tranMethod = long_desc_format ?
        ArmFault::LpaeTran : ArmFault::VmsaTran;

    DPRINTF(TLBVerbose,
            "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
            state.isPriv, flags & UserMode, state.isSecure,
            tran_type & S1S2NsTran);

    DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
            "flags %#lx tranType 0x%x\n", vaddr_tainted, mode,
            state.isStage2, state.scr, state.sctlr, flags, tran_type);

    if (!state.isStage2) {
        if ((req->isInstFetch() && (!state.sctlr.i)) ||
            ((!req->isInstFetch()) && (!state.sctlr.c))) {
            if (!req->isCacheMaintenance()) {
                req->setFlags(Request::UNCACHEABLE);
            }
            req->setFlags(Request::STRICT_ORDER);
        }
    }
    if (!is_fetch) {
        if (state.sctlr.a || !(flags & AllowUnaligned)) {
            if (vaddr & mask(flags & AlignmentMask)) {
                stats.alignFaults++;
                return std::make_shared<DataAbort>(
                    vaddr_tainted,
                    TlbEntry::DomainType::NoAccess, is_write,
                    ArmFault::AlignmentFault, state.isStage2,
                    tranMethod);
            }
        }
    }

    bool vm = state.hcr.vm;
    if (HaveExt(tc, ArmExtension::FEAT_VHE) &&
        state.hcr.e2h == 1 && state.hcr.tge == 1)
        vm = 0;
    else if (state.hcr.dc == 1)
        vm = 1;

    Fault fault = NoFault;
    // If guest MMU is off or hcr.vm=0 go straight to stage2
    if ((state.isStage2 && !vm) || (!state.isStage2 && !state.sctlr.m)) {
        fault = translateMmuOff(tc, req, mode, tran_type, vaddr,
                                long_desc_format, state);
    } else {
        DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
                state.isStage2 ? "IPA" : "VA", vaddr_tainted, state.asid);
        // Translation enabled
        fault = translateMmuOn(tc, req, mode, translation, delay, timing,
                               functional, vaddr, tranMethod, state);
    }

    // Check for Debug Exceptions
    SelfDebug *sd = ArmISA::ISA::getSelfDebug(tc);

    if (sd->enabled() && fault == NoFault) {
        fault = sd->testDebug(tc, req, mode);
    }

    return fault;
}

Fault
MMU::translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode,
                     ArmTranslationType tran_type)
{
    return translateAtomic(req, tc, mode, tran_type, false);
}

Fault
MMU::translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode,
                     ArmTranslationType tran_type, bool stage2)
{
    auto& state = updateMiscReg(tc, tran_type, stage2);

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false,
                            tran_type, false, state);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false, state);
    assert(!delay);
    return fault;
}

Fault
MMU::translateFunctional(const RequestPtr &req, ThreadContext *tc, Mode mode)
{
    return translateFunctional(req, tc, mode, NormalTran, false);
}

Fault
MMU::translateFunctional(const RequestPtr &req, ThreadContext *tc, Mode mode,
                         ArmTranslationType tran_type)
{
    return translateFunctional(req, tc, mode, tran_type, false);
}

Fault
MMU::translateFunctional(const RequestPtr &req, ThreadContext *tc, Mode mode,
                         ArmTranslationType tran_type, bool stage2)
{
    auto& state = updateMiscReg(tc, tran_type, stage2);

    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, NULL, delay, false,
                            tran_type, true, state);
    else
        fault = translateSe(req, tc, mode, NULL, delay, false, state);
    assert(!delay);
    return fault;
}

void
MMU::translateTiming(const RequestPtr &req, ThreadContext *tc,
                     Translation *translation, Mode mode,
                     ArmTranslationType tran_type, bool stage2)
{
    auto& state = updateMiscReg(tc, tran_type, stage2);

    assert(translation);

    translateComplete(req, tc, translation, mode, tran_type,
                      stage2, state);
}

Fault
MMU::translateComplete(const RequestPtr &req, ThreadContext *tc,
                       Translation *translation, Mode mode,
                       ArmTranslationType tran_type, bool call_from_s2)
{
    return translateComplete(req, tc, translation, mode, tran_type,
                             call_from_s2, s1State);
}

Fault
MMU::translateComplete(const RequestPtr &req, ThreadContext *tc,
                       Translation *translation, Mode mode,
                       ArmTranslationType tran_type, bool call_from_s2,
                       CachedState &state)
{
    bool delay = false;
    Fault fault;
    if (FullSystem)
        fault = translateFs(req, tc, mode, translation, delay, true,
                            tran_type, false, state);
    else
        fault = translateSe(req, tc, mode, translation, delay, true, state);

    DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n", delay,
            fault != NoFault);
    // If we have a translation, and we're not in the middle of doing a stage
    // 2 translation tell the translation that we've either finished or it's
    // going to take a while. By not doing this when we're in the middle of a
    // stage 2 translation we prevent marking the translation as delayed twice,
    // once when the translation starts and again when the stage 1 translation
    // completes.

    if (translation && (call_from_s2 || !state.stage2Req || req->hasPaddr() ||
        fault != NoFault)) {
        if (!delay)
            translation->finish(fault, req, tc, mode);
        else
            translation->markDelayed();
    }
    return fault;
}

vmid_t
MMU::getVMID(ThreadContext *tc) const
{
    AA64MMFR1 mmfr1 = tc->readMiscReg(MISCREG_ID_AA64MMFR1_EL1);
    VTCR_t vtcr = tc->readMiscReg(MISCREG_VTCR_EL2);
    vmid_t vmid = 0;

    switch (mmfr1.vmidbits) {
      case 0b0000:
        // 8 bits
        vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48);
        break;
      case 0b0010:
        if (vtcr.vs && ELIs64(tc, EL2)) {
            // 16 bits
            vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 63, 48);
        } else {
            // 8 bits
            vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48);
        }
        break;
      default:
        panic("Reserved ID_AA64MMFR1_EL1.VMIDBits value: %#x",
              mmfr1.vmidbits);
    }

    return vmid;
}

MMU::CachedState&
MMU::updateMiscReg(ThreadContext *tc,
                   ArmTranslationType tran_type, bool stage2)
{
    // check if the regs have changed, or the translation mode is different.
    // NOTE: the tran type doesn't affect stage 2 TLB's as they only handle
    // one type of translation anyway

    auto& state = stage2 ? s2State : s1State;
    if (state.miscRegValid && miscRegContext == tc->contextId() &&
        ((tran_type == state.curTranType) || stage2)) {

    } else {
        DPRINTF(TLBVerbose, "TLB variables changed!\n");
        state.updateMiscReg(tc, tran_type);

        itbStage2->setVMID(state.vmid);
        dtbStage2->setVMID(state.vmid);

        for (auto tlb : instruction) {
            static_cast<TLB*>(tlb)->setVMID(state.vmid);
        }
        for (auto tlb : data) {
            static_cast<TLB*>(tlb)->setVMID(state.vmid);
        }
        for (auto tlb : unified) {
            static_cast<TLB*>(tlb)->setVMID(state.vmid);
        }

        miscRegContext = tc->contextId();
    }

    if (state.directToStage2) {
        s2State.updateMiscReg(tc, tran_type);
        return s2State;
    } else {
        return state;
    }
}

void
MMU::CachedState::updateMiscReg(ThreadContext *tc,
                                ArmTranslationType tran_type)
{
    cpsr = tc->readMiscReg(MISCREG_CPSR);
    hcr = tc->readMiscReg(MISCREG_HCR_EL2);
    scr = tc->readMiscReg(MISCREG_SCR_EL3);

    // Dependencies: SCR/SCR_EL3, CPSR
    isSecure = ArmISA::isSecure(tc) &&
        !(tran_type & HypMode) && !(tran_type & S1S2NsTran);

    exceptionLevel = tranTypeEL(cpsr, scr, tran_type);
    currRegime = translationRegime(tc, exceptionLevel);
    aarch64 = isStage2 ?
        ELIs64(tc, EL2) :
        ELIs64(tc, translationEl(currRegime));

    if (aarch64) { // AArch64
        // determine EL we need to translate in
        switch (currRegime) {
          case TranslationRegime::EL10:
            {
                sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
                ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
                uint64_t ttbr_asid = ttbcr.a1 ?
                    tc->readMiscReg(MISCREG_TTBR1_EL1) :
                    tc->readMiscReg(MISCREG_TTBR0_EL1);
                asid = bits(ttbr_asid,
                            (mmu->haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
            }
            break;
          case TranslationRegime::EL20:
            {
                // VHE code for EL2&0 regime
                sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
                ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
                uint64_t ttbr_asid = ttbcr.a1 ?
                    tc->readMiscReg(MISCREG_TTBR1_EL2) :
                    tc->readMiscReg(MISCREG_TTBR0_EL2);
                asid = bits(ttbr_asid,
                            (mmu->haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
            }
            break;
          case TranslationRegime::EL2:
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
            asid = -1;
            break;
          case TranslationRegime::EL3:
            sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
            ttbcr = tc->readMiscReg(MISCREG_TCR_EL3);
            asid = -1;
            break;
        }

        isPriv = exceptionLevel != EL0;
        if (mmu->release()->has(ArmExtension::VIRTUALIZATION)) {
            vmid = getVMID(tc);
            bool vm = hcr.vm;
            if (HaveExt(tc, ArmExtension::FEAT_VHE) &&
                hcr.e2h == 1 && hcr.tge == 1) {
                vm = 0;
            }

            if (hcr.e2h == 1 && (exceptionLevel == EL2
                                 || (hcr.tge == 1 && exceptionLevel == EL0))) {
                directToStage2 = false;
                stage2Req = false;
                stage2DescReq = false;
            } else {
                // Work out if we should skip the first stage of translation
                // and go directly to stage 2. This value is cached so we
                // don't have to compute it for every translation.
                const bool el2_enabled = EL2Enabled(tc);
                stage2Req = isStage2 ||
                    (vm && exceptionLevel < EL2 && el2_enabled &&
                     !(tran_type & S1CTran) &&
                     !(tran_type & S1E1Tran)); // <--- FIX THIS HACK
                stage2DescReq = isStage2 ||
                    (vm && exceptionLevel < EL2 && el2_enabled);
                directToStage2 = !isStage2 && stage2Req && !sctlr.m;
            }
        } else {
            vmid = 0;
            directToStage2 = false;
            stage2Req = false;
            stage2DescReq = false;
        }
    } else { // AArch32
        sctlr = tc->readMiscReg(snsBankedIndex(MISCREG_SCTLR, tc,
                                               !isSecure));
        ttbcr = tc->readMiscReg(snsBankedIndex(MISCREG_TTBCR, tc,
                                               !isSecure));
        isPriv = cpsr.mode != MODE_USER;
        if (longDescFormatInUse(tc)) {
            uint64_t ttbr_asid = tc->readMiscReg(
                snsBankedIndex(ttbcr.a1 ? MISCREG_TTBR1 :
                                          MISCREG_TTBR0,
                               tc, !isSecure));
            asid = bits(ttbr_asid, 55, 48);
        } else { // Short-descriptor translation table format in use
            CONTEXTIDR context_id = tc->readMiscReg(snsBankedIndex(
                MISCREG_CONTEXTIDR, tc, !isSecure));
            asid = context_id.asid;
        }
        prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR, tc,
                                              !isSecure));
        nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR, tc,
                                              !isSecure));
        dacr = tc->readMiscReg(snsBankedIndex(MISCREG_DACR, tc,
                                              !isSecure));

        if (mmu->release()->has(ArmExtension::VIRTUALIZATION)) {
            vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
            if (exceptionLevel == EL2) {
                sctlr = tc->readMiscReg(MISCREG_HSCTLR);
            }
            // Work out if we should skip the first stage of translation
            // and go directly to stage 2. This value is cached so we
            // don't have to compute it for every translation.
            const bool el2_enabled = EL2Enabled(tc);
            stage2Req = isStage2 ||
                (hcr.vm && exceptionLevel < EL2 && el2_enabled &&
                 !(tran_type & S1CTran));
            stage2DescReq = isStage2 ||
                (hcr.vm && exceptionLevel < EL2 && el2_enabled);
            directToStage2 = !isStage2 && stage2Req && !sctlr.m;
        } else {
            vmid = 0;
            stage2Req = false;
            directToStage2 = false;
            stage2DescReq = false;
        }
    }
    miscRegValid = true;
    curTranType = tran_type;
}

ExceptionLevel
MMU::tranTypeEL(CPSR cpsr, SCR scr, ArmTranslationType type)
{
    switch (type) {
      case S1E0Tran:
      case S12E0Tran:
        return EL0;

      case S1E1Tran:
      case S12E1Tran:
      case S1S2NsTran:
        return EL1;

      case S1E2Tran:
      case HypMode:
        return EL2;

      case S1E3Tran:
        return EL3;

      case S1CTran:
        return currEL(cpsr) == EL3 && scr.ns == 0 ?
            EL3 : EL1;

      case NormalTran:
        return currEL(cpsr);

      default:
        panic("Unknown translation mode!\n");
    }
}

Fault
MMU::getTE(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode,
           Translation *translation, bool timing, bool functional,
           bool is_secure, ArmTranslationType tran_type,
           bool stage2)
{
    return getTE(te, req, tc, mode, translation, timing, functional,
                 is_secure, tran_type, stage2 ? s2State : s1State);
}

TlbEntry*
MMU::lookup(Addr va, uint16_t asid, vmid_t vmid, bool secure,
            bool functional, bool ignore_asn, TranslationRegime regime,
            bool stage2, BaseMMU::Mode mode)
{
    TLB *tlb = getTlb(mode, stage2);

    TlbEntry::Lookup lookup_data;

    lookup_data.va = va;
    lookup_data.asn = asid;
    lookup_data.ignoreAsn = ignore_asn;
    lookup_data.vmid = vmid;
    lookup_data.secure = secure;
    lookup_data.functional = functional;
    lookup_data.targetRegime = regime;
    lookup_data.mode = mode;

    return tlb->multiLookup(lookup_data);
}
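
// A sketch of a functional, non-destructive instruction-side probe through
// the interface above (a functional lookup does not update replacement
// state or statistics):
//
//     TlbEntry *entry = lookup(vaddr, asid, vmid,
//                              false,                   // secure
//                              true,                    // functional
//                              false,                   // ignore_asn
//                              TranslationRegime::EL10,
//                              false,                   // stage2
//                              BaseMMU::Execute);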

Fault
MMU::getTE(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode,
           Translation *translation, bool timing, bool functional,
           bool is_secure, ArmTranslationType tran_type,
           CachedState &state)
{
    // In a 2-stage system, the IPA->PA translation can be started via this
    // call so make sure the miscRegs are correct.
    if (state.isStage2) {
        updateMiscReg(tc, tran_type, true);
    }

    Addr vaddr_tainted = req->getVaddr();
    Addr vaddr = 0;
    TranslationRegime regime = state.currRegime;

    if (state.aarch64) {
        vaddr = purifyTaggedAddr(vaddr_tainted, tc, state.exceptionLevel,
                                 static_cast<TCR>(state.ttbcr),
                                 mode == Execute, state);
    } else {
        vaddr = vaddr_tainted;
    }

    *te = lookup(vaddr, state.asid, state.vmid, is_secure, false,
                 false, regime, state.isStage2, mode);

    if (!isCompleteTranslation(*te)) {
        if (req->isPrefetch()) {
            // if the request is a prefetch don't attempt to fill the TLB or go
            // any further with the memory access (here we can safely use the
            // fault status for the short desc. format in all cases)
            stats.prefetchFaults++;
            return std::make_shared<PrefetchAbort>(
                vaddr_tainted, ArmFault::PrefetchTLBMiss, state.isStage2);
        }

        // start translation table walk, pass variables rather than
        // re-retrieving in table walker for speed
        DPRINTF(TLB,
                "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
                vaddr_tainted, state.asid, state.vmid);

        Fault fault;
        fault = getTableWalker(mode, state.isStage2)->walk(
            req, tc, state.asid, state.vmid, mode,
            translation, timing, functional, is_secure,
            tran_type, state.stage2DescReq, *te);

        // for timing mode, return and wait for table walk,
        if (timing || fault != NoFault) {
            return fault;
        }

        *te = lookup(vaddr, state.asid, state.vmid, is_secure,
                     true, false, regime, state.isStage2, mode);
        assert(*te);
    }
    return NoFault;
}

Fault
MMU::getResultTe(TlbEntry **te, const RequestPtr &req,
                 ThreadContext *tc, Mode mode,
                 Translation *translation, bool timing, bool functional,
                 TlbEntry *mergeTe, CachedState &state)
{
    Fault fault;

    if (state.isStage2) {
        // We are already in the stage 2 TLB. Grab the table entry for stage
        // 2 only. We are here because stage 1 translation is disabled.
        TlbEntry *s2_te = nullptr;
        // Get the stage 2 table entry
        fault = getTE(&s2_te, req, tc, mode, translation, timing, functional,
                      state.isSecure, state.curTranType, state);
        // Check permissions of stage 2
        if (isCompleteTranslation(s2_te) && (fault == NoFault)) {
            if (state.aarch64)
                fault = checkPermissions64(s2_te, req, mode, tc, state);
            else
                fault = checkPermissions(s2_te, req, mode, state);
        }
        *te = s2_te;
        return fault;
    }

    TlbEntry *s1_te = nullptr;

    Addr vaddr_tainted = req->getVaddr();

    // Get the stage 1 table entry
    fault = getTE(&s1_te, req, tc, mode, translation, timing, functional,
                  state.isSecure, state.curTranType, state);
    // only proceed if we have a valid table entry
    if (isCompleteTranslation(s1_te) && (fault == NoFault)) {
        // Check stage 1 permissions before checking stage 2
        if (state.aarch64)
            fault = checkPermissions64(s1_te, req, mode, tc, state);
        else
            fault = checkPermissions(s1_te, req, mode, state);
        if (state.stage2Req & (fault == NoFault)) {
            Stage2LookUp *s2_lookup = new Stage2LookUp(this, *s1_te,
                req, translation, mode, timing, functional, state.isSecure,
                state.curTranType);
            fault = s2_lookup->getTe(tc, mergeTe);
            if (s2_lookup->isComplete()) {
                *te = mergeTe;
                // We've finished with the lookup so delete it
                delete s2_lookup;
            } else {
                // The lookup hasn't completed, so we can't delete it now. We
                // get round this by asking the object to self delete when the
                // translation is complete.
                s2_lookup->setSelfDelete();
            }
        } else {
            // This case deals with an S1 hit (or bypass), followed by
            // an S2 hit-but-perms issue
            if (state.isStage2) {
                DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
                        vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0,
                        fault);
                if (fault != NoFault) {
                    auto arm_fault = reinterpret_cast<ArmFault*>(fault.get());
                    arm_fault->annotate(ArmFault::S1PTW, false);
                    arm_fault->annotate(ArmFault::OVA, vaddr_tainted);
                }
            }
            *te = s1_te;
        }
    }
    return fault;
}

bool
MMU::isCompleteTranslation(TlbEntry *entry) const
{
    return entry && !entry->partial;
}

void
MMU::takeOverFrom(BaseMMU *old_mmu)
{
    BaseMMU::takeOverFrom(old_mmu);

    auto *ommu = dynamic_cast<MMU*>(old_mmu);
    assert(ommu);

    _attr = ommu->_attr;

    s1State = ommu->s1State;
    s2State = ommu->s2State;
}

void
MMU::setTestInterface(SimObject *_ti)
{
    if (!_ti) {
        test = nullptr;
    } else {
        TlbTestInterface *ti(dynamic_cast<TlbTestInterface *>(_ti));
        fatal_if(!ti, "%s is not a valid ARM TLB tester\n", _ti->name());
        test = ti;
        itbWalker->setTestInterface(ti);
        dtbWalker->setTestInterface(ti);
        itbStage2Walker->setTestInterface(ti);
        dtbStage2Walker->setTestInterface(ti);
    }
}

Fault
MMU::testTranslation(const RequestPtr &req, Mode mode,
                     TlbEntry::DomainType domain, CachedState &state) const
{
    if (!test || !req->hasSize() || req->getSize() == 0 ||
        req->isCacheMaintenance()) {
        return NoFault;
    } else {
        return test->translationCheck(req, state.isPriv, mode, domain);
    }
}

MMU::Stats::Stats(statistics::Group *parent)
    : statistics::Group(parent),
      ADD_STAT(alignFaults, statistics::units::Count::get(),
               "Number of MMU faults due to alignment restrictions"),
      ADD_STAT(prefetchFaults, statistics::units::Count::get(),
               "Number of MMU faults due to prefetch"),
      ADD_STAT(domainFaults, statistics::units::Count::get(),
               "Number of MMU faults due to domain restrictions"),
      ADD_STAT(permsFaults, statistics::units::Count::get(),
               "Number of MMU faults due to permissions restrictions")
{
}

} // namespace gem5