mmu.cc
1/*
2 * Copyright (c) 2010-2013, 2016-2022 Arm Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2001-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 */
40
41#include "arch/arm/mmu.hh"
42
43#include "arch/arm/isa.hh"
44#include "arch/arm/reg_abi.hh"
45#include "arch/arm/stage2_lookup.hh"
46#include "arch/arm/table_walker.hh"
47#include "arch/arm/tlbi_op.hh"
48#include "debug/TLB.hh"
49#include "debug/TLBVerbose.hh"
50#include "mem/packet_access.hh"
51#include "sim/pseudo_inst.hh"
52#include "sim/process.hh"
53
54namespace gem5
55{
56
57using namespace ArmISA;
58
59MMU::MMU(const ArmMMUParams &p)
60 : BaseMMU(p),
61 itbStage2(p.stage2_itb), dtbStage2(p.stage2_dtb),
62 itbWalker(p.itb_walker), dtbWalker(p.dtb_walker),
63 itbStage2Walker(p.stage2_itb_walker),
64 dtbStage2Walker(p.stage2_dtb_walker),
65 test(nullptr),
66 miscRegContext(0),
67 s1State(this, false), s2State(this, true),
68 _attr(0),
69 _release(nullptr),
70 _hasWalkCache(false),
71 stats(this)
72{
73 // Cache system-level properties
74 if (FullSystem) {
75 ArmSystem *arm_sys = dynamic_cast<ArmSystem *>(p.sys);
76 assert(arm_sys);
77 haveLargeAsid64 = arm_sys->haveLargeAsid64();
78 physAddrRange = arm_sys->physAddrRange();
79
80 _release = arm_sys->releaseFS();
81 } else {
82 haveLargeAsid64 = false;
83 physAddrRange = 48;
84
85 _release = p.release_se;
86 }
87
88 m5opRange = p.sys->m5opRange();
89}
90
91void
92MMU::init()
93{
94 itbWalker->setMmu(this);
95 dtbWalker->setMmu(this);
96 itbStage2Walker->setMmu(this);
97 dtbStage2Walker->setMmu(this);
98
99 itbStage2->setTableWalker(itbStage2Walker);
100 dtbStage2->setTableWalker(dtbStage2Walker);
101
102 getITBPtr()->setTableWalker(itbWalker);
103 getDTBPtr()->setTableWalker(dtbWalker);
104
105 BaseMMU::init();
106
107 _hasWalkCache = checkWalkCache();
108}
109
110bool
111MMU::checkWalkCache() const
112{
113 for (auto tlb : instruction) {
114 if (static_cast<TLB*>(tlb)->walkCache())
115 return true;
116 }
117 for (auto tlb : data) {
118 if (static_cast<TLB*>(tlb)->walkCache())
119 return true;
120 }
121 for (auto tlb : unified) {
122 if (static_cast<TLB*>(tlb)->walkCache())
123 return true;
124 }
125
126 return false;
127}
128
129void
130MMU::drainResume()
131{
132 s1State.miscRegValid = false;
133 s2State.miscRegValid = false;
134}
135
136TLB *
137MMU::getTlb(BaseMMU::Mode mode, bool stage2) const
138{
139 if (mode == BaseMMU::Execute) {
140 if (stage2)
141 return itbStage2;
142 else
143 return getITBPtr();
144 } else {
145 if (stage2)
146 return dtbStage2;
147 else
148 return getDTBPtr();
149 }
150}
151
152TableWalker *
153MMU::getTableWalker(BaseMMU::Mode mode, bool stage2) const
154{
155 if (mode == BaseMMU::Execute) {
156 if (stage2)
157 return itbStage2Walker;
158 else
159 return itbWalker;
160 } else {
161 if (stage2)
162 return dtbStage2Walker;
163 else
164 return dtbWalker;
165 }
166}
167
168bool
169MMU::translateFunctional(ThreadContext *tc, Addr va, Addr &pa)
170{
171 CachedState& state = updateMiscReg(tc, NormalTran, false);
172
173 auto tlb = getTlb(BaseMMU::Read, state.directToStage2);
174
175 TlbEntry::Lookup lookup_data;
176
177 lookup_data.va = va;
178 lookup_data.asn = state.asid;
179 lookup_data.ignoreAsn = false;
180 lookup_data.vmid = state.vmid;
181 lookup_data.hyp = state.isHyp;
182 lookup_data.secure = state.isSecure;
183 lookup_data.functional = true;
184 lookup_data.targetEL = state.aarch64 ? state.aarch64EL : EL1;
185 lookup_data.inHost = false;
186 lookup_data.mode = BaseMMU::Read;
187
188 TlbEntry *e = tlb->multiLookup(lookup_data);
189
190 if (!e)
191 return false;
192 pa = e->pAddr(va);
193 return true;
194}
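// Illustrative usage sketch, assuming a ThreadContext `tc` and an MMU
// pointer `mmu` obtained from the enclosing simulation object (names are
// hypothetical): a debugger-style, side-effect-free VA->PA probe.
//
//     Addr pa;
//     if (mmu->translateFunctional(tc, /* va */ 0x80001000, pa))
//         DPRINTF(TLB, "VA %#x maps to PA %#x\n", 0x80001000, pa);
//     // A miss returns false; no table walk is started because the
//     // lookup above is marked functional.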
195
196void
197MMU::invalidateMiscReg()
198{
199 s1State.miscRegValid = false;
200 s1State.computeAddrTop.flush();
201 s2State.computeAddrTop.flush();
202}
203
204Fault
205MMU::finalizePhysical(const RequestPtr &req,
206 ThreadContext *tc, Mode mode) const
207{
208 const Addr paddr = req->getPaddr();
209
210 if (m5opRange.contains(paddr)) {
211 uint8_t func;
212 pseudo_inst::decodeAddrOffset(paddr - m5opRange.start(), func);
213 req->setLocalAccessor(
214 [func, mode](ThreadContext *tc, PacketPtr pkt) -> Cycles
215 {
216 uint64_t ret;
217 if (inAArch64(tc))
218 pseudo_inst::pseudoInst<RegABI64>(tc, func, ret);
219 else
220 pseudo_inst::pseudoInst<RegABI32>(tc, func, ret);
221
222 if (mode == Read)
223 pkt->setLE(ret);
224
225 return Cycles(1);
226 }
227 );
228 }
229
230 return NoFault;
231}
232
233
234Fault
235MMU::translateSe(const RequestPtr &req, ThreadContext *tc, Mode mode,
236 Translation *translation, bool &delay, bool timing,
237 CachedState &state)
238{
239 updateMiscReg(tc, NormalTran, state.isStage2);
240 Addr vaddr_tainted = req->getVaddr();
241 Addr vaddr = 0;
242 if (state.aarch64) {
243 vaddr = purifyTaggedAddr(vaddr_tainted, tc, state.aarch64EL,
244 static_cast<TCR>(state.ttbcr), mode==Execute, state);
245 } else {
246 vaddr = vaddr_tainted;
247 }
248 Request::Flags flags = req->getFlags();
249
250 bool is_fetch = (mode == Execute);
251 bool is_write = (mode == Write);
252
253 if (!is_fetch) {
254 if (state.sctlr.a || !(flags & AllowUnaligned)) {
255 if (vaddr & mask(flags & AlignmentMask)) {
256 // LPAE is always disabled in SE mode
257 return std::make_shared<DataAbort>(
258 vaddr_tainted,
259 TlbEntry::DomainType::NoAccess, is_write,
260 ArmFault::AlignmentFault, state.isStage2,
261 ArmFault::VmsaTran);
262 }
263 }
264 }
265
266 Addr paddr;
267 Process *p = tc->getProcessPtr();
268
269 if (!p->pTable->translate(vaddr, paddr))
270 return std::make_shared<GenericPageTableFault>(vaddr_tainted);
271 req->setPaddr(paddr);
272
273 return finalizePhysical(req, tc, mode);
274}
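// Worked example of the alignment test above, assuming a 4-byte load at
// vaddr 0x1002: the low request-flag bits encode log2 of the access size,
// so flags & AlignmentMask == 2 and
//
//     vaddr & mask(2) == 0x1002 & 0x3 == 0x2 != 0
//
// which makes the access misaligned and raises ArmFault::AlignmentFault.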
275
276Fault
277MMU::checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode,
278 bool stage2)
279{
280 return checkPermissions(te, req, mode, stage2 ? s2State : s1State);
281}
282
283Fault
284MMU::checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode,
285 CachedState &state)
286{
287 // A data cache maintenance instruction that operates by MVA does
288 // not generate a Data Abort exception due to a Permission fault
289 if (req->isCacheMaintenance()) {
290 return NoFault;
291 }
292
293 Addr vaddr = req->getVaddr(); // 32-bit don't have to purify
294 Request::Flags flags = req->getFlags();
295 bool is_fetch = (mode == Execute);
296 bool is_write = (mode == Write);
297 bool is_priv = state.isPriv && !(flags & UserMode);
298
299 // Get the translation type from the actual table entry
300 ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran
301 : ArmFault::VmsaTran;
302
303 // If this is the second stage of translation and the request is for a
304 // stage 1 page table walk then we need to check the HCR.PTW bit. This
305 // allows us to generate a fault if the request targets an area marked
306 // as a device or strongly ordered.
307 if (state.isStage2 && req->isPTWalk() && state.hcr.ptw &&
308 (te->mtype != TlbEntry::MemoryType::Normal)) {
309 return std::make_shared<DataAbort>(
310 vaddr, te->domain, is_write,
311 ArmFault::PermissionLL + te->lookupLevel,
312 state.isStage2, tranMethod);
313 }
314
315 // Generate an alignment fault for unaligned data accesses to device or
316 // strongly ordered memory
317 if (!is_fetch) {
318 if (te->mtype != TlbEntry::MemoryType::Normal) {
319 if (vaddr & mask(flags & AlignmentMask)) {
320 stats.alignFaults++;
321 return std::make_shared<DataAbort>(
322 vaddr, TlbEntry::DomainType::NoAccess, is_write,
323 ArmFault::AlignmentFault, state.isStage2,
324 tranMethod);
325 }
326 }
327 }
328
329 if (te->nonCacheable) {
330 // Prevent prefetching from I/O devices.
331 if (req->isPrefetch()) {
332 // Here we can safely use the fault status for the short
333 // desc. format in all cases
334 return std::make_shared<PrefetchAbort>(
335 vaddr, ArmFault::PrefetchUncacheable,
336 state.isStage2, tranMethod);
337 }
338 }
339
340 if (!te->longDescFormat) {
341 switch ((state.dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
342 case 0:
343 stats.domainFaults++;
344 DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
345 " domain: %#x write:%d\n", state.dacr,
346 static_cast<uint8_t>(te->domain), is_write);
347 if (is_fetch) {
348 // Use PC value instead of vaddr because vaddr might
349 // be aligned to cache line and should not be the
350 // address reported in FAR
351 return std::make_shared<PrefetchAbort>(
352 req->getPC(),
353 ArmFault::DomainLL + te->lookupLevel,
354 state.isStage2, tranMethod);
355 } else
356 return std::make_shared<DataAbort>(
357 vaddr, te->domain, is_write,
358 ArmFault::DomainLL + te->lookupLevel,
359 state.isStage2, tranMethod);
360 case 1:
361 // Continue with permissions check
362 break;
363 case 2:
364 panic("UNPRED domain\n");
365 case 3:
366 return NoFault;
367 }
368 }
369
370 // The 'ap' variable is AP[2:0] or {AP[2,1],1b'0}, i.e. always three bits
371 uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
372 uint8_t hap = te->hap;
373
374 if (state.sctlr.afe == 1 || te->longDescFormat)
375 ap |= 1;
376
377 bool abt;
378 bool isWritable = true;
379 // If this is a stage 2 access (eg for reading stage 1 page table entries)
380 // then don't perform the AP permissions check; we still do the HAP check
381 // below.
382 if (state.isStage2) {
383 abt = false;
384 } else {
385 switch (ap) {
386 case 0:
387 DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
388 (int)state.sctlr.rs);
389 if (!state.sctlr.xp) {
390 switch ((int)state.sctlr.rs) {
391 case 2:
392 abt = is_write;
393 break;
394 case 1:
395 abt = is_write || !is_priv;
396 break;
397 case 0:
398 case 3:
399 default:
400 abt = true;
401 break;
402 }
403 } else {
404 abt = true;
405 }
406 break;
407 case 1:
408 abt = !is_priv;
409 break;
410 case 2:
411 abt = !is_priv && is_write;
412 isWritable = is_priv;
413 break;
414 case 3:
415 abt = false;
416 break;
417 case 4:
418 panic("UNPRED permissions\n");
419 case 5:
420 abt = !is_priv || is_write;
421 isWritable = false;
422 break;
423 case 6:
424 case 7:
425 abt = is_write;
426 isWritable = false;
427 break;
428 default:
429 panic("Unknown permissions %#x\n", ap);
430 }
431 }
432
433 bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
434 bool xn = te->xn || (isWritable && state.sctlr.wxn) ||
435 (ap == 3 && state.sctlr.uwxn && is_priv);
436 if (is_fetch && (abt || xn ||
437 (te->longDescFormat && te->pxn && is_priv) ||
438 (state.isSecure && te->ns && state.scr.sif))) {
439 stats.permsFaults++;
440 DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
441 "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d \n",
442 ap, is_priv, is_write, te->ns,
443 state.scr.sif, state.sctlr.afe);
444 // Use PC value instead of vaddr because vaddr might be aligned to
445 // cache line and should not be the address reported in FAR
446 return std::make_shared<PrefetchAbort>(
447 req->getPC(),
448 ArmFault::PermissionLL + te->lookupLevel,
449 state.isStage2, tranMethod);
450 } else if (abt | hapAbt) {
451 stats.permsFaults++;
452 DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
453 " write:%d\n", ap, is_priv, is_write);
454 return std::make_shared<DataAbort>(
455 vaddr, te->domain, is_write,
456 ArmFault::PermissionLL + te->lookupLevel,
457 state.isStage2 | !abt, tranMethod);
458 }
459 return NoFault;
460}
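// Worked example of the short-descriptor domain check above, assuming
// DACR == 0x55555553 and te->domain == 0: the extracted field is
//
//     (0x55555553 >> (0 * 2)) & 0x3 == 0x3 // "manager"
//
// so the function returns NoFault and skips the AP decode entirely; a
// field of 0x1 ("client") continues to the permission check, and 0x0
// ("no access") raises a domain fault.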
461
462Fault
463MMU::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
464 ThreadContext *tc, bool stage2)
465{
466 return checkPermissions64(te, req, mode, tc, stage2 ? s2State : s1State);
467}
468
469Fault
470MMU::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
471 ThreadContext *tc, CachedState &state)
472{
473 assert(state.aarch64);
474
475 // A data cache maintenance instruction that operates by VA does
476 // not generate a Permission fault unless:
477 // * It is a data cache invalidate (dc ivac) which requires write
478 // permissions to the VA, or
479 // * It is executed from EL0
480 if (req->isCacheClean() && state.aarch64EL != EL0 && !state.isStage2) {
481 return NoFault;
482 }
483
484 Addr vaddr_tainted = req->getVaddr();
485 Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, state.aarch64EL,
486 static_cast<TCR>(state.ttbcr), mode==Execute, state);
487
488 Request::Flags flags = req->getFlags();
489 bool is_fetch = (mode == Execute);
490 // Cache clean operations require read permissions to the specified VA
491 bool is_write = !req->isCacheClean() && mode == Write;
492 bool is_atomic = req->isAtomic();
493
494 updateMiscReg(tc, state.curTranType, state.isStage2);
495
496 // If this is the second stage of translation and the request is for a
497 // stage 1 page table walk then we need to check the HCR.PTW bit. This
498 // allows us to generate a fault if the request targets an area marked
499 // as a device or strongly ordered.
500 if (state.isStage2 && req->isPTWalk() && state.hcr.ptw &&
501 (te->mtype != TlbEntry::MemoryType::Normal)) {
502 return std::make_shared<DataAbort>(
503 vaddr_tainted, te->domain, is_write,
504 ArmFault::PermissionLL + te->lookupLevel,
505 state.isStage2, ArmFault::LpaeTran);
506 }
507
508 // Generate an alignment fault for unaligned accesses to device or
509 // strongly ordered memory
510 if (!is_fetch) {
511 if (te->mtype != TlbEntry::MemoryType::Normal) {
512 if (vaddr & mask(flags & AlignmentMask)) {
513 stats.alignFaults++;
514 return std::make_shared<DataAbort>(
515 vaddr_tainted,
516 TlbEntry::DomainType::NoAccess,
517 is_atomic ? false : is_write,
518 ArmFault::AlignmentFault, state.isStage2,
519 ArmFault::LpaeTran);
520 }
521 }
522 }
523
524 if (te->nonCacheable) {
525 // Prevent prefetching from I/O devices.
526 if (req->isPrefetch()) {
527 // Here we can safely use the fault status for the short
528 // desc. format in all cases
529 return std::make_shared<PrefetchAbort>(
530 vaddr_tainted,
531 ArmFault::PrefetchUncacheable,
532 state.isStage2, ArmFault::LpaeTran);
533 }
534 }
535
536 bool grant = false;
537 // grant_read is used for faults from an atomic instruction that
538 // both reads and writes a memory location. From an ISS point
539 // of view such accesses count as reads if a read to that address
540 // would have generated the fault; they count as writes otherwise
541 bool grant_read = true;
542
543 if (state.isStage2) {
544 std::tie(grant, grant_read) = s2PermBits64(te, req, mode, tc, state,
545 (!is_write && !is_fetch), is_write, is_fetch);
546 } else {
547 std::tie(grant, grant_read) = s1PermBits64(te, req, mode, tc, state,
548 (!is_write && !is_fetch), is_write, is_fetch);
549 }
550
551 if (!grant) {
552 if (is_fetch) {
553 stats.permsFaults++;
554 DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
555 "ns:%d scr.sif:%d sctlr.afe: %d\n",
556 te->ns, state.scr.sif, state.sctlr.afe);
557 // Use PC value instead of vaddr because vaddr might be aligned to
558 // cache line and should not be the address reported in FAR
559 return std::make_shared<PrefetchAbort>(
560 req->getPC(),
561 ArmFault::PermissionLL + te->lookupLevel,
562 state.isStage2, ArmFault::LpaeTran);
563 } else {
564 stats.permsFaults++;
565 DPRINTF(TLB, "TLB Fault: Data abort on permission check. "
566 "ns:%d\n", te->ns);
567 return std::make_shared<DataAbort>(
568 vaddr_tainted, te->domain,
569 (is_atomic && !grant_read) ? false : is_write,
570 ArmFault::PermissionLL + te->lookupLevel,
571 state.isStage2, ArmFault::LpaeTran);
572 }
573 }
574
575 return NoFault;
576}
577
578std::pair<bool, bool>
579MMU::s2PermBits64(TlbEntry *te, const RequestPtr &req, Mode mode,
580 ThreadContext *tc, CachedState &state, bool r, bool w, bool x)
581{
582 assert(ArmSystem::haveEL(tc, EL2) && state.aarch64EL != EL2);
583
584 // In stage 2 we use the hypervisor access permission bits.
585 // The following permissions are described in ARM DDI 0487A.f
586 // D4-1802
587 bool grant = false;
588 bool grant_read = te->hap & 0b01;
589 bool grant_write = te->hap & 0b10;
590
591 uint8_t xn = te->xn;
592 uint8_t pxn = te->pxn;
593
594 if (ArmSystem::haveEL(tc, EL3) && state.isSecure &&
595 te->ns && state.scr.sif) {
596 xn = true;
597 }
598
599 DPRINTF(TLBVerbose,
600 "Checking S2 permissions: hap:%d, xn:%d, pxn:%d, r:%d, "
601 "w:%d, x:%d\n", te->hap, xn, pxn, r, w, x);
602
603 if (x) {
604 grant = grant_read && !xn;
605 } else if (req->isAtomic()) {
606 grant = grant_read || grant_write;
607 } else if (w) {
608 grant = grant_write;
609 } else if (r) {
610 grant = grant_read;
611 } else {
612 panic("Invalid Operation\n");
613 }
614
615 return std::make_pair(grant, grant_read);
616}
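// Example of the stage 2 HAP decode above, assuming te->hap == 0b01
// (stage 2 read-only): a load is granted, a store is denied, a fetch
// additionally requires !xn, and an atomic passes this check if either
// permission is present, with grant_read returned so the caller can
// attribute a later fault correctly in the ISS.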
617
618std::pair<bool, bool>
619MMU::s1PermBits64(TlbEntry *te, const RequestPtr &req, Mode mode,
620 ThreadContext *tc, CachedState &state, bool r, bool w, bool x)
621{
622 bool grant = false, grant_read = true, grant_write = true, grant_exec = true;
623
624 const uint8_t ap = te->ap & 0b11; // 2-bit access protection field
625 const bool is_priv = state.isPriv && !(req->getFlags() & UserMode);
626
627 bool wxn = state.sctlr.wxn;
628 uint8_t xn = te->xn;
629 uint8_t pxn = te->pxn;
630
631 DPRINTF(TLBVerbose, "Checking S1 permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
632 "w:%d, x:%d, is_priv: %d, wxn: %d\n", ap, xn,
633 pxn, r, w, x, is_priv, wxn);
634
635 if (faultPAN(tc, ap, req, mode, is_priv, state)) {
636 return std::make_pair(false, false);
637 }
638
639 ExceptionLevel regime = !is_priv ? EL0 : state.aarch64EL;
640 if (hasUnprivRegime(regime, state)) {
641 bool pr = false;
642 bool pw = false;
643 bool ur = false;
644 bool uw = false;
645 // Apply leaf permissions
646 switch (ap) {
647 case 0b00: // Privileged access
648 pr = 1; pw = 1; ur = 0; uw = 0;
649 break;
650 case 0b01: // No effect
651 pr = 1; pw = 1; ur = 1; uw = 1;
652 break;
653 case 0b10: // Read-only, privileged access
654 pr = 1; pw = 0; ur = 0; uw = 0;
655 break;
656 case 0b11: // Read-only
657 pr = 1; pw = 0; ur = 1; uw = 0;
658 break;
659 }
660
661 // Locations writable by unprivileged cannot be executed by privileged
662 const bool px = !(pxn || uw);
663 const bool ux = !xn;
664
665 grant_read = is_priv ? pr : ur;
666 grant_write = is_priv ? pw : uw;
667 grant_exec = is_priv ? px : ux;
668 } else {
669 switch (bits(ap, 1)) {
670 case 0b0: // No effect
671 grant_read = 1; grant_write = 1;
672 break;
673 case 0b1: // Read-Only
674 grant_read = 1; grant_write = 0;
675 break;
676 }
677 grant_exec = !xn;
678 }
679
680 // Do not allow execution from writable location
681 // if wxn is set
682 grant_exec = grant_exec && !(wxn && grant_write);
683
684 if (ArmSystem::haveEL(tc, EL3) && state.isSecure && te->ns) {
685 grant_exec = grant_exec && !state.scr.sif;
686 }
687
688 if (x) {
689 grant = grant_exec;
690 } else if (req->isAtomic()) {
691 grant = grant_read && grant_write;
692 } else if (w) {
693 grant = grant_write;
694 } else {
695 grant = grant_read;
696 }
697
698 return std::make_pair(grant, grant_read);
699}
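// Compact view of the AP[2:1] decode above for regimes with an
// unprivileged stage (e.g. EL1&0):
//
//     ap = 0b00 -> priv RW, user none    ap = 0b01 -> priv RW, user RW
//     ap = 0b10 -> priv RO, user none    ap = 0b11 -> priv RO, user RO
//
// plus the derived rule that a page writable from EL0 is never privileged
// executable: px = !(pxn || uw).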
700
701bool
702MMU::hasUnprivRegime(ExceptionLevel el, bool e2h)
703{
704 switch (el) {
705 case EL0:
706 case EL1:
707 // EL1&0
708 return true;
709 case EL2:
710 // EL2&0 or EL2
711 return e2h;
712 case EL3:
713 default:
714 return false;
715 }
716}
717
718bool
719MMU::hasUnprivRegime(ExceptionLevel el, CachedState &state)
720{
721 return hasUnprivRegime(el, state.hcr.e2h);
722}
723
724bool
725MMU::faultPAN(ThreadContext *tc, uint8_t ap, const RequestPtr &req, Mode mode,
726 const bool is_priv, CachedState &state)
727{
728 bool exception = false;
729 switch (state.aarch64EL) {
730 case EL0:
731 break;
732 case EL1:
733 if (checkPAN(tc, ap, req, mode, is_priv, state)) {
734 exception = true;
735 }
736 break;
737 case EL2:
738 if (state.hcr.e2h && checkPAN(tc, ap, req, mode, is_priv, state)) {
739 exception = true;
740 }
741 break;
742 case EL3:
743 break;
744 }
745
746 return exception;
747}
748
749bool
750MMU::checkPAN(ThreadContext *tc, uint8_t ap, const RequestPtr &req, Mode mode,
751 const bool is_priv, CachedState &state)
752{
753 // The PAN bit has no effect on:
754 // 1) Instruction accesses.
755 // 2) Data Cache instructions other than DC ZVA
756 // 3) Address translation instructions, other than ATS1E1RP and
757 // ATS1E1WP when ARMv8.2-ATS1E1 is implemented. (Unimplemented in
758 // gem5)
759 // 4) Instructions to be treated as unprivileged, unless
760 // HCR_EL2.{E2H, TGE} == {1, 0}
761 if (HaveExt(tc, ArmExtension::FEAT_PAN) && state.cpsr.pan && (ap & 0x1) &&
762 mode != BaseMMU::Execute) {
763
764 if (req->isCacheMaintenance() &&
765 !(req->getFlags() & Request::CACHE_BLOCK_ZERO)) {
766 // Cache maintenance other than DC ZVA
767 return false;
768 } else if (!is_priv && !(state.hcr.e2h && !state.hcr.tge)) {
769 // Treated as unprivileged unless HCR_EL2.{E2H, TGE} == {1, 0}
770 return false;
771 }
772 return true;
773 }
774
775 return false;
776}
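// Illustrative PAN case for the check above: with FEAT_PAN implemented,
// PSTATE.PAN set and a privileged EL1 data access to a page whose AP bits
// grant EL0 access (ap & 0x1), the access faults; instruction fetches and
// cache maintenance other than DC ZVA are exempt, per the numbered list
// in checkPAN().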
777
778Addr
779MMU::purifyTaggedAddr(Addr vaddr_tainted, ThreadContext *tc, ExceptionLevel el,
780 TCR tcr, bool is_inst, CachedState& state)
781{
782 const bool selbit = bits(vaddr_tainted, 55);
783
784 // Call the memoized version of computeAddrTop
785 const auto topbit = state.computeAddrTop(tc, selbit, is_inst, tcr, el);
786
787 return maskTaggedAddr(vaddr_tainted, tc, el, topbit);
788}
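// Sketch of the tag removal above, assuming TBI is enabled for the
// translation regime selected by bit 55: for a tagged pointer such as
// 0xab00000080001000 (bit 55 clear, so the TTBR0 side), computeAddrTop()
// returns 55 and maskTaggedAddr() extends from that bit, producing the
// untagged VA 0x0000000080001000 consumed by the TLB and table walker.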
789
790Fault
791MMU::translateMmuOff(ThreadContext *tc, const RequestPtr &req, Mode mode,
792 ArmTranslationType tran_type, Addr vaddr, bool long_desc_format,
793 CachedState &state)
794{
795 bool is_fetch = (mode == Execute);
796 bool is_atomic = req->isAtomic();
797 req->setPaddr(vaddr);
798 // When the MMU is off the security attribute corresponds to the
799 // security state of the processor
800 if (state.isSecure)
801 req->setFlags(Request::SECURE);
802
803 if (state.aarch64) {
804 bool selbit = bits(vaddr, 55);
805 TCR tcr1 = tc->readMiscReg(MISCREG_TCR_EL1);
806 int topbit = computeAddrTop(tc, selbit, is_fetch, tcr1, currEL(tc));
807 int addr_sz = bits(vaddr, topbit, physAddrRange);
808 if (addr_sz != 0) {
809 Fault f;
810 if (is_fetch)
811 f = std::make_shared<PrefetchAbort>(vaddr,
812 ArmFault::AddressSizeLL, state.isStage2,
813 ArmFault::UnknownTran);
814 else
815 f = std::make_shared<DataAbort>(vaddr,
816 TlbEntry::DomainType::NoAccess,
817 is_atomic ? false : mode==Write,
818 ArmFault::AddressSizeLL, state.isStage2,
819 ArmFault::UnknownTran);
820 return f;
821 }
822 }
823
824 // @todo: double check this (ARM ARM issue C B3.2.1)
825 if (long_desc_format || state.sctlr.tre == 0 || state.nmrr.ir0 == 0 ||
826 state.nmrr.or0 == 0 || state.prrr.tr0 != 0x2) {
827 if (!req->isCacheMaintenance()) {
828 req->setFlags(Request::UNCACHEABLE);
829 }
830 req->setFlags(Request::STRICT_ORDER);
831 }
832
833 // Set memory attributes
834 TlbEntry temp_te;
835 temp_te.ns = !state.isSecure;
836 bool dc = (HaveExt(tc, ArmExtension::FEAT_VHE) &&
837 state.hcr.e2h == 1 && state.hcr.tge == 1) ? 0: state.hcr.dc;
838 bool i_cacheability = state.sctlr.i && !state.sctlr.m;
839 if (state.isStage2 || !dc || state.isSecure ||
840 (state.isHyp && !(tran_type & S1CTran))) {
841
842 temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
843 : TlbEntry::MemoryType::StronglyOrdered;
844 temp_te.innerAttrs = i_cacheability ? 0x2 : 0x0;
845 temp_te.outerAttrs = i_cacheability ? 0x2 : 0x0;
846 temp_te.shareable = true;
847 temp_te.outerShareable = true;
848 } else {
849 temp_te.mtype = TlbEntry::MemoryType::Normal;
850 temp_te.innerAttrs = 0x3;
851 temp_te.outerAttrs = 0x3;
852 temp_te.shareable = false;
853 temp_te.outerShareable = false;
854 }
855 temp_te.setAttributes(long_desc_format);
856 DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: "
857 "%d, innerAttrs: %d, outerAttrs: %d, stage2: %d\n",
858 temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
859 state.isStage2);
860 setAttr(temp_te.attributes);
861
862 return NoFault;
863}
864
865Fault
866MMU::translateMmuOn(ThreadContext *tc, const RequestPtr &req, Mode mode,
867 Translation *translation, bool &delay, bool timing,
868 bool functional, Addr vaddr,
869 ArmFault::TranMethod tranMethod, CachedState &state)
870{
871 TlbEntry *te = NULL;
872 bool is_fetch = (mode == Execute);
873 TlbEntry mergeTe;
874
875 Request::Flags flags = req->getFlags();
876 Addr vaddr_tainted = req->getVaddr();
877
878 Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
879 functional, &mergeTe, state);
880 // only proceed if we have a valid table entry
881 if (!isCompleteTranslation(te) && (fault == NoFault)) delay = true;
882
883 // If we have the table entry, transfer some of the attributes to the
884 // request that triggered the translation
885 if (isCompleteTranslation(te)) {
886 // Set memory attributes
887 DPRINTF(TLBVerbose,
888 "Setting memory attributes: shareable: %d, innerAttrs: %d, "
889 "outerAttrs: %d, mtype: %d, stage2: %d\n",
890 te->shareable, te->innerAttrs, te->outerAttrs,
891 static_cast<uint8_t>(te->mtype), state.isStage2);
892 setAttr(te->attributes);
893
894 if (te->nonCacheable && !req->isCacheMaintenance())
895 req->setFlags(Request::UNCACHEABLE);
896
897 // Require requests to be ordered if the request goes to
898 // strongly ordered or device memory (i.e., anything other
899 // than normal memory requires strict order).
900 if (te->mtype != TlbEntry::MemoryType::Normal)
901 req->setFlags(Request::STRICT_ORDER);
902
903 Addr pa = te->pAddr(vaddr);
904 req->setPaddr(pa);
905
906 if (state.isSecure && !te->ns) {
907 req->setFlags(Request::SECURE);
908 }
909 if (!is_fetch && fault == NoFault &&
910 (vaddr & mask(flags & AlignmentMask)) &&
911 (te->mtype != TlbEntry::MemoryType::Normal)) {
912 // Unaligned accesses to Device memory should always cause an
913 // abort regardless of sctlr.a
914 stats.alignFaults++;
915 bool is_write = (mode == Write);
916 return std::make_shared<DataAbort>(
917 vaddr_tainted,
918 TlbEntry::DomainType::NoAccess, is_write,
919 ArmFault::AlignmentFault, state.isStage2,
920 tranMethod);
921 }
922
923 // Check for a trickbox generated address fault
924 if (fault == NoFault)
925 fault = testTranslation(req, mode, te->domain, state);
926 }
927
928 if (fault == NoFault) {
929 // Don't try to finalize a physical address unless the
930 // translation has completed (i.e., there is a table entry).
931 return te ? finalizePhysical(req, tc, mode) : NoFault;
932 } else {
933 return fault;
934 }
935}
936
937Fault
938MMU::translateFs(const RequestPtr &req, ThreadContext *tc, Mode mode,
939 Translation *translation, bool &delay, bool timing,
940 ArmTranslationType tran_type, bool functional,
941 CachedState &state)
942{
943 // No such thing as a functional timing access
944 assert(!(timing && functional));
945
946 Addr vaddr_tainted = req->getVaddr();
947 Addr vaddr = 0;
948 if (state.aarch64) {
949 vaddr = purifyTaggedAddr(vaddr_tainted, tc, state.aarch64EL,
950 static_cast<TCR>(state.ttbcr), mode==Execute, state);
951 } else {
952 vaddr = vaddr_tainted;
953 }
954 Request::Flags flags = req->getFlags();
955
956 bool is_fetch = (mode == Execute);
957 bool is_write = (mode == Write);
958 bool long_desc_format = state.aarch64 || longDescFormatInUse(tc);
959 ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
960 : ArmFault::VmsaTran;
961
962 DPRINTF(TLBVerbose,
963 "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
964 state.isPriv, flags & UserMode, state.isSecure,
965 tran_type & S1S2NsTran);
966
967 DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
968 "flags %#lx tranType 0x%x\n", vaddr_tainted, mode,
969 state.isStage2, state.scr, state.sctlr, flags, tran_type);
970
971 if (!state.isStage2) {
972 if ((req->isInstFetch() && (!state.sctlr.i)) ||
973 ((!req->isInstFetch()) && (!state.sctlr.c))){
974 if (!req->isCacheMaintenance()) {
975 req->setFlags(Request::UNCACHEABLE);
976 }
977 req->setFlags(Request::STRICT_ORDER);
978 }
979 }
980 if (!is_fetch) {
981 if (state.sctlr.a || !(flags & AllowUnaligned)) {
982 if (vaddr & mask(flags & AlignmentMask)) {
983 stats.alignFaults++;
984 return std::make_shared<DataAbort>(
985 vaddr_tainted,
986 TlbEntry::DomainType::NoAccess, is_write,
987 ArmFault::AlignmentFault, state.isStage2,
988 tranMethod);
989 }
990 }
991 }
992
993 bool vm = state.hcr.vm;
994 if (HaveExt(tc, ArmExtension::FEAT_VHE) &&
995 state.hcr.e2h == 1 && state.hcr.tge == 1)
996 vm = 0;
997 else if (state.hcr.dc == 1)
998 vm = 1;
999
1000 Fault fault = NoFault;
1001 // If guest MMU is off or hcr.vm=0 go straight to stage2
1002 if ((state.isStage2 && !vm) || (!state.isStage2 && !state.sctlr.m)) {
1003 fault = translateMmuOff(tc, req, mode, tran_type, vaddr,
1004 long_desc_format, state);
1005 } else {
1006 DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
1007 state.isStage2 ? "IPA" : "VA", vaddr_tainted, state.asid);
1008 // Translation enabled
1009 fault = translateMmuOn(tc, req, mode, translation, delay, timing,
1010 functional, vaddr, tranMethod, state);
1011 }
1012
1013 // Check for Debug Exceptions
1014 SelfDebug *sd = ArmISA::ISA::getSelfDebug(tc);
1015
1016 if (sd->enabled() && fault == NoFault) {
1017 fault = sd->testDebug(tc, req, mode);
1018 }
1019
1020 return fault;
1021}
1022
1023Fault
1024MMU::translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode,
1025 ArmTranslationType tran_type)
1026{
1027 return translateAtomic(req, tc, mode, tran_type, false);
1028}
1029
1030Fault
1031MMU::translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode,
1032 ArmTranslationType tran_type, bool stage2)
1033{
1034 auto& state = updateMiscReg(tc, tran_type, stage2);
1035
1036 bool delay = false;
1037 Fault fault;
1038 if (FullSystem)
1039 fault = translateFs(req, tc, mode, NULL, delay, false,
1040 tran_type, false, state);
1041 else
1042 fault = translateSe(req, tc, mode, NULL, delay, false, state);
1043 assert(!delay);
1044 return fault;
1045}
1046
1047Fault
1048MMU::translateFunctional(const RequestPtr &req, ThreadContext *tc, Mode mode)
1049{
1050 return translateFunctional(req, tc, mode, NormalTran, false);
1051}
1052
1053Fault
1054MMU::translateFunctional(const RequestPtr &req, ThreadContext *tc, Mode mode,
1055 ArmTranslationType tran_type)
1056{
1057 return translateFunctional(req, tc, mode, tran_type, false);
1058}
1059
1060Fault
1061MMU::translateFunctional(const RequestPtr &req, ThreadContext *tc, Mode mode,
1062 ArmTranslationType tran_type, bool stage2)
1063{
1064 auto& state = updateMiscReg(tc, tran_type, stage2);
1065
1066 bool delay = false;
1067 Fault fault;
1068 if (FullSystem)
1069 fault = translateFs(req, tc, mode, NULL, delay, false,
1070 tran_type, true, state);
1071 else
1072 fault = translateSe(req, tc, mode, NULL, delay, false, state);
1073 assert(!delay);
1074 return fault;
1075}
1076
1077void
1078MMU::translateTiming(const RequestPtr &req, ThreadContext *tc,
1079 Translation *translation, Mode mode, ArmTranslationType tran_type,
1080 bool stage2)
1081{
1082 auto& state = updateMiscReg(tc, tran_type, stage2);
1083
1084 assert(translation);
1085
1086 translateComplete(req, tc, translation, mode, tran_type,
1087 stage2, state);
1088}
1089
1090Fault
1091MMU::translateComplete(const RequestPtr &req, ThreadContext *tc,
1092 Translation *translation, Mode mode, ArmTranslationType tran_type,
1093 bool call_from_s2)
1094{
1095 return translateComplete(req, tc, translation, mode, tran_type,
1096 call_from_s2, s1State);
1097}
1098
1099Fault
1100MMU::translateComplete(const RequestPtr &req, ThreadContext *tc,
1101 Translation *translation, Mode mode, ArmTranslationType tran_type,
1102 bool call_from_s2, CachedState &state)
1103{
1104 bool delay = false;
1105 Fault fault;
1106 if (FullSystem)
1107 fault = translateFs(req, tc, mode, translation, delay, true, tran_type,
1108 false, state);
1109 else
1110 fault = translateSe(req, tc, mode, translation, delay, true, state);
1111
1112 DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n", delay,
1113 fault != NoFault);
1114 // If we have a translation, and we're not in the middle of doing a stage
1115 // 2 translation, tell the translation that we've either finished or it's
1116 // going to take a while. By not doing this when we're in the middle of a
1117 // stage 2 translation we prevent marking the translation as delayed twice:
1118 // once when the translation starts and again when the stage 1 translation
1119 // completes.
1120
1121 if (translation && (call_from_s2 || !state.stage2Req || req->hasPaddr() ||
1122 fault != NoFault)) {
1123 if (!delay)
1124 translation->finish(fault, req, tc, mode);
1125 else
1126 translation->markDelayed();
1127 }
1128 return fault;
1129}
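// Minimal sketch of the timing-mode contract implemented above, using a
// hypothetical client class: the Translation object receives markDelayed()
// when the walk will finish later, and finish() exactly once with the
// result.
//
//     struct MyXlation : BaseMMU::Translation
//     {
//         void markDelayed() override { /* stall the requesting stage */ }
//         void finish(const Fault &fault, const RequestPtr &req,
//                     ThreadContext *tc, BaseMMU::Mode mode) override
//         { /* resume; on NoFault the PA is in req->getPaddr() */ }
//     };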
1130
1131vmid_t
1132MMU::getVMID(ThreadContext *tc) const
1133{
1134 AA64MMFR1 mmfr1 = tc->readMiscReg(MISCREG_ID_AA64MMFR1_EL1);
1135 VTCR_t vtcr = tc->readMiscReg(MISCREG_VTCR_EL2);
1136 vmid_t vmid = 0;
1137
1138 switch (mmfr1.vmidbits) {
1139 case 0b0000:
1140 // 8 bits
1141 vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48);
1142 break;
1143 case 0b0010:
1144 if (vtcr.vs && ELIs64(tc, EL2)) {
1145 // 16 bits
1146 vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 63, 48);
1147 } else {
1148 // 8 bits
1149 vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48);
1150 }
1151 break;
1152 default:
1153 panic("Reserved ID_AA64MMFR1_EL1.VMIDBits value: %#x",
1154 mmfr1.vmidbits);
1155 }
1156
1157 return vmid;
1158}
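// Worked example of the VMID extraction above, assuming
// VTTBR_EL2 == 0x00ab000000002000: with 8-bit VMIDs the result is
// bits(..., 55, 48) == 0xab; with 16-bit VMIDs (vtcr.vs set and EL2
// running AArch64) the same register yields bits(..., 63, 48) == 0x00ab.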
1159
1160MMU::CachedState &
1161MMU::updateMiscReg(ThreadContext *tc,
1162 ArmTranslationType tran_type, bool stage2)
1163{
1164 // check if the regs have changed, or the translation mode is different.
1165 // NOTE: the tran type doesn't affect stage 2 TLBs as they only handle
1166 // one type of translation anyway
1167
1168 auto& state = stage2 ? s2State : s1State;
1169 if (state.miscRegValid && miscRegContext == tc->contextId() &&
1170 ((tran_type == state.curTranType) || stage2)) {
1171
1172 } else {
1173 DPRINTF(TLBVerbose, "TLB variables changed!\n");
1174 state.updateMiscReg(tc, tran_type);
1175
1176 itbStage2->setVMID(state.vmid);
1177 dtbStage2->setVMID(state.vmid);
1178
1179 for (auto tlb : instruction) {
1180 static_cast<TLB*>(tlb)->setVMID(state.vmid);
1181 }
1182 for (auto tlb : data) {
1183 static_cast<TLB*>(tlb)->setVMID(state.vmid);
1184 }
1185 for (auto tlb : unified) {
1186 static_cast<TLB*>(tlb)->setVMID(state.vmid);
1187 }
1188
1189 miscRegContext = tc->contextId();
1190 }
1191
1192 if (state.directToStage2) {
1193 s2State.updateMiscReg(tc, tran_type);
1194 return s2State;
1195 } else {
1196 return state;
1197 }
1198}
1199
1200void
1201MMU::CachedState::updateMiscReg(ThreadContext *tc,
1202 ArmTranslationType tran_type)
1203{
1204 cpsr = tc->readMiscReg(MISCREG_CPSR);
1205
1206 // Dependencies: SCR/SCR_EL3, CPSR
1207 isSecure = ArmISA::isSecure(tc) &&
1208 !(tran_type & HypMode) && !(tran_type & S1S2NsTran);
1209
1210 aarch64EL = tranTypeEL(cpsr, tran_type);
1211 aarch64 = isStage2 ?
1212 ELIs64(tc, EL2) :
1213 ELIs64(tc, aarch64EL == EL0 ? EL1 : aarch64EL);
1214
1215 hcr = tc->readMiscReg(MISCREG_HCR_EL2);
1216 if (aarch64) { // AArch64
1217 // determine EL we need to translate in
1218 switch (aarch64EL) {
1219 case EL0:
1220 if (HaveExt(tc, ArmExtension::FEAT_VHE) &&
1221 hcr.tge == 1 && hcr.e2h == 1) {
1222 // VHE code for EL2&0 regime
1223 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
1224 ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
1225 uint64_t ttbr_asid = ttbcr.a1 ?
1226 tc->readMiscReg(MISCREG_TTBR1_EL2) :
1227 tc->readMiscReg(MISCREG_TTBR0_EL2);
1228 asid = bits(ttbr_asid,
1230
1231 } else {
1232 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
1233 ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
1234 uint64_t ttbr_asid = ttbcr.a1 ?
1235 tc->readMiscReg(MISCREG_TTBR1_EL1) :
1236 tc->readMiscReg(MISCREG_TTBR0_EL1);
1237 asid = bits(ttbr_asid,
1238 (mmu->haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
1239
1240 }
1241 break;
1242 case EL1:
1243 {
1244 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
1245 ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
1246 uint64_t ttbr_asid = ttbcr.a1 ?
1247 tc->readMiscReg(MISCREG_TTBR1_EL1) :
1248 tc->readMiscReg(MISCREG_TTBR0_EL1);
1249 asid = bits(ttbr_asid,
1250 (mmu->haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
1251 }
1252 break;
1253 case EL2:
1254 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
1255 ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
1256 if (hcr.e2h == 1) {
1257 // VHE code for EL2&0 regime
1258 uint64_t ttbr_asid = ttbcr.a1 ?
1259 tc->readMiscReg(MISCREG_TTBR1_EL2) :
1260 tc->readMiscReg(MISCREG_TTBR0_EL2);
1261 asid = bits(ttbr_asid,
1262 (mmu->haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
1263 } else {
1264 asid = -1;
1265 }
1266 break;
1267 case EL3:
1268 sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
1269 ttbcr = tc->readMiscReg(MISCREG_TCR_EL3);
1270 asid = -1;
1271 break;
1272 }
1273
1274 scr = tc->readMiscReg(MISCREG_SCR_EL3);
1275 isPriv = aarch64EL != EL0;
1276 if (mmu->release()->has(ArmExtension::VIRTUALIZATION)) {
1277 vmid = getVMID(tc);
1278 isHyp = aarch64EL == EL2;
1279 isHyp |= tran_type & HypMode;
1280 isHyp &= (tran_type & S1S2NsTran) == 0;
1281 isHyp &= (tran_type & S1CTran) == 0;
1282 bool vm = hcr.vm;
1283 if (HaveExt(tc, ArmExtension::FEAT_VHE) &&
1284 hcr.e2h == 1 && hcr.tge == 1) {
1285 vm = 0;
1286 }
1287
1288 if (hcr.e2h == 1 && (aarch64EL == EL2
1289 || (hcr.tge == 1 && aarch64EL == EL0))) {
1290 isHyp = true;
1291 directToStage2 = false;
1292 stage2Req = false;
1293 stage2DescReq = false;
1294 } else {
1295 // Work out if we should skip the first stage of translation and go
1296 // directly to stage 2. This value is cached so we don't have to
1297 // compute it for every translation.
1298 bool sec = !isSecure || (isSecure && IsSecureEL2Enabled(tc));
1299 stage2Req = isStage2 ||
1300 (vm && !isHyp && sec &&
1301 !(tran_type & S1CTran) && (aarch64EL < EL2) &&
1302 !(tran_type & S1E1Tran)); // <--- FIX THIS HACK
1303 stage2DescReq = isStage2 || (vm && !isHyp && sec &&
1304 (aarch64EL < EL2));
1305 directToStage2 = !isStage2 && stage2Req && !sctlr.m;
1306 }
1307 } else {
1308 vmid = 0;
1309 isHyp = false;
1310 directToStage2 = false;
1311 stage2Req = false;
1312 stage2DescReq = false;
1313 }
1314 } else { // AArch32
1315 sctlr = tc->readMiscReg(snsBankedIndex(MISCREG_SCTLR, tc,
1316 !isSecure));
1317 ttbcr = tc->readMiscReg(snsBankedIndex(MISCREG_TTBCR, tc,
1318 !isSecure));
1319 scr = tc->readMiscReg(MISCREG_SCR_EL3);
1320 isPriv = cpsr.mode != MODE_USER;
1321 if (longDescFormatInUse(tc)) {
1322 uint64_t ttbr_asid = tc->readMiscReg(
1323 snsBankedIndex(ttbcr.a1 ? MISCREG_TTBR1 :
1324 MISCREG_TTBR0,
1325 tc, !isSecure));
1326 asid = bits(ttbr_asid, 55, 48);
1327 } else { // Short-descriptor translation table format in use
1328 CONTEXTIDR context_id = tc->readMiscReg(snsBankedIndex(
1329 MISCREG_CONTEXTIDR, tc, !isSecure));
1330 asid = context_id.asid;
1331 }
1332 prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR, tc,
1333 !isSecure));
1334 nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR, tc,
1335 !isSecure));
1336 dacr = tc->readMiscReg(snsBankedIndex(MISCREG_DACR, tc,
1337 !isSecure));
1338 hcr = tc->readMiscReg(MISCREG_HCR_EL2);
1339
1340 if (mmu->release()->has(ArmExtension::VIRTUALIZATION)) {
1341 vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
1342 isHyp = cpsr.mode == MODE_HYP;
1343 isHyp |= tran_type & HypMode;
1344 isHyp &= (tran_type & S1S2NsTran) == 0;
1345 isHyp &= (tran_type & S1CTran) == 0;
1346 if (isHyp) {
1347 sctlr = tc->readMiscReg(MISCREG_HSCTLR);
1348 }
1349 // Work out if we should skip the first stage of translation and go
1350 // directly to stage 2. This value is cached so we don't have to
1351 // compute it for every translation.
1352 bool sec = !isSecure || (isSecure && IsSecureEL2Enabled(tc));
1353 stage2Req = hcr.vm && !isStage2 && !isHyp && sec &&
1354 !(tran_type & S1CTran);
1355 stage2DescReq = hcr.vm && !isStage2 && !isHyp && sec;
1356 directToStage2 = stage2Req && !sctlr.m;
1357 } else {
1358 vmid = 0;
1359 stage2Req = false;
1360 isHyp = false;
1361 directToStage2 = false;
1362 stage2DescReq = false;
1363 }
1364 }
1365 miscRegValid = true;
1366 curTranType = tran_type;
1367}
1368
1369ExceptionLevel
1370MMU::tranTypeEL(CPSR cpsr, ArmTranslationType type)
1371{
1372 switch (type) {
1373 case S1E0Tran:
1374 case S12E0Tran:
1375 return EL0;
1376
1377 case S1E1Tran:
1378 case S12E1Tran:
1379 return EL1;
1380
1381 case S1E2Tran:
1382 return EL2;
1383
1384 case S1E3Tran:
1385 return EL3;
1386
1387 case NormalTran:
1388 case S1CTran:
1389 case S1S2NsTran:
1390 case HypMode:
1391 return currEL(cpsr);
1392
1393 default:
1394 panic("Unknown translation mode!\n");
1395 }
1396}
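// Example of the mapping above: an explicit S1E2Tran request (as used by
// AT S1E2R-style address translation ops) always resolves to EL2
// regardless of the current mode, while NormalTran and the other
// "current" types simply follow currEL(cpsr) of the requesting context.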
1397
1398Fault
1399MMU::getTE(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode,
1400 Translation *translation, bool timing, bool functional,
1401 bool is_secure, ArmTranslationType tran_type,
1402 bool stage2)
1403{
1404 return getTE(te, req, tc, mode, translation, timing, functional,
1405 is_secure, tran_type, stage2 ? s2State : s1State);
1406}
1407
1408TlbEntry*
1409MMU::lookup(Addr va, uint16_t asid, vmid_t vmid, bool hyp, bool secure,
1410 bool functional, bool ignore_asn, ExceptionLevel target_el,
1411 bool in_host, bool stage2, BaseMMU::Mode mode)
1412{
1413 TLB *tlb = getTlb(mode, stage2);
1414
1415 TlbEntry::Lookup lookup_data;
1416
1417 lookup_data.va = va;
1418 lookup_data.asn = asid;
1419 lookup_data.ignoreAsn = ignore_asn;
1420 lookup_data.vmid = vmid;
1421 lookup_data.hyp = hyp;
1422 lookup_data.secure = secure;
1423 lookup_data.functional = functional;
1424 lookup_data.targetEL = target_el;
1425 lookup_data.inHost = in_host;
1426 lookup_data.mode = mode;
1427
1428 return tlb->multiLookup(lookup_data);
1429}
1430
1431Fault
1432MMU::getTE(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode,
1433 Translation *translation, bool timing, bool functional,
1434 bool is_secure, ArmTranslationType tran_type,
1435 CachedState &state)
1436{
1437 // In a 2-stage system, the IPA->PA translation can be started via this
1438 // call so make sure the miscRegs are correct.
1439 if (state.isStage2) {
1440 updateMiscReg(tc, tran_type, true);
1441 }
1442
1443 Addr vaddr_tainted = req->getVaddr();
1444 Addr vaddr = 0;
1445 ExceptionLevel target_el = state.aarch64 ? state.aarch64EL : EL1;
1446 if (state.aarch64) {
1447 vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el,
1448 static_cast<TCR>(state.ttbcr), mode==Execute, state);
1449 } else {
1450 vaddr = vaddr_tainted;
1451 }
1452
1453 *te = lookup(vaddr, state.asid, state.vmid, state.isHyp, is_secure, false,
1454 false, target_el, false, state.isStage2, mode);
1455
1456 if (!isCompleteTranslation(*te)) {
1457 if (req->isPrefetch()) {
1458 // if the request is a prefetch don't attempt to fill the TLB or go
1459 // any further with the memory access (here we can safely use the
1460 // fault status for the short desc. format in all cases)
1461 stats.prefetchFaults++;
1462 return std::make_shared<PrefetchAbort>(
1463 vaddr_tainted, ArmFault::PrefetchTLBMiss, state.isStage2);
1464 }
1465
1466 // start translation table walk, pass variables rather than
1467 // re-retrieving them in the table walker, for speed
1468 DPRINTF(TLB,
1469 "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
1470 vaddr_tainted, state.asid, state.vmid);
1471
1472 Fault fault;
1473 fault = getTableWalker(mode, state.isStage2)->walk(
1474 req, tc, state.asid, state.vmid, state.isHyp, mode,
1475 translation, timing, functional, is_secure,
1476 tran_type, state.stage2DescReq, *te);
1477
1478 // for timing mode, return and wait for table walk,
1479 if (timing || fault != NoFault) {
1480 return fault;
1481 }
1482
1483 *te = lookup(vaddr, state.asid, state.vmid, state.isHyp, is_secure,
1484 true, false, target_el, false, state.isStage2, mode);
1485 assert(*te);
1486 }
1487 return NoFault;
1488}
1489
1490Fault
1491MMU::getResultTe(TlbEntry **te, const RequestPtr &req,
1492 ThreadContext *tc, Mode mode,
1493 Translation *translation, bool timing, bool functional,
1494 TlbEntry *mergeTe, CachedState &state)
1495{
1496 Fault fault;
1497
1498 if (state.isStage2) {
1499 // We are already in the stage 2 TLB. Grab the table entry for stage
1500 // 2 only. We are here because stage 1 translation is disabled.
1501 TlbEntry *s2_te = nullptr;
1502 // Get the stage 2 table entry
1503 fault = getTE(&s2_te, req, tc, mode, translation, timing, functional,
1504 state.isSecure, state.curTranType, state);
1505 // Check permissions of stage 2
1506 if (isCompleteTranslation(s2_te) && (fault == NoFault)) {
1507 if (state.aarch64)
1508 fault = checkPermissions64(s2_te, req, mode, tc, state);
1509 else
1510 fault = checkPermissions(s2_te, req, mode, state);
1511 }
1512 *te = s2_te;
1513 return fault;
1514 }
1515
1516 TlbEntry *s1_te = nullptr;
1517
1518 Addr vaddr_tainted = req->getVaddr();
1519
1520 // Get the stage 1 table entry
1521 fault = getTE(&s1_te, req, tc, mode, translation, timing, functional,
1522 state.isSecure, state.curTranType, state);
1523 // only proceed if we have a valid table entry
1524 if (isCompleteTranslation(s1_te) && (fault == NoFault)) {
1525 // Check stage 1 permissions before checking stage 2
1526 if (state.aarch64)
1527 fault = checkPermissions64(s1_te, req, mode, tc, state);
1528 else
1529 fault = checkPermissions(s1_te, req, mode, state);
1530 if (state.stage2Req & (fault == NoFault)) {
1531 Stage2LookUp *s2_lookup = new Stage2LookUp(this, *s1_te,
1532 req, translation, mode, timing, functional, state.isSecure,
1533 state.curTranType);
1534 fault = s2_lookup->getTe(tc, mergeTe);
1535 if (s2_lookup->isComplete()) {
1536 *te = mergeTe;
1537 // We've finished with the lookup so delete it
1538 delete s2_lookup;
1539 } else {
1540 // The lookup hasn't completed, so we can't delete it now. We
1541 // get round this by asking the object to self delete when the
1542 // translation is complete.
1543 s2_lookup->setSelfDelete();
1544 }
1545 } else {
1546 // This case deals with an S1 hit (or bypass), followed by
1547 // an S2 hit-but-perms issue
1548 if (state.isStage2) {
1549 DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
1550 vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0,
1551 fault);
1552 if (fault != NoFault) {
1553 auto arm_fault = reinterpret_cast<ArmFault*>(fault.get());
1554 arm_fault->annotate(ArmFault::S1PTW, false);
1555 arm_fault->annotate(ArmFault::OVA, vaddr_tainted);
1556 }
1557 }
1558 *te = s1_te;
1559 }
1560 }
1561 return fault;
1562}
1563
1564bool
1565MMU::isCompleteTranslation(TlbEntry *entry) const
1566{
1567 return entry && !entry->partial;
1568}
1569
1570void
1571MMU::takeOverFrom(BaseMMU *old_mmu)
1572{
1573 BaseMMU::takeOverFrom(old_mmu);
1574
1575 auto *ommu = dynamic_cast<MMU*>(old_mmu);
1576 assert(ommu);
1577
1578 _attr = ommu->_attr;
1579
1580 s1State = ommu->s1State;
1581 s2State = ommu->s2State;
1582}
1583
1584void
1585MMU::setTestInterface(SimObject *_ti)
1586{
1587 if (!_ti) {
1588 test = nullptr;
1589 } else {
1590 TlbTestInterface *ti(dynamic_cast<TlbTestInterface *>(_ti));
1591 fatal_if(!ti, "%s is not a valid ARM TLB tester\n", _ti->name());
1592 test = ti;
1593 }
1594}
1595
1596Fault
1597MMU::testTranslation(const RequestPtr &req, Mode mode,
1598 TlbEntry::DomainType domain, CachedState &state)
1599{
1600 if (!test || !req->hasSize() || req->getSize() == 0 ||
1601 req->isCacheMaintenance()) {
1602 return NoFault;
1603 } else {
1604 return test->translationCheck(req, state.isPriv, mode, domain);
1605 }
1606}
1607
1608Fault
1609MMU::testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode,
1610 TlbEntry::DomainType domain, LookupLevel lookup_level,
1611 bool stage2)
1612{
1613 return testWalk(pa, size, va, is_secure, mode, domain, lookup_level,
1614 stage2 ? s2State : s1State);
1615}
1616
1617Fault
1618MMU::testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode,
1619 TlbEntry::DomainType domain, LookupLevel lookup_level,
1620 CachedState &state)
1621{
1622 if (!test) {
1623 return NoFault;
1624 } else {
1625 return test->walkCheck(pa, size, va, is_secure, state.isPriv, mode,
1626 domain, lookup_level);
1627 }
1628}
1629
1631 : statistics::Group(parent),
1632 ADD_STAT(alignFaults, statistics::units::Count::get(),
1633 "Number of MMU faults due to alignment restrictions"),
1634 ADD_STAT(prefetchFaults, statistics::units::Count::get(),
1635 "Number of MMU faults due to prefetch"),
1636 ADD_STAT(domainFaults, statistics::units::Count::get(),
1637 "Number of MMU faults due to domain restrictions"),
1638 ADD_STAT(permsFaults, statistics::units::Count::get(),
1639 "Number of MMU faults due to permissions restrictions")
1640{
1641}
1642
1643} // namespace gem5