gem5  v22.0.0.2
mmu.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2010-2013, 2016-2022 Arm Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Copyright (c) 2001-2005 The Regents of The University of Michigan
15  * All rights reserved.
16  *
17  * Redistribution and use in source and binary forms, with or without
18  * modification, are permitted provided that the following conditions are
19  * met: redistributions of source code must retain the above copyright
20  * notice, this list of conditions and the following disclaimer;
21  * redistributions in binary form must reproduce the above copyright
22  * notice, this list of conditions and the following disclaimer in the
23  * documentation and/or other materials provided with the distribution;
24  * neither the name of the copyright holders nor the names of its
25  * contributors may be used to endorse or promote products derived from
26  * this software without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39  */
40 
41 #include "arch/arm/mmu.hh"
42 
43 #include "arch/arm/isa.hh"
44 #include "arch/arm/reg_abi.hh"
46 #include "arch/arm/table_walker.hh"
47 #include "arch/arm/tlbi_op.hh"
48 #include "debug/TLB.hh"
49 #include "debug/TLBVerbose.hh"
50 #include "mem/packet_access.hh"
51 #include "sim/pseudo_inst.hh"
52 #include "sim/process.hh"
53 
54 namespace gem5
55 {
56 
57 using namespace ArmISA;
58 
59 MMU::MMU(const ArmMMUParams &p)
60  : BaseMMU(p),
61  itbStage2(p.stage2_itb), dtbStage2(p.stage2_dtb),
62  itbWalker(p.itb_walker), dtbWalker(p.dtb_walker),
63  itbStage2Walker(p.stage2_itb_walker),
64  dtbStage2Walker(p.stage2_dtb_walker),
65  test(nullptr),
66  miscRegContext(0),
67  s1State(this, false), s2State(this, true),
68  _attr(0),
69  _release(nullptr),
70  _hasWalkCache(false),
71  stats(this)
72 {
73  // Cache system-level properties
74  if (FullSystem) {
75  ArmSystem *arm_sys = dynamic_cast<ArmSystem *>(p.sys);
76  assert(arm_sys);
77  haveLargeAsid64 = arm_sys->haveLargeAsid64();
78  physAddrRange = arm_sys->physAddrRange();
79 
80  _release = arm_sys->releaseFS();
81  } else {
82  haveLargeAsid64 = false;
83  physAddrRange = 48;
84 
85  _release = p.release_se;
86  }
87 
88  m5opRange = p.sys->m5opRange();
89 }
90 
91 void
93 {
94  itbWalker->setMmu(this);
95  dtbWalker->setMmu(this);
96  itbStage2Walker->setMmu(this);
97  dtbStage2Walker->setMmu(this);
98 
101 
104 
105  BaseMMU::init();
106 
108 }
109 
110 bool
112 {
113  for (auto tlb : instruction) {
114  if (static_cast<TLB*>(tlb)->walkCache())
115  return true;
116  }
117  for (auto tlb : data) {
118  if (static_cast<TLB*>(tlb)->walkCache())
119  return true;
120  }
121  for (auto tlb : unified) {
122  if (static_cast<TLB*>(tlb)->walkCache())
123  return true;
124  }
125 
126  return false;
127 }
128 
129 void
131 {
132  s1State.miscRegValid = false;
133  s2State.miscRegValid = false;
134 }
135 
136 TLB *
137 MMU::getTlb(BaseMMU::Mode mode, bool stage2) const
138 {
139  if (mode == BaseMMU::Execute) {
140  if (stage2)
141  return itbStage2;
142  else
143  return getITBPtr();
144  } else {
145  if (stage2)
146  return dtbStage2;
147  else
148  return getDTBPtr();
149  }
150 }
151 
152 TableWalker *
154 {
155  if (mode == BaseMMU::Execute) {
156  if (stage2)
157  return itbStage2Walker;
158  else
159  return itbWalker;
160  } else {
161  if (stage2)
162  return dtbStage2Walker;
163  else
164  return dtbWalker;
165  }
166 }
167 
168 bool
170 {
171  CachedState& state = updateMiscReg(tc, NormalTran, false);
172 
173  auto tlb = getTlb(BaseMMU::Read, state.directToStage2);
174 
175  TlbEntry::Lookup lookup_data;
176 
177  lookup_data.va = va;
178  lookup_data.asn = state.asid;
179  lookup_data.ignoreAsn = false;
180  lookup_data.vmid = state.vmid;
181  lookup_data.hyp = state.isHyp;
182  lookup_data.secure = state.isSecure;
183  lookup_data.functional = true;
184  lookup_data.targetEL = state.aarch64 ? state.aarch64EL : EL1;
185  lookup_data.inHost = false;
186  lookup_data.mode = BaseMMU::Read;
187 
188  TlbEntry *e = tlb->multiLookup(lookup_data);
189 
190  if (!e)
191  return false;
192  pa = e->pAddr(va);
193  return true;
194 }
195 
196 void
198 {
199  s1State.miscRegValid = false;
200  s1State.computeAddrTop.flush();
201  s2State.computeAddrTop.flush();
202 }
203 
204 Fault
206  ThreadContext *tc, Mode mode) const
207 {
208  const Addr paddr = req->getPaddr();
209 
210  if (m5opRange.contains(paddr)) {
211  uint8_t func;
213  req->setLocalAccessor(
214  [func, mode](ThreadContext *tc, PacketPtr pkt) -> Cycles
215  {
216  uint64_t ret;
217  if (inAArch64(tc))
218  pseudo_inst::pseudoInst<RegABI64>(tc, func, ret);
219  else
220  pseudo_inst::pseudoInst<RegABI32>(tc, func, ret);
221 
222  if (mode == Read)
223  pkt->setLE(ret);
224 
225  return Cycles(1);
226  }
227  );
228  }
229 
230  return NoFault;
231 }
232 
233 
234 Fault
236  Translation *translation, bool &delay, bool timing,
238 {
239  updateMiscReg(tc, NormalTran, state.isStage2);
240  Addr vaddr_tainted = req->getVaddr();
241  Addr vaddr = 0;
242  if (state.aarch64) {
243  vaddr = purifyTaggedAddr(vaddr_tainted, tc, state.aarch64EL,
244  static_cast<TCR>(state.ttbcr), mode==Execute, state);
245  } else {
246  vaddr = vaddr_tainted;
247  }
248  Request::Flags flags = req->getFlags();
249 
250  bool is_fetch = (mode == Execute);
251  bool is_write = (mode == Write);
252 
253  if (!is_fetch) {
254  if (state.sctlr.a || !(flags & AllowUnaligned)) {
255  if (vaddr & mask(flags & AlignmentMask)) {
256  // LPAE is always disabled in SE mode
257  return std::make_shared<DataAbort>(
258  vaddr_tainted,
262  }
263  }
264  }
265 
266  Addr paddr;
267  Process *p = tc->getProcessPtr();
268 
269  if (!p->pTable->translate(vaddr, paddr))
270  return std::make_shared<GenericPageTableFault>(vaddr_tainted);
271  req->setPaddr(paddr);
272 
273  return finalizePhysical(req, tc, mode);
274 }
275 
276 Fault
278  bool stage2)
279 {
280  return checkPermissions(te, req, mode, stage2 ? s2State : s1State);
281 }
282 
283 Fault
286 {
287  // a data cache maintenance instruction that operates by MVA does
288  // not generate a Data Abort exeception due to a Permission fault
289  if (req->isCacheMaintenance()) {
290  return NoFault;
291  }
292 
293  Addr vaddr = req->getVaddr(); // 32-bit don't have to purify
294  Request::Flags flags = req->getFlags();
295  bool is_fetch = (mode == Execute);
296  bool is_write = (mode == Write);
297  bool is_priv = state.isPriv && !(flags & UserMode);
298 
299  // Get the translation type from the actuall table entry
300  ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran
302 
303  // If this is the second stage of translation and the request is for a
304  // stage 1 page table walk then we need to check the HCR.PTW bit. This
305  // allows us to generate a fault if the request targets an area marked
306  // as a device or strongly ordered.
307  if (state.isStage2 && req->isPTWalk() && state.hcr.ptw &&
308  (te->mtype != TlbEntry::MemoryType::Normal)) {
309  return std::make_shared<DataAbort>(
310  vaddr, te->domain, is_write,
311  ArmFault::PermissionLL + te->lookupLevel,
312  state.isStage2, tranMethod);
313  }
314 
315  // Generate an alignment fault for unaligned data accesses to device or
316  // strongly ordered memory
317  if (!is_fetch) {
318  if (te->mtype != TlbEntry::MemoryType::Normal) {
319  if (vaddr & mask(flags & AlignmentMask)) {
320  stats.alignFaults++;
321  return std::make_shared<DataAbort>(
324  tranMethod);
325  }
326  }
327  }
328 
329  if (te->nonCacheable) {
330  // Prevent prefetching from I/O devices.
331  if (req->isPrefetch()) {
332  // Here we can safely use the fault status for the short
333  // desc. format in all cases
334  return std::make_shared<PrefetchAbort>(
336  state.isStage2, tranMethod);
337  }
338  }
339 
340  if (!te->longDescFormat) {
341  switch ((state.dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
342  case 0:
344  DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
345  " domain: %#x write:%d\n", state.dacr,
346  static_cast<uint8_t>(te->domain), is_write);
347  if (is_fetch) {
348  // Use PC value instead of vaddr because vaddr might
349  // be aligned to cache line and should not be the
350  // address reported in FAR
351  return std::make_shared<PrefetchAbort>(
352  req->getPC(),
353  ArmFault::DomainLL + te->lookupLevel,
354  state.isStage2, tranMethod);
355  } else
356  return std::make_shared<DataAbort>(
357  vaddr, te->domain, is_write,
358  ArmFault::DomainLL + te->lookupLevel,
359  state.isStage2, tranMethod);
360  case 1:
361  // Continue with permissions check
362  break;
363  case 2:
364  panic("UNPRED domain\n");
365  case 3:
366  return NoFault;
367  }
368  }
369 
370  // The 'ap' variable is AP[2:0] or {AP[2,1],1b'0}, i.e. always three bits
371  uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
372  uint8_t hap = te->hap;
373 
374  if (state.sctlr.afe == 1 || te->longDescFormat)
375  ap |= 1;
376 
377  bool abt;
378  bool isWritable = true;
379  // If this is a stage 2 access (eg for reading stage 1 page table entries)
380  // then don't perform the AP permissions check, we stil do the HAP check
381  // below.
382  if (state.isStage2) {
383  abt = false;
384  } else {
385  switch (ap) {
386  case 0:
387  DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
388  (int)state.sctlr.rs);
389  if (!state.sctlr.xp) {
390  switch ((int)state.sctlr.rs) {
391  case 2:
392  abt = is_write;
393  break;
394  case 1:
395  abt = is_write || !is_priv;
396  break;
397  case 0:
398  case 3:
399  default:
400  abt = true;
401  break;
402  }
403  } else {
404  abt = true;
405  }
406  break;
407  case 1:
408  abt = !is_priv;
409  break;
410  case 2:
411  abt = !is_priv && is_write;
412  isWritable = is_priv;
413  break;
414  case 3:
415  abt = false;
416  break;
417  case 4:
418  panic("UNPRED premissions\n");
419  case 5:
420  abt = !is_priv || is_write;
421  isWritable = false;
422  break;
423  case 6:
424  case 7:
425  abt = is_write;
426  isWritable = false;
427  break;
428  default:
429  panic("Unknown permissions %#x\n", ap);
430  }
431  }
432 
433  bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
434  bool xn = te->xn || (isWritable && state.sctlr.wxn) ||
435  (ap == 3 && state.sctlr.uwxn && is_priv);
436  if (is_fetch && (abt || xn ||
437  (te->longDescFormat && te->pxn && is_priv) ||
438  (state.isSecure && te->ns && state.scr.sif))) {
439  stats.permsFaults++;
440  DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
441  "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d \n",
442  ap, is_priv, is_write, te->ns,
443  state.scr.sif, state.sctlr.afe);
444  // Use PC value instead of vaddr because vaddr might be aligned to
445  // cache line and should not be the address reported in FAR
446  return std::make_shared<PrefetchAbort>(
447  req->getPC(),
448  ArmFault::PermissionLL + te->lookupLevel,
449  state.isStage2, tranMethod);
450  } else if (abt | hapAbt) {
451  stats.permsFaults++;
452  DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
453  " write:%d\n", ap, is_priv, is_write);
454  return std::make_shared<DataAbort>(
455  vaddr, te->domain, is_write,
456  ArmFault::PermissionLL + te->lookupLevel,
457  state.isStage2 | !abt, tranMethod);
458  }
459  return NoFault;
460 }
461 
462 Fault
464  ThreadContext *tc, bool stage2)
465 {
466  return checkPermissions64(te, req, mode, tc, stage2 ? s2State : s1State);
467 }
468 
469 Fault
472 {
473  assert(state.aarch64);
474 
475  // A data cache maintenance instruction that operates by VA does
476  // not generate a Permission fault unless:
477  // * It is a data cache invalidate (dc ivac) which requires write
478  // permissions to the VA, or
479  // * It is executed from EL0
480  if (req->isCacheClean() && state.aarch64EL != EL0 && !state.isStage2) {
481  return NoFault;
482  }
483 
484  Addr vaddr_tainted = req->getVaddr();
485  Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, state.aarch64EL,
486  static_cast<TCR>(state.ttbcr), mode==Execute, state);
487 
488  Request::Flags flags = req->getFlags();
489  bool is_fetch = (mode == Execute);
490  // Cache clean operations require read permissions to the specified VA
491  bool is_write = !req->isCacheClean() && mode == Write;
492  bool is_atomic = req->isAtomic();
493 
494  updateMiscReg(tc, state.curTranType, state.isStage2);
495 
496  // If this is the second stage of translation and the request is for a
497  // stage 1 page table walk then we need to check the HCR.PTW bit. This
498  // allows us to generate a fault if the request targets an area marked
499  // as a device or strongly ordered.
500  if (state.isStage2 && req->isPTWalk() && state.hcr.ptw &&
501  (te->mtype != TlbEntry::MemoryType::Normal)) {
502  return std::make_shared<DataAbort>(
503  vaddr_tainted, te->domain, is_write,
504  ArmFault::PermissionLL + te->lookupLevel,
505  state.isStage2, ArmFault::LpaeTran);
506  }
507 
508  // Generate an alignment fault for unaligned accesses to device or
509  // strongly ordered memory
510  if (!is_fetch) {
511  if (te->mtype != TlbEntry::MemoryType::Normal) {
512  if (vaddr & mask(flags & AlignmentMask)) {
513  stats.alignFaults++;
514  return std::make_shared<DataAbort>(
515  vaddr_tainted,
517  is_atomic ? false : is_write,
520  }
521  }
522  }
523 
524  if (te->nonCacheable) {
525  // Prevent prefetching from I/O devices.
526  if (req->isPrefetch()) {
527  // Here we can safely use the fault status for the short
528  // desc. format in all cases
529  return std::make_shared<PrefetchAbort>(
530  vaddr_tainted,
532  state.isStage2, ArmFault::LpaeTran);
533  }
534  }
535 
536  bool grant = false;
537  // grant_read is used for faults from an atomic instruction that
538  // both reads and writes from a memory location. From a ISS point
539  // of view they count as read if a read to that address would have
540  // generated the fault; they count as writes otherwise
541  bool grant_read = true;
542 
543  if (state.isStage2) {
544  std::tie(grant, grant_read) = s2PermBits64(te, req, mode, tc, state,
545  (!is_write && !is_fetch), is_write, is_fetch);
546  } else {
547  std::tie(grant, grant_read) = s1PermBits64(te, req, mode, tc, state,
548  (!is_write && !is_fetch), is_write, is_fetch);
549  }
550 
551  if (!grant) {
552  if (is_fetch) {
553  stats.permsFaults++;
554  DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
555  "ns:%d scr.sif:%d sctlr.afe: %d\n",
556  te->ns, state.scr.sif, state.sctlr.afe);
557  // Use PC value instead of vaddr because vaddr might be aligned to
558  // cache line and should not be the address reported in FAR
559  return std::make_shared<PrefetchAbort>(
560  req->getPC(),
561  ArmFault::PermissionLL + te->lookupLevel,
562  state.isStage2, ArmFault::LpaeTran);
563  } else {
564  stats.permsFaults++;
565  DPRINTF(TLB, "TLB Fault: Data abort on permission check."
566  "ns:%d", te->ns);
567  return std::make_shared<DataAbort>(
568  vaddr_tainted, te->domain,
569  (is_atomic && !grant_read) ? false : is_write,
570  ArmFault::PermissionLL + te->lookupLevel,
571  state.isStage2, ArmFault::LpaeTran);
572  }
573  }
574 
575  return NoFault;
576 }
577 
580  ThreadContext *tc, CachedState &state, bool r, bool w, bool x)
581 {
582  assert(ArmSystem::haveEL(tc, EL2) && state.aarch64EL != EL2);
583 
584  // In stage 2 we use the hypervisor access permission bits.
585  // The following permissions are described in ARM DDI 0487A.f
586  // D4-1802
587  bool grant = false;
588  bool grant_read = te->hap & 0b01;
589  bool grant_write = te->hap & 0b10;
590 
591  uint8_t xn = te->xn;
592  uint8_t pxn = te->pxn;
593 
594  if (ArmSystem::haveEL(tc, EL3) && state.isSecure &&
595  te->ns && state.scr.sif) {
596  xn = true;
597  }
598 
599  DPRINTF(TLBVerbose,
600  "Checking S2 permissions: hap:%d, xn:%d, pxn:%d, r:%d, "
601  "w:%d, x:%d\n", te->hap, xn, pxn, r, w, x);
602 
603  if (x) {
604  grant = grant_read && !xn;
605  } else if (req->isAtomic()) {
606  grant = grant_read || grant_write;
607  } else if (w) {
608  grant = grant_write;
609  } else if (r) {
610  grant = grant_read;
611  } else {
612  panic("Invalid Operation\n");
613  }
614 
615  return std::make_pair(grant, grant_read);
616 }
617 
620  ThreadContext *tc, CachedState &state, bool r, bool w, bool x)
621 {
622  bool grant = false, grant_read = true;
623 
624  const uint8_t ap = te->ap & 0b11; // 2-bit access protection field
625  const bool is_priv = state.isPriv && !(req->getFlags() & UserMode);
626 
627  bool wxn = state.sctlr.wxn;
628  uint8_t xn = te->xn;
629  uint8_t pxn = te->pxn;
630 
631  if (ArmSystem::haveEL(tc, EL3) && state.isSecure &&
632  te->ns && state.scr.sif) {
633  xn = true;
634  }
635 
636  DPRINTF(TLBVerbose, "Checking S1 permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
637  "w:%d, x:%d, is_priv: %d, wxn: %d\n", ap, xn,
638  pxn, r, w, x, is_priv, wxn);
639 
640  if (faultPAN(tc, ap, req, mode, is_priv, state)) {
641  return std::make_pair(false, false);
642  }
643 
644  ExceptionLevel regime = !is_priv ? EL0 : state.aarch64EL;
645  switch (regime) {
646  case EL0:
647  {
648  grant_read = ap & 0x1;
649  uint8_t perm = (ap << 2) | (xn << 1) | pxn;
650  switch (perm) {
651  case 0:
652  case 1:
653  case 8:
654  case 9:
655  grant = x;
656  break;
657  case 4:
658  case 5:
659  grant = r || w || (x && !wxn);
660  break;
661  case 6:
662  case 7:
663  grant = r || w;
664  break;
665  case 12:
666  case 13:
667  grant = r || x;
668  break;
669  case 14:
670  case 15:
671  grant = r;
672  break;
673  default:
674  grant = false;
675  }
676  }
677  break;
678  case EL1:
679  {
680  uint8_t perm = (ap << 2) | (xn << 1) | pxn;
681  switch (perm) {
682  case 0:
683  case 2:
684  grant = r || w || (x && !wxn);
685  break;
686  case 1:
687  case 3:
688  case 4:
689  case 5:
690  case 6:
691  case 7:
692  // regions that are writeable at EL0 should not be
693  // executable at EL1
694  grant = r || w;
695  break;
696  case 8:
697  case 10:
698  case 12:
699  case 14:
700  grant = r || x;
701  break;
702  case 9:
703  case 11:
704  case 13:
705  case 15:
706  grant = r;
707  break;
708  default:
709  grant = false;
710  }
711  }
712  break;
713  case EL2:
714  case EL3:
715  {
716  uint8_t perm = (ap & 0x2) | xn;
717  switch (perm) {
718  case 0:
719  grant = r || w || (x && !wxn);
720  break;
721  case 1:
722  grant = r || w;
723  break;
724  case 2:
725  grant = r || x;
726  break;
727  case 3:
728  grant = r;
729  break;
730  default:
731  grant = false;
732  }
733  }
734  break;
735  }
736 
737  return std::make_pair(grant, grant_read);
738 }
739 
740 bool
741 MMU::faultPAN(ThreadContext *tc, uint8_t ap, const RequestPtr &req, Mode mode,
742  const bool is_priv, CachedState &state)
743 {
744  bool exception = false;
745  switch (state.aarch64EL) {
746  case EL0:
747  break;
748  case EL1:
749  if (checkPAN(tc, ap, req, mode, is_priv, state)) {
750  exception = true;;
751  }
752  break;
753  case EL2:
754  if (state.hcr.e2h && checkPAN(tc, ap, req, mode, is_priv, state)) {
755  exception = true;;
756  }
757  break;
758  case EL3:
759  break;
760  }
761 
762  return exception;
763 }
764 
765 bool
766 MMU::checkPAN(ThreadContext *tc, uint8_t ap, const RequestPtr &req, Mode mode,
767  const bool is_priv, CachedState &state)
768 {
769  // The PAN bit has no effect on:
770  // 1) Instruction accesses.
771  // 2) Data Cache instructions other than DC ZVA
772  // 3) Address translation instructions, other than ATS1E1RP and
773  // ATS1E1WP when ARMv8.2-ATS1E1 is implemented. (Unimplemented in
774  // gem5)
775  // 4) Instructions to be treated as unprivileged, unless
776  // HCR_EL2.{E2H, TGE} == {1, 0}
777  if (HaveExt(tc, ArmExtension::FEAT_PAN) && state.cpsr.pan && (ap & 0x1) &&
778  mode != BaseMMU::Execute) {
779 
780  if (req->isCacheMaintenance() &&
781  !(req->getFlags() & Request::CACHE_BLOCK_ZERO)) {
782  // Cache maintenance other than DC ZVA
783  return false;
784  } else if (!is_priv && !(state.hcr.e2h && !state.hcr.tge)) {
785  // Treated as unprivileged unless HCR_EL2.{E2H, TGE} == {1, 0}
786  return false;
787  }
788  return true;
789  }
790 
791  return false;
792 }
793 
794 Addr
796  TCR tcr, bool is_inst, CachedState& state)
797 {
798  const bool selbit = bits(vaddr_tainted, 55);
799 
800  // Call the memoized version of computeAddrTop
801  const auto topbit = state.computeAddrTop(tc, selbit, is_inst, tcr, el);
802 
803  return maskTaggedAddr(vaddr_tainted, tc, el, topbit);
804 }
805 
806 Fault
808  ArmTranslationType tran_type, Addr vaddr, bool long_desc_format,
810 {
811  bool is_fetch = (mode == Execute);
812  bool is_atomic = req->isAtomic();
813  req->setPaddr(vaddr);
814  // When the MMU is off the security attribute corresponds to the
815  // security state of the processor
816  if (state.isSecure)
817  req->setFlags(Request::SECURE);
818 
819  if (state.aarch64) {
820  bool selbit = bits(vaddr, 55);
821  TCR tcr1 = tc->readMiscReg(MISCREG_TCR_EL1);
822  int topbit = computeAddrTop(tc, selbit, is_fetch, tcr1, currEL(tc));
823  int addr_sz = bits(vaddr, topbit, physAddrRange);
824  if (addr_sz != 0){
825  Fault f;
826  if (is_fetch)
827  f = std::make_shared<PrefetchAbort>(vaddr,
828  ArmFault::AddressSizeLL, state.isStage2,
830  else
831  f = std::make_shared<DataAbort>( vaddr,
833  is_atomic ? false : mode==Write,
834  ArmFault::AddressSizeLL, state.isStage2,
836  return f;
837  }
838  }
839 
840  // @todo: double check this (ARM ARM issue C B3.2.1)
841  if (long_desc_format || state.sctlr.tre == 0 || state.nmrr.ir0 == 0 ||
842  state.nmrr.or0 == 0 || state.prrr.tr0 != 0x2) {
843  if (!req->isCacheMaintenance()) {
844  req->setFlags(Request::UNCACHEABLE);
845  }
846  req->setFlags(Request::STRICT_ORDER);
847  }
848 
849  // Set memory attributes
850  TlbEntry temp_te;
851  temp_te.ns = !state.isSecure;
852  bool dc = (HaveExt(tc, ArmExtension::FEAT_VHE) &&
853  state.hcr.e2h == 1 && state.hcr.tge == 1) ? 0: state.hcr.dc;
854  bool i_cacheability = state.sctlr.i && !state.sctlr.m;
855  if (state.isStage2 || !dc || state.isSecure ||
856  (state.isHyp && !(tran_type & S1CTran))) {
857 
858  temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
860  temp_te.innerAttrs = i_cacheability? 0x2: 0x0;
861  temp_te.outerAttrs = i_cacheability? 0x2: 0x0;
862  temp_te.shareable = true;
863  temp_te.outerShareable = true;
864  } else {
866  temp_te.innerAttrs = 0x3;
867  temp_te.outerAttrs = 0x3;
868  temp_te.shareable = false;
869  temp_te.outerShareable = false;
870  }
871  temp_te.setAttributes(long_desc_format);
872  DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: "
873  "%d, innerAttrs: %d, outerAttrs: %d, stage2: %d\n",
874  temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
875  state.isStage2);
876  setAttr(temp_te.attributes);
877 
879 }
880 
881 Fault
883  Translation *translation, bool &delay, bool timing,
884  bool functional, Addr vaddr,
886 {
887  TlbEntry *te = NULL;
888  bool is_fetch = (mode == Execute);
889  TlbEntry mergeTe;
890 
891  Request::Flags flags = req->getFlags();
892  Addr vaddr_tainted = req->getVaddr();
893 
894  Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
895  functional, &mergeTe, state);
896  // only proceed if we have a valid table entry
897  if (!isCompleteTranslation(te) && (fault == NoFault)) delay = true;
898 
899  // If we have the table entry transfer some of the attributes to the
900  // request that triggered the translation
901  if (isCompleteTranslation(te)) {
902  // Set memory attributes
903  DPRINTF(TLBVerbose,
904  "Setting memory attributes: shareable: %d, innerAttrs: %d, "
905  "outerAttrs: %d, mtype: %d, stage2: %d\n",
906  te->shareable, te->innerAttrs, te->outerAttrs,
907  static_cast<uint8_t>(te->mtype), state.isStage2);
908  setAttr(te->attributes);
909 
910  if (te->nonCacheable && !req->isCacheMaintenance())
911  req->setFlags(Request::UNCACHEABLE);
912 
913  // Require requests to be ordered if the request goes to
914  // strongly ordered or device memory (i.e., anything other
915  // than normal memory requires strict order).
916  if (te->mtype != TlbEntry::MemoryType::Normal)
917  req->setFlags(Request::STRICT_ORDER);
918 
919  Addr pa = te->pAddr(vaddr);
920  req->setPaddr(pa);
921 
922  if (state.isSecure && !te->ns) {
923  req->setFlags(Request::SECURE);
924  }
925  if (!is_fetch && fault == NoFault &&
926  (vaddr & mask(flags & AlignmentMask)) &&
927  (te->mtype != TlbEntry::MemoryType::Normal)) {
928  // Unaligned accesses to Device memory should always cause an
929  // abort regardless of sctlr.a
930  stats.alignFaults++;
931  bool is_write = (mode == Write);
932  return std::make_shared<DataAbort>(
933  vaddr_tainted,
936  tranMethod);
937  }
938 
939  // Check for a trickbox generated address fault
940  if (fault == NoFault)
941  fault = testTranslation(req, mode, te->domain, state);
942  }
943 
944  if (fault == NoFault) {
945  // Don't try to finalize a physical address unless the
946  // translation has completed (i.e., there is a table entry).
947  return te ? finalizePhysical(req, tc, mode) : NoFault;
948  } else {
949  return fault;
950  }
951 }
952 
953 Fault
955  Translation *translation, bool &delay, bool timing,
956  ArmTranslationType tran_type, bool functional,
958 {
959  // No such thing as a functional timing access
960  assert(!(timing && functional));
961 
962  Addr vaddr_tainted = req->getVaddr();
963  Addr vaddr = 0;
964  if (state.aarch64) {
965  vaddr = purifyTaggedAddr(vaddr_tainted, tc, state.aarch64EL,
966  static_cast<TCR>(state.ttbcr), mode==Execute, state);
967  } else {
968  vaddr = vaddr_tainted;
969  }
970  Request::Flags flags = req->getFlags();
971 
972  bool is_fetch = (mode == Execute);
973  bool is_write = (mode == Write);
974  bool long_desc_format = state.aarch64 || longDescFormatInUse(tc);
975  ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
977 
978  DPRINTF(TLBVerbose,
979  "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
980  state.isPriv, flags & UserMode, state.isSecure,
981  tran_type & S1S2NsTran);
982 
983  DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
984  "flags %#lx tranType 0x%x\n", vaddr_tainted, mode,
985  state.isStage2, state.scr, state.sctlr, flags, tran_type);
986 
987  if (!state.isStage2) {
988  if ((req->isInstFetch() && (!state.sctlr.i)) ||
989  ((!req->isInstFetch()) && (!state.sctlr.c))){
990  if (!req->isCacheMaintenance()) {
991  req->setFlags(Request::UNCACHEABLE);
992  }
993  req->setFlags(Request::STRICT_ORDER);
994  }
995  }
996  if (!is_fetch) {
997  if (state.sctlr.a || !(flags & AllowUnaligned)) {
998  if (vaddr & mask(flags & AlignmentMask)) {
999  stats.alignFaults++;
1000  return std::make_shared<DataAbort>(
1001  vaddr_tainted,
1003  ArmFault::AlignmentFault, state.isStage2,
1004  tranMethod);
1005  }
1006  }
1007  }
1008 
1009  bool vm = state.hcr.vm;
1010  if (HaveExt(tc, ArmExtension::FEAT_VHE) &&
1011  state.hcr.e2h == 1 && state.hcr.tge == 1)
1012  vm = 0;
1013  else if (state.hcr.dc == 1)
1014  vm = 1;
1015 
1016  Fault fault = NoFault;
1017  // If guest MMU is off or hcr.vm=0 go straight to stage2
1018  if ((state.isStage2 && !vm) || (!state.isStage2 && !state.sctlr.m)) {
1019  fault = translateMmuOff(tc, req, mode, tran_type, vaddr,
1020  long_desc_format, state);
1021  } else {
1022  DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
1023  state.isStage2 ? "IPA" : "VA", vaddr_tainted, state.asid);
1024  // Translation enabled
1025  fault = translateMmuOn(tc, req, mode, translation, delay, timing,
1026  functional, vaddr, tranMethod, state);
1027  }
1028 
1029  // Check for Debug Exceptions
1031 
1032  if (sd->enabled() && fault == NoFault) {
1033  fault = sd->testDebug(tc, req, mode);
1034  }
1035 
1036  return fault;
1037 }
1038 
1039 Fault
1041  ArmTranslationType tran_type)
1042 {
1043  return translateAtomic(req, tc, mode, tran_type, false);
1044 }
1045 
1046 Fault
1048  ArmTranslationType tran_type, bool stage2)
1049 {
1050  auto& state = updateMiscReg(tc, tran_type, stage2);
1051 
1052  bool delay = false;
1053  Fault fault;
1054  if (FullSystem)
1055  fault = translateFs(req, tc, mode, NULL, delay, false,
1056  tran_type, false, state);
1057  else
1058  fault = translateSe(req, tc, mode, NULL, delay, false, state);
1059  assert(!delay);
1060  return fault;
1061 }
1062 
1063 Fault
1065 {
1066  return translateFunctional(req, tc, mode, NormalTran, false);
1067 }
1068 
1069 Fault
1071  ArmTranslationType tran_type)
1072 {
1073  return translateFunctional(req, tc, mode, tran_type, false);
1074 }
1075 
1076 Fault
1078  ArmTranslationType tran_type, bool stage2)
1079 {
1080  auto& state = updateMiscReg(tc, tran_type, stage2);
1081 
1082  bool delay = false;
1083  Fault fault;
1084  if (FullSystem)
1085  fault = translateFs(req, tc, mode, NULL, delay, false,
1086  tran_type, true, state);
1087  else
1088  fault = translateSe(req, tc, mode, NULL, delay, false, state);
1089  assert(!delay);
1090  return fault;
1091 }
1092 
1093 void
1095  Translation *translation, Mode mode, ArmTranslationType tran_type,
1096  bool stage2)
1097 {
1098  auto& state = updateMiscReg(tc, tran_type, stage2);
1099 
1100  assert(translation);
1101 
1102  translateComplete(req, tc, translation, mode, tran_type,
1103  stage2, state);
1104 }
1105 
1106 Fault
1108  Translation *translation, Mode mode, ArmTranslationType tran_type,
1109  bool call_from_s2)
1110 {
1111  return translateComplete(req, tc, translation, mode, tran_type,
1112  call_from_s2, s1State);
1113 }
1114 
1115 Fault
1117  Translation *translation, Mode mode, ArmTranslationType tran_type,
1118  bool call_from_s2, CachedState &state)
1119 {
1120  bool delay = false;
1121  Fault fault;
1122  if (FullSystem)
1123  fault = translateFs(req, tc, mode, translation, delay, true, tran_type,
1124  false, state);
1125  else
1126  fault = translateSe(req, tc, mode, translation, delay, true, state);
1127 
1128  DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n", delay,
1129  fault != NoFault);
1130  // If we have a translation, and we're not in the middle of doing a stage
1131  // 2 translation tell the translation that we've either finished or its
1132  // going to take a while. By not doing this when we're in the middle of a
1133  // stage 2 translation we prevent marking the translation as delayed twice,
1134  // one when the translation starts and again when the stage 1 translation
1135  // completes.
1136 
1137  if (translation && (call_from_s2 || !state.stage2Req || req->hasPaddr() ||
1138  fault != NoFault)) {
1139  if (!delay)
1140  translation->finish(fault, req, tc, mode);
1141  else
1142  translation->markDelayed();
1143  }
1144  return fault;
1145 }
1146 
1147 vmid_t
1149 {
1150  AA64MMFR1 mmfr1 = tc->readMiscReg(MISCREG_ID_AA64MMFR1_EL1);
1151  VTCR_t vtcr = tc->readMiscReg(MISCREG_VTCR_EL2);
1152  vmid_t vmid = 0;
1153 
1154  switch (mmfr1.vmidbits) {
1155  case 0b0000:
1156  // 8 bits
1157  vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48);
1158  break;
1159  case 0b0010:
1160  if (vtcr.vs && ELIs64(tc, EL2)) {
1161  // 16 bits
1162  vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 63, 48);
1163  } else {
1164  // 8 bits
1165  vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48);
1166  }
1167  break;
1168  default:
1169  panic("Reserved ID_AA64MMFR1_EL1.VMIDBits value: %#x",
1170  mmfr1.vmidbits);
1171  }
1172 
1173  return vmid;
1174 }
1175 
1178  ArmTranslationType tran_type, bool stage2)
1179 {
1180  // check if the regs have changed, or the translation mode is different.
1181  // NOTE: the tran type doesn't affect stage 2 TLB's as they only handle
1182  // one type of translation anyway
1183 
1184  auto& state = stage2 ? s2State : s1State;
1185  if (state.miscRegValid && miscRegContext == tc->contextId() &&
1186  ((tran_type == state.curTranType) || stage2)) {
1187 
1188  } else {
1189  DPRINTF(TLBVerbose, "TLB variables changed!\n");
1190  state.updateMiscReg(tc, tran_type);
1191 
1192  itbStage2->setVMID(state.vmid);
1193  dtbStage2->setVMID(state.vmid);
1194 
1195  for (auto tlb : instruction) {
1196  static_cast<TLB*>(tlb)->setVMID(state.vmid);
1197  }
1198  for (auto tlb : data) {
1199  static_cast<TLB*>(tlb)->setVMID(state.vmid);
1200  }
1201  for (auto tlb : unified) {
1202  static_cast<TLB*>(tlb)->setVMID(state.vmid);
1203  }
1204 
1205  miscRegContext = tc->contextId();
1206  }
1207 
1208  if (state.directToStage2) {
1209  s2State.updateMiscReg(tc, tran_type);
1210  return s2State;
1211  } else {
1212  return state;
1213  }
1214 }
1215 
1216 void
1218  ArmTranslationType tran_type)
1219 {
1220  cpsr = tc->readMiscReg(MISCREG_CPSR);
1221 
1222  // Dependencies: SCR/SCR_EL3, CPSR
1223  isSecure = ArmISA::isSecure(tc) &&
1224  !(tran_type & HypMode) && !(tran_type & S1S2NsTran);
1225 
1226  aarch64EL = tranTypeEL(cpsr, tran_type);
1227  aarch64 = isStage2 ?
1228  ELIs64(tc, EL2) :
1229  ELIs64(tc, aarch64EL == EL0 ? EL1 : aarch64EL);
1230 
1231  hcr = tc->readMiscReg(MISCREG_HCR_EL2);
1232  if (aarch64) { // AArch64
1233  // determine EL we need to translate in
1234  switch (aarch64EL) {
1235  case EL0:
1236  if (HaveExt(tc, ArmExtension::FEAT_VHE) &&
1237  hcr.tge == 1 && hcr.e2h == 1) {
1238  // VHE code for EL2&0 regime
1239  sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
1240  ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
1241  uint64_t ttbr_asid = ttbcr.a1 ?
1244  asid = bits(ttbr_asid,
1245  (mmu->haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
1246 
1247  } else {
1248  sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
1249  ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
1250  uint64_t ttbr_asid = ttbcr.a1 ?
1253  asid = bits(ttbr_asid,
1254  (mmu->haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
1255 
1256  }
1257  break;
1258  case EL1:
1259  {
1260  sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
1261  ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
1262  uint64_t ttbr_asid = ttbcr.a1 ?
1265  asid = bits(ttbr_asid,
1266  (mmu->haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
1267  }
1268  break;
1269  case EL2:
1270  sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
1271  ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
1272  if (hcr.e2h == 1) {
1273  // VHE code for EL2&0 regime
1274  uint64_t ttbr_asid = ttbcr.a1 ?
1277  asid = bits(ttbr_asid,
1278  (mmu->haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
1279  } else {
1280  asid = -1;
1281  }
1282  break;
1283  case EL3:
1284  sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
1285  ttbcr = tc->readMiscReg(MISCREG_TCR_EL3);
1286  asid = -1;
1287  break;
1288  }
1289 
1290  scr = tc->readMiscReg(MISCREG_SCR_EL3);
1291  isPriv = aarch64EL != EL0;
1292  if (mmu->release()->has(ArmExtension::VIRTUALIZATION)) {
1293  vmid = getVMID(tc);
1294  isHyp = aarch64EL == EL2;
1295  isHyp |= tran_type & HypMode;
1296  isHyp &= (tran_type & S1S2NsTran) == 0;
1297  isHyp &= (tran_type & S1CTran) == 0;
1298  bool vm = hcr.vm;
1299  if (HaveExt(tc, ArmExtension::FEAT_VHE) &&
1300  hcr.e2h == 1 && hcr.tge ==1) {
1301  vm = 0;
1302  }
1303 
1304  if (hcr.e2h == 1 && (aarch64EL == EL2
1305  || (hcr.tge ==1 && aarch64EL == EL0))) {
1306  isHyp = true;
1307  directToStage2 = false;
1308  stage2Req = false;
1309  stage2DescReq = false;
1310  } else {
1311  // Work out if we should skip the first stage of translation and go
1312  // directly to stage 2. This value is cached so we don't have to
1313  // compute it for every translation.
1314  bool sec = !isSecure || (isSecure && IsSecureEL2Enabled(tc));
1315  stage2Req = isStage2 ||
1316  (vm && !isHyp && sec &&
1317  !(tran_type & S1CTran) && (aarch64EL < EL2) &&
1318  !(tran_type & S1E1Tran)); // <--- FIX THIS HACK
1319  stage2DescReq = isStage2 || (vm && !isHyp && sec &&
1320  (aarch64EL < EL2));
1321  directToStage2 = !isStage2 && stage2Req && !sctlr.m;
1322  }
1323  } else {
1324  vmid = 0;
1325  isHyp = false;
1326  directToStage2 = false;
1327  stage2Req = false;
1328  stage2DescReq = false;
1329  }
1330  } else { // AArch32
1331  sctlr = tc->readMiscReg(snsBankedIndex(MISCREG_SCTLR, tc,
1332  !isSecure));
1333  ttbcr = tc->readMiscReg(snsBankedIndex(MISCREG_TTBCR, tc,
1334  !isSecure));
1335  scr = tc->readMiscReg(MISCREG_SCR);
1336  isPriv = cpsr.mode != MODE_USER;
1337  if (longDescFormatInUse(tc)) {
1338  uint64_t ttbr_asid = tc->readMiscReg(
1339  snsBankedIndex(ttbcr.a1 ? MISCREG_TTBR1 :
1340  MISCREG_TTBR0,
1341  tc, !isSecure));
1342  asid = bits(ttbr_asid, 55, 48);
1343  } else { // Short-descriptor translation table format in use
1344  CONTEXTIDR context_id = tc->readMiscReg(snsBankedIndex(
1346  asid = context_id.asid;
1347  }
1348  prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR, tc,
1349  !isSecure));
1350  nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR, tc,
1351  !isSecure));
1352  dacr = tc->readMiscReg(snsBankedIndex(MISCREG_DACR, tc,
1353  !isSecure));
1354  hcr = tc->readMiscReg(MISCREG_HCR);
1355 
1356  if (mmu->release()->has(ArmExtension::VIRTUALIZATION)) {
1357  vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
1358  isHyp = cpsr.mode == MODE_HYP;
1359  isHyp |= tran_type & HypMode;
1360  isHyp &= (tran_type & S1S2NsTran) == 0;
1361  isHyp &= (tran_type & S1CTran) == 0;
1362  if (isHyp) {
1363  sctlr = tc->readMiscReg(MISCREG_HSCTLR);
1364  }
1365  // Work out if we should skip the first stage of translation and go
1366  // directly to stage 2. This value is cached so we don't have to
1367  // compute it for every translation.
1368  bool sec = !isSecure || (isSecure && IsSecureEL2Enabled(tc));
1369  stage2Req = hcr.vm && !isStage2 && !isHyp && sec &&
1370  !(tran_type & S1CTran);
1371  stage2DescReq = hcr.vm && !isStage2 && !isHyp && sec;
1372  directToStage2 = stage2Req && !sctlr.m;
1373  } else {
1374  vmid = 0;
1375  stage2Req = false;
1376  isHyp = false;
1377  directToStage2 = false;
1378  stage2DescReq = false;
1379  }
1380  }
1381  miscRegValid = true;
1382  curTranType = tran_type;
1383 }
1384 
1387 {
1388  switch (type) {
1389  case S1E0Tran:
1390  case S12E0Tran:
1391  return EL0;
1392 
1393  case S1E1Tran:
1394  case S12E1Tran:
1395  return EL1;
1396 
1397  case S1E2Tran:
1398  return EL2;
1399 
1400  case S1E3Tran:
1401  return EL3;
1402 
1403  case NormalTran:
1404  case S1CTran:
1405  case S1S2NsTran:
1406  case HypMode:
1407  return currEL(cpsr);
1408 
1409  default:
1410  panic("Unknown translation mode!\n");
1411  }
1412 }
1413 
1414 Fault
1416  Translation *translation, bool timing, bool functional,
1417  bool is_secure, ArmTranslationType tran_type,
1418  bool stage2)
1419 {
1420  return getTE(te, req, tc, mode, translation, timing, functional,
1421  is_secure, tran_type, stage2 ? s2State : s1State);
1422 }
1423 
1424 TlbEntry*
1425 MMU::lookup(Addr va, uint16_t asid, vmid_t vmid, bool hyp, bool secure,
1426  bool functional, bool ignore_asn, ExceptionLevel target_el,
1427  bool in_host, bool stage2, BaseMMU::Mode mode)
1428 {
1429  TLB *tlb = getTlb(mode, stage2);
1430 
1431  TlbEntry::Lookup lookup_data;
1432 
1433  lookup_data.va = va;
1434  lookup_data.asn = asid;
1435  lookup_data.ignoreAsn = ignore_asn;
1436  lookup_data.vmid = vmid;
1437  lookup_data.hyp = hyp;
1438  lookup_data.secure = secure;
1439  lookup_data.functional = functional;
1440  lookup_data.targetEL = target_el;
1441  lookup_data.inHost = in_host;
1442  lookup_data.mode = mode;
1443 
1444  return tlb->multiLookup(lookup_data);
1445 }
1446 
1447 Fault
1449  Translation *translation, bool timing, bool functional,
1450  bool is_secure, ArmTranslationType tran_type,
1451  CachedState& state)
1452 {
1453  // In a 2-stage system, the IPA->PA translation can be started via this
1454  // call so make sure the miscRegs are correct.
1455  if (state.isStage2) {
1456  updateMiscReg(tc, tran_type, true);
1457  }
1458 
1459  Addr vaddr_tainted = req->getVaddr();
1460  Addr vaddr = 0;
1461  ExceptionLevel target_el = state.aarch64 ? state.aarch64EL : EL1;
1462  if (state.aarch64) {
1463  vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el,
1464  static_cast<TCR>(state.ttbcr), mode==Execute, state);
1465  } else {
1466  vaddr = vaddr_tainted;
1467  }
1468 
1469  *te = lookup(vaddr, state.asid, state.vmid, state.isHyp, is_secure, false,
1470  false, target_el, false, state.isStage2, mode);
1471 
1472  if (!isCompleteTranslation(*te)) {
1473  if (req->isPrefetch()) {
1474  // if the request is a prefetch don't attempt to fill the TLB or go
1475  // any further with the memory access (here we can safely use the
1476  // fault status for the short desc. format in all cases)
1478  return std::make_shared<PrefetchAbort>(
1479  vaddr_tainted, ArmFault::PrefetchTLBMiss, state.isStage2);
1480  }
1481 
1482  // start translation table walk, pass variables rather than
 1483  // re-retrieving in table walker for speed
1484  DPRINTF(TLB,
1485  "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
1486  vaddr_tainted, state.asid, state.vmid);
1487 
1488  Fault fault;
1489  fault = getTableWalker(mode, state.isStage2)->walk(
1490  req, tc, state.asid, state.vmid, state.isHyp, mode,
1491  translation, timing, functional, is_secure,
1492  tran_type, state.stage2DescReq, *te);
1493 
1494  // for timing mode, return and wait for table walk,
1495  if (timing || fault != NoFault) {
1496  return fault;
1497  }
1498 
1499  *te = lookup(vaddr, state.asid, state.vmid, state.isHyp, is_secure,
1500  true, false, target_el, false, state.isStage2, mode);
1501  assert(*te);
1502  }
1503  return NoFault;
1504 }
1505 
1506 Fault
1508  ThreadContext *tc, Mode mode,
1509  Translation *translation, bool timing, bool functional,
1510  TlbEntry *mergeTe, CachedState &state)
1511 {
1512  Fault fault;
1513 
1514  if (state.isStage2) {
1515  // We are already in the stage 2 TLB. Grab the table entry for stage
1516  // 2 only. We are here because stage 1 translation is disabled.
1517  TlbEntry *s2_te = nullptr;
1518  // Get the stage 2 table entry
1519  fault = getTE(&s2_te, req, tc, mode, translation, timing, functional,
1520  state.isSecure, state.curTranType, state);
1521  // Check permissions of stage 2
1522  if (isCompleteTranslation(s2_te) && (fault == NoFault)) {
1523  if (state.aarch64)
1524  fault = checkPermissions64(s2_te, req, mode, tc, state);
1525  else
1526  fault = checkPermissions(s2_te, req, mode, state);
1527  }
1528  *te = s2_te;
1529  return fault;
1530  }
1531 
1532  TlbEntry *s1_te = nullptr;
1533 
1534  Addr vaddr_tainted = req->getVaddr();
1535 
1536  // Get the stage 1 table entry
1537  fault = getTE(&s1_te, req, tc, mode, translation, timing, functional,
1538  state.isSecure, state.curTranType, state);
1539  // only proceed if we have a valid table entry
1540  if (isCompleteTranslation(s1_te) && (fault == NoFault)) {
1541  // Check stage 1 permissions before checking stage 2
1542  if (state.aarch64)
1543  fault = checkPermissions64(s1_te, req, mode, tc, state);
1544  else
1545  fault = checkPermissions(s1_te, req, mode, state);
1546  if (state.stage2Req & (fault == NoFault)) {
1547  Stage2LookUp *s2_lookup = new Stage2LookUp(this, *s1_te,
1548  req, translation, mode, timing, functional, state.isSecure,
1549  state.curTranType);
1550  fault = s2_lookup->getTe(tc, mergeTe);
1551  if (s2_lookup->isComplete()) {
1552  *te = mergeTe;
1553  // We've finished with the lookup so delete it
1554  delete s2_lookup;
1555  } else {
1556  // The lookup hasn't completed, so we can't delete it now. We
1557  // get round this by asking the object to self delete when the
1558  // translation is complete.
1559  s2_lookup->setSelfDelete();
1560  }
1561  } else {
1562  // This case deals with an S1 hit (or bypass), followed by
1563  // an S2 hit-but-perms issue
1564  if (state.isStage2) {
1565  DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
1566  vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0,
1567  fault);
1568  if (fault != NoFault) {
1569  auto arm_fault = reinterpret_cast<ArmFault*>(fault.get());
1570  arm_fault->annotate(ArmFault::S1PTW, false);
1571  arm_fault->annotate(ArmFault::OVA, vaddr_tainted);
1572  }
1573  }
1574  *te = s1_te;
1575  }
1576  }
1577  return fault;
1578 }
1579 
1580 bool
1582 {
1583  return entry && !entry->partial;
1584 }
1585 
1586 void
1588 {
1589  BaseMMU::takeOverFrom(old_mmu);
1590 
1591  auto *ommu = dynamic_cast<MMU*>(old_mmu);
1592  assert(ommu);
1593 
1594  _attr = ommu->_attr;
1595 
1596  s1State = ommu->s1State;
1597  s2State = ommu->s2State;
1598 }
1599 
1600 void
1602 {
1603  if (!_ti) {
1604  test = nullptr;
1605  } else {
1606  TlbTestInterface *ti(dynamic_cast<TlbTestInterface *>(_ti));
1607  fatal_if(!ti, "%s is not a valid ARM TLB tester\n", _ti->name());
1608  test = ti;
1609  }
1610 }
1611 
1612 Fault
1615 {
1616  if (!test || !req->hasSize() || req->getSize() == 0 ||
1617  req->isCacheMaintenance()) {
1618  return NoFault;
1619  } else {
1620  return test->translationCheck(req, state.isPriv, mode, domain);
1621  }
1622 }
1623 
1624 Fault
1625 MMU::testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode,
1626  TlbEntry::DomainType domain, LookupLevel lookup_level,
1627  bool stage2)
1628 {
1629  return testWalk(pa, size, va, is_secure, mode, domain, lookup_level,
1630  stage2 ? s2State : s1State);
1631 }
1632 
1633 Fault
1634 MMU::testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode,
1635  TlbEntry::DomainType domain, LookupLevel lookup_level,
1636  CachedState &state)
1637 {
1638  if (!test) {
1639  return NoFault;
1640  } else {
1641  return test->walkCheck(pa, size, va, is_secure, state.isPriv, mode,
1642  domain, lookup_level);
1643  }
1644 }
1645 
1647  : statistics::Group(parent),
1648  ADD_STAT(alignFaults, statistics::units::Count::get(),
1649  "Number of MMU faults due to alignment restrictions"),
1650  ADD_STAT(prefetchFaults, statistics::units::Count::get(),
1651  "Number of MMU faults due to prefetch"),
1652  ADD_STAT(domainFaults, statistics::units::Count::get(),
1653  "Number of MMU faults due to domain restrictions"),
1654  ADD_STAT(permsFaults, statistics::units::Count::get(),
1655  "Number of MMU faults due to permissions restrictions")
1656 {
1657 }
1658 
1659 } // namespace gem5
gem5::ArmISA::MMU::itbWalker
TableWalker * itbWalker
Definition: mmu.hh:84
gem5::ArmISA::MISCREG_CPSR
@ MISCREG_CPSR
Definition: misc.hh:61
gem5::ArmISA::MMU::S12E0Tran
@ S12E0Tran
Definition: mmu.hh:130
gem5::ArmSystem::physAddrRange
uint8_t physAddrRange() const
Returns the supported physical address range in bits.
Definition: system.hh:213
gem5::ArmISA::tlb
Bitfield< 59, 56 > tlb
Definition: misc_types.hh:92
gem5::ArmISA::MISCREG_VTTBR
@ MISCREG_VTTBR
Definition: misc.hh:449
gem5::ArmISA::MMU::finalizePhysical
Fault finalizePhysical(const RequestPtr &req, ThreadContext *tc, Mode mode) const override
Definition: mmu.cc:205
gem5::ArmISA::TlbEntry::Lookup::mode
BaseMMU::Mode mode
Definition: pagetable.hh:206
gem5::BaseMMU::Read
@ Read
Definition: mmu.hh:56
gem5::ArmISA::MMU::haveLargeAsid64
bool haveLargeAsid64
Definition: mmu.hh:491
gem5::ArmISA::MMU::S12E1Tran
@ S12E1Tran
Definition: mmu.hh:131
gem5::ArmISA::MISCREG_TTBR0_EL2
@ MISCREG_TTBR0_EL2
Definition: misc.hh:604
gem5::ArmISA::maskTaggedAddr
Addr maskTaggedAddr(Addr addr, ThreadContext *tc, ExceptionLevel el, int topbit)
Definition: utility.cc:455
gem5::ThreadContext::readMiscReg
virtual RegVal readMiscReg(RegIndex misc_reg)=0
gem5::NoFault
constexpr decltype(nullptr) NoFault
Definition: types.hh:253
gem5::ArmISA::ELIs64
bool ELIs64(ThreadContext *tc, ExceptionLevel el)
Definition: utility.cc:273
gem5::ArmISA::ArmFault::AddressSizeLL
@ AddressSizeLL
Definition: faults.hh:111
gem5::AddrRange::start
Addr start() const
Get the start address of the range.
Definition: addr_range.hh:343
gem5::ArmISA::MMU::LookupLevel
enums::ArmLookupLevel LookupLevel
Definition: mmu.hh:63
gem5::ArmISA::MISCREG_SCTLR_EL3
@ MISCREG_SCTLR_EL3
Definition: misc.hh:592
gem5::RiscvISA::perm
Bitfield< 3, 1 > perm
Definition: pagetable.hh:72
gem5::ArmISA::SelfDebug
Definition: self_debug.hh:277
gem5::ArmISA::MMU::s2PermBits64
std::pair< bool, bool > s2PermBits64(TlbEntry *te, const RequestPtr &req, Mode mode, ThreadContext *tc, CachedState &state, bool r, bool w, bool x)
Definition: mmu.cc:579
gem5::ArmISA::MISCREG_TTBR0
@ MISCREG_TTBR0
Definition: misc.hh:255
gem5::ArmISA::MMU::Stats::domainFaults
statistics::Scalar domainFaults
Definition: mmu.hh:504
gem5::ArmISA::el
Bitfield< 3, 2 > el
Definition: misc_types.hh:73
gem5::ArmISA::MISCREG_CONTEXTIDR
@ MISCREG_CONTEXTIDR
Definition: misc.hh:400
gem5::ArmISA::TlbEntry::ns
bool ns
Definition: pagetable.hh:239
gem5::ArmISA::MMU::getTE
Fault getTE(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode, Translation *translation, bool timing, bool functional, bool is_secure, ArmTranslationType tran_type, bool stage2)
Definition: mmu.cc:1415
gem5::ArmISA::MISCREG_TCR_EL2
@ MISCREG_TCR_EL2
Definition: misc.hh:605
gem5::ArmISA::aarch64
Bitfield< 34 > aarch64
Definition: types.hh:81
test
Definition: test.h:38
pseudo_inst.hh
gem5::BaseMMU::Mode
Mode
Definition: mmu.hh:56
gem5::ArmISA::ArmFault::PrefetchUncacheable
@ PrefetchUncacheable
Definition: faults.hh:117
gem5::ArmISA::MMU::m5opRange
AddrRange m5opRange
Definition: mmu.hh:494
gem5::ArmISA::asid
asid
Definition: misc_types.hh:618
gem5::BaseMMU::Write
@ Write
Definition: mmu.hh:56
gem5::ArmISA::MISCREG_SCR_EL3
@ MISCREG_SCR_EL3
Definition: misc.hh:594
gem5::ArmISA::domain
Bitfield< 7, 4 > domain
Definition: misc_types.hh:424
gem5::ArmISA::TlbEntry::DomainType::NoAccess
@ NoAccess
gem5::AddrRange::contains
bool contains(const Addr &a) const
Determine if the range contains an address.
Definition: addr_range.hh:471
gem5::ArmISA::TlbEntry::outerAttrs
uint8_t outerAttrs
Definition: pagetable.hh:224
gem5::ArmISA::MMU::dtbStage2Walker
TableWalker * dtbStage2Walker
Definition: mmu.hh:87
gem5::ArmISA::f
Bitfield< 6 > f
Definition: misc_types.hh:68
gem5::ArmISA::ArmFault::LpaeTran
@ LpaeTran
Definition: faults.hh:152
gem5::ArmISA::ArmFault::PrefetchTLBMiss
@ PrefetchTLBMiss
Definition: faults.hh:116
gem5::ArmISA::MISCREG_TTBR1_EL1
@ MISCREG_TTBR1_EL1
Definition: misc.hh:600
gem5::ArmISA::MISCREG_TTBCR
@ MISCREG_TTBCR
Definition: misc.hh:261
gem5::ArmISA::vmid_t
uint16_t vmid_t
Definition: types.hh:57
gem5::BaseMMU::Translation::markDelayed
virtual void markDelayed()=0
Signal that the translation has been delayed due to a hw page table walk.
gem5::ArmISA::MMU::CachedState::updateMiscReg
void updateMiscReg(ThreadContext *tc, ArmTranslationType tran_type)
Definition: mmu.cc:1217
gem5::Request::SECURE
@ SECURE
The request targets the secure memory space.
Definition: request.hh:186
gem5::ArmISA::MISCREG_HSCTLR
@ MISCREG_HSCTLR
Definition: misc.hh:247
gem5::Request::CACHE_BLOCK_ZERO
@ CACHE_BLOCK_ZERO
This is a write that is targeted and zeroing an entire cache block.
Definition: request.hh:143
gem5::ArmISA::MMU::S1E0Tran
@ S1E0Tran
Definition: mmu.hh:126
gem5::ArmSystem::haveLargeAsid64
bool haveLargeAsid64() const
Returns true if ASID is 16 bits in AArch64 (ARMv8)
Definition: system.hh:202
gem5::ArmISA::e
Bitfield< 9 > e
Definition: misc_types.hh:65
gem5::ArmISA::TLB
Definition: tlb.hh:115
gem5::ArmISA::MMU::S1CTran
@ S1CTran
Definition: mmu.hh:117
gem5::ArmISA::MMU::CachedState::vmid
vmid_t vmid
Definition: mmu.hh:190
gem5::ArmISA::MISCREG_TCR_EL3
@ MISCREG_TCR_EL3
Definition: misc.hh:611
gem5::ArmISA::ArmFault::DomainLL
@ DomainLL
Definition: faults.hh:103
gem5::ThreadContext::contextId
virtual ContextID contextId() const =0
gem5::ArmISA::EL1
@ EL1
Definition: types.hh:274
gem5::ArmISA::MMU::getResultTe
Fault getResultTe(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode, Translation *translation, bool timing, bool functional, TlbEntry *mergeTe, CachedState &state)
Definition: mmu.cc:1507
gem5::BaseMMU::init
void init() override
Called at init time, this method is traversing the TLB hierarchy and pupulating the instruction/data/...
Definition: mmu.cc:53
gem5::VegaISA::w
Bitfield< 6 > w
Definition: pagetable.hh:59
gem5::ArmISA::TlbEntry::setAttributes
void setAttributes(bool lpae)
Definition: pagetable.hh:396
gem5::ArmISA::MMU::purifyTaggedAddr
Addr purifyTaggedAddr(Addr vaddr_tainted, ThreadContext *tc, ExceptionLevel el, TCR tcr, bool is_inst, CachedState &state)
Definition: mmu.cc:795
gem5::ArmISA::MMU::S1E3Tran
@ S1E3Tran
Definition: mmu.hh:129
gem5::ArmISA::inAArch64
bool inAArch64(ThreadContext *tc)
Definition: utility.cc:122
gem5::ArmISA::MMU::AllowUnaligned
@ AllowUnaligned
Definition: mmu.hh:109
gem5::ArmISA::MMU::_hasWalkCache
bool _hasWalkCache
Definition: mmu.hh:496
gem5::ArmISA::MMU::getDTBPtr
ArmISA::TLB * getDTBPtr() const
Definition: mmu.hh:66
gem5::VegaISA::r
Bitfield< 5 > r
Definition: pagetable.hh:60
gem5::ArmISA::TlbEntry::MemoryType::Normal
@ Normal
gem5::ArmISA::TableWalker::walk
Fault walk(const RequestPtr &req, ThreadContext *tc, uint16_t asid, vmid_t _vmid, bool hyp, BaseMMU::Mode mode, BaseMMU::Translation *_trans, bool timing, bool functional, bool secure, MMU::ArmTranslationType tran_type, bool stage2, const TlbEntry *walk_entry)
Definition: table_walker.cc:289
gem5::ArmISA::MISCREG_SCTLR
@ MISCREG_SCTLR
Definition: misc.hh:236
table_walker.hh
gem5::ArmISA::MMU::dtbWalker
TableWalker * dtbWalker
Definition: mmu.hh:85
gem5::ArmISA::TlbTestInterface
Definition: tlb.hh:79
gem5::ArmISA::MMU::testWalk
Fault testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode, TlbEntry::DomainType domain, LookupLevel lookup_level, bool stage2)
Definition: mmu.cc:1625
gem5::ArmISA::MMU::MMU
MMU(const ArmMMUParams &p)
Definition: mmu.cc:59
gem5::ArmISA::TlbEntry
Definition: pagetable.hh:165
gem5::ArmISA::ArmFault::PermissionLL
@ PermissionLL
Definition: faults.hh:104
gem5::ArmISA::MMU::S1E2Tran
@ S1E2Tran
Definition: mmu.hh:128
gem5::ArmISA::MMU::init
void init() override
init() is called after all C++ SimObjects have been created and all ports are connected.
Definition: mmu.cc:92
gem5::ArmISA::MMU::_attr
uint64_t _attr
Definition: mmu.hh:487
gem5::ArmISA::TlbEntry::DomainType
DomainType
Definition: pagetable.hh:177
gem5::ArmISA::MISCREG_VTCR_EL2
@ MISCREG_VTCR_EL2
Definition: misc.hh:607
gem5::ArmISA::MMU::Stats::permsFaults
statistics::Scalar permsFaults
Definition: mmu.hh:505
gem5::BaseMMU::Execute
@ Execute
Definition: mmu.hh:56
gem5::BaseMMU
Definition: mmu.hh:53
gem5::ArmISA::MMU::itbStage2Walker
TableWalker * itbStage2Walker
Definition: mmu.hh:86
gem5::ArmISA::ArmFault::VmsaTran
@ VmsaTran
Definition: faults.hh:153
gem5::ArmISA::MISCREG_DACR
@ MISCREG_DACR
Definition: misc.hh:266
gem5::ArmISA::TlbEntry::Lookup
Definition: pagetable.hh:185
gem5::ArmISA::MMU::translateFs
Fault translateFs(const RequestPtr &req, ThreadContext *tc, Mode mode, Translation *translation, bool &delay, bool timing, ArmTranslationType tran_type, bool functional, CachedState &state)
Definition: mmu.cc:954
gem5::Cycles
Cycles is a wrapper class for representing cycle counts, i.e.
Definition: types.hh:78
mmu.hh
gem5::ArmISA::MMU::UserMode
@ UserMode
Definition: mmu.hh:111
gem5::ArmISA::MMU::itbStage2
TLB * itbStage2
Definition: mmu.hh:81
gem5::ArmISA::MMU::translateComplete
Fault translateComplete(const RequestPtr &req, ThreadContext *tc, Translation *translation, Mode mode, ArmTranslationType tran_type, bool call_from_s2)
Definition: mmu.cc:1107
gem5::QARMA::b11
Bitfield< 47, 44 > b11
Definition: qarma.hh:55
gem5::ArmISA::pa
Bitfield< 39, 12 > pa
Definition: misc_types.hh:657
gem5::Flags< FlagsType >
gem5::ArmISA::MMU::getTableWalker
TableWalker * getTableWalker(BaseMMU::Mode mode, bool stage2) const
Definition: mmu.cc:153
gem5::ArmISA::MISCREG_PRRR
@ MISCREG_PRRR
Definition: misc.hh:370
gem5::ArmISA::MMU::CachedState
Definition: mmu.hh:134
gem5::Request::UNCACHEABLE
@ UNCACHEABLE
The request is to an uncacheable address.
Definition: request.hh:125
gem5::ArmISA::MMU::Stats::prefetchFaults
statistics::Scalar prefetchFaults
Definition: mmu.hh:503
gem5::ArmISA::TlbEntry::shareable
bool shareable
Definition: pagetable.hh:254
gem5::ThreadContext
ThreadContext is the external interface to all thread state for anything outside of the CPU.
Definition: thread_context.hh:94
gem5::ArmISA::MODE_HYP
@ MODE_HYP
Definition: types.hh:294
gem5::ArmISA::MISCREG_SCTLR_EL1
@ MISCREG_SCTLR_EL1
Definition: misc.hh:580
gem5::ArmISA::MMU::setTestInterface
void setTestInterface(SimObject *ti)
Definition: mmu.cc:1601
gem5::Named::name
virtual std::string name() const
Definition: named.hh:47
gem5::Fault
std::shared_ptr< FaultBase > Fault
Definition: types.hh:248
gem5::VegaISA::p
Bitfield< 54 > p
Definition: pagetable.hh:70
gem5::BaseMMU::unified
std::set< BaseTLB * > unified
Definition: mmu.hh:183
gem5::ArmISA::MMU::NormalTran
@ NormalTran
Definition: mmu.hh:116
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:186
ADD_STAT
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Definition: group.hh:75
gem5::ArmISA::TlbEntry::Lookup::functional
bool functional
Definition: pagetable.hh:200
gem5::Packet
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:291
gem5::ArmISA::TlbEntry::Lookup::secure
bool secure
Definition: pagetable.hh:198
gem5::ArmISA::MMU::tranTypeEL
static ExceptionLevel tranTypeEL(CPSR cpsr, ArmTranslationType type)
Determine the EL to use for the purpose of a translation given a specific translation type.
Definition: mmu.cc:1386
gem5::ArmISA::TlbEntry::mtype
MemoryType mtype
Definition: pagetable.hh:229
gem5::ArmISA::MMU::translateSe
Fault translateSe(const RequestPtr &req, ThreadContext *tc, Mode mode, Translation *translation, bool &delay, bool timing, CachedState &state)
Definition: mmu.cc:235
gem5::ArmISA::EL2
@ EL2
Definition: types.hh:275
gem5::ArmISA::MISCREG_SCTLR_EL2
@ MISCREG_SCTLR_EL2
Definition: misc.hh:585
gem5::ArmISA::wxn
Bitfield< 19 > wxn
Definition: misc_types.hh:360
isa.hh
gem5::VegaISA::x
Bitfield< 4 > x
Definition: pagetable.hh:61
gem5::X86ISA::type
type
Definition: misc.hh:727
gem5::RequestPtr
std::shared_ptr< Request > RequestPtr
Definition: request.hh:92
process.hh
stage2_lookup.hh
gem5::ArmISA::MMU::s1State
CachedState s1State
Definition: mmu.hh:484
gem5::ArmISA::MMU::AlignmentMask
@ AlignmentMask
Definition: mmu.hh:100
gem5::ArmISA::MMU::CachedState::computeAddrTop
Memoizer< int, ThreadContext *, bool, bool, TCR, ExceptionLevel > computeAddrTop
Definition: mmu.hh:212
gem5::ArmISA::TlbEntry::Lookup::inHost
bool inHost
Definition: pagetable.hh:204
reg_abi.hh
gem5::ArmISA::MMU::translateAtomic
Fault translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode) override
Definition: mmu.hh:245
gem5::ArmISA::MMU::drainResume
void drainResume() override
Resume execution after a successful drain.
Definition: mmu.cc:130
tlbi_op.hh
gem5::ArmISA::te
Bitfield< 30 > te
Definition: misc_types.hh:338
gem5::ArmISA::mask
Bitfield< 3, 0 > mask
Definition: pcstate.hh:63
gem5::bits
constexpr T bits(T val, unsigned first, unsigned last)
Extract the bitfield from position 'first' to 'last' (inclusive) from 'val' and right justify it.
Definition: bitfield.hh:76
flags
uint8_t flags
Definition: helpers.cc:66
gem5::ArmISA::TlbEntry::Lookup::va
Addr va
Definition: pagetable.hh:188
gem5::ArmISA::EL3
@ EL3
Definition: types.hh:276
gem5::ArmISA::TlbEntry::Lookup::vmid
vmid_t vmid
Definition: pagetable.hh:194
gem5::SimObject
Abstract superclass for simulation objects.
Definition: sim_object.hh:146
gem5::ArmISA::MODE_USER
@ MODE_USER
Definition: types.hh:288
std::pair
STL pair class.
Definition: stl.hh:58
gem5::ArmISA::MMU::miscRegContext
ContextID miscRegContext
Definition: mmu.hh:481
gem5::pseudo_inst::decodeAddrOffset
static void decodeAddrOffset(Addr offset, uint8_t &func)
Definition: pseudo_inst.hh:63
gem5::ArmISA::MISCREG_ID_AA64MMFR1_EL1
@ MISCREG_ID_AA64MMFR1_EL1
Definition: misc.hh:571
gem5::ArmISA::MMU::translateMmuOff
Fault translateMmuOff(ThreadContext *tc, const RequestPtr &req, Mode mode, ArmTranslationType tran_type, Addr vaddr, bool long_desc_format, CachedState &state)
Definition: mmu.cc:807
gem5::ArmISA::Stage2LookUp
Definition: stage2_lookup.hh:59
gem5::Addr
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:147
gem5::ArmISA::TlbEntry::Lookup::targetEL
ExceptionLevel targetEL
Definition: pagetable.hh:202
gem5::ArmISA::MISCREG_NMRR
@ MISCREG_NMRR
Definition: misc.hh:376
gem5::ArmISA::TLB::setVMID
void setVMID(vmid_t _vmid)
Definition: tlb.hh:208
gem5::ArmISA::MISCREG_TTBR1
@ MISCREG_TTBR1
Definition: misc.hh:258
gem5::ArmISA::isSecure
bool isSecure(ThreadContext *tc)
Definition: utility.cc:73
gem5::ArmISA::MMU::checkWalkCache
bool checkWalkCache() const
Definition: mmu.cc:111
gem5::ArmISA::MMU::HypMode
@ HypMode
Definition: mmu.hh:118
gem5::MipsISA::ti
Bitfield< 30 > ti
Definition: pra_constants.hh:179
gem5::BaseMMU::instruction
std::set< BaseTLB * > instruction
It is possible from the MMU to traverse the entire hierarchy of TLBs, starting from the DTB and ITB (...
Definition: mmu.hh:181
packet_access.hh
gem5::ArmISA::vm
Bitfield< 0 > vm
Definition: misc_types.hh:285
gem5::ArmISA::va
Bitfield< 8 > va
Definition: misc_types.hh:276
gem5::ArmISA::TlbEntry::outerShareable
bool outerShareable
Definition: pagetable.hh:255
gem5::ArmISA::currEL
ExceptionLevel currEL(const ThreadContext *tc)
Returns the current Exception Level (EL) of the provided ThreadContext.
Definition: utility.cc:129
gem5::ArmISA::MMU::faultPAN
bool faultPAN(ThreadContext *tc, uint8_t ap, const RequestPtr &req, Mode mode, const bool is_priv, CachedState &state)
Definition: mmu.cc:741
gem5::ArmISA::ArmFault
Definition: faults.hh:64
gem5::Process
Definition: process.hh:68
gem5::Request::STRICT_ORDER
@ STRICT_ORDER
The request is required to be strictly ordered by CPU models and is non-speculative.
Definition: request.hh:135
gem5::ArmISA::MMU::CachedState::miscRegValid
bool miscRegValid
Definition: mmu.hh:195
gem5::ThreadContext::getProcessPtr
virtual Process * getProcessPtr()=0
gem5::ArmISA::MMU::getITBPtr
ArmISA::TLB * getITBPtr() const
Definition: mmu.hh:72
gem5::FullSystem
bool FullSystem
The FullSystem variable can be used to determine the current mode of simulation.
Definition: root.cc:220
gem5::ArmISA::TlbEntry::attributes
uint64_t attributes
Definition: pagetable.hh:213
gem5::BaseMMU::Translation
Definition: mmu.hh:58
gem5::ArmISA::MMU::getTlb
TLB * getTlb(BaseMMU::Mode mode, bool stage2) const
Definition: mmu.cc:137
gem5::ArmISA::MMU::translateTiming
void translateTiming(const RequestPtr &req, ThreadContext *tc, Translation *translation, Mode mode) override
Definition: mmu.hh:256
gem5::ArmISA::MMU::physAddrRange
uint8_t physAddrRange
Definition: mmu.hh:492
gem5::ArmISA::IsSecureEL2Enabled
bool IsSecureEL2Enabled(ThreadContext *tc)
Definition: utility.cc:250
state
atomic_var_t state
Definition: helpers.cc:188
gem5::ArmISA::MMU::ArmTranslationType
ArmTranslationType
Definition: mmu.hh:114
gem5::ArmISA::EL0
@ EL0
Definition: types.hh:273
gem5::ArmISA::Stage2LookUp::getTe
Fault getTe(ThreadContext *tc, TlbEntry *destTe)
Definition: stage2_lookup.cc:57
gem5::ArmISA::MMU::S1S2NsTran
@ S1S2NsTran
Definition: mmu.hh:121
gem5::ArmISA::TlbEntry::MemoryType::StronglyOrdered
@ StronglyOrdered
gem5::ArmSystem
Definition: system.hh:91
gem5::ArmISA::TableWalker
Definition: table_walker.hh:66
gem5::ArmISA::ArmFault::TranMethod
TranMethod
Definition: faults.hh:150
gem5::ArmISA::MISCREG_HCR
@ MISCREG_HCR
Definition: misc.hh:249
gem5::ArmISA::ArmFault::AlignmentFault
@ AlignmentFault
Definition: faults.hh:97
gem5::ArmISA::MMU::testTranslation
Fault testTranslation(const RequestPtr &req, Mode mode, TlbEntry::DomainType domain, CachedState &state)
Definition: mmu.cc:1613
gem5::ArmISA::MISCREG_VTTBR_EL2
@ MISCREG_VTTBR_EL2
Definition: misc.hh:606
gem5::ArmISA::MMU::translateFunctional
TranslationGenPtr translateFunctional(Addr start, Addr size, ThreadContext *tc, Mode mode, Request::Flags flags) override
Returns a translation generator for a region of virtual addresses, instead of directly translating a ...
Definition: mmu.hh:91
gem5::ArmISA::TlbEntry::Lookup::asn
uint16_t asn
Definition: pagetable.hh:190
gem5::ArmISA::TLB::setTableWalker
void setTableWalker(TableWalker *table_walker)
Definition: tlb.cc:99
gem5::ArmISA::Stage2LookUp::isComplete
bool isComplete() const
Definition: stage2_lookup.hh:97
gem5::ArmISA::MMU::checkPAN
bool checkPAN(ThreadContext *tc, uint8_t ap, const RequestPtr &req, Mode mode, const bool is_priv, CachedState &state)
Definition: mmu.cc:766
gem5::ArmISA::ISA::getSelfDebug
SelfDebug * getSelfDebug() const
Definition: isa.hh:629
gem5::ArmISA::MISCREG_SCR
@ MISCREG_SCR
Definition: misc.hh:244
gem5::ArmISA::MMU::CachedState::getVMID
vmid_t getVMID(ThreadContext *tc) const
Returns the current VMID (information stored in the VTTBR_EL2 register)
Definition: mmu.cc:1148
gem5::ArmISA::MMU::checkPermissions
Fault checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode, bool stage2)
Definition: mmu.cc:277
gem5::ArmISA::TlbEntry::Lookup::hyp
bool hyp
Definition: pagetable.hh:196
gem5::ArmISA::longDescFormatInUse
bool longDescFormatInUse(ThreadContext *tc)
Definition: utility.cc:136
gem5::ArmISA::ArmFault::annotate
virtual void annotate(AnnotationIDs id, uint64_t val)
Definition: faults.hh:239
gem5::ArmISA::MMU::updateMiscReg
CachedState & updateMiscReg(ThreadContext *tc, ArmTranslationType tran_type, bool stage2)
Definition: mmu.cc:1177
gem5::statistics::Group
Statistics container.
Definition: group.hh:93
gem5::QARMA::b10
Bitfield< 43, 40 > b10
Definition: qarma.hh:56
gem5::ArmSystem::releaseFS
const ArmRelease * releaseFS() const
Definition: system.hh:152
gem5::ArmISA::MMU::invalidateMiscReg
void invalidateMiscReg()
Definition: mmu.cc:197
gem5::ArmISA::ArmFault::S1PTW
@ S1PTW
Definition: faults.hh:134
gem5::ArmISA::MMU::setAttr
void setAttr(uint64_t attr)
Accessor functions for memory attributes for last accessed TLB entry.
Definition: mmu.hh:377
gem5::ArmISA::sd
Bitfield< 4 > sd
Definition: misc_types.hh:827
gem5::ArmISA::TlbEntry::partial
bool partial
Definition: pagetable.hh:248
gem5::ArmISA::snsBankedIndex
int snsBankedIndex(MiscRegIndex reg, ThreadContext *tc)
Definition: misc.cc:666
gem5::ArmISA::MMU::S1E1Tran
@ S1E1Tran
Definition: mmu.hh:127
gem5::ArmISA::Stage2LookUp::setSelfDelete
void setSelfDelete()
Definition: stage2_lookup.hh:95
gem5::Packet::setLE
void setLE(T v)
Set the value in the data pointer to v as little endian.
Definition: packet_access.hh:108
gem5::MipsISA::vaddr
vaddr
Definition: pra_constants.hh:278
gem5::ArmISA::MMU::takeOverFrom
void takeOverFrom(BaseMMU *old_mmu) override
Definition: mmu.cc:1587
gem5::ArmISA::MMU::dtbStage2
TLB * dtbStage2
Definition: mmu.hh:82
gem5::ArmSystem::haveEL
static bool haveEL(ThreadContext *tc, ArmISA::ExceptionLevel el)
Return true if the system implements a specific exception level.
Definition: system.cc:131
gem5::ArmISA::MISCREG_HCR_EL2
@ MISCREG_HCR_EL2
Definition: misc.hh:587
gem5::ArmISA::MISCREG_TTBR0_EL1
@ MISCREG_TTBR0_EL1
Definition: misc.hh:598
fatal_if
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condi...
Definition: logging.hh:226
gem5::ArmISA::TlbEntry::Lookup::ignoreAsn
bool ignoreAsn
Definition: pagetable.hh:192
gem5::ArmISA::MMU::translateMmuOn
Fault translateMmuOn(ThreadContext *tc, const RequestPtr &req, Mode mode, Translation *translation, bool &delay, bool timing, bool functional, Addr vaddr, ArmFault::TranMethod tranMethod, CachedState &state)
Definition: mmu.cc:882
gem5
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
Definition: gpu_translation_state.hh:37
gem5::ArmISA::computeAddrTop
int computeAddrTop(ThreadContext *tc, bool selbit, bool is_instr, TCR tcr, ExceptionLevel el)
Definition: utility.cc:409
gem5::ArmISA::MMU::_release
const ArmRelease * _release
Definition: mmu.hh:490
gem5::BaseMMU::takeOverFrom
virtual void takeOverFrom(BaseMMU *old_mmu)
Definition: mmu.cc:157
gem5::ArmISA::MMU::stats
gem5::ArmISA::MMU::Stats stats
gem5::ArmISA::MMU
Definition: mmu.hh:60
gem5::ArmISA::TableWalker::setMmu
void setMmu(MMU *_mmu)
Definition: table_walker.cc:119
gem5::ArmISA::MMU::isCompleteTranslation
bool isCompleteTranslation(TlbEntry *te) const
Definition: mmu.cc:1581
gem5::BaseMMU::Translation::finish
virtual void finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc, BaseMMU::Mode mode)=0
gem5::ArmISA::MMU::Stats::Stats
Stats(statistics::Group *parent)
Definition: mmu.cc:1646
gem5::ArmISA::HaveExt
bool HaveExt(ThreadContext *tc, ArmExtension ext)
Returns true if the provided ThreadContext supports the ArmExtension passed as a second argument.
Definition: utility.cc:229
gem5::ArmISA::MMU::Stats::alignFaults
statistics::Scalar alignFaults
Definition: mmu.hh:502
gem5::ArmISA::MMU::lookup
TlbEntry * lookup(Addr vpn, uint16_t asn, vmid_t vmid, bool hyp, bool secure, bool functional, bool ignore_asn, ExceptionLevel target_el, bool in_host, bool stage2, BaseMMU::Mode mode)
Lookup an entry in the TLB.
Definition: mmu.cc:1425
gem5::ArmISA::ArmFault::OVA
@ OVA
Definition: faults.hh:135
gem5::BaseMMU::data
std::set< BaseTLB * > data
Definition: mmu.hh:182
gem5::ArmISA::MISCREG_TTBR1_EL2
@ MISCREG_TTBR1_EL2
Definition: misc.hh:821
gem5::ArmISA::TlbEntry::innerAttrs
uint8_t innerAttrs
Definition: pagetable.hh:223
gem5::ArmISA::MMU::s2State
CachedState s2State
Definition: mmu.hh:484
gem5::ArmISA::dc
Bitfield< 12 > dc
Definition: misc_types.hh:273
gem5::ArmISA::MISCREG_TCR_EL1
@ MISCREG_TCR_EL1
Definition: misc.hh:602
gem5::ArmISA::ExceptionLevel
ExceptionLevel
Definition: types.hh:271
panic
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:178
gem5::ArmISA::MMU::checkPermissions64
Fault checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode, ThreadContext *tc, bool stage2)
Definition: mmu.cc:463
gem5::ArmISA::mode
Bitfield< 4, 0 > mode
Definition: misc_types.hh:74
gem5::ArmISA::MMU::s1PermBits64
std::pair< bool, bool > s1PermBits64(TlbEntry *te, const RequestPtr &req, Mode mode, ThreadContext *tc, CachedState &state, bool r, bool w, bool x)
Definition: mmu.cc:619

Generated on Thu Jul 28 2022 13:32:24 for gem5 by doxygen 1.8.17