gem5  v21.2.1.1
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
mmu.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2010-2013, 2016-2021 Arm Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Copyright (c) 2001-2005 The Regents of The University of Michigan
15  * All rights reserved.
16  *
17  * Redistribution and use in source and binary forms, with or without
18  * modification, are permitted provided that the following conditions are
19  * met: redistributions of source code must retain the above copyright
20  * notice, this list of conditions and the following disclaimer;
21  * redistributions in binary form must reproduce the above copyright
22  * notice, this list of conditions and the following disclaimer in the
23  * documentation and/or other materials provided with the distribution;
24  * neither the name of the copyright holders nor the names of its
25  * contributors may be used to endorse or promote products derived from
26  * this software without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39  */
40 
41 #include "arch/arm/mmu.hh"
42 
43 #include "arch/arm/isa.hh"
44 #include "arch/arm/reg_abi.hh"
46 #include "arch/arm/table_walker.hh"
47 #include "arch/arm/tlbi_op.hh"
48 #include "debug/TLB.hh"
49 #include "debug/TLBVerbose.hh"
50 #include "mem/packet_access.hh"
51 #include "sim/pseudo_inst.hh"
52 #include "sim/process.hh"
53 
54 namespace gem5
55 {
56 
57 using namespace ArmISA;
58 
59 MMU::MMU(const ArmMMUParams &p)
60  : BaseMMU(p),
61  itbStage2(p.stage2_itb), dtbStage2(p.stage2_dtb),
62  itbWalker(p.itb_walker), dtbWalker(p.dtb_walker),
63  itbStage2Walker(p.stage2_itb_walker),
64  dtbStage2Walker(p.stage2_dtb_walker),
65  test(nullptr),
66  miscRegContext(0),
67  s1State(this, false), s2State(this, true),
68  _attr(0),
69  _release(nullptr),
70  _hasWalkCache(false),
71  stats(this)
72 {
73  // Cache system-level properties
74  if (FullSystem) {
75  ArmSystem *arm_sys = dynamic_cast<ArmSystem *>(p.sys);
76  assert(arm_sys);
77  haveLargeAsid64 = arm_sys->haveLargeAsid64();
78  physAddrRange = arm_sys->physAddrRange();
79 
80  _release = arm_sys->releaseFS();
81  } else {
82  haveLargeAsid64 = false;
83  physAddrRange = 48;
84 
85  _release = p.release_se;
86  }
87 
88  m5opRange = p.sys->m5opRange();
89 }
90 
91 void
93 {
94  itbWalker->setMmu(this);
95  dtbWalker->setMmu(this);
96  itbStage2Walker->setMmu(this);
97  dtbStage2Walker->setMmu(this);
98 
101 
104 
105  BaseMMU::init();
106 
108 }
109 
// NOTE(review): Doxygen-scrape artifact — the listing line between "bool"
// and "{" (the function signature) is missing from this capture; judging
// by the body this is the predicate that decides whether any attached TLB
// keeps a walk cache (presumably used to seed _hasWalkCache) — TODO
// confirm against the repository source.
110 bool
112 {
// Scan every TLB in each of BaseMMU's three groups (instruction, data,
// unified); return true as soon as one reports a walk cache.
113  for (auto tlb : instruction) {
114  if (static_cast<TLB*>(tlb)->walkCache())
115  return true;
116  }
117  for (auto tlb : data) {
118  if (static_cast<TLB*>(tlb)->walkCache())
119  return true;
120  }
121  for (auto tlb : unified) {
122  if (static_cast<TLB*>(tlb)->walkCache())
123  return true;
124  }
125 
// No TLB in any group maintains a walk cache.
126  return false;
127 }
128 
129 void
131 {
132  s1State.miscRegValid = false;
133  s2State.miscRegValid = false;
134 }
135 
136 TLB *
137 MMU::getTlb(BaseMMU::Mode mode, bool stage2) const
138 {
139  if (mode == BaseMMU::Execute) {
140  if (stage2)
141  return itbStage2;
142  else
143  return getITBPtr();
144  } else {
145  if (stage2)
146  return dtbStage2;
147  else
148  return getDTBPtr();
149  }
150 }
151 
152 TableWalker *
154 {
155  if (mode == BaseMMU::Execute) {
156  if (stage2)
157  return itbStage2Walker;
158  else
159  return itbWalker;
160  } else {
161  if (stage2)
162  return dtbStage2Walker;
163  else
164  return dtbWalker;
165  }
166 }
167 
// NOTE(review): Doxygen-scrape artifact — the listing line between "bool"
// and "{" (the function signature) is missing from this capture. From the
// body this is a side-effect-free functional VA -> PA lookup (parameters
// presumably ThreadContext *tc, Addr va, Addr &pa) — TODO confirm against
// the repository source.
168 bool
170 {
// Refresh the cached misc-reg state for a normal translation, then pick
// the TLB that a data read would use (stage 2 if stage-1 translation is
// bypassed via directToStage2).
171  CachedState& state = updateMiscReg(tc, NormalTran, false);
172 
173  auto tlb = getTlb(BaseMMU::Read, state.directToStage2);
174 
// Build a functional lookup: no statistics/side effects on the TLB, and
// match the current ASID/VMID/security/EL context.
175  TlbEntry::Lookup lookup_data;
176 
177  lookup_data.va = va;
178  lookup_data.asn = state.asid;
179  lookup_data.ignoreAsn = false;
180  lookup_data.vmid = state.vmid;
181  lookup_data.hyp = state.isHyp;
182  lookup_data.secure = state.isSecure;
183  lookup_data.functional = true;
184  lookup_data.targetEL = state.aarch64 ? state.aarch64EL : EL1;
185  lookup_data.inHost = false;
186  lookup_data.mode = BaseMMU::Read;
187 
188  TlbEntry *e = tlb->multiLookup(lookup_data);
189 
// Fail (return false) on a TLB miss; no table walk is started here.
190  if (!e)
191  return false;
192  pa = e->pAddr(va);
193  return true;
194 }
195 
196 void
198 {
199  s1State.miscRegValid = false;
200 }
201 
202 Fault
204  ThreadContext *tc, Mode mode) const
205 {
206  const Addr paddr = req->getPaddr();
207 
208  if (m5opRange.contains(paddr)) {
209  uint8_t func;
211  req->setLocalAccessor(
212  [func, mode](ThreadContext *tc, PacketPtr pkt) -> Cycles
213  {
214  uint64_t ret;
215  if (inAArch64(tc))
216  pseudo_inst::pseudoInst<RegABI64>(tc, func, ret);
217  else
218  pseudo_inst::pseudoInst<RegABI32>(tc, func, ret);
219 
220  if (mode == Read)
221  pkt->setLE(ret);
222 
223  return Cycles(1);
224  }
225  );
226  }
227 
228  return NoFault;
229 }
230 
231 
232 Fault
234  Translation *translation, bool &delay, bool timing,
235  CachedState &state)
236 {
237  updateMiscReg(tc, NormalTran, state.isStage2);
238  Addr vaddr_tainted = req->getVaddr();
239  Addr vaddr = 0;
240  if (state.aarch64)
241  vaddr = purifyTaggedAddr(vaddr_tainted, tc, state.aarch64EL,
242  (TCR)state.ttbcr, mode==Execute);
243  else
244  vaddr = vaddr_tainted;
245  Request::Flags flags = req->getFlags();
246 
247  bool is_fetch = (mode == Execute);
248  bool is_write = (mode == Write);
249 
250  if (!is_fetch) {
251  if (state.sctlr.a || !(flags & AllowUnaligned)) {
252  if (vaddr & mask(flags & AlignmentMask)) {
253  // LPAE is always disabled in SE mode
254  return std::make_shared<DataAbort>(
255  vaddr_tainted,
259  }
260  }
261  }
262 
263  Addr paddr;
264  Process *p = tc->getProcessPtr();
265 
266  if (!p->pTable->translate(vaddr, paddr))
267  return std::make_shared<GenericPageTableFault>(vaddr_tainted);
268  req->setPaddr(paddr);
269 
270  return finalizePhysical(req, tc, mode);
271 }
272 
273 Fault
275  bool stage2)
276 {
277  return checkPermissions(te, req, mode, stage2 ? s2State : s1State);
278 }
279 
280 Fault
282  CachedState &state)
283 {
284  // a data cache maintenance instruction that operates by MVA does
285  // not generate a Data Abort exeception due to a Permission fault
286  if (req->isCacheMaintenance()) {
287  return NoFault;
288  }
289 
290  Addr vaddr = req->getVaddr(); // 32-bit don't have to purify
291  Request::Flags flags = req->getFlags();
292  bool is_fetch = (mode == Execute);
293  bool is_write = (mode == Write);
294  bool is_priv = state.isPriv && !(flags & UserMode);
295 
296  // Get the translation type from the actuall table entry
297  ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran
299 
300  // If this is the second stage of translation and the request is for a
301  // stage 1 page table walk then we need to check the HCR.PTW bit. This
302  // allows us to generate a fault if the request targets an area marked
303  // as a device or strongly ordered.
304  if (state.isStage2 && req->isPTWalk() && state.hcr.ptw &&
305  (te->mtype != TlbEntry::MemoryType::Normal)) {
306  return std::make_shared<DataAbort>(
307  vaddr, te->domain, is_write,
308  ArmFault::PermissionLL + te->lookupLevel,
309  state.isStage2, tranMethod);
310  }
311 
312  // Generate an alignment fault for unaligned data accesses to device or
313  // strongly ordered memory
314  if (!is_fetch) {
315  if (te->mtype != TlbEntry::MemoryType::Normal) {
316  if (vaddr & mask(flags & AlignmentMask)) {
317  stats.alignFaults++;
318  return std::make_shared<DataAbort>(
321  tranMethod);
322  }
323  }
324  }
325 
326  if (te->nonCacheable) {
327  // Prevent prefetching from I/O devices.
328  if (req->isPrefetch()) {
329  // Here we can safely use the fault status for the short
330  // desc. format in all cases
331  return std::make_shared<PrefetchAbort>(
333  state.isStage2, tranMethod);
334  }
335  }
336 
337  if (!te->longDescFormat) {
338  switch ((state.dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
339  case 0:
341  DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
342  " domain: %#x write:%d\n", state.dacr,
343  static_cast<uint8_t>(te->domain), is_write);
344  if (is_fetch) {
345  // Use PC value instead of vaddr because vaddr might
346  // be aligned to cache line and should not be the
347  // address reported in FAR
348  return std::make_shared<PrefetchAbort>(
349  req->getPC(),
350  ArmFault::DomainLL + te->lookupLevel,
351  state.isStage2, tranMethod);
352  } else
353  return std::make_shared<DataAbort>(
354  vaddr, te->domain, is_write,
355  ArmFault::DomainLL + te->lookupLevel,
356  state.isStage2, tranMethod);
357  case 1:
358  // Continue with permissions check
359  break;
360  case 2:
361  panic("UNPRED domain\n");
362  case 3:
363  return NoFault;
364  }
365  }
366 
367  // The 'ap' variable is AP[2:0] or {AP[2,1],1b'0}, i.e. always three bits
368  uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
369  uint8_t hap = te->hap;
370 
371  if (state.sctlr.afe == 1 || te->longDescFormat)
372  ap |= 1;
373 
374  bool abt;
375  bool isWritable = true;
376  // If this is a stage 2 access (eg for reading stage 1 page table entries)
377  // then don't perform the AP permissions check, we stil do the HAP check
378  // below.
379  if (state.isStage2) {
380  abt = false;
381  } else {
382  switch (ap) {
383  case 0:
384  DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
385  (int)state.sctlr.rs);
386  if (!state.sctlr.xp) {
387  switch ((int)state.sctlr.rs) {
388  case 2:
389  abt = is_write;
390  break;
391  case 1:
392  abt = is_write || !is_priv;
393  break;
394  case 0:
395  case 3:
396  default:
397  abt = true;
398  break;
399  }
400  } else {
401  abt = true;
402  }
403  break;
404  case 1:
405  abt = !is_priv;
406  break;
407  case 2:
408  abt = !is_priv && is_write;
409  isWritable = is_priv;
410  break;
411  case 3:
412  abt = false;
413  break;
414  case 4:
415  panic("UNPRED premissions\n");
416  case 5:
417  abt = !is_priv || is_write;
418  isWritable = false;
419  break;
420  case 6:
421  case 7:
422  abt = is_write;
423  isWritable = false;
424  break;
425  default:
426  panic("Unknown permissions %#x\n", ap);
427  }
428  }
429 
430  bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
431  bool xn = te->xn || (isWritable && state.sctlr.wxn) ||
432  (ap == 3 && state.sctlr.uwxn && is_priv);
433  if (is_fetch && (abt || xn ||
434  (te->longDescFormat && te->pxn && is_priv) ||
435  (state.isSecure && te->ns && state.scr.sif))) {
436  stats.permsFaults++;
437  DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
438  "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d \n",
439  ap, is_priv, is_write, te->ns,
440  state.scr.sif, state.sctlr.afe);
441  // Use PC value instead of vaddr because vaddr might be aligned to
442  // cache line and should not be the address reported in FAR
443  return std::make_shared<PrefetchAbort>(
444  req->getPC(),
445  ArmFault::PermissionLL + te->lookupLevel,
446  state.isStage2, tranMethod);
447  } else if (abt | hapAbt) {
448  stats.permsFaults++;
449  DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
450  " write:%d\n", ap, is_priv, is_write);
451  return std::make_shared<DataAbort>(
452  vaddr, te->domain, is_write,
453  ArmFault::PermissionLL + te->lookupLevel,
454  state.isStage2 | !abt, tranMethod);
455  }
456  return NoFault;
457 }
458 
459 Fault
461  ThreadContext *tc, bool stage2)
462 {
463  return checkPermissions64(te, req, mode, tc, stage2 ? s2State : s1State);
464 }
465 
466 Fault
468  ThreadContext *tc, CachedState &state)
469 {
470  assert(state.aarch64);
471 
472  // A data cache maintenance instruction that operates by VA does
473  // not generate a Permission fault unless:
474  // * It is a data cache invalidate (dc ivac) which requires write
475  // permissions to the VA, or
476  // * It is executed from EL0
477  if (req->isCacheClean() && state.aarch64EL != EL0 && !state.isStage2) {
478  return NoFault;
479  }
480 
481  Addr vaddr_tainted = req->getVaddr();
482  Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, state.aarch64EL,
483  (TCR)state.ttbcr, mode==Execute);
484 
485  Request::Flags flags = req->getFlags();
486  bool is_fetch = (mode == Execute);
487  // Cache clean operations require read permissions to the specified VA
488  bool is_write = !req->isCacheClean() && mode == Write;
489  bool is_atomic = req->isAtomic();
490 
491  updateMiscReg(tc, state.curTranType, state.isStage2);
492 
493  // If this is the second stage of translation and the request is for a
494  // stage 1 page table walk then we need to check the HCR.PTW bit. This
495  // allows us to generate a fault if the request targets an area marked
496  // as a device or strongly ordered.
497  if (state.isStage2 && req->isPTWalk() && state.hcr.ptw &&
498  (te->mtype != TlbEntry::MemoryType::Normal)) {
499  return std::make_shared<DataAbort>(
500  vaddr_tainted, te->domain, is_write,
501  ArmFault::PermissionLL + te->lookupLevel,
503  }
504 
505  // Generate an alignment fault for unaligned accesses to device or
506  // strongly ordered memory
507  if (!is_fetch) {
508  if (te->mtype != TlbEntry::MemoryType::Normal) {
509  if (vaddr & mask(flags & AlignmentMask)) {
510  stats.alignFaults++;
511  return std::make_shared<DataAbort>(
512  vaddr_tainted,
514  is_atomic ? false : is_write,
517  }
518  }
519  }
520 
521  if (te->nonCacheable) {
522  // Prevent prefetching from I/O devices.
523  if (req->isPrefetch()) {
524  // Here we can safely use the fault status for the short
525  // desc. format in all cases
526  return std::make_shared<PrefetchAbort>(
527  vaddr_tainted,
530  }
531  }
532 
533  bool grant = false;
534  // grant_read is used for faults from an atomic instruction that
535  // both reads and writes from a memory location. From a ISS point
536  // of view they count as read if a read to that address would have
537  // generated the fault; they count as writes otherwise
538  bool grant_read = true;
539 
540  if (state.isStage2) {
541  std::tie(grant, grant_read) = s2PermBits64(te, req, mode, tc, state,
542  (!is_write && !is_fetch), is_write, is_fetch);
543  } else {
544  std::tie(grant, grant_read) = s1PermBits64(te, req, mode, tc, state,
545  (!is_write && !is_fetch), is_write, is_fetch);
546  }
547 
548  if (!grant) {
549  if (is_fetch) {
550  stats.permsFaults++;
551  DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
552  "ns:%d scr.sif:%d sctlr.afe: %d\n",
553  te->ns, state.scr.sif, state.sctlr.afe);
554  // Use PC value instead of vaddr because vaddr might be aligned to
555  // cache line and should not be the address reported in FAR
556  return std::make_shared<PrefetchAbort>(
557  req->getPC(),
558  ArmFault::PermissionLL + te->lookupLevel,
560  } else {
561  stats.permsFaults++;
562  DPRINTF(TLB, "TLB Fault: Data abort on permission check."
563  "ns:%d", te->ns);
564  return std::make_shared<DataAbort>(
565  vaddr_tainted, te->domain,
566  (is_atomic && !grant_read) ? false : is_write,
567  ArmFault::PermissionLL + te->lookupLevel,
569  }
570  }
571 
572  return NoFault;
573 }
574 
577  ThreadContext *tc, CachedState &state, bool r, bool w, bool x)
578 {
579  assert(ArmSystem::haveEL(tc, EL2) && state.aarch64EL != EL2);
580 
581  // In stage 2 we use the hypervisor access permission bits.
582  // The following permissions are described in ARM DDI 0487A.f
583  // D4-1802
584  bool grant = false;
585  bool grant_read = te->hap & 0b01;
586  bool grant_write = te->hap & 0b10;
587 
588  uint8_t xn = te->xn;
589  uint8_t pxn = te->pxn;
590 
591  if (ArmSystem::haveEL(tc, EL3) && state.isSecure &&
592  te->ns && state.scr.sif) {
593  xn = true;
594  }
595 
596  DPRINTF(TLBVerbose,
597  "Checking S2 permissions: hap:%d, xn:%d, pxn:%d, r:%d, "
598  "w:%d, x:%d\n", te->hap, xn, pxn, r, w, x);
599 
600  if (x) {
601  grant = grant_read && !xn;
602  } else if (req->isAtomic()) {
603  grant = grant_read || grant_write;
604  } else if (w) {
605  grant = grant_write;
606  } else if (r) {
607  grant = grant_read;
608  } else {
609  panic("Invalid Operation\n");
610  }
611 
612  return std::make_pair(grant, grant_read);
613 }
614 
617  ThreadContext *tc, CachedState &state, bool r, bool w, bool x)
618 {
619  bool grant = false, grant_read = true;
620 
621  const uint8_t ap = te->ap & 0b11; // 2-bit access protection field
622  const bool is_priv = state.isPriv && !(req->getFlags() & UserMode);
623 
624  bool wxn = state.sctlr.wxn;
625  uint8_t xn = te->xn;
626  uint8_t pxn = te->pxn;
627 
628  if (ArmSystem::haveEL(tc, EL3) && state.isSecure &&
629  te->ns && state.scr.sif) {
630  xn = true;
631  }
632 
633  DPRINTF(TLBVerbose, "Checking S1 permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
634  "w:%d, x:%d, is_priv: %d, wxn: %d\n", ap, xn,
635  pxn, r, w, x, is_priv, wxn);
636 
637  if (faultPAN(tc, ap, req, mode, is_priv, state)) {
638  return std::make_pair(false, false);
639  }
640 
641  ExceptionLevel regime = !is_priv ? EL0 : state.aarch64EL;
642  switch (regime) {
643  case EL0:
644  {
645  grant_read = ap & 0x1;
646  uint8_t perm = (ap << 2) | (xn << 1) | pxn;
647  switch (perm) {
648  case 0:
649  case 1:
650  case 8:
651  case 9:
652  grant = x;
653  break;
654  case 4:
655  case 5:
656  grant = r || w || (x && !wxn);
657  break;
658  case 6:
659  case 7:
660  grant = r || w;
661  break;
662  case 12:
663  case 13:
664  grant = r || x;
665  break;
666  case 14:
667  case 15:
668  grant = r;
669  break;
670  default:
671  grant = false;
672  }
673  }
674  break;
675  case EL1:
676  {
677  uint8_t perm = (ap << 2) | (xn << 1) | pxn;
678  switch (perm) {
679  case 0:
680  case 2:
681  grant = r || w || (x && !wxn);
682  break;
683  case 1:
684  case 3:
685  case 4:
686  case 5:
687  case 6:
688  case 7:
689  // regions that are writeable at EL0 should not be
690  // executable at EL1
691  grant = r || w;
692  break;
693  case 8:
694  case 10:
695  case 12:
696  case 14:
697  grant = r || x;
698  break;
699  case 9:
700  case 11:
701  case 13:
702  case 15:
703  grant = r;
704  break;
705  default:
706  grant = false;
707  }
708  }
709  break;
710  case EL2:
711  case EL3:
712  {
713  uint8_t perm = (ap & 0x2) | xn;
714  switch (perm) {
715  case 0:
716  grant = r || w || (x && !wxn);
717  break;
718  case 1:
719  grant = r || w;
720  break;
721  case 2:
722  grant = r || x;
723  break;
724  case 3:
725  grant = r;
726  break;
727  default:
728  grant = false;
729  }
730  }
731  break;
732  }
733 
734  return std::make_pair(grant, grant_read);
735 }
736 
737 bool
738 MMU::faultPAN(ThreadContext *tc, uint8_t ap, const RequestPtr &req, Mode mode,
739  const bool is_priv, CachedState &state)
740 {
741  bool exception = false;
742  switch (state.aarch64EL) {
743  case EL0:
744  break;
745  case EL1:
746  if (checkPAN(tc, ap, req, mode, is_priv, state)) {
747  exception = true;;
748  }
749  break;
750  case EL2:
751  if (state.hcr.e2h && checkPAN(tc, ap, req, mode, is_priv, state)) {
752  exception = true;;
753  }
754  break;
755  case EL3:
756  break;
757  }
758 
759  return exception;
760 }
761 
762 bool
763 MMU::checkPAN(ThreadContext *tc, uint8_t ap, const RequestPtr &req, Mode mode,
764  const bool is_priv, CachedState &state)
765 {
766  // The PAN bit has no effect on:
767  // 1) Instruction accesses.
768  // 2) Data Cache instructions other than DC ZVA
769  // 3) Address translation instructions, other than ATS1E1RP and
770  // ATS1E1WP when ARMv8.2-ATS1E1 is implemented. (Unimplemented in
771  // gem5)
772  // 4) Instructions to be treated as unprivileged, unless
773  // HCR_EL2.{E2H, TGE} == {1, 0}
774  const AA64MMFR1 mmfr1 = tc->readMiscReg(MISCREG_ID_AA64MMFR1_EL1);
775  if (mmfr1.pan && state.cpsr.pan && (ap & 0x1) &&
776  mode != BaseMMU::Execute) {
777 
778  if (req->isCacheMaintenance() &&
779  !(req->getFlags() & Request::CACHE_BLOCK_ZERO)) {
780  // Cache maintenance other than DC ZVA
781  return false;
782  } else if (!is_priv && !(state.hcr.e2h && !state.hcr.tge)) {
783  // Treated as unprivileged unless HCR_EL2.{E2H, TGE} == {1, 0}
784  return false;
785  }
786  return true;
787  }
788 
789  return false;
790 }
791 
792 Fault
794  ArmTranslationType tran_type, Addr vaddr, bool long_desc_format,
795  CachedState &state)
796 {
797  bool is_fetch = (mode == Execute);
798  bool is_atomic = req->isAtomic();
799  req->setPaddr(vaddr);
800  // When the MMU is off the security attribute corresponds to the
801  // security state of the processor
802  if (state.isSecure)
803  req->setFlags(Request::SECURE);
804 
805  if (state.aarch64) {
806  bool selbit = bits(vaddr, 55);
807  TCR tcr1 = tc->readMiscReg(MISCREG_TCR_EL1);
808  int topbit = computeAddrTop(tc, selbit, is_fetch, tcr1, currEL(tc));
809  int addr_sz = bits(vaddr, topbit, physAddrRange);
810  if (addr_sz != 0){
811  Fault f;
812  if (is_fetch)
813  f = std::make_shared<PrefetchAbort>(vaddr,
816  else
817  f = std::make_shared<DataAbort>( vaddr,
819  is_atomic ? false : mode==Write,
822  return f;
823  }
824  }
825 
826  // @todo: double check this (ARM ARM issue C B3.2.1)
827  if (long_desc_format || state.sctlr.tre == 0 || state.nmrr.ir0 == 0 ||
828  state.nmrr.or0 == 0 || state.prrr.tr0 != 0x2) {
829  if (!req->isCacheMaintenance()) {
830  req->setFlags(Request::UNCACHEABLE);
831  }
832  req->setFlags(Request::STRICT_ORDER);
833  }
834 
835  // Set memory attributes
836  TlbEntry temp_te;
837  temp_te.ns = !state.isSecure;
838  bool dc = (HaveVirtHostExt(tc)
839  && state.hcr.e2h == 1 && state.hcr.tge == 1) ? 0: state.hcr.dc;
840  bool i_cacheability = state.sctlr.i && !state.sctlr.m;
841  if (state.isStage2 || !dc || state.isSecure ||
842  (state.isHyp && !(tran_type & S1CTran))) {
843 
844  temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
846  temp_te.innerAttrs = i_cacheability? 0x2: 0x0;
847  temp_te.outerAttrs = i_cacheability? 0x2: 0x0;
848  temp_te.shareable = true;
849  temp_te.outerShareable = true;
850  } else {
852  temp_te.innerAttrs = 0x3;
853  temp_te.outerAttrs = 0x3;
854  temp_te.shareable = false;
855  temp_te.outerShareable = false;
856  }
857  temp_te.setAttributes(long_desc_format);
858  DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: "
859  "%d, innerAttrs: %d, outerAttrs: %d, stage2: %d\n",
860  temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
861  state.isStage2);
862  setAttr(temp_te.attributes);
863 
865 }
866 
867 Fault
869  Translation *translation, bool &delay, bool timing,
870  bool functional, Addr vaddr,
871  ArmFault::TranMethod tranMethod, CachedState &state)
872 {
873  TlbEntry *te = NULL;
874  bool is_fetch = (mode == Execute);
875  TlbEntry mergeTe;
876 
877  Request::Flags flags = req->getFlags();
878  Addr vaddr_tainted = req->getVaddr();
879 
880  Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
881  functional, &mergeTe, state);
882  // only proceed if we have a valid table entry
883  if (!isCompleteTranslation(te) && (fault == NoFault)) delay = true;
884 
885  // If we have the table entry transfer some of the attributes to the
886  // request that triggered the translation
887  if (isCompleteTranslation(te)) {
888  // Set memory attributes
889  DPRINTF(TLBVerbose,
890  "Setting memory attributes: shareable: %d, innerAttrs: %d, "
891  "outerAttrs: %d, mtype: %d, stage2: %d\n",
892  te->shareable, te->innerAttrs, te->outerAttrs,
893  static_cast<uint8_t>(te->mtype), state.isStage2);
894  setAttr(te->attributes);
895 
896  if (te->nonCacheable && !req->isCacheMaintenance())
897  req->setFlags(Request::UNCACHEABLE);
898 
899  // Require requests to be ordered if the request goes to
900  // strongly ordered or device memory (i.e., anything other
901  // than normal memory requires strict order).
902  if (te->mtype != TlbEntry::MemoryType::Normal)
903  req->setFlags(Request::STRICT_ORDER);
904 
905  Addr pa = te->pAddr(vaddr);
906  req->setPaddr(pa);
907 
908  if (state.isSecure && !te->ns) {
909  req->setFlags(Request::SECURE);
910  }
911  if (!is_fetch && fault == NoFault &&
912  (vaddr & mask(flags & AlignmentMask)) &&
913  (te->mtype != TlbEntry::MemoryType::Normal)) {
914  // Unaligned accesses to Device memory should always cause an
915  // abort regardless of sctlr.a
916  stats.alignFaults++;
917  bool is_write = (mode == Write);
918  return std::make_shared<DataAbort>(
919  vaddr_tainted,
922  tranMethod);
923  }
924 
925  // Check for a trickbox generated address fault
926  if (fault == NoFault)
927  fault = testTranslation(req, mode, te->domain, state);
928  }
929 
930  if (fault == NoFault) {
931  // Don't try to finalize a physical address unless the
932  // translation has completed (i.e., there is a table entry).
933  return te ? finalizePhysical(req, tc, mode) : NoFault;
934  } else {
935  return fault;
936  }
937 }
938 
939 Fault
941  Translation *translation, bool &delay, bool timing,
942  ArmTranslationType tran_type, bool functional,
943  CachedState &state)
944 {
945  // No such thing as a functional timing access
946  assert(!(timing && functional));
947 
948  Addr vaddr_tainted = req->getVaddr();
949  Addr vaddr = 0;
950  if (state.aarch64)
951  vaddr = purifyTaggedAddr(vaddr_tainted, tc, state.aarch64EL,
952  (TCR)state.ttbcr, mode==Execute);
953  else
954  vaddr = vaddr_tainted;
955  Request::Flags flags = req->getFlags();
956 
957  bool is_fetch = (mode == Execute);
958  bool is_write = (mode == Write);
959  bool long_desc_format = state.aarch64 || longDescFormatInUse(tc);
960  ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
962 
963  DPRINTF(TLBVerbose,
964  "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
965  state.isPriv, flags & UserMode, state.isSecure,
966  tran_type & S1S2NsTran);
967 
968  DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
969  "flags %#lx tranType 0x%x\n", vaddr_tainted, mode,
970  state.isStage2, state.scr, state.sctlr, flags, tran_type);
971 
972  if (!state.isStage2) {
973  if ((req->isInstFetch() && (!state.sctlr.i)) ||
974  ((!req->isInstFetch()) && (!state.sctlr.c))){
975  if (!req->isCacheMaintenance()) {
976  req->setFlags(Request::UNCACHEABLE);
977  }
978  req->setFlags(Request::STRICT_ORDER);
979  }
980  }
981  if (!is_fetch) {
982  if (state.sctlr.a || !(flags & AllowUnaligned)) {
983  if (vaddr & mask(flags & AlignmentMask)) {
984  stats.alignFaults++;
985  return std::make_shared<DataAbort>(
986  vaddr_tainted,
989  tranMethod);
990  }
991  }
992  }
993 
994  bool vm = state.hcr.vm;
995  if (HaveVirtHostExt(tc) && state.hcr.e2h == 1 && state.hcr.tge ==1)
996  vm = 0;
997  else if (state.hcr.dc == 1)
998  vm = 1;
999 
1000  Fault fault = NoFault;
1001  // If guest MMU is off or hcr.vm=0 go straight to stage2
1002  if ((state.isStage2 && !vm) || (!state.isStage2 && !state.sctlr.m)) {
1003  fault = translateMmuOff(tc, req, mode, tran_type, vaddr,
1004  long_desc_format, state);
1005  } else {
1006  DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
1007  state.isStage2 ? "IPA" : "VA", vaddr_tainted, state.asid);
1008  // Translation enabled
1009  fault = translateMmuOn(tc, req, mode, translation, delay, timing,
1010  functional, vaddr, tranMethod, state);
1011  }
1012 
1013  // Check for Debug Exceptions
1015 
1016  if (sd->enabled() && fault == NoFault) {
1017  fault = sd->testDebug(tc, req, mode);
1018  }
1019 
1020  return fault;
1021 }
1022 
1023 Fault
1025  ArmTranslationType tran_type)
1026 {
1027  return translateAtomic(req, tc, mode, tran_type, false);
1028 }
1029 
1030 Fault
1032  ArmTranslationType tran_type, bool stage2)
1033 {
1034  auto& state = updateMiscReg(tc, tran_type, stage2);
1035 
1036  bool delay = false;
1037  Fault fault;
1038  if (FullSystem)
1039  fault = translateFs(req, tc, mode, NULL, delay, false,
1040  tran_type, false, state);
1041  else
1042  fault = translateSe(req, tc, mode, NULL, delay, false, state);
1043  assert(!delay);
1044  return fault;
1045 }
1046 
1047 Fault
1049 {
1050  return translateFunctional(req, tc, mode, NormalTran, false);
1051 }
1052 
1053 Fault
1055  ArmTranslationType tran_type)
1056 {
1057  return translateFunctional(req, tc, mode, tran_type, false);
1058 }
1059 
1060 Fault
1062  ArmTranslationType tran_type, bool stage2)
1063 {
1064  auto& state = updateMiscReg(tc, tran_type, stage2);
1065 
1066  bool delay = false;
1067  Fault fault;
1068  if (FullSystem)
1069  fault = translateFs(req, tc, mode, NULL, delay, false,
1070  tran_type, true, state);
1071  else
1072  fault = translateSe(req, tc, mode, NULL, delay, false, state);
1073  assert(!delay);
1074  return fault;
1075 }
1076 
1077 void
1079  Translation *translation, Mode mode, ArmTranslationType tran_type,
1080  bool stage2)
1081 {
1082  auto& state = updateMiscReg(tc, tran_type, stage2);
1083 
1084  assert(translation);
1085 
1086  translateComplete(req, tc, translation, mode, tran_type,
1087  stage2, state);
1088 }
1089 
1090 Fault
1092  Translation *translation, Mode mode, ArmTranslationType tran_type,
1093  bool call_from_s2)
1094 {
1095  return translateComplete(req, tc, translation, mode, tran_type,
1096  call_from_s2, s1State);
1097 }
1098 
1099 Fault
1101  Translation *translation, Mode mode, ArmTranslationType tran_type,
1102  bool call_from_s2, CachedState &state)
1103 {
1104  bool delay = false;
1105  Fault fault;
1106  if (FullSystem)
1107  fault = translateFs(req, tc, mode, translation, delay, true, tran_type,
1108  false, state);
1109  else
1110  fault = translateSe(req, tc, mode, translation, delay, true, state);
1111 
1112  DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n", delay,
1113  fault != NoFault);
1114  // If we have a translation, and we're not in the middle of doing a stage
1115  // 2 translation tell the translation that we've either finished or its
1116  // going to take a while. By not doing this when we're in the middle of a
1117  // stage 2 translation we prevent marking the translation as delayed twice,
1118  // one when the translation starts and again when the stage 1 translation
1119  // completes.
1120 
1121  if (translation && (call_from_s2 || !state.stage2Req || req->hasPaddr() ||
1122  fault != NoFault)) {
1123  if (!delay)
1124  translation->finish(fault, req, tc, mode);
1125  else
1126  translation->markDelayed();
1127  }
1128  return fault;
1129 }
1130 
1131 vmid_t
// NOTE(review): Doxygen-scrape artifact — the listing line between
// "vmid_t" and "{" (the function signature, presumably
// MMU::getVMID(ThreadContext *tc) const) is missing from this capture.
1133 {
// Extract the current VMID from VTTBR_EL2. The VMID width depends on
// ID_AA64MMFR1_EL1.VMIDBits, and for the 16-bit-capable encoding also on
// VTCR_EL2.VS and whether EL2 is AArch64.
1134  AA64MMFR1 mmfr1 = tc->readMiscReg(MISCREG_ID_AA64MMFR1_EL1);
1135  VTCR_t vtcr = tc->readMiscReg(MISCREG_VTCR_EL2);
1136  vmid_t vmid = 0;
1137 
1138  switch (mmfr1.vmidbits) {
1139  case 0b0000:
1140  // 8 bits
1141  vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48);
1142  break;
1143  case 0b0010:
1144  if (vtcr.vs && ELIs64(tc, EL2)) {
1145  // 16 bits
1146  vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 63, 48);
1147  } else {
1148  // 8 bits
1149  vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48);
1150  }
1151  break;
1152  default:
// Any other VMIDBits encoding is architecturally reserved.
1153  panic("Reserved ID_AA64MMFR1_EL1.VMIDBits value: %#x",
1154  mmfr1.vmidbits);
1155  }
1156 
1157  return vmid;
1158 }
1159 
1162  ArmTranslationType tran_type, bool stage2)
1163 {
1164  // check if the regs have changed, or the translation mode is different.
1165  // NOTE: the tran type doesn't affect stage 2 TLB's as they only handle
1166  // one type of translation anyway
1167 
1168  auto& state = stage2 ? s2State : s1State;
1169  if (state.miscRegValid && miscRegContext == tc->contextId() &&
1170  ((tran_type == state.curTranType) || stage2)) {
1171 
1172  } else {
1173  DPRINTF(TLBVerbose, "TLB variables changed!\n");
1174  state.updateMiscReg(tc, tran_type);
1175 
1176  itbStage2->setVMID(state.vmid);
1177  dtbStage2->setVMID(state.vmid);
1178 
1179  for (auto tlb : instruction) {
1180  static_cast<TLB*>(tlb)->setVMID(state.vmid);
1181  }
1182  for (auto tlb : data) {
1183  static_cast<TLB*>(tlb)->setVMID(state.vmid);
1184  }
1185  for (auto tlb : unified) {
1186  static_cast<TLB*>(tlb)->setVMID(state.vmid);
1187  }
1188 
1189  miscRegContext = tc->contextId();
1190  }
1191 
1192  if (state.directToStage2) {
1193  s2State.updateMiscReg(tc, tran_type);
1194  return s2State;
1195  } else {
1196  return state;
1197  }
1198 }
1199 
1200 void
1202  ArmTranslationType tran_type)
1203 {
1204  cpsr = tc->readMiscReg(MISCREG_CPSR);
1205 
1206  // Dependencies: SCR/SCR_EL3, CPSR
1207  isSecure = ArmISA::isSecure(tc) &&
1208  !(tran_type & HypMode) && !(tran_type & S1S2NsTran);
1209 
1210  aarch64EL = tranTypeEL(cpsr, tran_type);
1211  aarch64 = isStage2 ?
1212  ELIs64(tc, EL2) :
1213  ELIs64(tc, aarch64EL == EL0 ? EL1 : aarch64EL);
1214 
1215  hcr = tc->readMiscReg(MISCREG_HCR_EL2);
1216  if (aarch64) { // AArch64
1217  // determine EL we need to translate in
1218  switch (aarch64EL) {
1219  case EL0:
1220  if (HaveVirtHostExt(tc) && hcr.tge == 1 && hcr.e2h == 1) {
1221  // VHE code for EL2&0 regime
1222  sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
1223  ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
1224  uint64_t ttbr_asid = ttbcr.a1 ?
1227  asid = bits(ttbr_asid,
1228  (mmu->haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
1229 
1230  } else {
1231  sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
1232  ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
1233  uint64_t ttbr_asid = ttbcr.a1 ?
1236  asid = bits(ttbr_asid,
1237  (mmu->haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
1238 
1239  }
1240  break;
1241  case EL1:
1242  {
1243  sctlr = tc->readMiscReg(MISCREG_SCTLR_EL1);
1244  ttbcr = tc->readMiscReg(MISCREG_TCR_EL1);
1245  uint64_t ttbr_asid = ttbcr.a1 ?
1248  asid = bits(ttbr_asid,
1249  (mmu->haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
1250  }
1251  break;
1252  case EL2:
1253  sctlr = tc->readMiscReg(MISCREG_SCTLR_EL2);
1254  ttbcr = tc->readMiscReg(MISCREG_TCR_EL2);
1255  if (hcr.e2h == 1) {
1256  // VHE code for EL2&0 regime
1257  uint64_t ttbr_asid = ttbcr.a1 ?
1260  asid = bits(ttbr_asid,
1261  (mmu->haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
1262  } else {
1263  asid = -1;
1264  }
1265  break;
1266  case EL3:
1267  sctlr = tc->readMiscReg(MISCREG_SCTLR_EL3);
1268  ttbcr = tc->readMiscReg(MISCREG_TCR_EL3);
1269  asid = -1;
1270  break;
1271  }
1272 
1273  scr = tc->readMiscReg(MISCREG_SCR_EL3);
1274  isPriv = aarch64EL != EL0;
1275  if (mmu->release()->has(ArmExtension::VIRTUALIZATION)) {
1276  vmid = getVMID(tc);
1277  isHyp = aarch64EL == EL2;
1278  isHyp |= tran_type & HypMode;
1279  isHyp &= (tran_type & S1S2NsTran) == 0;
1280  isHyp &= (tran_type & S1CTran) == 0;
1281  bool vm = hcr.vm;
1282  if (HaveVirtHostExt(tc) && hcr.e2h == 1 && hcr.tge ==1) {
1283  vm = 0;
1284  }
1285 
1286  if (hcr.e2h == 1 && (aarch64EL == EL2
1287  || (hcr.tge ==1 && aarch64EL == EL0))) {
1288  isHyp = true;
1289  directToStage2 = false;
1290  stage2Req = false;
1291  stage2DescReq = false;
1292  } else {
1293  // Work out if we should skip the first stage of translation and go
1294  // directly to stage 2. This value is cached so we don't have to
1295  // compute it for every translation.
1296  bool sec = !isSecure || (isSecure && IsSecureEL2Enabled(tc));
1297  stage2Req = isStage2 ||
1298  (vm && !isHyp && sec &&
1299  !(tran_type & S1CTran) && (aarch64EL < EL2) &&
1300  !(tran_type & S1E1Tran)); // <--- FIX THIS HACK
1301  stage2DescReq = isStage2 || (vm && !isHyp && sec &&
1302  (aarch64EL < EL2));
1303  directToStage2 = !isStage2 && stage2Req && !sctlr.m;
1304  }
1305  } else {
1306  vmid = 0;
1307  isHyp = false;
1308  directToStage2 = false;
1309  stage2Req = false;
1310  stage2DescReq = false;
1311  }
1312  } else { // AArch32
1313  sctlr = tc->readMiscReg(snsBankedIndex(MISCREG_SCTLR, tc,
1314  !isSecure));
1315  ttbcr = tc->readMiscReg(snsBankedIndex(MISCREG_TTBCR, tc,
1316  !isSecure));
1317  scr = tc->readMiscReg(MISCREG_SCR);
1318  isPriv = cpsr.mode != MODE_USER;
1319  if (longDescFormatInUse(tc)) {
1320  uint64_t ttbr_asid = tc->readMiscReg(
1321  snsBankedIndex(ttbcr.a1 ? MISCREG_TTBR1 :
1322  MISCREG_TTBR0,
1323  tc, !isSecure));
1324  asid = bits(ttbr_asid, 55, 48);
1325  } else { // Short-descriptor translation table format in use
1326  CONTEXTIDR context_id = tc->readMiscReg(snsBankedIndex(
1328  asid = context_id.asid;
1329  }
1330  prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR, tc,
1331  !isSecure));
1332  nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR, tc,
1333  !isSecure));
1334  dacr = tc->readMiscReg(snsBankedIndex(MISCREG_DACR, tc,
1335  !isSecure));
1336  hcr = tc->readMiscReg(MISCREG_HCR);
1337 
1338  if (mmu->release()->has(ArmExtension::VIRTUALIZATION)) {
1339  vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
1340  isHyp = cpsr.mode == MODE_HYP;
1341  isHyp |= tran_type & HypMode;
1342  isHyp &= (tran_type & S1S2NsTran) == 0;
1343  isHyp &= (tran_type & S1CTran) == 0;
1344  if (isHyp) {
1345  sctlr = tc->readMiscReg(MISCREG_HSCTLR);
1346  }
1347  // Work out if we should skip the first stage of translation and go
1348  // directly to stage 2. This value is cached so we don't have to
1349  // compute it for every translation.
1350  bool sec = !isSecure || (isSecure && IsSecureEL2Enabled(tc));
1351  stage2Req = hcr.vm && !isStage2 && !isHyp && sec &&
1352  !(tran_type & S1CTran);
1353  stage2DescReq = hcr.vm && !isStage2 && !isHyp && sec;
1354  directToStage2 = stage2Req && !sctlr.m;
1355  } else {
1356  vmid = 0;
1357  stage2Req = false;
1358  isHyp = false;
1359  directToStage2 = false;
1360  stage2DescReq = false;
1361  }
1362  }
1363  miscRegValid = true;
1364  curTranType = tran_type;
1365 }
1366 
1369 {
1370  switch (type) {
1371  case S1E0Tran:
1372  case S12E0Tran:
1373  return EL0;
1374 
1375  case S1E1Tran:
1376  case S12E1Tran:
1377  return EL1;
1378 
1379  case S1E2Tran:
1380  return EL2;
1381 
1382  case S1E3Tran:
1383  return EL3;
1384 
1385  case NormalTran:
1386  case S1CTran:
1387  case S1S2NsTran:
1388  case HypMode:
1389  return currEL(cpsr);
1390 
1391  default:
1392  panic("Unknown translation mode!\n");
1393  }
1394 }
1395 
1396 Fault
1398  Translation *translation, bool timing, bool functional,
1399  bool is_secure, ArmTranslationType tran_type,
1400  bool stage2)
1401 {
1402  return getTE(te, req, tc, mode, translation, timing, functional,
1403  is_secure, tran_type, stage2 ? s2State : s1State);
1404 }
1405 
1406 TlbEntry*
1407 MMU::lookup(Addr va, uint16_t asid, vmid_t vmid, bool hyp, bool secure,
1408  bool functional, bool ignore_asn, ExceptionLevel target_el,
1409  bool in_host, bool stage2, BaseMMU::Mode mode)
1410 {
1411  TLB *tlb = getTlb(mode, stage2);
1412 
1413  TlbEntry::Lookup lookup_data;
1414 
1415  lookup_data.va = va;
1416  lookup_data.asn = asid;
1417  lookup_data.ignoreAsn = ignore_asn;
1418  lookup_data.vmid = vmid;
1419  lookup_data.hyp = hyp;
1420  lookup_data.secure = secure;
1421  lookup_data.functional = functional;
1422  lookup_data.targetEL = target_el;
1423  lookup_data.inHost = in_host;
1424  lookup_data.mode = mode;
1425 
1426  return tlb->multiLookup(lookup_data);
1427 }
1428 
1429 Fault
1431  Translation *translation, bool timing, bool functional,
1432  bool is_secure, ArmTranslationType tran_type,
1433  CachedState& state)
1434 {
1435  // In a 2-stage system, the IPA->PA translation can be started via this
1436  // call so make sure the miscRegs are correct.
1437  if (state.isStage2) {
1438  updateMiscReg(tc, tran_type, true);
1439  }
1440 
1441  Addr vaddr_tainted = req->getVaddr();
1442  Addr vaddr = 0;
1443  ExceptionLevel target_el = state.aarch64 ? state.aarch64EL : EL1;
1444  if (state.aarch64) {
1445  vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el,
1446  (TCR)state.ttbcr, mode==Execute);
1447  } else {
1448  vaddr = vaddr_tainted;
1449  }
1450 
1451  *te = lookup(vaddr, state.asid, state.vmid, state.isHyp, is_secure, false,
1452  false, target_el, false, state.isStage2, mode);
1453 
1454  if (!isCompleteTranslation(*te)) {
1455  if (req->isPrefetch()) {
1456  // if the request is a prefetch don't attempt to fill the TLB or go
1457  // any further with the memory access (here we can safely use the
1458  // fault status for the short desc. format in all cases)
1460  return std::make_shared<PrefetchAbort>(
1461  vaddr_tainted, ArmFault::PrefetchTLBMiss, state.isStage2);
1462  }
1463 
1464  // start translation table walk, pass variables rather than
1465  // re-retreaving in table walker for speed
1466  DPRINTF(TLB,
1467  "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
1468  vaddr_tainted, state.asid, state.vmid);
1469 
1470  Fault fault;
1471  fault = getTableWalker(mode, state.isStage2)->walk(
1472  req, tc, state.asid, state.vmid, state.isHyp, mode,
1473  translation, timing, functional, is_secure,
1474  tran_type, state.stage2DescReq, *te);
1475 
1476  // for timing mode, return and wait for table walk,
1477  if (timing || fault != NoFault) {
1478  return fault;
1479  }
1480 
1481  *te = lookup(vaddr, state.asid, state.vmid, state.isHyp, is_secure,
1482  true, false, target_el, false, state.isStage2, mode);
1483  assert(*te);
1484  }
1485  return NoFault;
1486 }
1487 
1488 Fault
1490  ThreadContext *tc, Mode mode,
1491  Translation *translation, bool timing, bool functional,
1492  TlbEntry *mergeTe, CachedState &state)
1493 {
1494  Fault fault;
1495 
1496  if (state.isStage2) {
1497  // We are already in the stage 2 TLB. Grab the table entry for stage
1498  // 2 only. We are here because stage 1 translation is disabled.
1499  TlbEntry *s2_te = nullptr;
1500  // Get the stage 2 table entry
1501  fault = getTE(&s2_te, req, tc, mode, translation, timing, functional,
1502  state.isSecure, state.curTranType, state);
1503  // Check permissions of stage 2
1504  if (isCompleteTranslation(s2_te) && (fault == NoFault)) {
1505  if (state.aarch64)
1506  fault = checkPermissions64(s2_te, req, mode, tc, state);
1507  else
1508  fault = checkPermissions(s2_te, req, mode, state);
1509  }
1510  *te = s2_te;
1511  return fault;
1512  }
1513 
1514  TlbEntry *s1_te = nullptr;
1515 
1516  Addr vaddr_tainted = req->getVaddr();
1517 
1518  // Get the stage 1 table entry
1519  fault = getTE(&s1_te, req, tc, mode, translation, timing, functional,
1520  state.isSecure, state.curTranType, state);
1521  // only proceed if we have a valid table entry
1522  if (isCompleteTranslation(s1_te) && (fault == NoFault)) {
1523  // Check stage 1 permissions before checking stage 2
1524  if (state.aarch64)
1525  fault = checkPermissions64(s1_te, req, mode, tc, state);
1526  else
1527  fault = checkPermissions(s1_te, req, mode, state);
1528  if (state.stage2Req & (fault == NoFault)) {
1529  Stage2LookUp *s2_lookup = new Stage2LookUp(this, *s1_te,
1530  req, translation, mode, timing, functional, state.isSecure,
1531  state.curTranType);
1532  fault = s2_lookup->getTe(tc, mergeTe);
1533  if (s2_lookup->isComplete()) {
1534  *te = mergeTe;
1535  // We've finished with the lookup so delete it
1536  delete s2_lookup;
1537  } else {
1538  // The lookup hasn't completed, so we can't delete it now. We
1539  // get round this by asking the object to self delete when the
1540  // translation is complete.
1541  s2_lookup->setSelfDelete();
1542  }
1543  } else {
1544  // This case deals with an S1 hit (or bypass), followed by
1545  // an S2 hit-but-perms issue
1546  if (state.isStage2) {
1547  DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
1548  vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0,
1549  fault);
1550  if (fault != NoFault) {
1551  auto arm_fault = reinterpret_cast<ArmFault*>(fault.get());
1552  arm_fault->annotate(ArmFault::S1PTW, false);
1553  arm_fault->annotate(ArmFault::OVA, vaddr_tainted);
1554  }
1555  }
1556  *te = s1_te;
1557  }
1558  }
1559  return fault;
1560 }
1561 
1562 bool
1564 {
1565  return entry && !entry->partial;
1566 }
1567 
1568 void
1570 {
1571  BaseMMU::takeOverFrom(old_mmu);
1572 
1573  auto *ommu = dynamic_cast<MMU*>(old_mmu);
1574  assert(ommu);
1575 
1576  _attr = ommu->_attr;
1577 
1578  s1State = ommu->s1State;
1579  s2State = ommu->s2State;
1580 }
1581 
1582 void
1584 {
1585  if (!_ti) {
1586  test = nullptr;
1587  } else {
1588  TlbTestInterface *ti(dynamic_cast<TlbTestInterface *>(_ti));
1589  fatal_if(!ti, "%s is not a valid ARM TLB tester\n", _ti->name());
1590  test = ti;
1591  }
1592 }
1593 
1594 Fault
1597 {
1598  if (!test || !req->hasSize() || req->getSize() == 0 ||
1599  req->isCacheMaintenance()) {
1600  return NoFault;
1601  } else {
1602  return test->translationCheck(req, state.isPriv, mode, domain);
1603  }
1604 }
1605 
1606 Fault
1607 MMU::testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode,
1608  TlbEntry::DomainType domain, LookupLevel lookup_level,
1609  bool stage2)
1610 {
1611  return testWalk(pa, size, va, is_secure, mode, domain, lookup_level,
1612  stage2 ? s2State : s1State);
1613 }
1614 
1615 Fault
1616 MMU::testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode,
1617  TlbEntry::DomainType domain, LookupLevel lookup_level,
1618  CachedState &state)
1619 {
1620  if (!test) {
1621  return NoFault;
1622  } else {
1623  return test->walkCheck(pa, size, va, is_secure, state.isPriv, mode,
1624  domain, lookup_level);
1625  }
1626 }
1627 
1629  : statistics::Group(parent),
1630  ADD_STAT(alignFaults, statistics::units::Count::get(),
1631  "Number of MMU faults due to alignment restrictions"),
1632  ADD_STAT(prefetchFaults, statistics::units::Count::get(),
1633  "Number of MMU faults due to prefetch"),
1634  ADD_STAT(domainFaults, statistics::units::Count::get(),
1635  "Number of MMU faults due to domain restrictions"),
1636  ADD_STAT(permsFaults, statistics::units::Count::get(),
1637  "Number of MMU faults due to permissions restrictions")
1638 {
1639 }
1640 
1641 } // namespace gem5
gem5::ArmISA::MMU::itbWalker
TableWalker * itbWalker
Definition: mmu.hh:83
gem5::ArmISA::MISCREG_CPSR
@ MISCREG_CPSR
Definition: misc.hh:61
gem5::ArmISA::MMU::S12E0Tran
@ S12E0Tran
Definition: mmu.hh:129
gem5::ArmSystem::physAddrRange
uint8_t physAddrRange() const
Returns the supported physical address range in bits.
Definition: system.hh:213
gem5::ArmISA::tlb
Bitfield< 59, 56 > tlb
Definition: misc_types.hh:92
gem5::ArmISA::MISCREG_VTTBR
@ MISCREG_VTTBR
Definition: misc.hh:448
gem5::ArmISA::MMU::finalizePhysical
Fault finalizePhysical(const RequestPtr &req, ThreadContext *tc, Mode mode) const override
Definition: mmu.cc:203
gem5::ArmISA::TlbEntry::Lookup::mode
BaseMMU::Mode mode
Definition: pagetable.hh:206
gem5::BaseMMU::Read
@ Read
Definition: mmu.hh:56
gem5::ArmISA::MMU::haveLargeAsid64
bool haveLargeAsid64
Definition: mmu.hh:449
gem5::ArmISA::MMU::S12E1Tran
@ S12E1Tran
Definition: mmu.hh:130
gem5::ArmISA::MISCREG_TTBR0_EL2
@ MISCREG_TTBR0_EL2
Definition: misc.hh:603
gem5::ThreadContext::readMiscReg
virtual RegVal readMiscReg(RegIndex misc_reg)=0
gem5::NoFault
constexpr decltype(nullptr) NoFault
Definition: types.hh:260
gem5::ArmISA::ELIs64
bool ELIs64(ThreadContext *tc, ExceptionLevel el)
Definition: utility.cc:290
gem5::ArmISA::ArmFault::AddressSizeLL
@ AddressSizeLL
Definition: faults.hh:111
gem5::AddrRange::start
Addr start() const
Get the start address of the range.
Definition: addr_range.hh:343
gem5::ArmISA::MMU::LookupLevel
enums::ArmLookupLevel LookupLevel
Definition: mmu.hh:62
gem5::ArmISA::MISCREG_SCTLR_EL3
@ MISCREG_SCTLR_EL3
Definition: misc.hh:591
gem5::RiscvISA::perm
Bitfield< 3, 1 > perm
Definition: pagetable.hh:72
gem5::MipsISA::w
Bitfield< 0 > w
Definition: pra_constants.hh:281
gem5::ArmISA::MMU::CachedState::nmrr
NMRR nmrr
Definition: mmu.hh:158
gem5::ArmISA::SelfDebug
Definition: self_debug.hh:277
gem5::ArmISA::MMU::s2PermBits64
std::pair< bool, bool > s2PermBits64(TlbEntry *te, const RequestPtr &req, Mode mode, ThreadContext *tc, CachedState &state, bool r, bool w, bool x)
Definition: mmu.cc:576
gem5::ArmISA::MISCREG_TTBR0
@ MISCREG_TTBR0
Definition: misc.hh:254
gem5::ArmISA::MMU::Stats::domainFaults
statistics::Scalar domainFaults
Definition: mmu.hh:462
gem5::ArmISA::MISCREG_CONTEXTIDR
@ MISCREG_CONTEXTIDR
Definition: misc.hh:399
gem5::ArmISA::TlbEntry::ns
bool ns
Definition: pagetable.hh:239
gem5::ArmISA::MMU::getTE
Fault getTE(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode, Translation *translation, bool timing, bool functional, bool is_secure, ArmTranslationType tran_type, bool stage2)
Definition: mmu.cc:1397
gem5::ArmISA::MISCREG_TCR_EL2
@ MISCREG_TCR_EL2
Definition: misc.hh:604
gem5::ArmISA::aarch64
Bitfield< 34 > aarch64
Definition: types.hh:81
test
Definition: test.h:38
pseudo_inst.hh
gem5::BaseMMU::Mode
Mode
Definition: mmu.hh:56
gem5::ArmISA::ArmFault::PrefetchUncacheable
@ PrefetchUncacheable
Definition: faults.hh:117
gem5::ArmISA::MMU::m5opRange
AddrRange m5opRange
Definition: mmu.hh:452
gem5::ArmISA::asid
asid
Definition: misc_types.hh:618
gem5::BaseMMU::Write
@ Write
Definition: mmu.hh:56
gem5::ArmISA::MISCREG_SCR_EL3
@ MISCREG_SCR_EL3
Definition: misc.hh:593
gem5::ArmISA::domain
Bitfield< 7, 4 > domain
Definition: misc_types.hh:424
gem5::ArmISA::TlbEntry::DomainType::NoAccess
@ NoAccess
gem5::AddrRange::contains
bool contains(const Addr &a) const
Determine if the range contains an address.
Definition: addr_range.hh:471
gem5::ArmISA::TlbEntry::outerAttrs
uint8_t outerAttrs
Definition: pagetable.hh:224
gem5::ArmISA::MMU::dtbStage2Walker
TableWalker * dtbStage2Walker
Definition: mmu.hh:86
gem5::ArmISA::f
Bitfield< 6 > f
Definition: misc_types.hh:68
gem5::ArmISA::ArmFault::LpaeTran
@ LpaeTran
Definition: faults.hh:152
gem5::ArmISA::ArmFault::PrefetchTLBMiss
@ PrefetchTLBMiss
Definition: faults.hh:116
gem5::ArmISA::MISCREG_TTBR1_EL1
@ MISCREG_TTBR1_EL1
Definition: misc.hh:599
gem5::ArmISA::MISCREG_TTBCR
@ MISCREG_TTBCR
Definition: misc.hh:260
gem5::ArmISA::vmid_t
uint16_t vmid_t
Definition: types.hh:57
gem5::BaseMMU::Translation::markDelayed
virtual void markDelayed()=0
Signal that the translation has been delayed due to a hw page table walk.
gem5::ArmISA::MMU::CachedState::updateMiscReg
void updateMiscReg(ThreadContext *tc, ArmTranslationType tran_type)
Definition: mmu.cc:1201
gem5::ArmISA::MISCREG_HSCTLR
@ MISCREG_HSCTLR
Definition: misc.hh:246
gem5::Request::SECURE
@ SECURE
The request targets the secure memory space.
Definition: request.hh:186
gem5::ArmISA::MMU::S1E0Tran
@ S1E0Tran
Definition: mmu.hh:125
gem5::ArmSystem::haveLargeAsid64
bool haveLargeAsid64() const
Returns true if ASID is 16 bits in AArch64 (ARMv8)
Definition: system.hh:202
gem5::ArmISA::e
Bitfield< 9 > e
Definition: misc_types.hh:65
gem5::ArmISA::TLB
Definition: tlb.hh:115
gem5::ArmISA::MMU::S1CTran
@ S1CTran
Definition: mmu.hh:116
gem5::ArmISA::MMU::CachedState::vmid
vmid_t vmid
Definition: mmu.hh:156
gem5::ArmISA::MISCREG_TCR_EL3
@ MISCREG_TCR_EL3
Definition: misc.hh:610
gem5::ArmISA::ArmFault::DomainLL
@ DomainLL
Definition: faults.hh:103
gem5::ThreadContext::contextId
virtual ContextID contextId() const =0
gem5::ArmISA::EL1
@ EL1
Definition: types.hh:267
gem5::ArmISA::MMU::getResultTe
Fault getResultTe(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode, Translation *translation, bool timing, bool functional, TlbEntry *mergeTe, CachedState &state)
Definition: mmu.cc:1489
gem5::BaseMMU::init
void init() override
Called at init time, this method is traversing the TLB hierarchy and pupulating the instruction/data/...
Definition: mmu.cc:53
gem5::ArmISA::TlbEntry::setAttributes
void setAttributes(bool lpae)
Definition: pagetable.hh:396
gem5::ArmISA::MMU::S1E3Tran
@ S1E3Tran
Definition: mmu.hh:128
gem5::ArmISA::inAArch64
bool inAArch64(ThreadContext *tc)
Definition: utility.cc:121
gem5::ArmISA::MMU::AllowUnaligned
@ AllowUnaligned
Definition: mmu.hh:108
gem5::ArmISA::MMU::_hasWalkCache
bool _hasWalkCache
Definition: mmu.hh:454
gem5::ArmISA::MMU::getDTBPtr
ArmISA::TLB * getDTBPtr() const
Definition: mmu.hh:65
gem5::ArmISA::TlbEntry::MemoryType::Normal
@ Normal
gem5::ArmISA::TableWalker::walk
Fault walk(const RequestPtr &req, ThreadContext *tc, uint16_t asid, vmid_t _vmid, bool hyp, BaseMMU::Mode mode, BaseMMU::Translation *_trans, bool timing, bool functional, bool secure, MMU::ArmTranslationType tran_type, bool stage2, const TlbEntry *walk_entry)
Definition: table_walker.cc:289
gem5::ArmISA::MISCREG_SCTLR
@ MISCREG_SCTLR
Definition: misc.hh:235
table_walker.hh
gem5::ArmISA::MMU::dtbWalker
TableWalker * dtbWalker
Definition: mmu.hh:84
gem5::ArmISA::TlbTestInterface
Definition: tlb.hh:79
gem5::ArmISA::MMU::testWalk
Fault testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode, TlbEntry::DomainType domain, LookupLevel lookup_level, bool stage2)
Definition: mmu.cc:1607
gem5::ArmISA::MMU::MMU
MMU(const ArmMMUParams &p)
Definition: mmu.cc:59
gem5::ArmISA::MMU::CachedState::ttbcr
TTBCR ttbcr
Definition: mmu.hh:154
gem5::ArmISA::TlbEntry
Definition: pagetable.hh:165
gem5::ArmISA::ArmFault::PermissionLL
@ PermissionLL
Definition: faults.hh:104
gem5::ArmISA::MMU::S1E2Tran
@ S1E2Tran
Definition: mmu.hh:127
gem5::ArmISA::MMU::init
void init() override
init() is called after all C++ SimObjects have been created and all ports are connected.
Definition: mmu.cc:92
gem5::ArmISA::MMU::_attr
uint64_t _attr
Definition: mmu.hh:445
gem5::ArmISA::TlbEntry::DomainType
DomainType
Definition: pagetable.hh:177
gem5::ArmISA::MISCREG_VTCR_EL2
@ MISCREG_VTCR_EL2
Definition: misc.hh:606
gem5::ArmISA::MMU::Stats::permsFaults
statistics::Scalar permsFaults
Definition: mmu.hh:463
gem5::ArmISA::MMU::CachedState::curTranType
ArmTranslationType curTranType
Definition: mmu.hh:162
gem5::BaseMMU::Execute
@ Execute
Definition: mmu.hh:56
gem5::BaseMMU
Definition: mmu.hh:53
gem5::ArmISA::MMU::itbStage2Walker
TableWalker * itbStage2Walker
Definition: mmu.hh:85
gem5::ArmISA::ArmFault::VmsaTran
@ VmsaTran
Definition: faults.hh:153
gem5::ArmISA::MISCREG_DACR
@ MISCREG_DACR
Definition: misc.hh:265
gem5::ArmISA::TlbEntry::Lookup
Definition: pagetable.hh:185
gem5::ArmISA::MMU::translateFs
Fault translateFs(const RequestPtr &req, ThreadContext *tc, Mode mode, Translation *translation, bool &delay, bool timing, ArmTranslationType tran_type, bool functional, CachedState &state)
Definition: mmu.cc:940
gem5::Cycles
Cycles is a wrapper class for representing cycle counts, i.e.
Definition: types.hh:78
gem5::ArmISA::purifyTaggedAddr
Addr purifyTaggedAddr(Addr addr, ThreadContext *tc, ExceptionLevel el, TCR tcr, bool is_instr)
Removes the tag from tagged addresses if that mode is enabled.
Definition: utility.cc:469
gem5::ArmISA::MMU::CachedState::cpsr
CPSR cpsr
Definition: mmu.hh:146
mmu.hh
gem5::ArmISA::MMU::UserMode
@ UserMode
Definition: mmu.hh:110
gem5::ArmISA::MMU::itbStage2
TLB * itbStage2
Definition: mmu.hh:80
gem5::ArmISA::MMU::CachedState::aarch64
bool aarch64
Definition: mmu.hh:147
gem5::ArmISA::MMU::translateComplete
Fault translateComplete(const RequestPtr &req, ThreadContext *tc, Translation *translation, Mode mode, ArmTranslationType tran_type, bool call_from_s2)
Definition: mmu.cc:1091
gem5::QARMA::b11
Bitfield< 47, 44 > b11
Definition: qarma.hh:55
gem5::ArmISA::pa
Bitfield< 39, 12 > pa
Definition: misc_types.hh:657
gem5::Flags< FlagsType >
gem5::ArmISA::MMU::getTableWalker
TableWalker * getTableWalker(BaseMMU::Mode mode, bool stage2) const
Definition: mmu.cc:153
gem5::ArmISA::MISCREG_PRRR
@ MISCREG_PRRR
Definition: misc.hh:369
gem5::ArmISA::MMU::CachedState::isPriv
bool isPriv
Definition: mmu.hh:151
gem5::ArmISA::MMU::CachedState
Definition: mmu.hh:133
gem5::ArmISA::MMU::Stats::prefetchFaults
statistics::Scalar prefetchFaults
Definition: mmu.hh:461
gem5::ArmISA::TlbEntry::shareable
bool shareable
Definition: pagetable.hh:254
gem5::ThreadContext
ThreadContext is the external interface to all thread state for anything outside of the CPU.
Definition: thread_context.hh:94
gem5::ArmISA::MODE_HYP
@ MODE_HYP
Definition: types.hh:287
gem5::ArmISA::MISCREG_SCTLR_EL1
@ MISCREG_SCTLR_EL1
Definition: misc.hh:579
gem5::ArmISA::MMU::setTestInterface
void setTestInterface(SimObject *ti)
Definition: mmu.cc:1583
gem5::Named::name
virtual std::string name() const
Definition: named.hh:47
gem5::Fault
std::shared_ptr< FaultBase > Fault
Definition: types.hh:255
gem5::ArmISA::HaveVirtHostExt
bool HaveVirtHostExt(ThreadContext *tc)
Definition: utility.cc:232
gem5::BaseMMU::unified
std::set< BaseTLB * > unified
Definition: mmu.hh:183
gem5::ArmISA::MMU::NormalTran
@ NormalTran
Definition: mmu.hh:115
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:186
ADD_STAT
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Definition: group.hh:75
gem5::ArmISA::TlbEntry::Lookup::functional
bool functional
Definition: pagetable.hh:200
gem5::ArmISA::MMU::CachedState::prrr
PRRR prrr
Definition: mmu.hh:157
gem5::Packet
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:283
gem5::ArmISA::TlbEntry::Lookup::secure
bool secure
Definition: pagetable.hh:198
gem5::ArmISA::MMU::tranTypeEL
static ExceptionLevel tranTypeEL(CPSR cpsr, ArmTranslationType type)
Determine the EL to use for the purpose of a translation given a specific translation type.
Definition: mmu.cc:1368
gem5::ArmISA::TlbEntry::mtype
MemoryType mtype
Definition: pagetable.hh:229
gem5::ArmISA::MMU::translateSe
Fault translateSe(const RequestPtr &req, ThreadContext *tc, Mode mode, Translation *translation, bool &delay, bool timing, CachedState &state)
Definition: mmu.cc:233
gem5::ArmISA::EL2
@ EL2
Definition: types.hh:268
gem5::ArmISA::MISCREG_SCTLR_EL2
@ MISCREG_SCTLR_EL2
Definition: misc.hh:584
gem5::MipsISA::p
Bitfield< 0 > p
Definition: pra_constants.hh:326
gem5::ArmISA::MMU::CachedState::dacr
uint32_t dacr
Definition: mmu.hh:160
gem5::ArmISA::wxn
Bitfield< 19 > wxn
Definition: misc_types.hh:360
isa.hh
gem5::X86ISA::type
type
Definition: misc.hh:733
gem5::RequestPtr
std::shared_ptr< Request > RequestPtr
Definition: request.hh:92
process.hh
gem5::ArmISA::MMU::CachedState::asid
uint16_t asid
Definition: mmu.hh:155
stage2_lookup.hh
gem5::ArmISA::MMU::s1State
CachedState s1State
Definition: mmu.hh:442
gem5::ArmISA::MMU::AlignmentMask
@ AlignmentMask
Definition: mmu.hh:99
gem5::ArmISA::TlbEntry::Lookup::inHost
bool inHost
Definition: pagetable.hh:204
reg_abi.hh
gem5::ArmISA::MMU::translateAtomic
Fault translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode) override
Definition: mmu.hh:208
gem5::ArmISA::MMU::drainResume
void drainResume() override
Resume execution after a successful drain.
Definition: mmu.cc:130
tlbi_op.hh
gem5::ArmISA::te
Bitfield< 30 > te
Definition: misc_types.hh:338
gem5::ArmISA::mask
Bitfield< 3, 0 > mask
Definition: pcstate.hh:63
gem5::ArmISA::MMU::CachedState::isSecure
bool isSecure
Definition: mmu.hh:152
gem5::bits
constexpr T bits(T val, unsigned first, unsigned last)
Extract the bitfield from position 'first' to 'last' (inclusive) from 'val' and right justify it.
Definition: bitfield.hh:76
gem5::ArmISA::TlbEntry::Lookup::va
Addr va
Definition: pagetable.hh:188
gem5::ArmISA::EL3
@ EL3
Definition: types.hh:269
gem5::ArmISA::TlbEntry::Lookup::vmid
vmid_t vmid
Definition: pagetable.hh:194
gem5::SimObject
Abstract superclass for simulation objects.
Definition: sim_object.hh:146
gem5::ArmISA::MODE_USER
@ MODE_USER
Definition: types.hh:281
std::pair
STL pair class.
Definition: stl.hh:58
gem5::ArmISA::MMU::miscRegContext
ContextID miscRegContext
Definition: mmu.hh:439
gem5::pseudo_inst::decodeAddrOffset
static void decodeAddrOffset(Addr offset, uint8_t &func)
Definition: pseudo_inst.hh:63
gem5::ArmISA::MISCREG_ID_AA64MMFR1_EL1
@ MISCREG_ID_AA64MMFR1_EL1
Definition: misc.hh:570
gem5::ArmISA::MMU::translateMmuOff
Fault translateMmuOff(ThreadContext *tc, const RequestPtr &req, Mode mode, ArmTranslationType tran_type, Addr vaddr, bool long_desc_format, CachedState &state)
Definition: mmu.cc:793
gem5::ArmISA::Stage2LookUp
Definition: stage2_lookup.hh:59
gem5::Addr
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:147
gem5::ArmISA::TlbEntry::Lookup::targetEL
ExceptionLevel targetEL
Definition: pagetable.hh:202
gem5::ArmISA::MISCREG_NMRR
@ MISCREG_NMRR
Definition: misc.hh:375
gem5::ArmISA::MMU::CachedState::aarch64EL
ExceptionLevel aarch64EL
Definition: mmu.hh:148
gem5::ArmISA::TLB::setVMID
void setVMID(vmid_t _vmid)
Definition: tlb.hh:208
gem5::ArmISA::MISCREG_TTBR1
@ MISCREG_TTBR1
Definition: misc.hh:257
gem5::ArmISA::isSecure
bool isSecure(ThreadContext *tc)
Definition: utility.cc:73
gem5::ArmISA::MMU::checkWalkCache
bool checkWalkCache() const
Definition: mmu.cc:111
gem5::ArmISA::MMU::HypMode
@ HypMode
Definition: mmu.hh:117
gem5::MipsISA::ti
Bitfield< 30 > ti
Definition: pra_constants.hh:179
gem5::BaseMMU::instruction
std::set< BaseTLB * > instruction
It is possible from the MMU to traverse the entire hierarchy of TLBs, starting from the DTB and ITB (...
Definition: mmu.hh:181
packet_access.hh
gem5::ArmISA::vm
Bitfield< 0 > vm
Definition: misc_types.hh:285
gem5::ArmISA::va
Bitfield< 8 > va
Definition: misc_types.hh:276
gem5::Request::CACHE_BLOCK_ZERO
@ CACHE_BLOCK_ZERO
This is a write that is targeted and zeroing an entire cache block.
Definition: request.hh:143
gem5::ArmISA::MMU::CachedState::hcr
HCR hcr
Definition: mmu.hh:159
gem5::ArmISA::TlbEntry::outerShareable
bool outerShareable
Definition: pagetable.hh:255
gem5::ArmISA::currEL
ExceptionLevel currEL(const ThreadContext *tc)
Returns the current Exception Level (EL) of the provided ThreadContext.
Definition: utility.cc:128
gem5::ArmISA::MMU::faultPAN
bool faultPAN(ThreadContext *tc, uint8_t ap, const RequestPtr &req, Mode mode, const bool is_priv, CachedState &state)
Definition: mmu.cc:738
gem5::ArmISA::ArmFault
Definition: faults.hh:64
gem5::Process
Definition: process.hh:68
gem5::ArmISA::MMU::CachedState::miscRegValid
bool miscRegValid
Definition: mmu.hh:161
gem5::ThreadContext::getProcessPtr
virtual Process * getProcessPtr()=0
gem5::ArmISA::MMU::getITBPtr
ArmISA::TLB * getITBPtr() const
Definition: mmu.hh:71
gem5::FullSystem
bool FullSystem
The FullSystem variable can be used to determine the current mode of simulation.
Definition: root.cc:220
gem5::ArmISA::TlbEntry::attributes
uint64_t attributes
Definition: pagetable.hh:213
gem5::BaseMMU::Translation
Definition: mmu.hh:58
gem5::ArmISA::MMU::getTlb
TLB * getTlb(BaseMMU::Mode mode, bool stage2) const
Definition: mmu.cc:137
gem5::ArmISA::MMU::translateTiming
void translateTiming(const RequestPtr &req, ThreadContext *tc, Translation *translation, Mode mode) override
Definition: mmu.hh:219
gem5::ArmISA::MMU::physAddrRange
uint8_t physAddrRange
Definition: mmu.hh:450
gem5::ArmISA::IsSecureEL2Enabled
bool IsSecureEL2Enabled(ThreadContext *tc)
Definition: utility.cc:268
gem5::ArmISA::MMU::ArmTranslationType
ArmTranslationType
Definition: mmu.hh:113
gem5::ArmISA::EL0
@ EL0
Definition: types.hh:266
gem5::ArmISA::Stage2LookUp::getTe
Fault getTe(ThreadContext *tc, TlbEntry *destTe)
Definition: stage2_lookup.cc:57
gem5::ArmISA::MMU::S1S2NsTran
@ S1S2NsTran
Definition: mmu.hh:120
gem5::ArmISA::TlbEntry::MemoryType::StronglyOrdered
@ StronglyOrdered
gem5::ArmSystem
Definition: system.hh:91
gem5::ArmISA::TableWalker
Definition: table_walker.hh:66
gem5::ArmISA::ArmFault::TranMethod
TranMethod
Definition: faults.hh:150
gem5::ArmISA::MISCREG_HCR
@ MISCREG_HCR
Definition: misc.hh:248
gem5::RiscvISA::x
Bitfield< 3 > x
Definition: pagetable.hh:73
gem5::Request::STRICT_ORDER
@ STRICT_ORDER
The request is required to be strictly ordered by CPU models and is non-speculative.
Definition: request.hh:135
gem5::ArmISA::ArmFault::AlignmentFault
@ AlignmentFault
Definition: faults.hh:97
gem5::ArmISA::MMU::testTranslation
Fault testTranslation(const RequestPtr &req, Mode mode, TlbEntry::DomainType domain, CachedState &state)
Definition: mmu.cc:1595
gem5::ArmISA::MISCREG_VTTBR_EL2
@ MISCREG_VTTBR_EL2
Definition: misc.hh:605
gem5::ArmISA::MMU::translateFunctional
TranslationGenPtr translateFunctional(Addr start, Addr size, ThreadContext *tc, Mode mode, Request::Flags flags) override
Returns a translation generator for a region of virtual addresses, instead of directly translating a ...
Definition: mmu.hh:90
gem5::ArmISA::TlbEntry::Lookup::asn
uint16_t asn
Definition: pagetable.hh:190
gem5::ArmISA::TLB::setTableWalker
void setTableWalker(TableWalker *table_walker)
Definition: tlb.cc:99
gem5::ArmISA::Stage2LookUp::isComplete
bool isComplete() const
Definition: stage2_lookup.hh:97
gem5::ArmISA::MMU::checkPAN
bool checkPAN(ThreadContext *tc, uint8_t ap, const RequestPtr &req, Mode mode, const bool is_priv, CachedState &state)
Definition: mmu.cc:763
gem5::ArmISA::ISA::getSelfDebug
SelfDebug * getSelfDebug() const
Definition: isa.hh:633
gem5::ArmISA::MISCREG_SCR
@ MISCREG_SCR
Definition: misc.hh:243
gem5::ArmISA::MMU::CachedState::getVMID
vmid_t getVMID(ThreadContext *tc) const
Returns the current VMID (information stored in the VTTBR_EL2 register)
Definition: mmu.cc:1132
gem5::ArmISA::MMU::checkPermissions
Fault checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode, bool stage2)
Definition: mmu.cc:274
gem5::ArmISA::TlbEntry::Lookup::hyp
bool hyp
Definition: pagetable.hh:196
gem5::ArmISA::longDescFormatInUse
bool longDescFormatInUse(ThreadContext *tc)
Definition: utility.cc:135
gem5::ArmISA::ArmFault::annotate
virtual void annotate(AnnotationIDs id, uint64_t val)
Definition: faults.hh:238
gem5::Request::UNCACHEABLE
@ UNCACHEABLE
The request is to an uncacheable address.
Definition: request.hh:125
gem5::ArmISA::MMU::updateMiscReg
CachedState & updateMiscReg(ThreadContext *tc, ArmTranslationType tran_type, bool stage2)
Definition: mmu.cc:1161
gem5::statistics::Group
Statistics container.
Definition: group.hh:93
gem5::ArmISA::MMU::CachedState::isHyp
bool isHyp
Definition: mmu.hh:153
gem5::QARMA::b10
Bitfield< 43, 40 > b10
Definition: qarma.hh:56
gem5::ArmSystem::releaseFS
const ArmRelease * releaseFS() const
Definition: system.hh:152
gem5::ArmISA::MMU::invalidateMiscReg
void invalidateMiscReg()
Definition: mmu.cc:197
gem5::ArmISA::MMU::CachedState::sctlr
SCTLR sctlr
Definition: mmu.hh:149
gem5::ArmISA::ArmFault::S1PTW
@ S1PTW
Definition: faults.hh:134
gem5::ArmISA::MMU::setAttr
void setAttr(uint64_t attr)
Accessor functions for the memory attributes of the last accessed TLB entry.
Definition: mmu.hh:340
gem5::MipsISA::r
r
Definition: pra_constants.hh:98
gem5::ArmISA::sd
Bitfield< 4 > sd
Definition: misc_types.hh:775
gem5::ArmISA::TlbEntry::partial
bool partial
Definition: pagetable.hh:248
gem5::ArmISA::snsBankedIndex
int snsBankedIndex(MiscRegIndex reg, ThreadContext *tc)
Definition: misc.cc:1313
gem5::ArmISA::MMU::S1E1Tran
@ S1E1Tran
Definition: mmu.hh:126
gem5::ArmISA::Stage2LookUp::setSelfDelete
void setSelfDelete()
Definition: stage2_lookup.hh:95
gem5::ArmISA::MMU::CachedState::stage2Req
bool stage2Req
Definition: mmu.hh:165
gem5::ArmISA::MMU::CachedState::isStage2
bool isStage2
Definition: mmu.hh:145
gem5::Packet::setLE
void setLE(T v)
Set the value in the data pointer to v as little endian.
Definition: packet_access.hh:108
gem5::MipsISA::vaddr
vaddr
Definition: pra_constants.hh:278
gem5::ArmISA::MMU::takeOverFrom
void takeOverFrom(BaseMMU *old_mmu) override
Definition: mmu.cc:1569
gem5::ArmISA::MMU::dtbStage2
TLB * dtbStage2
Definition: mmu.hh:81
gem5::ArmSystem::haveEL
static bool haveEL(ThreadContext *tc, ArmISA::ExceptionLevel el)
Return true if the system implements a specific exception level.
Definition: system.cc:131
gem5::ArmISA::MISCREG_HCR_EL2
@ MISCREG_HCR_EL2
Definition: misc.hh:586
gem5::ArmISA::MISCREG_TTBR0_EL1
@ MISCREG_TTBR0_EL1
Definition: misc.hh:597
fatal_if
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condi...
Definition: logging.hh:226
gem5::ArmISA::TlbEntry::Lookup::ignoreAsn
bool ignoreAsn
Definition: pagetable.hh:192
gem5::ArmISA::MMU::translateMmuOn
Fault translateMmuOn(ThreadContext *tc, const RequestPtr &req, Mode mode, Translation *translation, bool &delay, bool timing, bool functional, Addr vaddr, ArmFault::TranMethod tranMethod, CachedState &state)
Definition: mmu.cc:868
gem5
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
Definition: tlb.cc:60
gem5::ArmISA::MMU::CachedState::scr
SCR scr
Definition: mmu.hh:150
gem5::ArmISA::computeAddrTop
int computeAddrTop(ThreadContext *tc, bool selbit, bool is_instr, TCR tcr, ExceptionLevel el)
Definition: utility.cc:424
gem5::ArmISA::MMU::_release
const ArmRelease * _release
Definition: mmu.hh:448
gem5::BaseMMU::takeOverFrom
virtual void takeOverFrom(BaseMMU *old_mmu)
Definition: mmu.cc:157
gem5::ArmISA::MMU::CachedState::stage2DescReq
bool stage2DescReq
Definition: mmu.hh:171
gem5::ArmISA::MMU::stats
gem5::ArmISA::MMU::Stats stats
gem5::ArmISA::MMU
Definition: mmu.hh:59
gem5::ArmISA::TableWalker::setMmu
void setMmu(MMU *_mmu)
Definition: table_walker.cc:119
gem5::ArmISA::MMU::isCompleteTranslation
bool isCompleteTranslation(TlbEntry *te) const
Definition: mmu.cc:1563
gem5::BaseMMU::Translation::finish
virtual void finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc, BaseMMU::Mode mode)=0
gem5::ArmISA::MMU::Stats::Stats
Stats(statistics::Group *parent)
Definition: mmu.cc:1628
gem5::ArmISA::MMU::Stats::alignFaults
statistics::Scalar alignFaults
Definition: mmu.hh:460
gem5::ArmISA::MMU::lookup
TlbEntry * lookup(Addr vpn, uint16_t asn, vmid_t vmid, bool hyp, bool secure, bool functional, bool ignore_asn, ExceptionLevel target_el, bool in_host, bool stage2, BaseMMU::Mode mode)
Lookup an entry in the TLB.
Definition: mmu.cc:1407
gem5::ArmISA::ArmFault::OVA
@ OVA
Definition: faults.hh:135
gem5::BaseMMU::data
std::set< BaseTLB * > data
Definition: mmu.hh:182
gem5::ArmISA::MISCREG_TTBR1_EL2
@ MISCREG_TTBR1_EL2
Definition: misc.hh:820
gem5::ArmISA::TlbEntry::innerAttrs
uint8_t innerAttrs
Definition: pagetable.hh:223
gem5::ArmISA::MMU::s2State
CachedState s2State
Definition: mmu.hh:442
gem5::ArmISA::dc
Bitfield< 12 > dc
Definition: misc_types.hh:273
gem5::ArmISA::MISCREG_TCR_EL1
@ MISCREG_TCR_EL1
Definition: misc.hh:601
gem5::ArmISA::ExceptionLevel
ExceptionLevel
Definition: types.hh:264
panic
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:178
gem5::ArmISA::MMU::checkPermissions64
Fault checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode, ThreadContext *tc, bool stage2)
Definition: mmu.cc:460
gem5::ArmISA::mode
Bitfield< 4, 0 > mode
Definition: misc_types.hh:74
gem5::ArmISA::MMU::CachedState::directToStage2
bool directToStage2
Definition: mmu.hh:175
gem5::ArmISA::MMU::s1PermBits64
std::pair< bool, bool > s1PermBits64(TlbEntry *te, const RequestPtr &req, Mode mode, ThreadContext *tc, CachedState &state, bool r, bool w, bool x)
Definition: mmu.cc:616

Generated on Wed May 4 2022 12:13:48 for gem5 by doxygen 1.8.17