gem5  v20.0.0.0
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
tlb.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2010-2013, 2016-2019 ARM Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Copyright (c) 2001-2005 The Regents of The University of Michigan
15  * All rights reserved.
16  *
17  * Redistribution and use in source and binary forms, with or without
18  * modification, are permitted provided that the following conditions are
19  * met: redistributions of source code must retain the above copyright
20  * notice, this list of conditions and the following disclaimer;
21  * redistributions in binary form must reproduce the above copyright
22  * notice, this list of conditions and the following disclaimer in the
23  * documentation and/or other materials provided with the distribution;
24  * neither the name of the copyright holders nor the names of its
25  * contributors may be used to endorse or promote products derived from
26  * this software without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39  */
40 
41 #include "arch/arm/tlb.hh"
42 
43 #include <memory>
44 #include <string>
45 #include <vector>
46 
47 #include "arch/arm/faults.hh"
48 #include "arch/arm/pagetable.hh"
50 #include "arch/arm/stage2_mmu.hh"
51 #include "arch/arm/system.hh"
52 #include "arch/arm/table_walker.hh"
53 #include "arch/arm/utility.hh"
54 #include "base/inifile.hh"
55 #include "base/str.hh"
56 #include "base/trace.hh"
57 #include "cpu/base.hh"
58 #include "cpu/thread_context.hh"
59 #include "debug/Checkpoint.hh"
60 #include "debug/TLB.hh"
61 #include "debug/TLBVerbose.hh"
62 #include "mem/packet_access.hh"
63 #include "mem/page_table.hh"
64 #include "mem/request.hh"
65 #include "params/ArmTLB.hh"
66 #include "sim/full_system.hh"
67 #include "sim/process.hh"
68 #include "sim/pseudo_inst.hh"
69 
70 using namespace std;
71 using namespace ArmISA;
72 
// Constructor: allocates the entry array, seeds per-context state
// (ASID/VMID, EL, security, translation type), wires this TLB into its
// table walker, and caches the m5op address range when an ArmSystem is
// present.
73 TLB::TLB(const ArmTLBParams *p)
74  : BaseTLB(p), table(new TlbEntry[p->size]), size(p->size),
75  isStage2(p->is_stage2), stage2Req(false), stage2DescReq(false), _attr(0),
76  directToStage2(false), tableWalker(p->walker), stage2Tlb(NULL),
77  stage2Mmu(NULL), test(nullptr), rangeMRU(1),
78  aarch64(false), aarch64EL(EL0), isPriv(false), isSecure(false),
79  isHyp(false), asid(0), vmid(0), hcr(0), dacr(0),
80  miscRegValid(false), miscRegContext(0), curTranType(NormalTran)
81 {
82  const ArmSystem *sys = dynamic_cast<const ArmSystem *>(p->sys);
83 
84  tableWalker->setTlb(this);
85 
86  // Cache system-level properties
// NOTE(review): original source lines 87-89 (the cached properties,
// presumably haveLPAE/haveVirtualization/haveLargeAsid64) are missing
// from this capture — confirm against the upstream file.
90 
91  if (sys)
92  m5opRange = sys->m5opRange();
93 }
94 
// Destructor: releases the backing array of TLB entries.
// NOTE(review): the declarator line (original 95, TLB::~TLB()) is
// missing from this capture.
96 {
97  delete[] table;
98 }
99 
100 void
// NOTE(review): the declarator line (original 101) was dropped by the
// doc generator; from the body this appears to be TLB::init() — confirm.
102 {
103  if (stage2Mmu && !isStage2)
// NOTE(review): original line 104 (the statement guarded by this if,
// presumably fetching the stage-2 TLB from the stage-2 MMU) is missing
// from this capture.
105 }
106 
107 void
// NOTE(review): the declarator line (original 108) is missing; the body
// matches TLB::setMMU(Stage2MMU *m, MasterID master_id) — confirm.
109 {
// Record the stage-2 MMU and propagate it to the table walker.
110  stage2Mmu = m;
111  tableWalker->setMMU(m, master_id);
112 }
113 
114 bool
// NOTE(review): the declarator line (original 115) is missing; the body
// matches the functional VA->PA probe overload
// TLB::translateFunctional(ThreadContext *tc, Addr va, Addr &pa).
116 {
117  updateMiscReg(tc);
118 
// When translation is redirected straight to stage 2, delegate there.
119  if (directToStage2) {
120  assert(stage2Tlb);
121  return stage2Tlb->translateFunctional(tc, va, pa);
122  }
123 
// Functional lookup (does not disturb LRU state); a miss means the
// translation is not cached here and we report failure.
124  TlbEntry *e = lookup(va, asid, vmid, isHyp, isSecure, true, false,
125  aarch64 ? aarch64EL : EL1);
126  if (!e)
127  return false;
128  pa = e->pAddr(va);
129  return true;
130 }
131 
132 Fault
// NOTE(review): the first declarator line (original 133) is missing;
// this is TLB::finalizePhysical, which post-processes a completed
// translation and intercepts accesses to the m5op region.
134  ThreadContext *tc, Mode mode) const
135 {
136  const Addr paddr = req->getPaddr();
137 
// Accesses inside the m5op range become local pseudo-instruction
// executions instead of real memory accesses.
138  if (m5opRange.contains(paddr)) {
139  uint8_t func;
// NOTE(review): original line 140 (presumably decoding 'func' from the
// physical address) is missing from this capture, so 'func' appears
// uninitialized here — confirm against the upstream file.
141  req->setLocalAccessor(
142  [func, mode](ThreadContext *tc, PacketPtr pkt) -> Cycles
143  {
144  uint64_t ret;
145  PseudoInst::pseudoInst<PseudoInstABI>(tc, func, ret);
// Only reads observe the pseudo-inst result; writes discard it.
146  if (mode == Read)
147  pkt->setLE(ret);
148  return Cycles(1);
149  }
150  );
151  }
152 
153  return NoFault;
154 }
155 
156 TlbEntry*
157 TLB::lookup(Addr va, uint16_t asn, uint8_t vmid, bool hyp, bool secure,
158  bool functional, bool ignore_asn, ExceptionLevel target_el)
159 {
160 
161  TlbEntry *retval = NULL;
162 
163  // Maintaining LRU array
164  int x = 0;
165  while (retval == NULL && x < size) {
166  if ((!ignore_asn && table[x].match(va, asn, vmid, hyp, secure, false,
167  target_el)) ||
168  (ignore_asn && table[x].match(va, vmid, hyp, secure, target_el))) {
169  // We only move the hit entry ahead when the position is higher
170  // than rangeMRU
171  if (x > rangeMRU && !functional) {
172  TlbEntry tmp_entry = table[x];
173  for (int i = x; i > 0; i--)
174  table[i] = table[i - 1];
175  table[0] = tmp_entry;
176  retval = &table[0];
177  } else {
178  retval = &table[x];
179  }
180  break;
181  }
182  ++x;
183  }
184 
185  DPRINTF(TLBVerbose, "Lookup %#x, asn %#x -> %s vmn 0x%x hyp %d secure %d "
186  "ppn %#x size: %#x pa: %#x ap:%d ns:%d nstid:%d g:%d asid: %d "
187  "el: %d\n",
188  va, asn, retval ? "hit" : "miss", vmid, hyp, secure,
189  retval ? retval->pfn : 0, retval ? retval->size : 0,
190  retval ? retval->pAddr(va) : 0, retval ? retval->ap : 0,
191  retval ? retval->ns : 0, retval ? retval->nstid : 0,
192  retval ? retval->global : 0, retval ? retval->asid : 0,
193  retval ? retval->el : 0);
194 
195  return retval;
196 }
197 
198 // insert a new TLB entry
199 void
// NOTE(review): the declarator line (original 200) is missing; the body
// matches TLB::insert(Addr addr, TlbEntry &entry).
201 {
202  DPRINTF(TLB, "Inserting entry into TLB with pfn:%#x size:%#x vpn: %#x"
203  " asid:%d vmid:%d N:%d global:%d valid:%d nc:%d xn:%d"
204  " ap:%#x domain:%#x ns:%d nstid:%d isHyp:%d\n", entry.pfn,
205  entry.size, entry.vpn, entry.asid, entry.vmid, entry.N,
206  entry.global, entry.valid, entry.nonCacheable, entry.xn,
207  entry.ap, static_cast<uint8_t>(entry.domain), entry.ns, entry.nstid,
208  entry.isHyp);
209 
// The last slot is the LRU victim; report it if it holds a live entry.
210  if (table[size - 1].valid)
211  DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x "
212  "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp:%d el: %d\n",
213  table[size-1].vpn << table[size-1].N, table[size-1].asid,
214  table[size-1].vmid, table[size-1].pfn << table[size-1].N,
215  table[size-1].size, table[size-1].ap, table[size-1].ns,
216  table[size-1].nstid, table[size-1].global, table[size-1].isHyp,
217  table[size-1].el);
218 
219  //inserting to MRU position and evicting the LRU one
220 
// Shift every entry down one slot (dropping the LRU tail) and place the
// new entry at the MRU head.
221  for (int i = size - 1; i > 0; --i)
222  table[i] = table[i-1];
223  table[0] = entry;
224 
225  inserts++;
226  ppRefills->notify(1);
227 }
228 
229 void
// NOTE(review): the declarator line (original 230) is missing; the body
// matches TLB::printTlb() const — a debug dump of all valid entries.
231 {
232  int x = 0;
233  TlbEntry *te;
234  DPRINTF(TLB, "Current TLB contents:\n");
235  while (x < size) {
236  te = &table[x];
237  if (te->valid)
238  DPRINTF(TLB, " * %s\n", te->print());
239  ++x;
240  }
241 }
242 
243 void
244 TLB::flushAllSecurity(bool secure_lookup, ExceptionLevel target_el,
245  bool ignore_el)
246 {
247  DPRINTF(TLB, "Flushing all TLB entries (%s lookup)\n",
248  (secure_lookup ? "secure" : "non-secure"));
249  int x = 0;
250  TlbEntry *te;
251  while (x < size) {
252  te = &table[x];
253  const bool el_match = ignore_el ?
254  true : te->checkELMatch(target_el);
255 
256  if (te->valid && secure_lookup == !te->nstid &&
257  (te->vmid == vmid || secure_lookup) && el_match) {
258 
259  DPRINTF(TLB, " - %s\n", te->print());
260  te->valid = false;
261  flushedEntries++;
262  }
263  ++x;
264  }
265 
266  flushTlb++;
267 
268  // If there's a second stage TLB (and we're not it) then flush it as well
269  // if we're currently in hyp mode
270  if (!isStage2 && isHyp) {
271  stage2Tlb->flushAllSecurity(secure_lookup, EL1, true);
272  }
273 }
274 
275 void
276 TLB::flushAllNs(ExceptionLevel target_el, bool ignore_el)
277 {
278  bool hyp = target_el == EL2;
279 
280  DPRINTF(TLB, "Flushing all NS TLB entries (%s lookup)\n",
281  (hyp ? "hyp" : "non-hyp"));
282  int x = 0;
283  TlbEntry *te;
284  while (x < size) {
285  te = &table[x];
286  const bool el_match = ignore_el ?
287  true : te->checkELMatch(target_el);
288 
289  if (te->valid && te->nstid && te->isHyp == hyp && el_match) {
290 
291  DPRINTF(TLB, " - %s\n", te->print());
292  flushedEntries++;
293  te->valid = false;
294  }
295  ++x;
296  }
297 
298  flushTlb++;
299 
300  // If there's a second stage TLB (and we're not it) then flush it as well
301  if (!isStage2 && !hyp) {
302  stage2Tlb->flushAllNs(EL1, true);
303  }
304 }
305 
306 void
307 TLB::flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup,
308  ExceptionLevel target_el)
309 {
// Flush entries matching both a modified virtual address and an ASID;
// the actual invalidation is delegated to _flushMva (ignore_asn=false).
310  DPRINTF(TLB, "Flushing TLB entries with mva: %#x, asid: %#x "
311  "(%s lookup)\n", mva, asn, (secure_lookup ?
312  "secure" : "non-secure"));
313  _flushMva(mva, asn, secure_lookup, false, target_el);
314  flushTlbMvaAsid++;
315 }
316 
317 void
318 TLB::flushAsid(uint64_t asn, bool secure_lookup, ExceptionLevel target_el)
319 {
320  DPRINTF(TLB, "Flushing TLB entries with asid: %#x (%s lookup)\n", asn,
321  (secure_lookup ? "secure" : "non-secure"));
322 
323  int x = 0 ;
324  TlbEntry *te;
325 
326  while (x < size) {
327  te = &table[x];
328  if (te->valid && te->asid == asn && secure_lookup == !te->nstid &&
329  (te->vmid == vmid || secure_lookup) &&
330  te->checkELMatch(target_el)) {
331 
332  te->valid = false;
333  DPRINTF(TLB, " - %s\n", te->print());
334  flushedEntries++;
335  }
336  ++x;
337  }
338  flushTlbAsid++;
339 }
340 
341 void
342 TLB::flushMva(Addr mva, bool secure_lookup, ExceptionLevel target_el)
343 {
// Flush entries matching a modified virtual address regardless of ASID;
// 0xbeef is a dummy ASID value since ignore_asn is passed as true.
344  DPRINTF(TLB, "Flushing TLB entries with mva: %#x (%s lookup)\n", mva,
345  (secure_lookup ? "secure" : "non-secure"));
346  _flushMva(mva, 0xbeef, secure_lookup, true, target_el);
347  flushTlbMva++;
348 }
349 
350 void
351 TLB::_flushMva(Addr mva, uint64_t asn, bool secure_lookup,
352  bool ignore_asn, ExceptionLevel target_el)
353 {
354  TlbEntry *te;
355  // D5.7.2: Sign-extend address to 64 bits
356  mva = sext<56>(mva);
357 
358  bool hyp = target_el == EL2;
359 
360  te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
361  target_el);
362  while (te != NULL) {
363  if (secure_lookup == !te->nstid) {
364  DPRINTF(TLB, " - %s\n", te->print());
365  te->valid = false;
366  flushedEntries++;
367  }
368  te = lookup(mva, asn, vmid, hyp, secure_lookup, false, ignore_asn,
369  target_el);
370  }
371 }
372 
373 void
374 TLB::flushIpaVmid(Addr ipa, bool secure_lookup, ExceptionLevel target_el)
375 {
// Stage-1-only operation: forward the IPA flush to the stage-2 TLB,
// matching any ASID (0xbeef is a dummy value; ignore_asn is true).
376  assert(!isStage2);
377  stage2Tlb->_flushMva(ipa, 0xbeef, secure_lookup, true, target_el);
378 }
379 
380 void
// NOTE(review): the declarator line (original 381) is missing; the body
// and comment match TLB::drainResume().
382 {
383  // We might have unserialized something or switched CPUs, so make
384  // sure to re-read the misc regs.
385  miscRegValid = false;
386 }
387 
388 void
// NOTE(review): the declarator line (original 389) is missing; the body
// matches TLB::takeOverFrom(BaseTLB *_otlb), used on CPU switchover.
390 {
391  TLB *otlb = dynamic_cast<TLB*>(_otlb);
392  /* Make sure we actually have a valid type */
393  if (otlb) {
// Copy cached attribute/configuration state from the old TLB.
394  _attr = otlb->_attr;
395  haveLPAE = otlb->haveLPAE;
// NOTE(review): original line 396 (likely copying directToStage2) is
// missing from this capture.
397  stage2Req = otlb->stage2Req;
// NOTE(review): original line 398 (likely copying stage2DescReq) is
// missing from this capture.
399 
400  /* Sync the stage2 MMU if they exist in both
401  * the old CPU and the new
402  */
403  if (!isStage2 &&
404  stage2Tlb && otlb->stage2Tlb) {
// NOTE(review): original line 405 (the recursive takeOverFrom on the
// stage-2 TLB) is missing from this capture.
406  }
407  } else {
408  panic("Incompatible TLB type!");
409  }
410 }
411 
412 void
// NOTE(review): the declarator line (original 413, TLB::regStats()) is
// missing; this registers all of the TLB's statistics. Several stat
// identifier lines were also dropped by the doc generator (originals
// 415, 426, 441, 451, 456, 481, 486, 491, 496, 501, 506, 511, 516 and
// the formula definitions at 521-526) — the orphaned ".name(...)"
// chains below belong to those missing identifiers.
414 {
416  instHits
417  .name(name() + ".inst_hits")
418  .desc("ITB inst hits")
419  ;
420 
421  instMisses
422  .name(name() + ".inst_misses")
423  .desc("ITB inst misses")
424  ;
425 
427  .name(name() + ".inst_accesses")
428  .desc("ITB inst accesses")
429  ;
430 
431  readHits
432  .name(name() + ".read_hits")
433  .desc("DTB read hits")
434  ;
435 
436  readMisses
437  .name(name() + ".read_misses")
438  .desc("DTB read misses")
439  ;
440 
442  .name(name() + ".read_accesses")
443  .desc("DTB read accesses")
444  ;
445 
446  writeHits
447  .name(name() + ".write_hits")
448  .desc("DTB write hits")
449  ;
450 
452  .name(name() + ".write_misses")
453  .desc("DTB write misses")
454  ;
455 
457  .name(name() + ".write_accesses")
458  .desc("DTB write accesses")
459  ;
460 
461  hits
462  .name(name() + ".hits")
463  .desc("DTB hits")
464  ;
465 
466  misses
467  .name(name() + ".misses")
468  .desc("DTB misses")
469  ;
470 
471  accesses
472  .name(name() + ".accesses")
473  .desc("DTB accesses")
474  ;
475 
476  flushTlb
477  .name(name() + ".flush_tlb")
478  .desc("Number of times complete TLB was flushed")
479  ;
480 
482  .name(name() + ".flush_tlb_mva")
483  .desc("Number of times TLB was flushed by MVA")
484  ;
485 
487  .name(name() + ".flush_tlb_mva_asid")
488  .desc("Number of times TLB was flushed by MVA & ASID")
489  ;
490 
492  .name(name() + ".flush_tlb_asid")
493  .desc("Number of times TLB was flushed by ASID")
494  ;
495 
497  .name(name() + ".flush_entries")
498  .desc("Number of entries that have been flushed from TLB")
499  ;
500 
502  .name(name() + ".align_faults")
503  .desc("Number of TLB faults due to alignment restrictions")
504  ;
505 
507  .name(name() + ".prefetch_faults")
508  .desc("Number of TLB faults due to prefetch")
509  ;
510 
512  .name(name() + ".domain_faults")
513  .desc("Number of TLB faults due to domain restrictions")
514  ;
515 
517  .name(name() + ".perms_faults")
518  .desc("Number of TLB faults due to permissions restrictions")
519  ;
520 
// Aggregate miss count is a formula over the per-type miss stats.
525  misses = readMisses + writeMisses + instMisses;
527 }
528 
529 void
// NOTE(review): the declarator line (original 530) is missing; the body
// matches TLB::regProbePoints(), registering the PMU refill probe.
531 {
532  ppRefills.reset(new ProbePoints::PMU(getProbeManager(), "Refills"));
533 }
534 
535 Fault
// NOTE(review): the first declarator line (original 536) is missing;
// this is TLB::translateSe — the syscall-emulation-mode translation
// path, which uses the process page table instead of hardware walks.
537  Translation *translation, bool &delay, bool timing)
538 {
539  updateMiscReg(tc);
540  Addr vaddr_tainted = req->getVaddr();
541  Addr vaddr = 0;
// Strip address tags (TBI) on AArch64 before using the address.
542  if (aarch64)
543  vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, (TCR)ttbcr,
544  mode==Execute);
545  else
546  vaddr = vaddr_tainted;
547  Request::Flags flags = req->getFlags();
548 
549  bool is_fetch = (mode == Execute);
550  bool is_write = (mode == Write);
551 
// Alignment check applies to data accesses when SCTLR.A is set or the
// instruction does not permit unaligned access.
552  if (!is_fetch) {
553  if (sctlr.a || !(flags & AllowUnaligned)) {
554  if (vaddr & mask(flags & AlignmentMask)) {
555  // LPAE is always disabled in SE mode
556  return std::make_shared<DataAbort>(
557  vaddr_tainted,
// NOTE(review): original lines 558-560 (the remaining DataAbort
// constructor arguments) are missing from this capture.
561  }
562  }
563  }
564 
565  Addr paddr;
566  Process *p = tc->getProcessPtr();
567 
// Translate through the process page table; a miss is a generic fault.
568  if (!p->pTable->translate(vaddr, paddr))
569  return std::make_shared<GenericPageTableFault>(vaddr_tainted);
570  req->setPaddr(paddr);
571 
572  return finalizePhysical(req, tc, mode);
573 }
574 
575 Fault
// NOTE(review): the declarator line (original 576) is missing; this is
// TLB::checkPermissions — the AArch32 permission check (domains, AP
// bits, XN) applied to a hit TLB entry. Several argument lines inside
// the fault constructors below were also dropped by the doc generator.
577 {
578  // a data cache maintenance instruction that operates by MVA does
579  // not generate a Data Abort exeception due to a Permission fault
580  if (req->isCacheMaintenance()) {
581  return NoFault;
582  }
583 
584  Addr vaddr = req->getVaddr(); // 32-bit don't have to purify
585  Request::Flags flags = req->getFlags();
586  bool is_fetch = (mode == Execute);
587  bool is_write = (mode == Write);
588  bool is_priv = isPriv && !(flags & UserMode);
589 
590  // Get the translation type from the actuall table entry
// NOTE(review): original lines 591-592 (deriving tranMethod from the
// entry) are missing from this capture.
593 
594  // If this is the second stage of translation and the request is for a
595  // stage 1 page table walk then we need to check the HCR.PTW bit. This
596  // allows us to generate a fault if the request targets an area marked
597  // as a device or strongly ordered.
598  if (isStage2 && req->isPTWalk() && hcr.ptw &&
// NOTE(review): original line 599 (the mtype condition) is missing.
600  return std::make_shared<DataAbort>(
601  vaddr, te->domain, is_write,
// NOTE(review): original line 602 (fault source argument) is missing.
603  isStage2, tranMethod);
604  }
605 
606  // Generate an alignment fault for unaligned data accesses to device or
607  // strongly ordered memory
608  if (!is_fetch) {
609  if (te->mtype != TlbEntry::MemoryType::Normal) {
610  if (vaddr & mask(flags & AlignmentMask)) {
611  alignFaults++;
612  return std::make_shared<DataAbort>(
// NOTE(review): original lines 613-614 (DataAbort arguments) missing.
615  tranMethod);
616  }
617  }
618  }
619 
620  if (te->nonCacheable) {
621  // Prevent prefetching from I/O devices.
622  if (req->isPrefetch()) {
623  // Here we can safely use the fault status for the short
624  // desc. format in all cases
625  return std::make_shared<PrefetchAbort>(
// NOTE(review): original line 626 (PrefetchAbort arguments) missing.
627  isStage2, tranMethod);
628  }
629  }
630 
// Short-descriptor format: apply the domain access control checks.
631  if (!te->longDescFormat) {
632  switch ((dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
633  case 0:
634  domainFaults++;
635  DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
636  " domain: %#x write:%d\n", dacr,
637  static_cast<uint8_t>(te->domain), is_write);
638  if (is_fetch) {
639  // Use PC value instead of vaddr because vaddr might
640  // be aligned to cache line and should not be the
641  // address reported in FAR
642  return std::make_shared<PrefetchAbort>(
643  req->getPC(),
// NOTE(review): original line 644 (fault source argument) missing.
645  isStage2, tranMethod);
646  } else
647  return std::make_shared<DataAbort>(
648  vaddr, te->domain, is_write,
// NOTE(review): original lines 649-650 truncated here (fault source
// argument missing before the line below).
650  isStage2, tranMethod);
651  case 1:
652  // Continue with permissions check
653  break;
654  case 2:
655  panic("UNPRED domain\n");
656  case 3:
// Manager domain: accesses are not permission-checked.
657  return NoFault;
658  }
659  }
660 
661  // The 'ap' variable is AP[2:0] or {AP[2,1],1b'0}, i.e. always three bits
662  uint8_t ap = te->longDescFormat ? te->ap << 1 : te->ap;
663  uint8_t hap = te->hap;
664 
665  if (sctlr.afe == 1 || te->longDescFormat)
666  ap |= 1;
667 
668  bool abt;
669  bool isWritable = true;
670  // If this is a stage 2 access (eg for reading stage 1 page table entries)
671  // then don't perform the AP permissions check, we stil do the HAP check
672  // below.
673  if (isStage2) {
674  abt = false;
675  } else {
676  switch (ap) {
677  case 0:
678  DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
679  (int)sctlr.rs);
680  if (!sctlr.xp) {
681  switch ((int)sctlr.rs) {
682  case 2:
683  abt = is_write;
684  break;
685  case 1:
686  abt = is_write || !is_priv;
687  break;
688  case 0:
689  case 3:
690  default:
691  abt = true;
692  break;
693  }
694  } else {
695  abt = true;
696  }
697  break;
698  case 1:
699  abt = !is_priv;
700  break;
701  case 2:
702  abt = !is_priv && is_write;
703  isWritable = is_priv;
704  break;
705  case 3:
706  abt = false;
707  break;
708  case 4:
709  panic("UNPRED premissions\n");
710  case 5:
711  abt = !is_priv || is_write;
712  isWritable = false;
713  break;
714  case 6:
715  case 7:
716  abt = is_write;
717  isWritable = false;
718  break;
719  default:
720  panic("Unknown permissions %#x\n", ap);
721  }
722  }
723 
// Hypervisor (stage-2) access permissions and execute-never checks.
724  bool hapAbt = is_write ? !(hap & 2) : !(hap & 1);
725  bool xn = te->xn || (isWritable && sctlr.wxn) ||
726  (ap == 3 && sctlr.uwxn && is_priv);
727  if (is_fetch && (abt || xn ||
728  (te->longDescFormat && te->pxn && is_priv) ||
729  (isSecure && te->ns && scr.sif))) {
730  permsFaults++;
731  DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
732  "priv:%d write:%d ns:%d sif:%d sctlr.afe: %d \n",
733  ap, is_priv, is_write, te->ns, scr.sif,sctlr.afe);
734  // Use PC value instead of vaddr because vaddr might be aligned to
735  // cache line and should not be the address reported in FAR
736  return std::make_shared<PrefetchAbort>(
737  req->getPC(),
// NOTE(review): original line 738 (fault source argument) missing.
739  isStage2, tranMethod);
740  } else if (abt | hapAbt) {
741  permsFaults++;
742  DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
743  " write:%d\n", ap, is_priv, is_write);
744  return std::make_shared<DataAbort>(
745  vaddr, te->domain, is_write,
// NOTE(review): original line 746 (fault source argument) missing.
747  isStage2 | !abt, tranMethod);
748  }
749  return NoFault;
750 }
751 
752 
753 Fault
// NOTE(review): the first declarator line (original 754) is missing;
// this is TLB::checkPermissionsS1_64 / checkPermissions64 — the AArch64
// permission check (AP/XN/PXN/PAN, stage-2 HAP) for a hit TLB entry.
// Several fault-constructor argument lines were dropped by the doc
// generator; each gap is flagged below.
755  ThreadContext *tc)
756 {
757  assert(aarch64);
758 
759  // A data cache maintenance instruction that operates by VA does
760  // not generate a Permission fault unless:
761  // * It is a data cache invalidate (dc ivac) which requires write
762  // permissions to the VA, or
763  // * It is executed from EL0
764  if (req->isCacheClean() && aarch64EL != EL0 && !isStage2) {
765  return NoFault;
766  }
767 
768  Addr vaddr_tainted = req->getVaddr();
769  Addr vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, (TCR)ttbcr,
770  mode==Execute);
771 
772  Request::Flags flags = req->getFlags();
773  bool is_fetch = (mode == Execute);
774  // Cache clean operations require read permissions to the specified VA
775  bool is_write = !req->isCacheClean() && mode == Write;
776  bool is_atomic = req->isAtomic();
777  bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode);
778 
// NOTE(review): original line 779 (updateMiscReg call) is missing.
780 
781  // If this is the second stage of translation and the request is for a
782  // stage 1 page table walk then we need to check the HCR.PTW bit. This
783  // allows us to generate a fault if the request targets an area marked
784  // as a device or strongly ordered.
785  if (isStage2 && req->isPTWalk() && hcr.ptw &&
// NOTE(review): original line 786 (the mtype condition) is missing.
787  return std::make_shared<DataAbort>(
788  vaddr_tainted, te->domain, is_write,
// NOTE(review): original lines 789-790 (fault source and method
// arguments) are missing.
791  }
792 
793  // Generate an alignment fault for unaligned accesses to device or
794  // strongly ordered memory
795  if (!is_fetch) {
796  if (te->mtype != TlbEntry::MemoryType::Normal) {
797  if (vaddr & mask(flags & AlignmentMask)) {
798  alignFaults++;
799  return std::make_shared<DataAbort>(
800  vaddr_tainted,
// NOTE(review): original line 801 (domain argument) is missing.
802  is_atomic ? false : is_write,
// NOTE(review): original lines 803-805 (remaining arguments) missing.
805  }
806  }
807  }
808 
809  if (te->nonCacheable) {
810  // Prevent prefetching from I/O devices.
811  if (req->isPrefetch()) {
812  // Here we can safely use the fault status for the short
813  // desc. format in all cases
814  return std::make_shared<PrefetchAbort>(
815  vaddr_tainted,
// NOTE(review): original lines 816-818 (remaining arguments) missing.
818  }
819  }
820 
821  uint8_t ap = 0x3 & (te->ap); // 2-bit access protection field
822  bool grant = false;
823 
824  uint8_t xn = te->xn;
825  uint8_t pxn = te->pxn;
826  bool r = !is_write && !is_fetch;
827  bool w = is_write;
828  bool x = is_fetch;
829 
830  // grant_read is used for faults from an atomic instruction that
831  // both reads and writes from a memory location. From a ISS point
832  // of view they count as read if a read to that address would have
833  // generated the fault; they count as writes otherwise
834  bool grant_read = true;
835  DPRINTF(TLBVerbose, "Checking permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
836  "w:%d, x:%d\n", ap, xn, pxn, r, w, x);
837 
838  if (isStage2) {
839  assert(ArmSystem::haveVirtualization(tc) && aarch64EL != EL2);
840  // In stage 2 we use the hypervisor access permission bits.
841  // The following permissions are described in ARM DDI 0487A.f
842  // D4-1802
843  uint8_t hap = 0x3 & te->hap;
844  grant_read = hap & 0x1;
845  if (is_fetch) {
846  // sctlr.wxn overrides the xn bit
847  grant = !sctlr.wxn && !xn;
848  } else if (is_write) {
849  grant = hap & 0x2;
850  } else { // is_read
851  grant = grant_read;
852  }
853  } else {
854  switch (aarch64EL) {
855  case EL0:
856  {
857  grant_read = ap & 0x1;
858  uint8_t perm = (ap << 2) | (xn << 1) | pxn;
859  switch (perm) {
860  case 0:
861  case 1:
862  case 8:
863  case 9:
864  grant = x;
865  break;
866  case 4:
867  case 5:
868  grant = r || w || (x && !sctlr.wxn);
869  break;
870  case 6:
871  case 7:
872  grant = r || w;
873  break;
874  case 12:
875  case 13:
876  grant = r || x;
877  break;
878  case 14:
879  case 15:
880  grant = r;
881  break;
882  default:
883  grant = false;
884  }
885  }
886  break;
887  case EL1:
888  {
889  if (checkPAN(tc, ap, req, mode)) {
890  grant = false;
891  grant_read = false;
892  break;
893  }
894 
895  uint8_t perm = (ap << 2) | (xn << 1) | pxn;
896  switch (perm) {
897  case 0:
898  case 2:
899  grant = r || w || (x && !sctlr.wxn);
900  break;
901  case 1:
902  case 3:
903  case 4:
904  case 5:
905  case 6:
906  case 7:
907  // regions that are writeable at EL0 should not be
908  // executable at EL1
909  grant = r || w;
910  break;
911  case 8:
912  case 10:
913  case 12:
914  case 14:
915  grant = r || x;
916  break;
917  case 9:
918  case 11:
919  case 13:
920  case 15:
921  grant = r;
922  break;
923  default:
924  grant = false;
925  }
926  }
927  break;
928  case EL2:
929  if (hcr.e2h && checkPAN(tc, ap, req, mode)) {
930  grant = false;
931  grant_read = false;
932  break;
933  }
// NOTE(review): original line 934 (presumably an M5_FALLTHROUGH marker
// — EL2 shares the EL3 permission decoding below) is missing; confirm
// the fall-through is intentional in the upstream file.
935  case EL3:
936  {
937  uint8_t perm = (ap & 0x2) | xn;
938  switch (perm) {
939  case 0:
940  grant = r || w || (x && !sctlr.wxn) ;
941  break;
942  case 1:
943  grant = r || w;
944  break;
945  case 2:
946  grant = r || x;
947  break;
948  case 3:
949  grant = r;
950  break;
951  default:
952  grant = false;
953  }
954  }
955  break;
956  }
957  }
958 
959  if (!grant) {
960  if (is_fetch) {
961  permsFaults++;
962  DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
963  "AP:%d priv:%d write:%d ns:%d sif:%d "
964  "sctlr.afe: %d\n",
965  ap, is_priv, is_write, te->ns, scr.sif, sctlr.afe);
966  // Use PC value instead of vaddr because vaddr might be aligned to
967  // cache line and should not be the address reported in FAR
968  return std::make_shared<PrefetchAbort>(
969  req->getPC(),
// NOTE(review): original lines 970-971 (remaining arguments) missing.
972  } else {
973  permsFaults++;
974  DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d "
975  "priv:%d write:%d\n", ap, is_priv, is_write);
976  return std::make_shared<DataAbort>(
977  vaddr_tainted, te->domain,
978  (is_atomic && !grant_read) ? false : is_write,
// NOTE(review): original lines 979-980 (remaining arguments) missing.
981  }
982  }
983 
984  return NoFault;
985 }
986 
987 bool
988 TLB::checkPAN(ThreadContext *tc, uint8_t ap, const RequestPtr &req, Mode mode)
989 {
990  // The PAN bit has no effect on:
991  // 1) Instruction accesses.
992  // 2) Data Cache instructions other than DC ZVA
993  // 3) Address translation instructions, other than ATS1E1RP and
994  // ATS1E1WP when ARMv8.2-ATS1E1 is implemented. (Unimplemented in
995  // gem5)
996  // 4) Unprivileged instructions (Unimplemented in gem5)
997  AA64MMFR1 mmfr1 = tc->readMiscReg(MISCREG_ID_AA64MMFR1_EL1);
998  if (mmfr1.pan && cpsr.pan && (ap & 0x1) && mode != Execute &&
999  (!req->isCacheMaintenance() ||
1000  (req->getFlags() & Request::CACHE_BLOCK_ZERO))) {
1001  return true;
1002  } else {
1003  return false;
1004  }
1005 }
1006 
1007 Fault
// NOTE(review): the first declarator line (original 1008) is missing;
// this is TLB::translateMmuOff — the flat (identity) translation path
// used when the MMU is disabled.
1009  TLB::ArmTranslationType tranType, Addr vaddr, bool long_desc_format)
1010 {
1011  bool is_fetch = (mode == Execute);
1012  req->setPaddr(vaddr);
1013  // When the MMU is off the security attribute corresponds to the
1014  // security state of the processor
1015  if (isSecure)
1016  req->setFlags(Request::SECURE);
1017 
1018  // @todo: double check this (ARM ARM issue C B3.2.1)
1019  if (long_desc_format || sctlr.tre == 0 || nmrr.ir0 == 0 ||
1020  nmrr.or0 == 0 || prrr.tr0 != 0x2) {
1021  if (!req->isCacheMaintenance()) {
1022  req->setFlags(Request::UNCACHEABLE);
1023  }
1024  req->setFlags(Request::STRICT_ORDER);
1025  }
1026 
1027  // Set memory attributes
1028  TlbEntry temp_te;
1029  temp_te.ns = !isSecure;
1030  if (isStage2 || hcr.dc == 0 || isSecure ||
1031  (isHyp && !(tranType & S1CTran))) {
1032 
1033  temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal
// NOTE(review): original line 1034 (the ':' branch of this ternary,
// presumably StronglyOrdered) is missing from this capture.
1035  temp_te.innerAttrs = 0x0;
1036  temp_te.outerAttrs = 0x0;
1037  temp_te.shareable = true;
1038  temp_te.outerShareable = true;
1039  } else {
// NOTE(review): original line 1040 (temp_te.mtype assignment for the
// HCR.DC==1 case) is missing from this capture.
1041  temp_te.innerAttrs = 0x3;
1042  temp_te.outerAttrs = 0x3;
1043  temp_te.shareable = false;
1044  temp_te.outerShareable = false;
1045  }
1046  temp_te.setAttributes(long_desc_format);
1047  DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: "
1048  "%d, innerAttrs: %d, outerAttrs: %d, isStage2: %d\n",
1049  temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
1050  isStage2);
1051  setAttr(temp_te.attributes);
1052 
// NOTE(review): original line 1053 (the return statement, presumably
// testTranslation or NoFault) is missing from this capture.
1054 }
1055 
1056 Fault
// NOTE(review): the first declarator line (original 1057) is missing;
// this is TLB::translateMmuOn — the enabled-MMU path that resolves a
// table entry and applies its attributes to the request.
1058  Translation *translation, bool &delay, bool timing,
1059  bool functional, Addr vaddr,
1060  ArmFault::TranMethod tranMethod)
1061 {
1062  TlbEntry *te = NULL;
1063  bool is_fetch = (mode == Execute);
1064  TlbEntry mergeTe;
1065 
1066  Request::Flags flags = req->getFlags();
1067  Addr vaddr_tainted = req->getVaddr();
1068 
1069  Fault fault = getResultTe(&te, req, tc, mode, translation, timing,
1070  functional, &mergeTe);
1071  // only proceed if we have a valid table entry
// A null entry with no fault means a walk is in flight: signal delay.
1072  if ((te == NULL) && (fault == NoFault)) delay = true;
1073 
1074  // If we have the table entry transfer some of the attributes to the
1075  // request that triggered the translation
1076  if (te != NULL) {
1077  // Set memory attributes
1078  DPRINTF(TLBVerbose,
1079  "Setting memory attributes: shareable: %d, innerAttrs: %d, "
1080  "outerAttrs: %d, mtype: %d, isStage2: %d\n",
1081  te->shareable, te->innerAttrs, te->outerAttrs,
1082  static_cast<uint8_t>(te->mtype), isStage2);
1083  setAttr(te->attributes);
1084 
1085  if (te->nonCacheable && !req->isCacheMaintenance())
1086  req->setFlags(Request::UNCACHEABLE);
1087 
1088  // Require requests to be ordered if the request goes to
1089  // strongly ordered or device memory (i.e., anything other
1090  // than normal memory requires strict order).
// NOTE(review): original line 1091 (the mtype condition for the
// STRICT_ORDER flag) is missing from this capture.
1092  req->setFlags(Request::STRICT_ORDER);
1093 
1094  Addr pa = te->pAddr(vaddr);
1095  req->setPaddr(pa);
1096 
1097  if (isSecure && !te->ns) {
1098  req->setFlags(Request::SECURE);
1099  }
1100  if ((!is_fetch) && (vaddr & mask(flags & AlignmentMask)) &&
// NOTE(review): original line 1101 (the device-memory condition) is
// missing from this capture.
1102  // Unaligned accesses to Device memory should always cause an
1103  // abort regardless of sctlr.a
1104  alignFaults++;
1105  bool is_write = (mode == Write);
1106  return std::make_shared<DataAbort>(
1107  vaddr_tainted,
// NOTE(review): original lines 1108-1109 (remaining DataAbort
// arguments) are missing from this capture.
1110  tranMethod);
1111  }
1112 
1113  // Check for a trickbox generated address fault
1114  if (fault == NoFault)
1115  fault = testTranslation(req, mode, te->domain);
1116  }
1117 
1118  if (fault == NoFault) {
1119  // Don't try to finalize a physical address unless the
1120  // translation has completed (i.e., there is a table entry).
1121  return te ? finalizePhysical(req, tc, mode) : NoFault;
1122  } else {
1123  return fault;
1124  }
1125 }
1126 
1127 Fault
// NOTE(review): the first declarator line (original 1128) is missing;
// this is TLB::translateFs — the full-system translation entry point
// that dispatches to the MMU-off or MMU-on path.
1129  Translation *translation, bool &delay, bool timing,
1130  TLB::ArmTranslationType tranType, bool functional)
1131 {
1132  // No such thing as a functional timing access
1133  assert(!(timing && functional));
1134 
1135  updateMiscReg(tc, tranType);
1136 
1137  Addr vaddr_tainted = req->getVaddr();
1138  Addr vaddr = 0;
// Strip address tags (TBI) on AArch64 before using the address.
1139  if (aarch64)
1140  vaddr = purifyTaggedAddr(vaddr_tainted, tc, aarch64EL, (TCR)ttbcr,
1141  mode==Execute);
1142  else
1143  vaddr = vaddr_tainted;
1144  Request::Flags flags = req->getFlags();
1145 
1146  bool is_fetch = (mode == Execute);
1147  bool is_write = (mode == Write);
1148  bool long_desc_format = aarch64 || longDescFormatInUse(tc);
1149  ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
// NOTE(review): original line 1150 (the ':' branch, presumably
// ArmFault::VmsaTran) is missing from this capture.
1151 
1152  DPRINTF(TLBVerbose,
1153  "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
1154  isPriv, flags & UserMode, isSecure, tranType & S1S2NsTran);
1155 
1156  DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
1157  "flags %#lx tranType 0x%x\n", vaddr_tainted, mode, isStage2,
1158  scr, sctlr, flags, tranType);
1159 
// With the relevant cache disabled, accesses are uncacheable and
// strictly ordered.
1160  if ((req->isInstFetch() && (!sctlr.i)) ||
1161  ((!req->isInstFetch()) && (!sctlr.c))){
1162  if (!req->isCacheMaintenance()) {
1163  req->setFlags(Request::UNCACHEABLE);
1164  }
1165  req->setFlags(Request::STRICT_ORDER);
1166  }
1167  if (!is_fetch) {
1168  if (sctlr.a || !(flags & AllowUnaligned)) {
1169  if (vaddr & mask(flags & AlignmentMask)) {
1170  alignFaults++;
1171  return std::make_shared<DataAbort>(
1172  vaddr_tainted,
// NOTE(review): original lines 1173-1174 (remaining DataAbort
// arguments) are missing from this capture.
1175  tranMethod);
1176  }
1177  }
1178  }
1179 
1180  // If guest MMU is off or hcr.vm=0 go straight to stage2
1181  if ((isStage2 && !hcr.vm) || (!isStage2 && !sctlr.m)) {
1182  return translateMmuOff(tc, req, mode, tranType, vaddr,
1183  long_desc_format);
1184  } else {
1185  DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
1186  isStage2 ? "IPA" : "VA", vaddr_tainted, asid);
1187  // Translation enabled
1188  return translateMmuOn(tc, req, mode, translation, delay, timing,
1189  functional, vaddr, tranMethod);
1190  }
1191 }
1192 
1193 Fault
1195  TLB::ArmTranslationType tranType)
1196 {
1197  updateMiscReg(tc, tranType);
1198 
1199  if (directToStage2) {
1200  assert(stage2Tlb);
1201  return stage2Tlb->translateAtomic(req, tc, mode, tranType);
1202  }
1203 
1204  bool delay = false;
1205  Fault fault;
1206  if (FullSystem)
1207  fault = translateFs(req, tc, mode, NULL, delay, false, tranType);
1208  else
1209  fault = translateSe(req, tc, mode, NULL, delay, false);
1210  assert(!delay);
1211  return fault;
1212 }
1213 
1214 Fault
1216  TLB::ArmTranslationType tranType)
1217 {
1218  updateMiscReg(tc, tranType);
1219 
1220  if (directToStage2) {
1221  assert(stage2Tlb);
1222  return stage2Tlb->translateFunctional(req, tc, mode, tranType);
1223  }
1224 
1225  bool delay = false;
1226  Fault fault;
1227  if (FullSystem)
1228  fault = translateFs(req, tc, mode, NULL, delay, false, tranType, true);
1229  else
1230  fault = translateSe(req, tc, mode, NULL, delay, false);
1231  assert(!delay);
1232  return fault;
1233 }
1234 
1235 void
1237  Translation *translation, Mode mode, TLB::ArmTranslationType tranType)
1238 {
1239  updateMiscReg(tc, tranType);
1240 
1241  if (directToStage2) {
1242  assert(stage2Tlb);
1243  stage2Tlb->translateTiming(req, tc, translation, mode, tranType);
1244  return;
1245  }
1246 
1247  assert(translation);
1248 
1249  translateComplete(req, tc, translation, mode, tranType, isStage2);
1250 }
1251 
1252 Fault
1254  Translation *translation, Mode mode, TLB::ArmTranslationType tranType,
1255  bool callFromS2)
1256 {
1257  bool delay = false;
1258  Fault fault;
1259  if (FullSystem)
1260  fault = translateFs(req, tc, mode, translation, delay, true, tranType);
1261  else
1262  fault = translateSe(req, tc, mode, translation, delay, true);
1263  DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n", delay, fault !=
1264  NoFault);
1265  // If we have a translation, and we're not in the middle of doing a stage
1266  // 2 translation tell the translation that we've either finished or its
1267  // going to take a while. By not doing this when we're in the middle of a
1268  // stage 2 translation we prevent marking the translation as delayed twice,
1269  // one when the translation starts and again when the stage 1 translation
1270  // completes.
1271  if (translation && (callFromS2 || !stage2Req || req->hasPaddr() || fault != NoFault)) {
1272  if (!delay)
1273  translation->finish(fault, req, tc, mode);
1274  else
1275  translation->markDelayed();
1276  }
1277  return fault;
1278 }
1279 
1280 Port *
1282 {
1283  return &stage2Mmu->getDMAPort();
1284 }
1285 
1286 void
// TLB::updateMiscReg: refresh the TLB's cached copies of the system
// registers (sctlr, ttbcr, scr, hcr, prrr, nmrr, dacr, ...) and the derived
// routing state (aarch64EL, isSecure, isHyp, stage2Req, directToStage2).
// NOTE(review): this is a text dump; the function header and several
// tc->readMiscReg(...) lines were lost in extraction, so only comments are
// added here — the surviving lines are untouched.
1288 {
1289  // check if the regs have changed, or the translation mode is different.
1290  // NOTE: the tran type doesn't affect stage 2 TLB's as they only handle
1291  // one type of translation anyway
1292  if (miscRegValid && miscRegContext == tc->contextId() &&
1293  ((tranType == curTranType) || isStage2)) {
1294  return;
1295  }
1296 
1297  DPRINTF(TLBVerbose, "TLB variables changed!\n");
1298  cpsr = tc->readMiscReg(MISCREG_CPSR);
1299 
1300  // Dependencies: SCR/SCR_EL3, CPSR
1301  isSecure = inSecureState(tc) &&
1302  !(tranType & HypMode) && !(tranType & S1S2NsTran);
1303 
// Stage 2 always translates as if at EL2; otherwise the width of the
// translation regime follows the EL implied by the translation type
// (EL0 accesses use the EL1 regime).
1304  aarch64EL = tranTypeEL(cpsr, tranType);
1305  aarch64 = isStage2 ?
1306  ELIs64(tc, EL2) :
1307  ELIs64(tc, aarch64EL == EL0 ? EL1 : aarch64EL);
1308 
1309  if (aarch64) { // AArch64
1310  // determine EL we need to translate in
1311  switch (aarch64EL) {
1312  case EL0:
1313  case EL1:
1314  {
// ttbcr.a1 selects which TTBR supplies the ASID; with large ASIDs
// enabled the ASID is 16 bits (63:48), otherwise 8 bits (55:48).
1317  uint64_t ttbr_asid = ttbcr.a1 ?
1320  asid = bits(ttbr_asid,
1321  (haveLargeAsid64 && ttbcr.as) ? 63 : 55, 48);
1322  }
1323  break;
// EL2/EL3 translation regimes have no ASID; -1 marks it unused.
1324  case EL2:
1327  asid = -1;
1328  break;
1329  case EL3:
1332  asid = -1;
1333  break;
1334  }
1337  isPriv = aarch64EL != EL0;
1338  if (haveVirtualization) {
1339  vmid = bits(tc->readMiscReg(MISCREG_VTTBR_EL2), 55, 48);
1340  isHyp = aarch64EL == EL2;
1341  isHyp |= tranType & HypMode;
1342  isHyp &= (tranType & S1S2NsTran) == 0;
1343  isHyp &= (tranType & S1CTran) == 0;
1344  // Work out if we should skip the first stage of translation and go
1345  // directly to stage 2. This value is cached so we don't have to
1346  // compute it for every translation.
1347  stage2Req = isStage2 ||
1348  (hcr.vm && !isHyp && !isSecure &&
1349  !(tranType & S1CTran) && (aarch64EL < EL2) &&
1350  !(tranType & S1E1Tran)); // <--- FIX THIS HACK
1351  stage2DescReq = isStage2 || (hcr.vm && !isHyp && !isSecure &&
1352  (aarch64EL < EL2));
1353  directToStage2 = !isStage2 && stage2Req && !sctlr.m;
1354  } else {
// No virtualization extensions: stage 2 can never be required.
1355  vmid = 0;
1356  isHyp = false;
1357  directToStage2 = false;
1358  stage2Req = false;
1359  stage2DescReq = false;
1360  }
1361  } else { // AArch32
// The banked AArch32 registers are read through snsBankedIndex so the
// secure or non-secure copy is picked to match the current state.
1363  !isSecure));
1365  !isSecure));
1366  scr = tc->readMiscReg(MISCREG_SCR);
1367  isPriv = cpsr.mode != MODE_USER;
1368  if (longDescFormatInUse(tc)) {
// LPAE: the ASID lives in the active TTBR (selected by ttbcr.a1).
1369  uint64_t ttbr_asid = tc->readMiscReg(
1371  MISCREG_TTBR0,
1372  tc, !isSecure));
1373  asid = bits(ttbr_asid, 55, 48);
1374  } else { // Short-descriptor translation table format in use
1375  CONTEXTIDR context_id = tc->readMiscReg(snsBankedIndex(
1377  asid = context_id.asid;
1378  }
1380  !isSecure));
1382  !isSecure));
1384  !isSecure));
1385  hcr = tc->readMiscReg(MISCREG_HCR);
1386 
1387  if (haveVirtualization) {
1388  vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48);
1389  isHyp = cpsr.mode == MODE_HYP;
1390  isHyp |= tranType & HypMode;
1391  isHyp &= (tranType & S1S2NsTran) == 0;
1392  isHyp &= (tranType & S1CTran) == 0;
// Hyp mode uses its own system control register (presumably HSCTLR
// was read on the line lost here — confirm against upstream).
1393  if (isHyp) {
1395  }
1396  // Work out if we should skip the first stage of translation and go
1397  // directly to stage 2. This value is cached so we don't have to
1398  // compute it for every translation.
1399  stage2Req = hcr.vm && !isStage2 && !isHyp && !isSecure &&
1400  !(tranType & S1CTran);
1401  stage2DescReq = hcr.vm && !isStage2 && !isHyp && !isSecure;
1402  directToStage2 = stage2Req && !sctlr.m;
1403  } else {
1404  vmid = 0;
1405  stage2Req = false;
1406  isHyp = false;
1407  directToStage2 = false;
1408  stage2DescReq = false;
1409  }
1410  }
// Record what this cached state corresponds to so the early-out above
// can be taken next time.
1411  miscRegValid = true;
1412  miscRegContext = tc->contextId();
1413  curTranType = tranType;
1414 }
1415 
1418 {
1419  switch (type) {
1420  case S1E0Tran:
1421  case S12E0Tran:
1422  return EL0;
1423 
1424  case S1E1Tran:
1425  case S12E1Tran:
1426  return EL1;
1427 
1428  case S1E2Tran:
1429  return EL2;
1430 
1431  case S1E3Tran:
1432  return EL3;
1433 
1434  case NormalTran:
1435  case S1CTran:
1436  case S1S2NsTran:
1437  case HypMode:
1438  return currEL(cpsr);
1439 
1440  default:
1441  panic("Unknown translation mode!\n");
1442  }
1443 }
1444 
1445 Fault
1447  Translation *translation, bool timing, bool functional,
1448  bool is_secure, TLB::ArmTranslationType tranType)
1449 {
1450  // In a 2-stage system, the IPA->PA translation can be started via this
1451  // call so make sure the miscRegs are correct.
1452  if (isStage2) {
1453  updateMiscReg(tc, tranType);
1454  }
1455  bool is_fetch = (mode == Execute);
1456  bool is_write = (mode == Write);
1457 
1458  Addr vaddr_tainted = req->getVaddr();
1459  Addr vaddr = 0;
1460  ExceptionLevel target_el = aarch64 ? aarch64EL : EL1;
1461  if (aarch64) {
1462  vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el, (TCR)ttbcr,
1463  mode==Execute);
1464  } else {
1465  vaddr = vaddr_tainted;
1466  }
1467  *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
1468  if (*te == NULL) {
1469  if (req->isPrefetch()) {
1470  // if the request is a prefetch don't attempt to fill the TLB or go
1471  // any further with the memory access (here we can safely use the
1472  // fault status for the short desc. format in all cases)
1473  prefetchFaults++;
1474  return std::make_shared<PrefetchAbort>(
1475  vaddr_tainted, ArmFault::PrefetchTLBMiss, isStage2);
1476  }
1477 
1478  if (is_fetch)
1479  instMisses++;
1480  else if (is_write)
1481  writeMisses++;
1482  else
1483  readMisses++;
1484 
1485  // start translation table walk, pass variables rather than
1486  // re-retreaving in table walker for speed
1487  DPRINTF(TLB, "TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
1488  vaddr_tainted, asid, vmid);
1489  Fault fault;
1490  fault = tableWalker->walk(req, tc, asid, vmid, isHyp, mode,
1491  translation, timing, functional, is_secure,
1492  tranType, stage2DescReq);
1493  // for timing mode, return and wait for table walk,
1494  if (timing || fault != NoFault) {
1495  return fault;
1496  }
1497 
1498  *te = lookup(vaddr, asid, vmid, isHyp, is_secure, false, false, target_el);
1499  if (!*te)
1500  printTlb();
1501  assert(*te);
1502  } else {
1503  if (is_fetch)
1504  instHits++;
1505  else if (is_write)
1506  writeHits++;
1507  else
1508  readHits++;
1509  }
1510  return NoFault;
1511 }
1512 
1513 Fault
1515  ThreadContext *tc, Mode mode,
1516  Translation *translation, bool timing, bool functional,
1517  TlbEntry *mergeTe)
1518 {
1519  Fault fault;
1520 
1521  if (isStage2) {
1522  // We are already in the stage 2 TLB. Grab the table entry for stage
1523  // 2 only. We are here because stage 1 translation is disabled.
1524  TlbEntry *s2Te = NULL;
1525  // Get the stage 2 table entry
1526  fault = getTE(&s2Te, req, tc, mode, translation, timing, functional,
1528  // Check permissions of stage 2
1529  if ((s2Te != NULL) && (fault == NoFault)) {
1530  if (aarch64)
1531  fault = checkPermissions64(s2Te, req, mode, tc);
1532  else
1533  fault = checkPermissions(s2Te, req, mode);
1534  }
1535  *te = s2Te;
1536  return fault;
1537  }
1538 
1539  TlbEntry *s1Te = NULL;
1540 
1541  Addr vaddr_tainted = req->getVaddr();
1542 
1543  // Get the stage 1 table entry
1544  fault = getTE(&s1Te, req, tc, mode, translation, timing, functional,
1546  // only proceed if we have a valid table entry
1547  if ((s1Te != NULL) && (fault == NoFault)) {
1548  // Check stage 1 permissions before checking stage 2
1549  if (aarch64)
1550  fault = checkPermissions64(s1Te, req, mode, tc);
1551  else
1552  fault = checkPermissions(s1Te, req, mode);
1553  if (stage2Req & (fault == NoFault)) {
1554  Stage2LookUp *s2Lookup = new Stage2LookUp(this, stage2Tlb, *s1Te,
1555  req, translation, mode, timing, functional, curTranType);
1556  fault = s2Lookup->getTe(tc, mergeTe);
1557  if (s2Lookup->isComplete()) {
1558  *te = mergeTe;
1559  // We've finished with the lookup so delete it
1560  delete s2Lookup;
1561  } else {
1562  // The lookup hasn't completed, so we can't delete it now. We
1563  // get round this by asking the object to self delete when the
1564  // translation is complete.
1565  s2Lookup->setSelfDelete();
1566  }
1567  } else {
1568  // This case deals with an S1 hit (or bypass), followed by
1569  // an S2 hit-but-perms issue
1570  if (isStage2) {
1571  DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
1572  vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0, fault);
1573  if (fault != NoFault) {
1574  ArmFault *armFault = reinterpret_cast<ArmFault *>(fault.get());
1575  armFault->annotate(ArmFault::S1PTW, false);
1576  armFault->annotate(ArmFault::OVA, vaddr_tainted);
1577  }
1578  }
1579  *te = s1Te;
1580  }
1581  }
1582  return fault;
1583 }
1584 
1585 void
1587 {
1588  if (!_ti) {
1589  test = nullptr;
1590  } else {
1591  TlbTestInterface *ti(dynamic_cast<TlbTestInterface *>(_ti));
1592  fatal_if(!ti, "%s is not a valid ARM TLB tester\n", _ti->name());
1593  test = ti;
1594  }
1595 }
1596 
1597 Fault
1600 {
1601  if (!test || !req->hasSize() || req->getSize() == 0 ||
1602  req->isCacheMaintenance()) {
1603  return NoFault;
1604  } else {
1605  return test->translationCheck(req, isPriv, mode, domain);
1606  }
1607 }
1608 
1609 Fault
1611  TlbEntry::DomainType domain, LookupLevel lookup_level)
1612 {
1613  if (!test) {
1614  return NoFault;
1615  } else {
1616  return test->walkCheck(pa, size, va, is_secure, isPriv, mode,
1617  domain, lookup_level);
1618  }
1619 }
1620 
1621 
1622 ArmISA::TLB *
1623 ArmTLBParams::create()
1624 {
1625  return new ArmISA::TLB(this);
1626 }
uint8_t innerAttrs
Definition: pagetable.hh:113
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:163
#define DPRINTF(x,...)
Definition: trace.hh:225
Stats::Formula hits
Definition: tlb.hh:184
int size
Definition: tlb.hh:146
ProbePoints::PMUUPtr ppRefills
PMU probe for TLB refills.
Definition: tlb.hh:189
ExceptionLevel aarch64EL
Definition: tlb.hh:409
bool aarch64
Definition: tlb.hh:408
Ports are used to interface objects to each other.
Definition: port.hh:56
AddrRange m5opRange
Definition: tlb.hh:431
void translateTiming(const RequestPtr &req, ThreadContext *tc, Translation *translation, Mode mode, ArmTranslationType tranType)
Definition: tlb.cc:1236
Fault checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode, ThreadContext *tc)
Definition: tlb.cc:754
Fault getResultTe(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode, Translation *translation, bool timing, bool functional, TlbEntry *mergeTe)
Definition: tlb.cc:1514
void printTlb() const
Definition: tlb.cc:230
decltype(nullptr) constexpr NoFault
Definition: types.hh:243
Cycles is a wrapper class for representing cycle counts, i.e.
Definition: types.hh:81
bool isHyp
Definition: tlb.hh:414
virtual ~TLB()
Definition: tlb.cc:95
Bitfield< 7 > i
bool directToStage2
Definition: tlb.hh:154
TLB * stage2Tlb
Definition: tlb.hh:158
Bitfield< 0 > m
Fault getTE(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode, Translation *translation, bool timing, bool functional, bool is_secure, ArmTranslationType tranType)
Definition: tlb.cc:1446
void flushAllNs(ExceptionLevel target_el, bool ignore_el=false)
Remove all entries in the non secure world, depending on whether they were allocated in hyp mode or n...
Definition: tlb.cc:276
void flushMva(Addr mva, bool secure_lookup, ExceptionLevel target_el)
Remove all entries that match the va regardless of asn.
Definition: tlb.cc:342
TTBCR ttbcr
Definition: tlb.hh:415
DmaPort & getDMAPort()
Get the port that ultimately belongs to the stage-two MMU, but is used by the two table walkers...
Definition: stage2_mmu.hh:111
Declaration of a request, the overall memory request consisting of the parts of the request that are ...
bool contains(const Addr &a) const
Determine if the range contains an address.
Definition: addr_range.hh:402
std::shared_ptr< Request > RequestPtr
Definition: request.hh:81
void flushAllSecurity(bool secure_lookup, ExceptionLevel target_el, bool ignore_el=false)
Reset the entire TLB.
Definition: tlb.cc:244
virtual void markDelayed()=0
Signal that the translation has been delayed due to a hw page table walk.
static void decodeAddrOffset(Addr offset, uint8_t &func)
Definition: pseudo_inst.hh:91
ip6_addr_t addr
Definition: inet.hh:330
bool stage2Req
Definition: tlb.hh:148
bool isSecure
Definition: tlb.hh:413
The request targets the secure memory space.
Definition: request.hh:172
Bitfield< 30 > te
bool FullSystem
The FullSystem variable can be used to determine the current mode of simulation.
Definition: root.cc:132
bool haveLPAE() const
virtual Process * getProcessPtr()=0
static ExceptionLevel tranTypeEL(CPSR cpsr, ArmTranslationType type)
Determine the EL to use for the purpose of a translation given a specific translation type...
Definition: tlb.cc:1417
Overload hash function for BasicBlockRange type.
Definition: vec_reg.hh:587
Definition: ccregs.hh:41
std::string print() const
Definition: pagetable.hh:284
void regStats() override
Callback to set stat parameters.
Definition: tlb.cc:413
Bitfield< 30 > ti
Stats::Scalar prefetchFaults
Definition: tlb.hh:177
bool stage2DescReq
Definition: tlb.hh:152
MemoryType mtype
Definition: pagetable.hh:119
Bitfield< 4, 0 > mode
bool checkPAN(ThreadContext *tc, uint8_t ap, const RequestPtr &req, Mode mode)
Definition: tlb.cc:988
void drainResume() override
Resume execution after a successful drain.
Definition: tlb.cc:381
Port * getTableWalkerPort() override
Get the table walker port.
Definition: tlb.cc:1281
Addr purifyTaggedAddr(Addr addr, ThreadContext *tc, ExceptionLevel el, TCR tcr, bool isInstr)
Removes the tag from tagged addresses if that mode is enabled.
Definition: utility.cc:480
ThreadContext is the external interface to all thread state for anything outside of the CPU...
static ExceptionLevel currEL(const ThreadContext *tc)
Definition: utility.hh:141
Fault walk(const RequestPtr &req, ThreadContext *tc, uint16_t asid, uint8_t _vmid, bool _isHyp, TLB::Mode mode, TLB::Translation *_trans, bool timing, bool functional, bool secure, TLB::ArmTranslationType tranType, bool _stage2Req)
The request is to an uncacheable address.
Definition: request.hh:113
ExceptionLevel
Definition: types.hh:583
void setLE(T v)
Set the value in the data pointer to v as little endian.
ExceptionLevel el
Definition: pagetable.hh:133
Fault testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode, TlbEntry::DomainType domain, LookupLevel lookup_level)
Definition: tlb.cc:1610
Stats::Formula writeAccesses
Definition: tlb.hh:182
bool isPriv
Definition: tlb.hh:412
TableWalker * tableWalker
Definition: tlb.hh:157
Bitfield< 3, 1 > perm
Definition: pagetable.hh:68
bool haveVirtualization() const
Bitfield< 3 > x
Definition: pagetable.hh:69
Stats::Scalar flushedEntries
Definition: tlb.hh:175
Stats::Scalar readMisses
Definition: tlb.hh:167
This is a write that is targeted and zeroing an entire cache block.
Definition: request.hh:131
bool ELIs64(ThreadContext *tc, ExceptionLevel el)
Definition: utility.cc:336
Definition: tlb.hh:50
Stats::Formula misses
Definition: tlb.hh:185
Bitfield< 0 > ns
bool miscRegValid
Definition: tlb.hh:422
bool haveLargeAsid64() const
uint8_t type
Definition: inet.hh:328
Fault getTe(ThreadContext *tc, TlbEntry *destTe)
Fault finalizePhysical(const RequestPtr &req, ThreadContext *tc, Mode mode) const override
Do post-translation physical address finalization.
Definition: tlb.cc:133
Bitfield< 3, 2 > el
HCR hcr
Definition: tlb.hh:420
Stats::Scalar permsFaults
Definition: tlb.hh:179
int rangeMRU
Definition: tlb.hh:191
Stats::Scalar writeMisses
Definition: tlb.hh:169
#define M5_FALLTHROUGH
Definition: compiler.hh:84
bool translate(Addr vaddr, Addr &paddr)
Translate function.
Definition: page_table.cc:140
bool haveVirtualization
Definition: tlb.hh:428
Stats::Scalar instMisses
Definition: tlb.hh:165
void _flushMva(Addr mva, uint64_t asn, bool secure_lookup, bool ignore_asn, ExceptionLevel target_el)
Remove any entries that match both a va and asn.
Definition: tlb.cc:351
virtual void annotate(AnnotationIDs id, uint64_t val)
Definition: faults.hh:220
uint16_t asid
Definition: tlb.hh:416
void flushAsid(uint64_t asn, bool secure_lookup, ExceptionLevel target_el)
Remove any entries that match the asn.
Definition: tlb.cc:318
void setMMU(Stage2MMU *m, MasterID master_id)
Definition: table_walker.cc:99
PRRR prrr
Definition: tlb.hh:418
Stats::Scalar inserts
Definition: tlb.hh:170
Bitfield< 39, 12 > pa
bool haveLargeAsid64
Definition: tlb.hh:429
Stats::Scalar domainFaults
Definition: tlb.hh:178
void updateMiscReg(ThreadContext *tc, ArmTranslationType tranType=NormalTran)
Definition: tlb.cc:1287
uint32_t dacr
Definition: tlb.hh:421
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condi...
Definition: logging.hh:199
Fault translateFs(const RequestPtr &req, ThreadContext *tc, Mode mode, Translation *translation, bool &delay, bool timing, ArmTranslationType tranType, bool functional=false)
Definition: tlb.cc:1128
virtual void finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc, Mode mode)=0
bool checkELMatch(ExceptionLevel target_el) const
Definition: pagetable.hh:219
uint64_t attributes
Definition: pagetable.hh:103
Stage2MMU * stage2Mmu
Definition: tlb.hh:159
Addr pAddr(Addr va) const
Definition: pagetable.hh:229
bool translateFunctional(ThreadContext *tc, Addr vaddr, Addr &paddr)
Do a functional lookup on the TLB (for debugging) and don&#39;t modify any internal state.
Definition: tlb.cc:115
bool haveVirtualization() const
Returns true if this system implements the virtualization Extensions.
Definition: system.hh:159
ArmTranslationType
Definition: tlb.hh:118
Bitfield< 0 > w
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:140
Declaration of IniFile object.
uint8_t outerAttrs
Definition: pagetable.hh:114
uint16_t MasterID
Definition: request.hh:84
Fault checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode)
Definition: tlb.cc:576
bool haveLPAE
Definition: tlb.hh:427
A Packet is used to encapsulate a transfer between two objects in the memory system (e...
Definition: packet.hh:249
Stats::Scalar writeHits
Definition: tlb.hh:168
Bitfield< 8 > va
Stats::Scalar flushTlbMva
Definition: tlb.hh:172
Bitfield< 34 > aarch64
Definition: types.hh:89
void init() override
setup all the back pointers
Definition: tlb.cc:101
Stats::Scalar instHits
Definition: tlb.hh:164
Stats::Formula accesses
Definition: tlb.hh:186
bool isComplete() const
ProbeManager * getProbeManager()
Get the probe manager for this object.
Definition: sim_object.cc:117
Mode
Definition: tlb.hh:57
Stats::Scalar flushTlbMvaAsid
Definition: tlb.hh:173
Bitfield< 9 > e
Derived & name(const std::string &name)
Set the name and marks this stat to print at the end of simulation.
Definition: statistics.hh:276
const AddrRange & m5opRange() const
Range used by memory-mapped m5 pseudo-ops if enabled.
Definition: system.hh:462
virtual const std::string name() const
Definition: sim_object.hh:129
uint64_t _attr
Definition: tlb.hh:153
ProbePointArg generates a point for the class of Arg.
void flushIpaVmid(Addr ipa, bool secure_lookup, ExceptionLevel target_el)
Invalidate all entries in the stage 2 TLB that match the given ipa and the current VMID...
Definition: tlb.cc:374
Stats::Formula readAccesses
Definition: tlb.hh:181
EmulationPageTable * pTable
Definition: process.hh:174
Stats::Scalar alignFaults
Definition: tlb.hh:176
void flushMvaAsid(Addr mva, uint64_t asn, bool secure_lookup, ExceptionLevel target_el)
Remove any entries that match both a va and asn.
Definition: tlb.cc:307
Declarations of a non-full system Page Table.
bool longDescFormatInUse(ThreadContext *tc)
Definition: utility.cc:197
Bitfield< 7, 4 > domain
TlbEntry * lookup(Addr vpn, uint16_t asn, uint8_t vmid, bool hyp, bool secure, bool functional, bool ignore_asn, ExceptionLevel target_el)
Lookup an entry in the TLB.
Definition: tlb.cc:157
Stats::Scalar readHits
Definition: tlb.hh:166
void insert(Addr vaddr, TlbEntry &pte)
Definition: tlb.cc:200
Definition: test.h:38
void regProbePoints() override
Register probe points for this object.
Definition: tlb.cc:530
Fault translateSe(const RequestPtr &req, ThreadContext *tc, Mode mode, Translation *translation, bool &delay, bool timing)
Definition: tlb.cc:536
void setMMU(Stage2MMU *m, MasterID master_id)
Definition: tlb.cc:108
DomainType domain
Definition: pagetable.hh:117
int snsBankedIndex(MiscRegIndex reg, ThreadContext *tc)
Definition: miscregs.cc:1091
NMRR nmrr
Definition: tlb.hh:419
Fault translateMmuOff(ThreadContext *tc, const RequestPtr &req, Mode mode, TLB::ArmTranslationType tranType, Addr vaddr, bool long_desc_format)
Definition: tlb.cc:1008
virtual ContextID contextId() const =0
Stats::Formula instAccesses
Definition: tlb.hh:183
The request is required to be strictly ordered by CPU models and is non-speculative.
Definition: request.hh:123
SCTLR sctlr
Definition: tlb.hh:410
void setTlb(TLB *_tlb)
Addr start() const
Get the start address of the range.
Definition: addr_range.hh:293
Bitfield< 3, 0 > mask
Definition: types.hh:62
Derived & desc(const std::string &_desc)
Set the description and marks this stat to print at the end of simulation.
Definition: statistics.hh:309
CPSR cpsr
Definition: tlb.hh:407
Stats::Scalar flushTlbAsid
Definition: tlb.hh:174
virtual void regStats()
Callback to set stat parameters.
Definition: group.cc:64
bool inSecureState(ThreadContext *tc)
Definition: utility.cc:174
Fault translateComplete(const RequestPtr &req, ThreadContext *tc, Translation *translation, Mode mode, ArmTranslationType tranType, bool callFromS2)
Definition: tlb.cc:1253
T bits(T val, int first, int last)
Extract the bitfield from position &#39;first&#39; to &#39;last&#39; (inclusive) from &#39;val&#39; and right justify it...
Definition: bitfield.hh:71
void takeOverFrom(BaseTLB *otlb) override
Take over from an old tlb context.
Definition: tlb.cc:389
ArmTranslationType curTranType
Definition: tlb.hh:424
LookupLevel lookupLevel
Definition: pagetable.hh:105
Bitfield< 0 > p
virtual RegVal readMiscReg(RegIndex misc_reg)=0
LookupLevel
Definition: pagetable.hh:74
void setAttr(uint64_t attr)
Accessor functions for memory attributes for last accessed TLB entry.
Definition: tlb.hh:337
Bitfield< 29, 6 > pfn
std::shared_ptr< FaultBase > Fault
Definition: types.hh:238
TlbEntry * table
Definition: tlb.hh:145
Fault translateMmuOn(ThreadContext *tc, const RequestPtr &req, Mode mode, Translation *translation, bool &delay, bool timing, bool functional, Addr vaddr, ArmFault::TranMethod tranMethod)
Definition: tlb.cc:1057
Fault translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode, ArmTranslationType tranType)
Definition: tlb.cc:1194
Abstract superclass for simulation objects.
Definition: sim_object.hh:93
Stats::Scalar flushTlb
Definition: tlb.hh:171
uint8_t vmid
Definition: tlb.hh:417
void setAttributes(bool lpae)
Definition: pagetable.hh:277
void setTestInterface(SimObject *ti)
Definition: tlb.cc:1586
TLB * stage2Tlb() const
Definition: stage2_mmu.hh:120
bool isStage2
Definition: tlb.hh:147
ContextID miscRegContext
Definition: tlb.hh:423
SCR scr
Definition: tlb.hh:411
Fault testTranslation(const RequestPtr &req, Mode mode, TlbEntry::DomainType domain)
Definition: tlb.cc:1598

Generated on Thu May 28 2020 16:11:00 for gem5 by doxygen 1.8.13