gem5  v20.0.0.2
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
table_walker.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2010, 2012-2019 ARM Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions are
16  * met: redistributions of source code must retain the above copyright
17  * notice, this list of conditions and the following disclaimer;
18  * redistributions in binary form must reproduce the above copyright
19  * notice, this list of conditions and the following disclaimer in the
20  * documentation and/or other materials provided with the distribution;
21  * neither the name of the copyright holders nor the names of its
22  * contributors may be used to endorse or promote products derived from
23  * this software without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  */
37 #include "arch/arm/table_walker.hh"
38 
39 #include <memory>
40 
41 #include "arch/arm/faults.hh"
42 #include "arch/arm/stage2_mmu.hh"
43 #include "arch/arm/system.hh"
44 #include "arch/arm/tlb.hh"
45 #include "cpu/base.hh"
46 #include "cpu/thread_context.hh"
47 #include "debug/Checkpoint.hh"
48 #include "debug/Drain.hh"
49 #include "debug/TLB.hh"
50 #include "debug/TLBVerbose.hh"
51 #include "dev/dma_device.hh"
52 #include "sim/system.hh"
53 
54 using namespace ArmISA;
55 
// TableWalker constructor: member-initializer list and body.
// NOTE(review): this is a rendered listing; the signature line and several
// hyperlinked initializer lines (the long-descriptor per-level events) are
// elided from this view.
57  : ClockedObject(p),
58  stage2Mmu(NULL), port(NULL), masterId(Request::invldMasterId),
59  isStage2(p->is_stage2), tlb(NULL),
60  currState(NULL), pending(false),
61  numSquashable(p->num_squash_per_cycle),
62  pendingReqs(0),
63  pendingChangeTick(curTick()),
// Descriptor-completion events: each simply forwards to the matching
// wrapper member function when the descriptor fetch finishes.
64  doL1DescEvent([this]{ doL1DescriptorWrapper(); }, name()),
65  doL2DescEvent([this]{ doL2DescriptorWrapper(); }, name()),
72  doProcessEvent([this]{ processWalkWrapper(); }, name())
73 {
74  sctlr = 0;
75 
76  // Cache system-level properties
77  if (FullSystem) {
// In full-system mode the owning System must be an ArmSystem; query it
// for the capability flags (security extensions, LPAE, PA range).
78  ArmSystem *armSys = dynamic_cast<ArmSystem *>(p->sys);
79  assert(armSys);
80  haveSecurity = armSys->haveSecurity();
81  _haveLPAE = armSys->haveLPAE();
83  physAddrRange = armSys->physAddrRange();
85  } else {
// Syscall-emulation mode: no ArmSystem to query, fall back to a
// conservative 32-bit physical address space.
87  _haveLargeAsid64 = false;
88  physAddrRange = 32;
89  }
90 
91 }
92 
// Destructor: nothing to release explicitly; members clean up through
// their own destructors. (Signature line elided in this listing.)
94 {
95  ;
96 }
97 
98 void
// setMMU(): bind this walker to its stage-2 MMU, adopt that MMU's DMA
// port for descriptor fetches, and record the master id used to tag the
// walker's memory requests. (Signature line elided in this listing.)
100 {
101  stage2Mmu = m;
102  port = &m->getDMAPort();
103  masterId = master_id;
104 }
105 
106 void
// init(): verify that configuration wired up a stage-2 MMU, a DMA port
// and a TLB before simulation starts; any missing piece is a fatal
// configuration error. (Signature line elided in this listing.)
108 {
109  fatal_if(!stage2Mmu, "Table walker must have a valid stage-2 MMU\n");
110  fatal_if(!port, "Table walker must have a valid port\n");
111  fatal_if(!tlb, "Table walker must have a valid TLB\n");
112 }
113 
114 Port &
115 TableWalker::getPort(const std::string &if_name, PortID idx)
116 {
117  if (if_name == "port") {
118  if (!isStage2) {
119  return *port;
120  } else {
121  fatal("Cannot access table walker port through stage-two walker\n");
122  }
123  }
124  return ClockedObject::getPort(if_name, idx);
125 }
126 
// WalkerState default constructor: zero/false-initializes every field of
// the per-walk bookkeeping record. Note that l2Desc is initialized to
// alias l1Desc. (The constructor's opening line is elided in this
// rendered listing.)
128  tc(nullptr), aarch64(false), el(EL0), physAddrRange(0), req(nullptr),
129  asid(0), vmid(0), isHyp(false), transState(nullptr),
130  vaddr(0), vaddr_tainted(0),
131  sctlr(0), scr(0), cpsr(0), tcr(0),
132  htcr(0), hcr(0), vtcr(0),
133  isWrite(false), isFetch(false), isSecure(false),
134  isUncacheable(false),
135  secureLookup(false), rwTable(false), userTable(false), xnTable(false),
136  pxnTable(false), hpd(false), stage2Req(false),
137  stage2Tran(nullptr), timing(false), functional(false),
138  mode(BaseTLB::Read), tranType(TLB::NormalTran), l2Desc(l1Desc),
139  delayed(false), tableWalker(nullptr)
140 {
141 }
142 
143 void
// completeDrain(): called when an in-flight walk finishes. If a drain
// was requested and no work remains at any lookup level nor in the
// pending queue, report the drain as complete. (Signature line elided.)
145 {
146  if (drainState() == DrainState::Draining &&
147  stateQueues[L0].empty() && stateQueues[L1].empty() &&
148  stateQueues[L2].empty() && stateQueues[L3].empty() &&
149  pendingQueue.empty()) {
150 
151  DPRINTF(Drain, "TableWalker done draining, processing drain event\n");
152  signalDrainDone();
153  }
154 }
155 
// drain(): report Draining while any per-level state queue or the
// pending queue still holds outstanding walks, Drained otherwise.
// (Return type and signature lines are elided in this listing.)
158 {
159  bool state_queues_not_empty = false;
160 
161  for (int i = 0; i < MAX_LOOKUP_LEVELS; ++i) {
162  if (!stateQueues[i].empty()) {
163  state_queues_not_empty = true;
164  break;
165  }
166  }
167 
168  if (state_queues_not_empty || pendingQueue.size()) {
169  DPRINTF(Drain, "TableWalker not drained\n");
170  return DrainState::Draining;
171  } else {
172  DPRINTF(Drain, "TableWalker free, no need to drain\n");
173  return DrainState::Drained;
174  }
175 }
176 
177 void
// drainResume(): when resuming in timing mode, discard any walk that
// was in flight at the time the drain started. (Signature line elided.)
179 {
180  if (params()->sys->isTimingMode() && currState) {
181  delete currState;
182  currState = NULL;
183  pendingChange();
184  }
185 }
186 
// walk(): entry point for a translation table walk. Sets up (or reuses)
// a WalkerState for this request, captures the translation context
// (EL, registers, secure state), selects the translation regime
// (AArch64 / LPAE long-descriptor / short-descriptor) and either starts
// the walk immediately (atomic/functional) or enqueues it (timing).
// NOTE(review): this is a rendered listing; several hyperlinked lines
// (register reads such as the sctlr/tcr lookups) are elided from view.
187 Fault
188 TableWalker::walk(const RequestPtr &_req, ThreadContext *_tc, uint16_t _asid,
189  uint8_t _vmid, bool _isHyp, TLB::Mode _mode,
190  TLB::Translation *_trans, bool _timing, bool _functional,
191  bool secure, TLB::ArmTranslationType tranType,
192  bool _stage2Req)
193 {
194  assert(!(_functional && _timing));
195  ++statWalks;
196 
197  WalkerState *savedCurrState = NULL;
198 
199  if (!currState && !_functional) {
200  // For atomic mode, a new WalkerState instance should be only created
201  // once per TLB. For timing mode, a new instance is generated for every
202  // TLB miss.
203  DPRINTF(TLBVerbose, "creating new instance of WalkerState\n");
204 
205  currState = new WalkerState();
206  currState->tableWalker = this;
207  } else if (_functional) {
208  // If we are mixing functional mode with timing (or even
209  // atomic), we need to to be careful and clean up after
210  // ourselves to not risk getting into an inconsistent state.
211  DPRINTF(TLBVerbose, "creating functional instance of WalkerState\n");
212  savedCurrState = currState;
213  currState = new WalkerState();
214  currState->tableWalker = this;
215  } else if (_timing) {
216  // This is a translation that was completed and then faulted again
217  // because some underlying parameters that affect the translation
218  // changed out from under us (e.g. asid). It will either be a
219  // misprediction, in which case nothing will happen or we'll use
220  // this fault to re-execute the faulting instruction which should clean
221  // up everything.
222  if (currState->vaddr_tainted == _req->getVaddr()) {
224  return std::make_shared<ReExec>();
225  }
226  }
227  pendingChange();
228 
230  currState->tc = _tc;
231  // ARM DDI 0487A.f (ARMv8 ARM) pg J8-5672
232  // aarch32/translation/translation/AArch32.TranslateAddress dictates
233  // even AArch32 EL0 will use AArch64 translation if EL1 is in AArch64.
234  if (isStage2) {
235  currState->el = EL1;
236  currState->aarch64 = ELIs64(_tc, EL2);
237  } else {
238  currState->el =
240  currState->aarch64 =
241  ELIs64(_tc, currState->el == EL0 ? EL1 : currState->el);
242  }
// Record all per-request parameters on the walker state.
243  currState->transState = _trans;
244  currState->req = _req;
246  currState->asid = _asid;
247  currState->vmid = _vmid;
248  currState->isHyp = _isHyp;
249  currState->timing = _timing;
250  currState->functional = _functional;
251  currState->mode = _mode;
253  currState->isSecure = secure;
255 
// vaddr_tainted keeps the raw VA (including tag bits); the elided lines
// derive the effective vaddr used for the walk.
258  currState->vaddr_tainted = currState->req->getVaddr();
259  if (currState->aarch64)
263  else
265 
266  if (currState->aarch64) {
267  if (isStage2) {
270  } else switch (currState->el) {
271  case EL0:
272  case EL1:
275  break;
276  case EL2:
277  assert(_haveVirtualization);
280  break;
281  case EL3:
282  assert(haveSecurity);
285  break;
286  default:
287  panic("Invalid exception level");
288  break;
289  }
291  } else {
299  }
300  sctlr = currState->sctlr;
301 
304 
306 
// A stage-2 walk never itself requires another stage-2 translation.
307  currState->stage2Req = _stage2Req && !isStage2;
308 
309  bool long_desc_format = currState->aarch64 || _isHyp || isStage2 ||
311 
312  if (long_desc_format) {
313  // Helper variables used for hierarchical permissions
315  currState->rwTable = true;
316  currState->userTable = true;
317  currState->xnTable = false;
318  currState->pxnTable = false;
319 
321  } else {
323  }
324 
// Atomic/functional mode: run the whole walk synchronously.
325  if (!currState->timing) {
326  Fault fault = NoFault;
327  if (currState->aarch64)
328  fault = processWalkAArch64();
329  else if (long_desc_format)
330  fault = processWalkLPAE();
331  else
332  fault = processWalk();
333 
334  // If this was a functional non-timing access restore state to
335  // how we found it.
336  if (currState->functional) {
337  delete currState;
338  currState = savedCurrState;
339  }
340  return fault;
341  }
342 
// Timing mode: serialize walks; start now only if nothing is pending.
343  if (pending || pendingQueue.size()) {
344  pendingQueue.push_back(currState);
345  currState = NULL;
346  pendingChange();
347  } else {
348  pending = true;
349  pendingChange();
350  if (currState->aarch64)
351  return processWalkAArch64();
352  else if (long_desc_format)
353  return processWalkLPAE();
354  else
355  return processWalk();
356  }
357 
358  return NoFault;
359 }
360 
361 void
// processWalkWrapper(): timing-mode event handler that services the head
// of the pending queue. Starts the walk if it is still needed; otherwise
// squashes up to numSquashable already-satisfied or squashed requests.
// NOTE(review): hyperlinked lines (e.g. the tlb->lookup call that binds
// 'te', and the finish()/translateTiming() argument lines) are elided
// from this rendered listing. (Signature line also elided.)
363 {
364  assert(!currState);
365  assert(pendingQueue.size());
366  pendingChange();
367  currState = pendingQueue.front();
368 
369  // Check if a previous walk filled this request already
370  // @TODO Should this always be the TLB or should we look in the stage2 TLB?
372  currState->vmid, currState->isHyp, currState->isSecure, true, false,
373  currState->el);
374 
375  // Check if we still need to have a walk for this request. If the requesting
376  // instruction has been squashed, or a previous walk has filled the TLB with
377  // a match, we just want to get rid of the walk. The latter could happen
378  // when there are multiple outstanding misses to a single page and a
379  // previous request has been successfully translated.
380  if (!currState->transState->squashed() && !te) {
381  // We've got a valid request, lets process it
382  pending = true;
383  pendingQueue.pop_front();
384  // Keep currState in case one of the processWalk... calls NULLs it
385  WalkerState *curr_state_copy = currState;
386  Fault f;
387  if (currState->aarch64)
388  f = processWalkAArch64();
389  else if (longDescFormatInUse(currState->tc) ||
391  f = processWalkLPAE();
392  else
393  f = processWalk();
394 
// On an immediate fault, complete the translation here and free the
// walker state; otherwise completion happens from a descriptor event.
395  if (f != NoFault) {
396  curr_state_copy->transState->finish(f, curr_state_copy->req,
397  curr_state_copy->tc, curr_state_copy->mode);
398 
399  delete curr_state_copy;
400  }
401  return;
402  }
403 
404 
405  // If the instruction that we were translating for has been
406  // squashed we shouldn't bother.
407  unsigned num_squashed = 0;
409  while ((num_squashed < numSquashable) && currState &&
410  (currState->transState->squashed() || te)) {
411  pendingQueue.pop_front();
412  num_squashed++;
414 
415  DPRINTF(TLB, "Squashing table walk for address %#x\n",
417 
418  if (currState->transState->squashed()) {
419  // finish the translation which will delete the translation object
421  std::make_shared<UnimpFault>("Squashed Inst"),
423  } else {
424  // translate the request now that we know it will work
428 
429  }
430 
431  // delete the current request
432  delete currState;
433 
434  // peak at the next one
435  if (pendingQueue.size()) {
436  currState = pendingQueue.front();
439  false, currState->el);
440  } else {
441  // Terminate the loop, nothing more to do
442  currState = NULL;
443  }
444  }
445  pendingChange();
446 
447  // if we still have pending translations, schedule more work
448  nextWalk(tc);
449  currState = NULL;
450 }
451 
452 Fault
// processWalk(): start a short-descriptor (non-LPAE) walk. Selects
// TTBR0 vs TTBR1 from TTBCR.N and the VA, honours the PD0/PD1
// walk-disable bits, computes the L1 descriptor address and issues the
// descriptor fetch.
// NOTE(review): hyperlinked lines (the signature, ttbr0/ttbr1 register
// reads, fault-constructor argument lines and flag setup) are elided
// from this rendered listing.
454 {
455  Addr ttbr = 0;
456 
457  // For short descriptors, translation configs are held in
458  // TTBR1.
461 
// Walks are uncacheable when both inner-cacheability bits are clear.
462  const auto irgn0_mask = 0x1;
463  const auto irgn1_mask = 0x40;
464  currState->isUncacheable = (ttbr1 & (irgn0_mask | irgn1_mask)) == 0;
465 
466  // If translation isn't enabled, we shouldn't be here
467  assert(currState->sctlr.m || isStage2);
468  const bool is_atomic = currState->req->isAtomic();
469 
470  DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
472  32 - currState->ttbcr.n));
473 
475 
476  if (currState->ttbcr.n == 0 || !mbits(currState->vaddr, 31,
477  32 - currState->ttbcr.n)) {
478  DPRINTF(TLB, " - Selecting TTBR0\n");
479  // Check if table walk is allowed when Security Extensions are enabled
480  if (haveSecurity && currState->ttbcr.pd0) {
481  if (currState->isFetch)
482  return std::make_shared<PrefetchAbort>(
485  isStage2,
487  else
488  return std::make_shared<DataAbort>(
491  is_atomic ? false : currState->isWrite,
494  }
497  } else {
498  DPRINTF(TLB, " - Selecting TTBR1\n");
499  // Check if table walk is allowed when Security Extensions are enabled
500  if (haveSecurity && currState->ttbcr.pd1) {
501  if (currState->isFetch)
502  return std::make_shared<PrefetchAbort>(
505  isStage2,
507  else
508  return std::make_shared<DataAbort>(
511  is_atomic ? false : currState->isWrite,
514  }
515  ttbr = ttbr1;
// TTBR1 walks behave as if TTBCR.N were zero when indexing the table.
516  currState->ttbcr.n = 0;
517  }
518 
519  Addr l1desc_addr = mbits(ttbr, 31, 14 - currState->ttbcr.n) |
520  (bits(currState->vaddr, 31 - currState->ttbcr.n, 20) << 2);
521  DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr,
522  currState->isSecure ? "s" : "ns");
523 
524  // Trickbox address check
525  Fault f;
526  f = testWalk(l1desc_addr, sizeof(uint32_t),
528  if (f) {
529  DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
530  if (currState->timing) {
531  pending = false;
533  currState = NULL;
534  } else {
535  currState->tc = NULL;
536  currState->req = NULL;
537  }
538  return f;
539  }
540 
542  if (currState->sctlr.c == 0 || currState->isUncacheable) {
544  }
545 
546  if (currState->isSecure) {
547  flag.set(Request::SECURE);
548  }
549 
// Issue the L1 descriptor fetch; in timing mode completion is signalled
// through doL1DescEvent.
550  bool delayed;
551  delayed = fetchDescriptor(l1desc_addr, (uint8_t*)&currState->l1Desc.data,
552  sizeof(uint32_t), flag, L1, &doL1DescEvent,
554  if (!delayed) {
555  f = currState->fault;
556  }
557 
558  return f;
559 }
560 
561 Fault
// processWalkLPAE(): start an AArch32 long-descriptor (LPAE) walk.
// Chooses VTTBR/HTTBR/TTBR0/TTBR1 according to stage-2/hyp mode and the
// VA range, derives the starting lookup level, computes the first
// descriptor address and issues the fetch.
// NOTE(review): hyperlinked lines (the signature, base-register reads,
// fault-constructor argument lines, flag setup) are elided from this
// rendered listing.
563 {
564  Addr ttbr, ttbr0_max, ttbr1_min, desc_addr;
565  int tsz, n;
566  LookupLevel start_lookup_level = L1;
567 
568  DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n",
570 
572 
574  if (currState->isSecure)
575  flag.set(Request::SECURE);
576 
577  // work out which base address register to use, if in hyp mode we always
578  // use HTTBR
579  if (isStage2) {
580  DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n")
582  tsz = sext<4>(currState->vtcr.t0sz);
583  start_lookup_level = currState->vtcr.sl0 ? L1 : L2;
584  currState->isUncacheable = currState->vtcr.irgn0 == 0;
585  } else if (currState->isHyp) {
586  DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n");
588  tsz = currState->htcr.t0sz;
589  currState->isUncacheable = currState->htcr.irgn0 == 0;
590  } else {
591  assert(longDescFormatInUse(currState->tc));
592 
593  // Determine boundaries of TTBR0/1 regions
594  if (currState->ttbcr.t0sz)
595  ttbr0_max = (1ULL << (32 - currState->ttbcr.t0sz)) - 1;
596  else if (currState->ttbcr.t1sz)
597  ttbr0_max = (1ULL << 32) -
598  (1ULL << (32 - currState->ttbcr.t1sz)) - 1;
599  else
600  ttbr0_max = (1ULL << 32) - 1;
601  if (currState->ttbcr.t1sz)
602  ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz));
603  else
604  ttbr1_min = (1ULL << (32 - currState->ttbcr.t0sz));
605 
606  const bool is_atomic = currState->req->isAtomic();
607 
608  // The following code snippet selects the appropriate translation table base
609  // address (TTBR0 or TTBR1) and the appropriate starting lookup level
610  // depending on the address range supported by the translation table (ARM
611  // ARM issue C B3.6.4)
612  if (currState->vaddr <= ttbr0_max) {
613  DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n");
614  // Check if table walk is allowed
615  if (currState->ttbcr.epd0) {
616  if (currState->isFetch)
617  return std::make_shared<PrefetchAbort>(
620  isStage2,
622  else
623  return std::make_shared<DataAbort>(
626  is_atomic ? false : currState->isWrite,
628  isStage2,
630  }
633  tsz = currState->ttbcr.t0sz;
634  currState->isUncacheable = currState->ttbcr.irgn0 == 0;
635  if (ttbr0_max < (1ULL << 30)) // Upper limit < 1 GB
636  start_lookup_level = L2;
637  } else if (currState->vaddr >= ttbr1_min) {
638  DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
639  // Check if table walk is allowed
640  if (currState->ttbcr.epd1) {
641  if (currState->isFetch)
642  return std::make_shared<PrefetchAbort>(
645  isStage2,
647  else
648  return std::make_shared<DataAbort>(
651  is_atomic ? false : currState->isWrite,
653  isStage2,
655  }
658  tsz = currState->ttbcr.t1sz;
659  currState->isUncacheable = currState->ttbcr.irgn1 == 0;
660  // Lower limit >= 3 GB
661  if (ttbr1_min >= (1ULL << 31) + (1ULL << 30))
662  start_lookup_level = L2;
663  } else {
664  // Out of boundaries -> translation fault
665  if (currState->isFetch)
666  return std::make_shared<PrefetchAbort>(
669  isStage2,
671  else
672  return std::make_shared<DataAbort>(
675  is_atomic ? false : currState->isWrite,
678  }
679 
680  }
681 
682  // Perform lookup (ARM ARM issue C B3.6.6)
683  if (start_lookup_level == L1) {
684  n = 5 - tsz;
685  desc_addr = mbits(ttbr, 39, n) |
686  (bits(currState->vaddr, n + 26, 30) << 3);
687  DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
688  desc_addr, currState->isSecure ? "s" : "ns");
689  } else {
690  // Skip first-level lookup
691  n = (tsz >= 2 ? 14 - tsz : 12);
692  desc_addr = mbits(ttbr, 39, n) |
693  (bits(currState->vaddr, n + 17, 21) << 3);
694  DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
695  desc_addr, currState->isSecure ? "s" : "ns");
696  }
697 
698  // Trickbox address check
699  Fault f = testWalk(desc_addr, sizeof(uint64_t),
700  TlbEntry::DomainType::NoAccess, start_lookup_level);
701  if (f) {
702  DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
703  if (currState->timing) {
704  pending = false;
706  currState = NULL;
707  } else {
708  currState->tc = NULL;
709  currState->req = NULL;
710  }
711  return f;
712  }
713 
714  if (currState->sctlr.c == 0 || currState->isUncacheable) {
716  }
717 
718  currState->longDesc.lookupLevel = start_lookup_level;
719  currState->longDesc.aarch64 = false;
721 
// Issue the first long-descriptor fetch; timing-mode completion is
// signalled through the per-level LongDescEventByLevel event.
722  bool delayed = fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
723  sizeof(uint64_t), flag, start_lookup_level,
724  LongDescEventByLevel[start_lookup_level],
726  if (!delayed) {
727  f = currState->fault;
728  }
729 
730  return f;
731 }
732 
733 unsigned
735 {
736  if (tsz < 25)
737  return 25;
738  if (tsz > 48)
739  return 48;
740  return tsz;
741 }
742 
743 bool
745 {
746  return (currPhysAddrRange != MaxPhysAddrRange &&
747  bits(addr, MaxPhysAddrRange - 1, currPhysAddrRange));
748 }
749 
750 Fault
// processWalkAArch64(): start an AArch64 long-descriptor walk. Selects
// TTBR, table size, granule and PA range per exception level and VA
// range, derives the starting lookup level, checks for address-size
// faults, then issues the first descriptor fetch.
// NOTE(review): hyperlinked lines (the signature, ttbr register reads,
// GrainMap_tg0/tg1 initializers and fault-constructor argument lines)
// are elided from this rendered listing.
752 {
753  assert(currState->aarch64);
754 
755  DPRINTF(TLB, "Beginning table walk for address %#llx, TCR: %#llx\n",
757 
758  static const GrainSize GrainMap_tg0[] =
760  static const GrainSize GrainMap_tg1[] =
762 
764 
765  // Determine TTBR, table size, granule size and phys. address range
766  Addr ttbr = 0;
767  int tsz = 0, ps = 0;
768  GrainSize tg = Grain4KB; // grain size computed from tg* field
769  bool fault = false;
770 
771  LookupLevel start_lookup_level = MAX_LOOKUP_LEVELS;
772 
773  switch (currState->el) {
774  case EL0:
775  case EL1:
776  if (isStage2) {
777  DPRINTF(TLB, " - Selecting VTTBR0 (AArch64 stage 2)\n");
779  tsz = 64 - currState->vtcr.t0sz64;
780  tg = GrainMap_tg0[currState->vtcr.tg0];
781  // ARM DDI 0487A.f D7-2148
782  // The starting level of stage 2 translation depends on
783  // VTCR_EL2.SL0 and VTCR_EL2.TG0
784  LookupLevel __ = MAX_LOOKUP_LEVELS; // invalid level
785  uint8_t sl_tg = (currState->vtcr.sl0 << 2) | currState->vtcr.tg0;
786  static const LookupLevel SLL[] = {
787  L2, L3, L3, __, // sl0 == 0
788  L1, L2, L2, __, // sl0 == 1, etc.
789  L0, L1, L1, __,
790  __, __, __, __
791  };
792  start_lookup_level = SLL[sl_tg];
793  panic_if(start_lookup_level == MAX_LOOKUP_LEVELS,
794  "Cannot discern lookup level from vtcr.{sl0,tg0}");
795  ps = currState->vtcr.ps;
796  currState->isUncacheable = currState->vtcr.irgn0 == 0;
797  } else {
// Stage-1 EL0/EL1: top 16 VA bits select TTBR0 (all zeros) or
// TTBR1 (all ones); anything else is an invalid address.
798  switch (bits(currState->vaddr, 63,48)) {
799  case 0:
800  DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
802  tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
803  tg = GrainMap_tg0[currState->tcr.tg0];
804  currState->hpd = currState->tcr.hpd0;
805  currState->isUncacheable = currState->tcr.irgn0 == 0;
806  if (bits(currState->vaddr, 63, tsz) != 0x0 ||
807  currState->tcr.epd0)
808  fault = true;
809  break;
810  case 0xffff:
811  DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
813  tsz = adjustTableSizeAArch64(64 - currState->tcr.t1sz);
814  tg = GrainMap_tg1[currState->tcr.tg1];
815  currState->hpd = currState->tcr.hpd1;
816  currState->isUncacheable = currState->tcr.irgn1 == 0;
817  if (bits(currState->vaddr, 63, tsz) != mask(64-tsz) ||
818  currState->tcr.epd1)
819  fault = true;
820  break;
821  default:
822  // top two bytes must be all 0s or all 1s, else invalid addr
823  fault = true;
824  }
825  ps = currState->tcr.ips;
826  }
827  break;
828  case EL2:
829  switch(bits(currState->vaddr, 63,48)) {
830  case 0:
831  DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
833  tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
834  tg = GrainMap_tg0[currState->tcr.tg0];
835  currState->hpd = currState->hcr.e2h ?
836  currState->tcr.hpd0 : currState->tcr.hpd;
837  currState->isUncacheable = currState->tcr.irgn0 == 0;
838  break;
839 
// TTBR1 at EL2 is only valid under HCR_EL2.E2H (VHE).
840  case 0xffff:
841  DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
843  tsz = adjustTableSizeAArch64(64 - currState->tcr.t1sz);
844  tg = GrainMap_tg1[currState->tcr.tg1];
845  currState->hpd = currState->tcr.hpd1;
846  currState->isUncacheable = currState->tcr.irgn1 == 0;
847  if (bits(currState->vaddr, 63, tsz) != mask(64-tsz) ||
848  currState->tcr.epd1 || !currState->hcr.e2h)
849  fault = true;
850  break;
851 
852  default:
853  // invalid addr if top two bytes are not all 0s
854  fault = true;
855  }
856  ps = currState->tcr.ps;
857  break;
858  case EL3:
859  switch(bits(currState->vaddr, 63,48)) {
860  case 0:
861  DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
863  tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
864  tg = GrainMap_tg0[currState->tcr.tg0];
865  currState->hpd = currState->tcr.hpd;
866  currState->isUncacheable = currState->tcr.irgn0 == 0;
867  break;
868  default:
869  // invalid addr if top two bytes are not all 0s
870  fault = true;
871  }
872  ps = currState->tcr.ps;
873  break;
874  }
875 
876  const bool is_atomic = currState->req->isAtomic();
877 
// An invalid VA range or a disabled walk yields a translation fault.
878  if (fault) {
879  Fault f;
880  if (currState->isFetch)
881  f = std::make_shared<PrefetchAbort>(
885  else
886  f = std::make_shared<DataAbort>(
889  is_atomic ? false : currState->isWrite,
892 
893  if (currState->timing) {
894  pending = false;
896  currState = NULL;
897  } else {
898  currState->tc = NULL;
899  currState->req = NULL;
900  }
901  return f;
902 
903  }
904 
905  if (tg == ReservedGrain) {
906  warn_once("Reserved granule size requested; gem5's IMPLEMENTATION "
907  "DEFINED behavior takes this to mean 4KB granules\n");
908  tg = Grain4KB;
909  }
910 
911  // Determine starting lookup level
912  // See aarch64/translation/walk in Appendix G: ARMv8 Pseudocode Library
913  // in ARM DDI 0487A. These table values correspond to the cascading tests
914  // to compute the lookup level and are of the form
915  // (grain_size + N*stride), for N = {1, 2, 3}.
916  // A value of 64 will never succeed and a value of 0 will always succeed.
917  if (start_lookup_level == MAX_LOOKUP_LEVELS) {
918  struct GrainMap {
919  GrainSize grain_size;
920  unsigned lookup_level_cutoff[MAX_LOOKUP_LEVELS];
921  };
922  static const GrainMap GM[] = {
923  { Grain4KB, { 39, 30, 0, 0 } },
924  { Grain16KB, { 47, 36, 25, 0 } },
925  { Grain64KB, { 64, 42, 29, 0 } }
926  };
927 
928  const unsigned *lookup = NULL; // points to a lookup_level_cutoff
929 
930  for (unsigned i = 0; i < 3; ++i) { // choose entry of GM[]
931  if (tg == GM[i].grain_size) {
932  lookup = GM[i].lookup_level_cutoff;
933  break;
934  }
935  }
936  assert(lookup);
937 
938  for (int L = L0; L != MAX_LOOKUP_LEVELS; ++L) {
939  if (tsz > lookup[L]) {
940  start_lookup_level = (LookupLevel) L;
941  break;
942  }
943  }
944  panic_if(start_lookup_level == MAX_LOOKUP_LEVELS,
945  "Table walker couldn't find lookup level\n");
946  }
947 
948  int stride = tg - 3;
949 
950  // Determine table base address
951  int base_addr_lo = 3 + tsz - stride * (3 - start_lookup_level) - tg;
952  Addr base_addr = mbits(ttbr, 47, base_addr_lo);
953 
954  // Determine physical address size and raise an Address Size Fault if
955  // necessary
956  int pa_range = decodePhysAddrRange64(ps);
957  // Clamp to lower limit
958  if (pa_range > physAddrRange)
960  else
961  currState->physAddrRange = pa_range;
963  DPRINTF(TLB, "Address size fault before any lookup\n");
964  Fault f;
965  if (currState->isFetch)
966  f = std::make_shared<PrefetchAbort>(
968  ArmFault::AddressSizeLL + start_lookup_level,
969  isStage2,
971  else
972  f = std::make_shared<DataAbort>(
975  is_atomic ? false : currState->isWrite,
976  ArmFault::AddressSizeLL + start_lookup_level,
977  isStage2,
979 
980 
981  if (currState->timing) {
982  pending = false;
984  currState = NULL;
985  } else {
986  currState->tc = NULL;
987  currState->req = NULL;
988  }
989  return f;
990 
991  }
992 
993  // Determine descriptor address
994  Addr desc_addr = base_addr |
995  (bits(currState->vaddr, tsz - 1,
996  stride * (3 - start_lookup_level) + tg) << 3);
997 
998  // Trickbox address check
999  Fault f = testWalk(desc_addr, sizeof(uint64_t),
1000  TlbEntry::DomainType::NoAccess, start_lookup_level);
1001  if (f) {
1002  DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
1003  if (currState->timing) {
1004  pending = false;
1005  nextWalk(currState->tc);
1006  currState = NULL;
1007  } else {
1008  currState->tc = NULL;
1009  currState->req = NULL;
1010  }
1011  return f;
1012  }
1013 
1015  if (currState->sctlr.c == 0 || currState->isUncacheable) {
1016  flag.set(Request::UNCACHEABLE);
1017  }
1018 
1019  if (currState->isSecure) {
1020  flag.set(Request::SECURE);
1021  }
1022 
1023  currState->longDesc.lookupLevel = start_lookup_level;
1024  currState->longDesc.aarch64 = true;
1026 
// Timing mode defers completion to the per-level event; atomic and
// functional walks complete inline and report any fault immediately.
1027  if (currState->timing) {
1028  fetchDescriptor(desc_addr, (uint8_t*) &currState->longDesc.data,
1029  sizeof(uint64_t), flag, start_lookup_level,
1030  LongDescEventByLevel[start_lookup_level], NULL);
1031  } else {
1032  fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
1033  sizeof(uint64_t), flag, -1, NULL,
1035  f = currState->fault;
1036  }
1037 
1038  return f;
1039 }
1040 
1041 void
// memAttrs(): fill in a TLB entry's shareability/cacheability attributes
// from the short-descriptor TEX/C/B encoding, or — when SCTLR.TRE
// remapping is active with the MMU on — from the PRRR/NMRR remap
// registers.
// NOTE(review): the first signature line and a few hyperlinked
// attribute-assignment lines (te.mtype settings) are elided from this
// rendered listing.
1043  uint8_t texcb, bool s)
1044 {
1045  // Note: tc and sctlr local variables are hiding tc and sctrl class
1046  // variables
1047  DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s);
1048  te.shareable = false; // default value
1049  te.nonCacheable = false;
1050  te.outerShareable = false;
// TEX remap disabled (or enabled with MMU off): decode texcb directly.
1051  if (sctlr.tre == 0 || ((sctlr.tre == 1) && (sctlr.m == 0))) {
1052  switch(texcb) {
1053  case 0: // Stongly-ordered
1054  te.nonCacheable = true;
1056  te.shareable = true;
1057  te.innerAttrs = 1;
1058  te.outerAttrs = 0;
1059  break;
1060  case 1: // Shareable Device
1061  te.nonCacheable = true;
1063  te.shareable = true;
1064  te.innerAttrs = 3;
1065  te.outerAttrs = 0;
1066  break;
1067  case 2: // Outer and Inner Write-Through, no Write-Allocate
1069  te.shareable = s;
1070  te.innerAttrs = 6;
1071  te.outerAttrs = bits(texcb, 1, 0);
1072  break;
1073  case 3: // Outer and Inner Write-Back, no Write-Allocate
1075  te.shareable = s;
1076  te.innerAttrs = 7;
1077  te.outerAttrs = bits(texcb, 1, 0);
1078  break;
1079  case 4: // Outer and Inner Non-cacheable
1080  te.nonCacheable = true;
1082  te.shareable = s;
1083  te.innerAttrs = 0;
1084  te.outerAttrs = bits(texcb, 1, 0);
1085  break;
1086  case 5: // Reserved
1087  panic("Reserved texcb value!\n");
1088  break;
1089  case 6: // Implementation Defined
1090  panic("Implementation-defined texcb value!\n");
1091  break;
1092  case 7: // Outer and Inner Write-Back, Write-Allocate
1094  te.shareable = s;
1095  te.innerAttrs = 5;
1096  te.outerAttrs = 1;
1097  break;
1098  case 8: // Non-shareable Device
1099  te.nonCacheable = true;
1101  te.shareable = false;
1102  te.innerAttrs = 3;
1103  te.outerAttrs = 0;
1104  break;
1105  case 9 ... 15: // Reserved
1106  panic("Reserved texcb value!\n");
1107  break;
1108  case 16 ... 31: // Cacheable Memory
1110  te.shareable = s;
1111  if (bits(texcb, 1,0) == 0 || bits(texcb, 3,2) == 0)
1112  te.nonCacheable = true;
1113  te.innerAttrs = bits(texcb, 1, 0);
1114  te.outerAttrs = bits(texcb, 3, 2);
1115  break;
1116  default:
1117  panic("More than 32 states for 5 bits?\n");
1118  }
1119  } else {
// TEX remap enabled: the low three texcb bits index PRRR/NMRR fields.
1120  assert(tc);
1121  PRRR prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR,
1123  NMRR nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR,
1125  DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr);
1126  uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0;
1127  switch(bits(texcb, 2,0)) {
1128  case 0:
1129  curr_tr = prrr.tr0;
1130  curr_ir = nmrr.ir0;
1131  curr_or = nmrr.or0;
1132  te.outerShareable = (prrr.nos0 == 0);
1133  break;
1134  case 1:
1135  curr_tr = prrr.tr1;
1136  curr_ir = nmrr.ir1;
1137  curr_or = nmrr.or1;
1138  te.outerShareable = (prrr.nos1 == 0);
1139  break;
1140  case 2:
1141  curr_tr = prrr.tr2;
1142  curr_ir = nmrr.ir2;
1143  curr_or = nmrr.or2;
1144  te.outerShareable = (prrr.nos2 == 0);
1145  break;
1146  case 3:
1147  curr_tr = prrr.tr3;
1148  curr_ir = nmrr.ir3;
1149  curr_or = nmrr.or3;
1150  te.outerShareable = (prrr.nos3 == 0);
1151  break;
1152  case 4:
1153  curr_tr = prrr.tr4;
1154  curr_ir = nmrr.ir4;
1155  curr_or = nmrr.or4;
1156  te.outerShareable = (prrr.nos4 == 0);
1157  break;
1158  case 5:
1159  curr_tr = prrr.tr5;
1160  curr_ir = nmrr.ir5;
1161  curr_or = nmrr.or5;
1162  te.outerShareable = (prrr.nos5 == 0);
1163  break;
1164  case 6:
1165  panic("Imp defined type\n");
1166  case 7:
1167  curr_tr = prrr.tr7;
1168  curr_ir = nmrr.ir7;
1169  curr_or = nmrr.or7;
1170  te.outerShareable = (prrr.nos7 == 0);
1171  break;
1172  }
1173 
// Decode the remapped memory type and its shareability.
1174  switch(curr_tr) {
1175  case 0:
1176  DPRINTF(TLBVerbose, "StronglyOrdered\n");
1178  te.nonCacheable = true;
1179  te.innerAttrs = 1;
1180  te.outerAttrs = 0;
1181  te.shareable = true;
1182  break;
1183  case 1:
1184  DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n",
1185  prrr.ds1, prrr.ds0, s);
1187  te.nonCacheable = true;
1188  te.innerAttrs = 3;
1189  te.outerAttrs = 0;
1190  if (prrr.ds1 && s)
1191  te.shareable = true;
1192  if (prrr.ds0 && !s)
1193  te.shareable = true;
1194  break;
1195  case 2:
1196  DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n",
1197  prrr.ns1, prrr.ns0, s);
1199  if (prrr.ns1 && s)
1200  te.shareable = true;
1201  if (prrr.ns0 && !s)
1202  te.shareable = true;
1203  break;
1204  case 3:
1205  panic("Reserved type");
1206  }
1207 
// Inner- and outer-cacheability remap (NMRR IRn/ORn fields).
1209  switch(curr_ir) {
1210  case 0:
1211  te.nonCacheable = true;
1212  te.innerAttrs = 0;
1213  break;
1214  case 1:
1215  te.innerAttrs = 5;
1216  break;
1217  case 2:
1218  te.innerAttrs = 6;
1219  break;
1220  case 3:
1221  te.innerAttrs = 7;
1222  break;
1223  }
1224 
1225  switch(curr_or) {
1226  case 0:
1227  te.nonCacheable = true;
1228  te.outerAttrs = 0;
1229  break;
1230  case 1:
1231  te.outerAttrs = 1;
1232  break;
1233  case 2:
1234  te.outerAttrs = 2;
1235  break;
1236  case 3:
1237  te.outerAttrs = 3;
1238  break;
1239  }
1240  }
1241  }
1242  DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, "
1243  "outerAttrs: %d\n",
1244  te.shareable, te.innerAttrs, te.outerAttrs);
1245  te.setAttributes(false);
1246 }
1247 
// TableWalker::memAttrsLPAE — fill in the memory attributes (memory type,
// inner/outer cacheability, shareability) of TlbEntry `te` from a
// long-format (LPAE) descriptor.  Per the member index at the bottom of
// this listing the signature is:
//   void memAttrsLPAE(ThreadContext *tc, TlbEntry &te,
//                     LongDescriptor &lDescriptor)
// NOTE(review): this is a Doxygen scrape; gaps in the embedded source line
// numbers (1249, 1267, 1272, 1302, 1304, 1312, 1334, ...) mark lines the
// scrape dropped, including the signature line itself.
1248 void
1250  LongDescriptor &lDescriptor)
1251 {
// Long-descriptor attributes only exist when LPAE is implemented.
1252  assert(_haveLPAE);
1253 
1254  uint8_t attr;
1255  uint8_t sh = lDescriptor.sh();
1256  // Different format and source of attributes if this is a stage 2
1257  // translation
1258  if (isStage2) {
// Stage 2: attributes come directly from the descriptor's MemAttr field
// (no MAIR indirection).
1259  attr = lDescriptor.memAttr();
1260  uint8_t attr_3_2 = (attr >> 2) & 0x3;
1261  uint8_t attr_1_0 = attr & 0x3;
1262 
1263  DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh);
1264 
// MemAttr[3:2] == 0 => Strongly-ordered or Device memory, both treated
// as non-cacheable.  (Line 1267, the Device alternative of the ternary,
// is missing from this listing.)
1265  if (attr_3_2 == 0) {
1266  te.mtype = attr_1_0 == 0 ? TlbEntry::MemoryType::StronglyOrdered
1268  te.outerAttrs = 0;
1269  te.innerAttrs = attr_1_0 == 0 ? 1 : 3;
1270  te.nonCacheable = true;
1271  } else {
// Normal memory: map the 2-bit outer/inner encodings onto the TLB
// entry's attribute encodings; value 1 (non-cacheable) in either
// field forces nonCacheable.  (Line 1272, presumably the mtype
// assignment, is missing from this listing.)
1273  te.outerAttrs = attr_3_2 == 1 ? 0 :
1274  attr_3_2 == 2 ? 2 : 1;
1275  te.innerAttrs = attr_1_0 == 1 ? 0 :
1276  attr_1_0 == 2 ? 6 : 5;
1277  te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1);
1278  }
1279  } else {
// Stage 1: AttrIndx selects one byte out of the (banked) MAIR0/MAIR1
// registers; MAIR1 holds indices 4-7.
1280  uint8_t attrIndx = lDescriptor.attrIndx();
1281 
1282  // LPAE always uses remapping of memory attributes, irrespective of the
1283  // value of SCTLR.TRE
1284  MiscRegIndex reg = attrIndx & 0x4 ? MISCREG_MAIR1 : MISCREG_MAIR0;
1285  int reg_as_int = snsBankedIndex(reg, currState->tc,
1286  !currState->isSecure);
1287  uint32_t mair = currState->tc->readMiscReg(reg_as_int);
1288  attr = (mair >> (8 * (attrIndx % 4))) & 0xff;
1289  uint8_t attr_7_4 = bits(attr, 7, 4);
1290  uint8_t attr_3_0 = bits(attr, 3, 0);
1291  DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx, sh, attr);
1292 
1293  // Note: the memory subsystem only cares about the 'cacheable' memory
1294  // attribute. The other attributes are only used to fill the PAR register
1295  // accordingly to provide the illusion of full support
1296  te.nonCacheable = false;
1297 
// Decode attr[7:4] (outer attributes / memory type class).  The mtype
// assignments at the dropped lines 1302, 1304 and 1312 are not visible
// in this listing.
1298  switch (attr_7_4) {
1299  case 0x0:
1300  // Strongly-ordered or Device memory
1301  if (attr_3_0 == 0x0)
1303  else if (attr_3_0 == 0x4)
1305  else
1306  panic("Unpredictable behavior\n");
1307  te.nonCacheable = true;
1308  te.outerAttrs = 0;
1309  break;
1310  case 0x4:
1311  // Normal memory, Outer Non-cacheable
1313  te.outerAttrs = 0;
1314  if (attr_3_0 == 0x4)
1315  // Inner Non-cacheable
1316  te.nonCacheable = true;
1317  else if (attr_3_0 < 0x8)
1318  panic("Unpredictable behavior\n");
1319  break;
1320  case 0x8:
1321  case 0x9:
1322  case 0xa:
1323  case 0xb:
1324  case 0xc:
1325  case 0xd:
1326  case 0xe:
1327  case 0xf:
// Normal memory, Outer Cacheable: bit 2 of attr[7:4] distinguishes
// write-back (allocate hint in bit 0) from write-through.
1328  if (attr_7_4 & 0x4) {
1329  te.outerAttrs = (attr_7_4 & 1) ? 1 : 3;
1330  } else {
1331  te.outerAttrs = 0x2;
1332  }
1333  // Normal memory, Outer Cacheable
1335  if (attr_3_0 != 0x4 && attr_3_0 < 0x8)
1336  panic("Unpredictable behavior\n");
1337  break;
1338  default:
1339  panic("Unpredictable behavior\n");
1340  break;
1341  }
1342 
// Decode attr[3:0] into the inner attribute encoding of the TLB entry.
1343  switch (attr_3_0) {
1344  case 0x0:
1345  te.innerAttrs = 0x1;
1346  break;
1347  case 0x4:
1348  te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0;
1349  break;
1350  case 0x8:
1351  case 0x9:
1352  case 0xA:
1353  case 0xB:
1354  te.innerAttrs = 6;
1355  break;
1356  case 0xC:
1357  case 0xD:
1358  case 0xE:
1359  case 0xF:
1360  te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7;
1361  break;
1362  default:
1363  panic("Unpredictable behavior\n");
1364  break;
1365  }
1366  }
1367 
// Shareability from the descriptor's SH field; the raw attribute byte is
// also stashed in attributes[63:56] for PAR reporting.
1368  te.outerShareable = sh == 2;
1369  te.shareable = (sh & 0x2) ? true : false;
1370  te.setAttributes(true);
1371  te.attributes |= (uint64_t) attr << 56;
1372 }
1373 
// TableWalker::memAttrsAArch64 — fill in the memory attributes of TlbEntry
// `te` for an AArch64 translation.  The signature line (1375) is missing
// from this listing; the call site in insertTableEntry,
// memAttrsAArch64(currState->tc, te, lDescriptor), grounds the parameter
// list as (ThreadContext *tc, TlbEntry &te, LongDescriptor &lDescriptor).
// NOTE(review): Doxygen scrape — jumps in the embedded line numbers mark
// dropped source lines (e.g. 1392, 1397, 1436, 1455).
1374 void
1376  LongDescriptor &lDescriptor)
1377 {
1378  uint8_t attr;
1379  uint8_t attr_hi;
1380  uint8_t attr_lo;
1381  uint8_t sh = lDescriptor.sh();
1382 
1383  if (isStage2) {
// Stage 2: attributes come from the descriptor's MemAttr field.
// NOTE(review): these locals shadow the outer attr_hi/attr_lo declared
// above — intentional-looking but worth confirming upstream.
1384  attr = lDescriptor.memAttr();
1385  uint8_t attr_hi = (attr >> 2) & 0x3;
1386  uint8_t attr_lo = attr & 0x3;
1387 
1388  DPRINTF(TLBVerbose, "memAttrsAArch64 MemAttr:%#x sh:%#x\n", attr, sh);
1389 
// MemAttr[3:2] == 0 => Strongly-ordered or Device memory (the Device
// alternative of the ternary, line 1392, is missing from this listing).
1390  if (attr_hi == 0) {
1391  te.mtype = attr_lo == 0 ? TlbEntry::MemoryType::StronglyOrdered
1393  te.outerAttrs = 0;
1394  te.innerAttrs = attr_lo == 0 ? 1 : 3;
1395  te.nonCacheable = true;
1396  } else {
1398  te.outerAttrs = attr_hi == 1 ? 0 :
1399  attr_hi == 2 ? 2 : 1;
1400  te.innerAttrs = attr_lo == 1 ? 0 :
1401  attr_lo == 2 ? 6 : 5;
1402  // Treat write-through memory as uncacheable, this is safe
1403  // but for performance reasons not optimal.
1404  te.nonCacheable = (attr_hi == 1) || (attr_hi == 2) ||
1405  (attr_lo == 1) || (attr_lo == 2);
1406  }
1407  } else {
// Stage 1: AttrIndx selects one byte out of MAIR_ELx for the current
// exception level.
1408  uint8_t attrIndx = lDescriptor.attrIndx();
1409 
1410  DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh);
1411 
1412  // Select MAIR
1413  uint64_t mair;
1414  switch (currState->el) {
1415  case EL0:
1416  case EL1:
1417  mair = tc->readMiscReg(MISCREG_MAIR_EL1);
1418  break;
1419  case EL2:
1420  mair = tc->readMiscReg(MISCREG_MAIR_EL2);
1421  break;
1422  case EL3:
1423  mair = tc->readMiscReg(MISCREG_MAIR_EL3);
1424  break;
1425  default:
1426  panic("Invalid exception level");
1427  break;
1428  }
1429 
1430  // Select attributes
1431  attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
1432  attr_lo = bits(attr, 3, 0);
1433  attr_hi = bits(attr, 7, 4);
1434 
1435  // Memory type
// (Line 1436, presumably the te.mtype assignment, is missing from this
// listing.)
1437 
1438  // Cacheability
1439  te.nonCacheable = false;
1440  if (te.mtype == TlbEntry::MemoryType::Device) { // Device memory
1441  te.nonCacheable = true;
1442  }
1443  // Treat write-through memory as uncacheable, this is safe
1444  // but for performance reasons not optimal.
1445  switch (attr_hi) {
1446  case 0x1 ... 0x3: // Normal Memory, Outer Write-through transient
1447  case 0x4: // Normal memory, Outer Non-cacheable
1448  case 0x8 ... 0xb: // Normal Memory, Outer Write-through non-transient
1449  te.nonCacheable = true;
1450  }
1451  switch (attr_lo) {
1452  case 0x1 ... 0x3: // Normal Memory, Inner Write-through transient
1453  case 0x9 ... 0xb: // Normal Memory, Inner Write-through non-transient
1454  warn_if(!attr_hi, "Unpredictable behavior");
// (Line 1455, between the warn_if and the next case label, is missing
// from this listing — possibly a fallthrough annotation.)
1456  case 0x4: // Device-nGnRE memory or
1457  // Normal memory, Inner Non-cacheable
1458  case 0x8: // Device-nGRE memory or
1459  // Normal memory, Inner Write-through non-transient
1460  te.nonCacheable = true;
1461  }
1462 
// Shareability and the 64-bit PAR-format attribute word.  Note this
// stage-1 path assigns te.shareable from (sh == 2) and outerShareable
// from bit 1 of sh — the reverse of memAttrsLPAE; confirm against the
// upstream source before relying on either.
1463  te.shareable = sh == 2;
1464  te.outerShareable = (sh & 0x2) ? true : false;
1465  // Attributes formatted according to the 64-bit PAR
1466  te.attributes = ((uint64_t) attr << 56) |
1467  (1 << 11) | // LPAE bit
1468  (te.ns << 9) | // NS bit
1469  (sh << 7);
1470  }
1471 }
1472 
// TableWalker::doL1Descriptor — process a fetched short-descriptor L1
// entry: fault on invalid/reserved types, finish the walk for a Section,
// or issue the fetch of the L2 descriptor for a page table pointer.
// The name/signature line (1474) and several argument lines of the fault
// constructors are missing from this Doxygen scrape (gaps in the embedded
// line numbers); the call from doL1DescriptorWrapper grounds the name.
1473 void
1475 {
// A fault from the preceding descriptor fetch aborts the walk here.
1476  if (currState->fault != NoFault) {
1477  return;
1478  }
1479 
1481  byteOrder(currState->tc));
1482 
1483  DPRINTF(TLB, "L1 descriptor for %#x is %#x\n",
1485  TlbEntry te;
1486 
1487  const bool is_atomic = currState->req->isAtomic();
1488 
1489  switch (currState->l1Desc.type()) {
1490  case L1Descriptor::Ignore:
// Reserved/ignored entry: raise a prefetch abort for instruction
// fetches, a data abort otherwise (several constructor-argument lines
// are missing from this listing).
1492  if (!currState->timing) {
1493  currState->tc = NULL;
1494  currState->req = NULL;
1495  }
1496  DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
1497  if (currState->isFetch)
1498  currState->fault =
1499  std::make_shared<PrefetchAbort>(
1502  isStage2,
1504  else
1505  currState->fault =
1506  std::make_shared<DataAbort>(
1509  is_atomic ? false : currState->isWrite,
1512  return;
1513  case L1Descriptor::Section:
// With SCTLR.AFE set, AP[0] == 0 means the access flag is clear:
// generate an access-flag data abort.
1514  if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) {
1520  currState->fault = std::make_shared<DataAbort>(
1522  currState->l1Desc.domain(),
1523  is_atomic ? false : currState->isWrite,
1525  isStage2,
1527  }
1528  if (currState->l1Desc.supersection()) {
1529  panic("Haven't implemented supersections\n");
1531  }
1532  return;
1534  {
// Page-table pointer: compute the L2 descriptor address from the L1
// entry's base and VA[19:12].
1535  Addr l2desc_addr;
1536  l2desc_addr = currState->l1Desc.l2Addr() |
1537  (bits(currState->vaddr, 19, 12) << 2);
1538  DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n",
1539  l2desc_addr, currState->isSecure ? "s" : "ns");
1540 
1541  // Trickbox address check
1542  currState->fault = testWalk(l2desc_addr, sizeof(uint32_t),
1543  currState->l1Desc.domain(), L2);
1544 
1545  if (currState->fault) {
1546  if (!currState->timing) {
1547  currState->tc = NULL;
1548  currState->req = NULL;
1549  }
1550  return;
1551  }
1552 
1554 
// Walk accesses inherit uncacheable/secure request flags from the
// translation state.
1555  if (currState->sctlr.c == 0 || currState->isUncacheable) {
1556  flag.set(Request::UNCACHEABLE);
1557  }
1558 
1559  if (currState->isSecure)
1560  flag.set(Request::SECURE);
1561 
// Kick off the L2 descriptor fetch; in timing mode this returns true
// and the walk resumes in doL2DescriptorWrapper.
1562  bool delayed;
1563  delayed = fetchDescriptor(l2desc_addr,
1564  (uint8_t*)&currState->l2Desc.data,
1565  sizeof(uint32_t), flag, -1, &doL2DescEvent,
1567  if (delayed) {
1568  currState->delayed = true;
1569  }
1570 
1571  return;
1572  }
1573  default:
1574  panic("A new type in a 2 bit field?\n");
1575  }
1576 }
1577 
// TableWalker::generateLongDescFault — build the appropriate abort fault
// for a failed long-descriptor walk: a PrefetchAbort for instruction
// fetches, a DataAbort otherwise.  The name/signature line (1579) and
// several constructor-argument lines are missing from this Doxygen scrape;
// the call site in doLongDescriptor,
// generateLongDescFault(fault_source), grounds the name and that it takes
// the fault source as a parameter.
1578 Fault
1580 {
1581  if (currState->isFetch) {
1582  return std::make_shared<PrefetchAbort>(
1585  isStage2,
1587  } else {
1588  return std::make_shared<DataAbort>(
// Atomic accesses are reported as reads for fault purposes.
1591  currState->req->isAtomic() ? false : currState->isWrite,
1593  isStage2,
1595  }
1596 }
1597 
// TableWalker::doLongDescriptor — process a fetched long-format (LPAE /
// AArch64) descriptor: fault on invalid entries, terminate the walk on a
// Block/Page (after address-size and access-flag checks), or set up and
// issue the next-level table fetch.  The name/signature line (1599) and a
// number of interior lines are missing from this Doxygen scrape (gaps in
// the embedded line numbers).
1598 void
1600 {
// A fault from the preceding descriptor fetch aborts the walk here.
1601  if (currState->fault != NoFault) {
1602  return;
1603  }
1604 
1606  byteOrder(currState->tc));
1607 
1608  DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n",
1611  currState->aarch64 ? "AArch64" : "long-desc.");
1612 
1615  DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, pxn: %d, "
1616  "xn: %d, ap: %d, af: %d, type: %d\n",
1619  currState->longDesc.pxn(),
1620  currState->longDesc.xn(),
1621  currState->longDesc.ap(),
1622  currState->longDesc.af(),
1623  currState->longDesc.type())_
1624  } else {
1625  DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, type: %d\n",
1628  currState->longDesc.type());
1629  }
1630 
1631  TlbEntry te;
1632 
1633  switch (currState->longDesc.type()) {
// Invalid descriptor: generate a fault (the case label and fault
// generation lines 1634/1636-1639 are missing from this listing).
1635  DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
1638 
1640  if (!currState->timing) {
1641  currState->tc = NULL;
1642  currState->req = NULL;
1643  }
1644  return;
1645 
1646  case LongDescriptor::Block:
1647  case LongDescriptor::Page:
1648  {
// Leaf entry: check for address-size and access-flag faults before
// inserting; on success the walk terminates (the insert path at the
// dropped line 1671 is not visible here).
1649  auto fault_source = ArmFault::FaultSourceInvalid;
1650  // Check for address size fault
1655 
1656  DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1658  fault_source = ArmFault::AddressSizeLL;
1659 
1660  // Check for access fault
1661  } else if (currState->longDesc.af() == 0) {
1662 
1663  DPRINTF(TLB, "L%d descriptor causing Access Fault\n",
1665  fault_source = ArmFault::AccessFlagLL;
1666  }
1667 
1668  if (fault_source != ArmFault::FaultSourceInvalid) {
1669  currState->fault = generateLongDescFault(fault_source);
1670  } else {
1672  }
1673  }
1674  return;
1675  case LongDescriptor::Table:
1676  {
1677  // Set hierarchical permission flags
// (Lines 1678-1687, the hierarchical xn/pxn/rw/user/secure table-flag
// updates, are missing from this listing.)
1688 
1689  // Set up next level lookup
1690  Addr next_desc_addr = currState->longDesc.nextDescAddr(
1691  currState->vaddr);
1692 
1693  DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
1696  next_desc_addr,
1697  currState->secureLookup ? "s" : "ns");
1698 
1699  // Check for address size fault
1701  next_desc_addr, currState->physAddrRange)) {
1702  DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1704 
1707  return;
1708  }
1709 
1710  // Trickbox address check
1712  next_desc_addr, sizeof(uint64_t), TlbEntry::DomainType::Client,
1714 
1715  if (currState->fault) {
1716  if (!currState->timing) {
1717  currState->tc = NULL;
1718  currState->req = NULL;
1719  }
1720  return;
1721  }
1722 
// Walk accesses inherit secure/uncacheable request flags.
1724  if (currState->secureLookup)
1725  flag.set(Request::SECURE);
1726 
1727  if (currState->sctlr.c == 0 || currState->isUncacheable) {
1728  flag.set(Request::UNCACHEABLE);
1729  }
1730 
// Pick the completion event for the next lookup level; the L1 case
// deliberately falls through after asserting AArch64 (L1 table
// pointers only exist in the AArch64 regimes modelled here).
1733  Event *event = NULL;
1734  switch (L) {
1735  case L1:
1736  assert(currState->aarch64);
1737  case L2:
1738  case L3:
1739  event = LongDescEventByLevel[L];
1740  break;
1741  default:
1742  panic("Wrong lookup level in table walk\n");
1743  break;
1744  }
1745 
// Issue the next-level descriptor fetch; in timing mode the walk
// resumes in doLongDescriptorWrapper.
1746  bool delayed;
1747  delayed = fetchDescriptor(next_desc_addr, (uint8_t*)&currState->longDesc.data,
1748  sizeof(uint64_t), flag, -1, event,
1750  if (delayed) {
1751  currState->delayed = true;
1752  }
1753  }
1754  return;
1755  default:
1756  panic("A new type in a 2 bit field?\n");
1757  }
1758 }
1759 
// TableWalker::doL2Descriptor — process a fetched short-descriptor L2
// entry: fault if invalid or (with SCTLR.AFE) if the access flag is clear,
// otherwise finish the walk (the terminating call at the dropped line 1813
// is not visible in this Doxygen scrape).  The name/signature line (1761)
// is missing; the call from doL2DescriptorWrapper grounds the name.
1760 void
1762 {
1763  if (currState->fault != NoFault) {
1764  return;
1765  }
1766 
1768  byteOrder(currState->tc));
1769 
1770  DPRINTF(TLB, "L2 descriptor for %#x is %#x\n",
1772  TlbEntry te;
1773 
1774  const bool is_atomic = currState->req->isAtomic();
1775 
1776  if (currState->l2Desc.invalid()) {
// Invalid entry: prefetch abort on instruction fetch, data abort
// otherwise (constructor-argument lines missing from this listing).
1777  DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
1778  if (!currState->timing) {
1779  currState->tc = NULL;
1780  currState->req = NULL;
1781  }
1782  if (currState->isFetch)
1783  currState->fault = std::make_shared<PrefetchAbort>(
1786  isStage2,
1788  else
1789  currState->fault = std::make_shared<DataAbort>(
1791  is_atomic ? false : currState->isWrite,
1793  isStage2,
1795  return;
1796  }
1797 
// With SCTLR.AFE set, AP[0] == 0 means the access flag is clear:
// generate an access-flag data abort.
1798  if (currState->sctlr.afe && bits(currState->l2Desc.ap(), 0) == 0) {
1802  DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
1803  currState->sctlr.afe, currState->l2Desc.ap());
1804 
1805  currState->fault = std::make_shared<DataAbort>(
1808  is_atomic ? false : currState->isWrite,
1811  }
1812 
1814 }
1815 
// TableWalker::doL1DescriptorWrapper — timing-mode completion handler for
// an L1 descriptor fetch: pops the walk state from the L1 queue, runs
// doL1Descriptor(), then either finishes the translation (fault or no L2
// needed) or re-queues the state on the L2 queue.  The name/signature line
// (1817) is missing from this Doxygen scrape.
1816 void
1818 {
1819  currState = stateQueues[L1].front();
1820  currState->delayed = false;
1821  // if there's a stage2 translation object we don't need it any more
1822  if (currState->stage2Tran) {
1823  delete currState->stage2Tran;
1824  currState->stage2Tran = NULL;
1825  }
1826 
1827 
1828  DPRINTF(TLBVerbose, "L1 Desc object host addr: %p\n",&currState->l1Desc.data);
1829  DPRINTF(TLBVerbose, "L1 Desc object data: %08x\n",currState->l1Desc.data);
1830 
1831  DPRINTF(TLBVerbose, "calling doL1Descriptor for vaddr:%#x\n", currState->vaddr_tainted);
1832  doL1Descriptor();
1833 
1834  stateQueues[L1].pop_front();
1835  // Check if fault was generated
1836  if (currState->fault != NoFault) {
// Deliver the fault to the requester, then tear down this walk and
// start the next pending one (some argument lines missing from this
// listing).
1838  currState->tc, currState->mode);
1840 
1841  pending = false;
1842  nextWalk(currState->tc);
1843 
1844  currState->req = NULL;
1845  currState->tc = NULL;
1846  currState->delayed = false;
1847  delete currState;
1848  }
1849  else if (!currState->delayed) {
1850  // delay is not set so there is no L2 to do
1851  // Don't finish the translation if a stage 2 look up is underway
1853  DPRINTF(TLBVerbose, "calling translateTiming again\n");
1857 
1858  pending = false;
1859  nextWalk(currState->tc);
1860 
1861  currState->req = NULL;
1862  currState->tc = NULL;
1863  currState->delayed = false;
1864  delete currState;
1865  } else {
1866  // need to do L2 descriptor
1867  stateQueues[L2].push_back(currState);
1868  }
1869  currState = NULL;
1870 }
1871 
// TableWalker::doL2DescriptorWrapper — timing-mode completion handler for
// an L2 descriptor fetch: pops the walk state from the L2 queue, runs
// doL2Descriptor(), delivers the result (fault or retried translation),
// then tears down the walk state and starts the next pending walk.  The
// name/signature line (1873) is missing from this Doxygen scrape.
1872 void
1874 {
1875  currState = stateQueues[L2].front();
// An L2 fetch is only ever issued as a delayed continuation of an L1 walk.
1876  assert(currState->delayed);
1877  // if there's a stage2 translation object we don't need it any more
1878  if (currState->stage2Tran) {
1879  delete currState->stage2Tran;
1880  currState->stage2Tran = NULL;
1881  }
1882 
1883  DPRINTF(TLBVerbose, "calling doL2Descriptor for vaddr:%#x\n",
1885  doL2Descriptor();
1886 
1887  // Check if fault was generated
1888  if (currState->fault != NoFault) {
1890  currState->tc, currState->mode);
1892  } else {
1894  DPRINTF(TLBVerbose, "calling translateTiming again\n");
1898  }
1899 
1900 
// Unlike the L1 wrapper, an L2 descriptor always terminates the walk.
1901  stateQueues[L2].pop_front();
1902  pending = false;
1903  nextWalk(currState->tc);
1904 
1905  currState->req = NULL;
1906  currState->tc = NULL;
1907  currState->delayed = false;
1908 
1909  delete currState;
1910  currState = NULL;
1911 }
1912 
// Four per-lookup-level long-descriptor completion handlers.  Their names
// and one-line bodies (lines 1914, 1916, 1920, 1922, 1926, 1928, 1932,
// 1934) are missing from this Doxygen scrape; the member index mentions
// doL2LongDescriptorWrapper() and doL3LongDescriptorWrapper(), so these
// presumably are doL0..doL3LongDescriptorWrapper, each forwarding to
// doLongDescriptorWrapper(Ln) — TODO confirm against the gem5 source.
1913 void
1915 {
1917 }
1918 
1919 void
1921 {
1923 }
1924 
1925 void
1927 {
1929 }
1930 
1931 void
1933 {
1935 }
1936 
// TableWalker::doLongDescriptorWrapper — timing-mode completion handler
// for a long-descriptor fetch at `curr_lookup_level`: pops the walk state
// from that level's queue, runs doLongDescriptor(), then finishes the
// translation (fault or terminal descriptor) or continues at the next
// level.  The signature line (1938), which names the curr_lookup_level
// parameter used below, is missing from this Doxygen scrape.
1937 void
1939 {
1940  currState = stateQueues[curr_lookup_level].front();
1941  assert(curr_lookup_level == currState->longDesc.lookupLevel);
1942  currState->delayed = false;
1943 
1944  // if there's a stage2 translation object we don't need it any more
1945  if (currState->stage2Tran) {
1946  delete currState->stage2Tran;
1947  currState->stage2Tran = NULL;
1948  }
1949 
1950  DPRINTF(TLBVerbose, "calling doLongDescriptor for vaddr:%#x\n",
1952  doLongDescriptor();
1953 
1954  stateQueues[curr_lookup_level].pop_front();
1955 
1956  if (currState->fault != NoFault) {
1957  // A fault was generated
1959  currState->tc, currState->mode);
1960 
1961  pending = false;
1962  nextWalk(currState->tc);
1963 
1964  currState->req = NULL;
1965  currState->tc = NULL;
1966  currState->delayed = false;
1967  delete currState;
1968  } else if (!currState->delayed) {
1969  // No additional lookups required
1970  DPRINTF(TLBVerbose, "calling translateTiming again\n");
// Record which level this walk terminated at for the stats histogram.
1974  statWalksLongTerminatedAtLevel[(unsigned) curr_lookup_level]++;
1975 
1976  pending = false;
1977  nextWalk(currState->tc);
1978 
1979  currState->req = NULL;
1980  currState->tc = NULL;
1981  currState->delayed = false;
1982  delete currState;
1983  } else {
// Delayed: another level is needed.  The re-queue itself (line 1987)
// is missing from this listing.
1984  if (curr_lookup_level >= MAX_LOOKUP_LEVELS - 1)
1985  panic("Max. number of lookups already reached in table walk\n");
1986  // Need to perform additional lookups
1988  }
1989  currState = NULL;
1990 }
1991 
1992 
// TableWalker::nextWalk — after a walk completes, either schedule
// processing of the next queued request (the scheduling call at the
// dropped line 1997 is not visible in this scrape) or, if the queue is
// empty, signal that any pending drain can complete.  The name/signature
// line (1994) is missing from this Doxygen scrape.
1993 void
1995 {
1996  if (pendingQueue.size())
1998  else
1999  completeDrain();
2000 }
2001 
// TableWalker::fetchDescriptor — read `numBytes` of a translation-table
// descriptor at `descAddr` into `data`.
//
// If the walk has a stage 2 (currState->stage2Req), descAddr is an IPA and
// is first translated/read through the stage-2 MMU; otherwise the read
// goes out the walker's DMA port.  In timing mode the walk state is pushed
// onto stateQueues[queueIndex] (if queueIndex >= 0) and `event` fires when
// the data arrives; in atomic/functional mode the read completes inline
// and `doDescriptor` (a pointer-to-member continuation such as
// doL1Descriptor) is invoked immediately.
//
// Returns true iff the access is timing, i.e. the caller must treat the
// walk as delayed.  NOTE(review): this is a Doxygen scrape — lines 2020,
// 2027 and 2029 (parts of the stage-2 translation construction and the
// functional read) are missing.
2002 bool
2003 TableWalker::fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes,
2004  Request::Flags flags, int queueIndex, Event *event,
2005  void (TableWalker::*doDescriptor)())
2006 {
2007  bool isTiming = currState->timing;
2008 
2009  DPRINTF(TLBVerbose, "Fetching descriptor at address: 0x%x stage2Req: %d\n",
2010  descAddr, currState->stage2Req);
2011 
2012  // If this translation has a stage 2 then we know descAddr is an IPA and
2013  // needs to be translated before we can access the page table. Do that
2014  // check here.
2015  if (currState->stage2Req) {
2016  Fault fault;
2017 
2018  if (isTiming) {
// Timing: hand the read to the stage-2 MMU; its completion drives
// the rest of the walk.  stage2Tran is freed by the wrapper that
// resumes the walk.
2019  Stage2MMU::Stage2Translation *tran = new
2021  currState->vaddr);
2022  currState->stage2Tran = tran;
2023  stage2Mmu->readDataTimed(currState->tc, descAddr, tran, numBytes,
2024  flags);
2025  fault = tran->fault;
2026  } else {
// Atomic/functional: untimed stage-2 read straight into `data`.
2028  currState->vaddr, descAddr, data, numBytes, flags,
2030  }
2031 
2032  if (fault != NoFault) {
2033  currState->fault = fault;
2034  }
2035  if (isTiming) {
2036  if (queueIndex >= 0) {
2037  DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
2038  stateQueues[queueIndex].size());
2039  stateQueues[queueIndex].push_back(currState);
2040  currState = NULL;
2041  }
2042  } else {
2043  (this->*doDescriptor)();
2044  }
2045  } else {
2046  if (isTiming) {
// No stage 2: timing reads go out the DMA port and `event` is
// scheduled on completion.
2047  port->dmaAction(MemCmd::ReadReq, descAddr, numBytes, event, data,
2048  currState->tc->getCpuPtr()->clockPeriod(),flags);
2049  if (queueIndex >= 0) {
2050  DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
2051  stateQueues[queueIndex].size());
2052  stateQueues[queueIndex].push_back(currState);
2053  currState = NULL;
2054  }
2055  } else if (!currState->functional) {
// Atomic: blocking DMA read, then process the descriptor inline.
2056  port->dmaAction(MemCmd::ReadReq, descAddr, numBytes, NULL, data,
2057  currState->tc->getCpuPtr()->clockPeriod(), flags);
2058  (this->*doDescriptor)();
2059  } else {
// Functional: build a one-off packet and sendFunctional so no
// simulated time passes.
2060  RequestPtr req = std::make_shared<Request>(
2061  descAddr, numBytes, flags, masterId);
2062 
2063  req->taskId(ContextSwitchTaskId::DMA);
2064  PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
2065  pkt->dataStatic(data);
2066  port->sendFunctional(pkt);
2067  (this->*doDescriptor)();
2068  delete pkt;
2069  }
2070  }
2071  return (isTiming);
2072 }
2073 
// TableWalker::insertTableEntry — build a TlbEntry from a completed
// (terminal) descriptor and insert it into the TLB.
//
// descriptor:     the leaf descriptor (short or long format) that ended
//                 the walk.
// longDescriptor: true for LPAE/AArch64 long-format descriptors; selects
//                 which attribute/permission decoding is applied.
// NOTE(review): Doxygen scrape — lines 2099-2100 (between te.el and
// te.global) are missing from this listing.
2074 void
2075 TableWalker::insertTableEntry(DescriptorBase &descriptor, bool longDescriptor)
2076 {
2077  TlbEntry te;
2078 
2079  // Create and fill a new page table entry
2080  te.valid = true;
2081  te.longDescFormat = longDescriptor;
2082  te.isHyp = currState->isHyp;
2083  te.asid = currState->asid;
2084  te.vmid = currState->vmid;
// N is the page/block offset width; vpn/size are derived from it.
2085  te.N = descriptor.offsetBits();
2086  te.vpn = currState->vaddr >> te.N;
2087  te.size = (1<<te.N) - 1;
2088  te.pfn = descriptor.pfn();
2089  te.domain = descriptor.domain();
2090  te.lookupLevel = descriptor.lookupLevel;
// Stage 2 entries are always treated as non-secure.
2091  te.ns = !descriptor.secure(haveSecurity, currState) || isStage2;
2092  te.nstid = !currState->isSecure;
2093  te.xn = descriptor.xn();
2094  if (currState->aarch64)
2095  te.el = currState->el;
2096  else
2097  te.el = EL1;
2098 
2101 
2102  // ASID has no meaning for stage 2 TLB entries, so mark all stage 2 entries
2103  // as global
2104  te.global = descriptor.global(currState) || isStage2;
2105  if (longDescriptor) {
2106  LongDescriptor lDescriptor =
2107  dynamic_cast<LongDescriptor &>(descriptor);
2108 
// Hierarchical table bits accumulated during the walk combine with
// the leaf descriptor's own permissions.
2109  te.xn |= currState->xnTable;
2110  te.pxn = currState->pxnTable || lDescriptor.pxn();
2111  if (isStage2) {
2112  // this is actually the HAP field, but it's stored in the same bit
2113  // positions as the AP field in a stage 1 translation.
2114  te.hap = lDescriptor.ap();
2115  } else {
2116  te.ap = ((!currState->rwTable || descriptor.ap() >> 1) << 1) |
2117  (currState->userTable && (descriptor.ap() & 0x1));
2118  }
// Attribute decoding differs between AArch64 and AArch32 LPAE.
2119  if (currState->aarch64)
2120  memAttrsAArch64(currState->tc, te, lDescriptor);
2121  else
2122  memAttrsLPAE(currState->tc, te, lDescriptor);
2123  } else {
2124  te.ap = descriptor.ap();
2125  memAttrs(currState->tc, te, currState->sctlr, descriptor.texcb(),
2126  descriptor.shareable());
2127  }
2128 
2129  // Debug output
2130  DPRINTF(TLB, descriptor.dbgHeader().c_str());
2131  DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
2132  te.N, te.pfn, te.size, te.global, te.valid);
2133  DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
2134  "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
2135  te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp,
2136  te.nonCacheable, te.ns);
2137  DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
2138  descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
2139  descriptor.getRawData());
2140 
2141  // Insert the entry into the TLB
2142  tlb->insert(currState->vaddr, te);
// Non-timing walks are done with the context/request at this point.
2143  if (!currState->timing) {
2144  currState->tc = NULL;
2145  currState->req = NULL;
2146  }
2147 }
2148 
2150 ArmTableWalkerParams::create()
2151 {
2152  return new ArmISA::TableWalker(this);
2153 }
2154 
// TableWalker::toLookupLevel — checked conversion from a raw integer to
// the LookupLevel enum; panics on any value outside L1..L3.  The
// return-type line (2155) is missing from this Doxygen scrape.
2156 TableWalker::toLookupLevel(uint8_t lookup_level_as_int)
2157 {
2158  switch (lookup_level_as_int) {
2159  case L1:
2160  return L1;
2161  case L2:
2162  return L2;
2163  case L3:
2164  return L3;
2165  default:
2166  panic("Invalid lookup level conversion");
2167  }
2168 }
2169 
2170 /* this method keeps track of the table walker queue's residency, so
2171  * needs to be called whenever requests start and complete. */
2172 void
2173 {
// Count queued walks; currState is counted too when it is in flight but
// not (or no longer) at the head of pendingQueue.
2175  unsigned n = pendingQueue.size();
2176  if ((currState != NULL) && (currState != pendingQueue.front())) {
2177  ++n;
2178  }
2179 
// On a change in residency, record the elapsed interval (the histogram
// sample at the dropped line 2182 is missing from this Doxygen scrape,
// as is the method name line) and remember the new level/time.
2180  if (n != pendingReqs) {
2181  Tick now = curTick();
2183  pendingReqs = n;
2184  pendingChangeTick = now;
2185  }
2186 }
2187 
// TableWalker::testWalk — trickbox check of a walk access: forwards the
// physical address, size, domain and lookup level (plus the walk's VA,
// security state and mode) to the TLB's testWalk hook.  Part of the
// parameter list (line 2189) is missing from this Doxygen scrape.
2188 Fault
2190  LookupLevel lookup_level)
2191 {
2192  return tlb->testWalk(pa, size, currState->vaddr, currState->isSecure,
2193  currState->mode, domain, lookup_level);
2194 }
2195 
2196 
// TableWalker::pageSizeNtoStatBin — map a page-offset width N (log2 of the
// page size) onto the statPageSizes histogram bin index; panics on an
// unknown size.  The qualified name line (2198) is missing from this
// Doxygen scrape.
2197 uint8_t
2199 {
2200  /* for statPageSizes */
2201  switch(N) {
2202  case 12: return 0; // 4K
2203  case 14: return 1; // 16K (using 16K granule in v8-64)
2204  case 16: return 2; // 64K
2205  case 20: return 3; // 1M
2206  case 21: return 4; // 2M-LPAE
2207  case 24: return 5; // 16M
2208  case 25: return 6; // 32M (using 16K granule in v8-64)
2209  case 29: return 7; // 512M (using 64K granule in v8-64)
2210  case 30: return 8; // 1G-LPAE
2211  default:
2212  panic("unknown page size");
// Unreachable after the panic; keeps the compiler's return-path check happy.
2213  return 255;
2214  }
2215 }
2216 
// TableWalker::regStats — register the walker's statistics (names,
// descriptions, histogram sizes and sub-bucket labels).  The method name
// line (2218), the base-class regStats call (2220) and most of the stat
// variable names / .flags() lines are missing from this Doxygen scrape —
// only the builder-chain fragments survive.
2217 void
2219 {
2221 
2222  statWalks
2223  .name(name() + ".walks")
2224  .desc("Table walker walks requested")
2225  ;
2226 
2228  .name(name() + ".walksShort")
2229  .desc("Table walker walks initiated with short descriptors")
2231  ;
2232 
2234  .name(name() + ".walksLong")
2235  .desc("Table walker walks initiated with long descriptors")
2237  ;
2238 
// Per-level termination vectors: 2 short-descriptor levels, 4 long.
2240  .init(2)
2241  .name(name() + ".walksShortTerminationLevel")
2242  .desc("Level at which table walker walks "
2243  "with short descriptors terminate")
2245  ;
2248 
2250  .init(4)
2251  .name(name() + ".walksLongTerminationLevel")
2252  .desc("Level at which table walker walks "
2253  "with long descriptors terminate")
2255  ;
2260 
2262  .name(name() + ".walksSquashedBefore")
2263  .desc("Table walks squashed before starting")
2265  ;
2266 
2268  .name(name() + ".walksSquashedAfter")
2269  .desc("Table walks squashed after completion")
2271  ;
2272 
// Latency / residency histograms.
2274  .init(16)
2275  .name(name() + ".walkWaitTime")
2276  .desc("Table walker wait (enqueue to first request) latency")
2278  ;
2279 
2281  .init(16)
2282  .name(name() + ".walkCompletionTime")
2283  .desc("Table walker service (enqueue to completion) latency")
2285  ;
2286 
2288  .init(16)
2289  .name(name() + ".walksPending")
2290  .desc("Table walker pending requests distribution")
2292  ;
2293 
2294  statPageSizes // see DDI 0487A D4-1661
2295  .init(9)
2296  .name(name() + ".walkPageSizes")
2297  .desc("Table walker page sizes translated")
2299  ;
// Bucket order must match pageSizeNtoStatBin() above.
2300  statPageSizes.subname(0, "4K");
2301  statPageSizes.subname(1, "16K");
2302  statPageSizes.subname(2, "64K");
2303  statPageSizes.subname(3, "1M");
2304  statPageSizes.subname(4, "2M");
2305  statPageSizes.subname(5, "16M");
2306  statPageSizes.subname(6, "32M");
2307  statPageSizes.subname(7, "512M");
2308  statPageSizes.subname(8, "1G");
2309 
// 2x2 matrix: (requested, completed) x (data, inst).
2311  .init(2,2) // Instruction/Data, requests/completed
2312  .name(name() + ".walkRequestOrigin")
2313  .desc("Table walker requests started/completed, data/inst")
2315  ;
2316  statRequestOrigin.subname(0,"Requested");
2317  statRequestOrigin.subname(1,"Completed");
2318  statRequestOrigin.ysubname(0,"Data");
2319  statRequestOrigin.ysubname(1,"Inst");
2320 }
uint8_t innerAttrs
Definition: pagetable.hh:113
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:163
#define DPRINTF(x,...)
Definition: trace.hh:222
void regStats() override
Callback to set stat parameters.
const FlagsType pdf
Print the percent of the total that this entry represents.
Definition: info.hh:51
void memAttrsLPAE(ThreadContext *tc, TlbEntry &te, LongDescriptor &lDescriptor)
Ports are used to interface objects to each other.
Definition: port.hh:56
void translateTiming(const RequestPtr &req, ThreadContext *tc, Translation *translation, Mode mode, ArmTranslationType tranType)
Definition: tlb.cc:1236
MiscRegIndex
Definition: miscregs.hh:56
Bitfield< 5, 3 > reg
Definition: types.hh:87
void init() override
init() is called after all C++ SimObjects have been created and all ports are connected.
Derived & init(size_type _x, size_type _y)
Definition: statistics.hh:1285
decltype(nullptr) constexpr NoFault
Definition: types.hh:243
Derived & subname(off_type index, const std::string &name)
Set the subfield name for the given index, and marks this stat to print at the end of simulation...
Definition: statistics.hh:376
Cycles is a wrapper class for representing cycle counts, i.e.
Definition: types.hh:81
bool isFetch
If the access is a fetch (for execution, and no-exec) must be checked?
LookupLevel lookupLevel
Current lookup level for this descriptor.
Definition: table_walker.hh:71
Bitfield< 7, 0 > L
Definition: int.hh:57
#define fatal(...)
This implements a cprintf based fatal() function.
Definition: logging.hh:171
virtual TlbEntry::DomainType domain() const =0
uint32_t data
The raw bits of the entry.
virtual Addr pfn() const =0
Bitfield< 7 > i
bool haveSecurity() const
Returns true if this system implements the Security Extensions.
Definition: system.hh:150
Bitfield< 0 > m
void doL3LongDescriptorWrapper()
unsigned numSquashable
The number of walks belonging to squashed instructions that can be removed from the pendingQueue per ...
Addr l2Addr() const
Address of L2 descriptor if it exists.
std::list< WalkerState * > stateQueues[MAX_LOOKUP_LEVELS]
Queues of requests for all the different lookup levels.
const FlagsType nonan
Don't print if this is NAN.
Definition: info.hh:59
GrainSize grainSize
Width of the granule size in bits.
bool pending
If a timing translation is currently in progress.
DmaPort & getDMAPort()
Get the port that ultimately belongs to the stage-two MMU, but is used by the two table walkers...
Definition: stage2_mmu.hh:111
EventFunctionWrapper doL2LongDescEvent
Bitfield< 21, 20 > stride
EntryType type() const
Return the descriptor type.
std::shared_ptr< Request > RequestPtr
Definition: request.hh:81
static uint8_t pageSizeNtoStatBin(uint8_t N)
Bitfield< 8, 7 > sh
Stats::Vector statWalksShortTerminatedAtLevel
ip6_addr_t addr
Definition: inet.hh:330
bool secureTable() const
Whether the subsequent levels of lookup are secure.
Fault testWalk(Addr pa, Addr size, TlbEntry::DomainType domain, LookupLevel lookup_level)
bool isWrite
If the access is a write.
TableWalker(const Params *p)
Definition: table_walker.cc:56
bool invalid() const
Is the entry invalid.
Stats::Histogram statPendingWalks
The request targets the secure memory space.
Definition: request.hh:172
DrainState drain() override
Notify an object that it needs to drain its state.
virtual uint8_t texcb() const
Definition: table_walker.hh:82
Bitfield< 30 > te
bool haveSecurity
Cached copies of system-level properties.
bool FullSystem
The FullSystem variable can be used to determine the current mode of simulation.
Definition: root.cc:132
Stats::Scalar statWalksShortDescriptor
static ExceptionLevel tranTypeEL(CPSR cpsr, ArmTranslationType type)
Determine the EL to use for the purpose of a translation given a specific translation type...
Definition: tlb.cc:1417
bool timing
If the mode is timing or atomic.
uint64_t RegVal
Definition: types.hh:166
uint8_t memAttr() const
Memory attributes, only used by stage 2 translations.
uint8_t ap() const
Three bit access protection flags.
bool isUncacheable
True if table walks are uncacheable (for table descriptors)
Stats::Scalar statSquashedBefore
Histogram & init(size_type size)
Set the parameters of this histogram.
Definition: statistics.hh:2641
virtual BaseCPU * getCpuPtr()=0
Definition: ccregs.hh:41
bool hpd
Hierarchical access permission disable.
MemoryType mtype
Definition: pagetable.hh:119
bool stage2Req
Flag indicating if a second stage of lookup is required.
Bitfield< 4, 0 > mode
TLB::Translation * transState
Translation state for delayed requests.
HTCR htcr
Cached copy of the htcr as it existed when translation began.
void doL2LongDescriptorWrapper()
Addr purifyTaggedAddr(Addr addr, ThreadContext *tc, ExceptionLevel el, TCR tcr, bool isInstr)
Removes the tag from tagged addresses if that mode is enabled.
Definition: utility.cc:480
Tick clockPeriod() const
uint8_t attrIndx() const
Attribute index.
Derived & flags(Flags _flags)
Set the flags and marks this stat to print at the end of simulation.
Definition: statistics.hh:333
ThreadContext is the external interface to all thread state for anything outside of the CPU...
EventFunctionWrapper doL0LongDescEvent
Fault walk(const RequestPtr &req, ThreadContext *tc, uint16_t asid, uint8_t _vmid, bool _isHyp, TLB::Mode mode, TLB::Translation *_trans, bool timing, bool functional, bool secure, TLB::ArmTranslationType tranType, bool _stage2Req)
Addr vaddr
The virtual address that is being translated with tagging removed.
virtual bool secure(bool have_security, WalkerState *currState) const =0
The request is to an uncacheable address.
Definition: request.hh:113
DrainState
Object drain/handover states.
Definition: drain.hh:71
Derived & init(size_type size)
Set this vector to have the given size.
Definition: statistics.hh:1149
void dataStatic(T *p)
Set the data pointer to the following value that should not be freed.
Definition: packet.hh:1034
DmaPort * port
Port shared by the two table walkers.
uint8_t offsetBits() const
Return the bit width of the page/block offset.
Bitfield< 31 > n
ExceptionLevel el
Definition: pagetable.hh:133
Fault testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode, TlbEntry::DomainType domain, LookupLevel lookup_level)
Definition: tlb.cc:1610
void memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr, uint8_t texcb, bool s)
Bitfield< 6 > f
WalkerState * currState
DrainState drainState() const
Return the current drain state of an object.
Definition: drain.hh:308
int decodePhysAddrRange64(uint8_t pa_enc)
Returns the n.
Definition: utility.cc:1281
EventFunctionWrapper doL1DescEvent
virtual uint8_t offsetBits() const =0
The request is a page table walk.
Definition: request.hh:174
bool ELIs64(ThreadContext *tc, ExceptionLevel el)
Definition: utility.cc:336
Definition: tlb.hh:50
HCR hcr
Cached copy of the htcr as it existed when translation began.
Draining buffers pending serialization/handover.
MasterID masterId
Master id assigned by the MMU.
Tick curTick()
The current simulated tick.
Definition: core.hh:44
Bitfield< 3, 2 > el
Stats::Vector statWalksLongTerminatedAtLevel
virtual Port & getPort(const std::string &if_name, PortID idx=InvalidPortID)
Get a port with a given name and index.
Definition: sim_object.cc:123
virtual ~TableWalker()
Definition: table_walker.cc:93
void insertTableEntry(DescriptorBase &descriptor, bool longDescriptor)
int physAddrRange
Current physical address range in bits.
Bitfield< 4 > s
#define M5_FALLTHROUGH
Definition: compiler.hh:84
Fault fault
The fault that we are going to return.
const Params * params() const
T htog(T value, ByteOrder guest_byte_order)
Definition: byteswap.hh:155
static LookupLevel toLookupLevel(uint8_t lookup_level_as_int)
virtual bool xn() const =0
uint64_t Tick
Tick count type.
Definition: types.hh:61
virtual bool squashed() const
This function is used by the page table walker to determine if it should translate the a pending requ...
Definition: tlb.hh:84
The ClockedObject class extends the SimObject with a clock and accessor functions to relate ticks to ...
void setMMU(Stage2MMU *m, MasterID master_id)
Definition: table_walker.cc:99
ExceptionLevel el
Current exception level.
EventFunctionWrapper doL3LongDescEvent
void doL1LongDescriptorWrapper()
#define warn_if(cond,...)
Conditional warning macro that checks the supplied condition and only prints a warning if the conditi...
Definition: logging.hh:224
bool isSecure
If the access comes from the secure state.
Bitfield< 39, 12 > pa
bool fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes, Request::Flags flags, int queueIndex, Event *event, void(TableWalker::*doDescriptor)())
void doLongDescriptorWrapper(LookupLevel curr_lookup_level)
bool supersection() const
Is the page a Supersection (16MB)?
bool xnTable() const
Is execution allowed on subsequent lookup levels?
Event * LongDescEventByLevel[4]
Fault readDataUntimed(ThreadContext *tc, Addr oVAddr, Addr descAddr, uint8_t *data, int numBytes, Request::Flags flags, bool isFunctional)
Definition: stage2_mmu.cc:62
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condi...
Definition: logging.hh:199
Stats::Histogram statWalkServiceTime
Fault generateLongDescFault(ArmFault::FaultSource src)
void schedule(Event &event, Tick when)
Definition: eventq.hh:998
TLB::ArmTranslationType tranType
The translation type that has been requested.
virtual void finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc, Mode mode)=0
bool functional
If the atomic mode should be functional.
SCTLR sctlr
Cached copy of the sctlr as it existed when translation began.
Bitfield< 59, 56 > tlb
void doL0LongDescriptorWrapper()
virtual bool global(WalkerState *currState) const =0
uint64_t attributes
Definition: pagetable.hh:103
Port & getPort(const std::string &if_name, PortID idx=InvalidPortID) override
Get a port with a given name and index.
static unsigned adjustTableSizeAArch64(unsigned tsz)
bool haveVirtualization() const
Returns true if this system implements the virtualization Extensions.
Definition: system.hh:159
ArmTranslationType
Definition: tlb.hh:118
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:140
uint8_t outerAttrs
Definition: pagetable.hh:114
uint16_t MasterID
Definition: request.hh:84
#define ULL(N)
uint64_t constant
Definition: types.hh:48
uint8_t userTable() const
User/privileged mode protection flag for subsequent levels of lookup.
void completeDrain()
Checks if all state is cleared and if so, completes drain.
SCTLR sctlr
Cached copy of the sctlr as it existed when translation began.
A Packet is used to encapsulate a transfer between two objects in the memory system (e...
Definition: packet.hh:249
uint8_t ap() const
2-bit access protection flags
Bitfield< 10, 5 > event
Tick clockEdge(Cycles cycles=Cycles(0)) const
Determine the tick when a cycle begins, by default the current one, but the argument also enables the...
#define warn_once(...)
Definition: logging.hh:212
Bitfield< 34 > aarch64
Definition: types.hh:89
Stats::Histogram statWalkWaitTime
Stats::Scalar statSquashedAfter
static const unsigned REQUESTED
const FlagsType total
Print the total.
Definition: info.hh:49
TLB * tlb
TLB that is initiating these table walks.
RequestPtr dmaAction(Packet::Command cmd, Addr addr, int size, Event *event, uint8_t *data, Tick delay, Request::Flags flag=0)
Definition: dma_device.cc:197
This translation class is used to trigger the data fetch once a timing translation returns the transl...
Definition: stage2_mmu.hh:68
void drainResume() override
Resume execution after a successful drain.
Mode
Definition: tlb.hh:57
bool xn() const
Is execution allowed on this mapping?
virtual uint8_t ap() const =0
Derived & name(const std::string &name)
Set the name and marks this stat to print at the end of simulation.
Definition: statistics.hh:276
virtual const std::string name() const
Definition: sim_object.hh:128
std::list< WalkerState * > pendingQueue
Queue of requests that have passed are waiting because the walker is currently busy.
Tick startTime
Timestamp for calculating elapsed time in service (for stats)
Stats::Scalar statWalks
Statistics.
uint64_t data
The raw bits of the entry.
ThreadContext * tc
Thread context that we&#39;re doing the walk for.
bool longDescFormatInUse(ThreadContext *tc)
Definition: utility.cc:197
Bitfield< 7, 4 > domain
TlbEntry * lookup(Addr vpn, uint16_t asn, uint8_t vmid, bool hyp, bool secure, bool functional, bool ignore_asn, ExceptionLevel target_el)
Lookup an entry in the TLB.
Definition: tlb.cc:157
const unsigned MaxPhysAddrRange
Definition: isa_traits.hh:89
uint8_t physAddrRange() const
Returns the supported physical address range in bits.
Definition: system.hh:232
bool pxn() const
Is privileged execution allowed on this mapping? (LPAE only)
void insert(Addr vaddr, TlbEntry &pte)
Definition: tlb.cc:200
static const unsigned COMPLETED
virtual uint64_t getRawData() const =0
Definition: eventq.hh:246
EventFunctionWrapper doProcessEvent
Derived & ysubname(off_type index, const std::string &subname)
Definition: statistics.hh:463
LongDescriptor longDesc
Long-format descriptor (LPAE and AArch64)
DomainType domain
Definition: pagetable.hh:117
int snsBankedIndex(MiscRegIndex reg, ThreadContext *tc)
Definition: miscregs.cc:1091
void signalDrainDone() const
Signal that an object is drained.
Definition: drain.hh:289
Bitfield< 24 > hpd
TlbEntry::DomainType domain() const
Domain Client/Manager: ARM DDI 0406B: B3-31.
BaseTLB::Mode mode
Save mode for use in delayed response.
uint16_t asid
ASID that we&#39;re servicing the request under.
Stats::Scalar statWalksLongDescriptor
VTCR_t vtcr
Cached copy of the vtcr as it existed when translation began.
Stats::Vector2d statRequestOrigin
L1Descriptor l1Desc
Short-format descriptors.
void nextWalk(ThreadContext *tc)
uint32_t data
The raw bits of the entry.
virtual std::string dbgHeader() const =0
void sendFunctional(PacketPtr pkt) const
Send a functional request packet, where the data is instantly updated everywhere in the memory system...
Definition: port.hh:435
Bitfield< 3, 0 > mask
Definition: types.hh:62
Derived & desc(const std::string &_desc)
Set the description and marks this stat to print at the end of simulation.
Definition: statistics.hh:309
Addr nextDescAddr(Addr va) const
Return the address of the next descriptor.
int16_t PortID
Port index/ID type, and a symbolic name for an invalid port id.
Definition: types.hh:235
FaultSource
Generic fault source enums used to index into {short/long/aarch64}DescFaultSources[] to get the actua...
Definition: faults.hh:90
uint8_t sh() const
2-bit shareability field
void readDataTimed(ThreadContext *tc, Addr descAddr, Stage2Translation *translation, int numBytes, Request::Flags flags)
Definition: stage2_mmu.cc:99
bool secureLookup
Helper variables used to implement hierarchical access permissions when the long-desc.
Stage2MMU * stage2Mmu
The MMU to forward second stage look upts to.
virtual void regStats()
Callback to set stat parameters.
Definition: group.cc:64
bool aarch64
True if the current lookup is performed in AArch64 state.
T mbits(T val, int first, int last)
Mask off the given bits in place like bits() but without shifting.
Definition: bitfield.hh:95
bool aarch64
If the access is performed in AArch64 state.
T bits(T val, int first, int last)
Extract the bitfield from position &#39;first&#39; to &#39;last&#39; (inclusive) from &#39;val&#39; and right justify it...
Definition: bitfield.hh:71
ArmTableWalkerParams Params
const bool isStage2
Indicates whether this table walker is part of the stage 2 mmu.
bool haveLargeAsid64() const
Returns true if ASID is 16 bits in AArch64 (ARMv8)
Definition: system.hh:212
Bitfield< 18, 16 > ps
bool af() const
Returns true if the access flag (AF) is set.
static bool checkAddrSizeFaultAArch64(Addr addr, int currPhysAddrRange)
Returns true if the address exceeds the range permitted by the system-wide setting or by the TCR_ELx ...
LookupLevel lookupLevel
Definition: pagetable.hh:105
const FlagsType nozero
Don&#39;t print if this is zero.
Definition: info.hh:57
Bitfield< 0 > p
Running normally.
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
Definition: logging.hh:181
virtual RegVal readMiscReg(RegIndex misc_reg)=0
LookupLevel
Definition: pagetable.hh:74
uint8_t ap() const
Three bit access protection flags.
const FlagsType dist
Print the distribution.
Definition: info.hh:55
const char data[]
std::shared_ptr< FaultBase > Fault
Definition: types.hh:238
EventFunctionWrapper doL1LongDescEvent
void set(Type flags)
Definition: flags.hh:68
Long-descriptor format (LPAE)
bool pxnTable() const
Is privileged execution allowed on subsequent lookup levels?
void sample(const U &v, int n=1)
Add a value to the distribtion n times.
Definition: statistics.hh:1896
void memAttrsAArch64(ThreadContext *tc, TlbEntry &te, LongDescriptor &lDescriptor)
Addr vaddr_tainted
The virtual address that is being translated.
EventFunctionWrapper doL2DescEvent
RequestPtr req
Request that is currently being serviced.
bool delayed
Whether the response is delayed in timing mode due to additional lookups.
void setAttributes(bool lpae)
Definition: pagetable.hh:277
uint8_t rwTable() const
R/W protection flag for subsequent levels of lookup.
ProbePointArg< PacketInfo > Packet
Packet probe point.
Definition: mem.hh:103
bool haveLPAE() const
Returns true if this system implements the Large Physical Address Extension.
Definition: system.hh:154
ByteOrder byteOrder(const ThreadContext *tc)
Definition: utility.hh:437
TLB::Translation * stage2Tran
A pointer to the stage 2 translation that&#39;s in progress.
Stats::Vector statPageSizes

Generated on Mon Jun 8 2020 15:34:40 for gem5 by doxygen 1.8.13