gem5  v19.0.0.0
table_walker.cc
1 /*
2  * Copyright (c) 2010, 2012-2019 ARM Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions are
16  * met: redistributions of source code must retain the above copyright
17  * notice, this list of conditions and the following disclaimer;
18  * redistributions in binary form must reproduce the above copyright
19  * notice, this list of conditions and the following disclaimer in the
20  * documentation and/or other materials provided with the distribution;
21  * neither the name of the copyright holders nor the names of its
22  * contributors may be used to endorse or promote products derived from
23  * this software without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  *
37  * Authors: Ali Saidi
38  * Giacomo Gabrielli
39  */
40 #include "arch/arm/table_walker.hh"
41 
42 #include <memory>
43 
44 #include "arch/arm/faults.hh"
45 #include "arch/arm/stage2_mmu.hh"
46 #include "arch/arm/system.hh"
47 #include "arch/arm/tlb.hh"
48 #include "cpu/base.hh"
49 #include "cpu/thread_context.hh"
50 #include "debug/Checkpoint.hh"
51 #include "debug/Drain.hh"
52 #include "debug/TLB.hh"
53 #include "debug/TLBVerbose.hh"
54 #include "dev/dma_device.hh"
55 #include "sim/system.hh"
56 
57 using namespace ArmISA;
58 
59 TableWalker::TableWalker(const Params *p)
60  : ClockedObject(p),
61  stage2Mmu(NULL), port(NULL), masterId(Request::invldMasterId),
62  isStage2(p->is_stage2), tlb(NULL),
63  currState(NULL), pending(false),
64  numSquashable(p->num_squash_per_cycle),
65  pendingReqs(0),
66  pendingChangeTick(curTick()),
67  doL1DescEvent([this]{ doL1DescriptorWrapper(); }, name()),
68  doL2DescEvent([this]{ doL2DescriptorWrapper(); }, name()),
69  doL0LongDescEvent([this]{ doLongDescriptorWrapper(L0); }, name()),
70  doL1LongDescEvent([this]{ doLongDescriptorWrapper(L1); }, name()),
71  doL2LongDescEvent([this]{ doLongDescriptorWrapper(L2); }, name()),
72  doL3LongDescEvent([this]{ doLongDescriptorWrapper(L3); }, name()),
73  LongDescEventByLevel { &doL0LongDescEvent, &doL1LongDescEvent,
74  &doL2LongDescEvent, &doL3LongDescEvent },
75  doProcessEvent([this]{ processWalkWrapper(); }, name())
76 {
77  sctlr = 0;
78 
79  // Cache system-level properties
80  if (FullSystem) {
81  ArmSystem *armSys = dynamic_cast<ArmSystem *>(p->sys);
82  assert(armSys);
83  haveSecurity = armSys->haveSecurity();
84  _haveLPAE = armSys->haveLPAE();
85  _haveVirtualization = armSys->haveVirtualization();
86  physAddrRange = armSys->physAddrRange();
87  _haveLargeAsid64 = armSys->haveLargeAsid64();
88  } else {
89  haveSecurity = _haveLPAE = _haveVirtualization = false;
90  _haveLargeAsid64 = false;
91  physAddrRange = 32;
92  }
93 
94 }
95 
96 TableWalker::~TableWalker()
97 {
98  ;
99 }
100 
101 void
102 TableWalker::setMMU(Stage2MMU *m, MasterID master_id)
103 {
104  stage2Mmu = m;
105  port = &m->getDMAPort();
106  masterId = master_id;
107 }
108 
109 void
110 TableWalker::init()
111 {
112  fatal_if(!stage2Mmu, "Table walker must have a valid stage-2 MMU\n");
113  fatal_if(!port, "Table walker must have a valid port\n");
114  fatal_if(!tlb, "Table walker must have a valid TLB\n");
115 }
116 
117 Port &
118 TableWalker::getPort(const std::string &if_name, PortID idx)
119 {
120  if (if_name == "port") {
121  if (!isStage2) {
122  return *port;
123  } else {
124  fatal("Cannot access table walker port through stage-two walker\n");
125  }
126  }
127  return ClockedObject::getPort(if_name, idx);
128 }
129 
130 TableWalker::WalkerState::WalkerState() :
131  tc(nullptr), aarch64(false), el(EL0), physAddrRange(0), req(nullptr),
132  asid(0), vmid(0), isHyp(false), transState(nullptr),
133  vaddr(0), vaddr_tainted(0),
134  sctlr(0), scr(0), cpsr(0), tcr(0),
135  htcr(0), hcr(0), vtcr(0),
136  isWrite(false), isFetch(false), isSecure(false),
137  isUncacheable(false),
138  secureLookup(false), rwTable(false), userTable(false), xnTable(false),
139  pxnTable(false), hpd(false), stage2Req(false),
140  stage2Tran(nullptr), timing(false), functional(false),
141  mode(BaseTLB::Read), tranType(TLB::NormalTran), l2Desc(l1Desc),
142  delayed(false), tableWalker(nullptr)
143 {
144 }
145 
146 void
147 TableWalker::completeDrain()
148 {
149  if (drainState() == DrainState::Draining &&
150  stateQueues[L0].empty() && stateQueues[L1].empty() &&
151  stateQueues[L2].empty() && stateQueues[L3].empty() &&
152  pendingQueue.empty()) {
153 
154  DPRINTF(Drain, "TableWalker done draining, processing drain event\n");
155  signalDrainDone();
156  }
157 }
158 
159 DrainState
160 TableWalker::drain()
161 {
162  bool state_queues_not_empty = false;
163 
164  for (int i = 0; i < MAX_LOOKUP_LEVELS; ++i) {
165  if (!stateQueues[i].empty()) {
166  state_queues_not_empty = true;
167  break;
168  }
169  }
170 
171  if (state_queues_not_empty || pendingQueue.size()) {
172  DPRINTF(Drain, "TableWalker not drained\n");
173  return DrainState::Draining;
174  } else {
175  DPRINTF(Drain, "TableWalker free, no need to drain\n");
176  return DrainState::Drained;
177  }
178 }
179 
180 void
181 TableWalker::drainResume()
182 {
183  if (params()->sys->isTimingMode() && currState) {
184  delete currState;
185  currState = NULL;
186  pendingChange();
187  }
188 }
189 
190 Fault
191 TableWalker::walk(const RequestPtr &_req, ThreadContext *_tc, uint16_t _asid,
192  uint8_t _vmid, bool _isHyp, TLB::Mode _mode,
193  TLB::Translation *_trans, bool _timing, bool _functional,
194  bool secure, TLB::ArmTranslationType tranType,
195  bool _stage2Req)
196 {
197  assert(!(_functional && _timing));
198  ++statWalks;
199 
200  WalkerState *savedCurrState = NULL;
201 
202  if (!currState && !_functional) {
203  // For atomic mode, a new WalkerState instance should be only created
204  // once per TLB. For timing mode, a new instance is generated for every
205  // TLB miss.
206  DPRINTF(TLBVerbose, "creating new instance of WalkerState\n");
207 
208  currState = new WalkerState();
209  currState->tableWalker = this;
210  } else if (_functional) {
211  // If we are mixing functional mode with timing (or even
212  // atomic), we need to be careful and clean up after
213  // ourselves to not risk getting into an inconsistent state.
214  DPRINTF(TLBVerbose, "creating functional instance of WalkerState\n");
215  savedCurrState = currState;
216  currState = new WalkerState();
217  currState->tableWalker = this;
218  } else if (_timing) {
219  // This is a translation that was completed and then faulted again
220  // because some underlying parameters that affect the translation
221  // changed out from under us (e.g. asid). It will either be a
222  // misprediction, in which case nothing will happen or we'll use
223  // this fault to re-execute the faulting instruction which should clean
224  // up everything.
225  if (currState->vaddr_tainted == _req->getVaddr()) {
226  ++statSquashedBefore;
227  return std::make_shared<ReExec>();
228  }
229  }
230  pendingChange();
231 
232  currState->startTime = curTick();
233  currState->tc = _tc;
234  // ARM DDI 0487A.f (ARMv8 ARM) pg J8-5672
235  // aarch32/translation/translation/AArch32.TranslateAddress dictates
236  // even AArch32 EL0 will use AArch64 translation if EL1 is in AArch64.
237  if (isStage2) {
238  currState->el = EL1;
239  currState->aarch64 = ELIs64(_tc, EL2);
240  } else {
241  currState->el =
242  TLB::tranTypeEL(_tc->readMiscReg(MISCREG_CPSR), tranType);
243  currState->aarch64 =
244  ELIs64(_tc, currState->el == EL0 ? EL1 : currState->el);
245  }
246  currState->transState = _trans;
247  currState->req = _req;
248  currState->fault = NoFault;
249  currState->asid = _asid;
250  currState->vmid = _vmid;
251  currState->isHyp = _isHyp;
252  currState->timing = _timing;
253  currState->functional = _functional;
254  currState->mode = _mode;
255  currState->tranType = tranType;
256  currState->isSecure = secure;
257  currState->physAddrRange = physAddrRange;
258 
259  /** @todo These should be cached or grabbed from cached copies in
260  the TLB, all these miscreg reads are expensive */
261  currState->vaddr_tainted = currState->req->getVaddr();
262  if (currState->aarch64)
263  currState->vaddr = purifyTaggedAddr(currState->vaddr_tainted,
264  currState->tc,
265  currState->el);
266  else
267  currState->vaddr = currState->vaddr_tainted;
268 
269  if (currState->aarch64) {
270  if (isStage2) {
271  currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
272  currState->vtcr = currState->tc->readMiscReg(MISCREG_VTCR_EL2);
273  } else switch (currState->el) {
274  case EL0:
275  case EL1:
276  currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL1);
277  currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL1);
278  break;
279  case EL2:
280  assert(_haveVirtualization);
281  currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL2);
282  currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL2);
283  break;
284  case EL3:
285  assert(haveSecurity);
286  currState->sctlr = currState->tc->readMiscReg(MISCREG_SCTLR_EL3);
287  currState->tcr = currState->tc->readMiscReg(MISCREG_TCR_EL3);
288  break;
289  default:
290  panic("Invalid exception level");
291  break;
292  }
293  currState->hcr = currState->tc->readMiscReg(MISCREG_HCR_EL2);
294  } else {
295  currState->sctlr = currState->tc->readMiscReg(snsBankedIndex(
296  MISCREG_SCTLR, currState->tc, !currState->isSecure));
297  currState->ttbcr = currState->tc->readMiscReg(snsBankedIndex(
298  MISCREG_TTBCR, currState->tc, !currState->isSecure));
299  currState->htcr = currState->tc->readMiscReg(MISCREG_HTCR);
300  currState->hcr = currState->tc->readMiscReg(MISCREG_HCR);
301  currState->vtcr = currState->tc->readMiscReg(MISCREG_VTCR);
302  }
303  sctlr = currState->sctlr;
304 
305  currState->isFetch = (currState->mode == TLB::Execute);
306  currState->isWrite = (currState->mode == TLB::Write);
307 
308  statRequestOrigin[REQUESTED][currState->isFetch]++;
309 
310  currState->stage2Req = _stage2Req && !isStage2;
311 
312  bool long_desc_format = currState->aarch64 || _isHyp || isStage2 ||
313  longDescFormatInUse(currState->tc);
314 
315  if (long_desc_format) {
316  // Helper variables used for hierarchical permissions
317  currState->secureLookup = currState->isSecure;
318  currState->rwTable = true;
319  currState->userTable = true;
320  currState->xnTable = false;
321  currState->pxnTable = false;
322 
323  ++statWalksLongDescriptor;
324  } else {
325  ++statWalksShortDescriptor;
326  }
327 
328  if (!currState->timing) {
329  Fault fault = NoFault;
330  if (currState->aarch64)
331  fault = processWalkAArch64();
332  else if (long_desc_format)
333  fault = processWalkLPAE();
334  else
335  fault = processWalk();
336 
337  // If this was a functional non-timing access restore state to
338  // how we found it.
339  if (currState->functional) {
340  delete currState;
341  currState = savedCurrState;
342  }
343  return fault;
344  }
345 
346  if (pending || pendingQueue.size()) {
347  pendingQueue.push_back(currState);
348  currState = NULL;
349  pendingChange();
350  } else {
351  pending = true;
352  pendingChange();
353  if (currState->aarch64)
354  return processWalkAArch64();
355  else if (long_desc_format)
356  return processWalkLPAE();
357  else
358  return processWalk();
359  }
360 
361  return NoFault;
362 }
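// Usage sketch (illustrative only, not part of table_walker.cc): an atomic
// walk returns its fault synchronously, while a timing walk returns NoFault
// here and completes later through the walker's descriptor events:
//
//   Fault f = walker->walk(req, tc, asid, vmid, false /*isHyp*/, mode,
//                          translation, false /*timing*/,
//                          false /*functional*/, is_secure,
//                          TLB::NormalTran, stage2_req);
//   // With timing == true, f is NoFault and translation->finish() is
//   // called from the wrapper events once the descriptors return.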
363 
364 void
365 TableWalker::processWalkWrapper()
366 {
367  assert(!currState);
368  assert(pendingQueue.size());
369  pendingChange();
370  currState = pendingQueue.front();
371 
372  // Check if a previous walk filled this request already
373  // @TODO Should this always be the TLB or should we look in the stage2 TLB?
374  TlbEntry* te = tlb->lookup(currState->vaddr, currState->asid,
375  currState->vmid, currState->isHyp, currState->isSecure, true, false,
376  currState->el);
377 
378  // Check if we still need to have a walk for this request. If the requesting
379  // instruction has been squashed, or a previous walk has filled the TLB with
380  // a match, we just want to get rid of the walk. The latter could happen
381  // when there are multiple outstanding misses to a single page and a
382  // previous request has been successfully translated.
383  if (!currState->transState->squashed() && !te) {
384  // We've got a valid request, let's process it
385  pending = true;
386  pendingQueue.pop_front();
387  // Keep currState in case one of the processWalk... calls NULLs it
388  WalkerState *curr_state_copy = currState;
389  Fault f;
390  if (currState->aarch64)
391  f = processWalkAArch64();
392  else if (longDescFormatInUse(currState->tc) ||
393  currState->isHyp || isStage2)
394  f = processWalkLPAE();
395  else
396  f = processWalk();
397 
398  if (f != NoFault) {
399  curr_state_copy->transState->finish(f, curr_state_copy->req,
400  curr_state_copy->tc, curr_state_copy->mode);
401 
402  delete curr_state_copy;
403  }
404  return;
405  }
406 
407 
408  // If the instruction that we were translating for has been
409  // squashed we shouldn't bother.
410  unsigned num_squashed = 0;
411  ThreadContext *tc = currState->tc;
412  while ((num_squashed < numSquashable) && currState &&
413  (currState->transState->squashed() || te)) {
414  pendingQueue.pop_front();
415  num_squashed++;
416  statSquashedBefore++;
417 
418  DPRINTF(TLB, "Squashing table walk for address %#x\n",
419  currState->vaddr_tainted);
420 
421  if (currState->transState->squashed()) {
422  // finish the translation which will delete the translation object
423  currState->transState->finish(
424  std::make_shared<UnimpFault>("Squashed Inst"),
425  currState->req, currState->tc, currState->mode);
426  } else {
427  // translate the request now that we know it will work
428  statWalkServiceTime.sample(curTick() - currState->startTime);
429  tlb->translateTiming(currState->req, currState->tc,
430  currState->transState, currState->mode);
431 
432  }
433 
434  // delete the current request
435  delete currState;
436 
437  // peek at the next one
438  if (pendingQueue.size()) {
439  currState = pendingQueue.front();
440  te = tlb->lookup(currState->vaddr, currState->asid,
441  currState->vmid, currState->isHyp, currState->isSecure, true,
442  false, currState->el);
443  } else {
444  // Terminate the loop, nothing more to do
445  currState = NULL;
446  }
447  }
448  pendingChange();
449 
450  // if we still have pending translations, schedule more work
451  nextWalk(tc);
452  currState = NULL;
453 }
454 
455 Fault
456 TableWalker::processWalk()
457 {
458  Addr ttbr = 0;
459 
460  // For short descriptors, translation configs are held in
461  // TTBR1.
462  RegVal ttbr1 = currState->tc->readMiscReg(snsBankedIndex(
463  MISCREG_TTBR1, currState->tc, !currState->isSecure));
464 
465  const auto irgn0_mask = 0x1;
466  const auto irgn1_mask = 0x40;
467  currState->isUncacheable = (ttbr1 & (irgn0_mask | irgn1_mask)) == 0;
468 
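// Illustrative note (not from the source): with the Multiprocessing
// Extensions the TTBR IRGN field is split across bits [0] and [6], so
// (ttbr1 & 0x41) == 0 means IRGN == 0b00, i.e. the walk itself is Inner
// Non-cacheable. For example, ttbr1 == 0x80004001 has IRGN[0] set, so the
// descriptor fetches below remain cacheable.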
469  // If translation isn't enabled, we shouldn't be here
470  assert(currState->sctlr.m || isStage2);
471  const bool is_atomic = currState->req->isAtomic();
472 
473  DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
474  currState->vaddr_tainted, currState->ttbcr, mbits(currState->vaddr, 31,
475  32 - currState->ttbcr.n));
476 
477  statWalkWaitTime.sample(curTick() - currState->startTime);
478 
479  if (currState->ttbcr.n == 0 || !mbits(currState->vaddr, 31,
480  32 - currState->ttbcr.n)) {
481  DPRINTF(TLB, " - Selecting TTBR0\n");
482  // Check if table walk is allowed when Security Extensions are enabled
483  if (haveSecurity && currState->ttbcr.pd0) {
484  if (currState->isFetch)
485  return std::make_shared<PrefetchAbort>(
486  currState->vaddr_tainted,
487  ArmFault::TranslationLL + L1,
488  isStage2,
489  ArmFault::VmsaTran);
490  else
491  return std::make_shared<DataAbort>(
492  currState->vaddr_tainted,
493  TlbEntry::DomainType::NoAccess,
494  is_atomic ? false : currState->isWrite,
495  ArmFault::TranslationLL + L1, isStage2,
496  ArmFault::VmsaTran);
497  }
498  ttbr = currState->tc->readMiscReg(snsBankedIndex(
499  MISCREG_TTBR0, currState->tc, !currState->isSecure));
500  } else {
501  DPRINTF(TLB, " - Selecting TTBR1\n");
502  // Check if table walk is allowed when Security Extensions are enabled
503  if (haveSecurity && currState->ttbcr.pd1) {
504  if (currState->isFetch)
505  return std::make_shared<PrefetchAbort>(
506  currState->vaddr_tainted,
507  ArmFault::TranslationLL + L1,
508  isStage2,
509  ArmFault::VmsaTran);
510  else
511  return std::make_shared<DataAbort>(
512  currState->vaddr_tainted,
513  TlbEntry::DomainType::NoAccess,
514  is_atomic ? false : currState->isWrite,
515  ArmFault::TranslationLL + L1, isStage2,
516  ArmFault::VmsaTran);
517  }
518  ttbr = ttbr1;
519  currState->ttbcr.n = 0;
520  }
521 
522  Addr l1desc_addr = mbits(ttbr, 31, 14 - currState->ttbcr.n) |
523  (bits(currState->vaddr, 31 - currState->ttbcr.n, 20) << 2);
524  DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr,
525  currState->isSecure ? "s" : "ns");
526 
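// Worked example (illustrative values): with TTBCR.N == 0,
// TTBR0 == 0x80004000 and VA == 0x12345678:
//   mbits(ttbr, 31, 14)       -> 0x80004000 (16KB-aligned table base)
//   bits(vaddr, 31, 20) << 2  -> 0x123 << 2 == 0x48c (4-byte L1 entries)
//   l1desc_addr               -> 0x8000448c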
527  // Trickbox address check
528  Fault f;
529  f = testWalk(l1desc_addr, sizeof(uint32_t),
530  TlbEntry::DomainType::NoAccess, L1);
531  if (f) {
532  DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
533  if (currState->timing) {
534  pending = false;
535  nextWalk(currState->tc);
536  currState = NULL;
537  } else {
538  currState->tc = NULL;
539  currState->req = NULL;
540  }
541  return f;
542  }
543 
544  Request::Flags flag = Request::PT_WALK;
545  if (currState->sctlr.c == 0 || currState->isUncacheable) {
546  flag.set(Request::UNCACHEABLE);
547  }
548 
549  if (currState->isSecure) {
550  flag.set(Request::SECURE);
551  }
552 
553  bool delayed;
554  delayed = fetchDescriptor(l1desc_addr, (uint8_t*)&currState->l1Desc.data,
555  sizeof(uint32_t), flag, L1, &doL1DescEvent,
556  &TableWalker::doL1Descriptor);
557  if (!delayed) {
558  f = currState->fault;
559  }
560 
561  return f;
562 }
563 
564 Fault
565 TableWalker::processWalkLPAE()
566 {
567  Addr ttbr, ttbr0_max, ttbr1_min, desc_addr;
568  int tsz, n;
569  LookupLevel start_lookup_level = L1;
570 
571  DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n",
572  currState->vaddr_tainted, currState->ttbcr);
573 
574  statWalkWaitTime.sample(curTick() - currState->startTime);
575 
576  Request::Flags flag = Request::PT_WALK;
577  if (currState->isSecure)
578  flag.set(Request::SECURE);
579 
580  // work out which base address register to use, if in hyp mode we always
581  // use HTTBR
582  if (isStage2) {
583  DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n");
584  ttbr = currState->tc->readMiscReg(MISCREG_VTTBR);
585  tsz = sext<4>(currState->vtcr.t0sz);
586  start_lookup_level = currState->vtcr.sl0 ? L1 : L2;
587  currState->isUncacheable = currState->vtcr.irgn0 == 0;
588  } else if (currState->isHyp) {
589  DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n");
590  ttbr = currState->tc->readMiscReg(MISCREG_HTTBR);
591  tsz = currState->htcr.t0sz;
592  currState->isUncacheable = currState->htcr.irgn0 == 0;
593  } else {
594  assert(longDescFormatInUse(currState->tc));
595 
596  // Determine boundaries of TTBR0/1 regions
597  if (currState->ttbcr.t0sz)
598  ttbr0_max = (1ULL << (32 - currState->ttbcr.t0sz)) - 1;
599  else if (currState->ttbcr.t1sz)
600  ttbr0_max = (1ULL << 32) -
601  (1ULL << (32 - currState->ttbcr.t1sz)) - 1;
602  else
603  ttbr0_max = (1ULL << 32) - 1;
604  if (currState->ttbcr.t1sz)
605  ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz));
606  else
607  ttbr1_min = (1ULL << (32 - currState->ttbcr.t0sz));
608 
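// Worked example (illustrative values): TTBCR.T0SZ == 1 and T1SZ == 2 give
//   ttbr0_max = (1ULL << 31) - 1            == 0x7fffffff
//   ttbr1_min = (1ULL << 32) - (1ULL << 30) == 0xc0000000
// so VAs in [0x80000000, 0xbfffffff] match neither region and take the
// translation fault in the final else branch below.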
609  const bool is_atomic = currState->req->isAtomic();
610 
611  // The following code snippet selects the appropriate translation table base
612  // address (TTBR0 or TTBR1) and the appropriate starting lookup level
613  // depending on the address range supported by the translation table (ARM
614  // ARM issue C B3.6.4)
615  if (currState->vaddr <= ttbr0_max) {
616  DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n");
617  // Check if table walk is allowed
618  if (currState->ttbcr.epd0) {
619  if (currState->isFetch)
620  return std::make_shared<PrefetchAbort>(
621  currState->vaddr_tainted,
622  ArmFault::TranslationLL + L1,
623  isStage2,
624  ArmFault::LpaeTran);
625  else
626  return std::make_shared<DataAbort>(
627  currState->vaddr_tainted,
628  TlbEntry::DomainType::NoAccess,
629  is_atomic ? false : currState->isWrite,
630  ArmFault::TranslationLL + L1,
631  isStage2,
632  ArmFault::LpaeTran);
633  }
634  ttbr = currState->tc->readMiscReg(snsBankedIndex(
635  MISCREG_TTBR0, currState->tc, !currState->isSecure));
636  tsz = currState->ttbcr.t0sz;
637  currState->isUncacheable = currState->ttbcr.irgn0 == 0;
638  if (ttbr0_max < (1ULL << 30)) // Upper limit < 1 GB
639  start_lookup_level = L2;
640  } else if (currState->vaddr >= ttbr1_min) {
641  DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
642  // Check if table walk is allowed
643  if (currState->ttbcr.epd1) {
644  if (currState->isFetch)
645  return std::make_shared<PrefetchAbort>(
646  currState->vaddr_tainted,
647  ArmFault::TranslationLL + L1,
648  isStage2,
649  ArmFault::LpaeTran);
650  else
651  return std::make_shared<DataAbort>(
652  currState->vaddr_tainted,
653  TlbEntry::DomainType::NoAccess,
654  is_atomic ? false : currState->isWrite,
655  ArmFault::TranslationLL + L1,
656  isStage2,
657  ArmFault::LpaeTran);
658  }
659  ttbr = currState->tc->readMiscReg(snsBankedIndex(
660  MISCREG_TTBR1, currState->tc, !currState->isSecure));
661  tsz = currState->ttbcr.t1sz;
662  currState->isUncacheable = currState->ttbcr.irgn1 == 0;
663  // Lower limit >= 3 GB
664  if (ttbr1_min >= (1ULL << 31) + (1ULL << 30))
665  start_lookup_level = L2;
666  } else {
667  // Out of boundaries -> translation fault
668  if (currState->isFetch)
669  return std::make_shared<PrefetchAbort>(
670  currState->vaddr_tainted,
671  ArmFault::TranslationLL + L1,
672  isStage2,
673  ArmFault::LpaeTran);
674  else
675  return std::make_shared<DataAbort>(
676  currState->vaddr_tainted,
677  TlbEntry::DomainType::NoAccess,
678  is_atomic ? false : currState->isWrite,
679  ArmFault::TranslationLL + L1,
680  isStage2, ArmFault::LpaeTran);
681  }
682 
683  }
684 
685  // Perform lookup (ARM ARM issue C B3.6.6)
686  if (start_lookup_level == L1) {
687  n = 5 - tsz;
688  desc_addr = mbits(ttbr, 39, n) |
689  (bits(currState->vaddr, n + 26, 30) << 3);
690  DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
691  desc_addr, currState->isSecure ? "s" : "ns");
692  } else {
693  // Skip first-level lookup
694  n = (tsz >= 2 ? 14 - tsz : 12);
695  desc_addr = mbits(ttbr, 39, n) |
696  (bits(currState->vaddr, n + 17, 21) << 3);
697  DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
698  desc_addr, currState->isSecure ? "s" : "ns");
699  }
700 
701  // Trickbox address check
702  Fault f = testWalk(desc_addr, sizeof(uint64_t),
703  TlbEntry::DomainType::NoAccess, start_lookup_level);
704  if (f) {
705  DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
706  if (currState->timing) {
707  pending = false;
708  nextWalk(currState->tc);
709  currState = NULL;
710  } else {
711  currState->tc = NULL;
712  currState->req = NULL;
713  }
714  return f;
715  }
716 
717  if (currState->sctlr.c == 0 || currState->isUncacheable) {
718  flag.set(Request::UNCACHEABLE);
719  }
720 
721  currState->longDesc.lookupLevel = start_lookup_level;
722  currState->longDesc.aarch64 = false;
723  currState->longDesc.grainSize = Grain4KB;
724 
725  bool delayed = fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
726  sizeof(uint64_t), flag, start_lookup_level,
727  LongDescEventByLevel[start_lookup_level],
728  &TableWalker::doLongDescriptor);
729  if (!delayed) {
730  f = currState->fault;
731  }
732 
733  return f;
734 }
735 
736 unsigned
737 TableWalker::adjustTableSizeAArch64(unsigned tsz)
738 {
739  if (tsz < 25)
740  return 25;
741  if (tsz > 48)
742  return 48;
743  return tsz;
744 }
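// Worked example (illustrative): TCR_EL1.T0SZ == 16 yields an input of
// 64 - 16 == 48, already the architectural maximum; T0SZ == 45 would yield
// 19 and be clamped up to the 25-bit minimum region size.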
745 
746 bool
748 {
749  return (currPhysAddrRange != MaxPhysAddrRange &&
750  bits(addr, MaxPhysAddrRange - 1, currPhysAddrRange));
751 }
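// Example (illustrative): with currPhysAddrRange == 40 and
// MaxPhysAddrRange == 48, any address with a bit set in [47:40]
// (e.g. 1ULL << 40) lies outside the implemented PA range and is
// reported as an address size fault by the callers below.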
752 
753 Fault
754 TableWalker::processWalkAArch64()
755 {
756  assert(currState->aarch64);
757 
758  DPRINTF(TLB, "Beginning table walk for address %#llx, TCR: %#llx\n",
759  currState->vaddr_tainted, currState->tcr);
760 
761  static const GrainSize GrainMap_tg0[] =
762  { Grain4KB, Grain64KB, Grain16KB, ReservedGrain };
763  static const GrainSize GrainMap_tg1[] =
764  { ReservedGrain, Grain16KB, Grain4KB, Grain64KB };
765 
766  statWalkWaitTime.sample(curTick() - currState->startTime);
767 
768  // Determine TTBR, table size, granule size and phys. address range
769  Addr ttbr = 0;
770  int tsz = 0, ps = 0;
771  GrainSize tg = Grain4KB; // grain size computed from tg* field
772  bool fault = false;
773 
774  LookupLevel start_lookup_level = MAX_LOOKUP_LEVELS;
775 
776  switch (currState->el) {
777  case EL0:
778  case EL1:
779  if (isStage2) {
780  DPRINTF(TLB, " - Selecting VTTBR0 (AArch64 stage 2)\n");
781  ttbr = currState->tc->readMiscReg(MISCREG_VTTBR_EL2);
782  tsz = 64 - currState->vtcr.t0sz64;
783  tg = GrainMap_tg0[currState->vtcr.tg0];
784  // ARM DDI 0487A.f D7-2148
785  // The starting level of stage 2 translation depends on
786  // VTCR_EL2.SL0 and VTCR_EL2.TG0
787  LookupLevel __ = MAX_LOOKUP_LEVELS; // invalid level
788  uint8_t sl_tg = (currState->vtcr.sl0 << 2) | currState->vtcr.tg0;
789  static const LookupLevel SLL[] = {
790  L2, L3, L3, __, // sl0 == 0
791  L1, L2, L2, __, // sl0 == 1, etc.
792  L0, L1, L1, __,
793  __, __, __, __
794  };
795  start_lookup_level = SLL[sl_tg];
796  panic_if(start_lookup_level == MAX_LOOKUP_LEVELS,
797  "Cannot discern lookup level from vtcr.{sl0,tg0}");
798  ps = currState->vtcr.ps;
799  currState->isUncacheable = currState->vtcr.irgn0 == 0;
800  } else {
801  switch (bits(currState->vaddr, 63,48)) {
802  case 0:
803  DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
804  ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL1);
805  tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
806  tg = GrainMap_tg0[currState->tcr.tg0];
807  currState->hpd = currState->tcr.hpd0;
808  currState->isUncacheable = currState->tcr.irgn0 == 0;
809  if (bits(currState->vaddr, 63, tsz) != 0x0 ||
810  currState->tcr.epd0)
811  fault = true;
812  break;
813  case 0xffff:
814  DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
815  ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL1);
816  tsz = adjustTableSizeAArch64(64 - currState->tcr.t1sz);
817  tg = GrainMap_tg1[currState->tcr.tg1];
818  currState->hpd = currState->tcr.hpd1;
819  currState->isUncacheable = currState->tcr.irgn1 == 0;
820  if (bits(currState->vaddr, 63, tsz) != mask(64-tsz) ||
821  currState->tcr.epd1)
822  fault = true;
823  break;
824  default:
825  // top two bytes must be all 0s or all 1s, else invalid addr
826  fault = true;
827  }
828  ps = currState->tcr.ips;
829  }
830  break;
831  case EL2:
832  switch(bits(currState->vaddr, 63,48)) {
833  case 0:
834  DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
835  ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL2);
836  tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
837  tg = GrainMap_tg0[currState->tcr.tg0];
838  currState->hpd = currState->hcr.e2h ?
839  currState->tcr.hpd0 : currState->tcr.hpd;
840  currState->isUncacheable = currState->tcr.irgn0 == 0;
841  break;
842 
843  case 0xffff:
844  DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
845  ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL2);
846  tsz = adjustTableSizeAArch64(64 - currState->tcr.t1sz);
847  tg = GrainMap_tg1[currState->tcr.tg1];
848  currState->hpd = currState->tcr.hpd1;
849  currState->isUncacheable = currState->tcr.irgn1 == 0;
850  if (bits(currState->vaddr, 63, tsz) != mask(64-tsz) ||
851  currState->tcr.epd1 || !currState->hcr.e2h)
852  fault = true;
853  break;
854 
855  default:
856  // invalid addr if top two bytes are not all 0s
857  fault = true;
858  }
859  ps = currState->tcr.ps;
860  break;
861  case EL3:
862  switch(bits(currState->vaddr, 63,48)) {
863  case 0:
864  DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
865  ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL3);
866  tsz = adjustTableSizeAArch64(64 - currState->tcr.t0sz);
867  tg = GrainMap_tg0[currState->tcr.tg0];
868  currState->hpd = currState->tcr.hpd;
869  currState->isUncacheable = currState->tcr.irgn0 == 0;
870  break;
871  default:
872  // invalid addr if top two bytes are not all 0s
873  fault = true;
874  }
875  ps = currState->tcr.ps;
876  break;
877  }
878 
879  const bool is_atomic = currState->req->isAtomic();
880 
881  if (fault) {
882  Fault f;
883  if (currState->isFetch)
884  f = std::make_shared<PrefetchAbort>(
885  currState->vaddr_tainted,
886  ArmFault::TranslationLL + L0, isStage2,
887  ArmFault::LpaeTran);
888  else
889  f = std::make_shared<DataAbort>(
890  currState->vaddr_tainted,
891  TlbEntry::DomainType::NoAccess,
892  is_atomic ? false : currState->isWrite,
893  ArmFault::TranslationLL + L0,
894  isStage2, ArmFault::LpaeTran);
895 
896  if (currState->timing) {
897  pending = false;
898  nextWalk(currState->tc);
899  currState = NULL;
900  } else {
901  currState->tc = NULL;
902  currState->req = NULL;
903  }
904  return f;
905 
906  }
907 
908  if (tg == ReservedGrain) {
909  warn_once("Reserved granule size requested; gem5's IMPLEMENTATION "
910  "DEFINED behavior takes this to mean 4KB granules\n");
911  tg = Grain4KB;
912  }
913 
914  // Determine starting lookup level
915  // See aarch64/translation/walk in Appendix G: ARMv8 Pseudocode Library
916  // in ARM DDI 0487A. These table values correspond to the cascading tests
917  // to compute the lookup level and are of the form
918  // (grain_size + N*stride), for N = {1, 2, 3}.
919  // A value of 64 will never succeed and a value of 0 will always succeed.
920  if (start_lookup_level == MAX_LOOKUP_LEVELS) {
921  struct GrainMap {
922  GrainSize grain_size;
923  unsigned lookup_level_cutoff[MAX_LOOKUP_LEVELS];
924  };
925  static const GrainMap GM[] = {
926  { Grain4KB, { 39, 30, 0, 0 } },
927  { Grain16KB, { 47, 36, 25, 0 } },
928  { Grain64KB, { 64, 42, 29, 0 } }
929  };
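// Worked example (illustrative): a 4KB granule with tsz == 39 scans the
// Grain4KB cutoffs {39, 30, 0, 0} in order; 39 > 39 fails but 39 > 30
// holds, so the walk starts at L1 and the L0 lookup is skipped entirely.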
930 
931  const unsigned *lookup = NULL; // points to a lookup_level_cutoff
932 
933  for (unsigned i = 0; i < 3; ++i) { // choose entry of GM[]
934  if (tg == GM[i].grain_size) {
935  lookup = GM[i].lookup_level_cutoff;
936  break;
937  }
938  }
939  assert(lookup);
940 
941  for (int L = L0; L != MAX_LOOKUP_LEVELS; ++L) {
942  if (tsz > lookup[L]) {
943  start_lookup_level = (LookupLevel) L;
944  break;
945  }
946  }
947  panic_if(start_lookup_level == MAX_LOOKUP_LEVELS,
948  "Table walker couldn't find lookup level\n");
949  }
950 
951  int stride = tg - 3;
952 
953  // Determine table base address
954  int base_addr_lo = 3 + tsz - stride * (3 - start_lookup_level) - tg;
955  Addr base_addr = mbits(ttbr, 47, base_addr_lo);
956 
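// Worked example (illustrative): a 4KB granule (tg == 12) resolves
// stride == 9 index bits per level, so starting at L1 with tsz == 39:
//   base_addr_lo = 3 + 39 - 9 * (3 - 1) - 12 == 12
// and the table base is ttbr[47:12], i.e. a 4KB-aligned table.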
957  // Determine physical address size and raise an Address Size Fault if
958  // necessary
959  int pa_range = decodePhysAddrRange64(ps);
960  // Clamp to lower limit
961  if (pa_range > physAddrRange)
962  currState->physAddrRange = physAddrRange;
963  else
964  currState->physAddrRange = pa_range;
965  if (checkAddrSizeFaultAArch64(ttbr, currState->physAddrRange)) {
966  DPRINTF(TLB, "Address size fault before any lookup\n");
967  Fault f;
968  if (currState->isFetch)
969  f = std::make_shared<PrefetchAbort>(
970  currState->vaddr_tainted,
971  ArmFault::AddressSizeLL + start_lookup_level,
972  isStage2,
973  ArmFault::LpaeTran);
974  else
975  f = std::make_shared<DataAbort>(
976  currState->vaddr_tainted,
977  TlbEntry::DomainType::NoAccess,
978  is_atomic ? false : currState->isWrite,
979  ArmFault::AddressSizeLL + start_lookup_level,
980  isStage2,
981  ArmFault::LpaeTran);
982 
983 
984  if (currState->timing) {
985  pending = false;
986  nextWalk(currState->tc);
987  currState = NULL;
988  } else {
989  currState->tc = NULL;
990  currState->req = NULL;
991  }
992  return f;
993 
994  }
995 
996  // Determine descriptor address
997  Addr desc_addr = base_addr |
998  (bits(currState->vaddr, tsz - 1,
999  stride * (3 - start_lookup_level) + tg) << 3);
1000 
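// Worked example (illustrative): continuing the 4KB/L1 case above with
// VA == 0x0000007fc0000000:
//   bits(vaddr, 38, 30) == 0x1ff, shifted left 3 -> 0xff8
//   desc_addr = base_addr | 0xff8
// i.e. the last of the 512 eight-byte entries in the starting table.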
1001  // Trickbox address check
1002  Fault f = testWalk(desc_addr, sizeof(uint64_t),
1003  TlbEntry::DomainType::NoAccess, start_lookup_level);
1004  if (f) {
1005  DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
1006  if (currState->timing) {
1007  pending = false;
1008  nextWalk(currState->tc);
1009  currState = NULL;
1010  } else {
1011  currState->tc = NULL;
1012  currState->req = NULL;
1013  }
1014  return f;
1015  }
1016 
1017  Request::Flags flag = Request::PT_WALK;
1018  if (currState->sctlr.c == 0 || currState->isUncacheable) {
1019  flag.set(Request::UNCACHEABLE);
1020  }
1021 
1022  if (currState->isSecure) {
1023  flag.set(Request::SECURE);
1024  }
1025 
1026  currState->longDesc.lookupLevel = start_lookup_level;
1027  currState->longDesc.aarch64 = true;
1028  currState->longDesc.grainSize = tg;
1029 
1030  if (currState->timing) {
1031  fetchDescriptor(desc_addr, (uint8_t*) &currState->longDesc.data,
1032  sizeof(uint64_t), flag, start_lookup_level,
1033  LongDescEventByLevel[start_lookup_level], NULL);
1034  } else {
1035  fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
1036  sizeof(uint64_t), flag, -1, NULL,
1037  &TableWalker::doLongDescriptor);
1038  f = currState->fault;
1039  }
1040 
1041  return f;
1042 }
1043 
1044 void
1045 TableWalker::memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr,
1046  uint8_t texcb, bool s)
1047 {
1048  // Note: tc and sctlr local variables are hiding tc and sctrl class
1049  // variables
1050  DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s);
1051  te.shareable = false; // default value
1052  te.nonCacheable = false;
1053  te.outerShareable = false;
1054  if (sctlr.tre == 0 || ((sctlr.tre == 1) && (sctlr.m == 0))) {
1055  switch(texcb) {
1056  case 0: // Strongly-ordered
1057  te.nonCacheable = true;
1058  te.mtype = TlbEntry::MemoryType::StronglyOrdered;
1059  te.shareable = true;
1060  te.innerAttrs = 1;
1061  te.outerAttrs = 0;
1062  break;
1063  case 1: // Shareable Device
1064  te.nonCacheable = true;
1065  te.mtype = TlbEntry::MemoryType::Device;
1066  te.shareable = true;
1067  te.innerAttrs = 3;
1068  te.outerAttrs = 0;
1069  break;
1070  case 2: // Outer and Inner Write-Through, no Write-Allocate
1071  te.mtype = TlbEntry::MemoryType::Normal;
1072  te.shareable = s;
1073  te.innerAttrs = 6;
1074  te.outerAttrs = bits(texcb, 1, 0);
1075  break;
1076  case 3: // Outer and Inner Write-Back, no Write-Allocate
1077  te.mtype = TlbEntry::MemoryType::Normal;
1078  te.shareable = s;
1079  te.innerAttrs = 7;
1080  te.outerAttrs = bits(texcb, 1, 0);
1081  break;
1082  case 4: // Outer and Inner Non-cacheable
1083  te.nonCacheable = true;
1084  te.mtype = TlbEntry::MemoryType::Normal;
1085  te.shareable = s;
1086  te.innerAttrs = 0;
1087  te.outerAttrs = bits(texcb, 1, 0);
1088  break;
1089  case 5: // Reserved
1090  panic("Reserved texcb value!\n");
1091  break;
1092  case 6: // Implementation Defined
1093  panic("Implementation-defined texcb value!\n");
1094  break;
1095  case 7: // Outer and Inner Write-Back, Write-Allocate
1096  te.mtype = TlbEntry::MemoryType::Normal;
1097  te.shareable = s;
1098  te.innerAttrs = 5;
1099  te.outerAttrs = 1;
1100  break;
1101  case 8: // Non-shareable Device
1102  te.nonCacheable = true;
1103  te.mtype = TlbEntry::MemoryType::Device;
1104  te.shareable = false;
1105  te.innerAttrs = 3;
1106  te.outerAttrs = 0;
1107  break;
1108  case 9 ... 15: // Reserved
1109  panic("Reserved texcb value!\n");
1110  break;
1111  case 16 ... 31: // Cacheable Memory
1112  te.mtype = TlbEntry::MemoryType::Normal;
1113  te.shareable = s;
1114  if (bits(texcb, 1,0) == 0 || bits(texcb, 3,2) == 0)
1115  te.nonCacheable = true;
1116  te.innerAttrs = bits(texcb, 1, 0);
1117  te.outerAttrs = bits(texcb, 3, 2);
1118  break;
1119  default:
1120  panic("More than 32 states for 5 bits?\n");
1121  }
1122  } else {
1123  assert(tc);
1124  PRRR prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR,
1125  currState->tc, !currState->isSecure));
1126  NMRR nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR,
1127  currState->tc, !currState->isSecure));
1128  DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr);
1129  uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0;
1130  switch(bits(texcb, 2,0)) {
1131  case 0:
1132  curr_tr = prrr.tr0;
1133  curr_ir = nmrr.ir0;
1134  curr_or = nmrr.or0;
1135  te.outerShareable = (prrr.nos0 == 0);
1136  break;
1137  case 1:
1138  curr_tr = prrr.tr1;
1139  curr_ir = nmrr.ir1;
1140  curr_or = nmrr.or1;
1141  te.outerShareable = (prrr.nos1 == 0);
1142  break;
1143  case 2:
1144  curr_tr = prrr.tr2;
1145  curr_ir = nmrr.ir2;
1146  curr_or = nmrr.or2;
1147  te.outerShareable = (prrr.nos2 == 0);
1148  break;
1149  case 3:
1150  curr_tr = prrr.tr3;
1151  curr_ir = nmrr.ir3;
1152  curr_or = nmrr.or3;
1153  te.outerShareable = (prrr.nos3 == 0);
1154  break;
1155  case 4:
1156  curr_tr = prrr.tr4;
1157  curr_ir = nmrr.ir4;
1158  curr_or = nmrr.or4;
1159  te.outerShareable = (prrr.nos4 == 0);
1160  break;
1161  case 5:
1162  curr_tr = prrr.tr5;
1163  curr_ir = nmrr.ir5;
1164  curr_or = nmrr.or5;
1165  te.outerShareable = (prrr.nos5 == 0);
1166  break;
1167  case 6:
1168  panic("Imp defined type\n");
1169  case 7:
1170  curr_tr = prrr.tr7;
1171  curr_ir = nmrr.ir7;
1172  curr_or = nmrr.or7;
1173  te.outerShareable = (prrr.nos7 == 0);
1174  break;
1175  }
1176 
1177  switch(curr_tr) {
1178  case 0:
1179  DPRINTF(TLBVerbose, "StronglyOrdered\n");
1180  te.mtype = TlbEntry::MemoryType::StronglyOrdered;
1181  te.nonCacheable = true;
1182  te.innerAttrs = 1;
1183  te.outerAttrs = 0;
1184  te.shareable = true;
1185  break;
1186  case 1:
1187  DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n",
1188  prrr.ds1, prrr.ds0, s);
1189  te.mtype = TlbEntry::MemoryType::Device;
1190  te.nonCacheable = true;
1191  te.innerAttrs = 3;
1192  te.outerAttrs = 0;
1193  if (prrr.ds1 && s)
1194  te.shareable = true;
1195  if (prrr.ds0 && !s)
1196  te.shareable = true;
1197  break;
1198  case 2:
1199  DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n",
1200  prrr.ns1, prrr.ns0, s);
1201  te.mtype = TlbEntry::MemoryType::Normal;
1202  if (prrr.ns1 && s)
1203  te.shareable = true;
1204  if (prrr.ns0 && !s)
1205  te.shareable = true;
1206  break;
1207  case 3:
1208  panic("Reserved type");
1209  }
1210 
1212  switch(curr_ir) {
1213  case 0:
1214  te.nonCacheable = true;
1215  te.innerAttrs = 0;
1216  break;
1217  case 1:
1218  te.innerAttrs = 5;
1219  break;
1220  case 2:
1221  te.innerAttrs = 6;
1222  break;
1223  case 3:
1224  te.innerAttrs = 7;
1225  break;
1226  }
1227 
1228  switch(curr_or) {
1229  case 0:
1230  te.nonCacheable = true;
1231  te.outerAttrs = 0;
1232  break;
1233  case 1:
1234  te.outerAttrs = 1;
1235  break;
1236  case 2:
1237  te.outerAttrs = 2;
1238  break;
1239  case 3:
1240  te.outerAttrs = 3;
1241  break;
1242  }
1243  }
1244  }
1245  DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, "
1246  "outerAttrs: %d\n",
1247  te.shareable, te.innerAttrs, te.outerAttrs);
1248  te.setAttributes(false);
1249 }
1250 
1251 void
1252 TableWalker::memAttrsLPAE(ThreadContext *tc, TlbEntry &te,
1253  LongDescriptor &lDescriptor)
1254 {
1255  assert(_haveLPAE);
1256 
1257  uint8_t attr;
1258  uint8_t sh = lDescriptor.sh();
1259  // Different format and source of attributes if this is a stage 2
1260  // translation
1261  if (isStage2) {
1262  attr = lDescriptor.memAttr();
1263  uint8_t attr_3_2 = (attr >> 2) & 0x3;
1264  uint8_t attr_1_0 = attr & 0x3;
1265 
1266  DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh);
1267 
1268  if (attr_3_2 == 0) {
1269  te.mtype = attr_1_0 == 0 ? TlbEntry::MemoryType::StronglyOrdered
1270  : TlbEntry::MemoryType::Device;
1271  te.outerAttrs = 0;
1272  te.innerAttrs = attr_1_0 == 0 ? 1 : 3;
1273  te.nonCacheable = true;
1274  } else {
1275  te.mtype = TlbEntry::MemoryType::Normal;
1276  te.outerAttrs = attr_3_2 == 1 ? 0 :
1277  attr_3_2 == 2 ? 2 : 1;
1278  te.innerAttrs = attr_1_0 == 1 ? 0 :
1279  attr_1_0 == 2 ? 6 : 5;
1280  te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1);
1281  }
1282  } else {
1283  uint8_t attrIndx = lDescriptor.attrIndx();
1284 
1285  // LPAE always uses remapping of memory attributes, irrespective of the
1286  // value of SCTLR.TRE
1287  MiscRegIndex reg = attrIndx & 0x4 ? MISCREG_MAIR1 : MISCREG_MAIR0;
1288  int reg_as_int = snsBankedIndex(reg, currState->tc,
1289  !currState->isSecure);
1290  uint32_t mair = currState->tc->readMiscReg(reg_as_int);
1291  attr = (mair >> (8 * (attrIndx % 4))) & 0xff;
1292  uint8_t attr_7_4 = bits(attr, 7, 4);
1293  uint8_t attr_3_0 = bits(attr, 3, 0);
1294  DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx, sh, attr);
1295 
1296  // Note: the memory subsystem only cares about the 'cacheable' memory
1297  // attribute. The other attributes are only used to fill the PAR register,
1298  // providing the illusion of full support
1299  te.nonCacheable = false;
1300 
1301  switch (attr_7_4) {
1302  case 0x0:
1303  // Strongly-ordered or Device memory
1304  if (attr_3_0 == 0x0)
1305  te.mtype = TlbEntry::MemoryType::StronglyOrdered;
1306  else if (attr_3_0 == 0x4)
1307  te.mtype = TlbEntry::MemoryType::Device;
1308  else
1309  panic("Unpredictable behavior\n");
1310  te.nonCacheable = true;
1311  te.outerAttrs = 0;
1312  break;
1313  case 0x4:
1314  // Normal memory, Outer Non-cacheable
1315  te.mtype = TlbEntry::MemoryType::Normal;
1316  te.outerAttrs = 0;
1317  if (attr_3_0 == 0x4)
1318  // Inner Non-cacheable
1319  te.nonCacheable = true;
1320  else if (attr_3_0 < 0x8)
1321  panic("Unpredictable behavior\n");
1322  break;
1323  case 0x8:
1324  case 0x9:
1325  case 0xa:
1326  case 0xb:
1327  case 0xc:
1328  case 0xd:
1329  case 0xe:
1330  case 0xf:
1331  if (attr_7_4 & 0x4) {
1332  te.outerAttrs = (attr_7_4 & 1) ? 1 : 3;
1333  } else {
1334  te.outerAttrs = 0x2;
1335  }
1336  // Normal memory, Outer Cacheable
1337  te.mtype = TlbEntry::MemoryType::Normal;
1338  if (attr_3_0 != 0x4 && attr_3_0 < 0x8)
1339  panic("Unpredictable behavior\n");
1340  break;
1341  default:
1342  panic("Unpredictable behavior\n");
1343  break;
1344  }
1345 
1346  switch (attr_3_0) {
1347  case 0x0:
1348  te.innerAttrs = 0x1;
1349  break;
1350  case 0x4:
1351  te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0;
1352  break;
1353  case 0x8:
1354  case 0x9:
1355  case 0xA:
1356  case 0xB:
1357  te.innerAttrs = 6;
1358  break;
1359  case 0xC:
1360  case 0xD:
1361  case 0xE:
1362  case 0xF:
1363  te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7;
1364  break;
1365  default:
1366  panic("Unpredictable behavior\n");
1367  break;
1368  }
1369  }
1370 
1371  te.outerShareable = sh == 2;
1372  te.shareable = (sh & 0x2) ? true : false;
1373  te.setAttributes(true);
1374  te.attributes |= (uint64_t) attr << 56;
1375 }
1376 
1377 void
1378 TableWalker::memAttrsAArch64(ThreadContext *tc, TlbEntry &te,
1379  LongDescriptor &lDescriptor)
1380 {
1381  uint8_t attr;
1382  uint8_t attr_hi;
1383  uint8_t attr_lo;
1384  uint8_t sh = lDescriptor.sh();
1385 
1386  if (isStage2) {
1387  attr = lDescriptor.memAttr();
1388  uint8_t attr_hi = (attr >> 2) & 0x3;
1389  uint8_t attr_lo = attr & 0x3;
1390 
1391  DPRINTF(TLBVerbose, "memAttrsAArch64 MemAttr:%#x sh:%#x\n", attr, sh);
1392 
1393  if (attr_hi == 0) {
1394  te.mtype = attr_lo == 0 ? TlbEntry::MemoryType::StronglyOrdered
1395  : TlbEntry::MemoryType::Device;
1396  te.outerAttrs = 0;
1397  te.innerAttrs = attr_lo == 0 ? 1 : 3;
1398  te.nonCacheable = true;
1399  } else {
1400  te.mtype = TlbEntry::MemoryType::Normal;
1401  te.outerAttrs = attr_hi == 1 ? 0 :
1402  attr_hi == 2 ? 2 : 1;
1403  te.innerAttrs = attr_lo == 1 ? 0 :
1404  attr_lo == 2 ? 6 : 5;
1405  // Treat write-through memory as uncacheable, this is safe
1406  // but for performance reasons not optimal.
1407  te.nonCacheable = (attr_hi == 1) || (attr_hi == 2) ||
1408  (attr_lo == 1) || (attr_lo == 2);
1409  }
1410  } else {
1411  uint8_t attrIndx = lDescriptor.attrIndx();
1412 
1413  DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh);
1414 
1415  // Select MAIR
1416  uint64_t mair;
1417  switch (currState->el) {
1418  case EL0:
1419  case EL1:
1420  mair = tc->readMiscReg(MISCREG_MAIR_EL1);
1421  break;
1422  case EL2:
1423  mair = tc->readMiscReg(MISCREG_MAIR_EL2);
1424  break;
1425  case EL3:
1426  mair = tc->readMiscReg(MISCREG_MAIR_EL3);
1427  break;
1428  default:
1429  panic("Invalid exception level");
1430  break;
1431  }
1432 
1433  // Select attributes
1434  attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
1435  attr_lo = bits(attr, 3, 0);
1436  attr_hi = bits(attr, 7, 4);
1437 
1438  // Memory type
1439  te.mtype = attr_hi == 0 ? TlbEntry::MemoryType::Device : TlbEntry::MemoryType::Normal;
1440 
1441  // Cacheability
1442  te.nonCacheable = false;
1443  if (te.mtype == TlbEntry::MemoryType::Device) { // Device memory
1444  te.nonCacheable = true;
1445  }
1446  // Treat write-through memory as uncacheable, this is safe
1447  // but for performance reasons not optimal.
1448  switch (attr_hi) {
1449  case 0x1 ... 0x3: // Normal Memory, Outer Write-through transient
1450  case 0x4: // Normal memory, Outer Non-cacheable
1451  case 0x8 ... 0xb: // Normal Memory, Outer Write-through non-transient
1452  te.nonCacheable = true;
1453  }
1454  switch (attr_lo) {
1455  case 0x1 ... 0x3: // Normal Memory, Inner Write-through transient
1456  case 0x9 ... 0xb: // Normal Memory, Inner Write-through non-transient
1457  warn_if(!attr_hi, "Unpredictable behavior");
1458  M5_FALLTHROUGH;
1459  case 0x4: // Device-nGnRE memory or
1460  // Normal memory, Inner Non-cacheable
1461  case 0x8: // Device-nGRE memory or
1462  // Normal memory, Inner Write-through non-transient
1463  te.nonCacheable = true;
1464  }
1465 
1466  te.shareable = sh == 2;
1467  te.outerShareable = (sh & 0x2) ? true : false;
1468  // Attributes formatted according to the 64-bit PAR
1469  te.attributes = ((uint64_t) attr << 56) |
1470  (1 << 11) | // LPAE bit
1471  (te.ns << 9) | // NS bit
1472  (sh << 7);
1473  }
1474 }
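// Worked example (illustrative): attr == 0xff (Normal, inner/outer
// write-back), te.ns == 1 and sh == 2 pack as
//   (0xffULL << 56) | (1 << 11) | (1 << 9) | (2 << 7) == 0xff00000000000b00
// mirroring the 64-bit PAR layout that software later reads back.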
1475 
1476 void
1477 TableWalker::doL1Descriptor()
1478 {
1479  if (currState->fault != NoFault) {
1480  return;
1481  }
1482 
1483  currState->l1Desc.data = htog(currState->l1Desc.data,
1484  byteOrder(currState->tc));
1485 
1486  DPRINTF(TLB, "L1 descriptor for %#x is %#x\n",
1487  currState->vaddr_tainted, currState->l1Desc.data);
1488  TlbEntry te;
1489 
1490  const bool is_atomic = currState->req->isAtomic();
1491 
1492  switch (currState->l1Desc.type()) {
1493  case L1Descriptor::Ignore:
1494  case L1Descriptor::Reserved:
1495  if (!currState->timing) {
1496  currState->tc = NULL;
1497  currState->req = NULL;
1498  }
1499  DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
1500  if (currState->isFetch)
1501  currState->fault =
1502  std::make_shared<PrefetchAbort>(
1503  currState->vaddr_tainted,
1504  ArmFault::TranslationLL + L1,
1505  isStage2,
1506  ArmFault::VmsaTran);
1507  else
1508  currState->fault =
1509  std::make_shared<DataAbort>(
1510  currState->vaddr_tainted,
1511  TlbEntry::DomainType::NoAccess,
1512  is_atomic ? false : currState->isWrite,
1513  ArmFault::TranslationLL + L1, isStage2,
1514  ArmFault::VmsaTran);
1515  return;
1516  case L1Descriptor::Section:
1517  if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) {
1518  /** @todo: check sctlr.ha (bit[17]) if Hardware Access Flag is enabled
1519  * if set, do l1.Desc.setAp0() instead of generating AccessFlag0
1520  */
1521 
1522 
1523  currState->fault = std::make_shared<DataAbort>(
1524  currState->vaddr_tainted,
1525  currState->l1Desc.domain(),
1526  is_atomic ? false : currState->isWrite,
1527  ArmFault::AccessFlagLL + L1,
1528  isStage2,
1529  ArmFault::VmsaTran);
1530  }
1531  if (currState->l1Desc.supersection()) {
1532  panic("Haven't implemented supersections\n");
1533  }
1534  insertTableEntry(currState->l1Desc, false);
1535  return;
1536  case L1Descriptor::PageTable:
1537  {
1538  Addr l2desc_addr;
1539  l2desc_addr = currState->l1Desc.l2Addr() |
1540  (bits(currState->vaddr, 19, 12) << 2);
1541  DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n",
1542  l2desc_addr, currState->isSecure ? "s" : "ns");
1543 
1544  // Trickbox address check
1545  currState->fault = testWalk(l2desc_addr, sizeof(uint32_t),
1546  currState->l1Desc.domain(), L2);
1547 
1548  if (currState->fault) {
1549  if (!currState->timing) {
1550  currState->tc = NULL;
1551  currState->req = NULL;
1552  }
1553  return;
1554  }
1555 
1556  Request::Flags flag = Request::PT_WALK;
1557 
1558  if (currState->sctlr.c == 0 || currState->isUncacheable) {
1559  flag.set(Request::UNCACHEABLE);
1560  }
1561 
1562  if (currState->isSecure)
1563  flag.set(Request::SECURE);
1564 
1565  bool delayed;
1566  delayed = fetchDescriptor(l2desc_addr,
1567  (uint8_t*)&currState->l2Desc.data,
1568  sizeof(uint32_t), flag, -1, &doL2DescEvent,
1569  &TableWalker::doL2Descriptor);
1570  if (delayed) {
1571  currState->delayed = true;
1572  }
1573 
1574  return;
1575  }
1576  default:
1577  panic("A new type in a 2 bit field?\n");
1578  }
1579 }
1580 
1581 Fault
1582 TableWalker::generateLongDescFault(ArmFault::FaultSource src)
1583 {
1584  if (currState->isFetch) {
1585  return std::make_shared<PrefetchAbort>(
1586  currState->vaddr_tainted,
1587  src + currState->longDesc.lookupLevel,
1588  isStage2,
1589  ArmFault::LpaeTran);
1590  } else {
1591  return std::make_shared<DataAbort>(
1592  currState->vaddr_tainted,
1593  TlbEntry::DomainType::NoAccess,
1594  currState->req->isAtomic() ? false : currState->isWrite,
1595  src + currState->longDesc.lookupLevel,
1596  isStage2,
1597  ArmFault::LpaeTran);
1598  }
1599 }
1600 
1601 void
1602 TableWalker::doLongDescriptor()
1603 {
1604  if (currState->fault != NoFault) {
1605  return;
1606  }
1607 
1608  currState->longDesc.data = htog(currState->longDesc.data,
1609  byteOrder(currState->tc));
1610 
1611  DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n",
1612  currState->longDesc.lookupLevel, currState->vaddr_tainted,
1613  currState->longDesc.data,
1614  currState->aarch64 ? "AArch64" : "long-desc.");
1615 
1616  if ((currState->longDesc.type() == LongDescriptor::Block) ||
1617  (currState->longDesc.type() == LongDescriptor::Page)) {
1618  DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, pxn: %d, "
1619  "xn: %d, ap: %d, af: %d, type: %d\n",
1620  currState->longDesc.lookupLevel,
1621  currState->longDesc.data,
1622  currState->longDesc.pxn(),
1623  currState->longDesc.xn(),
1624  currState->longDesc.ap(),
1625  currState->longDesc.af(),
1626  currState->longDesc.type());
1627  } else {
1628  DPRINTF(TLBVerbose, "Analyzing L%d descriptor: %#llx, type: %d\n",
1629  currState->longDesc.lookupLevel,
1630  currState->longDesc.data,
1631  currState->longDesc.type());
1632  }
1633 
1634  TlbEntry te;
1635 
1636  switch (currState->longDesc.type()) {
1637  case LongDescriptor::Invalid:
1638  DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
1639  currState->longDesc.lookupLevel,
1640  ArmFault::TranslationLL + currState->longDesc.lookupLevel);
1641 
1642  currState->fault = generateLongDescFault(ArmFault::TranslationLL);
1643  if (!currState->timing) {
1644  currState->tc = NULL;
1645  currState->req = NULL;
1646  }
1647  return;
1648 
1649  case LongDescriptor::Block:
1650  case LongDescriptor::Page:
1651  {
1652  auto fault_source = ArmFault::FaultSourceInvalid;
1653  // Check for address size fault
1654  if (currState->aarch64 && checkAddrSizeFaultAArch64(
1655  currState->longDesc.paddr(),
1656  currState->physAddrRange)) {
1657 
1658 
1659  DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1660  currState->longDesc.lookupLevel);
1661  fault_source = ArmFault::AddressSizeLL;
1662 
1663  // Check for access fault
1664  } else if (currState->longDesc.af() == 0) {
1665 
1666  DPRINTF(TLB, "L%d descriptor causing Access Fault\n",
1668  fault_source = ArmFault::AccessFlagLL;
1669  }
1670 
1671  if (fault_source != ArmFault::FaultSourceInvalid) {
1672  currState->fault = generateLongDescFault(fault_source);
1673  } else {
1674  insertTableEntry(currState->longDesc, true);
1675  }
1676  }
1677  return;
1678  case LongDescriptor::Table:
1679  {
1680  // Set hierarchical permission flags
1681  currState->secureLookup = currState->secureLookup &&
1682  currState->longDesc.secureTable();
1683  currState->rwTable = currState->rwTable &&
1684  (currState->longDesc.rwTable() || currState->hpd);
1685  currState->userTable = currState->userTable &&
1686  (currState->longDesc.userTable() || currState->hpd);
1687  currState->xnTable = currState->xnTable ||
1688  (currState->longDesc.xnTable() && !currState->hpd);
1689  currState->pxnTable = currState->pxnTable ||
1690  (currState->longDesc.pxnTable() && !currState->hpd);
1691 
1692  // Set up next level lookup
1693  Addr next_desc_addr = currState->longDesc.nextDescAddr(
1694  currState->vaddr);
1695 
1696  DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
1697  currState->longDesc.lookupLevel,
1698  currState->longDesc.lookupLevel + 1,
1699  next_desc_addr,
1700  currState->secureLookup ? "s" : "ns");
1701 
1702  // Check for address size fault
1703  if (currState->aarch64 && checkAddrSizeFaultAArch64(
1704  next_desc_addr, currState->physAddrRange)) {
1705  DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1706  currState->longDesc.lookupLevel);
1707 
1708  currState->fault = generateLongDescFault(
1709  ArmFault::AddressSizeLL);
1710  return;
1711  }
1712 
1713  // Trickbox address check
1714  currState->fault = testWalk(
1715  next_desc_addr, sizeof(uint64_t), TlbEntry::DomainType::Client,
1716  toLookupLevel(currState->longDesc.lookupLevel + 1));
1717 
1718  if (currState->fault) {
1719  if (!currState->timing) {
1720  currState->tc = NULL;
1721  currState->req = NULL;
1722  }
1723  return;
1724  }
1725 
1726  Request::Flags flag = Request::PT_WALK;
1727  if (currState->secureLookup)
1728  flag.set(Request::SECURE);
1729 
1730  if (currState->sctlr.c == 0 || currState->isUncacheable) {
1731  flag.set(Request::UNCACHEABLE);
1732  }
1733 
1734  LookupLevel L = currState->longDesc.lookupLevel =
1735  (LookupLevel) (currState->longDesc.lookupLevel + 1);
1736  Event *event = NULL;
1737  switch (L) {
1738  case L1:
1739  assert(currState->aarch64);
1740  case L2:
1741  case L3:
1742  event = LongDescEventByLevel[L];
1743  break;
1744  default:
1745  panic("Wrong lookup level in table walk\n");
1746  break;
1747  }
1748 
1749  bool delayed;
1750  delayed = fetchDescriptor(next_desc_addr, (uint8_t*)&currState->longDesc.data,
1751  sizeof(uint64_t), flag, -1, event,
1752  &TableWalker::doLongDescriptor);
1753  if (delayed) {
1754  currState->delayed = true;
1755  }
1756  }
1757  return;
1758  default:
1759  panic("A new type in a 2 bit field?\n");
1760  }
1761 }
1762 
1763 void
1764 TableWalker::doL2Descriptor()
1765 {
1766  if (currState->fault != NoFault) {
1767  return;
1768  }
1769 
1771  byteOrder(currState->tc));
1772 
1773  DPRINTF(TLB, "L2 descriptor for %#x is %#x\n",
1774  currState->vaddr_tainted, currState->l2Desc.data);
1775  TlbEntry te;
1776 
1777  const bool is_atomic = currState->req->isAtomic();
1778 
1779  if (currState->l2Desc.invalid()) {
1780  DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
1781  if (!currState->timing) {
1782  currState->tc = NULL;
1783  currState->req = NULL;
1784  }
1785  if (currState->isFetch)
1786  currState->fault = std::make_shared<PrefetchAbort>(
1787  currState->vaddr_tainted,
1788  ArmFault::TranslationLL + L2,
1789  isStage2,
1790  ArmFault::VmsaTran);
1791  else
1792  currState->fault = std::make_shared<DataAbort>(
1793  currState->vaddr_tainted, currState->l2Desc.domain(),
1794  is_atomic ? false : currState->isWrite,
1795  ArmFault::TranslationLL + L2,
1796  isStage2,
1797  ArmFault::VmsaTran);
1798  return;
1799  }
1800 
1801  if (currState->sctlr.afe && bits(currState->l2Desc.ap(), 0) == 0) {
1805  DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
1806  currState->sctlr.afe, currState->l2Desc.ap());
1807 
1808  currState->fault = std::make_shared<DataAbort>(
1809  currState->vaddr_tainted,
1810  currState->l2Desc.domain(),
1811  is_atomic ? false : currState->isWrite,
1812  ArmFault::AccessFlagLL + L2, isStage2,
1813  ArmFault::VmsaTran);
1814  }
1815 
1816  insertTableEntry(currState->l2Desc, false);
1817 }
1818 
1819 void
1820 TableWalker::doL1DescriptorWrapper()
1821 {
1822  currState = stateQueues[L1].front();
1823  currState->delayed = false;
1824  // if there's a stage2 translation object we don't need it any more
1825  if (currState->stage2Tran) {
1826  delete currState->stage2Tran;
1827  currState->stage2Tran = NULL;
1828  }
1829 
1830 
1831  DPRINTF(TLBVerbose, "L1 Desc object host addr: %p\n",&currState->l1Desc.data);
1832  DPRINTF(TLBVerbose, "L1 Desc object data: %08x\n",currState->l1Desc.data);
1833 
1834  DPRINTF(TLBVerbose, "calling doL1Descriptor for vaddr:%#x\n", currState->vaddr_tainted);
1835  doL1Descriptor();
1836 
1837  stateQueues[L1].pop_front();
1838  // Check if fault was generated
1839  if (currState->fault != NoFault) {
1840  currState->transState->finish(currState->fault, currState->req,
1841  currState->tc, currState->mode);
1842  statWalksShortTerminatedAtLevel[0]++;
1843 
1844  pending = false;
1845  nextWalk(currState->tc);
1846 
1847  currState->req = NULL;
1848  currState->tc = NULL;
1849  currState->delayed = false;
1850  delete currState;
1851  }
1852  else if (!currState->delayed) {
1853  // delay is not set so there is no L2 to do
1854  // Don't finish the translation if a stage 2 look up is underway
1855  statWalkServiceTime.sample(curTick() - currState->startTime);
1856  DPRINTF(TLBVerbose, "calling translateTiming again\n");
1857  tlb->translateTiming(currState->req, currState->tc,
1858  currState->transState, currState->mode);
1859  statWalksShortTerminatedAtLevel[0]++;
1860 
1861  pending = false;
1862  nextWalk(currState->tc);
1863 
1864  currState->req = NULL;
1865  currState->tc = NULL;
1866  currState->delayed = false;
1867  delete currState;
1868  } else {
1869  // need to do L2 descriptor
1870  stateQueues[L2].push_back(currState);
1871  }
1872  currState = NULL;
1873 }
1874 
1875 void
1876 TableWalker::doL2DescriptorWrapper()
1877 {
1878  currState = stateQueues[L2].front();
1879  assert(currState->delayed);
1880  // if there's a stage2 translation object we don't need it any more
1881  if (currState->stage2Tran) {
1882  delete currState->stage2Tran;
1883  currState->stage2Tran = NULL;
1884  }
1885 
1886  DPRINTF(TLBVerbose, "calling doL2Descriptor for vaddr:%#x\n",
1887  currState->vaddr_tainted);
1888  doL2Descriptor();
1889 
1890  // Check if fault was generated
1891  if (currState->fault != NoFault) {
1892  currState->transState->finish(currState->fault, currState->req,
1893  currState->tc, currState->mode);
1894  statWalksShortTerminatedAtLevel[1]++;
1895  } else {
1896  statWalkServiceTime.sample(curTick() - currState->startTime);
1897  DPRINTF(TLBVerbose, "calling translateTiming again\n");
1898  tlb->translateTiming(currState->req, currState->tc,
1899  currState->transState, currState->mode);
1900  statWalksShortTerminatedAtLevel[1]++;
1901  }
1902 
1903 
1904  stateQueues[L2].pop_front();
1905  pending = false;
1906  nextWalk(currState->tc);
1907 
1908  currState->req = NULL;
1909  currState->tc = NULL;
1910  currState->delayed = false;
1911 
1912  delete currState;
1913  currState = NULL;
1914 }
1915 
1916 void
1917 TableWalker::doL0LongDescriptorWrapper()
1918 {
1919  doLongDescriptorWrapper(L0);
1920 }
1921 
1922 void
1923 TableWalker::doL1LongDescriptorWrapper()
1924 {
1925  doLongDescriptorWrapper(L1);
1926 }
1927 
1928 void
1929 TableWalker::doL2LongDescriptorWrapper()
1930 {
1931  doLongDescriptorWrapper(L2);
1932 }
1933 
1934 void
1935 TableWalker::doL3LongDescriptorWrapper()
1936 {
1937  doLongDescriptorWrapper(L3);
1938 }
1939 
1940 void
1941 TableWalker::doLongDescriptorWrapper(LookupLevel curr_lookup_level)
1942 {
1943  currState = stateQueues[curr_lookup_level].front();
1944  assert(curr_lookup_level == currState->longDesc.lookupLevel);
1945  currState->delayed = false;
1946 
1947  // if there's a stage2 translation object we don't need it any more
1948  if (currState->stage2Tran) {
1949  delete currState->stage2Tran;
1950  currState->stage2Tran = NULL;
1951  }
1952 
1953  DPRINTF(TLBVerbose, "calling doLongDescriptor for vaddr:%#x\n",
1954  currState->vaddr_tainted);
1955  doLongDescriptor();
1956 
1957  stateQueues[curr_lookup_level].pop_front();
1958 
1959  if (currState->fault != NoFault) {
1960  // A fault was generated
1961  currState->transState->finish(currState->fault, currState->req,
1962  currState->tc, currState->mode);
1963 
1964  pending = false;
1965  nextWalk(currState->tc);
1966 
1967  currState->req = NULL;
1968  currState->tc = NULL;
1969  currState->delayed = false;
1970  delete currState;
1971  } else if (!currState->delayed) {
1972  // No additional lookups required
1973  DPRINTF(TLBVerbose, "calling translateTiming again\n");
1974  statWalkServiceTime.sample(curTick() - currState->startTime);
1975  tlb->translateTiming(currState->req, currState->tc,
1976  currState->transState, currState->mode);
1977  statWalksLongTerminatedAtLevel[(unsigned) curr_lookup_level]++;
1978 
1979  pending = false;
1980  nextWalk(currState->tc);
1981 
1982  currState->req = NULL;
1983  currState->tc = NULL;
1984  currState->delayed = false;
1985  delete currState;
1986  } else {
1987  if (curr_lookup_level >= MAX_LOOKUP_LEVELS - 1)
1988  panic("Max. number of lookups already reached in table walk\n");
1989  // Need to perform additional lookups
1990  stateQueues[currState->longDesc.lookupLevel].push_back(currState);
1991  }
1992  currState = NULL;
1993 }
1994 
1995 
1996 void
1997 TableWalker::nextWalk(ThreadContext *tc)
1998 {
1999  if (pendingQueue.size())
2000  schedule(doProcessEvent, clockEdge(Cycles(1)));
2001  else
2002  completeDrain();
2003 }
2004 
2005 bool
2006 TableWalker::fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes,
2007  Request::Flags flags, int queueIndex, Event *event,
2008  void (TableWalker::*doDescriptor)())
2009 {
2010  bool isTiming = currState->timing;
2011 
2012  DPRINTF(TLBVerbose, "Fetching descriptor at address: 0x%x stage2Req: %d\n",
2013  descAddr, currState->stage2Req);
2014 
2015  // If this translation has a stage 2 then we know descAddr is an IPA and
2016  // needs to be translated before we can access the page table. Do that
2017  // check here.
2018  if (currState->stage2Req) {
2019  Fault fault;
2020  flags = flags | TLB::MustBeOne;
2021 
2022  if (isTiming) {
2023  Stage2MMU::Stage2Translation *tran = new
2024  Stage2MMU::Stage2Translation(*stage2Mmu, data, event,
2025  currState->vaddr);
2026  currState->stage2Tran = tran;
2027  stage2Mmu->readDataTimed(currState->tc, descAddr, tran, numBytes,
2028  flags);
2029  fault = tran->fault;
2030  } else {
2031  fault = stage2Mmu->readDataUntimed(currState->tc,
2032  currState->vaddr, descAddr, data, numBytes, flags,
2033  currState->functional);
2034  }
2035 
2036  if (fault != NoFault) {
2037  currState->fault = fault;
2038  }
2039  if (isTiming) {
2040  if (queueIndex >= 0) {
2041  DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
2042  stateQueues[queueIndex].size());
2043  stateQueues[queueIndex].push_back(currState);
2044  currState = NULL;
2045  }
2046  } else {
2047  (this->*doDescriptor)();
2048  }
2049  } else {
2050  if (isTiming) {
2051  port->dmaAction(MemCmd::ReadReq, descAddr, numBytes, event, data,
2052  currState->tc->getCpuPtr()->clockPeriod(),flags);
2053  if (queueIndex >= 0) {
2054  DPRINTF(TLBVerbose, "Adding to walker fifo: queue size before adding: %d\n",
2055  stateQueues[queueIndex].size());
2056  stateQueues[queueIndex].push_back(currState);
2057  currState = NULL;
2058  }
2059  } else if (!currState->functional) {
2060  port->dmaAction(MemCmd::ReadReq, descAddr, numBytes, NULL, data,
2061  currState->tc->getCpuPtr()->clockPeriod(), flags);
2062  (this->*doDescriptor)();
2063  } else {
2064  RequestPtr req = std::make_shared<Request>(
2065  descAddr, numBytes, flags, masterId);
2066 
2067  req->taskId(ContextSwitchTaskId::DMA);
2068  PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
2069  pkt->dataStatic(data);
2070  port->sendFunctional(pkt);
2071  (this->*doDescriptor)();
2072  delete pkt;
2073  }
2074  }
2075  return (isTiming);
2076 }
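// Usage note (illustrative, not from the source): the return value is just
// "was this a timing access?", which callers read as "descriptor data is
// not valid yet":
//
//   bool delayed = fetchDescriptor(addr, (uint8_t*)&desc.data,
//                                  sizeof(uint64_t), flag, level, event,
//                                  &TableWalker::doLongDescriptor);
//   if (!delayed)
//       f = currState->fault; // atomic/functional: walked synchronously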
2077 
2078 void
2079 TableWalker::insertTableEntry(DescriptorBase &descriptor, bool longDescriptor)
2080 {
2081  TlbEntry te;
2082 
2083  // Create and fill a new page table entry
2084  te.valid = true;
2085  te.longDescFormat = longDescriptor;
2086  te.isHyp = currState->isHyp;
2087  te.asid = currState->asid;
2088  te.vmid = currState->vmid;
2089  te.N = descriptor.offsetBits();
2090  te.vpn = currState->vaddr >> te.N;
2091  te.size = (1<<te.N) - 1;
2092  te.pfn = descriptor.pfn();
2093  te.domain = descriptor.domain();
2094  te.lookupLevel = descriptor.lookupLevel;
2095  te.ns = !descriptor.secure(haveSecurity, currState) || isStage2;
2096  te.nstid = !currState->isSecure;
2097  te.xn = descriptor.xn();
2098  if (currState->aarch64)
2099  te.el = currState->el;
2100  else
2101  te.el = EL1;
2102 
2103  statPageSizes[pageSizeNtoStatBin(te.N)]++;
2104  statRequestOrigin[COMPLETED][currState->isFetch]++;
2105 
2106  // ASID has no meaning for stage 2 TLB entries, so mark all stage 2 entries
2107  // as global
2108  te.global = descriptor.global(currState) || isStage2;
2109  if (longDescriptor) {
2110  LongDescriptor lDescriptor =
2111  dynamic_cast<LongDescriptor &>(descriptor);
2112 
2113  te.xn |= currState->xnTable;
2114  te.pxn = currState->pxnTable || lDescriptor.pxn();
2115  if (isStage2) {
2116  // this is actually the HAP field, but it's stored in the same bit
2117  // positions as the AP field in a stage 1 translation.
2118  te.hap = lDescriptor.ap();
2119  } else {
2120  te.ap = ((!currState->rwTable || descriptor.ap() >> 1) << 1) |
2121  (currState->userTable && (descriptor.ap() & 0x1));
2122  }
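// Worked example (illustrative): a leaf with descriptor.ap() == 0b01
// (EL0-accessible, read-write) reached through tables that cleared
// userTable gives
//   te.ap = ((!rwTable || 0) << 1) | (userTable && 1) == 0b00
// so the hierarchical table bits can only tighten, never widen, the
// leaf's permissions.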
2123  if (currState->aarch64)
2124  memAttrsAArch64(currState->tc, te, lDescriptor);
2125  else
2126  memAttrsLPAE(currState->tc, te, lDescriptor);
2127  } else {
2128  te.ap = descriptor.ap();
2129  memAttrs(currState->tc, te, currState->sctlr, descriptor.texcb(),
2130  descriptor.shareable());
2131  }
2132 
2133  // Debug output
2134  DPRINTF(TLB, descriptor.dbgHeader().c_str());
2135  DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
2136  te.N, te.pfn, te.size, te.global, te.valid);
2137  DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
2138  "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
2139  te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp,
2140  te.nonCacheable, te.ns);
2141  DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
2142  descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
2143  descriptor.getRawData());
2144 
2145  // Insert the entry into the TLB
2146  tlb->insert(currState->vaddr, te);
2147  if (!currState->timing) {
2148  currState->tc = NULL;
2149  currState->req = NULL;
2150  }
2151 }
2152 
2153 ArmISA::TableWalker *
2154 ArmTableWalkerParams::create()
2155 {
2156  return new ArmISA::TableWalker(this);
2157 }
2158 
2159 LookupLevel
2160 TableWalker::toLookupLevel(uint8_t lookup_level_as_int)
2161 {
2162  switch (lookup_level_as_int) {
2163  case L1:
2164  return L1;
2165  case L2:
2166  return L2;
2167  case L3:
2168  return L3;
2169  default:
2170  panic("Invalid lookup level conversion");
2171  }
2172 }
2173 
2174 /* this method keeps track of the table walker queue's residency, so
2175  * needs to be called whenever requests start and complete. */
2176 void
2177 TableWalker::pendingChange()
2178 {
2179  unsigned n = pendingQueue.size();
2180  if ((currState != NULL) && (currState != pendingQueue.front())) {
2181  ++n;
2182  }
2183 
2184  if (n != pendingReqs) {
2185  Tick now = curTick();
2186  statPendingWalks.sample(pendingReqs, now - pendingChangeTick);
2187  pendingReqs = n;
2188  pendingChangeTick = now;
2189  }
2190 }
2191 
2192 Fault
2193 TableWalker::testWalk(Addr pa, Addr size, TlbEntry::DomainType domain,
2194  LookupLevel lookup_level)
2195 {
2196  return tlb->testWalk(pa, size, currState->vaddr, currState->isSecure,
2197  currState->mode, domain, lookup_level);
2198 }
2199 
2200 
2201 uint8_t
2202 TableWalker::pageSizeNtoStatBin(uint8_t N)
2203 {
2204  /* for statPageSizes */
2205  switch(N) {
2206  case 12: return 0; // 4K
2207  case 14: return 1; // 16K (using 16K granule in v8-64)
2208  case 16: return 2; // 64K
2209  case 20: return 3; // 1M
2210  case 21: return 4; // 2M-LPAE
2211  case 24: return 5; // 16M
2212  case 25: return 6; // 32M (using 16K granule in v8-64)
2213  case 29: return 7; // 512M (using 64K granule in v8-64)
2214  case 30: return 8; // 1G-LPAE
2215  default:
2216  panic("unknown page size");
2217  return 255;
2218  }
2219 }
2220 
2221 void
2222 TableWalker::regStats()
2223 {
2224  ClockedObject::regStats();
2225 
2226  statWalks
2227  .name(name() + ".walks")
2228  .desc("Table walker walks requested")
2229  ;
2230 
2232  .name(name() + ".walksShort")
2233  .desc("Table walker walks initiated with short descriptors")
2235  ;
2236 
2238  .name(name() + ".walksLong")
2239  .desc("Table walker walks initiated with long descriptors")
2241  ;
2242 
2244  .init(2)
2245  .name(name() + ".walksShortTerminationLevel")
2246  .desc("Level at which table walker walks "
2247  "with short descriptors terminate")
2249  ;
2252 
2254  .init(4)
2255  .name(name() + ".walksLongTerminationLevel")
2256  .desc("Level at which table walker walks "
2257  "with long descriptors terminate")
2259  ;
2264 
2266  .name(name() + ".walksSquashedBefore")
2267  .desc("Table walks squashed before starting")
2269  ;
2270 
2272  .name(name() + ".walksSquashedAfter")
2273  .desc("Table walks squashed after completion")
2275  ;
2276 
2278  .init(16)
2279  .name(name() + ".walkWaitTime")
2280  .desc("Table walker wait (enqueue to first request) latency")
2282  ;
2283 
2285  .init(16)
2286  .name(name() + ".walkCompletionTime")
2287  .desc("Table walker service (enqueue to completion) latency")
2289  ;
2290 
2292  .init(16)
2293  .name(name() + ".walksPending")
2294  .desc("Table walker pending requests distribution")
2296  ;
2297 
2298  statPageSizes // see DDI 0487A D4-1661
2299  .init(9)
2300  .name(name() + ".walkPageSizes")
2301  .desc("Table walker page sizes translated")
2303  ;
2304  statPageSizes.subname(0, "4K");
2305  statPageSizes.subname(1, "16K");
2306  statPageSizes.subname(2, "64K");
2307  statPageSizes.subname(3, "1M");
2308  statPageSizes.subname(4, "2M");
2309  statPageSizes.subname(5, "16M");
2310  statPageSizes.subname(6, "32M");
2311  statPageSizes.subname(7, "512M");
2312  statPageSizes.subname(8, "1G");
2313 
2315  .init(2,2) // Instruction/Data, requests/completed
2316  .name(name() + ".walkRequestOrigin")
2317  .desc("Table walker requests started/completed, data/inst")
2319  ;
2320  statRequestOrigin.subname(0,"Requested");
2321  statRequestOrigin.subname(1,"Completed");
2322  statRequestOrigin.ysubname(0,"Data");
2323  statRequestOrigin.ysubname(1,"Inst");
2324 }

Generated on Fri Feb 28 2020 16:26:57 for gem5 by doxygen 1.8.13