gem5  v21.1.0.1
table_walker.cc
1 /*
2  * Copyright (c) 2010, 2012-2019, 2021 Arm Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions are
16  * met: redistributions of source code must retain the above copyright
17  * notice, this list of conditions and the following disclaimer;
18  * redistributions in binary form must reproduce the above copyright
19  * notice, this list of conditions and the following disclaimer in the
20  * documentation and/or other materials provided with the distribution;
21  * neither the name of the copyright holders nor the names of its
22  * contributors may be used to endorse or promote products derived from
23  * this software without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  */
37 #include "arch/arm/table_walker.hh"
38 
39 #include <cassert>
40 #include <memory>
41 
42 #include "arch/arm/faults.hh"
43 #include "arch/arm/mmu.hh"
44 #include "arch/arm/system.hh"
45 #include "arch/arm/tlb.hh"
46 #include "base/compiler.hh"
47 #include "cpu/base.hh"
48 #include "cpu/thread_context.hh"
49 #include "debug/Checkpoint.hh"
50 #include "debug/Drain.hh"
51 #include "debug/PageTableWalker.hh"
52 #include "debug/TLB.hh"
53 #include "debug/TLBVerbose.hh"
54 #include "sim/system.hh"
55 
56 namespace gem5
57 {
58 
59 using namespace ArmISA;
60 
 61 TableWalker::TableWalker(const Params &p)
 62  : ClockedObject(p),
63  requestorId(p.sys->getRequestorId(this)),
64  port(nullptr),
65  isStage2(p.is_stage2), tlb(NULL),
66  currState(NULL), pending(false),
67  numSquashable(p.num_squash_per_cycle),
68  stats(this),
69  pendingReqs(0),
70  pendingChangeTick(curTick()),
71  doL1DescEvent([this]{ doL1DescriptorWrapper(); }, name()),
72  doL2DescEvent([this]{ doL2DescriptorWrapper(); }, name()),
73  doL0LongDescEvent([this]{ doL0LongDescriptorWrapper(); }, name()),
74  doL1LongDescEvent([this]{ doL1LongDescriptorWrapper(); }, name()),
75  doL2LongDescEvent([this]{ doL2LongDescriptorWrapper(); }, name()),
76  doL3LongDescEvent([this]{ doL3LongDescriptorWrapper(); }, name()),
77  LongDescEventByLevel { &doL0LongDescEvent, &doL1LongDescEvent,
78  &doL2LongDescEvent, &doL3LongDescEvent },
79  doProcessEvent([this]{ processWalkWrapper(); }, name())
80 {
81  sctlr = 0;
82 
83  // Cache system-level properties
84  if (FullSystem) {
85  ArmSystem *armSys = dynamic_cast<ArmSystem *>(p.sys);
86  assert(armSys);
87  haveSecurity = armSys->haveSecurity();
88  _haveLPAE = armSys->haveLPAE();
89  _haveVirtualization = armSys->haveVirtualization();
90  _physAddrRange = armSys->physAddrRange();
91  _haveLargeAsid64 = armSys->haveLargeAsid64();
92  } else {
93  haveSecurity = _haveLPAE = _haveVirtualization = false;
94  _haveLargeAsid64 = false;
95  _physAddrRange = 48;
96  }
97 
98 }
99 
 100 TableWalker::~TableWalker()
 101 {
 102  ;
 103 }
104 
 105 TableWalker::Port &
 106 TableWalker::getTableWalkerPort()
 107 {
 108  return static_cast<Port&>(getPort("port"));
 109 }
110 
111 Port &
112 TableWalker::getPort(const std::string &if_name, PortID idx)
113 {
114  if (if_name == "port") {
115  return *port;
116  }
117  return ClockedObject::getPort(if_name, idx);
118 }
119 
 120 TableWalker::WalkerState::WalkerState() :
 121  tc(nullptr), aarch64(false), el(EL0), physAddrRange(0), req(nullptr),
122  asid(0), vmid(0), isHyp(false), transState(nullptr),
123  vaddr(0), vaddr_tainted(0),
124  sctlr(0), scr(0), cpsr(0), tcr(0),
125  htcr(0), hcr(0), vtcr(0),
126  isWrite(false), isFetch(false), isSecure(false),
127  isUncacheable(false),
128  secureLookup(false), rwTable(false), userTable(false), xnTable(false),
129  pxnTable(false), hpd(false), stage2Req(false),
130  stage2Tran(nullptr), timing(false), functional(false),
131  mode(BaseMMU::Read), tranType(TLB::NormalTran), l2Desc(l1Desc),
132  delayed(false), tableWalker(nullptr)
133 {
134 }
135 
 136 TableWalker::Port::Port(TableWalker* _walker, RequestorID id)
 137  : QueuedRequestPort(_walker->name() + ".port", _walker,
138  reqQueue, snoopRespQueue),
139  reqQueue(*_walker, *this), snoopRespQueue(*_walker, *this),
140  requestorId(id)
141 {
142 }
143 
144 PacketPtr
 145 TableWalker::Port::createPacket(
 146  Addr desc_addr, int size,
147  uint8_t *data, Request::Flags flags, Tick delay,
148  Event *event)
149 {
150  RequestPtr req = std::make_shared<Request>(
151  desc_addr, size, flags, requestorId);
152  req->taskId(context_switch_task_id::DMA);
153 
154  PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
155  pkt->dataStatic(data);
156 
157  auto state = new TableWalkerState;
158  state->event = event;
159  state->delay = delay;
160 
161  pkt->senderState = state;
162  return pkt;
163 }
164 
165 void
 166 TableWalker::Port::sendFunctionalReq(
 167  Addr desc_addr, int size,
168  uint8_t *data, Request::Flags flags)
169 {
170  auto pkt = createPacket(desc_addr, size, data, flags, 0, nullptr);
171 
172  sendFunctional(pkt);
173 
174  handleRespPacket(pkt);
175 }
176 
177 void
 178 TableWalker::Port::sendAtomicReq(
 179  Addr desc_addr, int size,
180  uint8_t *data, Request::Flags flags, Tick delay)
181 {
182  auto pkt = createPacket(desc_addr, size, data, flags, delay, nullptr);
183 
184  Tick lat = sendAtomic(pkt);
185 
186  handleRespPacket(pkt, lat);
187 }
188 
189 void
 190 TableWalker::Port::sendTimingReq(
 191  Addr desc_addr, int size,
192  uint8_t *data, Request::Flags flags, Tick delay,
193  Event *event)
194 {
195  auto pkt = createPacket(desc_addr, size, data, flags, delay, event);
196 
197  schedTimingReq(pkt, curTick());
198 }
199 
200 bool
 201 TableWalker::Port::recvTimingResp(PacketPtr pkt)
 202 {
203  // We shouldn't ever get a cacheable block in Modified state.
204  assert(pkt->req->isUncacheable() ||
205  !(pkt->cacheResponding() && !pkt->hasSharers()));
206 
207  handleRespPacket(pkt);
208 
209  return true;
210 }
211 
212 void
 213 TableWalker::Port::handleRespPacket(PacketPtr pkt, Tick delay)
 214 {
215  // Should always see a response with a sender state.
216  assert(pkt->isResponse());
217 
218  // Get the DMA sender state.
219  auto *state = dynamic_cast<TableWalkerState*>(pkt->senderState);
220  assert(state);
221 
222  handleResp(state, pkt->getAddr(), pkt->req->getSize(), delay);
223 
224  delete pkt;
225 }
226 
227 void
 228 TableWalker::Port::handleResp(TableWalkerState *state, Addr addr,
 229  Addr size, Tick delay)
230 {
231  if (state->event) {
232  owner.schedule(state->event, curTick() + delay);
233  }
234  delete state;
235 }
236 
237 void
 238 TableWalker::completeDrain()
 239 {
240  if (drainState() == DrainState::Draining &&
241  stateQueues[L0].empty() && stateQueues[L1].empty() &&
242  stateQueues[L2].empty() && stateQueues[L3].empty() &&
243  pendingQueue.empty()) {
244 
245  DPRINTF(Drain, "TableWalker done draining, processing drain event\n");
246  signalDrainDone();
247  }
248 }
249 
 250 DrainState
 251 TableWalker::drain()
 252 {
253  bool state_queues_not_empty = false;
254 
255  for (int i = 0; i < MAX_LOOKUP_LEVELS; ++i) {
256  if (!stateQueues[i].empty()) {
257  state_queues_not_empty = true;
258  break;
259  }
260  }
261 
262  if (state_queues_not_empty || pendingQueue.size()) {
263  DPRINTF(Drain, "TableWalker not drained\n");
264  return DrainState::Draining;
265  } else {
266  DPRINTF(Drain, "TableWalker free, no need to drain\n");
267  return DrainState::Drained;
268  }
269 }
270 
271 void
 272 TableWalker::drainResume()
 273 {
274  if (params().sys->isTimingMode() && currState) {
275  delete currState;
276  currState = NULL;
277  pendingChange();
278  }
279 }
280 
281 Fault
282 TableWalker::walk(const RequestPtr &_req, ThreadContext *_tc, uint16_t _asid,
283  vmid_t _vmid, bool _isHyp, BaseMMU::Mode _mode,
284  BaseMMU::Translation *_trans, bool _timing, bool _functional,
285  bool secure, TLB::ArmTranslationType tranType,
286  bool _stage2Req)
287 {
288  assert(!(_functional && _timing));
289  ++stats.walks;
290 
291  WalkerState *savedCurrState = NULL;
292 
293  if (!currState && !_functional) {
 294  // For atomic mode, a new WalkerState instance should only be created
295  // once per TLB. For timing mode, a new instance is generated for every
296  // TLB miss.
297  DPRINTF(PageTableWalker, "creating new instance of WalkerState\n");
298 
299  currState = new WalkerState();
300  currState->tableWalker = this;
301  } else if (_functional) {
302  // If we are mixing functional mode with timing (or even
 303  // atomic), we need to be careful and clean up after
304  // ourselves to not risk getting into an inconsistent state.
305  DPRINTF(PageTableWalker,
306  "creating functional instance of WalkerState\n");
307  savedCurrState = currState;
308  currState = new WalkerState();
309  currState->tableWalker = this;
310  } else if (_timing) {
311  // This is a translation that was completed and then faulted again
312  // because some underlying parameters that affect the translation
313  // changed out from under us (e.g. asid). It will either be a
 314  // misprediction, in which case nothing will happen, or we'll use
 315  // this fault to re-execute the faulting instruction, which should clean
316  // up everything.
317  if (currState->vaddr_tainted == _req->getVaddr()) {
319  return std::make_shared<ReExec>();
320  }
321  }
322  pendingChange();
323 
325  currState->tc = _tc;
326  // ARM DDI 0487A.f (ARMv8 ARM) pg J8-5672
327  // aarch32/translation/translation/AArch32.TranslateAddress dictates
328  // even AArch32 EL0 will use AArch64 translation if EL1 is in AArch64.
329  if (isStage2) {
330  currState->el = EL1;
331  currState->aarch64 = ELIs64(_tc, EL2);
332  } else {
333  currState->el =
334  TLB::tranTypeEL(_tc->readMiscReg(MISCREG_CPSR), tranType);
335  currState->aarch64 =
336  ELIs64(_tc, currState->el == EL0 ? EL1 : currState->el);
337  }
338  currState->transState = _trans;
339  currState->req = _req;
341  currState->asid = _asid;
342  currState->vmid = _vmid;
343  currState->isHyp = _isHyp;
344  currState->timing = _timing;
345  currState->functional = _functional;
346  currState->mode = _mode;
347  currState->tranType = tranType;
348  currState->isSecure = secure;
350 
353  currState->vaddr_tainted = currState->req->getVaddr();
354  if (currState->aarch64)
358  else
360 
361  if (currState->aarch64) {
363  if (isStage2) {
365  if (currState->secureLookup) {
366  currState->vtcr =
368  } else {
369  currState->vtcr =
371  }
372  } else switch (currState->el) {
373  case EL0:
374  if (HaveVirtHostExt(currState->tc) &&
375  currState->hcr.tge == 1 && currState->hcr.e2h ==1) {
378  } else {
381  }
382  break;
383  case EL1:
386  break;
387  case EL2:
388  assert(_haveVirtualization);
391  break;
392  case EL3:
393  assert(haveSecurity);
396  break;
397  default:
398  panic("Invalid exception level");
399  break;
400  }
401  } else {
409  }
410  sctlr = currState->sctlr;
411 
414 
416 
417  currState->stage2Req = _stage2Req && !isStage2;
418 
 419  bool long_desc_format = currState->aarch64 || _isHyp || isStage2 ||
 420  longDescFormatInUse(currState->tc);
421 
422  if (long_desc_format) {
 423  // Helper variables used for hierarchical permissions
 424  currState->secureLookup = currState->isSecure;
 425  currState->rwTable = true;
 426  currState->userTable = true;
 427  currState->xnTable = false;
 428  currState->pxnTable = false;
 429 
 430  ++stats.walksLongDescriptor;
 431  } else {
 432  ++stats.walksShortDescriptor;
 433  }
434 
435  if (!currState->timing) {
436  Fault fault = NoFault;
437  if (currState->aarch64)
438  fault = processWalkAArch64();
439  else if (long_desc_format)
440  fault = processWalkLPAE();
441  else
442  fault = processWalk();
443 
444  // If this was a functional non-timing access restore state to
445  // how we found it.
446  if (currState->functional) {
447  delete currState;
448  currState = savedCurrState;
449  }
450  return fault;
451  }
452 
453  if (pending || pendingQueue.size()) {
454  pendingQueue.push_back(currState);
455  currState = NULL;
456  pendingChange();
457  } else {
458  pending = true;
459  pendingChange();
460  if (currState->aarch64)
461  return processWalkAArch64();
462  else if (long_desc_format)
463  return processWalkLPAE();
464  else
465  return processWalk();
466  }
467 
468  return NoFault;
469 }
470 
471 void
 472 TableWalker::processWalkWrapper()
 473 {
474  assert(!currState);
475  assert(pendingQueue.size());
476  pendingChange();
477  currState = pendingQueue.front();
478 
479  // Check if a previous walk filled this request already
480  // @TODO Should this always be the TLB or should we look in the stage2 TLB?
 481  TlbEntry* te = tlb->lookup(currState->vaddr, currState->asid,
 482  currState->vmid, currState->isHyp, currState->isSecure, true, false,
483  currState->el, false, BaseMMU::Read);
484 
485  // Check if we still need to have a walk for this request. If the requesting
486  // instruction has been squashed, or a previous walk has filled the TLB with
487  // a match, we just want to get rid of the walk. The latter could happen
488  // when there are multiple outstanding misses to a single page and a
489  // previous request has been successfully translated.
490  if (!currState->transState->squashed() && !te) {
491  // We've got a valid request, lets process it
492  pending = true;
493  pendingQueue.pop_front();
494  // Keep currState in case one of the processWalk... calls NULLs it
495  WalkerState *curr_state_copy = currState;
496  Fault f;
497  if (currState->aarch64)
498  f = processWalkAArch64();
 499  else if (longDescFormatInUse(currState->tc) ||
 500  currState->isHyp)
 501  f = processWalkLPAE();
502  else
503  f = processWalk();
504 
505  if (f != NoFault) {
506  curr_state_copy->transState->finish(f, curr_state_copy->req,
507  curr_state_copy->tc, curr_state_copy->mode);
508 
509  delete curr_state_copy;
510  }
511  return;
512  }
513 
514 
515  // If the instruction that we were translating for has been
516  // squashed we shouldn't bother.
517  unsigned num_squashed = 0;
518  ThreadContext *tc = currState->tc;
519  while ((num_squashed < numSquashable) && currState &&
520  (currState->transState->squashed() || te)) {
521  pendingQueue.pop_front();
522  num_squashed++;
524 
 525  DPRINTF(TLB, "Squashing table walk for address %#x\n",
 526  currState->vaddr_tainted);
 527 
528  if (currState->transState->squashed()) {
529  // finish the translation which will delete the translation object
 530  currState->transState->finish(
 531  std::make_shared<UnimpFault>("Squashed Inst"),
 532  currState->req, currState->tc, currState->mode);
 533  } else {
534  // translate the request now that we know it will work
538 
539  }
540 
541  // delete the current request
542  delete currState;
543 
 544  // peek at the next one
545  if (pendingQueue.size()) {
 546  currState = pendingQueue.front();
 547  te = tlb->lookup(currState->vaddr, currState->asid,
 548  currState->vmid, currState->isHyp, currState->isSecure, true,
 549  false, currState->el, false, BaseMMU::Read);
550  } else {
551  // Terminate the loop, nothing more to do
552  currState = NULL;
553  }
554  }
555  pendingChange();
556 
557  // if we still have pending translations, schedule more work
558  nextWalk(tc);
559  currState = NULL;
560 }
561 
562 Fault
 563 TableWalker::processWalk()
 564 {
565  Addr ttbr = 0;
566 
567  // For short descriptors, translation configs are held in
568  // TTBR1.
 569  RegVal ttbr1 = currState->tc->readMiscReg(snsBankedIndex(
 570  MISCREG_TTBR1, currState->tc, !currState->isSecure));
 571 
572  const auto irgn0_mask = 0x1;
573  const auto irgn1_mask = 0x40;
574  currState->isUncacheable = (ttbr1 & (irgn0_mask | irgn1_mask)) == 0;
575 
576  // If translation isn't enabled, we shouldn't be here
577  assert(currState->sctlr.m || isStage2);
578  const bool is_atomic = currState->req->isAtomic();
579 
 580  DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
 581  currState->vaddr_tainted, currState->ttbcr, mbits(currState->vaddr, 31,
 582  32 - currState->ttbcr.n));
583 
585 
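 // Short-descriptor TTBR selection: with TTBCR.N == 0 only TTBR0 is used;
 // otherwise a VA whose top N bits are all zero is translated via TTBR0
 // and any other VA via TTBR1.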
586  if (currState->ttbcr.n == 0 || !mbits(currState->vaddr, 31,
587  32 - currState->ttbcr.n)) {
588  DPRINTF(TLB, " - Selecting TTBR0\n");
589  // Check if table walk is allowed when Security Extensions are enabled
590  if (haveSecurity && currState->ttbcr.pd0) {
591  if (currState->isFetch)
592  return std::make_shared<PrefetchAbort>(
595  isStage2,
597  else
598  return std::make_shared<DataAbort>(
601  is_atomic ? false : currState->isWrite,
604  }
607  } else {
608  DPRINTF(TLB, " - Selecting TTBR1\n");
609  // Check if table walk is allowed when Security Extensions are enabled
610  if (haveSecurity && currState->ttbcr.pd1) {
611  if (currState->isFetch)
612  return std::make_shared<PrefetchAbort>(
615  isStage2,
617  else
618  return std::make_shared<DataAbort>(
621  is_atomic ? false : currState->isWrite,
624  }
625  ttbr = ttbr1;
626  currState->ttbcr.n = 0;
627  }
628 
629  Addr l1desc_addr = mbits(ttbr, 31, 14 - currState->ttbcr.n) |
630  (bits(currState->vaddr, 31 - currState->ttbcr.n, 20) << 2);
631  DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr,
632  currState->isSecure ? "s" : "ns");
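 // Example (TTBCR.N == 0): the 16KB L1 table base comes from TTBR[31:14]
 // and VA[31:20] selects one of 4096 word-sized entries, so the
 // descriptor address is TTBR[31:14] : VA[31:20] : '00'.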
633 
634  // Trickbox address check
635  Fault f;
 636  f = testWalk(l1desc_addr, sizeof(uint32_t),
 637  TlbEntry::DomainType::NoAccess, L1);
 638  if (f) {
639  DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
640  if (currState->timing) {
 641  pending = false;
 642  nextWalk(currState->tc);
 643  currState = NULL;
644  } else {
645  currState->tc = NULL;
646  currState->req = NULL;
647  }
648  return f;
649  }
 650 
 651  Request::Flags flag = Request::PT_WALK;
 652  if (currState->sctlr.c == 0 || currState->isUncacheable) {
 653  flag.set(Request::UNCACHEABLE);
 654  }
655 
656  if (currState->isSecure) {
657  flag.set(Request::SECURE);
658  }
659 
660  bool delayed;
661  delayed = fetchDescriptor(l1desc_addr, (uint8_t*)&currState->l1Desc.data,
 662  sizeof(uint32_t), flag, L1, &doL1DescEvent,
 663  &TableWalker::doL1Descriptor);
 664  if (!delayed) {
665  f = currState->fault;
666  }
667 
668  return f;
669 }
670 
671 Fault
 672 TableWalker::processWalkLPAE()
 673 {
674  Addr ttbr, ttbr0_max, ttbr1_min, desc_addr;
675  int tsz, n;
676  LookupLevel start_lookup_level = L1;
677 
 678  DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n",
 679  currState->vaddr_tainted, currState->ttbcr);
 680 
 682 
 683  Request::Flags flag = Request::PT_WALK;
 684  if (currState->isSecure)
685  flag.set(Request::SECURE);
686 
687  // work out which base address register to use, if in hyp mode we always
688  // use HTTBR
689  if (isStage2) {
 690  DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n");
 691  ttbr = currState->tc->readMiscReg(MISCREG_VTTBR);
 692  tsz = sext<4>(currState->vtcr.t0sz);
693  start_lookup_level = currState->vtcr.sl0 ? L1 : L2;
694  currState->isUncacheable = currState->vtcr.irgn0 == 0;
695  } else if (currState->isHyp) {
 696  DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n");
 697  ttbr = currState->tc->readMiscReg(MISCREG_HTTBR);
 698  tsz = currState->htcr.t0sz;
699  currState->isUncacheable = currState->htcr.irgn0 == 0;
700  } else {
701  assert(longDescFormatInUse(currState->tc));
702 
703  // Determine boundaries of TTBR0/1 regions
704  if (currState->ttbcr.t0sz)
705  ttbr0_max = (1ULL << (32 - currState->ttbcr.t0sz)) - 1;
706  else if (currState->ttbcr.t1sz)
707  ttbr0_max = (1ULL << 32) -
708  (1ULL << (32 - currState->ttbcr.t1sz)) - 1;
709  else
710  ttbr0_max = (1ULL << 32) - 1;
711  if (currState->ttbcr.t1sz)
712  ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz));
713  else
714  ttbr1_min = (1ULL << (32 - currState->ttbcr.t0sz));
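 // T0SZ/T1SZ carve the 32-bit VA space into a TTBR0 region starting at
 // address 0 and a TTBR1 region ending at 4GB; a VA that falls in neither
 // region takes a translation fault below.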
715 
716  const bool is_atomic = currState->req->isAtomic();
717 
718  // The following code snippet selects the appropriate translation table base
719  // address (TTBR0 or TTBR1) and the appropriate starting lookup level
720  // depending on the address range supported by the translation table (ARM
721  // ARM issue C B3.6.4)
722  if (currState->vaddr <= ttbr0_max) {
723  DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n");
724  // Check if table walk is allowed
725  if (currState->ttbcr.epd0) {
726  if (currState->isFetch)
727  return std::make_shared<PrefetchAbort>(
730  isStage2,
732  else
733  return std::make_shared<DataAbort>(
736  is_atomic ? false : currState->isWrite,
738  isStage2,
740  }
743  tsz = currState->ttbcr.t0sz;
744  currState->isUncacheable = currState->ttbcr.irgn0 == 0;
745  if (ttbr0_max < (1ULL << 30)) // Upper limit < 1 GiB
746  start_lookup_level = L2;
747  } else if (currState->vaddr >= ttbr1_min) {
748  DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
749  // Check if table walk is allowed
750  if (currState->ttbcr.epd1) {
751  if (currState->isFetch)
752  return std::make_shared<PrefetchAbort>(
755  isStage2,
757  else
758  return std::make_shared<DataAbort>(
761  is_atomic ? false : currState->isWrite,
763  isStage2,
765  }
768  tsz = currState->ttbcr.t1sz;
769  currState->isUncacheable = currState->ttbcr.irgn1 == 0;
770  // Lower limit >= 3 GiB
771  if (ttbr1_min >= (1ULL << 31) + (1ULL << 30))
772  start_lookup_level = L2;
773  } else {
774  // Out of boundaries -> translation fault
775  if (currState->isFetch)
776  return std::make_shared<PrefetchAbort>(
779  isStage2,
781  else
782  return std::make_shared<DataAbort>(
785  is_atomic ? false : currState->isWrite,
788  }
789 
790  }
791 
792  // Perform lookup (ARM ARM issue C B3.6.6)
793  if (start_lookup_level == L1) {
794  n = 5 - tsz;
795  desc_addr = mbits(ttbr, 39, n) |
796  (bits(currState->vaddr, n + 26, 30) << 3);
797  DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
798  desc_addr, currState->isSecure ? "s" : "ns");
799  } else {
800  // Skip first-level lookup
801  n = (tsz >= 2 ? 14 - tsz : 12);
802  desc_addr = mbits(ttbr, 39, n) |
803  (bits(currState->vaddr, n + 17, 21) << 3);
804  DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
805  desc_addr, currState->isSecure ? "s" : "ns");
806  }
807 
808  // Trickbox address check
809  Fault f = testWalk(desc_addr, sizeof(uint64_t),
810  TlbEntry::DomainType::NoAccess, start_lookup_level);
811  if (f) {
812  DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
813  if (currState->timing) {
814  pending = false;
816  currState = NULL;
817  } else {
818  currState->tc = NULL;
819  currState->req = NULL;
820  }
821  return f;
822  }
823 
 824  if (currState->sctlr.c == 0 || currState->isUncacheable) {
 825  flag.set(Request::UNCACHEABLE);
 826  }
827 
828  currState->longDesc.lookupLevel = start_lookup_level;
829  currState->longDesc.aarch64 = false;
831 
832  bool delayed = fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
833  sizeof(uint64_t), flag, start_lookup_level,
 834  LongDescEventByLevel[start_lookup_level],
 835  &TableWalker::doLongDescriptor);
 836  if (!delayed) {
837  f = currState->fault;
838  }
839 
840  return f;
841 }
842 
843 bool
 844 TableWalker::checkVAddrSizeFaultAArch64(Addr addr, int top_bit,
 845  GrainSize tg, int tsz, bool low_range)
846 {
847  // The effective maximum input size is 48 if ARMv8.2-LVA is not
848  // supported or if the translation granule that is in use is 4KB or
849  // 16KB in size. When ARMv8.2-LVA is supported, for the 64KB
 850  // translation granule size only, the effective maximum input size is
 851  // 52.
852  int in_max = (HaveLVA(currState->tc) && tg == Grain64KB) ? 52 : 48;
853  int in_min = 64 - (tg == Grain64KB ? 47 : 48);
854 
855  return tsz > in_max || tsz < in_min || (low_range ?
856  bits(currState->vaddr, top_bit, tsz) != 0x0 :
857  bits(currState->vaddr, top_bit, tsz) != mask(top_bit - tsz + 1));
858 }
859 
860 bool
 861 TableWalker::checkAddrSizeFaultAArch64(Addr addr, int pa_range)
 862 {
863  return (pa_range != _physAddrRange &&
864  bits(addr, _physAddrRange - 1, pa_range));
865 }
866 
867 Fault
 868 TableWalker::processWalkAArch64()
 869 {
870  assert(currState->aarch64);
871 
 872  DPRINTF(TLB, "Beginning table walk for address %#llx, TCR: %#llx\n",
 873  currState->vaddr_tainted, currState->tcr);
 874 
 875  static const GrainSize GrainMap_tg0[] =
 876  { Grain4KB, Grain64KB, Grain16KB, ReservedGrain };
 877  static const GrainSize GrainMap_tg1[] =
 878  { ReservedGrain, Grain16KB, Grain4KB, Grain64KB };
 879 
881 
882  // Determine TTBR, table size, granule size and phys. address range
883  Addr ttbr = 0;
884  int tsz = 0, ps = 0;
885  GrainSize tg = Grain4KB; // grain size computed from tg* field
886  bool fault = false;
887 
888  int top_bit = computeAddrTop(currState->tc,
889  bits(currState->vaddr, 55),
891  currState->tcr,
892  currState->el);
893 
894  LookupLevel start_lookup_level = MAX_LOOKUP_LEVELS;
895  bool vaddr_fault = false;
896  switch (currState->el) {
897  case EL0:
898  {
899  Addr ttbr0;
900  Addr ttbr1;
901  if (HaveVirtHostExt(currState->tc) &&
902  currState->hcr.tge==1 && currState->hcr.e2h == 1) {
 903  // VHE code for EL2&0 regime
 904  ttbr0 = currState->tc->readMiscReg(MISCREG_TTBR0_EL2);
 905  ttbr1 = currState->tc->readMiscReg(MISCREG_TTBR1_EL2);
 906  } else {
 907  ttbr0 = currState->tc->readMiscReg(MISCREG_TTBR0_EL1);
 908  ttbr1 = currState->tc->readMiscReg(MISCREG_TTBR1_EL1);
 909  }
910  switch (bits(currState->vaddr, 63,48)) {
911  case 0:
912  DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
913  ttbr = ttbr0;
914  tsz = 64 - currState->tcr.t0sz;
915  tg = GrainMap_tg0[currState->tcr.tg0];
916  currState->hpd = currState->tcr.hpd0;
917  currState->isUncacheable = currState->tcr.irgn0 == 0;
919  top_bit, tg, tsz, true);
920 
921  if (vaddr_fault || currState->tcr.epd0)
922  fault = true;
923  break;
924  case 0xffff:
925  DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
926  ttbr = ttbr1;
927  tsz = 64 - currState->tcr.t1sz;
928  tg = GrainMap_tg1[currState->tcr.tg1];
929  currState->hpd = currState->tcr.hpd1;
930  currState->isUncacheable = currState->tcr.irgn1 == 0;
932  top_bit, tg, tsz, false);
933 
934  if (vaddr_fault || currState->tcr.epd1)
935  fault = true;
936  break;
937  default:
938  // top two bytes must be all 0s or all 1s, else invalid addr
939  fault = true;
940  }
941  ps = currState->tcr.ips;
942  }
943  break;
944  case EL1:
945  if (isStage2) {
946  if (currState->secureLookup) {
 947  DPRINTF(TLB, " - Selecting VSTTBR_EL2 (AArch64 stage 2)\n");
 948  ttbr = currState->tc->readMiscReg(MISCREG_VSTTBR_EL2);
 949  } else {
 950  DPRINTF(TLB, " - Selecting VTTBR_EL2 (AArch64 stage 2)\n");
 951  ttbr = currState->tc->readMiscReg(MISCREG_VTTBR_EL2);
 952  }
953  tsz = 64 - currState->vtcr.t0sz64;
954  tg = GrainMap_tg0[currState->vtcr.tg0];
955  // ARM DDI 0487A.f D7-2148
956  // The starting level of stage 2 translation depends on
957  // VTCR_EL2.SL0 and VTCR_EL2.TG0
958  LookupLevel __ = MAX_LOOKUP_LEVELS; // invalid level
959  uint8_t sl_tg = (currState->vtcr.sl0 << 2) | currState->vtcr.tg0;
960  static const LookupLevel SLL[] = {
961  L2, L3, L3, __, // sl0 == 0
962  L1, L2, L2, __, // sl0 == 1, etc.
963  L0, L1, L1, __,
964  __, __, __, __
965  };
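 // Example: sl0 == 1 with a 4KB granule (tg0 == 0) indexes SLL[4] and
 // starts the stage 2 walk at L1, i.e. a three-level walk covering up to
 // a 39-bit IPA space.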
966  start_lookup_level = SLL[sl_tg];
967  panic_if(start_lookup_level == MAX_LOOKUP_LEVELS,
968  "Cannot discern lookup level from vtcr.{sl0,tg0}");
969  ps = currState->vtcr.ps;
970  currState->isUncacheable = currState->vtcr.irgn0 == 0;
971  } else {
972  switch (bits(currState->vaddr, top_bit)) {
973  case 0:
974  DPRINTF(TLB, " - Selecting TTBR0_EL1 (AArch64)\n");
 975  ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL1);
 976  tsz = 64 - currState->tcr.t0sz;
977  tg = GrainMap_tg0[currState->tcr.tg0];
978  currState->hpd = currState->tcr.hpd0;
979  currState->isUncacheable = currState->tcr.irgn0 == 0;
981  top_bit, tg, tsz, true);
982 
983  if (vaddr_fault || currState->tcr.epd0)
984  fault = true;
985  break;
986  case 0x1:
987  DPRINTF(TLB, " - Selecting TTBR1_EL1 (AArch64)\n");
 988  ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL1);
 989  tsz = 64 - currState->tcr.t1sz;
990  tg = GrainMap_tg1[currState->tcr.tg1];
991  currState->hpd = currState->tcr.hpd1;
992  currState->isUncacheable = currState->tcr.irgn1 == 0;
994  top_bit, tg, tsz, false);
995 
996  if (vaddr_fault || currState->tcr.epd1)
997  fault = true;
998  break;
999  default:
1000  // top two bytes must be all 0s or all 1s, else invalid addr
1001  fault = true;
1002  }
1003  ps = currState->tcr.ips;
1004  }
1005  break;
1006  case EL2:
1007  switch(bits(currState->vaddr, top_bit)) {
1008  case 0:
1009  DPRINTF(TLB, " - Selecting TTBR0_EL2 (AArch64)\n");
 1010  ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL2);
 1011  tsz = 64 - currState->tcr.t0sz;
1012  tg = GrainMap_tg0[currState->tcr.tg0];
1013  currState->hpd = currState->hcr.e2h ?
1014  currState->tcr.hpd0 : currState->tcr.hpd;
1015  currState->isUncacheable = currState->tcr.irgn0 == 0;
1017  top_bit, tg, tsz, true);
1018 
1019  if (vaddr_fault || (currState->hcr.e2h && currState->tcr.epd0))
1020  fault = true;
1021  break;
1022 
1023  case 0x1:
1024  DPRINTF(TLB, " - Selecting TTBR1_EL2 (AArch64)\n");
 1025  ttbr = currState->tc->readMiscReg(MISCREG_TTBR1_EL2);
 1026  tsz = 64 - currState->tcr.t1sz;
1027  tg = GrainMap_tg1[currState->tcr.tg1];
1028  currState->hpd = currState->tcr.hpd1;
1029  currState->isUncacheable = currState->tcr.irgn1 == 0;
1031  top_bit, tg, tsz, false);
1032 
1033  if (vaddr_fault || !currState->hcr.e2h || currState->tcr.epd1)
1034  fault = true;
1035  break;
1036 
1037  default:
1038  // invalid addr if top two bytes are not all 0s
1039  fault = true;
1040  }
1041  ps = currState->hcr.e2h ? currState->tcr.ips: currState->tcr.ps;
1042  break;
1043  case EL3:
1044  switch(bits(currState->vaddr, top_bit)) {
1045  case 0:
1046  DPRINTF(TLB, " - Selecting TTBR0_EL3 (AArch64)\n");
 1047  ttbr = currState->tc->readMiscReg(MISCREG_TTBR0_EL3);
 1048  tsz = 64 - currState->tcr.t0sz;
1049  tg = GrainMap_tg0[currState->tcr.tg0];
1050  currState->hpd = currState->tcr.hpd;
1051  currState->isUncacheable = currState->tcr.irgn0 == 0;
1053  top_bit, tg, tsz, true);
1054 
1055  if (vaddr_fault)
1056  fault = true;
1057  break;
1058  default:
1059  // invalid addr if top two bytes are not all 0s
1060  fault = true;
1061  }
1062  ps = currState->tcr.ps;
1063  break;
1064  }
1065 
1066  const bool is_atomic = currState->req->isAtomic();
1067 
1068  if (fault) {
1069  Fault f;
1070  if (currState->isFetch)
1071  f = std::make_shared<PrefetchAbort>(
1075  else
1076  f = std::make_shared<DataAbort>(
1079  is_atomic ? false : currState->isWrite,
1082 
1083  if (currState->timing) {
1084  pending = false;
1085  nextWalk(currState->tc);
1086  currState = NULL;
1087  } else {
1088  currState->tc = NULL;
1089  currState->req = NULL;
1090  }
1091  return f;
1092 
1093  }
1094 
1095  if (tg == ReservedGrain) {
1096  warn_once("Reserved granule size requested; gem5's IMPLEMENTATION "
1097  "DEFINED behavior takes this to mean 4KB granules\n");
1098  tg = Grain4KB;
1099  }
1100 
1101  // Determine starting lookup level
1102  // See aarch64/translation/walk in Appendix G: ARMv8 Pseudocode Library
1103  // in ARM DDI 0487A. These table values correspond to the cascading tests
1104  // to compute the lookup level and are of the form
1105  // (grain_size + N*stride), for N = {1, 2, 3}.
1106  // A value of 64 will never succeed and a value of 0 will always succeed.
1107  if (start_lookup_level == MAX_LOOKUP_LEVELS) {
1108  struct GrainMap
1109  {
1110  GrainSize grain_size;
1111  unsigned lookup_level_cutoff[MAX_LOOKUP_LEVELS];
1112  };
1113  static const GrainMap GM[] = {
1114  { Grain4KB, { 39, 30, 0, 0 } },
1115  { Grain16KB, { 47, 36, 25, 0 } },
1116  { Grain64KB, { 64, 42, 29, 0 } }
1117  };
1118 
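 // Example: with a 4KB granule the stride is 9 bits, so the cutoffs are
 // 12 + 3*9 = 39 and 12 + 2*9 = 30; an input size (tsz) of 48 starts the
 // walk at L0, while tsz == 39 only exceeds the second cutoff and starts
 // at L1.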
1119  const unsigned *lookup = NULL; // points to a lookup_level_cutoff
1120 
1121  for (unsigned i = 0; i < 3; ++i) { // choose entry of GM[]
1122  if (tg == GM[i].grain_size) {
1123  lookup = GM[i].lookup_level_cutoff;
1124  break;
1125  }
1126  }
1127  assert(lookup);
1128 
1129  for (int L = L0; L != MAX_LOOKUP_LEVELS; ++L) {
1130  if (tsz > lookup[L]) {
1131  start_lookup_level = (LookupLevel) L;
1132  break;
1133  }
1134  }
1135  panic_if(start_lookup_level == MAX_LOOKUP_LEVELS,
1136  "Table walker couldn't find lookup level\n");
1137  }
1138 
1139  // Clamp to lower limit
1140  int pa_range = decodePhysAddrRange64(ps);
 1141  if (pa_range > _physAddrRange) {
 1142  currState->physAddrRange = _physAddrRange;
 1143  } else {
1144  currState->physAddrRange = pa_range;
1145  }
1146 
1147  // Determine table base address
1148  int stride = tg - 3;
1149  int base_addr_lo = 3 + tsz - stride * (3 - start_lookup_level) - tg;
1150  Addr base_addr = 0;
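 // Example: a 4KB granule (tg == 12, stride 9) with tsz == 48 and a
 // level 0 start gives base_addr_lo == 3 + 48 - 27 - 12 == 12, i.e. the
 // 512-entry (4KB) starting table is based at TTBR bits [47:12].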
1151 
1152  if (pa_range == 52) {
1153  int z = (base_addr_lo < 6) ? 6 : base_addr_lo;
1154  base_addr = mbits(ttbr, 47, z);
1155  base_addr |= (bits(ttbr, 5, 2) << 48);
1156  } else {
1157  base_addr = mbits(ttbr, 47, base_addr_lo);
1158  }
1159 
1160  // Determine physical address size and raise an Address Size Fault if
 1161  // necessary
 1162  if (checkAddrSizeFaultAArch64(base_addr, currState->physAddrRange)) {
 1163  DPRINTF(TLB, "Address size fault before any lookup\n");
1164  Fault f;
1165  if (currState->isFetch)
1166  f = std::make_shared<PrefetchAbort>(
1168  ArmFault::AddressSizeLL + start_lookup_level,
1169  isStage2,
1171  else
1172  f = std::make_shared<DataAbort>(
1175  is_atomic ? false : currState->isWrite,
1176  ArmFault::AddressSizeLL + start_lookup_level,
1177  isStage2,
1179 
1180 
1181  if (currState->timing) {
1182  pending = false;
1183  nextWalk(currState->tc);
1184  currState = NULL;
1185  } else {
1186  currState->tc = NULL;
1187  currState->req = NULL;
1188  }
1189  return f;
1190 
1191  }
1192 
1193  // Determine descriptor address
1194  Addr desc_addr = base_addr |
1195  (bits(currState->vaddr, tsz - 1,
1196  stride * (3 - start_lookup_level) + tg) << 3);
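 // The VA bits between the input size and the bits resolved by later
 // levels select an 8-byte entry in the starting-level table; for the
 // example above (4KB granule, tsz == 48, start at L0) these are
 // VA[47:39].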
1197 
1198  // Trickbox address check
1199  Fault f = testWalk(desc_addr, sizeof(uint64_t),
1200  TlbEntry::DomainType::NoAccess, start_lookup_level);
1201  if (f) {
1202  DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
1203  if (currState->timing) {
1204  pending = false;
1205  nextWalk(currState->tc);
1206  currState = NULL;
1207  } else {
1208  currState->tc = NULL;
1209  currState->req = NULL;
1210  }
1211  return f;
1212  }
1213 
 1214  Request::Flags flag = Request::PT_WALK;
 1215  if (currState->sctlr.c == 0 || currState->isUncacheable) {
1216  flag.set(Request::UNCACHEABLE);
1217  }
1218 
1219  if (currState->isSecure) {
1220  flag.set(Request::SECURE);
1221  }
1222 
1223  currState->longDesc.lookupLevel = start_lookup_level;
1224  currState->longDesc.aarch64 = true;
1227 
1228  if (currState->timing) {
1229  fetchDescriptor(desc_addr, (uint8_t*) &currState->longDesc.data,
1230  sizeof(uint64_t), flag, start_lookup_level,
1231  LongDescEventByLevel[start_lookup_level], NULL);
1232  } else {
1233  fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
 1234  sizeof(uint64_t), flag, -1, NULL,
 1235  &TableWalker::doLongDescriptor);
 1236  f = currState->fault;
1237  }
1238 
1239  return f;
1240 }
1241 
1242 void
 1243 TableWalker::memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr,
 1244  uint8_t texcb, bool s)
1245 {
1246  // Note: tc and sctlr local variables are hiding tc and sctrl class
1247  // variables
1248  DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s);
1249  te.shareable = false; // default value
1250  te.nonCacheable = false;
1251  te.outerShareable = false;
1252  if (sctlr.tre == 0 || ((sctlr.tre == 1) && (sctlr.m == 0))) {
1253  switch(texcb) {
 1254  case 0: // Strongly-ordered
 1255  te.nonCacheable = true;
 1256  te.mtype = TlbEntry::MemoryType::StronglyOrdered;
 1257  te.shareable = true;
1258  te.innerAttrs = 1;
1259  te.outerAttrs = 0;
1260  break;
1261  case 1: // Shareable Device
 1262  te.nonCacheable = true;
 1263  te.mtype = TlbEntry::MemoryType::Device;
 1264  te.shareable = true;
1265  te.innerAttrs = 3;
1266  te.outerAttrs = 0;
1267  break;
1268  case 2: // Outer and Inner Write-Through, no Write-Allocate
 1269  te.mtype = TlbEntry::MemoryType::Normal;
 1270  te.shareable = s;
1271  te.innerAttrs = 6;
1272  te.outerAttrs = bits(texcb, 1, 0);
1273  break;
1274  case 3: // Outer and Inner Write-Back, no Write-Allocate
 1275  te.mtype = TlbEntry::MemoryType::Normal;
 1276  te.shareable = s;
1277  te.innerAttrs = 7;
1278  te.outerAttrs = bits(texcb, 1, 0);
1279  break;
1280  case 4: // Outer and Inner Non-cacheable
1281  te.nonCacheable = true;
 1282  te.mtype = TlbEntry::MemoryType::Normal;
 1283  te.shareable = s;
1284  te.innerAttrs = 0;
1285  te.outerAttrs = bits(texcb, 1, 0);
1286  break;
1287  case 5: // Reserved
1288  panic("Reserved texcb value!\n");
1289  break;
1290  case 6: // Implementation Defined
1291  panic("Implementation-defined texcb value!\n");
1292  break;
1293  case 7: // Outer and Inner Write-Back, Write-Allocate
 1294  te.mtype = TlbEntry::MemoryType::Normal;
 1295  te.shareable = s;
1296  te.innerAttrs = 5;
1297  te.outerAttrs = 1;
1298  break;
1299  case 8: // Non-shareable Device
1300  te.nonCacheable = true;
 1301  te.mtype = TlbEntry::MemoryType::Device;
 1302  te.shareable = false;
1303  te.innerAttrs = 3;
1304  te.outerAttrs = 0;
1305  break;
1306  case 9 ... 15: // Reserved
1307  panic("Reserved texcb value!\n");
1308  break;
1309  case 16 ... 31: // Cacheable Memory
 1310  te.mtype = TlbEntry::MemoryType::Normal;
 1311  te.shareable = s;
1312  if (bits(texcb, 1,0) == 0 || bits(texcb, 3,2) == 0)
1313  te.nonCacheable = true;
1314  te.innerAttrs = bits(texcb, 1, 0);
1315  te.outerAttrs = bits(texcb, 3, 2);
1316  break;
1317  default:
1318  panic("More than 32 states for 5 bits?\n");
1319  }
1320  } else {
1321  assert(tc);
 1322  PRRR prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR,
 1323  currState->tc, !currState->isSecure));
 1324  NMRR nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR,
 1325  currState->tc, !currState->isSecure));
1326  DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr);
1327  uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0;
1328  switch(bits(texcb, 2,0)) {
1329  case 0:
1330  curr_tr = prrr.tr0;
1331  curr_ir = nmrr.ir0;
1332  curr_or = nmrr.or0;
1333  te.outerShareable = (prrr.nos0 == 0);
1334  break;
1335  case 1:
1336  curr_tr = prrr.tr1;
1337  curr_ir = nmrr.ir1;
1338  curr_or = nmrr.or1;
1339  te.outerShareable = (prrr.nos1 == 0);
1340  break;
1341  case 2:
1342  curr_tr = prrr.tr2;
1343  curr_ir = nmrr.ir2;
1344  curr_or = nmrr.or2;
1345  te.outerShareable = (prrr.nos2 == 0);
1346  break;
1347  case 3:
1348  curr_tr = prrr.tr3;
1349  curr_ir = nmrr.ir3;
1350  curr_or = nmrr.or3;
1351  te.outerShareable = (prrr.nos3 == 0);
1352  break;
1353  case 4:
1354  curr_tr = prrr.tr4;
1355  curr_ir = nmrr.ir4;
1356  curr_or = nmrr.or4;
1357  te.outerShareable = (prrr.nos4 == 0);
1358  break;
1359  case 5:
1360  curr_tr = prrr.tr5;
1361  curr_ir = nmrr.ir5;
1362  curr_or = nmrr.or5;
1363  te.outerShareable = (prrr.nos5 == 0);
1364  break;
1365  case 6:
1366  panic("Imp defined type\n");
1367  case 7:
1368  curr_tr = prrr.tr7;
1369  curr_ir = nmrr.ir7;
1370  curr_or = nmrr.or7;
1371  te.outerShareable = (prrr.nos7 == 0);
1372  break;
1373  }
1374 
1375  switch(curr_tr) {
1376  case 0:
1377  DPRINTF(TLBVerbose, "StronglyOrdered\n");
 1378  te.mtype = TlbEntry::MemoryType::StronglyOrdered;
 1379  te.nonCacheable = true;
1380  te.innerAttrs = 1;
1381  te.outerAttrs = 0;
1382  te.shareable = true;
1383  break;
1384  case 1:
1385  DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n",
1386  prrr.ds1, prrr.ds0, s);
 1387  te.mtype = TlbEntry::MemoryType::Device;
 1388  te.nonCacheable = true;
1389  te.innerAttrs = 3;
1390  te.outerAttrs = 0;
1391  if (prrr.ds1 && s)
1392  te.shareable = true;
1393  if (prrr.ds0 && !s)
1394  te.shareable = true;
1395  break;
1396  case 2:
1397  DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n",
1398  prrr.ns1, prrr.ns0, s);
 1399  te.mtype = TlbEntry::MemoryType::Normal;
 1400  if (prrr.ns1 && s)
1401  te.shareable = true;
1402  if (prrr.ns0 && !s)
1403  te.shareable = true;
1404  break;
1405  case 3:
1406  panic("Reserved type");
1407  }
1408 
1409  if (te.mtype == TlbEntry::MemoryType::Normal){
1410  switch(curr_ir) {
1411  case 0:
1412  te.nonCacheable = true;
1413  te.innerAttrs = 0;
1414  break;
1415  case 1:
1416  te.innerAttrs = 5;
1417  break;
1418  case 2:
1419  te.innerAttrs = 6;
1420  break;
1421  case 3:
1422  te.innerAttrs = 7;
1423  break;
1424  }
1425 
1426  switch(curr_or) {
1427  case 0:
1428  te.nonCacheable = true;
1429  te.outerAttrs = 0;
1430  break;
1431  case 1:
1432  te.outerAttrs = 1;
1433  break;
1434  case 2:
1435  te.outerAttrs = 2;
1436  break;
1437  case 3:
1438  te.outerAttrs = 3;
1439  break;
1440  }
1441  }
1442  }
1443  DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, "
1444  "outerAttrs: %d\n",
1445  te.shareable, te.innerAttrs, te.outerAttrs);
1446  te.setAttributes(false);
1447 }
1448 
1449 void
 1450 TableWalker::memAttrsLPAE(ThreadContext *tc, TlbEntry &te,
 1451  LongDescriptor &lDescriptor)
1452 {
1453  assert(_haveLPAE);
1454 
1455  uint8_t attr;
1456  uint8_t sh = lDescriptor.sh();
1457  // Different format and source of attributes if this is a stage 2
1458  // translation
1459  if (isStage2) {
1460  attr = lDescriptor.memAttr();
1461  uint8_t attr_3_2 = (attr >> 2) & 0x3;
1462  uint8_t attr_1_0 = attr & 0x3;
1463 
1464  DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh);
1465 
1466  if (attr_3_2 == 0) {
 1467  te.mtype = attr_1_0 == 0 ? TlbEntry::MemoryType::StronglyOrdered
 1468  : TlbEntry::MemoryType::Device;
 1469  te.outerAttrs = 0;
1470  te.innerAttrs = attr_1_0 == 0 ? 1 : 3;
1471  te.nonCacheable = true;
 1472  } else {
 1473  te.mtype = TlbEntry::MemoryType::Normal;
 1474  te.outerAttrs = attr_3_2 == 1 ? 0 :
1475  attr_3_2 == 2 ? 2 : 1;
1476  te.innerAttrs = attr_1_0 == 1 ? 0 :
1477  attr_1_0 == 2 ? 6 : 5;
1478  te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1);
1479  }
1480  } else {
1481  uint8_t attrIndx = lDescriptor.attrIndx();
1482 
1483  // LPAE always uses remapping of memory attributes, irrespective of the
1484  // value of SCTLR.TRE
1485  MiscRegIndex reg = attrIndx & 0x4 ? MISCREG_MAIR1 : MISCREG_MAIR0;
1486  int reg_as_int = snsBankedIndex(reg, currState->tc,
1487  !currState->isSecure);
1488  uint32_t mair = currState->tc->readMiscReg(reg_as_int);
1489  attr = (mair >> (8 * (attrIndx % 4))) & 0xff;
1490  uint8_t attr_7_4 = bits(attr, 7, 4);
1491  uint8_t attr_3_0 = bits(attr, 3, 0);
1492  DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx, sh, attr);
1493 
1494  // Note: the memory subsystem only cares about the 'cacheable' memory
1495  // attribute. The other attributes are only used to fill the PAR register
 1496  // and so provide the illusion of full support
1497  te.nonCacheable = false;
1498 
1499  switch (attr_7_4) {
1500  case 0x0:
1501  // Strongly-ordered or Device memory
 1502  if (attr_3_0 == 0x0)
 1503  te.mtype = TlbEntry::MemoryType::StronglyOrdered;
 1504  else if (attr_3_0 == 0x4)
 1505  te.mtype = TlbEntry::MemoryType::Device;
 1506  else
1507  panic("Unpredictable behavior\n");
1508  te.nonCacheable = true;
1509  te.outerAttrs = 0;
1510  break;
1511  case 0x4:
1512  // Normal memory, Outer Non-cacheable
 1513  te.mtype = TlbEntry::MemoryType::Normal;
 1514  te.outerAttrs = 0;
1515  if (attr_3_0 == 0x4)
1516  // Inner Non-cacheable
1517  te.nonCacheable = true;
1518  else if (attr_3_0 < 0x8)
1519  panic("Unpredictable behavior\n");
1520  break;
1521  case 0x8:
1522  case 0x9:
1523  case 0xa:
1524  case 0xb:
1525  case 0xc:
1526  case 0xd:
1527  case 0xe:
1528  case 0xf:
1529  if (attr_7_4 & 0x4) {
1530  te.outerAttrs = (attr_7_4 & 1) ? 1 : 3;
1531  } else {
1532  te.outerAttrs = 0x2;
1533  }
1534  // Normal memory, Outer Cacheable
 1535  te.mtype = TlbEntry::MemoryType::Normal;
 1536  if (attr_3_0 != 0x4 && attr_3_0 < 0x8)
1537  panic("Unpredictable behavior\n");
1538  break;
1539  default:
1540  panic("Unpredictable behavior\n");
1541  break;
1542  }
1543 
1544  switch (attr_3_0) {
1545  case 0x0:
1546  te.innerAttrs = 0x1;
1547  break;
1548  case 0x4:
1549  te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0;
1550  break;
1551  case 0x8:
1552  case 0x9:
1553  case 0xA:
1554  case 0xB:
1555  te.innerAttrs = 6;
1556  break;
1557  case 0xC:
1558  case 0xD:
1559  case 0xE:
1560  case 0xF:
1561  te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7;
1562  break;
1563  default:
1564  panic("Unpredictable behavior\n");
1565  break;
1566  }
1567  }
1568 
1569  te.outerShareable = sh == 2;
1570  te.shareable = (sh & 0x2) ? true : false;
1571  te.setAttributes(true);
1572  te.attributes |= (uint64_t) attr << 56;
1573 }
1574 
1575 void
 1576 TableWalker::memAttrsAArch64(ThreadContext *tc, TlbEntry &te,
 1577  LongDescriptor &lDescriptor)
1578 {
1579  uint8_t attr;
1580  uint8_t attr_hi;
1581  uint8_t attr_lo;
1582  uint8_t sh = lDescriptor.sh();
1583 
1584  if (isStage2) {
1585  attr = lDescriptor.memAttr();
1586  uint8_t attr_hi = (attr >> 2) & 0x3;
1587  uint8_t attr_lo = attr & 0x3;
1588 
1589  DPRINTF(TLBVerbose, "memAttrsAArch64 MemAttr:%#x sh:%#x\n", attr, sh);
1590 
1591  if (attr_hi == 0) {
 1592  te.mtype = attr_lo == 0 ? TlbEntry::MemoryType::StronglyOrdered
 1593  : TlbEntry::MemoryType::Device;
 1594  te.outerAttrs = 0;
1595  te.innerAttrs = attr_lo == 0 ? 1 : 3;
1596  te.nonCacheable = true;
 1597  } else {
 1598  te.mtype = TlbEntry::MemoryType::Normal;
 1599  te.outerAttrs = attr_hi == 1 ? 0 :
1600  attr_hi == 2 ? 2 : 1;
1601  te.innerAttrs = attr_lo == 1 ? 0 :
1602  attr_lo == 2 ? 6 : 5;
 1603  // Treat write-through memory as uncacheable; this is safe
 1604  // but not optimal for performance.
1605  te.nonCacheable = (attr_hi == 1) || (attr_hi == 2) ||
1606  (attr_lo == 1) || (attr_lo == 2);
1607  }
1608  } else {
1609  uint8_t attrIndx = lDescriptor.attrIndx();
1610 
1611  DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh);
 1612  ExceptionLevel regime = s1TranslationRegime(tc, currState->el);
 1613 
1614  // Select MAIR
1615  uint64_t mair;
1616  switch (regime) {
1617  case EL0:
1618  case EL1:
1619  mair = tc->readMiscReg(MISCREG_MAIR_EL1);
1620  break;
1621  case EL2:
1622  mair = tc->readMiscReg(MISCREG_MAIR_EL2);
1623  break;
1624  case EL3:
1625  mair = tc->readMiscReg(MISCREG_MAIR_EL3);
1626  break;
1627  default:
1628  panic("Invalid exception level");
1629  break;
1630  }
1631 
1632  // Select attributes
1633  attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
1634  attr_lo = bits(attr, 3, 0);
1635  attr_hi = bits(attr, 7, 4);
1636 
1637  // Memory type
 1638  te.mtype = attr_hi == 0 ? TlbEntry::MemoryType::Device : TlbEntry::MemoryType::Normal;
 1639 
1640  // Cacheability
1641  te.nonCacheable = false;
1642  if (te.mtype == TlbEntry::MemoryType::Device) { // Device memory
1643  te.nonCacheable = true;
1644  }
 1645  // Treat write-through memory as uncacheable; this is safe
 1646  // but not optimal for performance.
1647  switch (attr_hi) {
1648  case 0x1 ... 0x3: // Normal Memory, Outer Write-through transient
1649  case 0x4: // Normal memory, Outer Non-cacheable
1650  case 0x8 ... 0xb: // Normal Memory, Outer Write-through non-transient
1651  te.nonCacheable = true;
1652  }
1653  switch (attr_lo) {
1654  case 0x1 ... 0x3: // Normal Memory, Inner Write-through transient
1655  case 0x9 ... 0xb: // Normal Memory, Inner Write-through non-transient
1656  warn_if(!attr_hi, "Unpredictable behavior");
1658  case 0x4: // Device-nGnRE memory or
1659  // Normal memory, Inner Non-cacheable
1660  case 0x8: // Device-nGRE memory or
1661  // Normal memory, Inner Write-through non-transient
1662  te.nonCacheable = true;
1663  }
1664 
1665  te.shareable = sh == 2;
1666  te.outerShareable = (sh & 0x2) ? true : false;
1667  // Attributes formatted according to the 64-bit PAR
1668  te.attributes = ((uint64_t) attr << 56) |
1669  (1 << 11) | // LPAE bit
1670  (te.ns << 9) | // NS bit
1671  (sh << 7);
1672  }
1673 }
1674 
1675 void
 1676 TableWalker::doL1Descriptor()
 1677 {
1678  if (currState->fault != NoFault) {
1679  return;
1680  }
1681 
 1682  currState->l1Desc.data = htog(currState->l1Desc.data,
 1683  byteOrder(currState->tc));
1684 
1685  DPRINTF(TLB, "L1 descriptor for %#x is %#x\n",
 1686  currState->vaddr_tainted, currState->l1Desc.data);
 1687  TlbEntry te;
1688 
1689  const bool is_atomic = currState->req->isAtomic();
1690 
1691  switch (currState->l1Desc.type()) {
1692  case L1Descriptor::Ignore:
 1693  case L1Descriptor::Reserved:
 1694  if (!currState->timing) {
1695  currState->tc = NULL;
1696  currState->req = NULL;
1697  }
1698  DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
1699  if (currState->isFetch)
1700  currState->fault =
1701  std::make_shared<PrefetchAbort>(
1704  isStage2,
1706  else
1707  currState->fault =
1708  std::make_shared<DataAbort>(
1711  is_atomic ? false : currState->isWrite,
1714  return;
1715  case L1Descriptor::Section:
1716  if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) {
1722  currState->fault = std::make_shared<DataAbort>(
1724  currState->l1Desc.domain(),
1725  is_atomic ? false : currState->isWrite,
1727  isStage2,
1729  }
1730  if (currState->l1Desc.supersection()) {
1731  panic("Haven't implemented supersections\n");
1732  }
 1733  insertTableEntry(currState->l1Desc, false);
 1734  return;
 1735  case L1Descriptor::PageTable:
 1736  {
1737  Addr l2desc_addr;
1738  l2desc_addr = currState->l1Desc.l2Addr() |
1739  (bits(currState->vaddr, 19, 12) << 2);
1740  DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n",
1741  l2desc_addr, currState->isSecure ? "s" : "ns");
1742 
1743  // Trickbox address check
1744  currState->fault = testWalk(l2desc_addr, sizeof(uint32_t),
1745  currState->l1Desc.domain(), L2);
1746 
1747  if (currState->fault) {
1748  if (!currState->timing) {
1749  currState->tc = NULL;
1750  currState->req = NULL;
1751  }
1752  return;
1753  }
1754 
 1755  Request::Flags flag = Request::PT_WALK;
 1756 
 1757  if (currState->sctlr.c == 0 || currState->isUncacheable) {
1758  flag.set(Request::UNCACHEABLE);
1759  }
1760 
1761  if (currState->isSecure)
1762  flag.set(Request::SECURE);
1763 
1764  bool delayed;
1765  delayed = fetchDescriptor(l2desc_addr,
1766  (uint8_t*)&currState->l2Desc.data,
 1767  sizeof(uint32_t), flag, -1, &doL2DescEvent,
 1768  &TableWalker::doL2Descriptor);
 1769  if (delayed) {
1770  currState->delayed = true;
1771  }
1772 
1773  return;
1774  }
1775  default:
1776  panic("A new type in a 2 bit field?\n");
1777  }
1778 }
1779 
1780 Fault
 1781 TableWalker::generateLongDescFault(ArmFault::FaultSource src)
 1782 {
1783  if (currState->isFetch) {
1784  return std::make_shared<PrefetchAbort>(
1787  isStage2,
1789  } else {
1790  return std::make_shared<DataAbort>(
1793  currState->req->isAtomic() ? false : currState->isWrite,
1795  isStage2,
1797  }
1798 }
1799 
1800 void
 1801 TableWalker::doLongDescriptor()
 1802 {
1803  if (currState->fault != NoFault) {
1804  return;
1805  }
1806 
 1807  currState->longDesc.data = htog(currState->longDesc.data,
 1808  byteOrder(currState->tc));
1809 
1810  DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n",
1813  currState->aarch64 ? "AArch64" : "long-desc.");
1814 
1817  DPRINTF(PageTableWalker, "Analyzing L%d descriptor: %#llx, pxn: %d, "
1818  "xn: %d, ap: %d, af: %d, type: %d\n",
1821  currState->longDesc.pxn(),
1822  currState->longDesc.xn(),
1823  currState->longDesc.ap(),
1824  currState->longDesc.af(),
1825  currState->longDesc.type());
1826  } else {
1827  DPRINTF(PageTableWalker, "Analyzing L%d descriptor: %#llx, type: %d\n",
1830  currState->longDesc.type());
1831  }
1832 
1833  TlbEntry te;
1834 
1835  switch (currState->longDesc.type()) {
 1836  case LongDescriptor::Invalid:
 1837  DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
1840 
1842  if (!currState->timing) {
1843  currState->tc = NULL;
1844  currState->req = NULL;
1845  }
1846  return;
1847 
1848  case LongDescriptor::Block:
1849  case LongDescriptor::Page:
1850  {
1851  auto fault_source = ArmFault::FaultSourceInvalid;
1852  // Check for address size fault
1855 
1856  DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1858  fault_source = ArmFault::AddressSizeLL;
1859 
1860  // Check for access fault
1861  } else if (currState->longDesc.af() == 0) {
1862 
1863  DPRINTF(TLB, "L%d descriptor causing Access Fault\n",
1865  fault_source = ArmFault::AccessFlagLL;
1866  }
1867 
1868  if (fault_source != ArmFault::FaultSourceInvalid) {
1869  currState->fault = generateLongDescFault(fault_source);
1870  } else {
 1871  insertTableEntry(currState->longDesc, true);
 1872  }
1873  }
1874  return;
1875  case LongDescriptor::Table:
1876  {
 1877  // Set hierarchical permission flags
 1878  currState->secureLookup = currState->secureLookup &&
 1879  currState->longDesc.secureTable();
 1880  currState->rwTable = currState->rwTable &&
 1881  (currState->longDesc.rwTable() || currState->hpd);
 1882  currState->userTable = currState->userTable &&
 1883  (currState->longDesc.userTable() || currState->hpd);
 1884  currState->xnTable = currState->xnTable ||
 1885  (currState->longDesc.xnTable() && !currState->hpd);
 1886  currState->pxnTable = currState->pxnTable ||
 1887  (currState->longDesc.pxnTable() && !currState->hpd);
 1888 
1889  // Set up next level lookup
1890  Addr next_desc_addr = currState->longDesc.nextDescAddr(
1891  currState->vaddr);
1892 
1893  DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
1896  next_desc_addr,
1897  currState->secureLookup ? "s" : "ns");
1898 
1899  // Check for address size fault
 1900  if (checkAddrSizeFaultAArch64(
 1901  next_desc_addr, currState->physAddrRange)) {
1902  DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1904 
1907  return;
1908  }
1909 
1910  // Trickbox address check
 1911  currState->fault = testWalk(
 1912  next_desc_addr, sizeof(uint64_t), TlbEntry::DomainType::Client,
 1913  toLookupLevel(currState->longDesc.lookupLevel + 1));
 1914 
1915  if (currState->fault) {
1916  if (!currState->timing) {
1917  currState->tc = NULL;
1918  currState->req = NULL;
1919  }
1920  return;
1921  }
1922 
 1923  Request::Flags flag = Request::PT_WALK;
 1924  if (currState->secureLookup)
1925  flag.set(Request::SECURE);
1926 
1927  if (currState->sctlr.c == 0 || currState->isUncacheable) {
1928  flag.set(Request::UNCACHEABLE);
1929  }
1930 
 1931  LookupLevel L = currState->longDesc.lookupLevel =
 1932  (LookupLevel) (currState->longDesc.lookupLevel + 1);
 1933  Event *event = NULL;
1934  switch (L) {
1935  case L1:
1936  assert(currState->aarch64);
1937  case L2:
1938  case L3:
1939  event = LongDescEventByLevel[L];
1940  break;
1941  default:
1942  panic("Wrong lookup level in table walk\n");
1943  break;
1944  }
1945 
1946  bool delayed;
1947  delayed = fetchDescriptor(next_desc_addr, (uint8_t*)&currState->longDesc.data,
 1948  sizeof(uint64_t), flag, -1, event,
 1949  &TableWalker::doLongDescriptor);
 1950  if (delayed) {
1951  currState->delayed = true;
1952  }
1953  }
1954  return;
1955  default:
1956  panic("A new type in a 2 bit field?\n");
1957  }
1958 }
1959 
1960 void
 1961 TableWalker::doL2Descriptor()
 1962 {
1963  if (currState->fault != NoFault) {
1964  return;
1965  }
1966 
 1967  currState->l2Desc.data = htog(currState->l2Desc.data,
 1968  byteOrder(currState->tc));
1969 
1970  DPRINTF(TLB, "L2 descriptor for %#x is %#x\n",
 1971  currState->vaddr_tainted, currState->l2Desc.data);
 1972  TlbEntry te;
1973 
1974  const bool is_atomic = currState->req->isAtomic();
1975 
1976  if (currState->l2Desc.invalid()) {
1977  DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
1978  if (!currState->timing) {
1979  currState->tc = NULL;
1980  currState->req = NULL;
1981  }
1982  if (currState->isFetch)
1983  currState->fault = std::make_shared<PrefetchAbort>(
1986  isStage2,
1988  else
1989  currState->fault = std::make_shared<DataAbort>(
1991  is_atomic ? false : currState->isWrite,
1993  isStage2,
1995  return;
1996  }
1997 
1998  if (currState->sctlr.afe && bits(currState->l2Desc.ap(), 0) == 0) {
2002  DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
2003  currState->sctlr.afe, currState->l2Desc.ap());
2004 
2005  currState->fault = std::make_shared<DataAbort>(
2008  is_atomic ? false : currState->isWrite,
2011  }
2012 
 2013  insertTableEntry(currState->l2Desc, false);
 2014 }
2015 
2016 void
 2017 TableWalker::doL1DescriptorWrapper()
 2018 {
2019  currState = stateQueues[L1].front();
2020  currState->delayed = false;
2021  // if there's a stage2 translation object we don't need it any more
2022  if (currState->stage2Tran) {
2023  delete currState->stage2Tran;
2024  currState->stage2Tran = NULL;
2025  }
2026 
2027 
2028  DPRINTF(PageTableWalker, "L1 Desc object host addr: %p\n",
2029  &currState->l1Desc.data);
2030  DPRINTF(PageTableWalker, "L1 Desc object data: %08x\n",
2031  currState->l1Desc.data);
2032 
2033  DPRINTF(PageTableWalker, "calling doL1Descriptor for vaddr:%#x\n",
 2034  currState->vaddr_tainted);
 2035  doL1Descriptor();
2036 
2037  stateQueues[L1].pop_front();
2038  // Check if fault was generated
2039  if (currState->fault != NoFault) {
 2040  currState->transState->finish(currState->fault, currState->req,
 2041  currState->tc, currState->mode);
 2042  stats.walksShortTerminatedAtLevel[0]++;
 2043 
2044  pending = false;
2045  nextWalk(currState->tc);
2046 
2047  currState->req = NULL;
2048  currState->tc = NULL;
2049  currState->delayed = false;
2050  delete currState;
2051  }
2052  else if (!currState->delayed) {
2053  // delay is not set so there is no L2 to do
2054  // Don't finish the translation if a stage 2 look up is underway
 2056  DPRINTF(PageTableWalker, "calling translateTiming again\n");
 2057  tlb->translateTiming(currState->req, currState->tc,
 2058  currState->transState, currState->mode);
 2059  stats.walksShortTerminatedAtLevel[0]++;
 2060 
2061  pending = false;
2062  nextWalk(currState->tc);
2063 
2064  currState->req = NULL;
2065  currState->tc = NULL;
2066  currState->delayed = false;
2067  delete currState;
2068  } else {
2069  // need to do L2 descriptor
2070  stateQueues[L2].push_back(currState);
2071  }
2072  currState = NULL;
2073 }
2074 
2075 void
 2076 TableWalker::doL2DescriptorWrapper()
 2077 {
2078  currState = stateQueues[L2].front();
2079  assert(currState->delayed);
2080  // if there's a stage2 translation object we don't need it any more
2081  if (currState->stage2Tran) {
2082  delete currState->stage2Tran;
2083  currState->stage2Tran = NULL;
2084  }
2085 
2086  DPRINTF(PageTableWalker, "calling doL2Descriptor for vaddr:%#x\n",
 2087  currState->vaddr_tainted);
 2088  doL2Descriptor();
2089 
2090  // Check if fault was generated
2091  if (currState->fault != NoFault) {
 2092  currState->transState->finish(currState->fault, currState->req,
 2093  currState->tc, currState->mode);
2095  } else {
 2097  DPRINTF(PageTableWalker, "calling translateTiming again\n");
 2098  tlb->translateTiming(currState->req, currState->tc,
 2099  currState->transState, currState->mode);
 2100  stats.walksShortTerminatedAtLevel[1]++;
 2101  }
2102 
2103 
2104  stateQueues[L2].pop_front();
2105  pending = false;
2106  nextWalk(currState->tc);
2107 
2108  currState->req = NULL;
2109  currState->tc = NULL;
2110  currState->delayed = false;
2111 
2112  delete currState;
2113  currState = NULL;
2114 }
2115 
2116 void
 2117 TableWalker::doL0LongDescriptorWrapper()
 2118 {
 2119  doLongDescriptorWrapper(L0);
 2120 }
 2121 
 2122 void
 2123 TableWalker::doL1LongDescriptorWrapper()
 2124 {
 2125  doLongDescriptorWrapper(L1);
 2126 }
 2127 
 2128 void
 2129 TableWalker::doL2LongDescriptorWrapper()
 2130 {
 2131  doLongDescriptorWrapper(L2);
 2132 }
 2133 
 2134 void
 2135 TableWalker::doL3LongDescriptorWrapper()
 2136 {
 2137  doLongDescriptorWrapper(L3);
 2138 }
2139 
2140 void
 2141 TableWalker::doLongDescriptorWrapper(LookupLevel curr_lookup_level)
 2142 {
2143  currState = stateQueues[curr_lookup_level].front();
2144  assert(curr_lookup_level == currState->longDesc.lookupLevel);
2145  currState->delayed = false;
2146 
2147  // if there's a stage2 translation object we don't need it any more
2148  if (currState->stage2Tran) {
2149  delete currState->stage2Tran;
2150  currState->stage2Tran = NULL;
2151  }
2152 
2153  DPRINTF(PageTableWalker, "calling doLongDescriptor for vaddr:%#x\n",
 2154  currState->vaddr_tainted);
 2155  doLongDescriptor();
2156 
2157  stateQueues[curr_lookup_level].pop_front();
2158 
2159  if (currState->fault != NoFault) {
2160  // A fault was generated
 2161  currState->transState->finish(currState->fault, currState->req,
 2162  currState->tc, currState->mode);
2163 
2164  pending = false;
2165  nextWalk(currState->tc);
2166 
2167  currState->req = NULL;
2168  currState->tc = NULL;
2169  currState->delayed = false;
2170  delete currState;
2171  } else if (!currState->delayed) {
2172  // No additional lookups required
2173  DPRINTF(PageTableWalker, "calling translateTiming again\n");
2177  stats.walksLongTerminatedAtLevel[(unsigned) curr_lookup_level]++;
2178 
2179  pending = false;
2180  nextWalk(currState->tc);
2181 
2182  currState->req = NULL;
2183  currState->tc = NULL;
2184  currState->delayed = false;
2185  delete currState;
2186  } else {
2187  if (curr_lookup_level >= MAX_LOOKUP_LEVELS - 1)
2188  panic("Max. number of lookups already reached in table walk\n");
2189  // Need to perform additional lookups
 2190  stateQueues[currState->longDesc.lookupLevel].push_back(currState);
 2191  }
2192  currState = NULL;
2193 }
2194 
2195 
2196 void
 2197 TableWalker::nextWalk(ThreadContext *tc)
 2198 {
2199  if (pendingQueue.size())
 2200  schedule(doProcessEvent, clockEdge(Cycles(1)));
 2201  else
2202  completeDrain();
2203 }
2204 
2205 bool
2206 TableWalker::fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes,
2207  Request::Flags flags, int queueIndex, Event *event,
2208  void (TableWalker::*doDescriptor)())
2209 {
2210  bool isTiming = currState->timing;
2211 
2212  DPRINTF(PageTableWalker,
2213  "Fetching descriptor at address: 0x%x stage2Req: %d\n",
2214  descAddr, currState->stage2Req);
2215 
2216  // If this translation has a stage 2 then we know descAddr is an IPA and
2217  // needs to be translated before we can access the page table. Do that
2218  // check here.
2219  if (currState->stage2Req) {
2220  Fault fault;
2221 
2222  if (isTiming) {
2223  auto *tran = new
2224  Stage2Walk(*this, data, event, currState->vaddr);
2225  currState->stage2Tran = tran;
2226  readDataTimed(currState->tc, descAddr, tran, numBytes, flags);
2227  fault = tran->fault;
2228  } else {
2229  fault = readDataUntimed(currState->tc,
 2230  currState->vaddr, descAddr, data, numBytes, flags,
 2231  currState->functional);
 2232  }
2233 
2234  if (fault != NoFault) {
2235  currState->fault = fault;
2236  }
2237  if (isTiming) {
2238  if (queueIndex >= 0) {
2239  DPRINTF(PageTableWalker, "Adding to walker fifo: "
2240  "queue size before adding: %d\n",
2241  stateQueues[queueIndex].size());
2242  stateQueues[queueIndex].push_back(currState);
2243  currState = NULL;
2244  }
2245  } else {
2246  (this->*doDescriptor)();
2247  }
2248  } else {
2249  if (isTiming) {
2250             port->sendTimingReq(descAddr, numBytes, data, flags,
2251                 currState->tc->getCpuPtr()->clockPeriod(), event);
2252 
2253  if (queueIndex >= 0) {
2254  DPRINTF(PageTableWalker, "Adding to walker fifo: "
2255  "queue size before adding: %d\n",
2256  stateQueues[queueIndex].size());
2257  stateQueues[queueIndex].push_back(currState);
2258  currState = NULL;
2259  }
2260  } else if (!currState->functional) {
2261             port->sendAtomicReq(descAddr, numBytes, data, flags,
2262                 currState->tc->getCpuPtr()->clockPeriod());
2263 
2264  (this->*doDescriptor)();
2265  } else {
2266  port->sendFunctionalReq(descAddr, numBytes, data, flags);
2267  (this->*doDescriptor)();
2268  }
2269  }
2270  return (isTiming);
2271 }
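
Editor's sketch, not part of table_walker.cc: fetchDescriptor() returns true exactly when the walk is in timing mode, i.e. when the descriptor bytes only become valid later, once the walker port (and, if needed, the stage 2 translation) fires the supplied event. A hypothetical caller therefore treats the return value as a "response is delayed" flag, roughly:

    // Illustration only; the argument choices below are an assumption,
    // not a quote from this file.
    bool delayed = fetchDescriptor(desc_addr,
                                   (uint8_t *)&currState->l1Desc.data,
                                   sizeof(uint32_t), flag, L1, &doL1DescEvent,
                                   &TableWalker::doL1Descriptor);
    // delayed == true : timing walk; currState was pushed onto stateQueues[L1]
    //                   and the event handler finishes the lookup later
    // delayed == false: atomic/functional walk; doL1Descriptor() has already
    //                   run on the freshly fetched data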
2272 
2273 void
2274 TableWalker::insertTableEntry(DescriptorBase &descriptor, bool longDescriptor)
2275 {
2276  TlbEntry te;
2277 
2278  // Create and fill a new page table entry
2279  te.valid = true;
2280  te.longDescFormat = longDescriptor;
2281  te.isHyp = currState->isHyp;
2282  te.asid = currState->asid;
2283  te.vmid = currState->vmid;
2284  te.N = descriptor.offsetBits();
2285  te.vpn = currState->vaddr >> te.N;
2286  te.size = (1<<te.N) - 1;
2287  te.pfn = descriptor.pfn();
2288  te.domain = descriptor.domain();
2289  te.lookupLevel = descriptor.lookupLevel;
2290  te.ns = !descriptor.secure(haveSecurity, currState) || isStage2;
2291  te.nstid = !currState->isSecure;
2292  te.xn = descriptor.xn();
2293  if (currState->aarch64)
2294  te.el = currState->el;
2295  else
2296  te.el = EL1;
2297 
2298     stats.pageSizes[pageSizeNtoStatBin(te.N)]++;
2299     stats.requestOrigin[COMPLETED][currState->isFetch]++;
2300 
2301  // ASID has no meaning for stage 2 TLB entries, so mark all stage 2 entries
2302  // as global
2303  te.global = descriptor.global(currState) || isStage2;
2304  if (longDescriptor) {
2305  LongDescriptor lDescriptor =
2306  dynamic_cast<LongDescriptor &>(descriptor);
2307 
2308  te.xn |= currState->xnTable;
2309  te.pxn = currState->pxnTable || lDescriptor.pxn();
2310  if (isStage2) {
2311             // this is actually the HAP field, but it's stored in the same
2312             // bit positions as the AP field in a stage 1 translation.
2313  te.hap = lDescriptor.ap();
2314  } else {
2315  te.ap = ((!currState->rwTable || descriptor.ap() >> 1) << 1) |
2316  (currState->userTable && (descriptor.ap() & 0x1));
2317  }
2318  if (currState->aarch64)
2319  memAttrsAArch64(currState->tc, te, lDescriptor);
2320  else
2321  memAttrsLPAE(currState->tc, te, lDescriptor);
2322  } else {
2323  te.ap = descriptor.ap();
2324  memAttrs(currState->tc, te, currState->sctlr, descriptor.texcb(),
2325  descriptor.shareable());
2326  }
2327 
2328  // Debug output
2329  DPRINTF(TLB, descriptor.dbgHeader().c_str());
2330  DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
2331  te.N, te.pfn, te.size, te.global, te.valid);
2332  DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
2333  "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
2334  te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp,
2335  te.nonCacheable, te.ns);
2336  DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
2337  descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
2338  descriptor.getRawData());
2339 
2340  // Insert the entry into the TLB
2341  tlb->insert(currState->vaddr, te);
2342  if (!currState->timing) {
2343  currState->tc = NULL;
2344  currState->req = NULL;
2345  }
2346 }
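
Editor's worked example, not in the original source: for a short-descriptor small page offsetBits() is 12, so a walk for vaddr 0x12345678 produces an entry covering the 4 KiB region 0x12345000-0x12345fff before tlb->insert() is called:

    Addr vaddr = 0x12345678;
    uint8_t N  = 12;                 // descriptor.offsetBits() for a small page
    Addr vpn   = vaddr >> N;         // 0x12345, stored in te.vpn
    Addr size  = (1ULL << N) - 1;    // 0xfff, stored in te.size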
2347 
2348 LookupLevel
2349 TableWalker::toLookupLevel(uint8_t lookup_level_as_int)
2350 {
2351  switch (lookup_level_as_int) {
2352  case L1:
2353  return L1;
2354  case L2:
2355  return L2;
2356  case L3:
2357  return L3;
2358  default:
2359  panic("Invalid lookup level conversion");
2360  }
2361 }
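
Editor's sketch: toLookupLevel() is the checked way to turn a lookup level held as a raw integer (for instance one restored from serialized state) back into the enum; out-of-range values panic instead of silently producing a bogus level. A hypothetical use:

    uint8_t stored_level = 2;                        // assumed external value
    LookupLevel lvl = toLookupLevel(stored_level);   // yields L2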
2362 
2363 /* this method keeps track of the table walker queue's residency, so
2364  * needs to be called whenever requests start and complete. */
2365 void
2366 TableWalker::pendingChange()
2367 {
2368  unsigned n = pendingQueue.size();
2369  if ((currState != NULL) && (currState != pendingQueue.front())) {
2370  ++n;
2371  }
2372 
2373  if (n != pendingReqs) {
2374         Tick now = curTick();
2375         stats.pendingWalks.sample(pendingReqs, now - pendingChangeTick);
2376         pendingReqs = n;
2377  pendingChangeTick = now;
2378  }
2379 }
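
Editor's note: because each call samples the previous occupancy weighted by how long it was in effect, stats.pendingWalks becomes a time-weighted distribution of walker queue residency. For instance, if pendingReqs sat at 2 for 1000 ticks before this function next ran, the sample above is equivalent to:

    stats.pendingWalks.sample(2, 1000);   // occupancy 2, weighted by 1000 ticks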
2380 
2381 Fault
2382 TableWalker::testWalk(Addr pa, Addr size, TlbEntry::DomainType domain,
2383     LookupLevel lookup_level)
2384 {
2385  return tlb->testWalk(pa, size, currState->vaddr, currState->isSecure,
2386  currState->mode, domain, lookup_level);
2387 }
2388 
2389 
2390 uint8_t
2391 TableWalker::pageSizeNtoStatBin(uint8_t N)
2392 {
2393  /* for stats.pageSizes */
2394  switch(N) {
2395  case 12: return 0; // 4K
2396  case 14: return 1; // 16K (using 16K granule in v8-64)
2397  case 16: return 2; // 64K
2398  case 20: return 3; // 1M
2399  case 21: return 4; // 2M-LPAE
2400  case 24: return 5; // 16M
2401  case 25: return 6; // 32M (using 16K granule in v8-64)
2402  case 29: return 7; // 512M (using 64K granule in v8-64)
2403  case 30: return 8; // 1G-LPAE
2404     case 42: return 9; // 4TiB (using 64K granule in v8-64)
2405  default:
2406  panic("unknown page size");
2407  return 255;
2408  }
2409 }
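
Editor's worked example: N is the number of page-offset bits, so the returned bin lines up with the pageSizes.subname() labels set in TableWalkerStats below:

    pageSizeNtoStatBin(12);   // 4 KiB page              -> bin 0 ("4KiB")
    pageSizeNtoStatBin(21);   // 2 MiB LPAE block        -> bin 4 ("2MiB")
    pageSizeNtoStatBin(29);   // 512 MiB, 64 KiB granule -> bin 7 ("512MiB")
    pageSizeNtoStatBin(42);   // 4 TiB, 64 KiB granule   -> bin 9 ("4TiB")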
2410 
2411 Fault
2412 TableWalker::readDataUntimed(ThreadContext *tc, Addr vaddr, Addr desc_addr,
2413     uint8_t *data, int num_bytes, Request::Flags flags, bool functional)
2414 {
2415  Fault fault;
2416 
2417  // translate to physical address using the second stage MMU
2418  auto req = std::make_shared<Request>();
2419  req->setVirt(desc_addr, num_bytes, flags | Request::PT_WALK,
2420  requestorId, 0);
2421  if (functional) {
2422  fault = mmu->translateFunctional(req, tc, BaseMMU::Read,
2423  TLB::NormalTran, true);
2424  } else {
2425  fault = mmu->translateAtomic(req, tc, BaseMMU::Read, true);
2426  }
2427 
2428  // Now do the access.
2429  if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
2430  Packet pkt = Packet(req, MemCmd::ReadReq);
2431  pkt.dataStatic(data);
2432  if (functional) {
2433  port->sendFunctional(&pkt);
2434  } else {
2435  port->sendAtomic(&pkt);
2436  }
2437  assert(!pkt.isError());
2438  }
2439 
2440     // If there was a fault annotate it with the flag saying the fault
2441     // occurred while doing a translation for a stage 1 page table walk.
2442  if (fault != NoFault) {
2443  ArmFault *arm_fault = reinterpret_cast<ArmFault *>(fault.get());
2444  arm_fault->annotate(ArmFault::S1PTW, true);
2445  arm_fault->annotate(ArmFault::OVA, vaddr);
2446  }
2447  return fault;
2448 }
2449 
2450 void
2451 TableWalker::readDataTimed(ThreadContext *tc, Addr desc_addr,
2452     Stage2Walk *translation, int num_bytes,
2453  Request::Flags flags)
2454 {
2455  // translate to physical address using the second stage MMU
2456  translation->setVirt(
2457  desc_addr, num_bytes, flags | Request::PT_WALK, requestorId);
2458  translation->translateTiming(tc);
2459 }
2460 
2461 TableWalker::Stage2Walk::Stage2Walk(TableWalker &_parent,
2462     uint8_t *_data, Event *_event, Addr vaddr)
2463  : data(_data), numBytes(0), event(_event), parent(_parent),
2464  oVAddr(vaddr), fault(NoFault)
2465 {
2466  req = std::make_shared<Request>();
2467 }
2468 
2469 void
2470 TableWalker::Stage2Walk::finish(const Fault &_fault,
2471     const RequestPtr &req,
2472     ThreadContext *tc, BaseMMU::Mode mode)
2473 {
2474  fault = _fault;
2475 
2476     // If there was a fault annotate it with the flag saying the fault
2477     // occurred while doing a translation for a stage 1 page table walk.
2478  if (fault != NoFault) {
2479  ArmFault *arm_fault = reinterpret_cast<ArmFault *>(fault.get());
2480  arm_fault->annotate(ArmFault::S1PTW, true);
2481  arm_fault->annotate(ArmFault::OVA, oVAddr);
2482  }
2483 
2484  if (_fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
2485  parent.getTableWalkerPort().sendTimingReq(
2486  req->getPaddr(), numBytes, data, req->getFlags(),
2487  tc->getCpuPtr()->clockPeriod(), event);
2488  } else {
2489  // We can't do the DMA access as there's been a problem, so tell the
2490  // event we're done
2491  event->process();
2492  }
2493 }
2494 
2495 void
2496 TableWalker::Stage2Walk::translateTiming(ThreadContext *tc)
2497 {
2498  parent.mmu->translateTiming(req, tc, this, BaseMMU::Read, true);
2499 }
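
Editor's summary of the timing stage 2 path, pieced together from the routines above and given as a comment-only sketch rather than a verbatim trace:

    // fetchDescriptor(ipa, buf, n, flags, level, event, handler)   [timing]
    //   -> new Stage2Walk(*this, buf, event, currState->vaddr)
    //   -> readDataTimed(tc, ipa, tran, n, flags)
    //        -> tran->setVirt(ipa, n, flags | Request::PT_WALK, requestorId)
    //        -> tran->translateTiming(tc)        // stage 2 MMU, timing mode
    //   ...stage 2 translation completes later...
    //   Stage2Walk::finish(fault, req, tc, mode)
    //     -> NoFault: port->sendTimingReq(req->getPaddr(), numBytes, data, ...)
    //     -> fault  : annotate S1PTW/OVA, then event->process() immediately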
2500 
2501 TableWalker::TableWalkerStats::TableWalkerStats(statistics::Group *parent)
2502     : Stats::Group(parent),
2503  ADD_STAT(walks, statistics::units::Count::get(),
2504  "Table walker walks requested"),
2505  ADD_STAT(walksShortDescriptor, statistics::units::Count::get(),
2506  "Table walker walks initiated with short descriptors"),
2507  ADD_STAT(walksLongDescriptor, statistics::units::Count::get(),
2508  "Table walker walks initiated with long descriptors"),
2509  ADD_STAT(walksShortTerminatedAtLevel, statistics::units::Count::get(),
2510  "Level at which table walker walks with short descriptors "
2511  "terminate"),
2512  ADD_STAT(walksLongTerminatedAtLevel, statistics::units::Count::get(),
2513  "Level at which table walker walks with long descriptors "
2514  "terminate"),
2515  ADD_STAT(squashedBefore, statistics::units::Count::get(),
2516  "Table walks squashed before starting"),
2517  ADD_STAT(squashedAfter, statistics::units::Count::get(),
2518  "Table walks squashed after completion"),
2519  ADD_STAT(walkWaitTime, statistics::units::Tick::get(),
2520  "Table walker wait (enqueue to first request) latency"),
2521  ADD_STAT(walkServiceTime, statistics::units::Tick::get(),
2522  "Table walker service (enqueue to completion) latency"),
2523  ADD_STAT(pendingWalks, statistics::units::Tick::get(),
2524  "Table walker pending requests distribution"),
2525  ADD_STAT(pageSizes, statistics::units::Count::get(),
2526  "Table walker page sizes translated"),
2527  ADD_STAT(requestOrigin, statistics::units::Count::get(),
2528  "Table walker requests started/completed, data/inst")
2529 {
2530     walksShortDescriptor
2531         .flags(statistics::nozero);
2532 
2533     walksLongDescriptor
2534         .flags(statistics::nozero);
2535 
2536     walksShortTerminatedAtLevel
2537         .init(2)
2538         .flags(statistics::nozero);
2539 
2540     walksShortTerminatedAtLevel.subname(0, "Level1");
2541     walksShortTerminatedAtLevel.subname(1, "Level2");
2542 
2543     walksLongTerminatedAtLevel
2544         .init(4)
2545         .flags(statistics::nozero);
2546     walksLongTerminatedAtLevel.subname(0, "Level0");
2547     walksLongTerminatedAtLevel.subname(1, "Level1");
2548     walksLongTerminatedAtLevel.subname(2, "Level2");
2549     walksLongTerminatedAtLevel.subname(3, "Level3");
2550 
2551     squashedBefore
2552         .flags(statistics::nozero);
2553 
2554     squashedAfter
2555         .flags(statistics::nozero);
2556 
2557     walkWaitTime
2558         .init(16)
2559         .flags(statistics::pdf | statistics::nozero | statistics::nonan);
2560 
2561     walkServiceTime
2562         .init(16)
2563         .flags(statistics::pdf | statistics::nozero | statistics::nonan);
2564 
2565     pendingWalks
2566         .init(16)
2567         .flags(statistics::pdf | statistics::dist | statistics::nozero |
2568             statistics::nonan);
2569 
2570     pageSizes // see DDI 0487A D4-1661
2571         .init(10)
2572         .flags(statistics::total | statistics::pdf | statistics::dist |
2573             statistics::nozero);
2574     pageSizes.subname(0, "4KiB");
2575     pageSizes.subname(1, "16KiB");
2576     pageSizes.subname(2, "64KiB");
2577     pageSizes.subname(3, "1MiB");
2578     pageSizes.subname(4, "2MiB");
2579     pageSizes.subname(5, "16MiB");
2580     pageSizes.subname(6, "32MiB");
2581     pageSizes.subname(7, "512MiB");
2582     pageSizes.subname(8, "1GiB");
2583     pageSizes.subname(9, "4TiB");
2584 
2585     requestOrigin
2586         .init(2,2) // Instruction/Data, requests/completed
2587         .flags(statistics::total);
2588  requestOrigin.subname(0,"Requested");
2589  requestOrigin.subname(1,"Completed");
2590  requestOrigin.ysubname(0,"Data");
2591  requestOrigin.ysubname(1,"Inst");
2592 }
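
Editor's note: requestOrigin is a 2x2 vector whose first index distinguishes requested from completed walks and whose second index is currState->isFetch, matching the "Requested"/"Completed" subnames and the "Data"/"Inst" ysubnames above. The completed-walk side is bumped in insertTableEntry():

    stats.requestOrigin[COMPLETED][currState->isFetch]++;  // "Completed" row;
                                                           // "Inst" column when
                                                           // the walk served a
                                                           // fetch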
2593 
2594 } // namespace gem5