gem5  v21.2.0.0
table_walker.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2010, 2012-2019, 2021 Arm Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions are
16  * met: redistributions of source code must retain the above copyright
17  * notice, this list of conditions and the following disclaimer;
18  * redistributions in binary form must reproduce the above copyright
19  * notice, this list of conditions and the following disclaimer in the
20  * documentation and/or other materials provided with the distribution;
21  * neither the name of the copyright holders nor the names of its
22  * contributors may be used to endorse or promote products derived from
23  * this software without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  */
37 #include "arch/arm/table_walker.hh"
38 
39 #include <cassert>
40 #include <memory>
41 
42 #include "arch/arm/faults.hh"
43 #include "arch/arm/mmu.hh"
44 #include "arch/arm/pagetable.hh"
45 #include "arch/arm/system.hh"
46 #include "arch/arm/tlb.hh"
47 #include "base/compiler.hh"
48 #include "cpu/base.hh"
49 #include "cpu/thread_context.hh"
50 #include "debug/Checkpoint.hh"
51 #include "debug/Drain.hh"
52 #include "debug/PageTableWalker.hh"
53 #include "debug/TLB.hh"
54 #include "debug/TLBVerbose.hh"
55 #include "sim/system.hh"
56 
57 namespace gem5
58 {
59 
60 using namespace ArmISA;
61 
// TableWalker constructor: wires up the request port, the per-level
// descriptor-completion events, and caches system-wide properties.
// NOTE(review): this is a doxygen listing; the signature line (original
// line 62, `TableWalker::TableWalker(const Params &p)`) is missing from
// the captured text — the initializer list below starts mid-definition.
63  : ClockedObject(p),
64  requestorId(p.sys->getRequestorId(this)),
65  port(new Port(this, requestorId)),
66  isStage2(p.is_stage2), tlb(NULL),
67  currState(NULL), pending(false),
68  numSquashable(p.num_squash_per_cycle),
69  release(nullptr),
70  stats(this),
71  pendingReqs(0),
72  pendingChangeTick(curTick()),
// One completion event per descriptor level; timing-mode fetches schedule
// the matching event when the memory response arrives.
73  doL1DescEvent([this]{ doL1DescriptorWrapper(); }, name()),
74  doL2DescEvent([this]{ doL2DescriptorWrapper(); }, name()),
75  doL0LongDescEvent([this]{ doL0LongDescriptorWrapper(); }, name()),
76  doL1LongDescEvent([this]{ doL1LongDescriptorWrapper(); }, name()),
77  doL2LongDescEvent([this]{ doL2LongDescriptorWrapper(); }, name()),
78  doL3LongDescEvent([this]{ doL3LongDescriptorWrapper(); }, name()),
// Indexable view of the four long-descriptor events, by lookup level.
79  LongDescEventByLevel { &doL0LongDescEvent, &doL1LongDescEvent,
80  &doL2LongDescEvent, &doL3LongDescEvent },
81  doProcessEvent([this]{ processWalkWrapper(); }, name())
82 {
83  sctlr = 0;
84 
85  // Cache system-level properties
86  if (FullSystem) {
// In full-system mode the owning System must be an ArmSystem; read the
// PA range and large-ASID support from it.
87  ArmSystem *arm_sys = dynamic_cast<ArmSystem *>(p.sys);
88  assert(arm_sys);
89  _physAddrRange = arm_sys->physAddrRange();
90  _haveLargeAsid64 = arm_sys->haveLargeAsid64();
91  } else {
// SE mode: fixed defaults (48-bit PA, no 16-bit ASIDs).
92  _haveLargeAsid64 = false;
93  _physAddrRange = 48;
94  }
95 
96 }
97 
// Destructor body (empty). NOTE(review): the signature line (original
// line 98, presumably `TableWalker::~TableWalker()`) is missing from
// this captured listing.
99 {
100  ;
101 }
102 
// Returns the walker's request port, downcast to TableWalker::Port.
// NOTE(review): the return type / name lines (original lines 103-104)
// are missing from this captured listing — presumably
// `TableWalker::Port & TableWalker::getTableWalkerPort()`; confirm
// against the upstream source.
105 {
106  return static_cast<Port&>(getPort("port"));
107 }
108 
109 Port &
110 TableWalker::getPort(const std::string &if_name, PortID idx)
111 {
112  if (if_name == "port") {
113  return *port;
114  }
115  return ClockedObject::getPort(if_name, idx);
116 }
117 
118 void
// Binds the walker to its owning MMU and caches the release (feature
// set) object from it. NOTE(review): the signature line (original line
// 119) is missing from this captured listing.
120 {
121  mmu = _mmu;
122  release = mmu->release();
123 }
124 
// WalkerState default constructor: zero/NULL-initializes all per-walk
// bookkeeping. NOTE(review): the signature line (original line 125,
// presumably `TableWalker::WalkerState::WalkerState()`) is missing from
// this captured listing; the initializer list below starts mid-definition.
126  tc(nullptr), aarch64(false), el(EL0), physAddrRange(0), req(nullptr),
127  asid(0), vmid(0), isHyp(false), transState(nullptr),
128  vaddr(0), vaddr_tainted(0),
129  sctlr(0), scr(0), cpsr(0), tcr(0),
130  htcr(0), hcr(0), vtcr(0),
131  isWrite(false), isFetch(false), isSecure(false),
132  isUncacheable(false),
// Hierarchical-permission accumulators for long-descriptor walks.
133  secureLookup(false), rwTable(false), userTable(false), xnTable(false),
134  pxnTable(false), hpd(false), stage2Req(false),
135  stage2Tran(nullptr), timing(false), functional(false),
// l2Desc aliases l1Desc storage via its constructor argument.
136  mode(BaseMMU::Read), tranType(MMU::NormalTran), l2Desc(l1Desc),
137  delayed(false), tableWalker(nullptr)
138 {
139 }
140 
// Port constructor: a queued request port named "<walker>.port" that
// issues descriptor reads with the given requestor id.
// NOTE(review): the signature line (original line 141) is missing from
// this captured listing.
142  : QueuedRequestPort(_walker->name() + ".port", _walker,
143  reqQueue, snoopRespQueue),
144  reqQueue(*_walker, *this), snoopRespQueue(*_walker, *this),
145  requestorId(id)
146 {
147 }
148 
149 PacketPtr
// Builds a ReadReq packet for one descriptor fetch. The caller-owned
// `data` buffer receives the descriptor; `event`/`delay` ride along in
// the senderState and are consumed by handleResp() on completion.
// NOTE(review): the line naming the function (original line 150,
// presumably `TableWalker::Port::createPacket(`) is missing from this
// captured listing.
151  Addr desc_addr, int size,
152  uint8_t *data, Request::Flags flags, Tick delay,
153  Event *event)
154 {
155  RequestPtr req = std::make_shared<Request>(
156  desc_addr, size, flags, requestorId);
// Tag the access as DMA for task-id accounting.
157  req->taskId(context_switch_task_id::DMA);
158 
159  PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
// Packet reads directly into the caller's buffer (no copy on response).
160  pkt->dataStatic(data);
161 
162  auto state = new TableWalkerState;
163  state->event = event;
164  state->delay = delay;
165 
166  pkt->senderState = state;
167  return pkt;
168 }
169 
170 void
// Functional (debug) descriptor read: no timing, completes immediately.
// NOTE(review): the function-name line (original line 171) is missing
// from this captured listing.
172  Addr desc_addr, int size,
173  uint8_t *data, Request::Flags flags)
174 {
// No delay and no completion event for functional accesses.
175  auto pkt = createPacket(desc_addr, size, data, flags, 0, nullptr);
176 
177  sendFunctional(pkt);
178 
179  handleRespPacket(pkt);
180 }
181 
182 void
// Atomic-mode descriptor read: the sendAtomic() latency is forwarded to
// handleRespPacket so the caller's extra `delay` can be honored.
// NOTE(review): the function-name line (original line 183) is missing
// from this captured listing.
184  Addr desc_addr, int size,
185  uint8_t *data, Request::Flags flags, Tick delay)
186 {
187  auto pkt = createPacket(desc_addr, size, data, flags, delay, nullptr);
188 
189  Tick lat = sendAtomic(pkt);
190 
191  handleRespPacket(pkt, lat);
192 }
193 
194 void
// Timing-mode descriptor read: queues the packet now; `event` fires
// when the response comes back (see recvTimingResp/handleResp).
// NOTE(review): the function-name line (original line 195) is missing
// from this captured listing.
196  Addr desc_addr, int size,
197  uint8_t *data, Request::Flags flags, Tick delay,
198  Event *event)
199 {
200  auto pkt = createPacket(desc_addr, size, data, flags, delay, event);
201 
202  schedTimingReq(pkt, curTick());
203 }
204 
205 bool
// Timing response path; always accepts the packet (returns true).
// NOTE(review): the function-name line (original line 206) is missing
// from this captured listing.
207 {
208  // We shouldn't ever get a cacheable block in Modified state.
209  assert(pkt->req->isUncacheable() ||
210  !(pkt->cacheResponding() && !pkt->hasSharers()));
211 
212  handleRespPacket(pkt);
213 
214  return true;
215 }
216 
217 void
// Common response handling for functional/atomic/timing reads: recovers
// the TableWalkerState stashed in senderState, forwards it to
// handleResp(), and frees the packet.
// NOTE(review): the function-name line (original line 218) is missing
// from this captured listing.
219 {
220  // Should always see a response with a sender state.
221  assert(pkt->isResponse());
222 
223  // Get the DMA sender state.
224  auto *state = dynamic_cast<TableWalkerState*>(pkt->senderState);
225  assert(state);
226 
227  handleResp(state, pkt->getAddr(), pkt->req->getSize(), delay);
228 
229  delete pkt;
230 }
231 
232 void
// Schedules the walk-continuation event (if any) `delay` ticks from now
// and releases the sender state. NOTE(review): the line naming the
// function and its first parameters (original line 233) is missing from
// this captured listing.
234  Addr size, Tick delay)
235 {
236  if (state->event) {
237  owner.schedule(state->event, curTick() + delay);
238  }
239  delete state;
240 }
241 
242 void
// Signals drain completion once every per-level state queue and the
// pending queue are empty while a drain is in progress.
// NOTE(review): the function-name line (original line 243) is missing
// from this captured listing.
244 {
245  if (drainState() == DrainState::Draining &&
246  stateQueues[LookupLevel::L0].empty() &&
247  stateQueues[LookupLevel::L1].empty() &&
248  stateQueues[LookupLevel::L2].empty() &&
249  stateQueues[LookupLevel::L3].empty() &&
250  pendingQueue.empty()) {
251 
252  DPRINTF(Drain, "TableWalker done draining, processing drain event\n");
253  signalDrainDone();
254  }
255 }
256 
// drain(): reports Draining while any in-flight walk state remains,
// Drained otherwise. NOTE(review): the return type / name lines
// (original lines 257-258, presumably `DrainState TableWalker::drain()`)
// are missing from this captured listing.
259 {
260  bool state_queues_not_empty = false;
261 
// Any non-empty per-level queue means a descriptor fetch is still
// outstanding.
262  for (int i = 0; i < LookupLevel::Num_ArmLookupLevel; ++i) {
263  if (!stateQueues[i].empty()) {
264  state_queues_not_empty = true;
265  break;
266  }
267  }
268 
269  if (state_queues_not_empty || pendingQueue.size()) {
270  DPRINTF(Drain, "TableWalker not drained\n");
271  return DrainState::Draining;
272  } else {
273  DPRINTF(Drain, "TableWalker free, no need to drain\n");
274  return DrainState::Drained;
275  }
276 }
277 
278 void
// On resume after a checkpoint/drain in timing mode, any half-done walk
// state is stale and is discarded. NOTE(review): the function-name line
// (original line 279) is missing from this captured listing.
280 {
281  if (params().sys->isTimingMode() && currState) {
282  delete currState;
283  currState = NULL;
284  pendingChange();
285  }
286 }
287 
// Entry point for a page-table walk. Sets up a WalkerState from the
// request/thread context, selects the translation regime (AArch64,
// LPAE long-descriptor, or short-descriptor), and either runs the walk
// synchronously (atomic/functional) or enqueues it (timing).
// NOTE(review): this doxygen listing has dropped several body lines
// (e.g. originals 325, 331, 350, 361-372, 379-421, 424-427, 432, 436,
// 442-444) — most of the TTBR/TCR/SCTLR register reads are absent.
// Comments below describe only what is visible.
288 Fault
289 TableWalker::walk(const RequestPtr &_req, ThreadContext *_tc, uint16_t _asid,
290  vmid_t _vmid, bool _isHyp, MMU::Mode _mode,
291  MMU::Translation *_trans, bool _timing, bool _functional,
292  bool secure, MMU::ArmTranslationType tranType,
293  bool _stage2Req, const TlbEntry *walk_entry)
294 {
295  assert(!(_functional && _timing));
296  ++stats.walks;
297 
298  WalkerState *savedCurrState = NULL;
299 
300  if (!currState && !_functional) {
301  // For atomic mode, a new WalkerState instance should be only created
302  // once per TLB. For timing mode, a new instance is generated for every
303  // TLB miss.
304  DPRINTF(PageTableWalker, "creating new instance of WalkerState\n");
305 
306  currState = new WalkerState();
307  currState->tableWalker = this;
308  } else if (_functional) {
309  // If we are mixing functional mode with timing (or even
310  // atomic), we need to to be careful and clean up after
311  // ourselves to not risk getting into an inconsistent state.
312  DPRINTF(PageTableWalker,
313  "creating functional instance of WalkerState\n");
314  savedCurrState = currState;
315  currState = new WalkerState();
316  currState->tableWalker = this;
317  } else if (_timing) {
318  // This is a translation that was completed and then faulted again
319  // because some underlying parameters that affect the translation
320  // changed out from under us (e.g. asid). It will either be a
321  // misprediction, in which case nothing will happen or we'll use
322  // this fault to re-execute the faulting instruction which should clean
323  // up everything.
324  if (currState->vaddr_tainted == _req->getVaddr()) {
// NOTE(review): one line missing here (original 325) before the return.
326  return std::make_shared<ReExec>();
327  }
328  }
329  pendingChange();
330 
332  currState->tc = _tc;
333  // ARM DDI 0487A.f (ARMv8 ARM) pg J8-5672
334  // aarch32/translation/translation/AArch32.TranslateAddress dictates
335  // even AArch32 EL0 will use AArch64 translation if EL1 is in AArch64.
336  if (isStage2) {
337  currState->el = EL1;
338  currState->aarch64 = ELIs64(_tc, EL2);
339  } else {
340  currState->el =
341  MMU::tranTypeEL(_tc->readMiscReg(MISCREG_CPSR), tranType);
342  currState->aarch64 =
343  ELIs64(_tc, currState->el == EL0 ? EL1 : currState->el);
344  }
345  currState->transState = _trans;
346  currState->req = _req;
347  if (walk_entry) {
// Resume from a partial walk-cache entry supplied by the caller.
348  currState->walkEntry = *walk_entry;
349  } else {
// NOTE(review): the else-branch body (original line 350) is missing.
351  }
353  currState->asid = _asid;
354  currState->vmid = _vmid;
355  currState->isHyp = _isHyp;
356  currState->timing = _timing;
357  currState->functional = _functional;
358  currState->mode = _mode;
359  currState->tranType = tranType;
360  currState->isSecure = secure;
362 
365  currState->vaddr_tainted = currState->req->getVaddr();
// NOTE(review): the purified-vaddr assignments (originals 367-371) are
// missing; only the if/else skeleton survives below.
366  if (currState->aarch64)
370  else
372 
373  if (currState->aarch64) {
// NOTE(review): most register reads in this AArch64 configuration
// section (sctlr/tcr/vtcr/hcr per EL) were dropped by the extraction.
375  if (isStage2) {
377  if (currState->secureLookup) {
378  currState->vtcr =
380  } else {
381  currState->vtcr =
383  }
384  } else switch (currState->el) {
385  case EL0:
386  if (HaveVirtHostExt(currState->tc) &&
387  currState->hcr.tge == 1 && currState->hcr.e2h ==1) {
390  } else {
393  }
394  break;
395  case EL1:
398  break;
399  case EL2:
400  assert(release->has(ArmExtension::VIRTUALIZATION));
403  break;
404  case EL3:
405  assert(release->has(ArmExtension::SECURITY));
408  break;
409  default:
410  panic("Invalid exception level");
411  break;
412  }
413  } else {
// NOTE(review): the AArch32 configuration branch (originals 414-420)
// is missing from this captured listing.
421  }
422  sctlr = currState->sctlr;
423 
426 
428 
// Only request a stage-2 follow-up when we are not ourselves the
// stage-2 walker.
429  currState->stage2Req = _stage2Req && !isStage2;
430 
431  bool long_desc_format = currState->aarch64 || _isHyp || isStage2 ||
433 
434  if (long_desc_format) {
435  // Helper variables used for hierarchical permissions
437  currState->rwTable = true;
438  currState->userTable = true;
439  currState->xnTable = false;
440  currState->pxnTable = false;
441 
443  } else {
445  }
446 
// Atomic/functional: run the whole walk inline.
447  if (!currState->timing) {
448  Fault fault = NoFault;
449  if (currState->aarch64)
450  fault = processWalkAArch64();
451  else if (long_desc_format)
452  fault = processWalkLPAE();
453  else
454  fault = processWalk();
455 
456  // If this was a functional non-timing access restore state to
457  // how we found it.
458  if (currState->functional) {
459  delete currState;
460  currState = savedCurrState;
461  }
462  return fault;
463  }
464 
// Timing: serialize walks — queue behind any in-flight walk, otherwise
// start immediately.
465  if (pending || pendingQueue.size()) {
466  pendingQueue.push_back(currState);
467  currState = NULL;
468  pendingChange();
469  } else {
470  pending = true;
471  pendingChange();
472  if (currState->aarch64)
473  return processWalkAArch64();
474  else if (long_desc_format)
475  return processWalkLPAE();
476  else
477  return processWalk();
478  }
479 
480  return NoFault;
481 }
482 
483 void
// Timing-mode dispatcher: pops the next queued walk, drops it if the
// instruction was squashed or a previous walk already filled the TLB,
// and otherwise kicks off the appropriate processWalk* routine.
// NOTE(review): the function-name line (original 484) and several body
// lines (e.g. 493, 516, 536, 540, 543, 547, 549, 552-555, 564-565) are
// missing from this captured listing — the TLB lookup producing `te`
// is among them.
485 {
486  assert(!currState);
487  assert(pendingQueue.size());
488  pendingChange();
489  currState = pendingQueue.front();
490 
491  // Check if a previous walk filled this request already
492  // @TODO Should this always be the TLB or should we look in the stage2 TLB?
494  currState->vmid, currState->isHyp, currState->isSecure, true, false,
495  currState->el, false, isStage2, currState->mode);
496 
497  // Check if we still need to have a walk for this request. If the requesting
498  // instruction has been squashed, or a previous walk has filled the TLB with
499  // a match, we just want to get rid of the walk. The latter could happen
500  // when there are multiple outstanding misses to a single page and a
501  // previous request has been successfully translated.
502  if (!currState->transState->squashed() && (!te || te->partial)) {
503  // We've got a valid request, lets process it
504  pending = true;
505  pendingQueue.pop_front();
506  // Keep currState in case one of the processWalk... calls NULLs it
507 
508  if (te && te->partial) {
509  currState->walkEntry = *te;
510  }
511  WalkerState *curr_state_copy = currState;
512  Fault f;
513  if (currState->aarch64)
514  f = processWalkAArch64();
515  else if (longDescFormatInUse(currState->tc) ||
517  f = processWalkLPAE();
518  else
519  f = processWalk();
520 
// A synchronous fault terminates the walk: report it to the
// translation client and free the state.
521  if (f != NoFault) {
522  curr_state_copy->transState->finish(f, curr_state_copy->req,
523  curr_state_copy->tc, curr_state_copy->mode);
524 
525  delete curr_state_copy;
526  }
527  return;
528  }
529 
530 
531  // If the instruction that we were translating for has been
532  // squashed we shouldn't bother.
533  unsigned num_squashed = 0;
534  ThreadContext *tc = currState->tc;
// Retire up to numSquashable dead/satisfied walks per invocation.
535  while ((num_squashed < numSquashable) && currState &&
537  (te && !te->partial))) {
538  pendingQueue.pop_front();
539  num_squashed++;
541 
542  DPRINTF(TLB, "Squashing table walk for address %#x\n",
544 
545  if (currState->transState->squashed()) {
546  // finish the translation which will delete the translation object
548  std::make_shared<UnimpFault>("Squashed Inst"),
550  } else {
551  // translate the request now that we know it will work
// NOTE(review): the statements completing this branch (originals
// 552-555) are missing from this captured listing.
556  }
557 
558  // delete the current request
559  delete currState;
560 
561  // peak at the next one
562  if (pendingQueue.size()) {
563  currState = pendingQueue.front();
566  false, currState->el, false, isStage2, currState->mode);
567  } else {
568  // Terminate the loop, nothing more to do
569  currState = NULL;
570  }
571  }
572  pendingChange();
573 
574  // if we still have pending translations, schedule more work
575  nextWalk(tc);
576  currState = NULL;
577 }
578 
579 Fault
// Short-descriptor (AArch32 non-LPAE) walk: selects TTBR0/TTBR1 by
// TTBCR.N, checks PD0/PD1 walk-disable bits, computes the L1 descriptor
// address and issues the fetch.
// NOTE(review): the function-name line (original 580) and several body
// lines (e.g. 586-587 TTBR reads, 599, 602, 611, 614, 617-618, 621,
// 623-624, 631, 634, 637-638, 641, 660, 669, 671, 682) are missing
// from this captured listing.
581 {
582  Addr ttbr = 0;
583 
584  // For short descriptors, translation configs are held in
585  // TTBR1.
588 
// irgn0/irgn1 both zero => inner non-cacheable table walks.
589  const auto irgn0_mask = 0x1;
590  const auto irgn1_mask = 0x40;
591  currState->isUncacheable = (ttbr1 & (irgn0_mask | irgn1_mask)) == 0;
592 
593  // If translation isn't enabled, we shouldn't be here
594  assert(currState->sctlr.m || isStage2);
595  const bool is_atomic = currState->req->isAtomic();
596  const bool have_security = release->has(ArmExtension::SECURITY);
597 
598  DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
600  32 - currState->ttbcr.n));
601 
603 
// TTBR0 covers the low region (or everything when N == 0).
604  if (currState->ttbcr.n == 0 || !mbits(currState->vaddr, 31,
605  32 - currState->ttbcr.n)) {
606  DPRINTF(TLB, " - Selecting TTBR0\n");
607  // Check if table walk is allowed when Security Extensions are enabled
608  if (have_security && currState->ttbcr.pd0) {
609  if (currState->isFetch)
610  return std::make_shared<PrefetchAbort>(
612  ArmFault::TranslationLL + LookupLevel::L1,
613  isStage2,
615  else
616  return std::make_shared<DataAbort>(
619  is_atomic ? false : currState->isWrite,
620  ArmFault::TranslationLL + LookupLevel::L1, isStage2,
622  }
625  } else {
626  DPRINTF(TLB, " - Selecting TTBR1\n");
627  // Check if table walk is allowed when Security Extensions are enabled
628  if (have_security && currState->ttbcr.pd1) {
629  if (currState->isFetch)
630  return std::make_shared<PrefetchAbort>(
632  ArmFault::TranslationLL + LookupLevel::L1,
633  isStage2,
635  else
636  return std::make_shared<DataAbort>(
639  is_atomic ? false : currState->isWrite,
640  ArmFault::TranslationLL + LookupLevel::L1, isStage2,
642  }
643  ttbr = ttbr1;
// TTBR1 walks always use an effective N of 0 (14-bit table base).
644  currState->ttbcr.n = 0;
645  }
646 
647  Addr l1desc_addr = mbits(ttbr, 31, 14 - currState->ttbcr.n) |
648  (bits(currState->vaddr, 31 - currState->ttbcr.n, 20) << 2);
649  DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr,
650  currState->isSecure ? "s" : "ns");
651 
652  // Trickbox address check
653  Fault f;
654  f = testWalk(l1desc_addr, sizeof(uint32_t),
655  TlbEntry::DomainType::NoAccess, LookupLevel::L1, isStage2);
656  if (f) {
657  DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
658  if (currState->timing) {
659  pending = false;
661  currState = NULL;
662  } else {
663  currState->tc = NULL;
664  currState->req = NULL;
665  }
666  return f;
667  }
668 
670  if (currState->sctlr.c == 0 || currState->isUncacheable) {
672  }
673 
674  if (currState->isSecure) {
675  flag.set(Request::SECURE);
676  }
677 
// Issue the L1 descriptor fetch; in timing mode this returns true and
// completion is signalled through doL1DescEvent.
678  bool delayed;
679  delayed = fetchDescriptor(l1desc_addr, (uint8_t*)&currState->l1Desc.data,
680  sizeof(uint32_t), flag, LookupLevel::L1,
681  &doL1DescEvent,
683  if (!delayed) {
684  f = currState->fault;
685  }
686 
687  return f;
688 }
689 
690 Fault
// LPAE (AArch32 long-descriptor) walk: picks VTTBR/HTTBR/TTBR0/TTBR1,
// derives the starting lookup level from the region size, computes the
// first descriptor address and issues the fetch.
// NOTE(review): the function-name line (original 691) and several body
// lines (e.g. 698, 700, 702, 710, 717, 748, 751, 754-755, 759, 761-762,
// 773, 776, 779-780, 784, 786-787, 797, 800, 803-804, 807, 836, 846,
// 851, 856) are missing from this captured listing.
692 {
693  Addr ttbr, ttbr0_max, ttbr1_min, desc_addr;
694  int tsz, n;
695  LookupLevel start_lookup_level = LookupLevel::L1;
696 
697  DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n",
699 
701 
703  if (currState->isSecure)
704  flag.set(Request::SECURE);
705 
706  // work out which base address register to use, if in hyp mode we always
707  // use HTTBR
708  if (isStage2) {
709  DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n");
// VTCR.T0SZ is a signed 4-bit field for stage 2.
711  tsz = sext<4>(currState->vtcr.t0sz);
712  start_lookup_level = currState->vtcr.sl0 ?
713  LookupLevel::L1 : LookupLevel::L2;
714  currState->isUncacheable = currState->vtcr.irgn0 == 0;
715  } else if (currState->isHyp) {
716  DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n");
718  tsz = currState->htcr.t0sz;
719  currState->isUncacheable = currState->htcr.irgn0 == 0;
720  } else {
721  assert(longDescFormatInUse(currState->tc));
722 
723  // Determine boundaries of TTBR0/1 regions
724  if (currState->ttbcr.t0sz)
725  ttbr0_max = (1ULL << (32 - currState->ttbcr.t0sz)) - 1;
726  else if (currState->ttbcr.t1sz)
727  ttbr0_max = (1ULL << 32) -
728  (1ULL << (32 - currState->ttbcr.t1sz)) - 1;
729  else
730  ttbr0_max = (1ULL << 32) - 1;
731  if (currState->ttbcr.t1sz)
732  ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz));
733  else
734  ttbr1_min = (1ULL << (32 - currState->ttbcr.t0sz));
735 
736  const bool is_atomic = currState->req->isAtomic();
737 
738  // The following code snippet selects the appropriate translation table base
739  // address (TTBR0 or TTBR1) and the appropriate starting lookup level
740  // depending on the address range supported by the translation table (ARM
741  // ARM issue C B3.6.4)
742  if (currState->vaddr <= ttbr0_max) {
743  DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n");
744  // Check if table walk is allowed
745  if (currState->ttbcr.epd0) {
746  if (currState->isFetch)
747  return std::make_shared<PrefetchAbort>(
749  ArmFault::TranslationLL + LookupLevel::L1,
750  isStage2,
752  else
753  return std::make_shared<DataAbort>(
756  is_atomic ? false : currState->isWrite,
757  ArmFault::TranslationLL + LookupLevel::L1,
758  isStage2,
760  }
763  tsz = currState->ttbcr.t0sz;
764  currState->isUncacheable = currState->ttbcr.irgn0 == 0;
765  if (ttbr0_max < (1ULL << 30)) // Upper limit < 1 GiB
766  start_lookup_level = LookupLevel::L2;
767  } else if (currState->vaddr >= ttbr1_min) {
768  DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
769  // Check if table walk is allowed
770  if (currState->ttbcr.epd1) {
771  if (currState->isFetch)
772  return std::make_shared<PrefetchAbort>(
774  ArmFault::TranslationLL + LookupLevel::L1,
775  isStage2,
777  else
778  return std::make_shared<DataAbort>(
781  is_atomic ? false : currState->isWrite,
782  ArmFault::TranslationLL + LookupLevel::L1,
783  isStage2,
785  }
788  tsz = currState->ttbcr.t1sz;
789  currState->isUncacheable = currState->ttbcr.irgn1 == 0;
790  // Lower limit >= 3 GiB
791  if (ttbr1_min >= (1ULL << 31) + (1ULL << 30))
792  start_lookup_level = LookupLevel::L2;
793  } else {
794  // Out of boundaries -> translation fault
795  if (currState->isFetch)
796  return std::make_shared<PrefetchAbort>(
798  ArmFault::TranslationLL + LookupLevel::L1,
799  isStage2,
801  else
802  return std::make_shared<DataAbort>(
805  is_atomic ? false : currState->isWrite,
806  ArmFault::TranslationLL + LookupLevel::L1,
808  }
809 
810  }
811 
812  // Perform lookup (ARM ARM issue C B3.6.6)
813  if (start_lookup_level == LookupLevel::L1) {
814  n = 5 - tsz;
815  desc_addr = mbits(ttbr, 39, n) |
816  (bits(currState->vaddr, n + 26, 30) << 3);
817  DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
818  desc_addr, currState->isSecure ? "s" : "ns");
819  } else {
820  // Skip first-level lookup
821  n = (tsz >= 2 ? 14 - tsz : 12);
822  desc_addr = mbits(ttbr, 39, n) |
823  (bits(currState->vaddr, n + 17, 21) << 3);
824  DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
825  desc_addr, currState->isSecure ? "s" : "ns");
826  }
827 
828  // Trickbox address check
829  Fault f = testWalk(desc_addr, sizeof(uint64_t),
830  TlbEntry::DomainType::NoAccess, start_lookup_level,
831  isStage2);
832  if (f) {
833  DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
834  if (currState->timing) {
835  pending = false;
837  currState = NULL;
838  } else {
839  currState->tc = NULL;
840  currState->req = NULL;
841  }
842  return f;
843  }
844 
845  if (currState->sctlr.c == 0 || currState->isUncacheable) {
847  }
848 
849  currState->longDesc.lookupLevel = start_lookup_level;
850  currState->longDesc.aarch64 = false;
852 
// Issue the first-level fetch; completion (timing mode) is signalled
// through the per-level event in LongDescEventByLevel.
853  bool delayed = fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
854  sizeof(uint64_t), flag, start_lookup_level,
855  LongDescEventByLevel[start_lookup_level],
857  if (!delayed) {
858  f = currState->fault;
859  }
860 
861  return f;
862 }
863 
864 bool
// Returns true when the configured input size (tsz) is outside the
// architectural min/max for the granule, or when the bits of the VA
// above the region size are not a proper sign-extension (all zeros for
// the low range, all ones for the high range).
// NOTE(review): the first signature line (original 865, carrying the
// function name and leading parameters) is missing from this captured
// listing.
866  GrainSize tg, int tsz, bool low_range)
867 {
868  // The effective maximum input size is 48 if ARMv8.2-LVA is not
869  // supported or if the translation granule that is in use is 4KB or
870  // 16KB in size. When ARMv8.2-LVA is supported, for the 64KB
871  // translation granule size only, the effective minimum value of
872  // 52.
873  int in_max = (HaveLVA(currState->tc) && tg == Grain64KB) ? 52 : 48;
874  int in_min = 64 - (tg == Grain64KB ? 47 : 48);
875 
876  return tsz > in_max || tsz < in_min || (low_range ?
877  bits(currState->vaddr, top_bit, tsz) != 0x0 :
878  bits(currState->vaddr, top_bit, tsz) != mask(top_bit - tsz + 1));
879 }
880 
881 bool
// True when `addr` has bits set above the configured output PA range
// (an Address Size Fault condition). A pa_range equal to the
// implementation's maximum can never fault here.
// NOTE(review): the signature line (original 882) is missing from this
// captured listing.
883 {
884  return (pa_range != _physAddrRange &&
885  bits(addr, _physAddrRange - 1, pa_range));
886 }
887 
888 Fault
// AArch64 (VMSAv8-64) walk: per-EL TTBR/granule/size selection, VA and
// PA size-fault checks, start-address computation via walkAddresses(),
// then the first descriptor fetch.
// NOTE(review): the function-name line (original 889) and many body
// lines (e.g. 894, 896, 906, 919-923, 933, 946, 963, 966, 977, 982,
// 990, 995, 1012, 1018, 1027, 1032, 1049, 1054, 1074, 1076, 1079-1080,
// 1083, 1106, 1116, 1121, 1124, 1127-1128, 1132, 1163, 1174-1175,
// 1184) are missing from this captured listing — notably the TTBR
// reads and most fault-constructor arguments. Comments describe only
// what is visible.
890 {
891  assert(currState->aarch64);
892 
893  DPRINTF(TLB, "Beginning table walk for address %#llx, TCR: %#llx\n",
895 
897 
898  // Determine TTBR, table size, granule size and phys. address range
899  Addr ttbr = 0;
900  int tsz = 0, ps = 0;
901  GrainSize tg = Grain4KB; // grain size computed from tg* field
902  bool fault = false;
903 
// Effective top VA bit (55 or 63) depends on TBI and the regime.
904  int top_bit = computeAddrTop(currState->tc,
905  bits(currState->vaddr, 55),
907  currState->tcr,
908  currState->el);
909 
910  bool vaddr_fault = false;
911  switch (currState->el) {
912  case EL0:
913  {
914  Addr ttbr0;
915  Addr ttbr1;
916  if (HaveVirtHostExt(currState->tc) &&
917  currState->hcr.tge==1 && currState->hcr.e2h == 1) {
918  // VHE code for EL2&0 regime
921  } else {
924  }
925  switch (bits(currState->vaddr, 63,48)) {
926  case 0:
927  DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
928  ttbr = ttbr0;
929  tsz = 64 - currState->tcr.t0sz;
930  tg = GrainMap_tg0[currState->tcr.tg0];
931  currState->hpd = currState->tcr.hpd0;
932  currState->isUncacheable = currState->tcr.irgn0 == 0;
934  top_bit, tg, tsz, true);
935 
936  if (vaddr_fault || currState->tcr.epd0)
937  fault = true;
938  break;
939  case 0xffff:
940  DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
941  ttbr = ttbr1;
942  tsz = 64 - currState->tcr.t1sz;
943  tg = GrainMap_tg1[currState->tcr.tg1];
944  currState->hpd = currState->tcr.hpd1;
945  currState->isUncacheable = currState->tcr.irgn1 == 0;
947  top_bit, tg, tsz, false);
948 
949  if (vaddr_fault || currState->tcr.epd1)
950  fault = true;
951  break;
952  default:
953  // top two bytes must be all 0s or all 1s, else invalid addr
954  fault = true;
955  }
956  ps = currState->tcr.ips;
957  }
958  break;
959  case EL1:
960  if (isStage2) {
961  if (currState->secureLookup) {
962  DPRINTF(TLB, " - Selecting VSTTBR_EL2 (AArch64 stage 2)\n");
964  } else {
965  DPRINTF(TLB, " - Selecting VTTBR_EL2 (AArch64 stage 2)\n");
967  }
968  tsz = 64 - currState->vtcr.t0sz64;
969  tg = GrainMap_tg0[currState->vtcr.tg0];
970 
971  ps = currState->vtcr.ps;
972  currState->isUncacheable = currState->vtcr.irgn0 == 0;
973  } else {
974  switch (bits(currState->vaddr, top_bit)) {
975  case 0:
976  DPRINTF(TLB, " - Selecting TTBR0_EL1 (AArch64)\n");
978  tsz = 64 - currState->tcr.t0sz;
979  tg = GrainMap_tg0[currState->tcr.tg0];
980  currState->hpd = currState->tcr.hpd0;
981  currState->isUncacheable = currState->tcr.irgn0 == 0;
983  top_bit, tg, tsz, true);
984 
985  if (vaddr_fault || currState->tcr.epd0)
986  fault = true;
987  break;
988  case 0x1:
989  DPRINTF(TLB, " - Selecting TTBR1_EL1 (AArch64)\n");
991  tsz = 64 - currState->tcr.t1sz;
992  tg = GrainMap_tg1[currState->tcr.tg1];
993  currState->hpd = currState->tcr.hpd1;
994  currState->isUncacheable = currState->tcr.irgn1 == 0;
996  top_bit, tg, tsz, false);
997 
998  if (vaddr_fault || currState->tcr.epd1)
999  fault = true;
1000  break;
1001  default:
1002  // top two bytes must be all 0s or all 1s, else invalid addr
1003  fault = true;
1004  }
1005  ps = currState->tcr.ips;
1006  }
1007  break;
1008  case EL2:
1009  switch(bits(currState->vaddr, top_bit)) {
1010  case 0:
1011  DPRINTF(TLB, " - Selecting TTBR0_EL2 (AArch64)\n");
1013  tsz = 64 - currState->tcr.t0sz;
1014  tg = GrainMap_tg0[currState->tcr.tg0];
// With E2H the EL2&0 regime uses hpd0/hpd1 like EL1; without it the
// single-field TCR_EL2 layout applies.
1015  currState->hpd = currState->hcr.e2h ?
1016  currState->tcr.hpd0 : currState->tcr.hpd;
1017  currState->isUncacheable = currState->tcr.irgn0 == 0;
1019  top_bit, tg, tsz, true);
1020 
1021  if (vaddr_fault || (currState->hcr.e2h && currState->tcr.epd0))
1022  fault = true;
1023  break;
1024 
1025  case 0x1:
1026  DPRINTF(TLB, " - Selecting TTBR1_EL2 (AArch64)\n");
1028  tsz = 64 - currState->tcr.t1sz;
1029  tg = GrainMap_tg1[currState->tcr.tg1];
1030  currState->hpd = currState->tcr.hpd1;
1031  currState->isUncacheable = currState->tcr.irgn1 == 0;
1033  top_bit, tg, tsz, false);
1034 
// TTBR1_EL2 only exists in the E2H regime.
1035  if (vaddr_fault || !currState->hcr.e2h || currState->tcr.epd1)
1036  fault = true;
1037  break;
1038 
1039  default:
1040  // invalid addr if top two bytes are not all 0s
1041  fault = true;
1042  }
1043  ps = currState->hcr.e2h ? currState->tcr.ips: currState->tcr.ps;
1044  break;
1045  case EL3:
1046  switch(bits(currState->vaddr, top_bit)) {
1047  case 0:
1048  DPRINTF(TLB, " - Selecting TTBR0_EL3 (AArch64)\n");
1050  tsz = 64 - currState->tcr.t0sz;
1051  tg = GrainMap_tg0[currState->tcr.tg0];
1052  currState->hpd = currState->tcr.hpd;
1053  currState->isUncacheable = currState->tcr.irgn0 == 0;
1055  top_bit, tg, tsz, true);
1056 
1057  if (vaddr_fault)
1058  fault = true;
1059  break;
1060  default:
1061  // invalid addr if top two bytes are not all 0s
1062  fault = true;
1063  }
1064  ps = currState->tcr.ps;
1065  break;
1066  }
1067 
1068  const bool is_atomic = currState->req->isAtomic();
1069 
// Level-0 translation fault for out-of-range VAs / disabled walks.
1070  if (fault) {
1071  Fault f;
1072  if (currState->isFetch)
1073  f = std::make_shared<PrefetchAbort>(
1075  ArmFault::TranslationLL + LookupLevel::L0, isStage2,
1077  else
1078  f = std::make_shared<DataAbort>(
1081  is_atomic ? false : currState->isWrite,
1082  ArmFault::TranslationLL + LookupLevel::L0,
1084 
1085  if (currState->timing) {
1086  pending = false;
1087  nextWalk(currState->tc);
1088  currState = NULL;
1089  } else {
1090  currState->tc = NULL;
1091  currState->req = NULL;
1092  }
1093  return f;
1094 
1095  }
1096 
1097  if (tg == ReservedGrain) {
1098  warn_once("Reserved granule size requested; gem5's IMPLEMENTATION "
1099  "DEFINED behavior takes this to mean 4KB granules\n");
1100  tg = Grain4KB;
1101  }
1102 
1103  // Clamp to lower limit
1104  int pa_range = decodePhysAddrRange64(ps);
1105  if (pa_range > _physAddrRange) {
1107  } else {
1108  currState->physAddrRange = pa_range;
1109  }
1110 
1111  auto [table_addr, desc_addr, start_lookup_level] = walkAddresses(
1112  ttbr, tg, tsz, pa_range);
1113 
1114  // Determine physical address size and raise an Address Size Fault if
1115  // necessary
// NOTE(review): the `if` condition opening this fault path (original
// 1116, presumably a checkAddrSizeFaultAArch64 call) is missing.
1117  DPRINTF(TLB, "Address size fault before any lookup\n");
1118  Fault f;
1119  if (currState->isFetch)
1120  f = std::make_shared<PrefetchAbort>(
1122  ArmFault::AddressSizeLL + start_lookup_level,
1123  isStage2,
1125  else
1126  f = std::make_shared<DataAbort>(
1129  is_atomic ? false : currState->isWrite,
1130  ArmFault::AddressSizeLL + start_lookup_level,
1131  isStage2,
1133 
1134 
1135  if (currState->timing) {
1136  pending = false;
1137  nextWalk(currState->tc);
1138  currState = NULL;
1139  } else {
1140  currState->tc = NULL;
1141  currState->req = NULL;
1142  }
1143  return f;
1144 
1145  }
1146 
1147  // Trickbox address check
1148  Fault f = testWalk(desc_addr, sizeof(uint64_t),
1149  TlbEntry::DomainType::NoAccess, start_lookup_level, isStage2);
1150  if (f) {
1151  DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
1152  if (currState->timing) {
1153  pending = false;
1154  nextWalk(currState->tc);
1155  currState = NULL;
1156  } else {
1157  currState->tc = NULL;
1158  currState->req = NULL;
1159  }
1160  return f;
1161  }
1162 
1164  if (currState->sctlr.c == 0 || currState->isUncacheable) {
1165  flag.set(Request::UNCACHEABLE);
1166  }
1167 
1168  if (currState->isSecure) {
1169  flag.set(Request::SECURE);
1170  }
1171 
1172  currState->longDesc.lookupLevel = start_lookup_level;
1173  currState->longDesc.aarch64 = true;
1176 
// Timing: fire the per-level event on completion. Atomic/functional:
// fetch inline and pick up any fault from currState.
1177  if (currState->timing) {
1178  fetchDescriptor(desc_addr, (uint8_t*) &currState->longDesc.data,
1179  sizeof(uint64_t), flag, start_lookup_level,
1180  LongDescEventByLevel[start_lookup_level], NULL);
1181  } else {
1182  fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
1183  sizeof(uint64_t), flag, -1, NULL,
1185  f = currState->fault;
1186  }
1187 
1188  return f;
1189 }
1190 
1191 std::tuple<Addr, Addr, TableWalker::LookupLevel>
1192 TableWalker::walkAddresses(Addr ttbr, GrainSize tg, int tsz, int pa_range)
1193 {
1194  const auto* ptops = getPageTableOps(tg);
1195 
1196  LookupLevel first_level = LookupLevel::Num_ArmLookupLevel;
1197  Addr table_addr = 0;
1198  Addr desc_addr = 0;
1199 
1200  if (currState->walkEntry.valid) {
1201  // WalkCache hit
1202  TlbEntry* entry = &currState->walkEntry;
1203  DPRINTF(PageTableWalker,
1204  "Walk Cache hit: va=%#x, level=%d, table address=%#x\n",
1205  currState->vaddr, entry->lookupLevel, entry->pfn);
1206 
1207  currState->xnTable = entry->xn;
1208  currState->pxnTable = entry->pxn;
1209  currState->rwTable = bits(entry->ap, 1);
1210  currState->userTable = bits(entry->ap, 0);
1211 
1212  table_addr = entry->pfn;
1213  first_level = (LookupLevel)(entry->lookupLevel + 1);
1214  } else {
1215  // WalkCache miss
1216  first_level = isStage2 ?
1217  ptops->firstS2Level(currState->vtcr.sl0) :
1218  ptops->firstLevel(64 - tsz);
1219  panic_if(first_level == LookupLevel::Num_ArmLookupLevel,
1220  "Table walker couldn't find lookup level\n");
1221 
1222  int stride = tg - 3;
1223  int base_addr_lo = 3 + tsz - stride * (3 - first_level) - tg;
1224 
1225  if (pa_range == 52) {
1226  int z = (base_addr_lo < 6) ? 6 : base_addr_lo;
1227  table_addr = mbits(ttbr, 47, z);
1228  table_addr |= (bits(ttbr, 5, 2) << 48);
1229  } else {
1230  table_addr = mbits(ttbr, 47, base_addr_lo);
1231  }
1232  }
1233 
1234  desc_addr = table_addr + ptops->index(currState->vaddr, first_level, tsz);
1235 
1236  return std::make_tuple(table_addr, desc_addr, first_level);
1237 }
1238 
// Decode ARMv7 short-descriptor memory attributes into the TLB entry:
// either a direct TEXCB decode (when SCTLR.TRE == 0 or the MMU is off)
// or the PRRR/NMRR TEX-remap path.
// NOTE(review): this listing is missing the signature line (1240,
// presumably "TableWalker::memAttrs(ThreadContext *tc, TlbEntry &te,
// SCTLR sctlr, ...") and several interleaved lines (1253, 1260, 1266,
// 1272, ... -- likely the "te.mtype = ..." assignments, given the
// later "te.mtype == TlbEntry::MemoryType::Normal" test). Confirm
// against the complete table_walker.cc before editing.
1239 void
1241  uint8_t texcb, bool s)
1242 {
1243  // Note: tc and sctlr local variables are hiding tc and sctrl class
1244  // variables
1245  DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s);
1246  te.shareable = false; // default value
1247  te.nonCacheable = false;
1248  te.outerShareable = false;
// TEX remap disabled (or MMU disabled): decode the raw 5-bit TEXCB value
1249  if (sctlr.tre == 0 || ((sctlr.tre == 1) && (sctlr.m == 0))) {
1250  switch(texcb) {
1251  case 0: // Stongly-ordered
1252  te.nonCacheable = true;
1254  te.shareable = true;
1255  te.innerAttrs = 1;
1256  te.outerAttrs = 0;
1257  break;
1258  case 1: // Shareable Device
1259  te.nonCacheable = true;
1261  te.shareable = true;
1262  te.innerAttrs = 3;
1263  te.outerAttrs = 0;
1264  break;
1265  case 2: // Outer and Inner Write-Through, no Write-Allocate
1267  te.shareable = s;
1268  te.innerAttrs = 6;
1269  te.outerAttrs = bits(texcb, 1, 0);
1270  break;
1271  case 3: // Outer and Inner Write-Back, no Write-Allocate
1273  te.shareable = s;
1274  te.innerAttrs = 7;
1275  te.outerAttrs = bits(texcb, 1, 0);
1276  break;
1277  case 4: // Outer and Inner Non-cacheable
1278  te.nonCacheable = true;
1280  te.shareable = s;
1281  te.innerAttrs = 0;
1282  te.outerAttrs = bits(texcb, 1, 0);
1283  break;
1284  case 5: // Reserved
1285  panic("Reserved texcb value!\n");
1286  break;
1287  case 6: // Implementation Defined
1288  panic("Implementation-defined texcb value!\n");
1289  break;
1290  case 7: // Outer and Inner Write-Back, Write-Allocate
1292  te.shareable = s;
1293  te.innerAttrs = 5;
1294  te.outerAttrs = 1;
1295  break;
1296  case 8: // Non-shareable Device
1297  te.nonCacheable = true;
1299  te.shareable = false;
1300  te.innerAttrs = 3;
1301  te.outerAttrs = 0;
1302  break;
1303  case 9 ... 15: // Reserved
1304  panic("Reserved texcb value!\n");
1305  break;
1306  case 16 ... 31: // Cacheable Memory
1308  te.shareable = s;
// Inner or outer attrs of 0b00 mean non-cacheable for that dimension
1309  if (bits(texcb, 1,0) == 0 || bits(texcb, 3,2) == 0)
1310  te.nonCacheable = true;
1311  te.innerAttrs = bits(texcb, 1, 0);
1312  te.outerAttrs = bits(texcb, 3, 2);
1313  break;
1314  default:
1315  panic("More than 32 states for 5 bits?\n");
1316  }
1317  } else {
// TEX remap enabled: TEXCB[2:0] indexes the PRRR/NMRR remap registers
1318  assert(tc);
1319  PRRR prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR,
1321  NMRR nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR,
1323  DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr);
1324  uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0;
1325  switch(bits(texcb, 2,0)) {
1326  case 0:
1327  curr_tr = prrr.tr0;
1328  curr_ir = nmrr.ir0;
1329  curr_or = nmrr.or0;
1330  te.outerShareable = (prrr.nos0 == 0);
1331  break;
1332  case 1:
1333  curr_tr = prrr.tr1;
1334  curr_ir = nmrr.ir1;
1335  curr_or = nmrr.or1;
1336  te.outerShareable = (prrr.nos1 == 0);
1337  break;
1338  case 2:
1339  curr_tr = prrr.tr2;
1340  curr_ir = nmrr.ir2;
1341  curr_or = nmrr.or2;
1342  te.outerShareable = (prrr.nos2 == 0);
1343  break;
1344  case 3:
1345  curr_tr = prrr.tr3;
1346  curr_ir = nmrr.ir3;
1347  curr_or = nmrr.or3;
1348  te.outerShareable = (prrr.nos3 == 0);
1349  break;
1350  case 4:
1351  curr_tr = prrr.tr4;
1352  curr_ir = nmrr.ir4;
1353  curr_or = nmrr.or4;
1354  te.outerShareable = (prrr.nos4 == 0);
1355  break;
1356  case 5:
1357  curr_tr = prrr.tr5;
1358  curr_ir = nmrr.ir5;
1359  curr_or = nmrr.or5;
1360  te.outerShareable = (prrr.nos5 == 0);
1361  break;
1362  case 6:
1363  panic("Imp defined type\n");
1364  case 7:
1365  curr_tr = prrr.tr7;
1366  curr_ir = nmrr.ir7;
1367  curr_or = nmrr.or7;
1368  te.outerShareable = (prrr.nos7 == 0);
1369  break;
1370  }
1371 
// Remapped memory type: 0 = Strongly-ordered, 1 = Device, 2 = Normal
1372  switch(curr_tr) {
1373  case 0:
1374  DPRINTF(TLBVerbose, "StronglyOrdered\n");
1376  te.nonCacheable = true;
1377  te.innerAttrs = 1;
1378  te.outerAttrs = 0;
1379  te.shareable = true;
1380  break;
1381  case 1:
1382  DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n",
1383  prrr.ds1, prrr.ds0, s);
1385  te.nonCacheable = true;
1386  te.innerAttrs = 3;
1387  te.outerAttrs = 0;
// Device shareability comes from PRRR.DS1/DS0, gated on the S bit
1388  if (prrr.ds1 && s)
1389  te.shareable = true;
1390  if (prrr.ds0 && !s)
1391  te.shareable = true;
1392  break;
1393  case 2:
1394  DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n",
1395  prrr.ns1, prrr.ns0, s);
1397  if (prrr.ns1 && s)
1398  te.shareable = true;
1399  if (prrr.ns0 && !s)
1400  te.shareable = true;
1401  break;
1402  case 3:
1403  panic("Reserved type");
1404  }
1405 
// For Normal memory, NMRR inner/outer fields select the cache policy
1406  if (te.mtype == TlbEntry::MemoryType::Normal){
1407  switch(curr_ir) {
1408  case 0:
1409  te.nonCacheable = true;
1410  te.innerAttrs = 0;
1411  break;
1412  case 1:
1413  te.innerAttrs = 5;
1414  break;
1415  case 2:
1416  te.innerAttrs = 6;
1417  break;
1418  case 3:
1419  te.innerAttrs = 7;
1420  break;
1421  }
1422 
1423  switch(curr_or) {
1424  case 0:
1425  te.nonCacheable = true;
1426  te.outerAttrs = 0;
1427  break;
1428  case 1:
1429  te.outerAttrs = 1;
1430  break;
1431  case 2:
1432  te.outerAttrs = 2;
1433  break;
1434  case 3:
1435  te.outerAttrs = 3;
1436  break;
1437  }
1438  }
1439  }
1440  DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, "
1441  "outerAttrs: %d\n",
1442  te.shareable, te.innerAttrs, te.outerAttrs);
1443  te.setAttributes(false);
1444 }
1445 
// Decode LPAE (long-descriptor, AArch32) memory attributes into the TLB
// entry.  Stage 2 uses the descriptor's MemAttr field directly; stage 1
// indexes MAIR0/MAIR1 with the descriptor's AttrIndx.
// NOTE(review): this listing is missing the signature line (1447,
// presumably "TableWalker::memAttrsLPAE(ThreadContext *tc, TlbEntry &te,")
// and several lines that, from context, set te.mtype (1465, 1470, 1500,
// 1502, 1510, 1532). Confirm against the complete file.
1446 void
1448  LongDescriptor &l_descriptor)
1449 {
1450  assert(release->has(ArmExtension::LPAE));
1451 
1452  uint8_t attr;
1453  uint8_t sh = l_descriptor.sh();
1454  // Different format and source of attributes if this is a stage 2
1455  // translation
1456  if (isStage2) {
1457  attr = l_descriptor.memAttr();
1458  uint8_t attr_3_2 = (attr >> 2) & 0x3;
1459  uint8_t attr_1_0 = attr & 0x3;
1460 
1461  DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh);
1462 
// MemAttr[3:2] == 0: Strongly-ordered/Device, always non-cacheable
1463  if (attr_3_2 == 0) {
1464  te.mtype = attr_1_0 == 0 ? TlbEntry::MemoryType::StronglyOrdered
1466  te.outerAttrs = 0;
1467  te.innerAttrs = attr_1_0 == 0 ? 1 : 3;
1468  te.nonCacheable = true;
1469  } else {
1471  te.outerAttrs = attr_3_2 == 1 ? 0 :
1472  attr_3_2 == 2 ? 2 : 1;
1473  te.innerAttrs = attr_1_0 == 1 ? 0 :
1474  attr_1_0 == 2 ? 6 : 5;
1475  te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1);
1476  }
1477  } else {
1478  uint8_t attrIndx = l_descriptor.attrIndx();
1479 
1480  // LPAE always uses remapping of memory attributes, irrespective of the
1481  // value of SCTLR.TRE
// AttrIndx[2] selects MAIR1 vs MAIR0; AttrIndx[1:0] selects the byte
1482  MiscRegIndex reg = attrIndx & 0x4 ? MISCREG_MAIR1 : MISCREG_MAIR0;
1483  int reg_as_int = snsBankedIndex(reg, currState->tc,
1484  !currState->isSecure);
1485  uint32_t mair = currState->tc->readMiscReg(reg_as_int);
1486  attr = (mair >> (8 * (attrIndx % 4))) & 0xff;
1487  uint8_t attr_7_4 = bits(attr, 7, 4);
1488  uint8_t attr_3_0 = bits(attr, 3, 0);
1489  DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx, sh, attr);
1490 
1491  // Note: the memory subsystem only cares about the 'cacheable' memory
1492  // attribute. The other attributes are only used to fill the PAR register
1493  // accordingly to provide the illusion of full support
1494  te.nonCacheable = false;
1495 
1496  switch (attr_7_4) {
1497  case 0x0:
1498  // Strongly-ordered or Device memory
1499  if (attr_3_0 == 0x0)
1501  else if (attr_3_0 == 0x4)
1503  else
1504  panic("Unpredictable behavior\n");
1505  te.nonCacheable = true;
1506  te.outerAttrs = 0;
1507  break;
1508  case 0x4:
1509  // Normal memory, Outer Non-cacheable
1511  te.outerAttrs = 0;
1512  if (attr_3_0 == 0x4)
1513  // Inner Non-cacheable
1514  te.nonCacheable = true;
1515  else if (attr_3_0 < 0x8)
1516  panic("Unpredictable behavior\n");
1517  break;
1518  case 0x8:
1519  case 0x9:
1520  case 0xa:
1521  case 0xb:
1522  case 0xc:
1523  case 0xd:
1524  case 0xe:
1525  case 0xf:
1526  if (attr_7_4 & 0x4) {
1527  te.outerAttrs = (attr_7_4 & 1) ? 1 : 3;
1528  } else {
1529  te.outerAttrs = 0x2;
1530  }
1531  // Normal memory, Outer Cacheable
1533  if (attr_3_0 != 0x4 && attr_3_0 < 0x8)
1534  panic("Unpredictable behavior\n");
1535  break;
1536  default:
1537  panic("Unpredictable behavior\n");
1538  break;
1539  }
1540 
1541  switch (attr_3_0) {
1542  case 0x0:
1543  te.innerAttrs = 0x1;
1544  break;
1545  case 0x4:
1546  te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0;
1547  break;
1548  case 0x8:
1549  case 0x9:
1550  case 0xA:
1551  case 0xB:
1552  te.innerAttrs = 6;
1553  break;
1554  case 0xC:
1555  case 0xD:
1556  case 0xE:
1557  case 0xF:
1558  te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7;
1559  break;
1560  default:
1561  panic("Unpredictable behavior\n");
1562  break;
1563  }
1564  }
1565 
// SH field: 2 = Outer Shareable, 3 = Inner Shareable (bit 1 set => shareable)
1566  te.outerShareable = sh == 2;
1567  te.shareable = (sh & 0x2) ? true : false;
1568  te.setAttributes(true);
// Attribute byte is stored in PAR-format bits [63:56]
1569  te.attributes |= (uint64_t) attr << 56;
1570 }
1571 
// Decode AArch64 (VMSAv8-64) memory attributes into the TLB entry.
// Stage 2 uses the descriptor's MemAttr field; stage 1 indexes
// MAIR_EL1/EL2/EL3 with the descriptor's AttrIndx.
// NOTE(review): this listing is missing the signature line (1573) and
// several lines that, from context, set te.mtype (1590, 1595, 1609,
// 1635). Confirm against the complete file.
1572 void
1574  LongDescriptor &l_descriptor)
1575 {
1576  uint8_t attr;
1577  uint8_t attr_hi;
1578  uint8_t attr_lo;
1579  uint8_t sh = l_descriptor.sh();
1580 
1581  if (isStage2) {
1582  attr = l_descriptor.memAttr();
// NOTE(review): these declarations shadow the function-scope attr_hi /
// attr_lo above; the outer ones stay uninitialized in the stage-2 path
1583  uint8_t attr_hi = (attr >> 2) & 0x3;
1584  uint8_t attr_lo = attr & 0x3;
1585 
1586  DPRINTF(TLBVerbose, "memAttrsAArch64 MemAttr:%#x sh:%#x\n", attr, sh);
1587 
1588  if (attr_hi == 0) {
1589  te.mtype = attr_lo == 0 ? TlbEntry::MemoryType::StronglyOrdered
1591  te.outerAttrs = 0;
1592  te.innerAttrs = attr_lo == 0 ? 1 : 3;
1593  te.nonCacheable = true;
1594  } else {
1596  te.outerAttrs = attr_hi == 1 ? 0 :
1597  attr_hi == 2 ? 2 : 1;
1598  te.innerAttrs = attr_lo == 1 ? 0 :
1599  attr_lo == 2 ? 6 : 5;
1600  // Treat write-through memory as uncacheable, this is safe
1601  // but for performance reasons not optimal.
1602  te.nonCacheable = (attr_hi == 1) || (attr_hi == 2) ||
1603  (attr_lo == 1) || (attr_lo == 2);
1604  }
1605  } else {
1606  uint8_t attrIndx = l_descriptor.attrIndx();
1607 
1608  DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh);
1610 
1611  // Select MAIR
1612  uint64_t mair;
1613  switch (regime) {
1614  case EL0:
1615  case EL1:
1616  mair = tc->readMiscReg(MISCREG_MAIR_EL1);
1617  break;
1618  case EL2:
1619  mair = tc->readMiscReg(MISCREG_MAIR_EL2);
1620  break;
1621  case EL3:
1622  mair = tc->readMiscReg(MISCREG_MAIR_EL3);
1623  break;
1624  default:
1625  panic("Invalid exception level");
1626  break;
1627  }
1628 
1629  // Select attributes
1630  attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
1631  attr_lo = bits(attr, 3, 0);
1632  attr_hi = bits(attr, 7, 4);
1633 
1634  // Memory type
1636 
1637  // Cacheability
1638  te.nonCacheable = false;
1639  if (te.mtype == TlbEntry::MemoryType::Device) { // Device memory
1640  te.nonCacheable = true;
1641  }
1642  // Treat write-through memory as uncacheable, this is safe
1643  // but for performance reasons not optimal.
1644  switch (attr_hi) {
1645  case 0x1 ... 0x3: // Normal Memory, Outer Write-through transient
1646  case 0x4: // Normal memory, Outer Non-cacheable
1647  case 0x8 ... 0xb: // Normal Memory, Outer Write-through non-transient
1648  te.nonCacheable = true;
1649  }
1650  switch (attr_lo) {
1651  case 0x1 ... 0x3: // Normal Memory, Inner Write-through transient
1652  case 0x9 ... 0xb: // Normal Memory, Inner Write-through non-transient
1653  warn_if(!attr_hi, "Unpredictable behavior");
1654  [[fallthrough]];
1655  case 0x4: // Device-nGnRE memory or
1656  // Normal memory, Inner Non-cacheable
1657  case 0x8: // Device-nGRE memory or
1658  // Normal memory, Inner Write-through non-transient
1659  te.nonCacheable = true;
1660  }
1661 
// NOTE(review): this is the mirror image of memAttrsLPAE, which sets
// outerShareable = (sh == 2) and shareable = (sh & 0x2). Here shareable
// is only set for sh == 2 -- confirm the intended SH[1:0] decode.
1662  te.shareable = sh == 2;
1663  te.outerShareable = (sh & 0x2) ? true : false;
1664  // Attributes formatted according to the 64-bit PAR
1665  te.attributes = ((uint64_t) attr << 56) |
1666  (1 << 11) | // LPAE bit
1667  (te.ns << 9) | // NS bit
1668  (sh << 7);
1669  }
1670 }
1671 
// Process a fetched short-descriptor L1 entry: raise a translation
// fault for reserved/ignored entries, insert a TLB entry for sections
// (after access-flag checks), or fetch the L2 table for page-table
// entries.
// NOTE(review): this listing is missing the signature line (1673), the
// line that byte-swaps l1Desc.data (1679), and several fault-constructor
// argument lines. Confirm against the complete file.
1672 void
1674 {
// A fault from the descriptor fetch itself aborts further processing
1675  if (currState->fault != NoFault) {
1676  return;
1677  }
1678 
1680  byteOrder(currState->tc));
1681 
1682  DPRINTF(TLB, "L1 descriptor for %#x is %#x\n",
1684  TlbEntry te;
1685 
1686  const bool is_atomic = currState->req->isAtomic();
1687 
1688  switch (currState->l1Desc.type()) {
1689  case L1Descriptor::Ignore:
1691  if (!currState->timing) {
1692  currState->tc = NULL;
1693  currState->req = NULL;
1694  }
1695  DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
1696  if (currState->isFetch)
1697  currState->fault =
1698  std::make_shared<PrefetchAbort>(
1700  ArmFault::TranslationLL + LookupLevel::L1,
1701  isStage2,
1703  else
1704  currState->fault =
1705  std::make_shared<DataAbort>(
1708  is_atomic ? false : currState->isWrite,
1709  ArmFault::TranslationLL + LookupLevel::L1, isStage2,
1711  return;
1712  case L1Descriptor::Section:
// With SCTLR.AFE, AP[0] clear means the access flag is not set
1713  if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) {
1719  currState->fault = std::make_shared<DataAbort>(
1721  currState->l1Desc.domain(),
1722  is_atomic ? false : currState->isWrite,
1723  ArmFault::AccessFlagLL + LookupLevel::L1,
1724  isStage2,
1726  }
1727  if (currState->l1Desc.supersection()) {
1728  panic("Haven't implemented supersections\n");
1729  }
1731  return;
1733  {
// Page-table entry: compute the L2 descriptor address from the L1
// entry's table base and VA bits [19:12]
1734  Addr l2desc_addr;
1735  l2desc_addr = currState->l1Desc.l2Addr() |
1736  (bits(currState->vaddr, 19, 12) << 2);
1737  DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n",
1738  l2desc_addr, currState->isSecure ? "s" : "ns");
1739 
1740  // Trickbox address check
1741  currState->fault = testWalk(l2desc_addr, sizeof(uint32_t),
1742  currState->l1Desc.domain(),
1743  LookupLevel::L2, isStage2);
1744 
1745  if (currState->fault) {
1746  if (!currState->timing) {
1747  currState->tc = NULL;
1748  currState->req = NULL;
1749  }
1750  return;
1751  }
1752 
1754 
1755  if (currState->sctlr.c == 0 || currState->isUncacheable) {
1756  flag.set(Request::UNCACHEABLE);
1757  }
1758 
1759  if (currState->isSecure)
1760  flag.set(Request::SECURE);
1761 
1762  bool delayed;
1763  delayed = fetchDescriptor(l2desc_addr,
1764  (uint8_t*)&currState->l2Desc.data,
1765  sizeof(uint32_t), flag, -1, &doL2DescEvent,
1767  if (delayed) {
1768  currState->delayed = true;
1769  }
1770 
1771  return;
1772  }
1773  default:
1774  panic("A new type in a 2 bit field?\n");
1775  }
1776 }
1777 
// Build the appropriate abort for a long-descriptor walk failure:
// PrefetchAbort for instruction fetches, DataAbort otherwise (atomic
// accesses are reported as reads).
// NOTE(review): the signature line (1779) and several constructor
// argument lines (1783-4, 1786, 1789-90, 1792, 1794) are missing from
// this listing.
1778 Fault
1780 {
1781  if (currState->isFetch) {
1782  return std::make_shared<PrefetchAbort>(
1785  isStage2,
1787  } else {
1788  return std::make_shared<DataAbort>(
1791  currState->req->isAtomic() ? false : currState->isWrite,
1793  isStage2,
1795  }
1796 }
1797 
// Process a fetched long-format (LPAE/AArch64) descriptor: fault on
// invalid entries, terminate the walk on Block/Page entries (after
// address-size and access-flag checks), or fetch the next-level table
// for Table entries.
// NOTE(review): this listing is missing the signature line (1799), the
// byte-swap of longDesc.data, the hierarchical-permission updates
// (1876-1885), the walk-cache insert call (1922), the Request::Flags
// declaration (1926), and other interleaved lines. Confirm against the
// complete file.
1798 void
1800 {
1801  if (currState->fault != NoFault) {
1802  return;
1803  }
1804 
1806  byteOrder(currState->tc));
1807 
1808  DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n",
1811  currState->aarch64 ? "AArch64" : "long-desc.");
1812 
1815  DPRINTF(PageTableWalker, "Analyzing L%d descriptor: %#llx, pxn: %d, "
1816  "xn: %d, ap: %d, af: %d, type: %d\n",
1819  currState->longDesc.pxn(),
1820  currState->longDesc.xn(),
1821  currState->longDesc.ap(),
1822  currState->longDesc.af(),
1823  currState->longDesc.type());
1824  } else {
1825  DPRINTF(PageTableWalker, "Analyzing L%d descriptor: %#llx, type: %d\n",
1828  currState->longDesc.type());
1829  }
1830 
1831  TlbEntry te;
1832 
1833  switch (currState->longDesc.type()) {
1835  DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
1838 
1840  if (!currState->timing) {
1841  currState->tc = NULL;
1842  currState->req = NULL;
1843  }
1844  return;
1845 
1846  case LongDescriptor::Block:
1847  case LongDescriptor::Page:
1848  {
// Leaf entry: the walk ends here unless the output address or the
// access flag is bad
1849  auto fault_source = ArmFault::FaultSourceInvalid;
1850  // Check for address size fault
1853 
1854  DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1856  fault_source = ArmFault::AddressSizeLL;
1857 
1858  // Check for access fault
1859  } else if (currState->longDesc.af() == 0) {
1860 
1861  DPRINTF(TLB, "L%d descriptor causing Access Fault\n",
1863  fault_source = ArmFault::AccessFlagLL;
1864  }
1865 
1866  if (fault_source != ArmFault::FaultSourceInvalid) {
1867  currState->fault = generateLongDescFault(fault_source);
1868  } else {
1870  }
1871  }
1872  return;
1873  case LongDescriptor::Table:
1874  {
1875  // Set hierarchical permission flags
1886 
1887  // Set up next level lookup
1888  Addr next_desc_addr = currState->longDesc.nextDescAddr(
1889  currState->vaddr);
1890 
1891  DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
1894  next_desc_addr,
1895  currState->secureLookup ? "s" : "ns");
1896 
1897  // Check for address size fault
1899  next_desc_addr, currState->physAddrRange)) {
1900  DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1902 
1905  return;
1906  }
1907 
1908  // Trickbox address check
1910  next_desc_addr, sizeof(uint64_t), TlbEntry::DomainType::Client,
1912 
1913  if (currState->fault) {
1914  if (!currState->timing) {
1915  currState->tc = NULL;
1916  currState->req = NULL;
1917  }
1918  return;
1919  }
1920 
1921  if (mmu->hasWalkCache()) {
1923  }
1924 
1925 
1927  if (currState->secureLookup)
1928  flag.set(Request::SECURE);
1929 
1930  if (currState->sctlr.c == 0 || currState->isUncacheable) {
1931  flag.set(Request::UNCACHEABLE);
1932  }
1933 
// Pick the completion event matching the next lookup level
1936  Event *event = NULL;
1937  switch (L) {
1938  case LookupLevel::L1:
1939  assert(currState->aarch64);
// fall through: L1 shares the same handling as L2/L3
1940  case LookupLevel::L2:
1941  case LookupLevel::L3:
1942  event = LongDescEventByLevel[L];
1943  break;
1944  default:
1945  panic("Wrong lookup level in table walk\n");
1946  break;
1947  }
1948 
1949  bool delayed;
1950  delayed = fetchDescriptor(next_desc_addr, (uint8_t*)&currState->longDesc.data,
1951  sizeof(uint64_t), flag, -1, event,
1953  if (delayed) {
1954  currState->delayed = true;
1955  }
1956  }
1957  return;
1958  default:
1959  panic("A new type in a 2 bit field?\n");
1960  }
1961 }
1962 
// Process a fetched short-descriptor L2 entry: fault on invalid
// entries, check the access flag when SCTLR.AFE is set, then (on the
// missing tail line) insert the resulting TLB entry.
// NOTE(review): this listing is missing the signature line (1964), the
// byte-swap line (1970), fault-constructor argument lines, and the
// final insertTableEntry call (2016). Confirm against the complete
// file.
1963 void
1965 {
1966  if (currState->fault != NoFault) {
1967  return;
1968  }
1969 
1971  byteOrder(currState->tc));
1972 
1973  DPRINTF(TLB, "L2 descriptor for %#x is %#x\n",
1975  TlbEntry te;
1976 
1977  const bool is_atomic = currState->req->isAtomic();
1978 
1979  if (currState->l2Desc.invalid()) {
1980  DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
1981  if (!currState->timing) {
1982  currState->tc = NULL;
1983  currState->req = NULL;
1984  }
1985  if (currState->isFetch)
1986  currState->fault = std::make_shared<PrefetchAbort>(
1988  ArmFault::TranslationLL + LookupLevel::L2,
1989  isStage2,
1991  else
1992  currState->fault = std::make_shared<DataAbort>(
1994  is_atomic ? false : currState->isWrite,
1995  ArmFault::TranslationLL + LookupLevel::L2,
1996  isStage2,
1998  return;
1999  }
2000 
// With SCTLR.AFE, AP[0] clear means the access flag is not set
2001  if (currState->sctlr.afe && bits(currState->l2Desc.ap(), 0) == 0) {
2005  DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
2006  currState->sctlr.afe, currState->l2Desc.ap());
2007 
2008  currState->fault = std::make_shared<DataAbort>(
2011  is_atomic ? false : currState->isWrite,
2012  ArmFault::AccessFlagLL + LookupLevel::L2, isStage2,
2014  }
2015 
2017 }
2018 
// Timing-mode completion handler for an L1 short-descriptor fetch:
// pops the walk state from the L1 queue, processes the descriptor, and
// either finishes the translation, reports a fault, or queues the walk
// for the L2 fetch.
// NOTE(review): the signature line (2020) and the finish/translateTiming
// call lines (2043, 2045, 2058, 2061-2065) are missing from this
// listing.
2019 void
2021 {
2022  currState = stateQueues[LookupLevel::L1].front();
2023  currState->delayed = false;
2024  // if there's a stage2 translation object we don't need it any more
2025  if (currState->stage2Tran) {
2026  delete currState->stage2Tran;
2027  currState->stage2Tran = NULL;
2028  }
2029 
2030 
2031  DPRINTF(PageTableWalker, "L1 Desc object host addr: %p\n",
2032  &currState->l1Desc.data);
2033  DPRINTF(PageTableWalker, "L1 Desc object data: %08x\n",
2034  currState->l1Desc.data);
2035 
2036  DPRINTF(PageTableWalker, "calling doL1Descriptor for vaddr:%#x\n",
2038  doL1Descriptor();
2039 
2040  stateQueues[LookupLevel::L1].pop_front();
2041  // Check if fault was generated
2042  if (currState->fault != NoFault) {
2044  currState->tc, currState->mode);
2046 
// Release the walker and kick off any queued walk
2047  pending = false;
2048  nextWalk(currState->tc);
2049 
2050  currState->req = NULL;
2051  currState->tc = NULL;
2052  currState->delayed = false;
2053  delete currState;
2054  }
2055  else if (!currState->delayed) {
2056  // delay is not set so there is no L2 to do
2057  // Don't finish the translation if a stage 2 look up is underway
2059  DPRINTF(PageTableWalker, "calling translateTiming again\n");
2060 
2064 
2066 
2067  pending = false;
2068  nextWalk(currState->tc);
2069 
2070  currState->req = NULL;
2071  currState->tc = NULL;
2072  currState->delayed = false;
2073  delete currState;
2074  } else {
2075  // need to do L2 descriptor
2076  stateQueues[LookupLevel::L2].push_back(currState);
2077  }
2078  currState = NULL;
2079 }
2080 
// Timing-mode completion handler for an L2 short-descriptor fetch:
// processes the descriptor, finishes or faults the translation, then
// releases the walker for the next queued walk.
// NOTE(review): the signature line (2082) and the finish/translateTiming
// call lines (2093, 2098, 2100, 2102, 2105-2109) are missing from this
// listing.
2081 void
2083 {
2084  currState = stateQueues[LookupLevel::L2].front();
// doL2DescriptorWrapper only runs for walks parked waiting on L2
2085  assert(currState->delayed);
2086  // if there's a stage2 translation object we don't need it any more
2087  if (currState->stage2Tran) {
2088  delete currState->stage2Tran;
2089  currState->stage2Tran = NULL;
2090  }
2091 
2092  DPRINTF(PageTableWalker, "calling doL2Descriptor for vaddr:%#x\n",
2094  doL2Descriptor();
2095 
2096  // Check if fault was generated
2097  if (currState->fault != NoFault) {
2099  currState->tc, currState->mode);
2101  } else {
2103  DPRINTF(PageTableWalker, "calling translateTiming again\n");
2104 
2108 
2110  }
2111 
2112 
// L2 is the last level for the short-descriptor walk: always clean up
2113  stateQueues[LookupLevel::L2].pop_front();
2114  pending = false;
2115  nextWalk(currState->tc);
2116 
2117  currState->req = NULL;
2118  currState->tc = NULL;
2119  currState->delayed = false;
2120 
2121  delete currState;
2122  currState = NULL;
2123 }
2124 
// Per-level event handlers for long-descriptor fetch completion; each
// forwards to doLongDescriptorWrapper with its lookup level.
// NOTE(review): the four signature lines (2126, 2132, 2138, 2144) are
// missing from this listing -- presumably doL0LongDescriptorWrapper()
// through doL3LongDescriptorWrapper(); confirm against the complete
// file.
2125 void
2127 {
2128  doLongDescriptorWrapper(LookupLevel::L0);
2129 }
2130 
2131 void
2133 {
2134  doLongDescriptorWrapper(LookupLevel::L1);
2135 }
2136 
2137 void
2139 {
2140  doLongDescriptorWrapper(LookupLevel::L2);
2141 }
2142 
2143 void
2145 {
2146  doLongDescriptorWrapper(LookupLevel::L3);
2147 }
2148 
// Timing-mode completion handler shared by all long-descriptor levels:
// processes the descriptor for curr_lookup_level and finishes, faults,
// or continues the walk at the next level.
// NOTE(review): the signature line (2150), the finish/translateTiming
// call lines (2170, 2183, 2185-2187), and the requeue at the next level
// (2202) are missing from this listing.
2149 void
2151 {
2152  currState = stateQueues[curr_lookup_level].front();
2153  assert(curr_lookup_level == currState->longDesc.lookupLevel);
2154  currState->delayed = false;
2155 
2156  // if there's a stage2 translation object we don't need it any more
2157  if (currState->stage2Tran) {
2158  delete currState->stage2Tran;
2159  currState->stage2Tran = NULL;
2160  }
2161 
2162  DPRINTF(PageTableWalker, "calling doLongDescriptor for vaddr:%#x\n",
2164  doLongDescriptor();
2165 
2166  stateQueues[curr_lookup_level].pop_front();
2167 
2168  if (currState->fault != NoFault) {
2169  // A fault was generated
2171  currState->tc, currState->mode);
2172 
2173  pending = false;
2174  nextWalk(currState->tc);
2175 
2176  currState->req = NULL;
2177  currState->tc = NULL;
2178  currState->delayed = false;
2179  delete currState;
2180  } else if (!currState->delayed) {
2181  // No additional lookups required
2182  DPRINTF(PageTableWalker, "calling translateTiming again\n");
2184 
2188 
2189  stats.walksLongTerminatedAtLevel[(unsigned) curr_lookup_level]++;
2190 
2191  pending = false;
2192  nextWalk(currState->tc);
2193 
2194  currState->req = NULL;
2195  currState->tc = NULL;
2196  currState->delayed = false;
2197  delete currState;
2198  } else {
// The walk descends a level: it must not already be at the deepest one
2199  if (curr_lookup_level >= LookupLevel::Num_ArmLookupLevel - 1)
2200  panic("Max. number of lookups already reached in table walk\n");
2201  // Need to perform additional lookups
2203  }
2204  currState = NULL;
2205 }
2206 
2207 
// Start the next queued walk if one is pending, otherwise let a drain
// complete.
// NOTE(review): the signature line (2209) and the line that actually
// schedules/starts the queued walk (2212) are missing from this
// listing.
2208 void
2210 {
2211  if (pendingQueue.size())
2213  else
2214  completeDrain();
2215 }
2216 
// Fetch a translation-table descriptor into 'data'.  If a stage 2
// translation applies, descAddr is an IPA and is translated first;
// otherwise the descriptor is read directly through the walker port
// (timing, atomic, or functional as appropriate).  In timing mode the
// walk state is parked on stateQueues[queueIndex] (when >= 0) and
// 'event' fires on completion; otherwise doDescriptor is invoked
// synchronously.  Returns true iff the access was issued in timing mode
// (i.e. completion is deferred).
// NOTE(review): Stage2Walk constructor argument line 2237 and
// readDataUntimed argument lines 2245-2246 are missing from this
// listing.
2217 bool
2218 TableWalker::fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes,
2219  Request::Flags flags, int queueIndex, Event *event,
2220  void (TableWalker::*doDescriptor)())
2221 {
2222  bool isTiming = currState->timing;
2223 
2224  DPRINTF(PageTableWalker,
2225  "Fetching descriptor at address: 0x%x stage2Req: %d\n",
2226  descAddr, currState->stage2Req);
2227 
2228  // If this translation has a stage 2 then we know descAddr is an IPA and
2229  // needs to be translated before we can access the page table. Do that
2230  // check here.
2231  if (currState->stage2Req) {
2232  Fault fault;
2233 
2234  if (isTiming) {
// The Stage2Walk object owns the deferred stage-2 lookup; it is
// deleted by the completion wrappers via currState->stage2Tran
2235  auto *tran = new
2236  Stage2Walk(*this, data, event, currState->vaddr,
2238  currState->stage2Tran = tran;
2239  readDataTimed(currState->tc, descAddr, tran, numBytes, flags);
2240  fault = tran->fault;
2241  } else {
2242  fault = readDataUntimed(currState->tc,
2243  currState->vaddr, descAddr, data, numBytes, flags,
2244  currState->mode,
2247  }
2248 
2249  if (fault != NoFault) {
2250  currState->fault = fault;
2251  }
2252  if (isTiming) {
2253  if (queueIndex >= 0) {
2254  DPRINTF(PageTableWalker, "Adding to walker fifo: "
2255  "queue size before adding: %d\n",
2256  stateQueues[queueIndex].size());
2257  stateQueues[queueIndex].push_back(currState);
// Ownership of the walk state moves to the queue until the event fires
2258  currState = NULL;
2259  }
2260  } else {
2261  (this->*doDescriptor)();
2262  }
2263  } else {
2264  if (isTiming) {
2265  port->sendTimingReq(descAddr, numBytes, data, flags,
2266  currState->tc->getCpuPtr()->clockPeriod(), event);
2267 
2268  if (queueIndex >= 0) {
2269  DPRINTF(PageTableWalker, "Adding to walker fifo: "
2270  "queue size before adding: %d\n",
2271  stateQueues[queueIndex].size());
2272  stateQueues[queueIndex].push_back(currState);
2273  currState = NULL;
2274  }
2275  } else if (!currState->functional) {
2276  port->sendAtomicReq(descAddr, numBytes, data, flags,
2277  currState->tc->getCpuPtr()->clockPeriod());
2278 
2279  (this->*doDescriptor)();
2280  } else {
2281  port->sendFunctionalReq(descAddr, numBytes, data, flags);
2282  (this->*doDescriptor)();
2283  }
2284  }
2285  return (isTiming);
2286 }
2287 
// Insert a PARTIAL (intermediate-level) long-descriptor entry into the
// TLB so later walks can resume below this level (walk-cache behavior):
// te.partial is set and te.pfn holds the NEXT table's address rather
// than an output frame.
// NOTE(review): the signature line (2289, presumably
// "TableWalker::insertPartialTableEntry(LongDescriptor &descriptor)")
// is missing from this listing.
2288 void
2290 {
2291  const bool have_security = release->has(ArmExtension::SECURITY);
2292  TlbEntry te;
2293 
2294  // Create and fill a new page table entry
2295  te.valid = true;
2296  te.longDescFormat = true;
2297  te.partial = true;
// Partial entries are never global: hierarchical attrs are per-ASID
2298  te.global = false;
2299  te.isHyp = currState->isHyp;
2300  te.asid = currState->asid;
2301  te.vmid = currState->vmid;
2302  te.N = descriptor.offsetBits();
2303  te.vpn = currState->vaddr >> te.N;
2304  te.size = (1ULL << te.N) - 1;
2305  te.pfn = descriptor.nextTableAddr();
2306  te.domain = descriptor.domain();
2307  te.lookupLevel = descriptor.lookupLevel;
2308  te.ns = !descriptor.secure(have_security, currState);
2309  te.nstid = !currState->isSecure;
2310  te.type = TypeTLB::unified;
2311 
2312  if (currState->aarch64)
2313  te.el = currState->el;
2314  else
2315  te.el = EL1;
2316 
// Record the hierarchical table permissions accumulated so far
2317  te.xn = currState->xnTable;
2318  te.pxn = currState->pxnTable;
2319  te.ap = (currState->rwTable << 1) | (currState->userTable);
2320 
2321  // Debug output
2322  DPRINTF(TLB, descriptor.dbgHeader().c_str());
2323  DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
2324  te.N, te.pfn, te.size, te.global, te.valid);
2325  DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
2326  "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
2327  te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp,
2328  te.nonCacheable, te.ns);
2329  DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
2330  descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
2331  descriptor.getRawData());
2332 
2333  // Insert the entry into the TLBs
2334  tlb->multiInsert(te);
2335 }
2336 
// Build a final (leaf) TLB entry from a completed walk and insert it.
// Combines descriptor attributes with the accumulated hierarchical
// table permissions for long-descriptor walks, and decodes memory
// attributes via memAttrs/memAttrsLPAE/memAttrsAArch64.
// NOTE(review): lines 2366-2367 are missing from this listing
// (between te.el assignment and the global/stage-2 comment); confirm
// against the complete file.
2337 void
2338 TableWalker::insertTableEntry(DescriptorBase &descriptor, bool long_descriptor)
2339 {
2340  const bool have_security = release->has(ArmExtension::SECURITY);
2341  TlbEntry te;
2342 
2343  // Create and fill a new page table entry
2344  te.valid = true;
2345  te.longDescFormat = long_descriptor;
2346  te.isHyp = currState->isHyp;
2347  te.asid = currState->asid;
2348  te.vmid = currState->vmid;
2349  te.N = descriptor.offsetBits();
2350  te.vpn = currState->vaddr >> te.N;
2351  te.size = (1<<te.N) - 1;
2352  te.pfn = descriptor.pfn();
2353  te.domain = descriptor.domain();
2354  te.lookupLevel = descriptor.lookupLevel;
2355  te.ns = !descriptor.secure(have_security, currState);
2356  te.nstid = !currState->isSecure;
2357  te.xn = descriptor.xn();
2358  te.type = currState->mode == BaseMMU::Execute ?
2359  TypeTLB::instruction : TypeTLB::data;
2360 
2361  if (currState->aarch64)
2362  te.el = currState->el;
2363  else
2364  te.el = EL1;
2365 
2368 
2369  // ASID has no meaning for stage 2 TLB entries, so mark all stage 2 entries
2370  // as global
2371  te.global = descriptor.global(currState) || isStage2;
2372  if (long_descriptor) {
2373  LongDescriptor l_descriptor =
2374  dynamic_cast<LongDescriptor &>(descriptor);
2375 
// Merge hierarchical (table-level) execute-never / privileged-XN bits
2376  te.xn |= currState->xnTable;
2377  te.pxn = currState->pxnTable || l_descriptor.pxn();
2378  if (isStage2) {
2379  // this is actually the HAP field, but its stored in the same bit
2380  // possitions as the AP field in a stage 1 translation.
2381  te.hap = l_descriptor.ap();
2382  } else {
// Combine table APs with the leaf AP: a table-level restriction can
// only tighten (never widen) the leaf permissions
2383  te.ap = ((!currState->rwTable || descriptor.ap() >> 1) << 1) |
2384  (currState->userTable && (descriptor.ap() & 0x1));
2385  }
2386  if (currState->aarch64)
2387  memAttrsAArch64(currState->tc, te, l_descriptor);
2388  else
2389  memAttrsLPAE(currState->tc, te, l_descriptor);
2390  } else {
2391  te.ap = descriptor.ap();
2392  memAttrs(currState->tc, te, currState->sctlr, descriptor.texcb(),
2393  descriptor.shareable());
2394  }
2395 
2396  // Debug output
2397  DPRINTF(TLB, descriptor.dbgHeader().c_str());
2398  DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
2399  te.N, te.pfn, te.size, te.global, te.valid);
2400  DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
2401  "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
2402  te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp,
2403  te.nonCacheable, te.ns);
2404  DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
2405  descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
2406  descriptor.getRawData());
2407 
2408  // Insert the entry into the TLBs
2409  tlb->multiInsert(te);
2410  if (!currState->timing) {
2411  currState->tc = NULL;
2412  currState->req = NULL;
2413  }
2414 }
2415 
// Convert a raw integer (e.g. decoded from a register field) to a
// LookupLevel, panicking on anything but L1-L3.
// NOTE(review): the return-type line (2416, presumably
// "TableWalker::LookupLevel") is missing from this listing.
2417 TableWalker::toLookupLevel(uint8_t lookup_level_as_int)
2418 {
2419  switch (lookup_level_as_int) {
2420  case LookupLevel::L1:
2421  return LookupLevel::L1;
2422  case LookupLevel::L2:
2423  return LookupLevel::L2;
2424  case LookupLevel::L3:
2425  return LookupLevel::L3;
2426  default:
2427  panic("Invalid lookup level conversion");
2428  }
2429 }
2430 
2431 /* this method keeps track of the table walker queue's residency, so
2432  * needs to be called whenever requests start and complete. */
// NOTE(review): the signature line (2434) and a stats-accumulation line
// (2443, between curTick() and "pendingReqs = n") are missing from this
// listing.
2433 void
2435 {
// Count queued walks plus the in-flight one when it is not also the
// queue front
2436  unsigned n = pendingQueue.size();
2437  if ((currState != NULL) && (currState != pendingQueue.front())) {
2438  ++n;
2439  }
2440 
// Only record a sample when the residency actually changed
2441  if (n != pendingReqs) {
2442  Tick now = curTick();
2444  pendingReqs = n;
2445  pendingChangeTick = now;
2446  }
2447 }
2448 
2449 Fault
// Signature (per the cross-reference index): testWalk(Addr pa, Addr size,
// TlbEntry::DomainType domain, LookupLevel lookup_level, bool stage2).
// The first signature line is elided from this generated listing.
2451  LookupLevel lookup_level, bool stage2)
2452 {
// Delegate to the MMU's walk-test hook, supplying the current walker
// state's context (original VA, security state, access mode).
2453  return mmu->testWalk(pa, size, currState->vaddr, currState->isSecure,
2454  currState->mode, domain, lookup_level, stage2);
2455 }
2456 
2457 
2458 uint8_t
// Map a page-size exponent N (page size == 2^N bytes) onto the bin index
// used by stats.pageSizes; panics on any unsupported size.
2460 {
2461  /* for stats.pageSizes */
2462  switch(N) {
2463  case 12: return 0; // 4K
2464  case 14: return 1; // 16K (using 16K granule in v8-64)
2465  case 16: return 2; // 64K
2466  case 20: return 3; // 1M
2467  case 21: return 4; // 2M-LPAE
2468  case 24: return 5; // 16M
2469  case 25: return 6; // 32M (using 16K granule in v8-64)
2470  case 29: return 7; // 512M (using 64K granule in v8-64)
2471  case 30: return 8; // 1G-LPAE
2472  case 42: return 9; // 4T (2^42; the stat bin is labelled "4TiB")
2473  default:
2474  panic("unknown page size");
2475  return 255; // unreachable after panic; placates return-path checks
2476  }
2477 }
2478 
2479 Fault
// Atomic/functional read of a stage-1 descriptor: translate desc_addr
// through the stage-2 MMU, then fetch the bytes through the walker port.
// (The first signature line is elided from this generated listing.)
2481  uint8_t *data, int num_bytes, Request::Flags flags, BaseMMU::Mode mode,
2482  MMU::ArmTranslationType tran_type, bool functional)
2483 {
2484  Fault fault;
2485 
2486  // translate to physical address using the second stage MMU
2487  auto req = std::make_shared<Request>();
2488  req->setVirt(desc_addr, num_bytes, flags | Request::PT_WALK,
2489  requestorId, 0);
2490 
2491  if (functional) {
2492  fault = mmu->translateFunctional(req, tc, BaseMMU::Read,
2493  tran_type, true);
2494  } else {
2495  fault = mmu->translateAtomic(req, tc, BaseMMU::Read,
2496  tran_type, true);
2497  }
2498 
2499  // Now do the access.
2500  if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
2501  Packet pkt = Packet(req, MemCmd::ReadReq);
2502  pkt.dataStatic(data); // caller-owned buffer; the packet does not free it
2503  if (functional) {
2504  port->sendFunctional(&pkt);
2505  } else {
2506  port->sendAtomic(&pkt);
2507  }
2508  assert(!pkt.isError());
2509  }
2510 
2511  // If there was a fault annotate it with the flag saying the fault occurred
2512  // while doing a translation for a stage 1 page table walk.
2513  if (fault != NoFault) {
2514  ArmFault *arm_fault = reinterpret_cast<ArmFault *>(fault.get());
2515  arm_fault->annotate(ArmFault::S1PTW, true);
2516  arm_fault->annotate(ArmFault::OVA, vaddr);
2517  }
2518  return fault;
2519 }
2520 
2521 void
// Timing-mode counterpart of readDataUntimed: hand the descriptor fetch to
// a Stage2Walk translation object, which performs the stage-2 translation
// and issues the memory read when it completes (see Stage2Walk::finish).
// (The first signature line is elided from this generated listing.)
2523  Stage2Walk *translation, int num_bytes,
2524  Request::Flags flags)
2525 {
2526  // translate to physical address using the second stage MMU
2527  translation->setVirt(
2528  desc_addr, num_bytes, flags | Request::PT_WALK, requestorId);
2529  translation->translateTiming(tc);
2530 }
2531 
// Stage2Walk constructor (its first signature line is elided from this
// generated listing). Captures the destination buffer, completion event and
// the original VA (for fault annotation); numBytes starts at zero and the
// Request object is allocated up front for reuse by translateTiming().
2533  uint8_t *_data, Event *_event, Addr vaddr, BaseMMU::Mode _mode,
2534  MMU::ArmTranslationType tran_type)
2535  : data(_data), numBytes(0), event(_event), parent(_parent),
2536  oVAddr(vaddr), mode(_mode), tranType(tran_type), fault(NoFault)
2537 {
2538  req = std::make_shared<Request>();
2539 }
2540 
2541 void
// Stage-2 translation callback: record any fault (tagging it as a stage-1
// page-table-walk fault on the original VA), then either launch the timing
// descriptor read or, on failure/NO_ACCESS, complete the waiting event now.
// (Two signature lines are elided from this generated listing.)
2543  const RequestPtr &req,
2545 {
2546  fault = _fault;
2547 
2548  // If there was a fault annotate it with the flag saying the fault occurred
2549  // while doing a translation for a stage 1 page table walk.
2550  if (fault != NoFault) {
2551  ArmFault *arm_fault = reinterpret_cast<ArmFault *>(fault.get());
2552  arm_fault->annotate(ArmFault::S1PTW, true);
2553  arm_fault->annotate(ArmFault::OVA, oVAddr);
2554  }
2555 
2556  if (_fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
2557  parent.getTableWalkerPort().sendTimingReq(
2558  req->getPaddr(), numBytes, data, req->getFlags(),
2559  tc->getCpuPtr()->clockPeriod(), event);
2560  } else {
2561  // We can't do the DMA access as there's been a problem, so tell the
2562  // event we're done
2563  event->process();
2564  }
2565 }
2566 
2567 void
// Start the stage-2 translation of the descriptor address; this object is
// passed as the Translation, so the MMU calls back into finish() on
// completion. (The signature line is elided from this generated listing.)
2569 {
2570  parent.mmu->translateTiming(req, tc, this, mode, tranType, true);
2571 }
2572 
2574  : statistics::Group(parent),
2575  ADD_STAT(walks, statistics::units::Count::get(),
2576  "Table walker walks requested"),
2577  ADD_STAT(walksShortDescriptor, statistics::units::Count::get(),
2578  "Table walker walks initiated with short descriptors"),
2579  ADD_STAT(walksLongDescriptor, statistics::units::Count::get(),
2580  "Table walker walks initiated with long descriptors"),
2581  ADD_STAT(walksShortTerminatedAtLevel, statistics::units::Count::get(),
2582  "Level at which table walker walks with short descriptors "
2583  "terminate"),
2584  ADD_STAT(walksLongTerminatedAtLevel, statistics::units::Count::get(),
2585  "Level at which table walker walks with long descriptors "
2586  "terminate"),
2587  ADD_STAT(squashedBefore, statistics::units::Count::get(),
2588  "Table walks squashed before starting"),
2589  ADD_STAT(squashedAfter, statistics::units::Count::get(),
2590  "Table walks squashed after completion"),
2591  ADD_STAT(walkWaitTime, statistics::units::Tick::get(),
2592  "Table walker wait (enqueue to first request) latency"),
2593  ADD_STAT(walkServiceTime, statistics::units::Tick::get(),
2594  "Table walker service (enqueue to completion) latency"),
2595  ADD_STAT(pendingWalks, statistics::units::Tick::get(),
2596  "Table walker pending requests distribution"),
2597  ADD_STAT(pageSizes, statistics::units::Count::get(),
2598  "Table walker page sizes translated"),
2599  ADD_STAT(requestOrigin, statistics::units::Count::get(),
2600  "Table walker requests started/completed, data/inst")
2601 {
// NOTE(review): this generated listing elides several original lines in
// this body (chiefly the .flags(...) calls and some leading stat names);
// only the init/bucket-sizing and subname configuration survives below.
2604 
2607 
// Short-descriptor walks terminate at one of two levels (L1 or L2).
2609  .init(2)
2611 
2612  walksShortTerminatedAtLevel.subname(0, "Level1");
2613  walksShortTerminatedAtLevel.subname(1, "Level2");
2614 
// Long-descriptor (LPAE/AArch64) walks may terminate at any of L0-L3.
2616  .init(4)
2618  walksLongTerminatedAtLevel.subname(0, "Level0");
2619  walksLongTerminatedAtLevel.subname(1, "Level1");
2620  walksLongTerminatedAtLevel.subname(2, "Level2");
2621  walksLongTerminatedAtLevel.subname(3, "Level3");
2622 
2625 
2628 
2629  walkWaitTime
2630  .init(16)
2632 
2634  .init(16)
2636 
2637  pendingWalks
2638  .init(16)
2641 
// One bin per supported page size; indices match pageSizeNtoStatBin().
2642  pageSizes // see DDI 0487A D4-1661
2643  .init(10)
2646  pageSizes.subname(0, "4KiB");
2647  pageSizes.subname(1, "16KiB");
2648  pageSizes.subname(2, "64KiB");
2649  pageSizes.subname(3, "1MiB");
2650  pageSizes.subname(4, "2MiB");
2651  pageSizes.subname(5, "16MiB");
2652  pageSizes.subname(6, "32MiB");
2653  pageSizes.subname(7, "512MiB");
2654  pageSizes.subname(8, "1GiB");
2655  pageSizes.subname(9, "4TiB");
2656 
// 2x2 matrix: (Requested, Completed) x (Data, Inst).
2658  .init(2,2) // Instruction/Data, requests/completed
2660  requestOrigin.subname(0,"Requested");
2661  requestOrigin.subname(1,"Completed");
2662  requestOrigin.ysubname(0,"Data");
2663  requestOrigin.ysubname(1,"Inst");
2664 }
2665 
2666 } // namespace gem5
gem5::ArmISA::TableWalker::doL1DescEvent
EventFunctionWrapper doL1DescEvent
Definition: table_walker.hh:1125
gem5::ArmISA::TableWalker::testWalk
Fault testWalk(Addr pa, Addr size, TlbEntry::DomainType domain, LookupLevel lookup_level, bool stage2)
Definition: table_walker.cc:2450
gem5::curTick
Tick curTick()
The universal simulation clock.
Definition: cur_tick.hh:46
gem5::ArmISA::TableWalker::DescriptorBase::dbgHeader
virtual std::string dbgHeader() const =0
gem5::PortID
int16_t PortID
Port index/ID type, and a symbolic name for an invalid port id.
Definition: types.hh:252
gem5::BaseMMU::Translation::squashed
virtual bool squashed() const
This function is used by the page table walker to determine if it should translate a pending requ...
Definition: mmu.hh:84
gem5::ArmISA::TableWalker::doLongDescriptorWrapper
void doLongDescriptorWrapper(LookupLevel curr_lookup_level)
Definition: table_walker.cc:2150
gem5::ArmISA::TableWalker::LookupLevel
enums::ArmLookupLevel LookupLevel
Definition: table_walker.hh:68
gem5::ArmISA::MISCREG_CPSR
@ MISCREG_CPSR
Definition: misc.hh:61
gem5::statistics::DataWrapVec2d::ysubname
Derived & ysubname(off_type index, const std::string &subname)
Definition: statistics.hh:490
gem5::ArmISA::TableWalker::WalkerState::isSecure
bool isSecure
If the access comes from the secure state.
Definition: table_walker.hh:869
gem5::SimObject::getPort
virtual Port & getPort(const std::string &if_name, PortID idx=InvalidPortID)
Get a port with a given name and index.
Definition: sim_object.cc:126
gem5::ArmISA::tlb
Bitfield< 59, 56 > tlb
Definition: misc_types.hh:92
gem5::ArmISA::MISCREG_VTTBR
@ MISCREG_VTTBR
Definition: misc.hh:448
gem5::BaseMMU::Read
@ Read
Definition: mmu.hh:56
gem5::ArmISA::TableWalker::LongDescEventByLevel
Event * LongDescEventByLevel[4]
Definition: table_walker.hh:1143
gem5::ArmISA::MISCREG_TTBR0_EL2
@ MISCREG_TTBR0_EL2
Definition: misc.hh:603
gem5::ArmISA::TableWalker::TableWalkerStats::walksLongDescriptor
statistics::Scalar walksLongDescriptor
Definition: table_walker.hh:1066
gem5::ArmISA::TableWalker::WalkerState::pxnTable
bool pxnTable
Definition: table_walker.hh:880
gem5::ThreadContext::readMiscReg
virtual RegVal readMiscReg(RegIndex misc_reg)=0
gem5::NoFault
constexpr decltype(nullptr) NoFault
Definition: types.hh:260
gem5::ArmISA::TableWalker::Port
Definition: table_walker.hh:939
gem5::ArmISA::TableWalker::WalkerState::walkEntry
TlbEntry walkEntry
Initial walk entry allowing to skip lookup levels.
Definition: table_walker.hh:818
gem5::ArmISA::ELIs64
bool ELIs64(ThreadContext *tc, ExceptionLevel el)
Definition: utility.cc:290
gem5::ArmISA::TableWalker::LongDescriptor::pxnTable
bool pxnTable() const
Is privileged execution allowed on subsequent lookup levels?
Definition: table_walker.hh:792
gem5::ArmISA::ArmFault::AddressSizeLL
@ AddressSizeLL
Definition: faults.hh:111
gem5::ArmISA::TableWalker::WalkerState::sctlr
SCTLR sctlr
Cached copy of the sctlr as it existed when translation began.
Definition: table_walker.hh:838
gem5::ArmISA::MISCREG_TTBR0_EL3
@ MISCREG_TTBR0_EL3
Definition: misc.hh:609
gem5::ArmISA::TableWalker::WalkerState::el
ExceptionLevel el
Current exception level.
Definition: table_walker.hh:809
gem5::ArmISA::TlbEntry::valid
bool valid
Definition: pagetable.hh:236
gem5::ArmISA::TableWalker::Port::sendAtomicReq
void sendAtomicReq(Addr desc_addr, int size, uint8_t *data, Request::Flags flag, Tick delay)
Definition: table_walker.cc:183
gem5::RegVal
uint64_t RegVal
Definition: types.hh:173
gem5::ArmISA::TableWalker::pendingChangeTick
Tick pendingChangeTick
Definition: table_walker.hh:1080
gem5::ArmISA::MISCREG_SCTLR_EL3
@ MISCREG_SCTLR_EL3
Definition: misc.hh:591
gem5::ArmISA::TableWalker::TableWalkerStats::walks
statistics::Scalar walks
Definition: table_walker.hh:1064
gem5::ArmISA::TableWalker::L2Descriptor::ap
uint8_t ap() const override
Three bit access protection flags.
Definition: table_walker.hh:354
gem5::ArmISA::TableWalker::WalkerState::longDesc
LongDescriptor longDesc
Long-format descriptor (LPAE and AArch64)
Definition: table_walker.hh:908
system.hh
gem5::ArmISA::TableWalker::doProcessEvent
EventFunctionWrapper doProcessEvent
Definition: table_walker.hh:1174
gem5::ArmISA::TableWalker::readDataTimed
void readDataTimed(ThreadContext *tc, Addr desc_addr, Stage2Walk *translation, int num_bytes, Request::Flags flags)
Definition: table_walker.cc:2522
gem5::ArmISA::TableWalker::completeDrain
void completeDrain()
Checks if all state is cleared and if so, completes drain.
Definition: table_walker.cc:243
gem5::ArmISA::TableWalker::memAttrsAArch64
void memAttrsAArch64(ThreadContext *tc, TlbEntry &te, LongDescriptor &lDescriptor)
Definition: table_walker.cc:1573
gem5::ArmISA::TableWalker::TableWalkerStats::walksLongTerminatedAtLevel
statistics::Vector walksLongTerminatedAtLevel
Definition: table_walker.hh:1068
gem5::ArmISA::MISCREG_TTBR0
@ MISCREG_TTBR0
Definition: misc.hh:254
data
const char data[]
Definition: circlebuf.test.cc:48
gem5::ArmISA::ArmFault::FaultSourceInvalid
@ FaultSourceInvalid
Definition: faults.hh:120
gem5::ArmISA::TableWalker::doL1LongDescriptorWrapper
void doL1LongDescriptorWrapper()
Definition: table_walker.cc:2132
gem5::X86ISA::L
Bitfield< 7, 0 > L
Definition: int.hh:59
gem5::ArmISA::TableWalker::pendingQueue
std::list< WalkerState * > pendingQueue
Queue of requests that have passed are waiting because the walker is currently busy.
Definition: table_walker.hh:1026
gem5::ArmISA::TableWalker::release
const ArmRelease * release
Cached copies of system-level properties.
Definition: table_walker.hh:1056
gem5::ArmISA::Grain64KB
@ Grain64KB
Definition: pagetable.hh:65
gem5::ArmISA::TableWalker::LongDescriptor::Block
@ Block
Definition: table_walker.hh:420
gem5::ArmISA::el
Bitfield< 3, 2 > el
Definition: misc_types.hh:73
gem5::ArmISA::TableWalker::WalkerState::vaddr
Addr vaddr
The virtual address that is being translated with tagging removed.
Definition: table_walker.hh:832
gem5::Drainable::drainState
DrainState drainState() const
Return the current drain state of an object.
Definition: drain.hh:324
gem5::ArmISA::TableWalker::requestorId
RequestorID requestorId
Requestor id assigned by the MMU.
Definition: table_walker.hh:1032
gem5::ArmISA::TableWalker::WalkerState::tcr
TCR tcr
Definition: table_walker.hh:850
gem5::ArmISA::TableWalker::L1Descriptor::Section
@ Section
Definition: table_walker.hh:108
gem5::ArmISA::TableWalker::Stage2Walk::req
RequestPtr req
Definition: table_walker.hh:981
gem5::ArmISA::MISCREG_TCR_EL2
@ MISCREG_TCR_EL2
Definition: misc.hh:604
gem5::ArmISA::TableWalker::getTableWalkerPort
Port & getTableWalkerPort()
Definition: table_walker.cc:104
warn_once
#define warn_once(...)
Definition: logging.hh:250
gem5::ArmISA::aarch64
Bitfield< 34 > aarch64
Definition: types.hh:81
gem5::ArmISA::TableWalker::nextWalk
void nextWalk(ThreadContext *tc)
Definition: table_walker.cc:2209
gem5::statistics::Vector2dBase::init
Derived & init(size_type _x, size_type _y)
Definition: statistics.hh:1174
gem5::QueuedRequestPort
The QueuedRequestPort combines two queues, a request queue and a snoop response queue,...
Definition: qport.hh:109
gem5::ArmISA::decodePhysAddrRange64
int decodePhysAddrRange64(uint8_t pa_enc)
Returns the n.
Definition: utility.cc:1299
gem5::ArmISA::getPageTableOps
const PageTableOps * getPageTableOps(GrainSize trans_granule)
Definition: pagetable.cc:476
gem5::ArmISA::TableWalker::LongDescriptor::paddr
Addr paddr() const
Return the physical address of the entry.
Definition: table_walker.hh:565
gem5::BaseMMU::Mode
Mode
Definition: mmu.hh:56
gem5::ArmISA::GrainMap_tg0
const GrainSize GrainMap_tg0[]
Definition: pagetable.cc:49
gem5::ArmISA::attr
attr
Definition: misc_types.hh:656
gem5::Flags::set
void set(Type mask)
Set all flag's bits matching the given mask.
Definition: flags.hh:116
gem5::Packet::req
RequestPtr req
A pointer to the original request.
Definition: packet.hh:366
gem5::ArmISA::asid
asid
Definition: misc_types.hh:618
gem5::BaseMMU::Write
@ Write
Definition: mmu.hh:56
gem5::ArmISA::TableWalker::WalkerState::userTable
bool userTable
Definition: table_walker.hh:878
gem5::ArmISA::domain
Bitfield< 7, 4 > domain
Definition: misc_types.hh:424
gem5::ArmISA::TlbEntry::DomainType::NoAccess
@ NoAccess
gem5::ArmISA::TableWalker::Stage2Walk
This translation class is used to trigger the data fetch once a timing translation returns the transl...
Definition: table_walker.hh:976
gem5::ArmISA::TableWalker::TableWalkerStats::requestOrigin
statistics::Vector2d requestOrigin
Definition: table_walker.hh:1076
gem5::ArmISA::TlbEntry::pfn
Addr pfn
Definition: pagetable.hh:210
gem5::ArmISA::f
Bitfield< 6 > f
Definition: misc_types.hh:68
gem5::ArmISA::TableWalker::DescriptorBase::xn
virtual bool xn() const =0
pagetable.hh
gem5::RequestPort::sendAtomic
Tick sendAtomic(PacketPtr pkt)
Send an atomic request packet, where the data is moved and the state is updated in zero time,...
Definition: port.hh:464
gem5::ArmISA::TableWalker::LongDescriptor::secure
bool secure(bool have_security, WalkerState *currState) const override
Returns true if this entry targets the secure physical address map.
Definition: table_walker.hh:472
gem5::ArmISA::TableWalker::LongDescriptor::nextDescAddr
Addr nextDescAddr(Addr va) const
Return the address of the next descriptor.
Definition: table_walker.hh:599
gem5::ArmISA::ArmFault::LpaeTran
@ LpaeTran
Definition: faults.hh:152
gem5::MipsISA::event
Bitfield< 10, 5 > event
Definition: pra_constants.hh:300
gem5::statistics::nozero
const FlagsType nozero
Don't print if this is zero.
Definition: info.hh:68
gem5::ArmISA::MISCREG_TTBR1_EL1
@ MISCREG_TTBR1_EL1
Definition: misc.hh:599
gem5::ArmISA::MISCREG_TTBCR
@ MISCREG_TTBCR
Definition: misc.hh:260
gem5::ArmISA::vmid_t
uint16_t vmid_t
Definition: types.hh:57
gem5::ArmISA::TableWalker::DescriptorBase::texcb
virtual uint8_t texcb() const
Definition: table_walker.hh:90
gem5::ArmISA::TableWalker::WalkerState::secureLookup
bool secureLookup
Helper variables used to implement hierarchical access permissions when the long-desc.
Definition: table_walker.hh:876
gem5::ArmISA::TableWalker::LongDescriptor::Table
@ Table
Definition: table_walker.hh:419
gem5::Request::SECURE
@ SECURE
The request targets the secure memory space.
Definition: request.hh:186
gem5::ArmISA::TableWalker::TableWalkerStats::squashedAfter
statistics::Scalar squashedAfter
Definition: table_walker.hh:1070
gem5::Packet::cacheResponding
bool cacheResponding() const
Definition: packet.hh:646
gem5::ArmISA::TLB
Definition: tlb.hh:115
gem5::statistics::DataWrapVec::subname
Derived & subname(off_type index, const std::string &name)
Set the subfield name for the given index, and marks this stat to print at the end of simulation.
Definition: statistics.hh:402
gem5::ArmISA::TableWalker::L1Descriptor::data
uint32_t data
The raw bits of the entry.
Definition: table_walker.hh:113
gem5::ArmISA::MISCREG_TCR_EL3
@ MISCREG_TCR_EL3
Definition: misc.hh:610
gem5::ArmISA::EL1
@ EL1
Definition: types.hh:267
gem5::Packet::hasSharers
bool hasSharers() const
Definition: packet.hh:673
gem5::ArmISA::GrainMap_tg1
const GrainSize GrainMap_tg1[]
Definition: pagetable.cc:51
gem5::ArmISA::byteOrder
ByteOrder byteOrder(const ThreadContext *tc)
Definition: utility.hh:365
gem5::ArmISA::MISCREG_MAIR_EL3
@ MISCREG_MAIR_EL3
Definition: misc.hh:732
gem5::ArmISA::TableWalker::L1Descriptor::Ignore
@ Ignore
Definition: table_walker.hh:106
gem5::EventManager::schedule
void schedule(Event &event, Tick when)
Definition: eventq.hh:1019
tlb.hh
gem5::ArmISA::TableWalker::L2Descriptor::data
uint32_t data
The raw bits of the entry.
Definition: table_walker.hh:275
gem5::ArmISA::TlbEntry::DomainType::Client
@ Client
gem5::statistics::nonan
const FlagsType nonan
Don't print if this is NAN.
Definition: info.hh:70
gem5::ArmISA::TableWalker::fetchDescriptor
bool fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes, Request::Flags flags, int queueIndex, Event *event, void(TableWalker::*doDescriptor)())
Definition: table_walker.cc:2218
gem5::ArmISA::MISCREG_MAIR_EL1
@ MISCREG_MAIR_EL1
Definition: misc.hh:726
gem5::ArmISA::TableWalker::LongDescriptor::ap
uint8_t ap() const override
2-bit access protection flags
Definition: table_walker.hh:678
gem5::ArmISA::TableWalker::TableWalkerStats::walkServiceTime
statistics::Histogram walkServiceTime
Definition: table_walker.hh:1072
gem5::ArmISA::TableWalker::Port::handleResp
void handleResp(TableWalkerState *state, Addr addr, Addr size, Tick delay=0)
Definition: table_walker.cc:233
gem5::mbits
constexpr T mbits(T val, unsigned first, unsigned last)
Mask off the given bits in place like bits() but without shifting.
Definition: bitfield.hh:103
gem5::ArmISA::TlbEntry::MemoryType::Normal
@ Normal
gem5::ArmISA::TLB::multiInsert
void multiInsert(TlbEntry &pte)
Insert a PTE in the current TLB and in the higher levels.
Definition: tlb.cc:270
gem5::ArmISA::TableWalker::Stage2Walk::setVirt
void setVirt(Addr vaddr, int size, Request::Flags flags, int requestorId)
Definition: table_walker.hh:1001
system.hh
gem5::ArmISA::TableWalker::WalkerState::stage2Tran
BaseMMU::Translation * stage2Tran
A pointer to the stage 2 translation that's in progress.
Definition: table_walker.hh:889
gem5::ArmISA::TableWalker::walk
Fault walk(const RequestPtr &req, ThreadContext *tc, uint16_t asid, vmid_t _vmid, bool hyp, BaseMMU::Mode mode, BaseMMU::Translation *_trans, bool timing, bool functional, bool secure, MMU::ArmTranslationType tran_type, bool stage2, const TlbEntry *walk_entry)
Definition: table_walker.cc:289
gem5::ArmISA::MISCREG_SCTLR
@ MISCREG_SCTLR
Definition: misc.hh:235
gem5::ArmISA::i
Bitfield< 7 > i
Definition: misc_types.hh:67
table_walker.hh
gem5::ArmISA::TableWalker::WalkerState::vmid
vmid_t vmid
Definition: table_walker.hh:822
gem5::ArmISA::TableWalker::WalkerState::vaddr_tainted
Addr vaddr_tainted
The virtual address that is being translated.
Definition: table_walker.hh:835
gem5::ArmISA::Grain4KB
@ Grain4KB
Definition: pagetable.hh:63
gem5::ArmISA::MMU::testWalk
Fault testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode, TlbEntry::DomainType domain, LookupLevel lookup_level, bool stage2)
Definition: mmu.cc:1607
gem5::ArmISA::TableWalker::WalkerState::tranType
MMU::ArmTranslationType tranType
The translation type that has been requested.
Definition: table_walker.hh:901
gem5::ArmISA::hpd
Bitfield< 24 > hpd
Definition: misc_types.hh:534
gem5::ArmISA::TlbEntry
Definition: pagetable.hh:165
gem5::statistics::DistBase::sample
void sample(const U &v, int n=1)
Add a value to the distribtion n times.
Definition: statistics.hh:1328
gem5::ArmISA::TableWalker::Port::Port
Port(TableWalker *_walker, RequestorID id)
Definition: table_walker.cc:141
gem5::ArmISA::TlbEntry::DomainType
DomainType
Definition: pagetable.hh:177
gem5::ArmISA::TableWalker::getPort
gem5::Port & getPort(const std::string &if_name, PortID idx=InvalidPortID) override
Get a port with a given name and index.
Definition: table_walker.cc:110
gem5::ArmISA::MISCREG_VTCR_EL2
@ MISCREG_VTCR_EL2
Definition: misc.hh:606
gem5::statistics::dist
const FlagsType dist
Print the distribution.
Definition: info.hh:66
gem5::BaseMMU::Execute
@ Execute
Definition: mmu.hh:56
gem5::ArmISA::HaveLVA
bool HaveLVA(ThreadContext *tc)
Definition: utility.cc:239
gem5::BaseMMU
Definition: mmu.hh:53
gem5::ArmISA::TableWalker::L1Descriptor::ap
uint8_t ap() const override
Three bit access protection flags.
Definition: table_walker.hh:199
gem5::ArmISA::ArmFault::VmsaTran
@ VmsaTran
Definition: faults.hh:153
gem5::ArmISA::TableWalker::COMPLETED
static const unsigned COMPLETED
Definition: table_walker.hh:1083
gem5::ArmISA::TableWalker::WalkerState::functional
bool functional
If the atomic mode should be functional.
Definition: table_walker.hh:895
gem5::ArmISA::TableWalker::TableWalkerStats::walksShortDescriptor
statistics::Scalar walksShortDescriptor
Definition: table_walker.hh:1065
gem5::Cycles
Cycles is a wrapper class for representing cycle counts, i.e.
Definition: types.hh:78
gem5::ArmISA::TableWalker::LongDescriptor::nextTableAddr
Addr nextTableAddr() const
Return the address of the next page table.
Definition: table_walker.hh:581
gem5::ArmISA::TableWalker::REQUESTED
static const unsigned REQUESTED
Definition: table_walker.hh:1082
gem5::ArmISA::MISCREG_HTCR
@ MISCREG_HTCR
Definition: misc.hh:263
gem5::statistics::pdf
const FlagsType pdf
Print the percent of the total that this entry represents.
Definition: info.hh:62
gem5::ArmISA::purifyTaggedAddr
Addr purifyTaggedAddr(Addr addr, ThreadContext *tc, ExceptionLevel el, TCR tcr, bool is_instr)
Removes the tag from tagged addresses if that mode is enabled.
Definition: utility.cc:469
gem5::ArmISA::MISCREG_MAIR_EL2
@ MISCREG_MAIR_EL2
Definition: misc.hh:730
gem5::ArmISA::TableWalker::doL1Descriptor
void doL1Descriptor()
Definition: table_walker.cc:1673
gem5::ArmISA::TableWalker::TableWalkerStats::TableWalkerStats
TableWalkerStats(statistics::Group *parent)
Definition: table_walker.cc:2573
gem5::ArmISA::TableWalker::Port::handleRespPacket
void handleRespPacket(PacketPtr pkt, Tick delay=0)
Definition: table_walker.cc:218
gem5::Request::PT_WALK
@ PT_WALK
The request is a page table walk.
Definition: request.hh:188
gem5::ArmISA::TableWalker::WalkerState::asid
uint16_t asid
ASID that we're servicing the request under.
Definition: table_walker.hh:821
gem5::Packet::dataStatic
void dataStatic(T *p)
Set the data pointer to the following value that should not be freed.
Definition: packet.hh:1134
mmu.hh
gem5::ArmISA::TableWalker::LongDescriptor::xnTable
bool xnTable() const
Is execution allowed on subsequent lookup levels?
Definition: table_walker.hh:784
gem5::ArmISA::MISCREG_VTCR
@ MISCREG_VTCR
Definition: misc.hh:264
gem5::ArmISA::TableWalker::L1Descriptor::PageTable
@ PageTable
Definition: table_walker.hh:107
gem5::ArmRelease::has
bool has(ArmExtension ext) const
Definition: system.hh:75
gem5::ArmISA::pa
Bitfield< 39, 12 > pa
Definition: misc_types.hh:657
gem5::ArmISA::TlbEntry::MemoryType::Device
@ Device
gem5::ArmISA::TableWalker::LongDescriptor::secureTable
bool secureTable() const
Whether the subsequent levels of lookup are secure.
Definition: table_walker.hh:751
gem5::Flags< FlagsType >
gem5::DrainState
DrainState
Object drain/handover states.
Definition: drain.hh:74
gem5::ArmISA::TableWalker::sctlr
SCTLR sctlr
Cached copy of the sctlr as it existed when translation began.
Definition: table_walker.hh:1044
gem5::ArmISA::TableWalker::WalkerState::isHyp
bool isHyp
Definition: table_walker.hh:823
gem5::ArmISA::TableWalker::LongDescriptor::data
uint64_t data
The raw bits of the entry.
Definition: table_walker.hh:430
gem5::RequestPort::sendFunctional
void sendFunctional(PacketPtr pkt) const
Send a functional request packet, where the data is instantly updated everywhere in the memory system...
Definition: port.hh:485
gem5::ArmISA::MISCREG_PRRR
@ MISCREG_PRRR
Definition: misc.hh:369
gem5::ArmISA::TableWalker::walkAddresses
std::tuple< Addr, Addr, LookupLevel > walkAddresses(Addr ttbr, GrainSize tg, int tsz, int pa_range)
Returns a tuple made of: 1) The address of the first page table 2) The address of the first descripto...
Definition: table_walker.cc:1192
gem5::ArmISA::TableWalker::LongDescriptor::sh
uint8_t sh() const
2-bit shareability field
Definition: table_walker.hh:670
gem5::ArmISA::z
Bitfield< 11 > z
Definition: misc_types.hh:375
gem5::ArmISA::TableWalker::LongDescriptor::getRawData
uint64_t getRawData() const override
Definition: table_walker.hh:445
gem5::ThreadContext
ThreadContext is the external interface to all thread state for anything outside of the CPU.
Definition: thread_context.hh:94
gem5::ArmISA::MISCREG_SCTLR_EL1
@ MISCREG_SCTLR_EL1
Definition: misc.hh:579
gem5::Named::name
virtual std::string name() const
Definition: named.hh:47
gem5::ArmISA::TableWalker::Stage2Walk::finish
void finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc, BaseMMU::Mode mode)
Definition: table_walker.cc:2542
gem5::Fault
std::shared_ptr< FaultBase > Fault
Definition: types.hh:255
gem5::ArmISA::HaveVirtHostExt
bool HaveVirtHostExt(ThreadContext *tc)
Definition: utility.cc:232
gem5::ArmISA::TableWalker::Port::sendFunctionalReq
void sendFunctionalReq(Addr desc_addr, int size, uint8_t *data, Request::Flags flag)
Definition: table_walker.cc:171
gem5::ArmISA::ArmFault::FaultSource
FaultSource
Generic fault source enums used to index into {short/long/aarch64}DescFaultSources[] to get the actua...
Definition: faults.hh:95
gem5::SimObject::params
const Params & params() const
Definition: sim_object.hh:176
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:186
gem5::ArmISA::TableWalker::L1Descriptor::domain
TlbEntry::DomainType domain() const override
Domain Client/Manager: ARM DDI 0406B: B3-31.
Definition: table_walker.hh:206
gem5::Event
Definition: eventq.hh:251
ADD_STAT
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Definition: group.hh:75
gem5::ArmISA::TableWalker::processWalkAArch64
Fault processWalkAArch64()
Definition: table_walker.cc:889
gem5::ArmISA::TableWalker::pageSizeNtoStatBin
static uint8_t pageSizeNtoStatBin(uint8_t N)
Definition: table_walker.cc:2459
gem5::ArmISA::TableWalker::WalkerState::delayed
bool delayed
Whether the response is delayed in timing mode due to additional lookups.
Definition: table_walker.hh:912
gem5::ArmISA::TableWalker::stateQueues
std::list< WalkerState * > stateQueues[LookupLevel::Num_ArmLookupLevel]
Queues of requests for all the different lookup levels.
Definition: table_walker.hh:1022
gem5::Packet
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:283
gem5::ArmISA::MMU::tranTypeEL
static ExceptionLevel tranTypeEL(CPSR cpsr, ArmTranslationType type)
Determine the EL to use for the purpose of a translation given a specific translation type.
Definition: mmu.cc:1368
gem5::ArmISA::TableWalker::doL0LongDescriptorWrapper
void doL0LongDescriptorWrapper()
Definition: table_walker.cc:2126
gem5::ArmISA::TableWalker::WalkerState::tc
ThreadContext * tc
Thread context that we're doing the walk for.
Definition: table_walker.hh:803
gem5::ArmISA::MISCREG_MAIR1
@ MISCREG_MAIR1
Definition: misc.hh:378
gem5::ArmISA::EL2
@ EL2
Definition: types.hh:268
gem5::ArmISA::TableWalker::processWalkLPAE
Fault processWalkLPAE()
Definition: table_walker.cc:691
gem5::ArmISA::TableWalker::WalkerState::physAddrRange
int physAddrRange
Current physical address range in bits.
Definition: table_walker.hh:812
gem5::probing::Packet
ProbePointArg< PacketInfo > Packet
Packet probe point.
Definition: mem.hh:109
gem5::ArmISA::MISCREG_SCTLR_EL2
@ MISCREG_SCTLR_EL2
Definition: misc.hh:584
gem5::MipsISA::p
Bitfield< 0 > p
Definition: pra_constants.hh:326
gem5::Tick
uint64_t Tick
Tick count type.
Definition: types.hh:58
gem5::ArmISA::TableWalker::WalkerState::startTime
Tick startTime
Timestamp for calculating elapsed time in service (for stats)
Definition: table_walker.hh:917
gem5::ArmISA::TableWalker::L1Descriptor::l2Addr
Addr l2Addr() const
Address of L2 descriptor if it exists.
Definition: table_walker.hh:213
gem5::ArmISA::TableWalker::LongDescriptor::type
EntryType type() const
Return the descriptor type.
Definition: table_walker.hh:484
gem5::ArmISA::ReservedGrain
@ ReservedGrain
Definition: pagetable.hh:66
gem5::ArmISA::MISCREG_HTTBR
@ MISCREG_HTTBR
Definition: misc.hh:447
gem5::RequestPtr
std::shared_ptr< Request > RequestPtr
Definition: request.hh:92
gem5::ArmISA::s
Bitfield< 4 > s
Definition: misc_types.hh:562
gem5::ArmISA::TableWalker::DescriptorBase::pfn
virtual Addr pfn() const =0
gem5::ArmISA::TableWalker::insertPartialTableEntry
void insertPartialTableEntry(LongDescriptor &descriptor)
Definition: table_walker.cc:2289
gem5::Request::NO_ACCESS
@ NO_ACCESS
The request should not cause a memory access.
Definition: request.hh:146
gem5::ArmISA::TableWalker::doL2DescEvent
EventFunctionWrapper doL2DescEvent
Definition: table_walker.hh:1129
gem5::ArmISA::TableWalker::DescriptorBase::offsetBits
virtual uint8_t offsetBits() const =0
gem5::MemCmd::ReadReq
@ ReadReq
Definition: packet.hh:86
gem5::ArmISA::TableWalker::WalkerState::ttbcr
TTBCR ttbcr
Definition: table_walker.hh:849
gem5::ArmISA::TableWalker::TableWalkerStats::pageSizes
statistics::Vector pageSizes
Definition: table_walker.hh:1075
gem5::ArmISA::TableWalker::WalkerState::isFetch
bool isFetch
If the access is a fetch (for execution, and no-exec) must be checked?
Definition: table_walker.hh:866
gem5::ArmISA::TableWalker::port
Port * port
Port shared by the two table walkers.
Definition: table_walker.hh:1035
gem5::ArmISA::TableWalker::DescriptorBase::getRawData
virtual uint64_t getRawData() const =0
gem5::ArmISA::TableWalker::Stage2Walk::translateTiming
void translateTiming(ThreadContext *tc)
Definition: table_walker.cc:2568
gem5::ArmISA::TableWalker::doL2LongDescriptorWrapper
void doL2LongDescriptorWrapper()
Definition: table_walker.cc:2138
gem5::ArmISA::TableWalker::doL1DescriptorWrapper
void doL1DescriptorWrapper()
Definition: table_walker.cc:2020
gem5::ArmISA::TableWalker::WalkerState::req
RequestPtr req
Request that is currently being serviced.
Definition: table_walker.hh:815
gem5::htog
T htog(T value, ByteOrder guest_byte_order)
Definition: byteswap.hh:187
gem5::ArmISA::MMU::translateAtomic
Fault translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode) override
Definition: mmu.hh:208
gem5::ArmISA::TableWalker::processWalkWrapper
void processWalkWrapper()
Definition: table_walker.cc:484
gem5::ArmISA::TableWalker::LongDescriptor::physAddrRange
uint8_t physAddrRange
Definition: table_walker.hh:442
gem5::ArmISA::TableWalker::LongDescriptor::Invalid
@ Invalid
Definition: table_walker.hh:418
gem5::ArmISA::TableWalker::_physAddrRange
uint8_t _physAddrRange
Definition: table_walker.hh:1057
gem5::ArmISA::TableWalker::numSquashable
unsigned numSquashable
The number of walks belonging to squashed instructions that can be removed from the pendingQueue per ...
Definition: table_walker.hh:1053
gem5::ArmISA::te
Bitfield< 30 > te
Definition: misc_types.hh:338
gem5::ArmISA::TableWalker::WalkerState::transState
BaseMMU::Translation * transState
Translation state for delayed requests.
Definition: table_walker.hh:826
gem5::ArmISA::TableWalker::DescriptorBase::secure
virtual bool secure(bool have_security, WalkerState *currState) const =0
gem5::ArmISA::mask
Bitfield< 3, 0 > mask
Definition: pcstate.hh:63
gem5::ArmISA::TableWalker::LongDescriptor::grainSize
GrainSize grainSize
Width of the granule size in bits.
Definition: table_walker.hh:440
gem5::bits
constexpr T bits(T val, unsigned first, unsigned last)
Extract the bitfield from position 'first' to 'last' (inclusive) from 'val' and right justify it.
Definition: bitfield.hh:76
compiler.hh
gem5::ArmISA::TableWalker::DescriptorBase
Definition: table_walker.hh:73
gem5::ArmISA::TableWalker::WalkerState::tableWalker
TableWalker * tableWalker
Definition: table_walker.hh:914
gem5::ArmISA::EL3
@ EL3
Definition: types.hh:269
gem5::ArmISA::TableWalker::WalkerState::htcr
HTCR htcr
Cached copy of the htcr as it existed when translation began.
Definition: table_walker.hh:854
gem5::ArmISA::TableWalker::TableWalkerState
Definition: table_walker.hh:932
gem5::DrainState::Drained
@ Drained
Buffers drained, ready for serialization/handover.
gem5::ArmISA::TableWalker::WalkerState::isWrite
bool isWrite
If the access is a write.
Definition: table_walker.hh:863
gem5::ArmISA::TableWalker::L1Descriptor::Reserved
@ Reserved
Definition: table_walker.hh:109
gem5::ArmISA::TableWalker::checkVAddrSizeFaultAArch64
bool checkVAddrSizeFaultAArch64(Addr addr, int top_bit, GrainSize granule, int tsz, bool low_range)
Definition: table_walker.cc:865
gem5::ArmISA::TableWalker::WalkerState::vtcr
VTCR_t vtcr
Cached copy of the vtcr as it existed when translation began.
Definition: table_walker.hh:860
gem5::ArmISA::s1TranslationRegime
ExceptionLevel s1TranslationRegime(ThreadContext *tc, ExceptionLevel el)
Definition: utility.cc:246
faults.hh
gem5::ArmISA::TableWalker::L1Descriptor::type
EntryType type() const
Definition: table_walker.hh:144
gem5::ArmISA::TableWalker::generateLongDescFault
Fault generateLongDescFault(ArmFault::FaultSource src)
Definition: table_walker.cc:1779
gem5::Addr
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:147
gem5::Packet::isError
bool isError() const
Definition: packet.hh:610
gem5::ArmISA::TableWalker::LongDescriptor
Long-descriptor format (LPAE)
Definition: table_walker.hh:412
gem5::statistics::Histogram::init
Histogram & init(size_type size)
Set the parameters of this histogram.
Definition: statistics.hh:2154
gem5::Packet::senderState
SenderState * senderState
This packet's sender state.
Definition: packet.hh:534
gem5::ArmISA::MISCREG_NMRR
@ MISCREG_NMRR
Definition: misc.hh:375
gem5::ArmISA::TableWalker::LongDescriptor::Page
@ Page
Definition: table_walker.hh:421
name
const std::string & name()
Definition: trace.cc:49
gem5::ArmISA::MiscRegIndex
MiscRegIndex
Definition: misc.hh:59
gem5::ArmISA::TableWalker::TableWalkerStats::pendingWalks
statistics::Histogram pendingWalks
Definition: table_walker.hh:1074
gem5::ArmISA::TableWalker::doL2Descriptor
void doL2Descriptor()
Definition: table_walker.cc:1964
gem5::context_switch_task_id::DMA
@ DMA
Definition: request.hh:84
gem5::ArmISA::MISCREG_TTBR1
@ MISCREG_TTBR1
Definition: misc.hh:257
gem5::ArmISA::isSecure
bool isSecure(ThreadContext *tc)
Definition: utility.cc:73
gem5::ArmISA::TableWalker::pendingReqs
unsigned pendingReqs
Definition: table_walker.hh:1079
gem5::ArmISA::TableWalker::TableWalkerStats::walkWaitTime
statistics::Histogram walkWaitTime
Definition: table_walker.hh:1071
gem5::ClockedObject
The ClockedObject class extends the SimObject with a clock and accessor functions to relate ticks to ...
Definition: clocked_object.hh:234
gem5::Clocked::clockEdge
Tick clockEdge(Cycles cycles=Cycles(0)) const
Determine the tick when a cycle begins, by default the current one, but the argument also enables the...
Definition: clocked_object.hh:177
gem5::ArmISA::TableWalker::DescriptorBase::lookupLevel
LookupLevel lookupLevel
Current lookup level for this descriptor.
Definition: table_walker.hh:79
gem5::ArmISA::TableWalker::DescriptorBase::global
virtual bool global(WalkerState *currState) const =0
gem5::ArmISA::MISCREG_MAIR0
@ MISCREG_MAIR0
Definition: misc.hh:372
gem5::ArmISA::sh
Bitfield< 8, 7 > sh
Definition: misc_types.hh:661
gem5::ArmISA::TableWalker::L1Descriptor::supersection
bool supersection() const
Is the page a Supersection (16 MiB)?
Definition: table_walker.hh:151
gem5::ArmISA::TableWalker::LongDescriptor::aarch64
bool aarch64
True if the current lookup is performed in AArch64 state.
Definition: table_walker.hh:437
gem5::ArmISA::ArmFault
Definition: faults.hh:64
gem5::X86ISA::reg
Bitfield< 5, 3 > reg
Definition: types.hh:92
gem5::ArmISA::TableWalker::WalkerState::xnTable
bool xnTable
Definition: table_walker.hh:879
gem5::ArmISA::TableWalker::LongDescriptor::af
bool af() const
Returns true if the access flag (AF) is set.
Definition: table_walker.hh:662
gem5::Drainable::signalDrainDone
void signalDrainDone() const
Signal that an object is drained.
Definition: drain.hh:305
gem5::FullSystem
bool FullSystem
The FullSystem variable can be used to determine the current mode of simulation.
Definition: root.cc:220
gem5::ArmISA::TableWalker::WalkerState::stage2Req
bool stage2Req
Flag indicating if a second stage of lookup is required.
Definition: table_walker.hh:886
gem5::BaseMMU::Translation
Definition: mmu.hh:58
gem5::ArmISA::MMU::translateTiming
void translateTiming(const RequestPtr &req, ThreadContext *tc, Translation *translation, Mode mode) override
Definition: mmu.hh:219
gem5::ArmISA::TableWalker::Port::sendTimingReq
void sendTimingReq(Addr desc_addr, int size, uint8_t *data, Request::Flags flag, Tick delay, Event *event)
Definition: table_walker.cc:195
warn_if
#define warn_if(cond,...)
Conditional warning macro that checks the supplied condition and only prints a warning if the conditi...
Definition: logging.hh:273
gem5::ArmISA::TableWalker::readDataUntimed
Fault readDataUntimed(ThreadContext *tc, Addr vaddr, Addr desc_addr, uint8_t *data, int num_bytes, Request::Flags flags, BaseMMU::Mode mode, MMU::ArmTranslationType tran_type, bool functional)
Definition: table_walker.cc:2480
gem5::ArmISA::MMU::ArmTranslationType
ArmTranslationType
Definition: mmu.hh:113
gem5::ArmISA::EL0
@ EL0
Definition: types.hh:266
panic_if
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
Definition: logging.hh:204
gem5::ArmISA::TableWalker::LongDescriptor::rwTable
uint8_t rwTable() const
R/W protection flag for subsequent levels of lookup.
Definition: table_walker.hh:767
gem5::ArmISA::TlbEntry::MemoryType::StronglyOrdered
@ StronglyOrdered
gem5::ArmISA::TableWalker
Definition: table_walker.hh:66
gem5::ArmISA::MISCREG_HCR
@ MISCREG_HCR
Definition: misc.hh:248
gem5::ArmISA::TableWalker::WalkerState::rwTable
bool rwTable
Definition: table_walker.hh:877
gem5::ArmISA::TableWalker::currState
WalkerState * currState
Definition: table_walker.hh:1046
gem5::ArmISA::TableWalker::TableWalker
TableWalker(const Params &p)
Definition: table_walker.cc:62
base.hh
gem5::ArmISA::TlbEntry::xn
bool xn
Definition: pagetable.hh:258
gem5::Port
Ports are used to interface objects to each other.
Definition: port.hh:61
gem5::ArmISA::TableWalker::WalkerState::hpd
bool hpd
Hierarchical access permission disable.
Definition: table_walker.hh:883
gem5::ArmISA::n
Bitfield< 31 > n
Definition: misc_types.hh:456
gem5::ArmISA::TableWalker::LongDescriptor::memAttr
uint8_t memAttr() const
Memory attributes, only used by stage 2 translations.
Definition: table_walker.hh:727
gem5::ArmISA::MISCREG_VTTBR_EL2
@ MISCREG_VTTBR_EL2
Definition: misc.hh:605
gem5::ArmISA::TableWalker::drain
DrainState drain() override
Draining is the process of clearing out the states of SimObjects.These are the SimObjects that are pa...
Definition: table_walker.cc:258
gem5::ArmISA::MMU::translateFunctional
TranslationGenPtr translateFunctional(Addr start, Addr size, ThreadContext *tc, Mode mode, Request::Flags flags) override
Returns a translation generator for a region of virtual addresses, instead of directly translating a ...
Definition: mmu.hh:90
gem5::ArmISA::TableWalker::drainResume
void drainResume() override
Resume execution after a successful drain.
Definition: table_walker.cc:279
gem5::ArmISA::TableWalker::LongDescriptor::domain
TlbEntry::DomainType domain() const override
Definition: table_walker.hh:711
gem5::ArmISA::ArmFault::TranslationLL
@ TranslationLL
Definition: faults.hh:101
gem5::ArmISA::TableWalker::memAttrs
void memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr, uint8_t texcb, bool s)
Definition: table_walker.cc:1240
gem5::ArmISA::ps
Bitfield< 18, 16 > ps
Definition: misc_types.hh:514
gem5::ArmISA::TableWalker::mmu
MMU * mmu
The MMU to forward second stage lookups to.
Definition: table_walker.hh:1029
gem5::ArmISA::TableWalker::~TableWalker
virtual ~TableWalker()
Definition: table_walker.cc:98
gem5::ArmISA::TableWalker::doL2DescriptorWrapper
void doL2DescriptorWrapper()
Definition: table_walker.cc:2082
gem5::ArmISA::TableWalker::LongDescriptor::attrIndx
uint8_t attrIndx() const
Attribute index.
Definition: table_walker.hh:719
gem5::ArmISA::TableWalker::memAttrsLPAE
void memAttrsLPAE(ThreadContext *tc, TlbEntry &te, LongDescriptor &lDescriptor)
Definition: table_walker.cc:1447
gem5::ArmISA::TableWalker::WalkerState::isUncacheable
bool isUncacheable
True if table walks are uncacheable (for table descriptors)
Definition: table_walker.hh:872
gem5::ArmISA::longDescFormatInUse
bool longDescFormatInUse(ThreadContext *tc)
Definition: utility.cc:135
gem5::ArmISA::TableWalker::WalkerState::l2Desc
L2Descriptor l2Desc
Definition: table_walker.hh:905
gem5::ArmISA::ArmFault::annotate
virtual void annotate(AnnotationIDs id, uint64_t val)
Definition: faults.hh:238
gem5::ArmISA::MISCREG_VSTCR_EL2
@ MISCREG_VSTCR_EL2
Definition: misc.hh:608
gem5::Request::UNCACHEABLE
@ UNCACHEABLE
The request is to an uncacheable address.
Definition: request.hh:125
gem5::ArmISA::TableWalker::LongDescriptor::offsetBits
uint8_t offsetBits() const override
Return the bit width of the page/block offset.
Definition: table_walker.hh:525
gem5::ArmISA::TableWalker::LongDescriptor::dbgHeader
std::string dbgHeader() const override
Definition: table_walker.hh:451
gem5::statistics::Group
Statistics container.
Definition: group.hh:93
gem5::ArmISA::id
Bitfield< 33 > id
Definition: misc_types.hh:251
gem5::ArmISA::ArmFault::AccessFlagLL
@ AccessFlagLL
Definition: faults.hh:102
gem5::ArmISA::TableWalker::checkAddrSizeFaultAArch64
bool checkAddrSizeFaultAArch64(Addr addr, int pa_range)
Returns true if the address exceeds the range permitted by the system-wide setting or by the TCR_ELx ...
Definition: table_walker.cc:882
gem5::ArmISA::ArmFault::S1PTW
@ S1PTW
Definition: faults.hh:134
gem5::ArmISA::MMU::release
const ArmRelease * release() const
Definition: mmu.hh:345
gem5::ArmISA::snsBankedIndex
int snsBankedIndex(MiscRegIndex reg, ThreadContext *tc)
Definition: misc.cc:1313
gem5::ThreadContext::getCpuPtr
virtual BaseCPU * getCpuPtr()=0
gem5::ArmISA::TableWalker::L2Descriptor::invalid
bool invalid() const
Is the entry invalid.
Definition: table_walker.hh:326
gem5::ArmISA::TableWalker::isStage2
const bool isStage2
Indicates whether this table walker is part of the stage 2 mmu.
Definition: table_walker.hh:1038
gem5::RequestorID
uint16_t RequestorID
Definition: request.hh:95
gem5::ArmISA::TableWalker::Port::recvTimingResp
bool recvTimingResp(PacketPtr pkt) override
Receive a timing response from the peer.
Definition: table_walker.cc:206
gem5::ArmISA::TableWalker::TableWalkerState::event
Event * event
Definition: table_walker.hh:936
gem5::ClockedObject::Params
ClockedObjectParams Params
Parameters of ClockedObject.
Definition: clocked_object.hh:240
gem5::statistics::DataWrap::flags
Derived & flags(Flags _flags)
Set the flags and marks this stat to print at the end of simulation.
Definition: statistics.hh:358
gem5::MipsISA::vaddr
vaddr
Definition: pra_constants.hh:278
gem5::ArmISA::TableWalker::doL3LongDescriptorWrapper
void doL3LongDescriptorWrapper()
Definition: table_walker.cc:2144
gem5::ArmISA::TableWalker::stats
gem5::ArmISA::TableWalker::TableWalkerStats stats
gem5::Packet::getAddr
Addr getAddr() const
Definition: packet.hh:781
gem5::ArmISA::MISCREG_HCR_EL2
@ MISCREG_HCR_EL2
Definition: misc.hh:586
gem5::ArmISA::TableWalker::DescriptorBase::ap
virtual uint8_t ap() const =0
gem5::ArmISA::TableWalker::WalkerState::hcr
HCR hcr
Cached copy of the hcr as it existed when translation began.
Definition: table_walker.hh:857
gem5::ArmISA::MISCREG_VSTTBR_EL2
@ MISCREG_VSTTBR_EL2
Definition: misc.hh:607
gem5::ArmISA::TableWalker::WalkerState::WalkerState
WalkerState()
Definition: table_walker.cc:125
gem5::ArmISA::MISCREG_TTBR0_EL1
@ MISCREG_TTBR0_EL1
Definition: misc.hh:597
gem5::ArmISA::TableWalker::WalkerState::aarch64
bool aarch64
If the access is performed in AArch64 state.
Definition: table_walker.hh:806
gem5::ArmISA::TableWalker::toLookupLevel
static LookupLevel toLookupLevel(uint8_t lookup_level_as_int)
Definition: table_walker.cc:2417
gem5::ArmISA::TableWalker::processWalk
Fault processWalk()
Definition: table_walker.cc:580
gem5
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
Definition: tlb.cc:60
gem5::ArmISA::TableWalker::WalkerState::timing
bool timing
If the mode is timing or atomic.
Definition: table_walker.hh:892
gem5::ArmISA::computeAddrTop
int computeAddrTop(ThreadContext *tc, bool selbit, bool is_instr, TCR tcr, ExceptionLevel el)
Definition: utility.cc:424
gem5::statistics::total
const FlagsType total
Print the total.
Definition: info.hh:60
gem5::ArmISA::TableWalker::DescriptorBase::shareable
virtual bool shareable() const
Definition: table_walker.hh:94
gem5::statistics::VectorBase::init
Derived & init(size_type size)
Set this vector to have the given size.
Definition: statistics.hh:1040
gem5::ArmISA::stride
Bitfield< 21, 20 > stride
Definition: misc_types.hh:447
gem5::ArmISA::TableWalker::LongDescriptor::userTable
uint8_t userTable() const
User/privileged mode protection flag for subsequent levels of lookup.
Definition: table_walker.hh:776
gem5::ArmISA::MMU
Definition: mmu.hh:59
gem5::ArmISA::TableWalker::setMmu
void setMmu(MMU *_mmu)
Definition: table_walker.cc:119
gem5::ArmISA::TableWalker::doLongDescriptor
void doLongDescriptor()
Definition: table_walker.cc:1799
gem5::ArmISA::TableWalker::WalkerState
Definition: table_walker.hh:799
gem5::ArmISA::MMU::hasWalkCache
bool hasWalkCache() const
Definition: mmu.hh:347
gem5::BaseMMU::Translation::finish
virtual void finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc, BaseMMU::Mode mode)=0
gem5::ArmISA::TableWalker::pendingChange
void pendingChange()
Definition: table_walker.cc:2434
gem5::ArmISA::TableWalker::DescriptorBase::domain
virtual TlbEntry::DomainType domain() const =0
gem5::ArmISA::TableWalker::WalkerState::fault
Fault fault
The fault that we are going to return.
Definition: table_walker.hh:829
gem5::Packet::isResponse
bool isResponse() const
Definition: packet.hh:587
thread_context.hh
gem5::ArmISA::TableWalker::LongDescriptor::xn
bool xn() const override
Is execution allowed on this mapping?
Definition: table_walker.hh:619
gem5::ArmISA::MMU::lookup
TlbEntry * lookup(Addr vpn, uint16_t asn, vmid_t vmid, bool hyp, bool secure, bool functional, bool ignore_asn, ExceptionLevel target_el, bool in_host, bool stage2, BaseMMU::Mode mode)
Lookup an entry in the TLB.
Definition: mmu.cc:1407
gem5::ArmISA::ArmFault::OVA
@ OVA
Definition: faults.hh:135
gem5::ArmISA::TableWalker::tlb
TLB * tlb
TLB that is initiating these table walks.
Definition: table_walker.hh:1041
gem5::ArmISA::MISCREG_TTBR1_EL2
@ MISCREG_TTBR1_EL2
Definition: misc.hh:820
gem5::DrainState::Draining
@ Draining
Draining buffers pending serialization/handover.
gem5::ArmISA::TableWalker::WalkerState::l1Desc
L1Descriptor l1Desc
Short-format descriptors.
Definition: table_walker.hh:904
gem5::ArmISA::TlbEntry::pxn
bool pxn
Definition: pagetable.hh:259
gem5::ArmISA::GrainSize
GrainSize
Definition: pagetable.hh:61
gem5::ArmISA::TableWalker::Stage2Walk::Stage2Walk
Stage2Walk(TableWalker &_parent, uint8_t *_data, Event *_event, Addr vaddr, BaseMMU::Mode mode, MMU::ArmTranslationType tran_type)
Definition: table_walker.cc:2532
gem5::ArmISA::TableWalker::TableWalkerStats::walksShortTerminatedAtLevel
statistics::Vector walksShortTerminatedAtLevel
Definition: table_walker.hh:1067
gem5::ArmISA::TlbEntry::ap
uint8_t ap
Definition: pagetable.hh:225
gem5::ArmISA::TableWalker::Port::createPacket
PacketPtr createPacket(Addr desc_addr, int size, uint8_t *data, Request::Flags flag, Tick delay, Event *event)
Definition: table_walker.cc:150
gem5::ArmISA::TableWalker::WalkerState::mode
BaseMMU::Mode mode
Save mode for use in delayed response.
Definition: table_walker.hh:898
gem5::ArmISA::MISCREG_TCR_EL1
@ MISCREG_TCR_EL1
Definition: misc.hh:601
gem5::ArmISA::TableWalker::LongDescriptor::pxn
bool pxn() const
Is privileged execution allowed on this mapping? (LPAE only)
Definition: table_walker.hh:627
gem5::ArmISA::ExceptionLevel
ExceptionLevel
Definition: types.hh:264
panic
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:178
gem5::ArmISA::TableWalker::pending
bool pending
If a timing translation is currently in progress.
Definition: table_walker.hh:1049
gem5::ArmISA::mode
Bitfield< 4, 0 > mode
Definition: misc_types.hh:74
gem5::X86ISA::addr
Bitfield< 3 > addr
Definition: types.hh:84
gem5::ArmISA::TableWalker::insertTableEntry
void insertTableEntry(DescriptorBase &descriptor, bool longDescriptor)
Definition: table_walker.cc:2338
gem5::ArmISA::TlbEntry::lookupLevel
LookupLevel lookupLevel
Definition: pagetable.hh:215
gem5::ArmISA::TableWalker::TableWalkerStats::squashedBefore
statistics::Scalar squashedBefore
Definition: table_walker.hh:1069

Generated on Tue Dec 21 2021 11:34:22 for gem5 by doxygen 1.8.17