gem5  v22.0.0.1
table_walker.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2010, 2012-2019, 2021 Arm Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions are
16  * met: redistributions of source code must retain the above copyright
17  * notice, this list of conditions and the following disclaimer;
18  * redistributions in binary form must reproduce the above copyright
19  * notice, this list of conditions and the following disclaimer in the
20  * documentation and/or other materials provided with the distribution;
21  * neither the name of the copyright holders nor the names of its
22  * contributors may be used to endorse or promote products derived from
23  * this software without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  */
37 #include "arch/arm/table_walker.hh"
38 
39 #include <cassert>
40 #include <memory>
41 
42 #include "arch/arm/faults.hh"
43 #include "arch/arm/mmu.hh"
44 #include "arch/arm/pagetable.hh"
45 #include "arch/arm/system.hh"
46 #include "arch/arm/tlb.hh"
47 #include "base/compiler.hh"
48 #include "cpu/base.hh"
49 #include "cpu/thread_context.hh"
50 #include "debug/Checkpoint.hh"
51 #include "debug/Drain.hh"
52 #include "debug/PageTableWalker.hh"
53 #include "debug/TLB.hh"
54 #include "debug/TLBVerbose.hh"
55 #include "sim/system.hh"
56 
57 namespace gem5
58 {
59 
60 using namespace ArmISA;
61 
63  : ClockedObject(p),
64  requestorId(p.sys->getRequestorId(this)),
65  port(new Port(this, requestorId)),
66  isStage2(p.is_stage2), tlb(NULL),
67  currState(NULL), pending(false),
68  numSquashable(p.num_squash_per_cycle),
69  release(nullptr),
70  stats(this),
71  pendingReqs(0),
72  pendingChangeTick(curTick()),
73  doL1DescEvent([this]{ doL1DescriptorWrapper(); }, name()),
74  doL2DescEvent([this]{ doL2DescriptorWrapper(); }, name()),
75  doL0LongDescEvent([this]{ doL0LongDescriptorWrapper(); }, name()),
76  doL1LongDescEvent([this]{ doL1LongDescriptorWrapper(); }, name()),
77  doL2LongDescEvent([this]{ doL2LongDescriptorWrapper(); }, name()),
78  doL3LongDescEvent([this]{ doL3LongDescriptorWrapper(); }, name()),
79  LongDescEventByLevel { &doL0LongDescEvent, &doL1LongDescEvent,
80  &doL2LongDescEvent, &doL3LongDescEvent },
81  doProcessEvent([this]{ processWalkWrapper(); }, name())
82 {
83  sctlr = 0;
84 
85  // Cache system-level properties
86  if (FullSystem) {
87  ArmSystem *arm_sys = dynamic_cast<ArmSystem *>(p.sys);
88  assert(arm_sys);
89  _physAddrRange = arm_sys->physAddrRange();
90  _haveLargeAsid64 = arm_sys->haveLargeAsid64();
91  } else {
92  _haveLargeAsid64 = false;
93  _physAddrRange = 48;
94  }
95 
96 }
97 
99 {
100  ;
101 }
102 
105 {
106  return static_cast<Port&>(getPort("port"));
107 }
108 
109 Port &
110 TableWalker::getPort(const std::string &if_name, PortID idx)
111 {
112  if (if_name == "port") {
113  return *port;
114  }
115  return ClockedObject::getPort(if_name, idx);
116 }
117 
118 void
120 {
121  mmu = _mmu;
122  release = mmu->release();
123 }
124 
126  tc(nullptr), aarch64(false), el(EL0), physAddrRange(0), req(nullptr),
127  asid(0), vmid(0), isHyp(false), transState(nullptr),
128  vaddr(0), vaddr_tainted(0),
129  sctlr(0), scr(0), cpsr(0), tcr(0),
130  htcr(0), hcr(0), vtcr(0),
131  isWrite(false), isFetch(false), isSecure(false),
132  isUncacheable(false),
133  secureLookup(false), rwTable(false), userTable(false), xnTable(false),
134  pxnTable(false), hpd(false), stage2Req(false),
135  stage2Tran(nullptr), timing(false), functional(false),
136  mode(BaseMMU::Read), tranType(MMU::NormalTran), l2Desc(l1Desc),
137  delayed(false), tableWalker(nullptr)
138 {
139 }
140 
142  : QueuedRequestPort(_walker->name() + ".port", _walker,
143  reqQueue, snoopRespQueue),
144  reqQueue(*_walker, *this), snoopRespQueue(*_walker, *this),
145  requestorId(id)
146 {
147 }
148 
149 PacketPtr
151  Addr desc_addr, int size,
152  uint8_t *data, Request::Flags flags, Tick delay,
153  Event *event)
154 {
155  RequestPtr req = std::make_shared<Request>(
156  desc_addr, size, flags, requestorId);
157  req->taskId(context_switch_task_id::DMA);
158 
159  PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
160  pkt->dataStatic(data);
161 
162  auto state = new TableWalkerState;
163  state->event = event;
164  state->delay = delay;
165 
166  pkt->senderState = state;
167  return pkt;
168 }
169 
170 void
172  Addr desc_addr, int size,
173  uint8_t *data, Request::Flags flags)
174 {
175  auto pkt = createPacket(desc_addr, size, data, flags, 0, nullptr);
176 
177  sendFunctional(pkt);
178 
179  handleRespPacket(pkt);
180 }
181 
182 void
184  Addr desc_addr, int size,
185  uint8_t *data, Request::Flags flags, Tick delay)
186 {
187  auto pkt = createPacket(desc_addr, size, data, flags, delay, nullptr);
188 
189  Tick lat = sendAtomic(pkt);
190 
191  handleRespPacket(pkt, lat);
192 }
193 
194 void
196  Addr desc_addr, int size,
197  uint8_t *data, Request::Flags flags, Tick delay,
198  Event *event)
199 {
200  auto pkt = createPacket(desc_addr, size, data, flags, delay, event);
201 
202  schedTimingReq(pkt, curTick());
203 }
204 
205 bool
207 {
208  // We shouldn't ever get a cacheable block in Modified state.
209  assert(pkt->req->isUncacheable() ||
210  !(pkt->cacheResponding() && !pkt->hasSharers()));
211 
212  handleRespPacket(pkt);
213 
214  return true;
215 }
216 
217 void
219 {
220  // Should always see a response with a sender state.
221  assert(pkt->isResponse());
222 
223  // Get the DMA sender state.
224  auto *state = dynamic_cast<TableWalkerState*>(pkt->senderState);
225  assert(state);
226 
227  handleResp(state, pkt->getAddr(), pkt->req->getSize(), delay);
228 
229  delete pkt;
230 }
231 
232 void
234  Addr size, Tick delay)
235 {
236  if (state->event) {
237  owner.schedule(state->event, curTick() + delay);
238  }
239  delete state;
240 }
241 
242 void
244 {
245  if (drainState() == DrainState::Draining &&
246  stateQueues[LookupLevel::L0].empty() &&
247  stateQueues[LookupLevel::L1].empty() &&
248  stateQueues[LookupLevel::L2].empty() &&
249  stateQueues[LookupLevel::L3].empty() &&
250  pendingQueue.empty()) {
251 
252  DPRINTF(Drain, "TableWalker done draining, processing drain event\n");
253  signalDrainDone();
254  }
255 }
256 
259 {
260  bool state_queues_not_empty = false;
261 
262  for (int i = 0; i < LookupLevel::Num_ArmLookupLevel; ++i) {
263  if (!stateQueues[i].empty()) {
264  state_queues_not_empty = true;
265  break;
266  }
267  }
268 
269  if (state_queues_not_empty || pendingQueue.size()) {
270  DPRINTF(Drain, "TableWalker not drained\n");
271  return DrainState::Draining;
272  } else {
273  DPRINTF(Drain, "TableWalker free, no need to drain\n");
274  return DrainState::Drained;
275  }
276 }
277 
278 void
280 {
281  if (params().sys->isTimingMode() && currState) {
282  delete currState;
283  currState = NULL;
284  pendingChange();
285  }
286 }
287 
288 Fault
289 TableWalker::walk(const RequestPtr &_req, ThreadContext *_tc, uint16_t _asid,
290  vmid_t _vmid, bool _isHyp, MMU::Mode _mode,
291  MMU::Translation *_trans, bool _timing, bool _functional,
292  bool secure, MMU::ArmTranslationType tranType,
293  bool _stage2Req, const TlbEntry *walk_entry)
294 {
295  assert(!(_functional && _timing));
296  ++stats.walks;
297 
298  WalkerState *savedCurrState = NULL;
299 
300  if (!currState && !_functional) {
301  // For atomic mode, a new WalkerState instance should be only created
302  // once per TLB. For timing mode, a new instance is generated for every
303  // TLB miss.
304  DPRINTF(PageTableWalker, "creating new instance of WalkerState\n");
305 
306  currState = new WalkerState();
307  currState->tableWalker = this;
308  } else if (_functional) {
309  // If we are mixing functional mode with timing (or even
310  // atomic), we need to to be careful and clean up after
311  // ourselves to not risk getting into an inconsistent state.
312  DPRINTF(PageTableWalker,
313  "creating functional instance of WalkerState\n");
314  savedCurrState = currState;
315  currState = new WalkerState();
316  currState->tableWalker = this;
317  } else if (_timing) {
318  // This is a translation that was completed and then faulted again
319  // because some underlying parameters that affect the translation
320  // changed out from under us (e.g. asid). It will either be a
321  // misprediction, in which case nothing will happen or we'll use
322  // this fault to re-execute the faulting instruction which should clean
323  // up everything.
324  if (currState->vaddr_tainted == _req->getVaddr()) {
326  return std::make_shared<ReExec>();
327  }
328  }
329  pendingChange();
330 
332  currState->tc = _tc;
333  // ARM DDI 0487A.f (ARMv8 ARM) pg J8-5672
334  // aarch32/translation/translation/AArch32.TranslateAddress dictates
335  // even AArch32 EL0 will use AArch64 translation if EL1 is in AArch64.
336  if (isStage2) {
337  currState->el = EL1;
338  currState->aarch64 = ELIs64(_tc, EL2);
339  } else {
340  currState->el =
341  MMU::tranTypeEL(_tc->readMiscReg(MISCREG_CPSR), tranType);
342  currState->aarch64 =
343  ELIs64(_tc, currState->el == EL0 ? EL1 : currState->el);
344  }
345  currState->transState = _trans;
346  currState->req = _req;
347  if (walk_entry) {
348  currState->walkEntry = *walk_entry;
349  } else {
351  }
353  currState->asid = _asid;
354  currState->vmid = _vmid;
355  currState->isHyp = _isHyp;
356  currState->timing = _timing;
357  currState->functional = _functional;
358  currState->mode = _mode;
359  currState->tranType = tranType;
360  currState->isSecure = secure;
362 
365  currState->vaddr_tainted = currState->req->getVaddr();
366  if (currState->aarch64)
370  else
372 
373  if (currState->aarch64) {
375  if (isStage2) {
377  if (currState->secureLookup) {
378  currState->vtcr =
380  } else {
381  currState->vtcr =
383  }
384  } else switch (currState->el) {
385  case EL0:
386  if (HaveExt(currState->tc, ArmExtension::FEAT_VHE) &&
387  currState->hcr.tge == 1 && currState->hcr.e2h ==1) {
390  } else {
393  }
394  break;
395  case EL1:
398  break;
399  case EL2:
400  assert(release->has(ArmExtension::VIRTUALIZATION));
403  break;
404  case EL3:
405  assert(release->has(ArmExtension::SECURITY));
408  break;
409  default:
410  panic("Invalid exception level");
411  break;
412  }
413  } else {
421  }
422  sctlr = currState->sctlr;
423 
426 
428 
429  currState->stage2Req = _stage2Req && !isStage2;
430 
431  bool long_desc_format = currState->aarch64 || _isHyp || isStage2 ||
433 
434  if (long_desc_format) {
435  // Helper variables used for hierarchical permissions
437  currState->rwTable = true;
438  currState->userTable = true;
439  currState->xnTable = false;
440  currState->pxnTable = false;
441 
443  } else {
445  }
446 
447  if (!currState->timing) {
448  Fault fault = NoFault;
449  if (currState->aarch64)
450  fault = processWalkAArch64();
451  else if (long_desc_format)
452  fault = processWalkLPAE();
453  else
454  fault = processWalk();
455 
456  // If this was a functional non-timing access restore state to
457  // how we found it.
458  if (currState->functional) {
459  delete currState;
460  currState = savedCurrState;
461  }
462  return fault;
463  }
464 
465  if (pending || pendingQueue.size()) {
466  pendingQueue.push_back(currState);
467  currState = NULL;
468  pendingChange();
469  } else {
470  pending = true;
471  pendingChange();
472  if (currState->aarch64)
473  return processWalkAArch64();
474  else if (long_desc_format)
475  return processWalkLPAE();
476  else
477  return processWalk();
478  }
479 
480  return NoFault;
481 }
482 
483 void
485 {
486  assert(!currState);
487  assert(pendingQueue.size());
488  pendingChange();
489  currState = pendingQueue.front();
490 
491  // Check if a previous walk filled this request already
492  // @TODO Should this always be the TLB or should we look in the stage2 TLB?
494  currState->vmid, currState->isHyp, currState->isSecure, true, false,
495  currState->el, false, isStage2, currState->mode);
496 
497  // Check if we still need to have a walk for this request. If the requesting
498  // instruction has been squashed, or a previous walk has filled the TLB with
499  // a match, we just want to get rid of the walk. The latter could happen
500  // when there are multiple outstanding misses to a single page and a
501  // previous request has been successfully translated.
502  if (!currState->transState->squashed() && (!te || te->partial)) {
503  // We've got a valid request, lets process it
504  pending = true;
505  pendingQueue.pop_front();
506  // Keep currState in case one of the processWalk... calls NULLs it
507 
508  if (te && te->partial) {
509  currState->walkEntry = *te;
510  }
511  WalkerState *curr_state_copy = currState;
512  Fault f;
513  if (currState->aarch64)
514  f = processWalkAArch64();
515  else if (longDescFormatInUse(currState->tc) ||
517  f = processWalkLPAE();
518  else
519  f = processWalk();
520 
521  if (f != NoFault) {
522  curr_state_copy->transState->finish(f, curr_state_copy->req,
523  curr_state_copy->tc, curr_state_copy->mode);
524 
525  delete curr_state_copy;
526  }
527  return;
528  }
529 
530 
531  // If the instruction that we were translating for has been
532  // squashed we shouldn't bother.
533  unsigned num_squashed = 0;
534  ThreadContext *tc = currState->tc;
535  while ((num_squashed < numSquashable) && currState &&
537  (te && !te->partial))) {
538  pendingQueue.pop_front();
539  num_squashed++;
541 
542  DPRINTF(TLB, "Squashing table walk for address %#x\n",
544 
545  if (currState->transState->squashed()) {
546  // finish the translation which will delete the translation object
548  std::make_shared<UnimpFault>("Squashed Inst"),
550  } else {
551  // translate the request now that we know it will work
556  }
557 
558  // delete the current request
559  delete currState;
560 
561  // peak at the next one
562  if (pendingQueue.size()) {
563  currState = pendingQueue.front();
566  false, currState->el, false, isStage2, currState->mode);
567  } else {
568  // Terminate the loop, nothing more to do
569  currState = NULL;
570  }
571  }
572  pendingChange();
573 
574  // if we still have pending translations, schedule more work
575  nextWalk(tc);
576  currState = NULL;
577 }
578 
579 Fault
581 {
582  Addr ttbr = 0;
583 
584  // For short descriptors, translation configs are held in
585  // TTBR1.
588 
589  const auto irgn0_mask = 0x1;
590  const auto irgn1_mask = 0x40;
591  currState->isUncacheable = (ttbr1 & (irgn0_mask | irgn1_mask)) == 0;
592 
593  // If translation isn't enabled, we shouldn't be here
594  assert(currState->sctlr.m || isStage2);
595  const bool is_atomic = currState->req->isAtomic();
596  const bool have_security = release->has(ArmExtension::SECURITY);
597 
598  DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
600  32 - currState->ttbcr.n));
601 
603 
604  if (currState->ttbcr.n == 0 || !mbits(currState->vaddr, 31,
605  32 - currState->ttbcr.n)) {
606  DPRINTF(TLB, " - Selecting TTBR0\n");
607  // Check if table walk is allowed when Security Extensions are enabled
608  if (have_security && currState->ttbcr.pd0) {
609  if (currState->isFetch)
610  return std::make_shared<PrefetchAbort>(
612  ArmFault::TranslationLL + LookupLevel::L1,
613  isStage2,
615  else
616  return std::make_shared<DataAbort>(
619  is_atomic ? false : currState->isWrite,
620  ArmFault::TranslationLL + LookupLevel::L1, isStage2,
622  }
625  } else {
626  DPRINTF(TLB, " - Selecting TTBR1\n");
627  // Check if table walk is allowed when Security Extensions are enabled
628  if (have_security && currState->ttbcr.pd1) {
629  if (currState->isFetch)
630  return std::make_shared<PrefetchAbort>(
632  ArmFault::TranslationLL + LookupLevel::L1,
633  isStage2,
635  else
636  return std::make_shared<DataAbort>(
639  is_atomic ? false : currState->isWrite,
640  ArmFault::TranslationLL + LookupLevel::L1, isStage2,
642  }
643  ttbr = ttbr1;
644  currState->ttbcr.n = 0;
645  }
646 
647  Addr l1desc_addr = mbits(ttbr, 31, 14 - currState->ttbcr.n) |
648  (bits(currState->vaddr, 31 - currState->ttbcr.n, 20) << 2);
649  DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr,
650  currState->isSecure ? "s" : "ns");
651 
652  // Trickbox address check
653  Fault f;
654  f = testWalk(l1desc_addr, sizeof(uint32_t),
655  TlbEntry::DomainType::NoAccess, LookupLevel::L1, isStage2);
656  if (f) {
657  DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
658  if (currState->timing) {
659  pending = false;
661  currState = NULL;
662  } else {
663  currState->tc = NULL;
664  currState->req = NULL;
665  }
666  return f;
667  }
668 
670  if (currState->sctlr.c == 0 || currState->isUncacheable) {
672  }
673 
674  if (currState->isSecure) {
675  flag.set(Request::SECURE);
676  }
677 
678  bool delayed;
679  delayed = fetchDescriptor(l1desc_addr, (uint8_t*)&currState->l1Desc.data,
680  sizeof(uint32_t), flag, LookupLevel::L1,
681  &doL1DescEvent,
683  if (!delayed) {
684  f = currState->fault;
685  }
686 
687  return f;
688 }
689 
690 Fault
692 {
693  Addr ttbr, ttbr0_max, ttbr1_min, desc_addr;
694  int tsz, n;
695  LookupLevel start_lookup_level = LookupLevel::L1;
696 
697  DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n",
699 
701 
703  if (currState->isSecure)
704  flag.set(Request::SECURE);
705 
706  // work out which base address register to use, if in hyp mode we always
707  // use HTTBR
708  if (isStage2) {
709  DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n");
711  tsz = sext<4>(currState->vtcr.t0sz);
712  start_lookup_level = currState->vtcr.sl0 ?
713  LookupLevel::L1 : LookupLevel::L2;
714  currState->isUncacheable = currState->vtcr.irgn0 == 0;
715  } else if (currState->isHyp) {
716  DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n");
718  tsz = currState->htcr.t0sz;
719  currState->isUncacheable = currState->htcr.irgn0 == 0;
720  } else {
721  assert(longDescFormatInUse(currState->tc));
722 
723  // Determine boundaries of TTBR0/1 regions
724  if (currState->ttbcr.t0sz)
725  ttbr0_max = (1ULL << (32 - currState->ttbcr.t0sz)) - 1;
726  else if (currState->ttbcr.t1sz)
727  ttbr0_max = (1ULL << 32) -
728  (1ULL << (32 - currState->ttbcr.t1sz)) - 1;
729  else
730  ttbr0_max = (1ULL << 32) - 1;
731  if (currState->ttbcr.t1sz)
732  ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz));
733  else
734  ttbr1_min = (1ULL << (32 - currState->ttbcr.t0sz));
735 
736  const bool is_atomic = currState->req->isAtomic();
737 
738  // The following code snippet selects the appropriate translation table base
739  // address (TTBR0 or TTBR1) and the appropriate starting lookup level
740  // depending on the address range supported by the translation table (ARM
741  // ARM issue C B3.6.4)
742  if (currState->vaddr <= ttbr0_max) {
743  DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n");
744  // Check if table walk is allowed
745  if (currState->ttbcr.epd0) {
746  if (currState->isFetch)
747  return std::make_shared<PrefetchAbort>(
749  ArmFault::TranslationLL + LookupLevel::L1,
750  isStage2,
752  else
753  return std::make_shared<DataAbort>(
756  is_atomic ? false : currState->isWrite,
757  ArmFault::TranslationLL + LookupLevel::L1,
758  isStage2,
760  }
763  tsz = currState->ttbcr.t0sz;
764  currState->isUncacheable = currState->ttbcr.irgn0 == 0;
765  if (ttbr0_max < (1ULL << 30)) // Upper limit < 1 GiB
766  start_lookup_level = LookupLevel::L2;
767  } else if (currState->vaddr >= ttbr1_min) {
768  DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
769  // Check if table walk is allowed
770  if (currState->ttbcr.epd1) {
771  if (currState->isFetch)
772  return std::make_shared<PrefetchAbort>(
774  ArmFault::TranslationLL + LookupLevel::L1,
775  isStage2,
777  else
778  return std::make_shared<DataAbort>(
781  is_atomic ? false : currState->isWrite,
782  ArmFault::TranslationLL + LookupLevel::L1,
783  isStage2,
785  }
788  tsz = currState->ttbcr.t1sz;
789  currState->isUncacheable = currState->ttbcr.irgn1 == 0;
790  // Lower limit >= 3 GiB
791  if (ttbr1_min >= (1ULL << 31) + (1ULL << 30))
792  start_lookup_level = LookupLevel::L2;
793  } else {
794  // Out of boundaries -> translation fault
795  if (currState->isFetch)
796  return std::make_shared<PrefetchAbort>(
798  ArmFault::TranslationLL + LookupLevel::L1,
799  isStage2,
801  else
802  return std::make_shared<DataAbort>(
805  is_atomic ? false : currState->isWrite,
806  ArmFault::TranslationLL + LookupLevel::L1,
808  }
809 
810  }
811 
812  // Perform lookup (ARM ARM issue C B3.6.6)
813  if (start_lookup_level == LookupLevel::L1) {
814  n = 5 - tsz;
815  desc_addr = mbits(ttbr, 39, n) |
816  (bits(currState->vaddr, n + 26, 30) << 3);
817  DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
818  desc_addr, currState->isSecure ? "s" : "ns");
819  } else {
820  // Skip first-level lookup
821  n = (tsz >= 2 ? 14 - tsz : 12);
822  desc_addr = mbits(ttbr, 39, n) |
823  (bits(currState->vaddr, n + 17, 21) << 3);
824  DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
825  desc_addr, currState->isSecure ? "s" : "ns");
826  }
827 
828  // Trickbox address check
829  Fault f = testWalk(desc_addr, sizeof(uint64_t),
830  TlbEntry::DomainType::NoAccess, start_lookup_level,
831  isStage2);
832  if (f) {
833  DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
834  if (currState->timing) {
835  pending = false;
837  currState = NULL;
838  } else {
839  currState->tc = NULL;
840  currState->req = NULL;
841  }
842  return f;
843  }
844 
845  if (currState->sctlr.c == 0 || currState->isUncacheable) {
847  }
848 
849  currState->longDesc.lookupLevel = start_lookup_level;
850  currState->longDesc.aarch64 = false;
852 
853  bool delayed = fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
854  sizeof(uint64_t), flag, start_lookup_level,
855  LongDescEventByLevel[start_lookup_level],
857  if (!delayed) {
858  f = currState->fault;
859  }
860 
861  return f;
862 }
863 
864 bool
866  GrainSize tg, int tsz, bool low_range)
867 {
868  // The effective maximum input size is 48 if ARMv8.2-LVA is not
869  // supported or if the translation granule that is in use is 4KB or
870  // 16KB in size. When ARMv8.2-LVA is supported, for the 64KB
871  // translation granule size only, the effective minimum value of
872  // 52.
873  const bool have_lva = HaveExt(currState->tc, ArmExtension::FEAT_LVA);
874  int in_max = (have_lva && tg == Grain64KB) ? 52 : 48;
875  int in_min = 64 - (tg == Grain64KB ? 47 : 48);
876 
877  return tsz > in_max || tsz < in_min || (low_range ?
878  bits(currState->vaddr, top_bit, tsz) != 0x0 :
879  bits(currState->vaddr, top_bit, tsz) != mask(top_bit - tsz + 1));
880 }
881 
882 bool
884 {
885  return (pa_range != _physAddrRange &&
886  bits(addr, _physAddrRange - 1, pa_range));
887 }
888 
889 Fault
891 {
892  assert(currState->aarch64);
893 
894  DPRINTF(TLB, "Beginning table walk for address %#llx, TCR: %#llx\n",
896 
898 
899  // Determine TTBR, table size, granule size and phys. address range
900  Addr ttbr = 0;
901  int tsz = 0, ps = 0;
902  GrainSize tg = Grain4KB; // grain size computed from tg* field
903  bool fault = false;
904 
905  int top_bit = computeAddrTop(currState->tc,
906  bits(currState->vaddr, 55),
908  currState->tcr,
909  currState->el);
910 
911  bool vaddr_fault = false;
912  switch (currState->el) {
913  case EL0:
914  {
915  Addr ttbr0;
916  Addr ttbr1;
917  if (HaveExt(currState->tc, ArmExtension::FEAT_VHE) &&
918  currState->hcr.tge==1 && currState->hcr.e2h == 1) {
919  // VHE code for EL2&0 regime
922  } else {
925  }
926  switch (bits(currState->vaddr, 63,48)) {
927  case 0:
928  DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
929  ttbr = ttbr0;
930  tsz = 64 - currState->tcr.t0sz;
931  tg = GrainMap_tg0[currState->tcr.tg0];
932  currState->hpd = currState->tcr.hpd0;
933  currState->isUncacheable = currState->tcr.irgn0 == 0;
935  top_bit, tg, tsz, true);
936 
937  if (vaddr_fault || currState->tcr.epd0)
938  fault = true;
939  break;
940  case 0xffff:
941  DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
942  ttbr = ttbr1;
943  tsz = 64 - currState->tcr.t1sz;
944  tg = GrainMap_tg1[currState->tcr.tg1];
945  currState->hpd = currState->tcr.hpd1;
946  currState->isUncacheable = currState->tcr.irgn1 == 0;
948  top_bit, tg, tsz, false);
949 
950  if (vaddr_fault || currState->tcr.epd1)
951  fault = true;
952  break;
953  default:
954  // top two bytes must be all 0s or all 1s, else invalid addr
955  fault = true;
956  }
957  ps = currState->tcr.ips;
958  }
959  break;
960  case EL1:
961  if (isStage2) {
962  if (currState->secureLookup) {
963  DPRINTF(TLB, " - Selecting VSTTBR_EL2 (AArch64 stage 2)\n");
965  } else {
966  DPRINTF(TLB, " - Selecting VTTBR_EL2 (AArch64 stage 2)\n");
968  }
969  tsz = 64 - currState->vtcr.t0sz64;
970  tg = GrainMap_tg0[currState->vtcr.tg0];
971 
972  ps = currState->vtcr.ps;
973  currState->isUncacheable = currState->vtcr.irgn0 == 0;
974  } else {
975  switch (bits(currState->vaddr, top_bit)) {
976  case 0:
977  DPRINTF(TLB, " - Selecting TTBR0_EL1 (AArch64)\n");
979  tsz = 64 - currState->tcr.t0sz;
980  tg = GrainMap_tg0[currState->tcr.tg0];
981  currState->hpd = currState->tcr.hpd0;
982  currState->isUncacheable = currState->tcr.irgn0 == 0;
984  top_bit, tg, tsz, true);
985 
986  if (vaddr_fault || currState->tcr.epd0)
987  fault = true;
988  break;
989  case 0x1:
990  DPRINTF(TLB, " - Selecting TTBR1_EL1 (AArch64)\n");
992  tsz = 64 - currState->tcr.t1sz;
993  tg = GrainMap_tg1[currState->tcr.tg1];
994  currState->hpd = currState->tcr.hpd1;
995  currState->isUncacheable = currState->tcr.irgn1 == 0;
997  top_bit, tg, tsz, false);
998 
999  if (vaddr_fault || currState->tcr.epd1)
1000  fault = true;
1001  break;
1002  default:
1003  // top two bytes must be all 0s or all 1s, else invalid addr
1004  fault = true;
1005  }
1006  ps = currState->tcr.ips;
1007  }
1008  break;
1009  case EL2:
1010  switch(bits(currState->vaddr, top_bit)) {
1011  case 0:
1012  DPRINTF(TLB, " - Selecting TTBR0_EL2 (AArch64)\n");
1014  tsz = 64 - currState->tcr.t0sz;
1015  tg = GrainMap_tg0[currState->tcr.tg0];
1016  currState->hpd = currState->hcr.e2h ?
1017  currState->tcr.hpd0 : currState->tcr.hpd;
1018  currState->isUncacheable = currState->tcr.irgn0 == 0;
1020  top_bit, tg, tsz, true);
1021 
1022  if (vaddr_fault || (currState->hcr.e2h && currState->tcr.epd0))
1023  fault = true;
1024  break;
1025 
1026  case 0x1:
1027  DPRINTF(TLB, " - Selecting TTBR1_EL2 (AArch64)\n");
1029  tsz = 64 - currState->tcr.t1sz;
1030  tg = GrainMap_tg1[currState->tcr.tg1];
1031  currState->hpd = currState->tcr.hpd1;
1032  currState->isUncacheable = currState->tcr.irgn1 == 0;
1034  top_bit, tg, tsz, false);
1035 
1036  if (vaddr_fault || !currState->hcr.e2h || currState->tcr.epd1)
1037  fault = true;
1038  break;
1039 
1040  default:
1041  // invalid addr if top two bytes are not all 0s
1042  fault = true;
1043  }
1044  ps = currState->hcr.e2h ? currState->tcr.ips: currState->tcr.ps;
1045  break;
1046  case EL3:
1047  switch(bits(currState->vaddr, top_bit)) {
1048  case 0:
1049  DPRINTF(TLB, " - Selecting TTBR0_EL3 (AArch64)\n");
1051  tsz = 64 - currState->tcr.t0sz;
1052  tg = GrainMap_tg0[currState->tcr.tg0];
1053  currState->hpd = currState->tcr.hpd;
1054  currState->isUncacheable = currState->tcr.irgn0 == 0;
1056  top_bit, tg, tsz, true);
1057 
1058  if (vaddr_fault)
1059  fault = true;
1060  break;
1061  default:
1062  // invalid addr if top two bytes are not all 0s
1063  fault = true;
1064  }
1065  ps = currState->tcr.ps;
1066  break;
1067  }
1068 
1069  const bool is_atomic = currState->req->isAtomic();
1070 
1071  if (fault) {
1072  Fault f;
1073  if (currState->isFetch)
1074  f = std::make_shared<PrefetchAbort>(
1076  ArmFault::TranslationLL + LookupLevel::L0, isStage2,
1078  else
1079  f = std::make_shared<DataAbort>(
1082  is_atomic ? false : currState->isWrite,
1083  ArmFault::TranslationLL + LookupLevel::L0,
1085 
1086  if (currState->timing) {
1087  pending = false;
1088  nextWalk(currState->tc);
1089  currState = NULL;
1090  } else {
1091  currState->tc = NULL;
1092  currState->req = NULL;
1093  }
1094  return f;
1095 
1096  }
1097 
1098  if (tg == ReservedGrain) {
1099  warn_once("Reserved granule size requested; gem5's IMPLEMENTATION "
1100  "DEFINED behavior takes this to mean 4KB granules\n");
1101  tg = Grain4KB;
1102  }
1103 
1104  // Clamp to lower limit
1105  int pa_range = decodePhysAddrRange64(ps);
1106  if (pa_range > _physAddrRange) {
1108  } else {
1109  currState->physAddrRange = pa_range;
1110  }
1111 
1112  auto [table_addr, desc_addr, start_lookup_level] = walkAddresses(
1113  ttbr, tg, tsz, pa_range);
1114 
1115  // Determine physical address size and raise an Address Size Fault if
1116  // necessary
1118  DPRINTF(TLB, "Address size fault before any lookup\n");
1119  Fault f;
1120  if (currState->isFetch)
1121  f = std::make_shared<PrefetchAbort>(
1123  ArmFault::AddressSizeLL + start_lookup_level,
1124  isStage2,
1126  else
1127  f = std::make_shared<DataAbort>(
1130  is_atomic ? false : currState->isWrite,
1131  ArmFault::AddressSizeLL + start_lookup_level,
1132  isStage2,
1134 
1135 
1136  if (currState->timing) {
1137  pending = false;
1138  nextWalk(currState->tc);
1139  currState = NULL;
1140  } else {
1141  currState->tc = NULL;
1142  currState->req = NULL;
1143  }
1144  return f;
1145 
1146  }
1147 
1148  // Trickbox address check
1149  Fault f = testWalk(desc_addr, sizeof(uint64_t),
1150  TlbEntry::DomainType::NoAccess, start_lookup_level, isStage2);
1151  if (f) {
1152  DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
1153  if (currState->timing) {
1154  pending = false;
1155  nextWalk(currState->tc);
1156  currState = NULL;
1157  } else {
1158  currState->tc = NULL;
1159  currState->req = NULL;
1160  }
1161  return f;
1162  }
1163 
1165  if (currState->sctlr.c == 0 || currState->isUncacheable) {
1166  flag.set(Request::UNCACHEABLE);
1167  }
1168 
1169  if (currState->isSecure) {
1170  flag.set(Request::SECURE);
1171  }
1172 
1173  currState->longDesc.lookupLevel = start_lookup_level;
1174  currState->longDesc.aarch64 = true;
1177 
1178  if (currState->timing) {
1179  fetchDescriptor(desc_addr, (uint8_t*) &currState->longDesc.data,
1180  sizeof(uint64_t), flag, start_lookup_level,
1181  LongDescEventByLevel[start_lookup_level], NULL);
1182  } else {
1183  fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
1184  sizeof(uint64_t), flag, -1, NULL,
1186  f = currState->fault;
1187  }
1188 
1189  return f;
1190 }
1191 
1192 std::tuple<Addr, Addr, TableWalker::LookupLevel>
1193 TableWalker::walkAddresses(Addr ttbr, GrainSize tg, int tsz, int pa_range)
1194 {
1195  const auto* ptops = getPageTableOps(tg);
1196 
1197  LookupLevel first_level = LookupLevel::Num_ArmLookupLevel;
1198  Addr table_addr = 0;
1199  Addr desc_addr = 0;
1200 
1201  if (currState->walkEntry.valid) {
1202  // WalkCache hit
1203  TlbEntry* entry = &currState->walkEntry;
1204  DPRINTF(PageTableWalker,
1205  "Walk Cache hit: va=%#x, level=%d, table address=%#x\n",
1206  currState->vaddr, entry->lookupLevel, entry->pfn);
1207 
1208  currState->xnTable = entry->xn;
1209  currState->pxnTable = entry->pxn;
1210  currState->rwTable = bits(entry->ap, 1);
1211  currState->userTable = bits(entry->ap, 0);
1212 
1213  table_addr = entry->pfn;
1214  first_level = (LookupLevel)(entry->lookupLevel + 1);
1215  } else {
1216  // WalkCache miss
1217  first_level = isStage2 ?
1218  ptops->firstS2Level(currState->vtcr.sl0) :
1219  ptops->firstLevel(64 - tsz);
1220  panic_if(first_level == LookupLevel::Num_ArmLookupLevel,
1221  "Table walker couldn't find lookup level\n");
1222 
1223  int stride = tg - 3;
1224  int base_addr_lo = 3 + tsz - stride * (3 - first_level) - tg;
1225 
1226  if (pa_range == 52) {
1227  int z = (base_addr_lo < 6) ? 6 : base_addr_lo;
1228  table_addr = mbits(ttbr, 47, z);
1229  table_addr |= (bits(ttbr, 5, 2) << 48);
1230  } else {
1231  table_addr = mbits(ttbr, 47, base_addr_lo);
1232  }
1233  }
1234 
1235  desc_addr = table_addr + ptops->index(currState->vaddr, first_level, tsz);
1236 
1237  return std::make_tuple(table_addr, desc_addr, first_level);
1238 }
1239 
// Decode ARMv7 short-descriptor memory attributes (TEX/C/B plus the S bit)
// into the TLB entry 'te'. When SCTLR.TRE is off (or the MMU is disabled)
// the 5-bit texcb value is decoded directly; otherwise the PRRR/NMRR
// memory-remap registers are consulted.
// NOTE(review): this listing elides several original source lines (e.g. the
// lines that assign te.mtype in most cases and the second argument of the
// snsBankedIndex() calls), so the comments below only describe what is
// visible here.
1240 void
1242  uint8_t texcb, bool s)
1243 {
1244  // Note: tc and sctlr local variables are hiding tc and sctrl class
1245  // variables
1246  DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s);
     // Defaults; the decode below overrides them per attribute class.
1247  te.shareable = false; // default value
1248  te.nonCacheable = false;
1249  te.outerShareable = false;
     // TRE disabled (or enabled with MMU off): direct TEX/C/B decode.
1250  if (sctlr.tre == 0 || ((sctlr.tre == 1) && (sctlr.m == 0))) {
1251  switch(texcb) {
1252  case 0: // Stongly-ordered
1253  te.nonCacheable = true;
1255  te.shareable = true;
1256  te.innerAttrs = 1;
1257  te.outerAttrs = 0;
1258  break;
1259  case 1: // Shareable Device
1260  te.nonCacheable = true;
1262  te.shareable = true;
1263  te.innerAttrs = 3;
1264  te.outerAttrs = 0;
1265  break;
1266  case 2: // Outer and Inner Write-Through, no Write-Allocate
1268  te.shareable = s;
1269  te.innerAttrs = 6;
1270  te.outerAttrs = bits(texcb, 1, 0);
1271  break;
1272  case 3: // Outer and Inner Write-Back, no Write-Allocate
1274  te.shareable = s;
1275  te.innerAttrs = 7;
1276  te.outerAttrs = bits(texcb, 1, 0);
1277  break;
1278  case 4: // Outer and Inner Non-cacheable
1279  te.nonCacheable = true;
1281  te.shareable = s;
1282  te.innerAttrs = 0;
1283  te.outerAttrs = bits(texcb, 1, 0);
1284  break;
1285  case 5: // Reserved
1286  panic("Reserved texcb value!\n");
1287  break;
1288  case 6: // Implementation Defined
1289  panic("Implementation-defined texcb value!\n");
1290  break;
1291  case 7: // Outer and Inner Write-Back, Write-Allocate
1293  te.shareable = s;
1294  te.innerAttrs = 5;
1295  te.outerAttrs = 1;
1296  break;
1297  case 8: // Non-shareable Device
1298  te.nonCacheable = true;
1300  te.shareable = false;
1301  te.innerAttrs = 3;
1302  te.outerAttrs = 0;
1303  break;
1304  case 9 ... 15: // Reserved
1305  panic("Reserved texcb value!\n");
1306  break;
1307  case 16 ... 31: // Cacheable Memory
1309  te.shareable = s;
     // texcb[1:0] == 0 or texcb[3:2] == 0 means a Non-cacheable inner or
     // outer policy, so the entry as a whole is treated as non-cacheable.
1310  if (bits(texcb, 1,0) == 0 || bits(texcb, 3,2) == 0)
1311  te.nonCacheable = true;
1312  te.innerAttrs = bits(texcb, 1, 0);
1313  te.outerAttrs = bits(texcb, 3, 2);
1314  break;
1315  default:
1316  panic("More than 32 states for 5 bits?\n");
1317  }
1318  } else {
     // TRE enabled with MMU on: remap through PRRR (type/shareability)
     // and NMRR (inner/outer cacheability), indexed by texcb[2:0].
1319  assert(tc);
1320  PRRR prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR,
1322  NMRR nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR,
1324  DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr);
1325  uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0;
1326  switch(bits(texcb, 2,0)) {
1327  case 0:
1328  curr_tr = prrr.tr0;
1329  curr_ir = nmrr.ir0;
1330  curr_or = nmrr.or0;
1331  te.outerShareable = (prrr.nos0 == 0);
1332  break;
1333  case 1:
1334  curr_tr = prrr.tr1;
1335  curr_ir = nmrr.ir1;
1336  curr_or = nmrr.or1;
1337  te.outerShareable = (prrr.nos1 == 0);
1338  break;
1339  case 2:
1340  curr_tr = prrr.tr2;
1341  curr_ir = nmrr.ir2;
1342  curr_or = nmrr.or2;
1343  te.outerShareable = (prrr.nos2 == 0);
1344  break;
1345  case 3:
1346  curr_tr = prrr.tr3;
1347  curr_ir = nmrr.ir3;
1348  curr_or = nmrr.or3;
1349  te.outerShareable = (prrr.nos3 == 0);
1350  break;
1351  case 4:
1352  curr_tr = prrr.tr4;
1353  curr_ir = nmrr.ir4;
1354  curr_or = nmrr.or4;
1355  te.outerShareable = (prrr.nos4 == 0);
1356  break;
1357  case 5:
1358  curr_tr = prrr.tr5;
1359  curr_ir = nmrr.ir5;
1360  curr_or = nmrr.or5;
1361  te.outerShareable = (prrr.nos5 == 0);
1362  break;
1363  case 6:
1364  panic("Imp defined type\n");
1365  case 7:
1366  curr_tr = prrr.tr7;
1367  curr_ir = nmrr.ir7;
1368  curr_or = nmrr.or7;
1369  te.outerShareable = (prrr.nos7 == 0);
1370  break;
1371  }
1372 
     // Memory type from PRRR.TRn: 0 = Strongly-ordered, 1 = Device,
     // 2 = Normal, 3 = Reserved.
1373  switch(curr_tr) {
1374  case 0:
1375  DPRINTF(TLBVerbose, "StronglyOrdered\n");
1377  te.nonCacheable = true;
1378  te.innerAttrs = 1;
1379  te.outerAttrs = 0;
1380  te.shareable = true;
1381  break;
1382  case 1:
1383  DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n",
1384  prrr.ds1, prrr.ds0, s);
1386  te.nonCacheable = true;
1387  te.innerAttrs = 3;
1388  te.outerAttrs = 0;
     // Device shareability comes from PRRR.DS1/DS0 depending on the
     // page's S bit.
1389  if (prrr.ds1 && s)
1390  te.shareable = true;
1391  if (prrr.ds0 && !s)
1392  te.shareable = true;
1393  break;
1394  case 2:
1395  DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n",
1396  prrr.ns1, prrr.ns0, s);
1398  if (prrr.ns1 && s)
1399  te.shareable = true;
1400  if (prrr.ns0 && !s)
1401  te.shareable = true;
1402  break;
1403  case 3:
1404  panic("Reserved type");
1405  }
1406 
     // For Normal memory, inner/outer cacheability come from NMRR.
1407  if (te.mtype == TlbEntry::MemoryType::Normal){
1408  switch(curr_ir) {
1409  case 0:
1410  te.nonCacheable = true;
1411  te.innerAttrs = 0;
1412  break;
1413  case 1:
1414  te.innerAttrs = 5;
1415  break;
1416  case 2:
1417  te.innerAttrs = 6;
1418  break;
1419  case 3:
1420  te.innerAttrs = 7;
1421  break;
1422  }
1423 
1424  switch(curr_or) {
1425  case 0:
1426  te.nonCacheable = true;
1427  te.outerAttrs = 0;
1428  break;
1429  case 1:
1430  te.outerAttrs = 1;
1431  break;
1432  case 2:
1433  te.outerAttrs = 2;
1434  break;
1435  case 3:
1436  te.outerAttrs = 3;
1437  break;
1438  }
1439  }
1440  }
1441  DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, "
1442  "outerAttrs: %d\n",
1443  te.shareable, te.innerAttrs, te.outerAttrs);
     // Fold the decoded fields into the packed attribute word
     // (false = short-descriptor/32-bit PAR format).
1444  te.setAttributes(false);
1445 }
1446 
// Decode LPAE (long-descriptor, AArch32) memory attributes into 'te'.
// Stage 2 uses the descriptor's 4-bit MemAttr field directly; stage 1
// always remaps through MAIR0/MAIR1 using the descriptor's AttrIndx.
// NOTE(review): this listing elides some original lines (e.g. several
// te.mtype assignments), so comments describe only what is visible.
1447 void
1449  LongDescriptor &l_descriptor)
1450 {
1451  assert(release->has(ArmExtension::LPAE));
1452 
1453  uint8_t attr;
1454  uint8_t sh = l_descriptor.sh();
1455  // Different format and source of attributes if this is a stage 2
1456  // translation
1457  if (isStage2) {
1458  attr = l_descriptor.memAttr();
1459  uint8_t attr_3_2 = (attr >> 2) & 0x3;
1460  uint8_t attr_1_0 = attr & 0x3;
1461 
1462  DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh);
1463 
     // MemAttr[3:2] == 0 selects Strongly-ordered/Device; otherwise
     // Normal memory with cacheability encoded in the two halves.
1464  if (attr_3_2 == 0) {
1465  te.mtype = attr_1_0 == 0 ? TlbEntry::MemoryType::StronglyOrdered
1467  te.outerAttrs = 0;
1468  te.innerAttrs = attr_1_0 == 0 ? 1 : 3;
1469  te.nonCacheable = true;
1470  } else {
1472  te.outerAttrs = attr_3_2 == 1 ? 0 :
1473  attr_3_2 == 2 ? 2 : 1;
1474  te.innerAttrs = attr_1_0 == 1 ? 0 :
1475  attr_1_0 == 2 ? 6 : 5;
     // Non-cacheable if either half encodes Non-cacheable (value 1).
1476  te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1);
1477  }
1478  } else {
1479  uint8_t attrIndx = l_descriptor.attrIndx();
1480 
1481  // LPAE always uses remapping of memory attributes, irrespective of the
1482  // value of SCTLR.TRE
     // AttrIndx[2] selects MAIR1 vs MAIR0; AttrIndx[1:0] selects the byte.
1483  MiscRegIndex reg = attrIndx & 0x4 ? MISCREG_MAIR1 : MISCREG_MAIR0;
1484  int reg_as_int = snsBankedIndex(reg, currState->tc,
1485  !currState->isSecure);
1486  uint32_t mair = currState->tc->readMiscReg(reg_as_int);
1487  attr = (mair >> (8 * (attrIndx % 4))) & 0xff;
1488  uint8_t attr_7_4 = bits(attr, 7, 4);
1489  uint8_t attr_3_0 = bits(attr, 3, 0);
1490  DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx, sh, attr);
1491 
1492  // Note: the memory subsystem only cares about the 'cacheable' memory
1493  // attribute. The other attributes are only used to fill the PAR register
1494  // accordingly to provide the illusion of full support
1495  te.nonCacheable = false;
1496 
     // Decode the MAIR byte: high nibble = outer, low nibble = inner.
1497  switch (attr_7_4) {
1498  case 0x0:
1499  // Strongly-ordered or Device memory
1500  if (attr_3_0 == 0x0)
1502  else if (attr_3_0 == 0x4)
1504  else
1505  panic("Unpredictable behavior\n");
1506  te.nonCacheable = true;
1507  te.outerAttrs = 0;
1508  break;
1509  case 0x4:
1510  // Normal memory, Outer Non-cacheable
1512  te.outerAttrs = 0;
1513  if (attr_3_0 == 0x4)
1514  // Inner Non-cacheable
1515  te.nonCacheable = true;
1516  else if (attr_3_0 < 0x8)
1517  panic("Unpredictable behavior\n");
1518  break;
1519  case 0x8:
1520  case 0x9:
1521  case 0xa:
1522  case 0xb:
1523  case 0xc:
1524  case 0xd:
1525  case 0xe:
1526  case 0xf:
1527  if (attr_7_4 & 0x4) {
1528  te.outerAttrs = (attr_7_4 & 1) ? 1 : 3;
1529  } else {
1530  te.outerAttrs = 0x2;
1531  }
1532  // Normal memory, Outer Cacheable
1534  if (attr_3_0 != 0x4 && attr_3_0 < 0x8)
1535  panic("Unpredictable behavior\n");
1536  break;
1537  default:
1538  panic("Unpredictable behavior\n");
1539  break;
1540  }
1541 
1542  switch (attr_3_0) {
1543  case 0x0:
1544  te.innerAttrs = 0x1;
1545  break;
1546  case 0x4:
1547  te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0;
1548  break;
1549  case 0x8:
1550  case 0x9:
1551  case 0xA:
1552  case 0xB:
1553  te.innerAttrs = 6;
1554  break;
1555  case 0xC:
1556  case 0xD:
1557  case 0xE:
1558  case 0xF:
1559  te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7;
1560  break;
1561  default:
1562  panic("Unpredictable behavior\n");
1563  break;
1564  }
1565  }
1566 
     // SH field: 2 = Outer Shareable; bit 1 set means shareable at all.
1567  te.outerShareable = sh == 2;
1568  te.shareable = (sh & 0x2) ? true : false;
     // true = long-descriptor/64-bit PAR format; raw attr goes in the
     // top byte of the attributes word.
1569  te.setAttributes(true);
1570  te.attributes |= (uint64_t) attr << 56;
1571 }
1572 
// Decode AArch64 memory attributes into 'te'. Stage 2 decodes the
// descriptor's MemAttr field; stage 1 reads MAIR_EL1/2/3 (selected by the
// translation regime) and indexes it with the descriptor's AttrIndx.
// NOTE(review): this listing elides some original lines (e.g. te.mtype
// assignments and the regime parameter in the elided signature), so
// comments describe only what is visible.
1573 void
1575  LongDescriptor &l_descriptor)
1576 {
1577  uint8_t attr;
1578  uint8_t attr_hi;
1579  uint8_t attr_lo;
1580  uint8_t sh = l_descriptor.sh();
1581 
1582  if (isStage2) {
1583  attr = l_descriptor.memAttr();
     // NOTE(review): these declarations shadow the outer attr_hi/attr_lo;
     // only the shadowed copies are used inside this branch, so the outer
     // ones stay unused here. Looks intentional but worth confirming.
1584  uint8_t attr_hi = (attr >> 2) & 0x3;
1585  uint8_t attr_lo = attr & 0x3;
1586 
1587  DPRINTF(TLBVerbose, "memAttrsAArch64 MemAttr:%#x sh:%#x\n", attr, sh);
1588 
1589  if (attr_hi == 0) {
1590  te.mtype = attr_lo == 0 ? TlbEntry::MemoryType::StronglyOrdered
1592  te.outerAttrs = 0;
1593  te.innerAttrs = attr_lo == 0 ? 1 : 3;
1594  te.nonCacheable = true;
1595  } else {
1597  te.outerAttrs = attr_hi == 1 ? 0 :
1598  attr_hi == 2 ? 2 : 1;
1599  te.innerAttrs = attr_lo == 1 ? 0 :
1600  attr_lo == 2 ? 6 : 5;
1601  // Treat write-through memory as uncacheable, this is safe
1602  // but for performance reasons not optimal.
1603  te.nonCacheable = (attr_hi == 1) || (attr_hi == 2) ||
1604  (attr_lo == 1) || (attr_lo == 2);
1605  }
1606  } else {
1607  uint8_t attrIndx = l_descriptor.attrIndx();
1608 
1609  DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh);
1611 
1612  // Select MAIR
1613  uint64_t mair;
1614  switch (regime) {
1615  case EL0:
1616  case EL1:
1617  mair = tc->readMiscReg(MISCREG_MAIR_EL1);
1618  break;
1619  case EL2:
1620  mair = tc->readMiscReg(MISCREG_MAIR_EL2);
1621  break;
1622  case EL3:
1623  mair = tc->readMiscReg(MISCREG_MAIR_EL3);
1624  break;
1625  default:
1626  panic("Invalid exception level");
1627  break;
1628  }
1629 
1630  // Select attributes
     // Each AttrIndx selects one byte of MAIR.
1631  attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
1632  attr_lo = bits(attr, 3, 0);
1633  attr_hi = bits(attr, 7, 4);
1634 
1635  // Memory type
1637 
1638  // Cacheability
1639  te.nonCacheable = false;
1640  if (te.mtype == TlbEntry::MemoryType::Device) { // Device memory
1641  te.nonCacheable = true;
1642  }
1643  // Treat write-through memory as uncacheable, this is safe
1644  // but for performance reasons not optimal.
1645  switch (attr_hi) {
1646  case 0x1 ... 0x3: // Normal Memory, Outer Write-through transient
1647  case 0x4: // Normal memory, Outer Non-cacheable
1648  case 0x8 ... 0xb: // Normal Memory, Outer Write-through non-transient
1649  te.nonCacheable = true;
1650  }
1651  switch (attr_lo) {
1652  case 0x1 ... 0x3: // Normal Memory, Inner Write-through transient
1653  case 0x9 ... 0xb: // Normal Memory, Inner Write-through non-transient
1654  warn_if(!attr_hi, "Unpredictable behavior");
1655  [[fallthrough]];
1656  case 0x4: // Device-nGnRE memory or
1657  // Normal memory, Inner Non-cacheable
1658  case 0x8: // Device-nGRE memory or
1659  // Normal memory, Inner Write-through non-transient
1660  te.nonCacheable = true;
1661  }
1662 
     // NOTE(review): stage 1 sets shareable from (sh == 2) and
     // outerShareable from (sh & 0x2) — the mirror image of what
     // memAttrsLPAE does. Worth confirming against the original file.
1663  te.shareable = sh == 2;
1664  te.outerShareable = (sh & 0x2) ? true : false;
1665  // Attributes formatted according to the 64-bit PAR
1666  te.attributes = ((uint64_t) attr << 56) |
1667  (1 << 11) | // LPAE bit
1668  (te.ns << 9) | // NS bit
1669  (sh << 7);
1670  }
1671 }
1672 
// Interpret the short-descriptor L1 entry just fetched into
// currState->l1Desc: fault on Reserved/Ignore, handle a Section mapping
// (supersections unimplemented), or chase a Page Table pointer by fetching
// the L2 descriptor. Early-out if an earlier step already faulted.
// NOTE(review): several original lines are elided from this listing
// (e.g. fault-constructor arguments and the Request::Flags declaration).
1673 void
1675 {
     // A fault from the descriptor fetch short-circuits processing.
1676  if (currState->fault != NoFault) {
1677  return;
1678  }
1679 
1681  byteOrder(currState->tc));
1682 
1683  DPRINTF(TLB, "L1 descriptor for %#x is %#x\n",
1685  TlbEntry te;
1686 
1687  const bool is_atomic = currState->req->isAtomic();
1688 
1689  switch (currState->l1Desc.type()) {
1690  case L1Descriptor::Ignore:
     // Non-timing walks drop their tc/req references before reporting
     // the fault (timing walks still need them for the callback).
1692  if (!currState->timing) {
1693  currState->tc = NULL;
1694  currState->req = NULL;
1695  }
1696  DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
1697  if (currState->isFetch)
1698  currState->fault =
1699  std::make_shared<PrefetchAbort>(
1701  ArmFault::TranslationLL + LookupLevel::L1,
1702  isStage2,
1704  else
1705  currState->fault =
1706  std::make_shared<DataAbort>(
1709  is_atomic ? false : currState->isWrite,
1710  ArmFault::TranslationLL + LookupLevel::L1, isStage2,
1712  return;
1713  case L1Descriptor::Section:
     // With SCTLR.AFE, AP[0] clear means the access flag is not set:
     // raise an access-flag fault (atomics are reported as reads here).
1714  if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) {
1720  currState->fault = std::make_shared<DataAbort>(
1722  currState->l1Desc.domain(),
1723  is_atomic ? false : currState->isWrite,
1724  ArmFault::AccessFlagLL + LookupLevel::L1,
1725  isStage2,
1727  }
1728  if (currState->l1Desc.supersection()) {
1729  panic("Haven't implemented supersections\n");
1730  }
1732  return;
1734  {
     // Page-table descriptor: compute the L2 descriptor address from
     // the L1 base plus vaddr[19:12] (4-byte entries).
1735  Addr l2desc_addr;
1736  l2desc_addr = currState->l1Desc.l2Addr() |
1737  (bits(currState->vaddr, 19, 12) << 2);
1738  DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n",
1739  l2desc_addr, currState->isSecure ? "s" : "ns");
1740 
1741  // Trickbox address check
1742  currState->fault = testWalk(l2desc_addr, sizeof(uint32_t),
1743  currState->l1Desc.domain(),
1744  LookupLevel::L2, isStage2);
1745 
1746  if (currState->fault) {
1747  if (!currState->timing) {
1748  currState->tc = NULL;
1749  currState->req = NULL;
1750  }
1751  return;
1752  }
1753 
1755 
1756  if (currState->sctlr.c == 0 || currState->isUncacheable) {
1757  flag.set(Request::UNCACHEABLE);
1758  }
1759 
1760  if (currState->isSecure)
1761  flag.set(Request::SECURE);
1762 
     // fetchDescriptor returns true in timing mode, meaning the L2
     // result arrives later via doL2DescEvent.
1763  bool delayed;
1764  delayed = fetchDescriptor(l2desc_addr,
1765  (uint8_t*)&currState->l2Desc.data,
1766  sizeof(uint32_t), flag, -1, &doL2DescEvent,
1768  if (delayed) {
1769  currState->delayed = true;
1770  }
1771 
1772  return;
1773  }
1774  default:
1775  panic("A new type in a 2 bit field?\n");
1776  }
1777 }
1778 
// Build the appropriate abort for a long-descriptor walk failure:
// PrefetchAbort for instruction fetches, DataAbort otherwise (atomic
// accesses are reported as reads). The fault-source argument lines are
// elided from this listing.
1779 Fault
1781 {
1782  if (currState->isFetch) {
1783  return std::make_shared<PrefetchAbort>(
1786  isStage2,
1788  } else {
1789  return std::make_shared<DataAbort>(
1792  currState->req->isAtomic() ? false : currState->isWrite,
1794  isStage2,
1796  }
1797 }
1798 
// Process the long-descriptor entry just fetched into currState->longDesc:
// fault on invalid entries, finish the walk on Block/Page entries (after
// address-size and access-flag checks), or descend to the next level on
// Table entries. NOTE(review): this listing elides a number of original
// lines (hierarchical-permission updates, the address-size check
// condition, the Request::Flags declaration, etc.).
1799 void
1801 {
1802  if (currState->fault != NoFault) {
1803  return;
1804  }
1805 
1807  byteOrder(currState->tc));
1808 
1809  DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n",
1812  currState->aarch64 ? "AArch64" : "long-desc.");
1813 
1816  DPRINTF(PageTableWalker, "Analyzing L%d descriptor: %#llx, pxn: %d, "
1817  "xn: %d, ap: %d, af: %d, type: %d\n",
1820  currState->longDesc.pxn(),
1821  currState->longDesc.xn(),
1822  currState->longDesc.ap(),
1823  currState->longDesc.af(),
1824  currState->longDesc.type());
1825  } else {
1826  DPRINTF(PageTableWalker, "Analyzing L%d descriptor: %#llx, type: %d\n",
1829  currState->longDesc.type());
1830  }
1831 
1832  TlbEntry te;
1833 
1834  switch (currState->longDesc.type()) {
1836  DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
1839 
1841  if (!currState->timing) {
1842  currState->tc = NULL;
1843  currState->req = NULL;
1844  }
1845  return;
1846 
1847  case LongDescriptor::Block:
1848  case LongDescriptor::Page:
1849  {
1850  auto fault_source = ArmFault::FaultSourceInvalid;
1851  // Check for address size fault
1854 
1855  DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1857  fault_source = ArmFault::AddressSizeLL;
1858 
1859  // Check for access fault
1860  } else if (currState->longDesc.af() == 0) {
1861 
1862  DPRINTF(TLB, "L%d descriptor causing Access Fault\n",
1864  fault_source = ArmFault::AccessFlagLL;
1865  }
1866 
1867  if (fault_source != ArmFault::FaultSourceInvalid) {
1868  currState->fault = generateLongDescFault(fault_source);
1869  } else {
     // Leaf descriptor is valid: the (elided) call here inserts the
     // final translation into the TLB.
1871  }
1872  }
1873  return;
1874  case LongDescriptor::Table:
1875  {
1876  // Set hierarchical permission flags
1887 
1888  // Set up next level lookup
1889  Addr next_desc_addr = currState->longDesc.nextDescAddr(
1890  currState->vaddr);
1891 
1892  DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
1895  next_desc_addr,
1896  currState->secureLookup ? "s" : "ns");
1897 
1898  // Check for address size fault
1900  next_desc_addr, currState->physAddrRange)) {
1901  DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1903 
1906  return;
1907  }
1908 
1909  // Trickbox address check
1911  next_desc_addr, sizeof(uint64_t), TlbEntry::DomainType::Client,
1913 
1914  if (currState->fault) {
1915  if (!currState->timing) {
1916  currState->tc = NULL;
1917  currState->req = NULL;
1918  }
1919  return;
1920  }
1921 
     // The (elided) call here inserts a partial entry into the walk
     // cache when one is present.
1922  if (mmu->hasWalkCache()) {
1924  }
1925 
1926 
1928  if (currState->secureLookup)
1929  flag.set(Request::SECURE);
1930 
1931  if (currState->sctlr.c == 0 || currState->isUncacheable) {
1932  flag.set(Request::UNCACHEABLE);
1933  }
1934 
1937  Event *event = NULL;
1938  switch (L) {
1939  case LookupLevel::L1:
     // Intentional fall-through: an L1 table descriptor is only
     // legal for AArch64 here, then handled like L2/L3.
1940  assert(currState->aarch64);
1941  case LookupLevel::L2:
1942  case LookupLevel::L3:
1943  event = LongDescEventByLevel[L];
1944  break;
1945  default:
1946  panic("Wrong lookup level in table walk\n");
1947  break;
1948  }
1949 
1950  bool delayed;
1951  delayed = fetchDescriptor(next_desc_addr, (uint8_t*)&currState->longDesc.data,
1952  sizeof(uint64_t), flag, -1, event,
1954  if (delayed) {
1955  currState->delayed = true;
1956  }
1957  }
1958  return;
1959  default:
1960  panic("A new type in a 2 bit field?\n");
1961  }
1962 }
1963 
// Interpret the short-descriptor L2 entry just fetched into
// currState->l2Desc: fault on invalid entries or a clear access flag
// (with SCTLR.AFE), otherwise the (elided) tail inserts the final entry.
// Several original lines are elided from this listing.
1964 void
1966 {
1967  if (currState->fault != NoFault) {
1968  return;
1969  }
1970 
1972  byteOrder(currState->tc));
1973 
1974  DPRINTF(TLB, "L2 descriptor for %#x is %#x\n",
1976  TlbEntry te;
1977 
1978  const bool is_atomic = currState->req->isAtomic();
1979 
1980  if (currState->l2Desc.invalid()) {
1981  DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
1982  if (!currState->timing) {
1983  currState->tc = NULL;
1984  currState->req = NULL;
1985  }
1986  if (currState->isFetch)
1987  currState->fault = std::make_shared<PrefetchAbort>(
1989  ArmFault::TranslationLL + LookupLevel::L2,
1990  isStage2,
1992  else
1993  currState->fault = std::make_shared<DataAbort>(
1995  is_atomic ? false : currState->isWrite,
1996  ArmFault::TranslationLL + LookupLevel::L2,
1997  isStage2,
1999  return;
2000  }
2001 
     // SCTLR.AFE with AP[0] clear: access-flag fault at L2.
2002  if (currState->sctlr.afe && bits(currState->l2Desc.ap(), 0) == 0) {
2006  DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
2007  currState->sctlr.afe, currState->l2Desc.ap());
2008 
2009  currState->fault = std::make_shared<DataAbort>(
2012  is_atomic ? false : currState->isWrite,
2013  ArmFault::AccessFlagLL + LookupLevel::L2, isStage2,
2015  }
2016 
2018 }
2019 
// Timing-mode event handler: pop the walk state at the head of the L1
// queue, process its L1 descriptor, then either finish (fault or no L2
// needed) and start the next queued walk, or requeue on the L2 queue.
2020 void
2022 {
2023  currState = stateQueues[LookupLevel::L1].front();
2024  currState->delayed = false;
2025  // if there's a stage2 translation object we don't need it any more
2026  if (currState->stage2Tran) {
2027  delete currState->stage2Tran;
2028  currState->stage2Tran = NULL;
2029  }
2030 
2031 
2032  DPRINTF(PageTableWalker, "L1 Desc object host addr: %p\n",
2033  &currState->l1Desc.data);
2034  DPRINTF(PageTableWalker, "L1 Desc object data: %08x\n",
2035  currState->l1Desc.data);
2036 
2037  DPRINTF(PageTableWalker, "calling doL1Descriptor for vaddr:%#x\n",
2039  doL1Descriptor();
2040 
2041  stateQueues[LookupLevel::L1].pop_front();
2042  // Check if fault was generated
2043  if (currState->fault != NoFault) {
2045  currState->tc, currState->mode);
2047 
     // Release this walker slot and kick off the next pending walk.
2048  pending = false;
2049  nextWalk(currState->tc);
2050 
2051  currState->req = NULL;
2052  currState->tc = NULL;
2053  currState->delayed = false;
2054  delete currState;
2055  }
2056  else if (!currState->delayed) {
2057  // delay is not set so there is no L2 to do
2058  // Don't finish the translation if a stage 2 look up is underway
2060  DPRINTF(PageTableWalker, "calling translateTiming again\n");
2061 
2065 
2067 
2068  pending = false;
2069  nextWalk(currState->tc);
2070 
2071  currState->req = NULL;
2072  currState->tc = NULL;
2073  currState->delayed = false;
2074  delete currState;
2075  } else {
2076  // need to do L2 descriptor
2077  stateQueues[LookupLevel::L2].push_back(currState);
2078  }
2079  currState = NULL;
2080 }
2081 
// Timing-mode event handler for a completed L2 descriptor fetch: process
// the descriptor, report fault or completion, then release the walker
// slot, start the next pending walk, and destroy the walk state.
2082 void
2084 {
2085  currState = stateQueues[LookupLevel::L2].front();
     // Only delayed (timing) walks ever land on the L2 queue.
2086  assert(currState->delayed);
2087  // if there's a stage2 translation object we don't need it any more
2088  if (currState->stage2Tran) {
2089  delete currState->stage2Tran;
2090  currState->stage2Tran = NULL;
2091  }
2092 
2093  DPRINTF(PageTableWalker, "calling doL2Descriptor for vaddr:%#x\n",
2095  doL2Descriptor();
2096 
2097  // Check if fault was generated
2098  if (currState->fault != NoFault) {
2100  currState->tc, currState->mode);
2102  } else {
2104  DPRINTF(PageTableWalker, "calling translateTiming again\n");
2105 
2109 
2111  }
2112 
2113 
2114  stateQueues[LookupLevel::L2].pop_front();
2115  pending = false;
2116  nextWalk(currState->tc);
2117 
2118  currState->req = NULL;
2119  currState->tc = NULL;
2120  currState->delayed = false;
2121 
2122  delete currState;
2123  currState = NULL;
2124 }
2125 
// Event trampoline: completed long-descriptor fetch at L0.
2126 void
2128 {
2129  doLongDescriptorWrapper(LookupLevel::L0);
2130 }
2131 
// Event trampoline: completed long-descriptor fetch at L1.
2132 void
2134 {
2135  doLongDescriptorWrapper(LookupLevel::L1);
2136 }
2137 
// Event trampoline: completed long-descriptor fetch at L2.
2138 void
2140 {
2141  doLongDescriptorWrapper(LookupLevel::L2);
2142 }
2143 
// Event trampoline: completed long-descriptor fetch at L3.
2144 void
2146 {
2147  doLongDescriptorWrapper(LookupLevel::L3);
2148 }
2149 
// Timing-mode handler shared by all long-descriptor levels: pop the walk
// state from the given level's queue, process the descriptor, then finish
// (fault or terminal level) or continue to the next level (the requeue
// line is elided from this listing).
2150 void
2152 {
2153  currState = stateQueues[curr_lookup_level].front();
2154  assert(curr_lookup_level == currState->longDesc.lookupLevel);
2155  currState->delayed = false;
2156 
2157  // if there's a stage2 translation object we don't need it any more
2158  if (currState->stage2Tran) {
2159  delete currState->stage2Tran;
2160  currState->stage2Tran = NULL;
2161  }
2162 
2163  DPRINTF(PageTableWalker, "calling doLongDescriptor for vaddr:%#x\n",
2165  doLongDescriptor();
2166 
2167  stateQueues[curr_lookup_level].pop_front();
2168 
2169  if (currState->fault != NoFault) {
2170  // A fault was generated
2172  currState->tc, currState->mode);
2173 
2174  pending = false;
2175  nextWalk(currState->tc);
2176 
2177  currState->req = NULL;
2178  currState->tc = NULL;
2179  currState->delayed = false;
2180  delete currState;
2181  } else if (!currState->delayed) {
2182  // No additional lookups required
2183  DPRINTF(PageTableWalker, "calling translateTiming again\n");
2185 
2189 
2190  stats.walksLongTerminatedAtLevel[(unsigned) curr_lookup_level]++;
2191 
2192  pending = false;
2193  nextWalk(currState->tc);
2194 
2195  currState->req = NULL;
2196  currState->tc = NULL;
2197  currState->delayed = false;
2198  delete currState;
2199  } else {
     // Delayed again: there must be a deeper level left to walk.
2200  if (curr_lookup_level >= LookupLevel::Num_ArmLookupLevel - 1)
2201  panic("Max. number of lookups already reached in table walk\n");
2202  // Need to perform additional lookups
2204  }
2205  currState = NULL;
2206 }
2207 
2208 
// Start the next queued walk if any; otherwise let a pending drain
// complete. (The signature and the dequeue line are elided here.)
2209 void
2211 {
2212  if (pendingQueue.size())
2214  else
2215  completeDrain();
2216 }
2217 
// Fetch a page-table descriptor into 'data'. If the walk has a stage-2
// component, descAddr is an IPA and is first translated (timed or
// untimed); otherwise the read goes straight out the walker port
// (timing / atomic / functional). Returns true when the walk is timing,
// i.e. the result arrives later via 'event' and the walk state was
// parked on stateQueues[queueIndex] (if queueIndex >= 0).
// NOTE(review): lines 2238 and 2246-2247 of the original (Stage2Walk
// constructor and readDataUntimed arguments) are elided from this listing.
2218 bool
2219 TableWalker::fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes,
2220  Request::Flags flags, int queueIndex, Event *event,
2221  void (TableWalker::*doDescriptor)())
2222 {
2223  bool isTiming = currState->timing;
2224 
2225  DPRINTF(PageTableWalker,
2226  "Fetching descriptor at address: 0x%x stage2Req: %d\n",
2227  descAddr, currState->stage2Req);
2228 
2229  // If this translation has a stage 2 then we know descAddr is an IPA and
2230  // needs to be translated before we can access the page table. Do that
2231  // check here.
2232  if (currState->stage2Req) {
2233  Fault fault;
2234 
2235  if (isTiming) {
2236  auto *tran = new
2237  Stage2Walk(*this, data, event, currState->vaddr,
2239  currState->stage2Tran = tran;
2240  readDataTimed(currState->tc, descAddr, tran, numBytes, flags);
2241  fault = tran->fault;
2242  } else {
2243  fault = readDataUntimed(currState->tc,
2244  currState->vaddr, descAddr, data, numBytes, flags,
2245  currState->mode,
2248  }
2249 
2250  if (fault != NoFault) {
2251  currState->fault = fault;
2252  }
2253  if (isTiming) {
2254  if (queueIndex >= 0) {
2255  DPRINTF(PageTableWalker, "Adding to walker fifo: "
2256  "queue size before adding: %d\n",
2257  stateQueues[queueIndex].size());
2258  stateQueues[queueIndex].push_back(currState);
2259  currState = NULL;
2260  }
2261  } else {
     // Untimed: the data is already in place, process it now.
2262  (this->*doDescriptor)();
2263  }
2264  } else {
2265  if (isTiming) {
2266  port->sendTimingReq(descAddr, numBytes, data, flags,
2267  currState->tc->getCpuPtr()->clockPeriod(), event);
2268 
2269  if (queueIndex >= 0) {
2270  DPRINTF(PageTableWalker, "Adding to walker fifo: "
2271  "queue size before adding: %d\n",
2272  stateQueues[queueIndex].size());
2273  stateQueues[queueIndex].push_back(currState);
2274  currState = NULL;
2275  }
2276  } else if (!currState->functional) {
2277  port->sendAtomicReq(descAddr, numBytes, data, flags,
2278  currState->tc->getCpuPtr()->clockPeriod());
2279 
2280  (this->*doDescriptor)();
2281  } else {
2282  port->sendFunctionalReq(descAddr, numBytes, data, flags);
2283  (this->*doDescriptor)();
2284  }
2285  }
2286  return (isTiming);
2287 }
2288 
// Insert a partial (intermediate, table-level) walk-cache entry for the
// given long descriptor: it records the next-level table address and the
// hierarchical permissions accumulated so far, so later walks can skip
// the upper levels. (The signature line is elided from this listing.)
2289 void
2291 {
2292  const bool have_security = release->has(ArmExtension::SECURITY);
2293  TlbEntry te;
2294 
2295  // Create and fill a new page table entry
2296  te.valid = true;
2297  te.longDescFormat = true;
     // Marked partial: this entry resumes a walk, it is not a final
     // translation.
2298  te.partial = true;
2299  te.global = false;
2300  te.isHyp = currState->isHyp;
2301  te.asid = currState->asid;
2302  te.vmid = currState->vmid;
2303  te.N = descriptor.offsetBits();
2304  te.vpn = currState->vaddr >> te.N;
2305  te.size = (1ULL << te.N) - 1;
     // pfn holds the NEXT level table address for partial entries.
2306  te.pfn = descriptor.nextTableAddr();
2307  te.domain = descriptor.domain();
2308  te.lookupLevel = descriptor.lookupLevel;
2309  te.ns = !descriptor.secure(have_security, currState);
2310  te.nstid = !currState->isSecure;
2311  te.type = TypeTLB::unified;
2312 
2313  if (currState->aarch64)
2314  te.el = currState->el;
2315  else
2316  te.el = EL1;
2317 
     // Hierarchical (table) attributes accumulated along the walk.
2318  te.xn = currState->xnTable;
2319  te.pxn = currState->pxnTable;
2320  te.ap = (currState->rwTable << 1) | (currState->userTable);
2321 
2322  // Debug output
2323  DPRINTF(TLB, descriptor.dbgHeader().c_str());
2324  DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
2325  te.N, te.pfn, te.size, te.global, te.valid);
2326  DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
2327  "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
2328  te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp,
2329  te.nonCacheable, te.ns);
2330  DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
2331  descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
2332  descriptor.getRawData());
2333 
2334  // Insert the entry into the TLBs
2335  tlb->multiInsert(te);
2336 }
2337 
// Insert the final (leaf) translation described by 'descriptor' into the
// TLBs, decoding permissions and memory attributes for the short- or
// long-descriptor format as appropriate.
// NOTE(review): original lines 2367-2368 are elided from this listing.
2338 void
2339 TableWalker::insertTableEntry(DescriptorBase &descriptor, bool long_descriptor)
2340 {
2341  const bool have_security = release->has(ArmExtension::SECURITY);
2342  TlbEntry te;
2343 
2344  // Create and fill a new page table entry
2345  te.valid = true;
2346  te.longDescFormat = long_descriptor;
2347  te.isHyp = currState->isHyp;
2348  te.asid = currState->asid;
2349  te.vmid = currState->vmid;
2350  te.N = descriptor.offsetBits();
2351  te.vpn = currState->vaddr >> te.N;
2352  te.size = (1<<te.N) - 1;
2353  te.pfn = descriptor.pfn();
2354  te.domain = descriptor.domain();
2355  te.lookupLevel = descriptor.lookupLevel;
2356  te.ns = !descriptor.secure(have_security, currState);
2357  te.nstid = !currState->isSecure;
2358  te.xn = descriptor.xn();
     // Tag the entry by access type so instruction/data lookups filter.
2359  te.type = currState->mode == BaseMMU::Execute ?
2360  TypeTLB::instruction : TypeTLB::data;
2361 
2362  if (currState->aarch64)
2363  te.el = currState->el;
2364  else
2365  te.el = EL1;
2366 
2369 
2370  // ASID has no meaning for stage 2 TLB entries, so mark all stage 2 entries
2371  // as global
2372  te.global = descriptor.global(currState) || isStage2;
2373  if (long_descriptor) {
2374  LongDescriptor l_descriptor =
2375  dynamic_cast<LongDescriptor &>(descriptor);
2376 
     // Merge the leaf permissions with the hierarchical (table) ones
     // accumulated along the walk.
2377  te.xn |= currState->xnTable;
2378  te.pxn = currState->pxnTable || l_descriptor.pxn();
2379  if (isStage2) {
2380  // this is actually the HAP field, but its stored in the same bit
2381  // possitions as the AP field in a stage 1 translation.
2382  te.hap = l_descriptor.ap();
2383  } else {
2384  te.ap = ((!currState->rwTable || descriptor.ap() >> 1) << 1) |
2385  (currState->userTable && (descriptor.ap() & 0x1));
2386  }
2387  if (currState->aarch64)
2388  memAttrsAArch64(currState->tc, te, l_descriptor);
2389  else
2390  memAttrsLPAE(currState->tc, te, l_descriptor);
2391  } else {
2392  te.ap = descriptor.ap();
2393  memAttrs(currState->tc, te, currState->sctlr, descriptor.texcb(),
2394  descriptor.shareable());
2395  }
2396 
2397  // Debug output
2398  DPRINTF(TLB, descriptor.dbgHeader().c_str());
2399  DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
2400  te.N, te.pfn, te.size, te.global, te.valid);
2401  DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
2402  "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
2403  te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp,
2404  te.nonCacheable, te.ns);
2405  DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
2406  descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
2407  descriptor.getRawData());
2408 
2409  // Insert the entry into the TLBs
2410  tlb->multiInsert(te);
     // Non-timing walks are done with the thread context and request.
2411  if (!currState->timing) {
2412  currState->tc = NULL;
2413  currState->req = NULL;
2414  }
2415 }
2416 
// Checked conversion from a raw integer to a LookupLevel; panics on any
// value other than L1/L2/L3. (The return-type line is elided here.)
2418 TableWalker::toLookupLevel(uint8_t lookup_level_as_int)
2419 {
2420  switch (lookup_level_as_int) {
2421  case LookupLevel::L1:
2422  return LookupLevel::L1;
2423  case LookupLevel::L2:
2424  return LookupLevel::L2;
2425  case LookupLevel::L3:
2426  return LookupLevel::L3;
2427  default:
2428  panic("Invalid lookup level conversion");
2429  }
2430 }
2431 
2432 /* this method keeps track of the table walker queue's residency, so
2433  * needs to be called whenever requests start and complete. */
2434 void
2436 {
     // Count queued walks; the in-flight currState (when not also at the
     // queue head) is counted as one more.
2437  unsigned n = pendingQueue.size();
2438  if ((currState != NULL) && (currState != pendingQueue.front())) {
2439  ++n;
2440  }
2441 
     // Only record when the residency actually changed; the (elided)
     // line presumably accumulates time-weighted occupancy stats.
2442  if (n != pendingReqs) {
2443  Tick now = curTick();
2445  pendingReqs = n;
2446  pendingChangeTick = now;
2447  }
2448 }
2449 
// Hook for the TLB/translation test harness: forward the physical address,
// size, domain and lookup level of a walk access to the MMU's testWalk,
// together with the walker state's VA, security state and access mode.
// NOTE(review): the signature line (2451) is elided in this listing.
2450 Fault
2452  LookupLevel lookup_level, bool stage2)
2453 {
2454  return mmu->testWalk(pa, size, currState->vaddr, currState->isSecure,
2455  currState->mode, domain, lookup_level, stage2);
2456 }
2457 
2458 
// Map a page-size exponent N (page size == 2^N bytes) onto the fixed bin
// index used by stats.pageSizes; the bins match the subnames registered in
// TableWalkerStats (4KiB ... 4TiB). Unknown exponents panic.
2459 uint8_t
2461 {
2462  /* for stats.pageSizes */
2463  switch(N) {
2464  case 12: return 0; // 4K
2465  case 14: return 1; // 16K (using 16K granule in v8-64)
2466  case 16: return 2; // 64K
2467  case 20: return 3; // 1M
2468  case 21: return 4; // 2M-LPAE
2469  case 24: return 5; // 16M
2470  case 25: return 6; // 32M (using 16K granule in v8-64)
2471  case 29: return 7; // 512M (using 64K granule in v8-64)
2472  case 30: return 8; // 1G-LPAE
2473  case 42: return 9; // 4TB (2^42; matches pageSizes.subname(9, "4TiB"))
2474  default:
2475  panic("unknown page size");
2476  return 255; // unreachable: panic() does not return
2477  }
2478 }
2479 
// Fetch a descriptor with non-timing (atomic or functional) accesses:
// first translate desc_addr through the stage 2 MMU, then, if that
// succeeded and the request is allowed to access memory, read num_bytes
// into 'data' through the walker port. Any fault raised along the way is
// annotated as having occurred during a stage 1 page table walk (S1PTW)
// and tagged with the original VA.
// NOTE(review): the signature line (2481) is elided in this listing.
2480 Fault
2482  uint8_t *data, int num_bytes, Request::Flags flags, BaseMMU::Mode mode,
2483  MMU::ArmTranslationType tran_type, bool functional)
2484 {
2485  Fault fault;
2486 
2487  // translate to physical address using the second stage MMU
2488  auto req = std::make_shared<Request>();
2489  req->setVirt(desc_addr, num_bytes, flags | Request::PT_WALK,
2490  requestorId, 0);
2491 
2492  if (functional) {
2493  fault = mmu->translateFunctional(req, tc, BaseMMU::Read,
2494  tran_type, true);
2495  } else {
2496  fault = mmu->translateAtomic(req, tc, BaseMMU::Read,
2497  tran_type, true);
2498  }
2499 
2500  // Now do the access.
2501  if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
2502  Packet pkt = Packet(req, MemCmd::ReadReq);
2503  pkt.dataStatic(data);
2504  if (functional) {
2505  port->sendFunctional(&pkt);
2506  } else {
2507  port->sendAtomic(&pkt);
2508  }
2509  assert(!pkt.isError());
2510  }
2511 
2512  // If there was a fault annotate it with the flag saying the fault occurred
2513  // while doing a translation for a stage 1 page table walk.
2514  if (fault != NoFault) {
2515  ArmFault *arm_fault = reinterpret_cast<ArmFault *>(fault.get());
2516  arm_fault->annotate(ArmFault::S1PTW, true);
2517  arm_fault->annotate(ArmFault::OVA, vaddr);
2518  }
2519  return fault;
2520 }
2521 
// Timing-mode counterpart of readDataUntimed: kick off a stage 2 timing
// translation for the descriptor address; the Stage2Walk translation
// object issues the actual memory read from its finish() callback.
// NOTE(review): lines 2523/2525 (name and trailing flags parameter) are
// elided in this listing.
2522 void
2524  Stage2Walk *translation, int num_bytes,
2526 {
2527  // translate to physical address using the second stage MMU
2528  translation->setVirt(
2529  desc_addr, num_bytes, flags | Request::PT_WALK, requestorId);
2530  translation->translateTiming(tc);
2531 }
2532 
// Stage2Walk constructor: capture the destination buffer, completion
// event, owning walker, original VA, access mode and translation type,
// and allocate the Request that will carry the stage 2 translation.
// numBytes starts at 0 and is set later via setVirt().
// NOTE(review): the constructor name line (2533) is elided in this listing.
2534  uint8_t *_data, Event *_event, Addr vaddr, BaseMMU::Mode _mode,
2535  MMU::ArmTranslationType tran_type)
2536  : data(_data), numBytes(0), event(_event), parent(_parent),
2537  oVAddr(vaddr), mode(_mode), tranType(tran_type), fault(NoFault)
2538 {
2539  req = std::make_shared<Request>();
2540 }
2541 
// Completion callback for the stage 2 timing translation started by
// readDataTimed. On success, send the actual timing read of the
// descriptor through the walker port; on failure (or a NO_ACCESS
// request), annotate the fault as a stage 1 page table walk fault and
// fire the completion event directly so the walk can observe the fault.
// NOTE(review): lines 2543/2545 of the signature are elided in this listing.
2542 void
2544  const RequestPtr &req,
2546 {
2547  fault = _fault;
2548 
2549  // If there was a fault annotate it with the flag saying the fault occurred
2550  // while doing a translation for a stage 1 page table walk.
2551  if (fault != NoFault) {
2552  ArmFault *arm_fault = reinterpret_cast<ArmFault *>(fault.get());
2553  arm_fault->annotate(ArmFault::S1PTW, true);
2554  arm_fault->annotate(ArmFault::OVA, oVAddr);
2555  }
2556 
2557  if (_fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
 // Translation succeeded: issue the timing descriptor read at the
 // translated physical address.
2558  parent.getTableWalkerPort().sendTimingReq(
2559  req->getPaddr(), numBytes, data, req->getFlags(),
2560  tc->getCpuPtr()->clockPeriod(), event);
2561  } else {
2562  // We can't do the DMA access as there's been a problem, so tell the
2563  // event we're done
2564  event->process();
2565  }
2566 }
2567 
// Start the stage 2 timing translation of the descriptor request; the
// MMU calls back into this object's finish() when it completes.
// NOTE(review): the name line (2569) is elided in this listing.
2568 void
2570 {
2571  parent.mmu->translateTiming(req, tc, this, mode, tranType, true);
2572 }
2573 
// TableWalkerStats constructor: register every walker statistic with the
// stats framework (via ADD_STAT) and then size the vector/histogram stats
// and attach human-readable subnames for each bin.
// NOTE(review): the constructor signature line (2574) and the `.flags(...)`
// formatting lines between the `.init(...)` calls are elided in this
// listing (the blank numbered lines below) — confirm against upstream.
2575  : statistics::Group(parent),
2576  ADD_STAT(walks, statistics::units::Count::get(),
2577  "Table walker walks requested"),
2578  ADD_STAT(walksShortDescriptor, statistics::units::Count::get(),
2579  "Table walker walks initiated with short descriptors"),
2580  ADD_STAT(walksLongDescriptor, statistics::units::Count::get(),
2581  "Table walker walks initiated with long descriptors"),
2582  ADD_STAT(walksShortTerminatedAtLevel, statistics::units::Count::get(),
2583  "Level at which table walker walks with short descriptors "
2584  "terminate"),
2585  ADD_STAT(walksLongTerminatedAtLevel, statistics::units::Count::get(),
2586  "Level at which table walker walks with long descriptors "
2587  "terminate"),
2588  ADD_STAT(squashedBefore, statistics::units::Count::get(),
2589  "Table walks squashed before starting"),
2590  ADD_STAT(squashedAfter, statistics::units::Count::get(),
2591  "Table walks squashed after completion"),
2592  ADD_STAT(walkWaitTime, statistics::units::Tick::get(),
2593  "Table walker wait (enqueue to first request) latency"),
2594  ADD_STAT(walkServiceTime, statistics::units::Tick::get(),
2595  "Table walker service (enqueue to completion) latency"),
2596  ADD_STAT(pendingWalks, statistics::units::Tick::get(),
2597  "Table walker pending requests distribution"),
2598  ADD_STAT(pageSizes, statistics::units::Count::get(),
2599  "Table walker page sizes translated"),
2600  ADD_STAT(requestOrigin, statistics::units::Count::get(),
2601  "Table walker requests started/completed, data/inst")
2602 {
2605 
2608 
 // Short-descriptor walks can terminate at L1 or L2 only.
2610  .init(2)
2612 
2613  walksShortTerminatedAtLevel.subname(0, "Level1");
2614  walksShortTerminatedAtLevel.subname(1, "Level2");
2615 
 // Long-descriptor walks can terminate at any of L0..L3.
2617  .init(4)
2619  walksLongTerminatedAtLevel.subname(0, "Level0");
2620  walksLongTerminatedAtLevel.subname(1, "Level1");
2621  walksLongTerminatedAtLevel.subname(2, "Level2");
2622  walksLongTerminatedAtLevel.subname(3, "Level3");
2623 
2626 
2629 
 // 16-bucket latency histograms and pending-walk distribution.
2630  walkWaitTime
2631  .init(16)
2633 
2635  .init(16)
2637 
2638  pendingWalks
2639  .init(16)
2642 
 // One bin per supported page size; bin indices must agree with
 // pageSizeNtoStatBin().
2643  pageSizes // see DDI 0487A D4-1661
2644  .init(10)
2647  pageSizes.subname(0, "4KiB");
2648  pageSizes.subname(1, "16KiB");
2649  pageSizes.subname(2, "64KiB");
2650  pageSizes.subname(3, "1MiB");
2651  pageSizes.subname(4, "2MiB");
2652  pageSizes.subname(5, "16MiB");
2653  pageSizes.subname(6, "32MiB");
2654  pageSizes.subname(7, "512MiB");
2655  pageSizes.subname(8, "1GiB");
2656  pageSizes.subname(9, "4TiB");
2657 
2659  .init(2,2) // Instruction/Data, requests/completed
2661  requestOrigin.subname(0,"Requested");
2662  requestOrigin.subname(1,"Completed");
2663  requestOrigin.ysubname(0,"Data");
2664  requestOrigin.ysubname(1,"Inst");
2665 }
2666 
2667 } // namespace gem5
gem5::ArmISA::TableWalker::doL1DescEvent
EventFunctionWrapper doL1DescEvent
Definition: table_walker.hh:1125
gem5::ArmISA::TableWalker::testWalk
Fault testWalk(Addr pa, Addr size, TlbEntry::DomainType domain, LookupLevel lookup_level, bool stage2)
Definition: table_walker.cc:2451
gem5::curTick
Tick curTick()
The universal simulation clock.
Definition: cur_tick.hh:46
gem5::ArmISA::TableWalker::DescriptorBase::dbgHeader
virtual std::string dbgHeader() const =0
gem5::PortID
int16_t PortID
Port index/ID type, and a symbolic name for an invalid port id.
Definition: types.hh:245
gem5::BaseMMU::Translation::squashed
virtual bool squashed() const
This function is used by the page table walker to determine if it should translate the a pending requ...
Definition: mmu.hh:84
gem5::ArmISA::TableWalker::doLongDescriptorWrapper
void doLongDescriptorWrapper(LookupLevel curr_lookup_level)
Definition: table_walker.cc:2151
gem5::ArmISA::TableWalker::LookupLevel
enums::ArmLookupLevel LookupLevel
Definition: table_walker.hh:68
gem5::ArmISA::MISCREG_CPSR
@ MISCREG_CPSR
Definition: misc.hh:61
gem5::statistics::DataWrapVec2d::ysubname
Derived & ysubname(off_type index, const std::string &subname)
Definition: statistics.hh:490
gem5::ArmISA::TableWalker::WalkerState::isSecure
bool isSecure
If the access comes from the secure state.
Definition: table_walker.hh:869
gem5::SimObject::getPort
virtual Port & getPort(const std::string &if_name, PortID idx=InvalidPortID)
Get a port with a given name and index.
Definition: sim_object.cc:126
gem5::ArmISA::tlb
Bitfield< 59, 56 > tlb
Definition: misc_types.hh:92
gem5::ArmISA::MISCREG_VTTBR
@ MISCREG_VTTBR
Definition: misc.hh:449
gem5::BaseMMU::Read
@ Read
Definition: mmu.hh:56
gem5::ArmISA::TableWalker::LongDescEventByLevel
Event * LongDescEventByLevel[4]
Definition: table_walker.hh:1143
gem5::ArmISA::MISCREG_TTBR0_EL2
@ MISCREG_TTBR0_EL2
Definition: misc.hh:604
gem5::ArmISA::TableWalker::TableWalkerStats::walksLongDescriptor
statistics::Scalar walksLongDescriptor
Definition: table_walker.hh:1066
gem5::ArmISA::TableWalker::WalkerState::pxnTable
bool pxnTable
Definition: table_walker.hh:880
gem5::ThreadContext::readMiscReg
virtual RegVal readMiscReg(RegIndex misc_reg)=0
gem5::NoFault
constexpr decltype(nullptr) NoFault
Definition: types.hh:253
gem5::ArmISA::TableWalker::Port
Definition: table_walker.hh:939
gem5::ArmISA::TableWalker::WalkerState::walkEntry
TlbEntry walkEntry
Initial walk entry allowing to skip lookup levels.
Definition: table_walker.hh:818
gem5::ArmISA::ELIs64
bool ELIs64(ThreadContext *tc, ExceptionLevel el)
Definition: utility.cc:273
gem5::ArmISA::TableWalker::LongDescriptor::pxnTable
bool pxnTable() const
Is privileged execution allowed on subsequent lookup levels?
Definition: table_walker.hh:792
gem5::ArmISA::ArmFault::AddressSizeLL
@ AddressSizeLL
Definition: faults.hh:111
gem5::ArmISA::TableWalker::WalkerState::sctlr
SCTLR sctlr
Cached copy of the sctlr as it existed when translation began.
Definition: table_walker.hh:838
gem5::ArmISA::MISCREG_TTBR0_EL3
@ MISCREG_TTBR0_EL3
Definition: misc.hh:610
gem5::ArmISA::TableWalker::WalkerState::el
ExceptionLevel el
Current exception level.
Definition: table_walker.hh:809
gem5::ArmISA::TlbEntry::valid
bool valid
Definition: pagetable.hh:236
gem5::ArmISA::TableWalker::Port::sendAtomicReq
void sendAtomicReq(Addr desc_addr, int size, uint8_t *data, Request::Flags flag, Tick delay)
Definition: table_walker.cc:183
gem5::RegVal
uint64_t RegVal
Definition: types.hh:173
gem5::ArmISA::TableWalker::pendingChangeTick
Tick pendingChangeTick
Definition: table_walker.hh:1080
gem5::ArmISA::MISCREG_SCTLR_EL3
@ MISCREG_SCTLR_EL3
Definition: misc.hh:592
gem5::ArmISA::TableWalker::TableWalkerStats::walks
statistics::Scalar walks
Definition: table_walker.hh:1064
gem5::ArmISA::TableWalker::L2Descriptor::ap
uint8_t ap() const override
Three bit access protection flags.
Definition: table_walker.hh:354
gem5::ArmISA::TableWalker::WalkerState::longDesc
LongDescriptor longDesc
Long-format descriptor (LPAE and AArch64)
Definition: table_walker.hh:908
system.hh
gem5::ArmISA::TableWalker::doProcessEvent
EventFunctionWrapper doProcessEvent
Definition: table_walker.hh:1174
gem5::ArmISA::TableWalker::readDataTimed
void readDataTimed(ThreadContext *tc, Addr desc_addr, Stage2Walk *translation, int num_bytes, Request::Flags flags)
Definition: table_walker.cc:2523
gem5::ArmISA::TableWalker::completeDrain
void completeDrain()
Checks if all state is cleared and if so, completes drain.
Definition: table_walker.cc:243
gem5::ArmISA::TableWalker::memAttrsAArch64
void memAttrsAArch64(ThreadContext *tc, TlbEntry &te, LongDescriptor &lDescriptor)
Definition: table_walker.cc:1574
gem5::ArmISA::TableWalker::TableWalkerStats::walksLongTerminatedAtLevel
statistics::Vector walksLongTerminatedAtLevel
Definition: table_walker.hh:1068
gem5::ArmISA::MISCREG_TTBR0
@ MISCREG_TTBR0
Definition: misc.hh:255
data
const char data[]
Definition: circlebuf.test.cc:48
gem5::ArmISA::ArmFault::FaultSourceInvalid
@ FaultSourceInvalid
Definition: faults.hh:120
gem5::ArmISA::TableWalker::doL1LongDescriptorWrapper
void doL1LongDescriptorWrapper()
Definition: table_walker.cc:2133
gem5::X86ISA::L
Bitfield< 7, 0 > L
Definition: int.hh:61
gem5::ArmISA::TableWalker::pendingQueue
std::list< WalkerState * > pendingQueue
Queue of requests that have passed are waiting because the walker is currently busy.
Definition: table_walker.hh:1026
gem5::ArmISA::TableWalker::release
const ArmRelease * release
Cached copies of system-level properties.
Definition: table_walker.hh:1056
gem5::ArmISA::Grain64KB
@ Grain64KB
Definition: pagetable.hh:65
gem5::ArmISA::TableWalker::LongDescriptor::Block
@ Block
Definition: table_walker.hh:420
gem5::ArmISA::el
Bitfield< 3, 2 > el
Definition: misc_types.hh:73
gem5::ArmISA::TableWalker::WalkerState::vaddr
Addr vaddr
The virtual address that is being translated with tagging removed.
Definition: table_walker.hh:832
gem5::Drainable::drainState
DrainState drainState() const
Return the current drain state of an object.
Definition: drain.hh:324
gem5::ArmISA::TableWalker::requestorId
RequestorID requestorId
Requestor id assigned by the MMU.
Definition: table_walker.hh:1032
gem5::ArmISA::TableWalker::WalkerState::tcr
TCR tcr
Definition: table_walker.hh:850
gem5::ArmISA::TableWalker::L1Descriptor::Section
@ Section
Definition: table_walker.hh:108
gem5::ArmISA::TableWalker::Stage2Walk::req
RequestPtr req
Definition: table_walker.hh:981
gem5::ArmISA::MISCREG_TCR_EL2
@ MISCREG_TCR_EL2
Definition: misc.hh:605
gem5::ArmISA::TableWalker::getTableWalkerPort
Port & getTableWalkerPort()
Definition: table_walker.cc:104
warn_once
#define warn_once(...)
Definition: logging.hh:250
gem5::ArmISA::aarch64
Bitfield< 34 > aarch64
Definition: types.hh:81
gem5::ArmISA::TableWalker::nextWalk
void nextWalk(ThreadContext *tc)
Definition: table_walker.cc:2210
gem5::statistics::Vector2dBase::init
Derived & init(size_type _x, size_type _y)
Definition: statistics.hh:1174
gem5::QueuedRequestPort
The QueuedRequestPort combines two queues, a request queue and a snoop response queue,...
Definition: qport.hh:109
gem5::ArmISA::decodePhysAddrRange64
int decodePhysAddrRange64(uint8_t pa_enc)
Returns the n.
Definition: utility.cc:1293
gem5::ArmISA::getPageTableOps
const PageTableOps * getPageTableOps(GrainSize trans_granule)
Definition: pagetable.cc:476
gem5::ArmISA::TableWalker::LongDescriptor::paddr
Addr paddr() const
Return the physical address of the entry.
Definition: table_walker.hh:565
gem5::BaseMMU::Mode
Mode
Definition: mmu.hh:56
gem5::ArmISA::GrainMap_tg0
const GrainSize GrainMap_tg0[]
Definition: pagetable.cc:49
gem5::ArmISA::attr
attr
Definition: misc_types.hh:656
gem5::Flags::set
void set(Type mask)
Set all flag's bits matching the given mask.
Definition: flags.hh:116
gem5::Packet::req
RequestPtr req
A pointer to the original request.
Definition: packet.hh:374
gem5::ArmISA::asid
asid
Definition: misc_types.hh:618
gem5::BaseMMU::Write
@ Write
Definition: mmu.hh:56
gem5::ArmISA::TableWalker::WalkerState::userTable
bool userTable
Definition: table_walker.hh:878
gem5::ArmISA::domain
Bitfield< 7, 4 > domain
Definition: misc_types.hh:424
gem5::ArmISA::TlbEntry::DomainType::NoAccess
@ NoAccess
gem5::ArmISA::TableWalker::Stage2Walk
This translation class is used to trigger the data fetch once a timing translation returns the transl...
Definition: table_walker.hh:976
gem5::ArmISA::TableWalker::TableWalkerStats::requestOrigin
statistics::Vector2d requestOrigin
Definition: table_walker.hh:1076
gem5::ArmISA::TlbEntry::pfn
Addr pfn
Definition: pagetable.hh:210
gem5::Request::NO_ACCESS
@ NO_ACCESS
The request should not cause a memory access.
Definition: request.hh:146
gem5::ArmISA::f
Bitfield< 6 > f
Definition: misc_types.hh:68
gem5::ArmISA::TableWalker::DescriptorBase::xn
virtual bool xn() const =0
pagetable.hh
gem5::RequestPort::sendAtomic
Tick sendAtomic(PacketPtr pkt)
Send an atomic request packet, where the data is moved and the state is updated in zero time,...
Definition: port.hh:464
gem5::ArmISA::TableWalker::LongDescriptor::secure
bool secure(bool have_security, WalkerState *currState) const override
Returns true if this entry targets the secure physical address map.
Definition: table_walker.hh:472
gem5::ArmISA::TableWalker::LongDescriptor::nextDescAddr
Addr nextDescAddr(Addr va) const
Return the address of the next descriptor.
Definition: table_walker.hh:599
gem5::ArmISA::ArmFault::LpaeTran
@ LpaeTran
Definition: faults.hh:152
gem5::MipsISA::event
Bitfield< 10, 5 > event
Definition: pra_constants.hh:300
gem5::statistics::nozero
const FlagsType nozero
Don't print if this is zero.
Definition: info.hh:68
gem5::ArmISA::MISCREG_TTBR1_EL1
@ MISCREG_TTBR1_EL1
Definition: misc.hh:600
gem5::ArmISA::MISCREG_TTBCR
@ MISCREG_TTBCR
Definition: misc.hh:261
gem5::ArmISA::vmid_t
uint16_t vmid_t
Definition: types.hh:57
gem5::ArmISA::TableWalker::DescriptorBase::texcb
virtual uint8_t texcb() const
Definition: table_walker.hh:90
gem5::ArmISA::TableWalker::WalkerState::secureLookup
bool secureLookup
Helper variables used to implement hierarchical access permissions when the long-desc.
Definition: table_walker.hh:876
gem5::Request::SECURE
@ SECURE
The request targets the secure memory space.
Definition: request.hh:186
gem5::ArmISA::TableWalker::LongDescriptor::Table
@ Table
Definition: table_walker.hh:419
gem5::ArmISA::TableWalker::TableWalkerStats::squashedAfter
statistics::Scalar squashedAfter
Definition: table_walker.hh:1070
gem5::Packet::cacheResponding
bool cacheResponding() const
Definition: packet.hh:655
gem5::ArmISA::TLB
Definition: tlb.hh:115
gem5::statistics::DataWrapVec::subname
Derived & subname(off_type index, const std::string &name)
Set the subfield name for the given index, and marks this stat to print at the end of simulation.
Definition: statistics.hh:402
gem5::ArmISA::TableWalker::L1Descriptor::data
uint32_t data
The raw bits of the entry.
Definition: table_walker.hh:113
gem5::ArmISA::MISCREG_TCR_EL3
@ MISCREG_TCR_EL3
Definition: misc.hh:611
gem5::ArmISA::EL1
@ EL1
Definition: types.hh:274
gem5::Packet::hasSharers
bool hasSharers() const
Definition: packet.hh:682
gem5::ArmISA::GrainMap_tg1
const GrainSize GrainMap_tg1[]
Definition: pagetable.cc:51
gem5::ArmISA::byteOrder
ByteOrder byteOrder(const ThreadContext *tc)
Definition: utility.hh:370
gem5::ArmISA::MISCREG_MAIR_EL3
@ MISCREG_MAIR_EL3
Definition: misc.hh:733
gem5::ArmISA::TableWalker::L1Descriptor::Ignore
@ Ignore
Definition: table_walker.hh:106
gem5::EventManager::schedule
void schedule(Event &event, Tick when)
Definition: eventq.hh:1019
tlb.hh
gem5::ArmISA::TableWalker::L2Descriptor::data
uint32_t data
The raw bits of the entry.
Definition: table_walker.hh:275
gem5::ArmISA::TlbEntry::DomainType::Client
@ Client
gem5::statistics::nonan
const FlagsType nonan
Don't print if this is NAN.
Definition: info.hh:70
gem5::ArmISA::TableWalker::fetchDescriptor
bool fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes, Request::Flags flags, int queueIndex, Event *event, void(TableWalker::*doDescriptor)())
Definition: table_walker.cc:2219
gem5::ArmISA::MISCREG_MAIR_EL1
@ MISCREG_MAIR_EL1
Definition: misc.hh:727
gem5::ArmISA::TableWalker::LongDescriptor::ap
uint8_t ap() const override
2-bit access protection flags
Definition: table_walker.hh:678
gem5::ArmISA::TableWalker::TableWalkerStats::walkServiceTime
statistics::Histogram walkServiceTime
Definition: table_walker.hh:1072
gem5::ArmISA::TableWalker::Port::handleResp
void handleResp(TableWalkerState *state, Addr addr, Addr size, Tick delay=0)
Definition: table_walker.cc:233
gem5::mbits
constexpr T mbits(T val, unsigned first, unsigned last)
Mask off the given bits in place like bits() but without shifting.
Definition: bitfield.hh:103
gem5::ArmISA::TlbEntry::MemoryType::Normal
@ Normal
gem5::ArmISA::TLB::multiInsert
void multiInsert(TlbEntry &pte)
Insert a PTE in the current TLB and in the higher levels.
Definition: tlb.cc:270
gem5::ArmISA::TableWalker::Stage2Walk::setVirt
void setVirt(Addr vaddr, int size, Request::Flags flags, int requestorId)
Definition: table_walker.hh:1001
system.hh
gem5::ArmISA::TableWalker::WalkerState::stage2Tran
BaseMMU::Translation * stage2Tran
A pointer to the stage 2 translation that's in progress.
Definition: table_walker.hh:889
gem5::ArmISA::TableWalker::walk
Fault walk(const RequestPtr &req, ThreadContext *tc, uint16_t asid, vmid_t _vmid, bool hyp, BaseMMU::Mode mode, BaseMMU::Translation *_trans, bool timing, bool functional, bool secure, MMU::ArmTranslationType tran_type, bool stage2, const TlbEntry *walk_entry)
Definition: table_walker.cc:289
gem5::ArmISA::MISCREG_SCTLR
@ MISCREG_SCTLR
Definition: misc.hh:236
gem5::ArmISA::i
Bitfield< 7 > i
Definition: misc_types.hh:67
table_walker.hh
gem5::ArmISA::TableWalker::WalkerState::vmid
vmid_t vmid
Definition: table_walker.hh:822
gem5::ArmISA::TableWalker::WalkerState::vaddr_tainted
Addr vaddr_tainted
The virtual address that is being translated.
Definition: table_walker.hh:835
gem5::ArmISA::Grain4KB
@ Grain4KB
Definition: pagetable.hh:63
gem5::ArmISA::MMU::testWalk
Fault testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode, TlbEntry::DomainType domain, LookupLevel lookup_level, bool stage2)
Definition: mmu.cc:1625
gem5::ArmISA::TableWalker::WalkerState::tranType
MMU::ArmTranslationType tranType
The translation type that has been requested.
Definition: table_walker.hh:901
gem5::ArmISA::hpd
Bitfield< 24 > hpd
Definition: misc_types.hh:534
gem5::ArmISA::TlbEntry
Definition: pagetable.hh:165
gem5::statistics::DistBase::sample
void sample(const U &v, int n=1)
Add a value to the distribtion n times.
Definition: statistics.hh:1328
gem5::ArmISA::TableWalker::Port::Port
Port(TableWalker *_walker, RequestorID id)
Definition: table_walker.cc:141
gem5::ArmISA::TlbEntry::DomainType
DomainType
Definition: pagetable.hh:177
gem5::ArmISA::TableWalker::getPort
gem5::Port & getPort(const std::string &if_name, PortID idx=InvalidPortID) override
Get a port with a given name and index.
Definition: table_walker.cc:110
gem5::ArmISA::MISCREG_VTCR_EL2
@ MISCREG_VTCR_EL2
Definition: misc.hh:607
gem5::statistics::dist
const FlagsType dist
Print the distribution.
Definition: info.hh:66
gem5::BaseMMU::Execute
@ Execute
Definition: mmu.hh:56
gem5::BaseMMU
Definition: mmu.hh:53
gem5::ArmISA::TableWalker::L1Descriptor::ap
uint8_t ap() const override
Three bit access protection flags.
Definition: table_walker.hh:199
gem5::ArmISA::ArmFault::VmsaTran
@ VmsaTran
Definition: faults.hh:153
gem5::ArmISA::TableWalker::COMPLETED
static const unsigned COMPLETED
Definition: table_walker.hh:1083
gem5::ArmISA::TableWalker::WalkerState::functional
bool functional
If the atomic mode should be functional.
Definition: table_walker.hh:895
gem5::ArmISA::TableWalker::TableWalkerStats::walksShortDescriptor
statistics::Scalar walksShortDescriptor
Definition: table_walker.hh:1065
gem5::Cycles
Cycles is a wrapper class for representing cycle counts, i.e.
Definition: types.hh:78
gem5::ArmISA::TableWalker::LongDescriptor::nextTableAddr
Addr nextTableAddr() const
Return the address of the next page table.
Definition: table_walker.hh:581
gem5::ArmISA::TableWalker::REQUESTED
static const unsigned REQUESTED
Definition: table_walker.hh:1082
gem5::ArmISA::MISCREG_HTCR
@ MISCREG_HTCR
Definition: misc.hh:264
gem5::statistics::pdf
const FlagsType pdf
Print the percent of the total that this entry represents.
Definition: info.hh:62
gem5::ArmISA::purifyTaggedAddr
Addr purifyTaggedAddr(Addr addr, ThreadContext *tc, ExceptionLevel el, TCR tcr, bool is_instr)
Removes the tag from tagged addresses if that mode is enabled.
Definition: utility.cc:470
gem5::ArmISA::MISCREG_MAIR_EL2
@ MISCREG_MAIR_EL2
Definition: misc.hh:731
gem5::ArmISA::TableWalker::doL1Descriptor
void doL1Descriptor()
Definition: table_walker.cc:1674
gem5::ArmISA::TableWalker::TableWalkerStats::TableWalkerStats
TableWalkerStats(statistics::Group *parent)
Definition: table_walker.cc:2574
gem5::ArmISA::TableWalker::Port::handleRespPacket
void handleRespPacket(PacketPtr pkt, Tick delay=0)
Definition: table_walker.cc:218
gem5::ArmISA::TableWalker::WalkerState::asid
uint16_t asid
ASID that we're servicing the request under.
Definition: table_walker.hh:821
gem5::Packet::dataStatic
void dataStatic(T *p)
Set the data pointer to the following value that should not be freed.
Definition: packet.hh:1147
mmu.hh
gem5::ArmISA::TableWalker::LongDescriptor::xnTable
bool xnTable() const
Is execution allowed on subsequent lookup levels?
Definition: table_walker.hh:784
gem5::ArmISA::MISCREG_VTCR
@ MISCREG_VTCR
Definition: misc.hh:265
gem5::ArmISA::TableWalker::L1Descriptor::PageTable
@ PageTable
Definition: table_walker.hh:107
gem5::ArmRelease::has
bool has(ArmExtension ext) const
Definition: system.hh:75
gem5::ArmISA::pa
Bitfield< 39, 12 > pa
Definition: misc_types.hh:657
gem5::ArmISA::TlbEntry::MemoryType::Device
@ Device
gem5::ArmISA::TableWalker::LongDescriptor::secureTable
bool secureTable() const
Whether the subsequent levels of lookup are secure.
Definition: table_walker.hh:751
gem5::Flags< FlagsType >
gem5::DrainState
DrainState
Object drain/handover states.
Definition: drain.hh:74
gem5::ArmISA::TableWalker::sctlr
SCTLR sctlr
Cached copy of the sctlr as it existed when translation began.
Definition: table_walker.hh:1044
gem5::ArmISA::TableWalker::WalkerState::isHyp
bool isHyp
Definition: table_walker.hh:823
gem5::ArmISA::TableWalker::LongDescriptor::data
uint64_t data
The raw bits of the entry.
Definition: table_walker.hh:430
gem5::RequestPort::sendFunctional
void sendFunctional(PacketPtr pkt) const
Send a functional request packet, where the data is instantly updated everywhere in the memory system...
Definition: port.hh:485
gem5::ArmISA::MISCREG_PRRR
@ MISCREG_PRRR
Definition: misc.hh:370
gem5::ArmISA::TableWalker::walkAddresses
std::tuple< Addr, Addr, LookupLevel > walkAddresses(Addr ttbr, GrainSize tg, int tsz, int pa_range)
Returns a tuple made of: 1) The address of the first page table 2) The address of the first descripto...
Definition: table_walker.cc:1193
gem5::ArmISA::TableWalker::LongDescriptor::sh
uint8_t sh() const
2-bit shareability field
Definition: table_walker.hh:670
gem5::Request::UNCACHEABLE
@ UNCACHEABLE
The request is to an uncacheable address.
Definition: request.hh:125
gem5::ArmISA::z
Bitfield< 11 > z
Definition: misc_types.hh:375
gem5::ArmISA::TableWalker::LongDescriptor::getRawData
uint64_t getRawData() const override
Definition: table_walker.hh:445
gem5::ThreadContext
ThreadContext is the external interface to all thread state for anything outside of the CPU.
Definition: thread_context.hh:94
gem5::ArmISA::MISCREG_SCTLR_EL1
@ MISCREG_SCTLR_EL1
Definition: misc.hh:580
gem5::Named::name
virtual std::string name() const
Definition: named.hh:47
gem5::ArmISA::TableWalker::Stage2Walk::finish
void finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc, BaseMMU::Mode mode)
Definition: table_walker.cc:2543
gem5::Fault
std::shared_ptr< FaultBase > Fault
Definition: types.hh:248
gem5::VegaISA::p
Bitfield< 54 > p
Definition: pagetable.hh:70
gem5::ArmISA::TableWalker::Port::sendFunctionalReq
void sendFunctionalReq(Addr desc_addr, int size, uint8_t *data, Request::Flags flag)
Definition: table_walker.cc:171
gem5::ArmISA::ArmFault::FaultSource
FaultSource
Generic fault source enums used to index into {short/long/aarch64}DescFaultSources[] to get the actua...
Definition: faults.hh:95
gem5::SimObject::params
const Params & params() const
Definition: sim_object.hh:176
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:186
gem5::ArmISA::TableWalker::L1Descriptor::domain
TlbEntry::DomainType domain() const override
Domain Client/Manager: ARM DDI 0406B: B3-31.
Definition: table_walker.hh:206
gem5::Event
Definition: eventq.hh:251
ADD_STAT
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Definition: group.hh:75
gem5::ArmISA::TableWalker::processWalkAArch64
Fault processWalkAArch64()
Definition: table_walker.cc:890
gem5::ArmISA::TableWalker::pageSizeNtoStatBin
static uint8_t pageSizeNtoStatBin(uint8_t N)
Definition: table_walker.cc:2460
gem5::ArmISA::TableWalker::WalkerState::delayed
bool delayed
Whether the response is delayed in timing mode due to additional lookups.
Definition: table_walker.hh:912
gem5::ArmISA::TableWalker::stateQueues
std::list< WalkerState * > stateQueues[LookupLevel::Num_ArmLookupLevel]
Queues of requests for all the different lookup levels.
Definition: table_walker.hh:1022
gem5::Packet
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:291
gem5::ArmISA::MMU::tranTypeEL
static ExceptionLevel tranTypeEL(CPSR cpsr, ArmTranslationType type)
Determine the EL to use for the purpose of a translation given a specific translation type.
Definition: mmu.cc:1386
gem5::ArmISA::TableWalker::doL0LongDescriptorWrapper
void doL0LongDescriptorWrapper()
Definition: table_walker.cc:2127
gem5::ArmISA::TableWalker::WalkerState::tc
ThreadContext * tc
Thread context that we're doing the walk for.
Definition: table_walker.hh:803
gem5::ArmISA::MISCREG_MAIR1
@ MISCREG_MAIR1
Definition: misc.hh:379
gem5::ArmISA::EL2
@ EL2
Definition: types.hh:275
gem5::ArmISA::TableWalker::processWalkLPAE
Fault processWalkLPAE()
Definition: table_walker.cc:691
gem5::ArmISA::TableWalker::WalkerState::physAddrRange
int physAddrRange
Current physical address range in bits.
Definition: table_walker.hh:812
gem5::probing::Packet
ProbePointArg< PacketInfo > Packet
Packet probe point.
Definition: mem.hh:109
gem5::ArmISA::MISCREG_SCTLR_EL2
@ MISCREG_SCTLR_EL2
Definition: misc.hh:585
gem5::Tick
uint64_t Tick
Tick count type.
Definition: types.hh:58
gem5::ArmISA::TableWalker::WalkerState::startTime
Tick startTime
Timestamp for calculating elapsed time in service (for stats)
Definition: table_walker.hh:917
gem5::ArmISA::TableWalker::L1Descriptor::l2Addr
Addr l2Addr() const
Address of L2 descriptor if it exists.
Definition: table_walker.hh:213
gem5::ArmISA::TableWalker::LongDescriptor::type
EntryType type() const
Return the descriptor type.
Definition: table_walker.hh:484
gem5::ArmISA::ReservedGrain
@ ReservedGrain
Definition: pagetable.hh:66
gem5::ArmISA::MISCREG_HTTBR
@ MISCREG_HTTBR
Definition: misc.hh:448
gem5::RequestPtr
std::shared_ptr< Request > RequestPtr
Definition: request.hh:92
gem5::ArmISA::s
Bitfield< 4 > s
Definition: misc_types.hh:562
gem5::ArmISA::TableWalker::DescriptorBase::pfn
virtual Addr pfn() const =0
gem5::ArmISA::TableWalker::insertPartialTableEntry
void insertPartialTableEntry(LongDescriptor &descriptor)
Definition: table_walker.cc:2290
gem5::ArmISA::TableWalker::doL2DescEvent
EventFunctionWrapper doL2DescEvent
Definition: table_walker.hh:1129
gem5::ArmISA::TableWalker::DescriptorBase::offsetBits
virtual uint8_t offsetBits() const =0
gem5::MemCmd::ReadReq
@ ReadReq
Definition: packet.hh:86
gem5::ArmISA::TableWalker::WalkerState::ttbcr
TTBCR ttbcr
Definition: table_walker.hh:849
gem5::ArmISA::TableWalker::TableWalkerStats::pageSizes
statistics::Vector pageSizes
Definition: table_walker.hh:1075
gem5::ArmISA::TableWalker::WalkerState::isFetch
bool isFetch
If the access is a fetch (for execution, and no-exec) must be checked?
Definition: table_walker.hh:866
gem5::ArmISA::TableWalker::port
Port * port
Port shared by the two table walkers.
Definition: table_walker.hh:1035
gem5::ArmISA::TableWalker::DescriptorBase::getRawData
virtual uint64_t getRawData() const =0
gem5::ArmISA::TableWalker::Stage2Walk::translateTiming
void translateTiming(ThreadContext *tc)
Definition: table_walker.cc:2569
gem5::ArmISA::TableWalker::doL2LongDescriptorWrapper
void doL2LongDescriptorWrapper()
Definition: table_walker.cc:2139
gem5::ArmISA::TableWalker::doL1DescriptorWrapper
void doL1DescriptorWrapper()
Definition: table_walker.cc:2021
gem5::ArmISA::TableWalker::WalkerState::req
RequestPtr req
Request that is currently being serviced.
Definition: table_walker.hh:815
gem5::htog
T htog(T value, ByteOrder guest_byte_order)
Definition: byteswap.hh:187
gem5::ArmISA::MMU::translateAtomic
Fault translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode) override
Definition: mmu.hh:245
gem5::ArmISA::TableWalker::processWalkWrapper
void processWalkWrapper()
Definition: table_walker.cc:484
gem5::ArmISA::TableWalker::LongDescriptor::physAddrRange
uint8_t physAddrRange
Definition: table_walker.hh:442
gem5::ArmISA::TableWalker::LongDescriptor::Invalid
@ Invalid
Definition: table_walker.hh:418
gem5::ArmISA::TableWalker::_physAddrRange
uint8_t _physAddrRange
Definition: table_walker.hh:1057
gem5::ArmISA::TableWalker::numSquashable
unsigned numSquashable
The number of walks belonging to squashed instructions that can be removed from the pendingQueue per ...
Definition: table_walker.hh:1053
gem5::ArmISA::te
Bitfield< 30 > te
Definition: misc_types.hh:338
gem5::ArmISA::TableWalker::WalkerState::transState
BaseMMU::Translation * transState
Translation state for delayed requests.
Definition: table_walker.hh:826
gem5::ArmISA::TableWalker::DescriptorBase::secure
virtual bool secure(bool have_security, WalkerState *currState) const =0
gem5::ArmISA::mask
Bitfield< 3, 0 > mask
Definition: pcstate.hh:63
gem5::ArmISA::TableWalker::LongDescriptor::grainSize
GrainSize grainSize
Width of the granule size in bits.
Definition: table_walker.hh:440
gem5::bits
constexpr T bits(T val, unsigned first, unsigned last)
Extract the bitfield from position 'first' to 'last' (inclusive) from 'val' and right justify it.
Definition: bitfield.hh:76
compiler.hh
gem5::ArmISA::TableWalker::DescriptorBase
Definition: table_walker.hh:73
flags
uint8_t flags
Definition: helpers.cc:66
gem5::ArmISA::TableWalker::WalkerState::tableWalker
TableWalker * tableWalker
Definition: table_walker.hh:914
gem5::ArmISA::EL3
@ EL3
Definition: types.hh:276
gem5::ArmISA::TableWalker::WalkerState::htcr
HTCR htcr
Cached copy of the htcr as it existed when translation began.
Definition: table_walker.hh:854
gem5::ArmISA::TableWalker::TableWalkerState
Definition: table_walker.hh:932
gem5::DrainState::Drained
@ Drained
Buffers drained, ready for serialization/handover.
gem5::ArmISA::TableWalker::WalkerState::isWrite
bool isWrite
If the access is a write.
Definition: table_walker.hh:863
gem5::ArmISA::TableWalker::L1Descriptor::Reserved
@ Reserved
Definition: table_walker.hh:109
gem5::ArmISA::TableWalker::checkVAddrSizeFaultAArch64
bool checkVAddrSizeFaultAArch64(Addr addr, int top_bit, GrainSize granule, int tsz, bool low_range)
Definition: table_walker.cc:865
gem5::ArmISA::TableWalker::WalkerState::vtcr
VTCR_t vtcr
Cached copy of the vtcr as it existed when translation began.
Definition: table_walker.hh:860
gem5::ArmISA::s1TranslationRegime
ExceptionLevel s1TranslationRegime(ThreadContext *tc, ExceptionLevel el)
Definition: utility.cc:236
faults.hh
gem5::ArmISA::TableWalker::L1Descriptor::type
EntryType type() const
Definition: table_walker.hh:144
gem5::ArmISA::TableWalker::generateLongDescFault
Fault generateLongDescFault(ArmFault::FaultSource src)
Definition: table_walker.cc:1780
gem5::Addr
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:147
gem5::Packet::isError
bool isError() const
Definition: packet.hh:619
gem5::ArmISA::TableWalker::LongDescriptor
Long-descriptor format (LPAE)
Definition: table_walker.hh:412
gem5::statistics::Histogram::init
Histogram & init(size_type size)
Set the parameters of this histogram.
Definition: statistics.hh:2154
gem5::Packet::senderState
SenderState * senderState
This packet's sender state.
Definition: packet.hh:542
gem5::ArmISA::MISCREG_NMRR
@ MISCREG_NMRR
Definition: misc.hh:376
gem5::ArmISA::TableWalker::LongDescriptor::Page
@ Page
Definition: table_walker.hh:421
name
const std::string & name()
Definition: trace.cc:49
gem5::ArmISA::MiscRegIndex
MiscRegIndex
Definition: misc.hh:59
gem5::ArmISA::TableWalker::TableWalkerStats::pendingWalks
statistics::Histogram pendingWalks
Definition: table_walker.hh:1074
gem5::ArmISA::TableWalker::doL2Descriptor
void doL2Descriptor()
Definition: table_walker.cc:1965
gem5::context_switch_task_id::DMA
@ DMA
Definition: request.hh:84
gem5::ArmISA::MISCREG_TTBR1
@ MISCREG_TTBR1
Definition: misc.hh:258
gem5::ArmISA::isSecure
bool isSecure(ThreadContext *tc)
Definition: utility.cc:73
gem5::ArmISA::TableWalker::pendingReqs
unsigned pendingReqs
Definition: table_walker.hh:1079
gem5::ArmISA::TableWalker::TableWalkerStats::walkWaitTime
statistics::Histogram walkWaitTime
Definition: table_walker.hh:1071
gem5::ClockedObject
The ClockedObject class extends the SimObject with a clock and accessor functions to relate ticks to ...
Definition: clocked_object.hh:234
gem5::Clocked::clockEdge
Tick clockEdge(Cycles cycles=Cycles(0)) const
Determine the tick when a cycle begins, by default the current one, but the argument also enables the...
Definition: clocked_object.hh:177
gem5::ArmISA::TableWalker::DescriptorBase::lookupLevel
LookupLevel lookupLevel
Current lookup level for this descriptor.
Definition: table_walker.hh:79
gem5::ArmISA::TableWalker::DescriptorBase::global
virtual bool global(WalkerState *currState) const =0
gem5::ArmISA::MISCREG_MAIR0
@ MISCREG_MAIR0
Definition: misc.hh:373
gem5::ArmISA::sh
Bitfield< 8, 7 > sh
Definition: misc_types.hh:661
gem5::ArmISA::TableWalker::L1Descriptor::supersection
bool supersection() const
Is the page a Supersection (16 MiB)?
Definition: table_walker.hh:151
gem5::ArmISA::TableWalker::LongDescriptor::aarch64
bool aarch64
True if the current lookup is performed in AArch64 state.
Definition: table_walker.hh:437
gem5::ArmISA::ArmFault
Definition: faults.hh:64
gem5::X86ISA::reg
Bitfield< 5, 3 > reg
Definition: types.hh:92
gem5::ArmISA::TableWalker::WalkerState::xnTable
bool xnTable
Definition: table_walker.hh:879
gem5::ArmISA::TableWalker::LongDescriptor::af
bool af() const
Returns true if the access flag (AF) is set.
Definition: table_walker.hh:662
gem5::Drainable::signalDrainDone
void signalDrainDone() const
Signal that an object is drained.
Definition: drain.hh:305
gem5::FullSystem
bool FullSystem
The FullSystem variable can be used to determine the current mode of simulation.
Definition: root.cc:220
gem5::ArmISA::TableWalker::WalkerState::stage2Req
bool stage2Req
Flag indicating if a second stage of lookup is required.
Definition: table_walker.hh:886
gem5::BaseMMU::Translation
Definition: mmu.hh:58
gem5::ArmISA::MMU::translateTiming
void translateTiming(const RequestPtr &req, ThreadContext *tc, Translation *translation, Mode mode) override
Definition: mmu.hh:256
gem5::ArmISA::TableWalker::Port::sendTimingReq
void sendTimingReq(Addr desc_addr, int size, uint8_t *data, Request::Flags flag, Tick delay, Event *event)
Definition: table_walker.cc:195
warn_if
#define warn_if(cond,...)
Conditional warning macro that checks the supplied condition and only prints a warning if the conditi...
Definition: logging.hh:273
state
atomic_var_t state
Definition: helpers.cc:188
gem5::ArmISA::TableWalker::readDataUntimed
Fault readDataUntimed(ThreadContext *tc, Addr vaddr, Addr desc_addr, uint8_t *data, int num_bytes, Request::Flags flags, BaseMMU::Mode mode, MMU::ArmTranslationType tran_type, bool functional)
Definition: table_walker.cc:2481
gem5::ArmISA::MMU::ArmTranslationType
ArmTranslationType
Definition: mmu.hh:114
gem5::ArmISA::EL0
@ EL0
Definition: types.hh:273
panic_if
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
Definition: logging.hh:204
gem5::ArmISA::TableWalker::LongDescriptor::rwTable
uint8_t rwTable() const
R/W protection flag for subsequent levels of lookup.
Definition: table_walker.hh:767
gem5::ArmISA::TlbEntry::MemoryType::StronglyOrdered
@ StronglyOrdered
gem5::ArmISA::TableWalker
Definition: table_walker.hh:66
gem5::ArmISA::MISCREG_HCR
@ MISCREG_HCR
Definition: misc.hh:249
gem5::ArmISA::TableWalker::WalkerState::rwTable
bool rwTable
Definition: table_walker.hh:877
gem5::ArmISA::TableWalker::currState
WalkerState * currState
Definition: table_walker.hh:1046
gem5::ArmISA::TableWalker::TableWalker
TableWalker(const Params &p)
Definition: table_walker.cc:62
base.hh
gem5::ArmISA::TlbEntry::xn
bool xn
Definition: pagetable.hh:258
gem5::Port
Ports are used to interface objects to each other.
Definition: port.hh:61
gem5::ArmISA::TableWalker::WalkerState::hpd
bool hpd
Hierarchical access permission disable.
Definition: table_walker.hh:883
gem5::ArmISA::n
Bitfield< 31 > n
Definition: misc_types.hh:456
gem5::ArmISA::TableWalker::LongDescriptor::memAttr
uint8_t memAttr() const
Memory attributes, only used by stage 2 translations.
Definition: table_walker.hh:727
gem5::ArmISA::MISCREG_VTTBR_EL2
@ MISCREG_VTTBR_EL2
Definition: misc.hh:606
gem5::ArmISA::TableWalker::drain
DrainState drain() override
Draining is the process of clearing out the states of SimObjects.These are the SimObjects that are pa...
Definition: table_walker.cc:258
gem5::ArmISA::MMU::translateFunctional
TranslationGenPtr translateFunctional(Addr start, Addr size, ThreadContext *tc, Mode mode, Request::Flags flags) override
Returns a translation generator for a region of virtual addresses, instead of directly translating a ...
Definition: mmu.hh:91
gem5::ArmISA::TableWalker::drainResume
void drainResume() override
Resume execution after a successful drain.
Definition: table_walker.cc:279
gem5::ArmISA::TableWalker::LongDescriptor::domain
TlbEntry::DomainType domain() const override
Definition: table_walker.hh:711
gem5::ArmISA::ArmFault::TranslationLL
@ TranslationLL
Definition: faults.hh:101
gem5::ArmISA::TableWalker::memAttrs
void memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr, uint8_t texcb, bool s)
Definition: table_walker.cc:1241
gem5::ArmISA::ps
Bitfield< 18, 16 > ps
Definition: misc_types.hh:514
gem5::ArmISA::TableWalker::mmu
MMU * mmu
The MMU to forward second stage lookups to.
Definition: table_walker.hh:1029
gem5::ArmISA::TableWalker::~TableWalker
virtual ~TableWalker()
Definition: table_walker.cc:98
gem5::ArmISA::TableWalker::doL2DescriptorWrapper
void doL2DescriptorWrapper()
Definition: table_walker.cc:2083
gem5::ArmISA::TableWalker::LongDescriptor::attrIndx
uint8_t attrIndx() const
Attribute index.
Definition: table_walker.hh:719
gem5::ArmISA::TableWalker::memAttrsLPAE
void memAttrsLPAE(ThreadContext *tc, TlbEntry &te, LongDescriptor &lDescriptor)
Definition: table_walker.cc:1448
gem5::ArmISA::TableWalker::WalkerState::isUncacheable
bool isUncacheable
True if table walks are uncacheable (for table descriptors)
Definition: table_walker.hh:872
gem5::ArmISA::longDescFormatInUse
bool longDescFormatInUse(ThreadContext *tc)
Definition: utility.cc:136
gem5::ArmISA::TableWalker::WalkerState::l2Desc
L2Descriptor l2Desc
Definition: table_walker.hh:905
gem5::ArmISA::ArmFault::annotate
virtual void annotate(AnnotationIDs id, uint64_t val)
Definition: faults.hh:239
gem5::ArmISA::MISCREG_VSTCR_EL2
@ MISCREG_VSTCR_EL2
Definition: misc.hh:609
gem5::ArmISA::TableWalker::LongDescriptor::offsetBits
uint8_t offsetBits() const override
Return the bit width of the page/block offset.
Definition: table_walker.hh:525
gem5::ArmISA::TableWalker::LongDescriptor::dbgHeader
std::string dbgHeader() const override
Definition: table_walker.hh:451
gem5::statistics::Group
Statistics container.
Definition: group.hh:93
gem5::ArmISA::id
Bitfield< 33 > id
Definition: misc_types.hh:251
gem5::ArmISA::ArmFault::AccessFlagLL
@ AccessFlagLL
Definition: faults.hh:102
gem5::ArmISA::TableWalker::checkAddrSizeFaultAArch64
bool checkAddrSizeFaultAArch64(Addr addr, int pa_range)
Returns true if the address exceeds the range permitted by the system-wide setting or by the TCR_ELx ...
Definition: table_walker.cc:883
gem5::Request::PT_WALK
@ PT_WALK
The request is a page table walk.
Definition: request.hh:188
gem5::ArmISA::ArmFault::S1PTW
@ S1PTW
Definition: faults.hh:134
gem5::ArmISA::MMU::release
const ArmRelease * release() const
Definition: mmu.hh:382
gem5::ArmISA::snsBankedIndex
int snsBankedIndex(MiscRegIndex reg, ThreadContext *tc)
Definition: misc.cc:666
gem5::ThreadContext::getCpuPtr
virtual BaseCPU * getCpuPtr()=0
gem5::ArmISA::TableWalker::L2Descriptor::invalid
bool invalid() const
Is the entry invalid.
Definition: table_walker.hh:326
gem5::ArmISA::TableWalker::isStage2
const bool isStage2
Indicates whether this table walker is part of the stage 2 mmu.
Definition: table_walker.hh:1038
gem5::RequestorID
uint16_t RequestorID
Definition: request.hh:95
gem5::ArmISA::TableWalker::Port::recvTimingResp
bool recvTimingResp(PacketPtr pkt) override
Receive a timing response from the peer.
Definition: table_walker.cc:206
gem5::ClockedObject::Params
ClockedObjectParams Params
Parameters of ClockedObject.
Definition: clocked_object.hh:240
gem5::statistics::DataWrap::flags
Derived & flags(Flags _flags)
Set the flags and marks this stat to print at the end of simulation.
Definition: statistics.hh:358
gem5::MipsISA::vaddr
vaddr
Definition: pra_constants.hh:278
gem5::ArmISA::TableWalker::doL3LongDescriptorWrapper
void doL3LongDescriptorWrapper()
Definition: table_walker.cc:2145
gem5::ArmISA::TableWalker::stats
gem5::ArmISA::TableWalker::TableWalkerStats stats
gem5::Packet::getAddr
Addr getAddr() const
Definition: packet.hh:790
gem5::ArmISA::MISCREG_HCR_EL2
@ MISCREG_HCR_EL2
Definition: misc.hh:587
gem5::ArmISA::TableWalker::DescriptorBase::ap
virtual uint8_t ap() const =0
gem5::ArmISA::TableWalker::WalkerState::hcr
HCR hcr
Cached copy of the hcr as it existed when translation began.
Definition: table_walker.hh:857
gem5::ArmISA::MISCREG_VSTTBR_EL2
@ MISCREG_VSTTBR_EL2
Definition: misc.hh:608
gem5::ArmISA::TableWalker::WalkerState::WalkerState
WalkerState()
Definition: table_walker.cc:125
gem5::ArmISA::MISCREG_TTBR0_EL1
@ MISCREG_TTBR0_EL1
Definition: misc.hh:598
gem5::ArmISA::TableWalker::WalkerState::aarch64
bool aarch64
If the access is performed in AArch64 state.
Definition: table_walker.hh:806
gem5::ArmISA::TableWalker::toLookupLevel
static LookupLevel toLookupLevel(uint8_t lookup_level_as_int)
Definition: table_walker.cc:2418
gem5::ArmISA::TableWalker::processWalk
Fault processWalk()
Definition: table_walker.cc:580
gem5
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
Definition: gpu_translation_state.hh:37
gem5::ArmISA::TableWalker::WalkerState::timing
bool timing
If the mode is timing or atomic.
Definition: table_walker.hh:892
gem5::ArmISA::computeAddrTop
int computeAddrTop(ThreadContext *tc, bool selbit, bool is_instr, TCR tcr, ExceptionLevel el)
Definition: utility.cc:409
gem5::statistics::total
const FlagsType total
Print the total.
Definition: info.hh:60
gem5::ArmISA::TableWalker::DescriptorBase::shareable
virtual bool shareable() const
Definition: table_walker.hh:94
gem5::statistics::VectorBase::init
Derived & init(size_type size)
Set this vector to have the given size.
Definition: statistics.hh:1040
gem5::ArmISA::stride
Bitfield< 21, 20 > stride
Definition: misc_types.hh:447
gem5::ArmISA::TableWalker::LongDescriptor::userTable
uint8_t userTable() const
User/privileged mode protection flag for subsequent levels of lookup.
Definition: table_walker.hh:776
gem5::ArmISA::MMU
Definition: mmu.hh:60
gem5::ArmISA::TableWalker::setMmu
void setMmu(MMU *_mmu)
Definition: table_walker.cc:119
gem5::ArmISA::TableWalker::doLongDescriptor
void doLongDescriptor()
Definition: table_walker.cc:1800
gem5::ArmISA::TableWalker::WalkerState
Definition: table_walker.hh:799
gem5::ArmISA::MMU::hasWalkCache
bool hasWalkCache() const
Definition: mmu.hh:384
gem5::BaseMMU::Translation::finish
virtual void finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc, BaseMMU::Mode mode)=0
gem5::ArmISA::TableWalker::pendingChange
void pendingChange()
Definition: table_walker.cc:2435
gem5::ArmISA::HaveExt
bool HaveExt(ThreadContext *tc, ArmExtension ext)
Returns true if the provided ThreadContext supports the ArmExtension passed as a second argument.
Definition: utility.cc:229
gem5::ArmISA::TableWalker::DescriptorBase::domain
virtual TlbEntry::DomainType domain() const =0
gem5::ArmISA::TableWalker::WalkerState::fault
Fault fault
The fault that we are going to return.
Definition: table_walker.hh:829
gem5::Packet::isResponse
bool isResponse() const
Definition: packet.hh:595
thread_context.hh
gem5::ArmISA::TableWalker::LongDescriptor::xn
bool xn() const override
Is execution allowed on this mapping?
Definition: table_walker.hh:619
gem5::ArmISA::MMU::lookup
TlbEntry * lookup(Addr vpn, uint16_t asn, vmid_t vmid, bool hyp, bool secure, bool functional, bool ignore_asn, ExceptionLevel target_el, bool in_host, bool stage2, BaseMMU::Mode mode)
Lookup an entry in the TLB.
Definition: mmu.cc:1425
gem5::ArmISA::ArmFault::OVA
@ OVA
Definition: faults.hh:135
gem5::ArmISA::TableWalker::tlb
TLB * tlb
TLB that is initiating these table walks.
Definition: table_walker.hh:1041
gem5::ArmISA::MISCREG_TTBR1_EL2
@ MISCREG_TTBR1_EL2
Definition: misc.hh:821
gem5::DrainState::Draining
@ Draining
Draining buffers pending serialization/handover.
gem5::ArmISA::TableWalker::WalkerState::l1Desc
L1Descriptor l1Desc
Short-format descriptors.
Definition: table_walker.hh:904
gem5::ArmISA::TlbEntry::pxn
bool pxn
Definition: pagetable.hh:259
gem5::ArmISA::GrainSize
GrainSize
Definition: pagetable.hh:61
gem5::ArmISA::TableWalker::Stage2Walk::Stage2Walk
Stage2Walk(TableWalker &_parent, uint8_t *_data, Event *_event, Addr vaddr, BaseMMU::Mode mode, MMU::ArmTranslationType tran_type)
Definition: table_walker.cc:2533
gem5::ArmISA::TableWalker::TableWalkerStats::walksShortTerminatedAtLevel
statistics::Vector walksShortTerminatedAtLevel
Definition: table_walker.hh:1067
gem5::ArmISA::TlbEntry::ap
uint8_t ap
Definition: pagetable.hh:225
gem5::ArmISA::TableWalker::Port::createPacket
PacketPtr createPacket(Addr desc_addr, int size, uint8_t *data, Request::Flags flag, Tick delay, Event *event)
Definition: table_walker.cc:150
gem5::ArmISA::TableWalker::WalkerState::mode
BaseMMU::Mode mode
Save mode for use in delayed response.
Definition: table_walker.hh:898
gem5::ArmISA::MISCREG_TCR_EL1
@ MISCREG_TCR_EL1
Definition: misc.hh:602
gem5::ArmISA::TableWalker::LongDescriptor::pxn
bool pxn() const
Is privileged execution allowed on this mapping? (LPAE only)
Definition: table_walker.hh:627
gem5::ArmISA::ExceptionLevel
ExceptionLevel
Definition: types.hh:271
panic
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:178
gem5::ArmISA::TableWalker::pending
bool pending
If a timing translation is currently in progress.
Definition: table_walker.hh:1049
gem5::ArmISA::mode
Bitfield< 4, 0 > mode
Definition: misc_types.hh:74
gem5::X86ISA::addr
Bitfield< 3 > addr
Definition: types.hh:84
gem5::ArmISA::TableWalker::insertTableEntry
void insertTableEntry(DescriptorBase &descriptor, bool longDescriptor)
Definition: table_walker.cc:2339
gem5::ArmISA::TlbEntry::lookupLevel
LookupLevel lookupLevel
Definition: pagetable.hh:215
gem5::ArmISA::TableWalker::TableWalkerStats::squashedBefore
statistics::Scalar squashedBefore
Definition: table_walker.hh:1069

Generated on Sat Jun 18 2022 08:12:16 for gem5 by doxygen 1.8.17