gem5  v22.1.0.0
table_walker.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2010, 2012-2019, 2021-2022 Arm Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions are
16  * met: redistributions of source code must retain the above copyright
17  * notice, this list of conditions and the following disclaimer;
18  * redistributions in binary form must reproduce the above copyright
19  * notice, this list of conditions and the following disclaimer in the
20  * documentation and/or other materials provided with the distribution;
21  * neither the name of the copyright holders nor the names of its
22  * contributors may be used to endorse or promote products derived from
23  * this software without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  */
37 #include "arch/arm/table_walker.hh"
38 
39 #include <cassert>
40 #include <memory>
41 
42 #include "arch/arm/faults.hh"
43 #include "arch/arm/mmu.hh"
44 #include "arch/arm/pagetable.hh"
45 #include "arch/arm/system.hh"
46 #include "arch/arm/tlb.hh"
47 #include "base/compiler.hh"
48 #include "cpu/base.hh"
49 #include "cpu/thread_context.hh"
50 #include "debug/Checkpoint.hh"
51 #include "debug/Drain.hh"
52 #include "debug/PageTableWalker.hh"
53 #include "debug/TLB.hh"
54 #include "debug/TLBVerbose.hh"
55 #include "sim/system.hh"
56 
57 namespace gem5
58 {
59 
60 using namespace ArmISA;
61 
63  : ClockedObject(p),
64  requestorId(p.sys->getRequestorId(this)),
65  port(new Port(this, requestorId)),
66  isStage2(p.is_stage2), tlb(NULL),
67  currState(NULL), pending(false),
68  numSquashable(p.num_squash_per_cycle),
69  release(nullptr),
70  stats(this),
71  pendingReqs(0),
72  pendingChangeTick(curTick()),
73  doL1DescEvent([this]{ doL1DescriptorWrapper(); }, name()),
74  doL2DescEvent([this]{ doL2DescriptorWrapper(); }, name()),
75  doL0LongDescEvent([this]{ doL0LongDescriptorWrapper(); }, name()),
76  doL1LongDescEvent([this]{ doL1LongDescriptorWrapper(); }, name()),
77  doL2LongDescEvent([this]{ doL2LongDescriptorWrapper(); }, name()),
78  doL3LongDescEvent([this]{ doL3LongDescriptorWrapper(); }, name()),
79  LongDescEventByLevel { &doL0LongDescEvent, &doL1LongDescEvent,
80  &doL2LongDescEvent, &doL3LongDescEvent },
81  doProcessEvent([this]{ processWalkWrapper(); }, name())
82 {
83  sctlr = 0;
84 
85  // Cache system-level properties
86  if (FullSystem) {
87  ArmSystem *arm_sys = dynamic_cast<ArmSystem *>(p.sys);
88  assert(arm_sys);
89  _physAddrRange = arm_sys->physAddrRange();
90  _haveLargeAsid64 = arm_sys->haveLargeAsid64();
91  } else {
92  _haveLargeAsid64 = false;
93  _physAddrRange = 48;
94  }
95 
96 }
97 
99 {
100  ;
101 }
102 
105 {
106  return static_cast<Port&>(getPort("port"));
107 }
108 
109 Port &
110 TableWalker::getPort(const std::string &if_name, PortID idx)
111 {
112  if (if_name == "port") {
113  return *port;
114  }
115  return ClockedObject::getPort(if_name, idx);
116 }
117 
118 void
120 {
121  mmu = _mmu;
122  release = mmu->release();
123 }
124 
126  tc(nullptr), aarch64(false), el(EL0), physAddrRange(0), req(nullptr),
127  asid(0), vmid(0), isHyp(false), transState(nullptr),
128  vaddr(0), vaddr_tainted(0),
129  sctlr(0), scr(0), cpsr(0), tcr(0),
130  htcr(0), hcr(0), vtcr(0),
131  isWrite(false), isFetch(false), isSecure(false),
132  isUncacheable(false),
133  secureLookup(false), rwTable(false), userTable(false), xnTable(false),
134  pxnTable(false), hpd(false), stage2Req(false),
135  stage2Tran(nullptr), timing(false), functional(false),
136  mode(BaseMMU::Read), tranType(MMU::NormalTran), l2Desc(l1Desc),
137  delayed(false), tableWalker(nullptr)
138 {
139 }
140 
142  : QueuedRequestPort(_walker->name() + ".port", _walker,
143  reqQueue, snoopRespQueue),
144  reqQueue(*_walker, *this), snoopRespQueue(*_walker, *this),
145  requestorId(id)
146 {
147 }
148 
149 PacketPtr
151  Addr desc_addr, int size,
152  uint8_t *data, Request::Flags flags, Tick delay,
153  Event *event)
154 {
155  RequestPtr req = std::make_shared<Request>(
156  desc_addr, size, flags, requestorId);
157  req->taskId(context_switch_task_id::DMA);
158 
159  PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
160  pkt->dataStatic(data);
161 
162  auto state = new TableWalkerState;
163  state->event = event;
164  state->delay = delay;
165 
166  pkt->senderState = state;
167  return pkt;
168 }
169 
170 void
172  Addr desc_addr, int size,
173  uint8_t *data, Request::Flags flags)
174 {
175  auto pkt = createPacket(desc_addr, size, data, flags, 0, nullptr);
176 
177  sendFunctional(pkt);
178 
179  handleRespPacket(pkt);
180 }
181 
182 void
184  Addr desc_addr, int size,
185  uint8_t *data, Request::Flags flags, Tick delay)
186 {
187  auto pkt = createPacket(desc_addr, size, data, flags, delay, nullptr);
188 
189  Tick lat = sendAtomic(pkt);
190 
191  handleRespPacket(pkt, lat);
192 }
193 
194 void
196  Addr desc_addr, int size,
197  uint8_t *data, Request::Flags flags, Tick delay,
198  Event *event)
199 {
200  auto pkt = createPacket(desc_addr, size, data, flags, delay, event);
201 
202  schedTimingReq(pkt, curTick());
203 }
204 
205 bool
207 {
208  // We shouldn't ever get a cacheable block in Modified state.
209  assert(pkt->req->isUncacheable() ||
210  !(pkt->cacheResponding() && !pkt->hasSharers()));
211 
212  handleRespPacket(pkt);
213 
214  return true;
215 }
216 
217 void
219 {
220  // Should always see a response with a sender state.
221  assert(pkt->isResponse());
222 
223  // Get the DMA sender state.
224  auto *state = dynamic_cast<TableWalkerState*>(pkt->senderState);
225  assert(state);
226 
227  handleResp(state, pkt->getAddr(), pkt->req->getSize(), delay);
228 
229  delete pkt;
230 }
231 
232 void
234  Addr size, Tick delay)
235 {
236  if (state->event) {
237  owner.schedule(state->event, curTick() + delay);
238  }
239  delete state;
240 }
241 
242 void
244 {
245  if (drainState() == DrainState::Draining &&
246  stateQueues[LookupLevel::L0].empty() &&
247  stateQueues[LookupLevel::L1].empty() &&
248  stateQueues[LookupLevel::L2].empty() &&
249  stateQueues[LookupLevel::L3].empty() &&
250  pendingQueue.empty()) {
251 
252  DPRINTF(Drain, "TableWalker done draining, processing drain event\n");
253  signalDrainDone();
254  }
255 }
256 
259 {
260  bool state_queues_not_empty = false;
261 
262  for (int i = 0; i < LookupLevel::Num_ArmLookupLevel; ++i) {
263  if (!stateQueues[i].empty()) {
264  state_queues_not_empty = true;
265  break;
266  }
267  }
268 
269  if (state_queues_not_empty || pendingQueue.size()) {
270  DPRINTF(Drain, "TableWalker not drained\n");
271  return DrainState::Draining;
272  } else {
273  DPRINTF(Drain, "TableWalker free, no need to drain\n");
274  return DrainState::Drained;
275  }
276 }
277 
278 void
280 {
281  if (params().sys->isTimingMode() && currState) {
282  delete currState;
283  currState = NULL;
284  pendingChange();
285  }
286 }
287 
// Entry point for a page-table walk. Sets up (or reuses) a WalkerState
// describing the requested translation, then either performs the walk
// immediately (atomic/functional mode) or enqueues it (timing mode).
// Returns NoFault when the walk succeeds or has been scheduled; returns
// a fault (e.g. ReExec) otherwise.
//
// NOTE(review): this extract elides some original source lines (the
// embedded line numbering is non-contiguous, e.g. 324->326, 366->370),
// so several register reads/assignments are missing below — verify any
// change against the upstream gem5 file.
288 Fault
289 TableWalker::walk(const RequestPtr &_req, ThreadContext *_tc, uint16_t _asid,
290  vmid_t _vmid, bool _isHyp, MMU::Mode _mode,
291  MMU::Translation *_trans, bool _timing, bool _functional,
292  bool secure, MMU::ArmTranslationType tranType,
293  bool _stage2Req, const TlbEntry *walk_entry)
294 {
     // Functional and timing modes are mutually exclusive.
295  assert(!(_functional && _timing));
296  ++stats.walks;
297 
298  WalkerState *savedCurrState = NULL;
299 
300  if (!currState && !_functional) {
301  // For atomic mode, a new WalkerState instance should be only created
302  // once per TLB. For timing mode, a new instance is generated for every
303  // TLB miss.
304  DPRINTF(PageTableWalker, "creating new instance of WalkerState\n");
305 
306  currState = new WalkerState();
307  currState->tableWalker = this;
308  } else if (_functional) {
309  // If we are mixing functional mode with timing (or even
310  // atomic), we need to to be careful and clean up after
311  // ourselves to not risk getting into an inconsistent state.
312  DPRINTF(PageTableWalker,
313  "creating functional instance of WalkerState\n");
     // Stash the in-flight state so it can be restored after the
     // functional walk completes (see the restore at the bottom).
314  savedCurrState = currState;
315  currState = new WalkerState();
316  currState->tableWalker = this;
317  } else if (_timing) {
318  // This is a translation that was completed and then faulted again
319  // because some underlying parameters that affect the translation
320  // changed out from under us (e.g. asid). It will either be a
321  // misprediction, in which case nothing will happen or we'll use
322  // this fault to re-execute the faulting instruction which should clean
323  // up everything.
324  if (currState->vaddr_tainted == _req->getVaddr()) {
326  return std::make_shared<ReExec>();
327  }
328  }
329  pendingChange();
330 
332  currState->tc = _tc;
333  // ARM DDI 0487A.f (ARMv8 ARM) pg J8-5672
334  // aarch32/translation/translation/AArch32.TranslateAddress dictates
335  // even AArch32 EL0 will use AArch64 translation if EL1 is in AArch64.
336  if (isStage2) {
337  currState->el = EL1;
338  currState->aarch64 = ELIs64(_tc, EL2);
339  } else {
340  currState->el =
341  MMU::tranTypeEL(_tc->readMiscReg(MISCREG_CPSR), tranType);
342  currState->aarch64 =
343  ELIs64(_tc, currState->el == EL0 ? EL1 : currState->el);
344  }
     // Record the parameters of this translation request.
345  currState->transState = _trans;
346  currState->req = _req;
347  if (walk_entry) {
     // Resume from a partial walk-cache entry supplied by the caller.
348  currState->walkEntry = *walk_entry;
349  } else {
     // NOTE(review): the else-branch body (presumably resetting
     // walkEntry) was elided by the extract — confirm upstream.
351  }
353  currState->asid = _asid;
354  currState->vmid = _vmid;
355  currState->isHyp = _isHyp;
356  currState->timing = _timing;
357  currState->functional = _functional;
358  currState->mode = _mode;
359  currState->tranType = tranType;
360  currState->isSecure = secure;
362 
365  currState->vaddr_tainted = currState->req->getVaddr();
     // NOTE(review): the vaddr purification (tag stripping) assignments
     // for the aarch64/aarch32 cases were elided by the extract.
366  if (currState->aarch64)
370  else
372 
     // Read the translation-control registers relevant to the current
     // regime/EL. NOTE(review): most readMiscReg calls in this region
     // were elided by the extract.
373  if (currState->aarch64) {
375  if (isStage2) {
377  if (currState->secureLookup) {
378  currState->vtcr =
380  } else {
381  currState->vtcr =
383  }
384  } else switch (currState->el) {
385  case EL0:
386  if (HaveExt(currState->tc, ArmExtension::FEAT_VHE) &&
387  currState->hcr.tge == 1 && currState->hcr.e2h ==1) {
390  } else {
393  }
394  break;
395  case EL1:
398  break;
399  case EL2:
400  assert(release->has(ArmExtension::VIRTUALIZATION));
403  break;
404  case EL3:
405  assert(release->has(ArmExtension::SECURITY));
408  break;
409  default:
410  panic("Invalid exception level");
411  break;
412  }
413  } else {
421  }
422  sctlr = currState->sctlr;
423 
426 
428 
     // A stage-2 walk never itself requires another stage-2 translation.
429  currState->stage2Req = _stage2Req && !isStage2;
430 
     // Long-descriptor (LPAE) format is used for AArch64, hyp-mode and
     // stage-2 walks; the trailing condition was elided by the extract.
431  bool long_desc_format = currState->aarch64 || _isHyp || isStage2 ||
433 
434  if (long_desc_format) {
435  // Helper variables used for hierarchical permissions
437  currState->rwTable = true;
438  currState->userTable = true;
439  currState->xnTable = false;
440  currState->pxnTable = false;
441 
443  } else {
445  }
446 
     // Atomic/functional walks run to completion right here.
447  if (!currState->timing) {
448  Fault fault = NoFault;
449  if (currState->aarch64)
450  fault = processWalkAArch64();
451  else if (long_desc_format)
452  fault = processWalkLPAE();
453  else
454  fault = processWalk();
455 
456  // If this was a functional non-timing access restore state to
457  // how we found it.
458  if (currState->functional) {
459  delete currState;
460  currState = savedCurrState;
461  }
462  return fault;
463  }
464 
     // Timing mode: serialize walks — queue if one is already pending,
     // otherwise start this one immediately.
465  if (pending || pendingQueue.size()) {
466  pendingQueue.push_back(currState);
467  currState = NULL;
468  pendingChange();
469  } else {
470  pending = true;
471  pendingChange();
472  if (currState->aarch64)
473  return processWalkAArch64();
474  else if (long_desc_format)
475  return processWalkLPAE();
476  else
477  return processWalk();
478  }
479 
480  return NoFault;
481 }
482 
483 void
485 {
486  assert(!currState);
487  assert(pendingQueue.size());
488  pendingChange();
489  currState = pendingQueue.front();
490 
491  // Check if a previous walk filled this request already
492  // @TODO Should this always be the TLB or should we look in the stage2 TLB?
494  currState->vmid, currState->isHyp, currState->isSecure, true, false,
495  currState->el, false, isStage2, currState->mode);
496 
497  // Check if we still need to have a walk for this request. If the requesting
498  // instruction has been squashed, or a previous walk has filled the TLB with
499  // a match, we just want to get rid of the walk. The latter could happen
500  // when there are multiple outstanding misses to a single page and a
501  // previous request has been successfully translated.
502  if (!currState->transState->squashed() && (!te || te->partial)) {
503  // We've got a valid request, lets process it
504  pending = true;
505  pendingQueue.pop_front();
506  // Keep currState in case one of the processWalk... calls NULLs it
507 
508  if (te && te->partial) {
509  currState->walkEntry = *te;
510  }
511  WalkerState *curr_state_copy = currState;
512  Fault f;
513  if (currState->aarch64)
514  f = processWalkAArch64();
515  else if (longDescFormatInUse(currState->tc) ||
517  f = processWalkLPAE();
518  else
519  f = processWalk();
520 
521  if (f != NoFault) {
522  curr_state_copy->transState->finish(f, curr_state_copy->req,
523  curr_state_copy->tc, curr_state_copy->mode);
524 
525  delete curr_state_copy;
526  }
527  return;
528  }
529 
530 
531  // If the instruction that we were translating for has been
532  // squashed we shouldn't bother.
533  unsigned num_squashed = 0;
534  ThreadContext *tc = currState->tc;
535  while ((num_squashed < numSquashable) && currState &&
537  (te && !te->partial))) {
538  pendingQueue.pop_front();
539  num_squashed++;
541 
542  DPRINTF(TLB, "Squashing table walk for address %#x\n",
544 
545  if (currState->transState->squashed()) {
546  // finish the translation which will delete the translation object
548  std::make_shared<UnimpFault>("Squashed Inst"),
550  } else {
551  // translate the request now that we know it will work
556  }
557 
558  // delete the current request
559  delete currState;
560 
561  // peak at the next one
562  if (pendingQueue.size()) {
563  currState = pendingQueue.front();
566  false, currState->el, false, isStage2, currState->mode);
567  } else {
568  // Terminate the loop, nothing more to do
569  currState = NULL;
570  }
571  }
572  pendingChange();
573 
574  // if we still have pending translations, schedule more work
575  nextWalk(tc);
576  currState = NULL;
577 }
578 
579 Fault
581 {
582  Addr ttbr = 0;
583 
584  // For short descriptors, translation configs are held in
585  // TTBR1.
588 
589  const auto irgn0_mask = 0x1;
590  const auto irgn1_mask = 0x40;
591  currState->isUncacheable = (ttbr1 & (irgn0_mask | irgn1_mask)) == 0;
592 
593  // If translation isn't enabled, we shouldn't be here
594  assert(currState->sctlr.m || isStage2);
595  const bool is_atomic = currState->req->isAtomic();
596  const bool have_security = release->has(ArmExtension::SECURITY);
597 
598  DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x, bits:%#x\n",
600  32 - currState->ttbcr.n));
601 
603 
604  if (currState->ttbcr.n == 0 || !mbits(currState->vaddr, 31,
605  32 - currState->ttbcr.n)) {
606  DPRINTF(TLB, " - Selecting TTBR0\n");
607  // Check if table walk is allowed when Security Extensions are enabled
608  if (have_security && currState->ttbcr.pd0) {
609  if (currState->isFetch)
610  return std::make_shared<PrefetchAbort>(
613  isStage2,
615  else
616  return std::make_shared<DataAbort>(
619  is_atomic ? false : currState->isWrite,
622  }
625  } else {
626  DPRINTF(TLB, " - Selecting TTBR1\n");
627  // Check if table walk is allowed when Security Extensions are enabled
628  if (have_security && currState->ttbcr.pd1) {
629  if (currState->isFetch)
630  return std::make_shared<PrefetchAbort>(
633  isStage2,
635  else
636  return std::make_shared<DataAbort>(
639  is_atomic ? false : currState->isWrite,
642  }
643  ttbr = ttbr1;
644  currState->ttbcr.n = 0;
645  }
646 
647  Addr l1desc_addr = mbits(ttbr, 31, 14 - currState->ttbcr.n) |
648  (bits(currState->vaddr, 31 - currState->ttbcr.n, 20) << 2);
649  DPRINTF(TLB, " - Descriptor at address %#x (%s)\n", l1desc_addr,
650  currState->isSecure ? "s" : "ns");
651 
652  // Trickbox address check
653  Fault f;
654  f = testWalk(l1desc_addr, sizeof(uint32_t),
656  if (f) {
657  DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
658  if (currState->timing) {
659  pending = false;
661  currState = NULL;
662  } else {
663  currState->tc = NULL;
664  currState->req = NULL;
665  }
666  return f;
667  }
668 
670  if (currState->sctlr.c == 0 || currState->isUncacheable) {
672  }
673 
674  if (currState->isSecure) {
675  flag.set(Request::SECURE);
676  }
677 
678  bool delayed;
679  delayed = fetchDescriptor(l1desc_addr, (uint8_t*)&currState->l1Desc.data,
680  sizeof(uint32_t), flag, LookupLevel::L1,
681  &doL1DescEvent,
683  if (!delayed) {
684  f = currState->fault;
685  }
686 
687  return f;
688 }
689 
690 Fault
692 {
693  Addr ttbr, ttbr0_max, ttbr1_min, desc_addr;
694  int tsz, n;
695  LookupLevel start_lookup_level = LookupLevel::L1;
696 
697  DPRINTF(TLB, "Beginning table walk for address %#x, TTBCR: %#x\n",
699 
701 
703  if (currState->isSecure)
704  flag.set(Request::SECURE);
705 
706  // work out which base address register to use, if in hyp mode we always
707  // use HTTBR
708  if (isStage2) {
709  DPRINTF(TLB, " - Selecting VTTBR (long-desc.)\n");
711  tsz = sext<4>(currState->vtcr.t0sz);
712  start_lookup_level = currState->vtcr.sl0 ?
714  currState->isUncacheable = currState->vtcr.irgn0 == 0;
715  } else if (currState->isHyp) {
716  DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n");
718  tsz = currState->htcr.t0sz;
719  currState->isUncacheable = currState->htcr.irgn0 == 0;
720  } else {
721  assert(longDescFormatInUse(currState->tc));
722 
723  // Determine boundaries of TTBR0/1 regions
724  if (currState->ttbcr.t0sz)
725  ttbr0_max = (1ULL << (32 - currState->ttbcr.t0sz)) - 1;
726  else if (currState->ttbcr.t1sz)
727  ttbr0_max = (1ULL << 32) -
728  (1ULL << (32 - currState->ttbcr.t1sz)) - 1;
729  else
730  ttbr0_max = (1ULL << 32) - 1;
731  if (currState->ttbcr.t1sz)
732  ttbr1_min = (1ULL << 32) - (1ULL << (32 - currState->ttbcr.t1sz));
733  else
734  ttbr1_min = (1ULL << (32 - currState->ttbcr.t0sz));
735 
736  const bool is_atomic = currState->req->isAtomic();
737 
738  // The following code snippet selects the appropriate translation table base
739  // address (TTBR0 or TTBR1) and the appropriate starting lookup level
740  // depending on the address range supported by the translation table (ARM
741  // ARM issue C B3.6.4)
742  if (currState->vaddr <= ttbr0_max) {
743  DPRINTF(TLB, " - Selecting TTBR0 (long-desc.)\n");
744  // Check if table walk is allowed
745  if (currState->ttbcr.epd0) {
746  if (currState->isFetch)
747  return std::make_shared<PrefetchAbort>(
750  isStage2,
752  else
753  return std::make_shared<DataAbort>(
756  is_atomic ? false : currState->isWrite,
758  isStage2,
760  }
763  tsz = currState->ttbcr.t0sz;
764  currState->isUncacheable = currState->ttbcr.irgn0 == 0;
765  if (ttbr0_max < (1ULL << 30)) // Upper limit < 1 GiB
766  start_lookup_level = LookupLevel::L2;
767  } else if (currState->vaddr >= ttbr1_min) {
768  DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
769  // Check if table walk is allowed
770  if (currState->ttbcr.epd1) {
771  if (currState->isFetch)
772  return std::make_shared<PrefetchAbort>(
775  isStage2,
777  else
778  return std::make_shared<DataAbort>(
781  is_atomic ? false : currState->isWrite,
783  isStage2,
785  }
788  tsz = currState->ttbcr.t1sz;
789  currState->isUncacheable = currState->ttbcr.irgn1 == 0;
790  // Lower limit >= 3 GiB
791  if (ttbr1_min >= (1ULL << 31) + (1ULL << 30))
792  start_lookup_level = LookupLevel::L2;
793  } else {
794  // Out of boundaries -> translation fault
795  if (currState->isFetch)
796  return std::make_shared<PrefetchAbort>(
799  isStage2,
801  else
802  return std::make_shared<DataAbort>(
805  is_atomic ? false : currState->isWrite,
808  }
809 
810  }
811 
812  // Perform lookup (ARM ARM issue C B3.6.6)
813  if (start_lookup_level == LookupLevel::L1) {
814  n = 5 - tsz;
815  desc_addr = mbits(ttbr, 39, n) |
816  (bits(currState->vaddr, n + 26, 30) << 3);
817  DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
818  desc_addr, currState->isSecure ? "s" : "ns");
819  } else {
820  // Skip first-level lookup
821  n = (tsz >= 2 ? 14 - tsz : 12);
822  desc_addr = mbits(ttbr, 39, n) |
823  (bits(currState->vaddr, n + 17, 21) << 3);
824  DPRINTF(TLB, " - Descriptor at address %#x (%s) (long-desc.)\n",
825  desc_addr, currState->isSecure ? "s" : "ns");
826  }
827 
828  // Trickbox address check
829  Fault f = testWalk(desc_addr, sizeof(uint64_t),
830  TlbEntry::DomainType::NoAccess, start_lookup_level,
831  isStage2);
832  if (f) {
833  DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
834  if (currState->timing) {
835  pending = false;
837  currState = NULL;
838  } else {
839  currState->tc = NULL;
840  currState->req = NULL;
841  }
842  return f;
843  }
844 
845  if (currState->sctlr.c == 0 || currState->isUncacheable) {
847  }
848 
849  currState->longDesc.lookupLevel = start_lookup_level;
850  currState->longDesc.aarch64 = false;
852 
853  bool delayed = fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
854  sizeof(uint64_t), flag, start_lookup_level,
855  LongDescEventByLevel[start_lookup_level],
857  if (!delayed) {
858  f = currState->fault;
859  }
860 
861  return f;
862 }
863 
864 bool
866  GrainSize tg, int tsz, bool low_range)
867 {
868  // The effective maximum input size is 48 if ARMv8.2-LVA is not
869  // supported or if the translation granule that is in use is 4KB or
870  // 16KB in size. When ARMv8.2-LVA is supported, for the 64KB
871  // translation granule size only, the effective minimum value of
872  // 52.
873  const bool have_lva = HaveExt(currState->tc, ArmExtension::FEAT_LVA);
874  int in_max = (have_lva && tg == Grain64KB) ? 52 : 48;
875  int in_min = 64 - (tg == Grain64KB ? 47 : 48);
876 
877  return tsz > in_max || tsz < in_min || (low_range ?
878  bits(currState->vaddr, top_bit, tsz) != 0x0 :
879  bits(currState->vaddr, top_bit, tsz) != mask(top_bit - tsz + 1));
880 }
881 
882 bool
884 {
885  return (pa_range != _physAddrRange &&
886  bits(addr, _physAddrRange - 1, pa_range));
887 }
888 
889 Fault
891 {
892  assert(currState->aarch64);
893 
894  DPRINTF(TLB, "Beginning table walk for address %#llx, TCR: %#llx\n",
896 
898 
899  // Determine TTBR, table size, granule size and phys. address range
900  Addr ttbr = 0;
901  int tsz = 0, ps = 0;
902  GrainSize tg = Grain4KB; // grain size computed from tg* field
903  bool fault = false;
904 
905  int top_bit = computeAddrTop(currState->tc,
906  bits(currState->vaddr, 55),
908  currState->tcr,
909  currState->el);
910 
911  bool vaddr_fault = false;
912  switch (currState->el) {
913  case EL0:
914  {
915  Addr ttbr0;
916  Addr ttbr1;
917  if (HaveExt(currState->tc, ArmExtension::FEAT_VHE) &&
918  currState->hcr.tge==1 && currState->hcr.e2h == 1) {
919  // VHE code for EL2&0 regime
922  } else {
925  }
926  switch (bits(currState->vaddr, 63,48)) {
927  case 0:
928  DPRINTF(TLB, " - Selecting TTBR0 (AArch64)\n");
929  ttbr = ttbr0;
930  tsz = 64 - currState->tcr.t0sz;
931  tg = GrainMap_tg0[currState->tcr.tg0];
932  currState->hpd = currState->tcr.hpd0;
933  currState->isUncacheable = currState->tcr.irgn0 == 0;
935  top_bit, tg, tsz, true);
936 
937  if (vaddr_fault || currState->tcr.epd0)
938  fault = true;
939  break;
940  case 0xffff:
941  DPRINTF(TLB, " - Selecting TTBR1 (AArch64)\n");
942  ttbr = ttbr1;
943  tsz = 64 - currState->tcr.t1sz;
944  tg = GrainMap_tg1[currState->tcr.tg1];
945  currState->hpd = currState->tcr.hpd1;
946  currState->isUncacheable = currState->tcr.irgn1 == 0;
948  top_bit, tg, tsz, false);
949 
950  if (vaddr_fault || currState->tcr.epd1)
951  fault = true;
952  break;
953  default:
954  // top two bytes must be all 0s or all 1s, else invalid addr
955  fault = true;
956  }
957  ps = currState->tcr.ips;
958  }
959  break;
960  case EL1:
961  if (isStage2) {
962  if (currState->secureLookup) {
963  DPRINTF(TLB, " - Selecting VSTTBR_EL2 (AArch64 stage 2)\n");
965  } else {
966  DPRINTF(TLB, " - Selecting VTTBR_EL2 (AArch64 stage 2)\n");
968  }
969  tsz = 64 - currState->vtcr.t0sz64;
970  tg = GrainMap_tg0[currState->vtcr.tg0];
971 
972  ps = currState->vtcr.ps;
973  currState->isUncacheable = currState->vtcr.irgn0 == 0;
974  } else {
975  switch (bits(currState->vaddr, top_bit)) {
976  case 0:
977  DPRINTF(TLB, " - Selecting TTBR0_EL1 (AArch64)\n");
979  tsz = 64 - currState->tcr.t0sz;
980  tg = GrainMap_tg0[currState->tcr.tg0];
981  currState->hpd = currState->tcr.hpd0;
982  currState->isUncacheable = currState->tcr.irgn0 == 0;
984  top_bit, tg, tsz, true);
985 
986  if (vaddr_fault || currState->tcr.epd0)
987  fault = true;
988  break;
989  case 0x1:
990  DPRINTF(TLB, " - Selecting TTBR1_EL1 (AArch64)\n");
992  tsz = 64 - currState->tcr.t1sz;
993  tg = GrainMap_tg1[currState->tcr.tg1];
994  currState->hpd = currState->tcr.hpd1;
995  currState->isUncacheable = currState->tcr.irgn1 == 0;
997  top_bit, tg, tsz, false);
998 
999  if (vaddr_fault || currState->tcr.epd1)
1000  fault = true;
1001  break;
1002  default:
1003  // top two bytes must be all 0s or all 1s, else invalid addr
1004  fault = true;
1005  }
1006  ps = currState->tcr.ips;
1007  }
1008  break;
1009  case EL2:
1010  switch(bits(currState->vaddr, top_bit)) {
1011  case 0:
1012  DPRINTF(TLB, " - Selecting TTBR0_EL2 (AArch64)\n");
1014  tsz = 64 - currState->tcr.t0sz;
1015  tg = GrainMap_tg0[currState->tcr.tg0];
1016  currState->hpd = currState->hcr.e2h ?
1017  currState->tcr.hpd0 : currState->tcr.hpd;
1018  currState->isUncacheable = currState->tcr.irgn0 == 0;
1020  top_bit, tg, tsz, true);
1021 
1022  if (vaddr_fault || (currState->hcr.e2h && currState->tcr.epd0))
1023  fault = true;
1024  break;
1025 
1026  case 0x1:
1027  DPRINTF(TLB, " - Selecting TTBR1_EL2 (AArch64)\n");
1029  tsz = 64 - currState->tcr.t1sz;
1030  tg = GrainMap_tg1[currState->tcr.tg1];
1031  currState->hpd = currState->tcr.hpd1;
1032  currState->isUncacheable = currState->tcr.irgn1 == 0;
1034  top_bit, tg, tsz, false);
1035 
1036  if (vaddr_fault || !currState->hcr.e2h || currState->tcr.epd1)
1037  fault = true;
1038  break;
1039 
1040  default:
1041  // invalid addr if top two bytes are not all 0s
1042  fault = true;
1043  }
1044  ps = currState->hcr.e2h ? currState->tcr.ips: currState->tcr.ps;
1045  break;
1046  case EL3:
1047  switch(bits(currState->vaddr, top_bit)) {
1048  case 0:
1049  DPRINTF(TLB, " - Selecting TTBR0_EL3 (AArch64)\n");
1051  tsz = 64 - currState->tcr.t0sz;
1052  tg = GrainMap_tg0[currState->tcr.tg0];
1053  currState->hpd = currState->tcr.hpd;
1054  currState->isUncacheable = currState->tcr.irgn0 == 0;
1056  top_bit, tg, tsz, true);
1057 
1058  if (vaddr_fault)
1059  fault = true;
1060  break;
1061  default:
1062  // invalid addr if top two bytes are not all 0s
1063  fault = true;
1064  }
1065  ps = currState->tcr.ps;
1066  break;
1067  }
1068 
1069  const bool is_atomic = currState->req->isAtomic();
1070 
1071  if (fault) {
1072  Fault f;
1073  if (currState->isFetch)
1074  f = std::make_shared<PrefetchAbort>(
1078  else
1079  f = std::make_shared<DataAbort>(
1082  is_atomic ? false : currState->isWrite,
1085 
1086  if (currState->timing) {
1087  pending = false;
1088  nextWalk(currState->tc);
1089  currState = NULL;
1090  } else {
1091  currState->tc = NULL;
1092  currState->req = NULL;
1093  }
1094  return f;
1095 
1096  }
1097 
1098  if (tg == ReservedGrain) {
1099  warn_once("Reserved granule size requested; gem5's IMPLEMENTATION "
1100  "DEFINED behavior takes this to mean 4KB granules\n");
1101  tg = Grain4KB;
1102  }
1103 
1104  // Clamp to lower limit
1105  int pa_range = decodePhysAddrRange64(ps);
1106  if (pa_range > _physAddrRange) {
1108  } else {
1109  currState->physAddrRange = pa_range;
1110  }
1111 
1112  auto [table_addr, desc_addr, start_lookup_level] = walkAddresses(
1113  ttbr, tg, tsz, pa_range);
1114 
1115  // Determine physical address size and raise an Address Size Fault if
1116  // necessary
1118  DPRINTF(TLB, "Address size fault before any lookup\n");
1119  Fault f;
1120  if (currState->isFetch)
1121  f = std::make_shared<PrefetchAbort>(
1123  ArmFault::AddressSizeLL + start_lookup_level,
1124  isStage2,
1126  else
1127  f = std::make_shared<DataAbort>(
1130  is_atomic ? false : currState->isWrite,
1131  ArmFault::AddressSizeLL + start_lookup_level,
1132  isStage2,
1134 
1135 
1136  if (currState->timing) {
1137  pending = false;
1138  nextWalk(currState->tc);
1139  currState = NULL;
1140  } else {
1141  currState->tc = NULL;
1142  currState->req = NULL;
1143  }
1144  return f;
1145 
1146  }
1147 
1148  // Trickbox address check
1149  Fault f = testWalk(desc_addr, sizeof(uint64_t),
1150  TlbEntry::DomainType::NoAccess, start_lookup_level, isStage2);
1151  if (f) {
1152  DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
1153  if (currState->timing) {
1154  pending = false;
1155  nextWalk(currState->tc);
1156  currState = NULL;
1157  } else {
1158  currState->tc = NULL;
1159  currState->req = NULL;
1160  }
1161  return f;
1162  }
1163 
1165  if (currState->sctlr.c == 0 || currState->isUncacheable) {
1166  flag.set(Request::UNCACHEABLE);
1167  }
1168 
1169  if (currState->isSecure) {
1170  flag.set(Request::SECURE);
1171  }
1172 
1173  currState->longDesc.lookupLevel = start_lookup_level;
1174  currState->longDesc.aarch64 = true;
1177 
1178  if (currState->timing) {
1179  fetchDescriptor(desc_addr, (uint8_t*) &currState->longDesc.data,
1180  sizeof(uint64_t), flag, start_lookup_level,
1181  LongDescEventByLevel[start_lookup_level], NULL);
1182  } else {
1183  fetchDescriptor(desc_addr, (uint8_t*)&currState->longDesc.data,
1184  sizeof(uint64_t), flag, -1, NULL,
1186  f = currState->fault;
1187  }
1188 
1189  return f;
1190 }
1191 
1192 std::tuple<Addr, Addr, TableWalker::LookupLevel>
1193 TableWalker::walkAddresses(Addr ttbr, GrainSize tg, int tsz, int pa_range)
1194 {
1195  const auto* ptops = getPageTableOps(tg);
1196 
1197  LookupLevel first_level = LookupLevel::Num_ArmLookupLevel;
1198  Addr table_addr = 0;
1199  Addr desc_addr = 0;
1200 
1201  if (currState->walkEntry.valid) {
1202  // WalkCache hit
1203  TlbEntry* entry = &currState->walkEntry;
1204  DPRINTF(PageTableWalker,
1205  "Walk Cache hit: va=%#x, level=%d, table address=%#x\n",
1206  currState->vaddr, entry->lookupLevel, entry->pfn);
1207 
1208  currState->xnTable = entry->xn;
1209  currState->pxnTable = entry->pxn;
1210  currState->rwTable = bits(entry->ap, 1);
1211  currState->userTable = bits(entry->ap, 0);
1212 
1213  table_addr = entry->pfn;
1214  first_level = (LookupLevel)(entry->lookupLevel + 1);
1215  } else {
1216  // WalkCache miss
1217  first_level = isStage2 ?
1218  ptops->firstS2Level(currState->vtcr.sl0) :
1219  ptops->firstLevel(64 - tsz);
1220  panic_if(first_level == LookupLevel::Num_ArmLookupLevel,
1221  "Table walker couldn't find lookup level\n");
1222 
1223  int stride = tg - 3;
1224  int base_addr_lo = 3 + tsz - stride * (3 - first_level) - tg;
1225 
1226  if (pa_range == 52) {
1227  int z = (base_addr_lo < 6) ? 6 : base_addr_lo;
1228  table_addr = mbits(ttbr, 47, z);
1229  table_addr |= (bits(ttbr, 5, 2) << 48);
1230  } else {
1231  table_addr = mbits(ttbr, 47, base_addr_lo);
1232  }
1233  }
1234 
1235  desc_addr = table_addr + ptops->index(currState->vaddr, first_level, tsz);
1236 
1237  return std::make_tuple(table_addr, desc_addr, first_level);
1238 }
1239 
1240 void
1242  uint8_t texcb, bool s)
1243 {
1244  // Note: tc and sctlr local variables are hiding tc and sctrl class
1245  // variables
1246  DPRINTF(TLBVerbose, "memAttrs texcb:%d s:%d\n", texcb, s);
1247  te.shareable = false; // default value
1248  te.nonCacheable = false;
1249  te.outerShareable = false;
1250  if (sctlr.tre == 0 || ((sctlr.tre == 1) && (sctlr.m == 0))) {
1251  switch(texcb) {
1252  case 0: // Stongly-ordered
1253  te.nonCacheable = true;
1255  te.shareable = true;
1256  te.innerAttrs = 1;
1257  te.outerAttrs = 0;
1258  break;
1259  case 1: // Shareable Device
1260  te.nonCacheable = true;
1262  te.shareable = true;
1263  te.innerAttrs = 3;
1264  te.outerAttrs = 0;
1265  break;
1266  case 2: // Outer and Inner Write-Through, no Write-Allocate
1268  te.shareable = s;
1269  te.innerAttrs = 6;
1270  te.outerAttrs = bits(texcb, 1, 0);
1271  break;
1272  case 3: // Outer and Inner Write-Back, no Write-Allocate
1274  te.shareable = s;
1275  te.innerAttrs = 7;
1276  te.outerAttrs = bits(texcb, 1, 0);
1277  break;
1278  case 4: // Outer and Inner Non-cacheable
1279  te.nonCacheable = true;
1281  te.shareable = s;
1282  te.innerAttrs = 0;
1283  te.outerAttrs = bits(texcb, 1, 0);
1284  break;
1285  case 5: // Reserved
1286  panic("Reserved texcb value!\n");
1287  break;
1288  case 6: // Implementation Defined
1289  panic("Implementation-defined texcb value!\n");
1290  break;
1291  case 7: // Outer and Inner Write-Back, Write-Allocate
1293  te.shareable = s;
1294  te.innerAttrs = 5;
1295  te.outerAttrs = 1;
1296  break;
1297  case 8: // Non-shareable Device
1298  te.nonCacheable = true;
1300  te.shareable = false;
1301  te.innerAttrs = 3;
1302  te.outerAttrs = 0;
1303  break;
1304  case 9 ... 15: // Reserved
1305  panic("Reserved texcb value!\n");
1306  break;
1307  case 16 ... 31: // Cacheable Memory
1309  te.shareable = s;
1310  if (bits(texcb, 1,0) == 0 || bits(texcb, 3,2) == 0)
1311  te.nonCacheable = true;
1312  te.innerAttrs = bits(texcb, 1, 0);
1313  te.outerAttrs = bits(texcb, 3, 2);
1314  break;
1315  default:
1316  panic("More than 32 states for 5 bits?\n");
1317  }
1318  } else {
1319  assert(tc);
1320  PRRR prrr = tc->readMiscReg(snsBankedIndex(MISCREG_PRRR,
1322  NMRR nmrr = tc->readMiscReg(snsBankedIndex(MISCREG_NMRR,
1324  DPRINTF(TLBVerbose, "memAttrs PRRR:%08x NMRR:%08x\n", prrr, nmrr);
1325  uint8_t curr_tr = 0, curr_ir = 0, curr_or = 0;
1326  switch(bits(texcb, 2,0)) {
1327  case 0:
1328  curr_tr = prrr.tr0;
1329  curr_ir = nmrr.ir0;
1330  curr_or = nmrr.or0;
1331  te.outerShareable = (prrr.nos0 == 0);
1332  break;
1333  case 1:
1334  curr_tr = prrr.tr1;
1335  curr_ir = nmrr.ir1;
1336  curr_or = nmrr.or1;
1337  te.outerShareable = (prrr.nos1 == 0);
1338  break;
1339  case 2:
1340  curr_tr = prrr.tr2;
1341  curr_ir = nmrr.ir2;
1342  curr_or = nmrr.or2;
1343  te.outerShareable = (prrr.nos2 == 0);
1344  break;
1345  case 3:
1346  curr_tr = prrr.tr3;
1347  curr_ir = nmrr.ir3;
1348  curr_or = nmrr.or3;
1349  te.outerShareable = (prrr.nos3 == 0);
1350  break;
1351  case 4:
1352  curr_tr = prrr.tr4;
1353  curr_ir = nmrr.ir4;
1354  curr_or = nmrr.or4;
1355  te.outerShareable = (prrr.nos4 == 0);
1356  break;
1357  case 5:
1358  curr_tr = prrr.tr5;
1359  curr_ir = nmrr.ir5;
1360  curr_or = nmrr.or5;
1361  te.outerShareable = (prrr.nos5 == 0);
1362  break;
1363  case 6:
1364  panic("Imp defined type\n");
1365  case 7:
1366  curr_tr = prrr.tr7;
1367  curr_ir = nmrr.ir7;
1368  curr_or = nmrr.or7;
1369  te.outerShareable = (prrr.nos7 == 0);
1370  break;
1371  }
1372 
1373  switch(curr_tr) {
1374  case 0:
1375  DPRINTF(TLBVerbose, "StronglyOrdered\n");
1377  te.nonCacheable = true;
1378  te.innerAttrs = 1;
1379  te.outerAttrs = 0;
1380  te.shareable = true;
1381  break;
1382  case 1:
1383  DPRINTF(TLBVerbose, "Device ds1:%d ds0:%d s:%d\n",
1384  prrr.ds1, prrr.ds0, s);
1386  te.nonCacheable = true;
1387  te.innerAttrs = 3;
1388  te.outerAttrs = 0;
1389  if (prrr.ds1 && s)
1390  te.shareable = true;
1391  if (prrr.ds0 && !s)
1392  te.shareable = true;
1393  break;
1394  case 2:
1395  DPRINTF(TLBVerbose, "Normal ns1:%d ns0:%d s:%d\n",
1396  prrr.ns1, prrr.ns0, s);
1398  if (prrr.ns1 && s)
1399  te.shareable = true;
1400  if (prrr.ns0 && !s)
1401  te.shareable = true;
1402  break;
1403  case 3:
1404  panic("Reserved type");
1405  }
1406 
1407  if (te.mtype == TlbEntry::MemoryType::Normal){
1408  switch(curr_ir) {
1409  case 0:
1410  te.nonCacheable = true;
1411  te.innerAttrs = 0;
1412  break;
1413  case 1:
1414  te.innerAttrs = 5;
1415  break;
1416  case 2:
1417  te.innerAttrs = 6;
1418  break;
1419  case 3:
1420  te.innerAttrs = 7;
1421  break;
1422  }
1423 
1424  switch(curr_or) {
1425  case 0:
1426  te.nonCacheable = true;
1427  te.outerAttrs = 0;
1428  break;
1429  case 1:
1430  te.outerAttrs = 1;
1431  break;
1432  case 2:
1433  te.outerAttrs = 2;
1434  break;
1435  case 3:
1436  te.outerAttrs = 3;
1437  break;
1438  }
1439  }
1440  }
1441  DPRINTF(TLBVerbose, "memAttrs: shareable: %d, innerAttrs: %d, "
1442  "outerAttrs: %d\n",
1443  te.shareable, te.innerAttrs, te.outerAttrs);
1444  te.setAttributes(false);
1445 }
1446 
1447 void
1449  LongDescriptor &l_descriptor)
1450 {
1451  assert(release->has(ArmExtension::LPAE));
1452 
1453  uint8_t attr;
1454  uint8_t sh = l_descriptor.sh();
1455  // Different format and source of attributes if this is a stage 2
1456  // translation
1457  if (isStage2) {
1458  attr = l_descriptor.memAttr();
1459  uint8_t attr_3_2 = (attr >> 2) & 0x3;
1460  uint8_t attr_1_0 = attr & 0x3;
1461 
1462  DPRINTF(TLBVerbose, "memAttrsLPAE MemAttr:%#x sh:%#x\n", attr, sh);
1463 
1464  if (attr_3_2 == 0) {
1465  te.mtype = attr_1_0 == 0 ? TlbEntry::MemoryType::StronglyOrdered
1467  te.outerAttrs = 0;
1468  te.innerAttrs = attr_1_0 == 0 ? 1 : 3;
1469  te.nonCacheable = true;
1470  } else {
1472  te.outerAttrs = attr_3_2 == 1 ? 0 :
1473  attr_3_2 == 2 ? 2 : 1;
1474  te.innerAttrs = attr_1_0 == 1 ? 0 :
1475  attr_1_0 == 2 ? 6 : 5;
1476  te.nonCacheable = (attr_3_2 == 1) || (attr_1_0 == 1);
1477  }
1478  } else {
1479  uint8_t attrIndx = l_descriptor.attrIndx();
1480 
1481  // LPAE always uses remapping of memory attributes, irrespective of the
1482  // value of SCTLR.TRE
1483  MiscRegIndex reg = attrIndx & 0x4 ? MISCREG_MAIR1 : MISCREG_MAIR0;
1484  int reg_as_int = snsBankedIndex(reg, currState->tc,
1485  !currState->isSecure);
1486  uint32_t mair = currState->tc->readMiscReg(reg_as_int);
1487  attr = (mair >> (8 * (attrIndx % 4))) & 0xff;
1488  uint8_t attr_7_4 = bits(attr, 7, 4);
1489  uint8_t attr_3_0 = bits(attr, 3, 0);
1490  DPRINTF(TLBVerbose, "memAttrsLPAE AttrIndx:%#x sh:%#x, attr %#x\n", attrIndx, sh, attr);
1491 
1492  // Note: the memory subsystem only cares about the 'cacheable' memory
1493  // attribute. The other attributes are only used to fill the PAR register
1494  // accordingly to provide the illusion of full support
1495  te.nonCacheable = false;
1496 
1497  switch (attr_7_4) {
1498  case 0x0:
1499  // Strongly-ordered or Device memory
1500  if (attr_3_0 == 0x0)
1502  else if (attr_3_0 == 0x4)
1504  else
1505  panic("Unpredictable behavior\n");
1506  te.nonCacheable = true;
1507  te.outerAttrs = 0;
1508  break;
1509  case 0x4:
1510  // Normal memory, Outer Non-cacheable
1512  te.outerAttrs = 0;
1513  if (attr_3_0 == 0x4)
1514  // Inner Non-cacheable
1515  te.nonCacheable = true;
1516  else if (attr_3_0 < 0x8)
1517  panic("Unpredictable behavior\n");
1518  break;
1519  case 0x8:
1520  case 0x9:
1521  case 0xa:
1522  case 0xb:
1523  case 0xc:
1524  case 0xd:
1525  case 0xe:
1526  case 0xf:
1527  if (attr_7_4 & 0x4) {
1528  te.outerAttrs = (attr_7_4 & 1) ? 1 : 3;
1529  } else {
1530  te.outerAttrs = 0x2;
1531  }
1532  // Normal memory, Outer Cacheable
1534  if (attr_3_0 != 0x4 && attr_3_0 < 0x8)
1535  panic("Unpredictable behavior\n");
1536  break;
1537  default:
1538  panic("Unpredictable behavior\n");
1539  break;
1540  }
1541 
1542  switch (attr_3_0) {
1543  case 0x0:
1544  te.innerAttrs = 0x1;
1545  break;
1546  case 0x4:
1547  te.innerAttrs = attr_7_4 == 0 ? 0x3 : 0;
1548  break;
1549  case 0x8:
1550  case 0x9:
1551  case 0xA:
1552  case 0xB:
1553  te.innerAttrs = 6;
1554  break;
1555  case 0xC:
1556  case 0xD:
1557  case 0xE:
1558  case 0xF:
1559  te.innerAttrs = attr_3_0 & 1 ? 0x5 : 0x7;
1560  break;
1561  default:
1562  panic("Unpredictable behavior\n");
1563  break;
1564  }
1565  }
1566 
1567  te.outerShareable = sh == 2;
1568  te.shareable = (sh & 0x2) ? true : false;
1569  te.setAttributes(true);
1570  te.attributes |= (uint64_t) attr << 56;
1571 }
1572 
1573 void
1575  LongDescriptor &l_descriptor)
1576 {
1577  uint8_t attr;
1578  uint8_t attr_hi;
1579  uint8_t attr_lo;
1580  uint8_t sh = l_descriptor.sh();
1581 
1582  if (isStage2) {
1583  attr = l_descriptor.memAttr();
1584  uint8_t attr_hi = (attr >> 2) & 0x3;
1585  uint8_t attr_lo = attr & 0x3;
1586 
1587  DPRINTF(TLBVerbose, "memAttrsAArch64 MemAttr:%#x sh:%#x\n", attr, sh);
1588 
1589  if (attr_hi == 0) {
1590  te.mtype = attr_lo == 0 ? TlbEntry::MemoryType::StronglyOrdered
1592  te.outerAttrs = 0;
1593  te.innerAttrs = attr_lo == 0 ? 1 : 3;
1594  te.nonCacheable = true;
1595  } else {
1597  te.outerAttrs = attr_hi == 1 ? 0 :
1598  attr_hi == 2 ? 2 : 1;
1599  te.innerAttrs = attr_lo == 1 ? 0 :
1600  attr_lo == 2 ? 6 : 5;
1601  // Treat write-through memory as uncacheable, this is safe
1602  // but for performance reasons not optimal.
1603  te.nonCacheable = (attr_hi == 1) || (attr_hi == 2) ||
1604  (attr_lo == 1) || (attr_lo == 2);
1605  }
1606  } else {
1607  uint8_t attrIndx = l_descriptor.attrIndx();
1608 
1609  DPRINTF(TLBVerbose, "memAttrsAArch64 AttrIndx:%#x sh:%#x\n", attrIndx, sh);
1611 
1612  // Select MAIR
1613  uint64_t mair;
1614  switch (regime) {
1615  case EL0:
1616  case EL1:
1617  mair = tc->readMiscReg(MISCREG_MAIR_EL1);
1618  break;
1619  case EL2:
1620  mair = tc->readMiscReg(MISCREG_MAIR_EL2);
1621  break;
1622  case EL3:
1623  mair = tc->readMiscReg(MISCREG_MAIR_EL3);
1624  break;
1625  default:
1626  panic("Invalid exception level");
1627  break;
1628  }
1629 
1630  // Select attributes
1631  attr = bits(mair, 8 * attrIndx + 7, 8 * attrIndx);
1632  attr_lo = bits(attr, 3, 0);
1633  attr_hi = bits(attr, 7, 4);
1634 
1635  // Memory type
1637 
1638  // Cacheability
1639  te.nonCacheable = false;
1640  if (te.mtype == TlbEntry::MemoryType::Device) { // Device memory
1641  te.nonCacheable = true;
1642  }
1643  // Treat write-through memory as uncacheable, this is safe
1644  // but for performance reasons not optimal.
1645  switch (attr_hi) {
1646  case 0x1 ... 0x3: // Normal Memory, Outer Write-through transient
1647  case 0x4: // Normal memory, Outer Non-cacheable
1648  case 0x8 ... 0xb: // Normal Memory, Outer Write-through non-transient
1649  te.nonCacheable = true;
1650  }
1651  switch (attr_lo) {
1652  case 0x1 ... 0x3: // Normal Memory, Inner Write-through transient
1653  case 0x9 ... 0xb: // Normal Memory, Inner Write-through non-transient
1654  warn_if(!attr_hi, "Unpredictable behavior");
1655  [[fallthrough]];
1656  case 0x4: // Device-nGnRE memory or
1657  // Normal memory, Inner Non-cacheable
1658  case 0x8: // Device-nGRE memory or
1659  // Normal memory, Inner Write-through non-transient
1660  te.nonCacheable = true;
1661  }
1662 
1663  te.shareable = sh == 2;
1664  te.outerShareable = (sh & 0x2) ? true : false;
1665  // Attributes formatted according to the 64-bit PAR
1666  te.attributes = ((uint64_t) attr << 56) |
1667  (1 << 11) | // LPAE bit
1668  (te.ns << 9) | // NS bit
1669  (sh << 7);
1670  }
1671 }
1672 
1673 void
1675 {
1676  if (currState->fault != NoFault) {
1677  return;
1678  }
1679 
1681  byteOrder(currState->tc));
1682 
1683  DPRINTF(TLB, "L1 descriptor for %#x is %#x\n",
1685  TlbEntry te;
1686 
1687  const bool is_atomic = currState->req->isAtomic();
1688 
1689  switch (currState->l1Desc.type()) {
1690  case L1Descriptor::Ignore:
1692  if (!currState->timing) {
1693  currState->tc = NULL;
1694  currState->req = NULL;
1695  }
1696  DPRINTF(TLB, "L1 Descriptor Reserved/Ignore, causing fault\n");
1697  if (currState->isFetch)
1698  currState->fault =
1699  std::make_shared<PrefetchAbort>(
1702  isStage2,
1704  else
1705  currState->fault =
1706  std::make_shared<DataAbort>(
1709  is_atomic ? false : currState->isWrite,
1712  return;
1713  case L1Descriptor::Section:
1714  if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) {
1720  currState->fault = std::make_shared<DataAbort>(
1722  currState->l1Desc.domain(),
1723  is_atomic ? false : currState->isWrite,
1725  isStage2,
1727  }
1728  if (currState->l1Desc.supersection()) {
1729  panic("Haven't implemented supersections\n");
1730  }
1732  return;
1734  {
1735  Addr l2desc_addr;
1736  l2desc_addr = currState->l1Desc.l2Addr() |
1737  (bits(currState->vaddr, 19, 12) << 2);
1738  DPRINTF(TLB, "L1 descriptor points to page table at: %#x (%s)\n",
1739  l2desc_addr, currState->isSecure ? "s" : "ns");
1740 
1741  // Trickbox address check
1742  currState->fault = testWalk(l2desc_addr, sizeof(uint32_t),
1743  currState->l1Desc.domain(),
1745 
1746  if (currState->fault) {
1747  if (!currState->timing) {
1748  currState->tc = NULL;
1749  currState->req = NULL;
1750  }
1751  return;
1752  }
1753 
1755 
1756  if (currState->sctlr.c == 0 || currState->isUncacheable) {
1757  flag.set(Request::UNCACHEABLE);
1758  }
1759 
1760  if (currState->isSecure)
1761  flag.set(Request::SECURE);
1762 
1763  bool delayed;
1764  delayed = fetchDescriptor(l2desc_addr,
1765  (uint8_t*)&currState->l2Desc.data,
1766  sizeof(uint32_t), flag, -1, &doL2DescEvent,
1768  if (delayed) {
1769  currState->delayed = true;
1770  }
1771 
1772  return;
1773  }
1774  default:
1775  panic("A new type in a 2 bit field?\n");
1776  }
1777 }
1778 
1779 Fault
1781 {
1782  if (currState->isFetch) {
1783  return std::make_shared<PrefetchAbort>(
1786  isStage2,
1788  } else {
1789  return std::make_shared<DataAbort>(
1792  currState->req->isAtomic() ? false : currState->isWrite,
1794  isStage2,
1796  }
1797 }
1798 
1799 void
1801 {
1802  if (currState->fault != NoFault) {
1803  return;
1804  }
1805 
1807  byteOrder(currState->tc));
1808 
1809  DPRINTF(TLB, "L%d descriptor for %#llx is %#llx (%s)\n",
1812  currState->aarch64 ? "AArch64" : "long-desc.");
1813 
1816  DPRINTF(PageTableWalker, "Analyzing L%d descriptor: %#llx, pxn: %d, "
1817  "xn: %d, ap: %d, af: %d, type: %d\n",
1820  currState->longDesc.pxn(),
1821  currState->longDesc.xn(),
1822  currState->longDesc.ap(),
1823  currState->longDesc.af(),
1824  currState->longDesc.type());
1825  } else {
1826  DPRINTF(PageTableWalker, "Analyzing L%d descriptor: %#llx, type: %d\n",
1829  currState->longDesc.type());
1830  }
1831 
1832  TlbEntry te;
1833 
1834  switch (currState->longDesc.type()) {
1836  DPRINTF(TLB, "L%d descriptor Invalid, causing fault type %d\n",
1839 
1841  if (!currState->timing) {
1842  currState->tc = NULL;
1843  currState->req = NULL;
1844  }
1845  return;
1846 
1847  case LongDescriptor::Block:
1848  case LongDescriptor::Page:
1849  {
1850  auto fault_source = ArmFault::FaultSourceInvalid;
1851  // Check for address size fault
1854 
1855  DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1857  fault_source = ArmFault::AddressSizeLL;
1858 
1859  // Check for access fault
1860  } else if (currState->longDesc.af() == 0) {
1861 
1862  DPRINTF(TLB, "L%d descriptor causing Access Fault\n",
1864  fault_source = ArmFault::AccessFlagLL;
1865  }
1866 
1867  if (fault_source != ArmFault::FaultSourceInvalid) {
1868  currState->fault = generateLongDescFault(fault_source);
1869  } else {
1871  }
1872  }
1873  return;
1874  case LongDescriptor::Table:
1875  {
1876  // Set hierarchical permission flags
1887 
1888  // Set up next level lookup
1889  Addr next_desc_addr = currState->longDesc.nextDescAddr(
1890  currState->vaddr);
1891 
1892  DPRINTF(TLB, "L%d descriptor points to L%d descriptor at: %#x (%s)\n",
1895  next_desc_addr,
1896  currState->secureLookup ? "s" : "ns");
1897 
1898  // Check for address size fault
1900  next_desc_addr, currState->physAddrRange)) {
1901  DPRINTF(TLB, "L%d descriptor causing Address Size Fault\n",
1903 
1906  return;
1907  }
1908 
1909  // Trickbox address check
1911  next_desc_addr, sizeof(uint64_t), TlbEntry::DomainType::Client,
1913 
1914  if (currState->fault) {
1915  if (!currState->timing) {
1916  currState->tc = NULL;
1917  currState->req = NULL;
1918  }
1919  return;
1920  }
1921 
1922  if (mmu->hasWalkCache()) {
1924  }
1925 
1926 
1928  if (currState->secureLookup)
1929  flag.set(Request::SECURE);
1930 
1931  if (currState->sctlr.c == 0 || currState->isUncacheable) {
1932  flag.set(Request::UNCACHEABLE);
1933  }
1934 
1937  Event *event = NULL;
1938  switch (L) {
1939  case LookupLevel::L1:
1940  assert(currState->aarch64);
1941  case LookupLevel::L2:
1942  case LookupLevel::L3:
1943  event = LongDescEventByLevel[L];
1944  break;
1945  default:
1946  panic("Wrong lookup level in table walk\n");
1947  break;
1948  }
1949 
1950  bool delayed;
1951  delayed = fetchDescriptor(next_desc_addr, (uint8_t*)&currState->longDesc.data,
1952  sizeof(uint64_t), flag, -1, event,
1954  if (delayed) {
1955  currState->delayed = true;
1956  }
1957  }
1958  return;
1959  default:
1960  panic("A new type in a 2 bit field?\n");
1961  }
1962 }
1963 
1964 void
1966 {
1967  if (currState->fault != NoFault) {
1968  return;
1969  }
1970 
1972  byteOrder(currState->tc));
1973 
1974  DPRINTF(TLB, "L2 descriptor for %#x is %#x\n",
1976  TlbEntry te;
1977 
1978  const bool is_atomic = currState->req->isAtomic();
1979 
1980  if (currState->l2Desc.invalid()) {
1981  DPRINTF(TLB, "L2 descriptor invalid, causing fault\n");
1982  if (!currState->timing) {
1983  currState->tc = NULL;
1984  currState->req = NULL;
1985  }
1986  if (currState->isFetch)
1987  currState->fault = std::make_shared<PrefetchAbort>(
1990  isStage2,
1992  else
1993  currState->fault = std::make_shared<DataAbort>(
1995  is_atomic ? false : currState->isWrite,
1997  isStage2,
1999  return;
2000  }
2001 
2002  if (currState->sctlr.afe && bits(currState->l2Desc.ap(), 0) == 0) {
2006  DPRINTF(TLB, "Generating access fault at L2, afe: %d, ap: %d\n",
2007  currState->sctlr.afe, currState->l2Desc.ap());
2008 
2009  currState->fault = std::make_shared<DataAbort>(
2012  is_atomic ? false : currState->isWrite,
2015  }
2016 
2018 }
2019 
2020 void
2022 {
2024  currState->delayed = false;
2025  // if there's a stage2 translation object we don't need it any more
2026  if (currState->stage2Tran) {
2027  delete currState->stage2Tran;
2028  currState->stage2Tran = NULL;
2029  }
2030 
2031 
2032  DPRINTF(PageTableWalker, "L1 Desc object host addr: %p\n",
2033  &currState->l1Desc.data);
2034  DPRINTF(PageTableWalker, "L1 Desc object data: %08x\n",
2035  currState->l1Desc.data);
2036 
2037  DPRINTF(PageTableWalker, "calling doL1Descriptor for vaddr:%#x\n",
2039  doL1Descriptor();
2040 
2041  stateQueues[LookupLevel::L1].pop_front();
2042  // Check if fault was generated
2043  if (currState->fault != NoFault) {
2045  currState->tc, currState->mode);
2047 
2048  pending = false;
2049  nextWalk(currState->tc);
2050 
2051  currState->req = NULL;
2052  currState->tc = NULL;
2053  currState->delayed = false;
2054  delete currState;
2055  }
2056  else if (!currState->delayed) {
2057  // delay is not set so there is no L2 to do
2058  // Don't finish the translation if a stage 2 look up is underway
2060  DPRINTF(PageTableWalker, "calling translateTiming again\n");
2061 
2065 
2067 
2068  pending = false;
2069  nextWalk(currState->tc);
2070 
2071  currState->req = NULL;
2072  currState->tc = NULL;
2073  currState->delayed = false;
2074  delete currState;
2075  } else {
2076  // need to do L2 descriptor
2078  }
2079  currState = NULL;
2080 }
2081 
2082 void
2084 {
2086  assert(currState->delayed);
2087  // if there's a stage2 translation object we don't need it any more
2088  if (currState->stage2Tran) {
2089  delete currState->stage2Tran;
2090  currState->stage2Tran = NULL;
2091  }
2092 
2093  DPRINTF(PageTableWalker, "calling doL2Descriptor for vaddr:%#x\n",
2095  doL2Descriptor();
2096 
2097  // Check if fault was generated
2098  if (currState->fault != NoFault) {
2100  currState->tc, currState->mode);
2102  } else {
2104  DPRINTF(PageTableWalker, "calling translateTiming again\n");
2105 
2109 
2111  }
2112 
2113 
2114  stateQueues[LookupLevel::L2].pop_front();
2115  pending = false;
2116  nextWalk(currState->tc);
2117 
2118  currState->req = NULL;
2119  currState->tc = NULL;
2120  currState->delayed = false;
2121 
2122  delete currState;
2123  currState = NULL;
2124 }
2125 
2126 void
2128 {
2130 }
2131 
2132 void
2134 {
2136 }
2137 
2138 void
2140 {
2142 }
2143 
2144 void
2146 {
2148 }
2149 
2150 void
2152 {
2153  currState = stateQueues[curr_lookup_level].front();
2154  assert(curr_lookup_level == currState->longDesc.lookupLevel);
2155  currState->delayed = false;
2156 
2157  // if there's a stage2 translation object we don't need it any more
2158  if (currState->stage2Tran) {
2159  delete currState->stage2Tran;
2160  currState->stage2Tran = NULL;
2161  }
2162 
2163  DPRINTF(PageTableWalker, "calling doLongDescriptor for vaddr:%#x\n",
2165  doLongDescriptor();
2166 
2167  stateQueues[curr_lookup_level].pop_front();
2168 
2169  if (currState->fault != NoFault) {
2170  // A fault was generated
2172  currState->tc, currState->mode);
2173 
2174  pending = false;
2175  nextWalk(currState->tc);
2176 
2177  currState->req = NULL;
2178  currState->tc = NULL;
2179  currState->delayed = false;
2180  delete currState;
2181  } else if (!currState->delayed) {
2182  // No additional lookups required
2183  DPRINTF(PageTableWalker, "calling translateTiming again\n");
2185 
2189 
2190  stats.walksLongTerminatedAtLevel[(unsigned) curr_lookup_level]++;
2191 
2192  pending = false;
2193  nextWalk(currState->tc);
2194 
2195  currState->req = NULL;
2196  currState->tc = NULL;
2197  currState->delayed = false;
2198  delete currState;
2199  } else {
2200  if (curr_lookup_level >= LookupLevel::Num_ArmLookupLevel - 1)
2201  panic("Max. number of lookups already reached in table walk\n");
2202  // Need to perform additional lookups
2204  }
2205  currState = NULL;
2206 }
2207 
2208 
2209 void
2211 {
2212  if (pendingQueue.size())
2214  else
2215  completeDrain();
2216 }
2217 
2218 bool
2219 TableWalker::fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes,
2220  Request::Flags flags, int queueIndex, Event *event,
2221  void (TableWalker::*doDescriptor)())
2222 {
2223  bool isTiming = currState->timing;
2224 
2225  DPRINTF(PageTableWalker,
2226  "Fetching descriptor at address: 0x%x stage2Req: %d\n",
2227  descAddr, currState->stage2Req);
2228 
2229  // If this translation has a stage 2 then we know descAddr is an IPA and
2230  // needs to be translated before we can access the page table. Do that
2231  // check here.
2232  if (currState->stage2Req) {
2233  Fault fault;
2234 
2235  if (isTiming) {
2236  auto *tran = new
2237  Stage2Walk(*this, data, event, currState->vaddr,
2239  currState->stage2Tran = tran;
2240  readDataTimed(currState->tc, descAddr, tran, numBytes, flags);
2241  fault = tran->fault;
2242  } else {
2243  fault = readDataUntimed(currState->tc,
2244  currState->vaddr, descAddr, data, numBytes, flags,
2245  currState->mode,
2248  }
2249 
2250  if (fault != NoFault) {
2251  currState->fault = fault;
2252  }
2253  if (isTiming) {
2254  if (queueIndex >= 0) {
2255  DPRINTF(PageTableWalker, "Adding to walker fifo: "
2256  "queue size before adding: %d\n",
2257  stateQueues[queueIndex].size());
2258  stateQueues[queueIndex].push_back(currState);
2259  currState = NULL;
2260  }
2261  } else {
2262  (this->*doDescriptor)();
2263  }
2264  } else {
2265  if (isTiming) {
2266  port->sendTimingReq(descAddr, numBytes, data, flags,
2268 
2269  if (queueIndex >= 0) {
2270  DPRINTF(PageTableWalker, "Adding to walker fifo: "
2271  "queue size before adding: %d\n",
2272  stateQueues[queueIndex].size());
2273  stateQueues[queueIndex].push_back(currState);
2274  currState = NULL;
2275  }
2276  } else if (!currState->functional) {
2277  port->sendAtomicReq(descAddr, numBytes, data, flags,
2279 
2280  (this->*doDescriptor)();
2281  } else {
2282  port->sendFunctionalReq(descAddr, numBytes, data, flags);
2283  (this->*doDescriptor)();
2284  }
2285  }
2286  return (isTiming);
2287 }
2288 
2289 void
2291 {
2292  const bool have_security = release->has(ArmExtension::SECURITY);
2293  TlbEntry te;
2294 
2295  // Create and fill a new page table entry
2296  te.valid = true;
2297  te.longDescFormat = true;
2298  te.partial = true;
2299  // The entry is global if there is no address space identifier
2300  // to differentiate translation contexts
2301  te.global = !mmu->hasUnprivRegime(
2302  currState->el, currState->hcr.e2h);
2303  te.isHyp = currState->isHyp;
2304  te.asid = currState->asid;
2305  te.vmid = currState->vmid;
2306  te.N = descriptor.offsetBits();
2307  te.vpn = currState->vaddr >> te.N;
2308  te.size = (1ULL << te.N) - 1;
2309  te.pfn = descriptor.nextTableAddr();
2310  te.domain = descriptor.domain();
2311  te.lookupLevel = descriptor.lookupLevel;
2312  te.ns = !descriptor.secure(have_security, currState);
2313  te.nstid = !currState->isSecure;
2314  te.type = TypeTLB::unified;
2315 
2316  if (currState->aarch64)
2317  te.el = currState->el;
2318  else
2319  te.el = EL1;
2320 
2321  te.xn = currState->xnTable;
2322  te.pxn = currState->pxnTable;
2323  te.ap = (currState->rwTable << 1) | (currState->userTable);
2324 
2325  // Debug output
2326  DPRINTF(TLB, descriptor.dbgHeader().c_str());
2327  DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
2328  te.N, te.pfn, te.size, te.global, te.valid);
2329  DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
2330  "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
2331  te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp,
2332  te.nonCacheable, te.ns);
2333  DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
2334  descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
2335  descriptor.getRawData());
2336 
2337  // Insert the entry into the TLBs
2338  tlb->multiInsert(te);
2339 }
2340 
2341 void
2342 TableWalker::insertTableEntry(DescriptorBase &descriptor, bool long_descriptor)
2343 {
2344  const bool have_security = release->has(ArmExtension::SECURITY);
2345  TlbEntry te;
2346 
2347  // Create and fill a new page table entry
2348  te.valid = true;
2349  te.longDescFormat = long_descriptor;
2350  te.isHyp = currState->isHyp;
2351  te.asid = currState->asid;
2352  te.vmid = currState->vmid;
2353  te.N = descriptor.offsetBits();
2354  te.vpn = currState->vaddr >> te.N;
2355  te.size = (1<<te.N) - 1;
2356  te.pfn = descriptor.pfn();
2357  te.domain = descriptor.domain();
2358  te.lookupLevel = descriptor.lookupLevel;
2359  te.ns = !descriptor.secure(have_security, currState);
2360  te.nstid = !currState->isSecure;
2361  te.xn = descriptor.xn();
2362  te.type = currState->mode == BaseMMU::Execute ?
2363  TypeTLB::instruction : TypeTLB::data;
2364 
2365  if (currState->aarch64)
2366  te.el = currState->el;
2367  else
2368  te.el = EL1;
2369 
2372 
2373  // ASID has no meaning for stage 2 TLB entries, so mark all stage 2 entries
2374  // as global
2375  te.global = descriptor.global(currState) || isStage2;
2376  if (long_descriptor) {
2377  LongDescriptor l_descriptor =
2378  dynamic_cast<LongDescriptor &>(descriptor);
2379 
2380  te.xn |= currState->xnTable;
2381  te.pxn = currState->pxnTable || l_descriptor.pxn();
2382  if (isStage2) {
2383  // this is actually the HAP field, but its stored in the same bit
2384  // possitions as the AP field in a stage 1 translation.
2385  te.hap = l_descriptor.ap();
2386  } else {
2387  te.ap = ((!currState->rwTable || descriptor.ap() >> 1) << 1) |
2388  (currState->userTable && (descriptor.ap() & 0x1));
2389  }
2390  if (currState->aarch64)
2391  memAttrsAArch64(currState->tc, te, l_descriptor);
2392  else
2393  memAttrsLPAE(currState->tc, te, l_descriptor);
2394  } else {
2395  te.ap = descriptor.ap();
2396  memAttrs(currState->tc, te, currState->sctlr, descriptor.texcb(),
2397  descriptor.shareable());
2398  }
2399 
2400  // Debug output
2401  DPRINTF(TLB, descriptor.dbgHeader().c_str());
2402  DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n",
2403  te.N, te.pfn, te.size, te.global, te.valid);
2404  DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d "
2405  "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn,
2406  te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp,
2407  te.nonCacheable, te.ns);
2408  DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n",
2409  descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()),
2410  descriptor.getRawData());
2411 
2412  // Insert the entry into the TLBs
2413  tlb->multiInsert(te);
2414  if (!currState->timing) {
2415  currState->tc = NULL;
2416  currState->req = NULL;
2417  }
2418 }
2419 
2421 TableWalker::toLookupLevel(uint8_t lookup_level_as_int)
2422 {
2423  switch (lookup_level_as_int) {
2424  case LookupLevel::L1:
2425  return LookupLevel::L1;
2426  case LookupLevel::L2:
2427  return LookupLevel::L2;
2428  case LookupLevel::L3:
2429  return LookupLevel::L3;
2430  default:
2431  panic("Invalid lookup level conversion");
2432  }
2433 }
2434 
2435 /* this method keeps track of the table walker queue's residency, so
2436  * needs to be called whenever requests start and complete. */
2437 void
2439 {
2440  unsigned n = pendingQueue.size();
2441  if ((currState != NULL) && (currState != pendingQueue.front())) {
2442  ++n;
2443  }
2444 
2445  if (n != pendingReqs) {
2446  Tick now = curTick();
2448  pendingReqs = n;
2449  pendingChangeTick = now;
2450  }
2451 }
2452 
2453 Fault
2455  LookupLevel lookup_level, bool stage2)
2456 {
2457  return mmu->testWalk(pa, size, currState->vaddr, currState->isSecure,
2458  currState->mode, domain, lookup_level, stage2);
2459 }
2460 
2461 
2462 uint8_t
2464 {
2465  /* for stats.pageSizes */
2466  switch(N) {
2467  case 12: return 0; // 4K
2468  case 14: return 1; // 16K (using 16K granule in v8-64)
2469  case 16: return 2; // 64K
2470  case 20: return 3; // 1M
2471  case 21: return 4; // 2M-LPAE
2472  case 24: return 5; // 16M
2473  case 25: return 6; // 32M (using 16K granule in v8-64)
2474  case 29: return 7; // 512M (using 64K granule in v8-64)
2475  case 30: return 8; // 1G-LPAE
2476  case 42: return 9; // 1G-LPAE
2477  default:
2478  panic("unknown page size");
2479  return 255;
2480  }
2481 }
2482 
2483 Fault
2485  uint8_t *data, int num_bytes, Request::Flags flags, BaseMMU::Mode mode,
2486  MMU::ArmTranslationType tran_type, bool functional)
2487 {
2488  Fault fault;
2489 
2490  // translate to physical address using the second stage MMU
2491  auto req = std::make_shared<Request>();
2492  req->setVirt(desc_addr, num_bytes, flags | Request::PT_WALK,
2493  requestorId, 0);
2494 
2495  if (functional) {
2496  fault = mmu->translateFunctional(req, tc, BaseMMU::Read,
2497  tran_type, true);
2498  } else {
2499  fault = mmu->translateAtomic(req, tc, BaseMMU::Read,
2500  tran_type, true);
2501  }
2502 
2503  // Now do the access.
2504  if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
2505  Packet pkt = Packet(req, MemCmd::ReadReq);
2506  pkt.dataStatic(data);
2507  if (functional) {
2508  port->sendFunctional(&pkt);
2509  } else {
2510  port->sendAtomic(&pkt);
2511  }
2512  assert(!pkt.isError());
2513  }
2514 
2515  // If there was a fault annotate it with the flag saying the foult occured
2516  // while doing a translation for a stage 1 page table walk.
2517  if (fault != NoFault) {
2518  ArmFault *arm_fault = reinterpret_cast<ArmFault *>(fault.get());
2519  arm_fault->annotate(ArmFault::S1PTW, true);
2520  arm_fault->annotate(ArmFault::OVA, vaddr);
2521  }
2522  return fault;
2523 }
2524 
2525 void
2527  Stage2Walk *translation, int num_bytes,
2529 {
2530  // translate to physical address using the second stage MMU
2531  translation->setVirt(
2532  desc_addr, num_bytes, flags | Request::PT_WALK, requestorId);
2533  translation->translateTiming(tc);
2534 }
2535 
2537  uint8_t *_data, Event *_event, Addr vaddr, BaseMMU::Mode _mode,
2538  MMU::ArmTranslationType tran_type)
2539  : data(_data), numBytes(0), event(_event), parent(_parent),
2540  oVAddr(vaddr), mode(_mode), tranType(tran_type), fault(NoFault)
2541 {
2542  req = std::make_shared<Request>();
2543 }
2544 
2545 void
2547  const RequestPtr &req,
2549 {
2550  fault = _fault;
2551 
2552  // If there was a fault annotate it with the flag saying the foult occured
2553  // while doing a translation for a stage 1 page table walk.
2554  if (fault != NoFault) {
2555  ArmFault *arm_fault = reinterpret_cast<ArmFault *>(fault.get());
2556  arm_fault->annotate(ArmFault::S1PTW, true);
2557  arm_fault->annotate(ArmFault::OVA, oVAddr);
2558  }
2559 
2560  if (_fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
2561  parent.getTableWalkerPort().sendTimingReq(
2562  req->getPaddr(), numBytes, data, req->getFlags(),
2563  tc->getCpuPtr()->clockPeriod(), event);
2564  } else {
2565  // We can't do the DMA access as there's been a problem, so tell the
2566  // event we're done
2567  event->process();
2568  }
2569 }
2570 
2571 void
2573 {
2574  parent.mmu->translateTiming(req, tc, this, mode, tranType, true);
2575 }
2576 
2578  : statistics::Group(parent),
2579  ADD_STAT(walks, statistics::units::Count::get(),
2580  "Table walker walks requested"),
2581  ADD_STAT(walksShortDescriptor, statistics::units::Count::get(),
2582  "Table walker walks initiated with short descriptors"),
2583  ADD_STAT(walksLongDescriptor, statistics::units::Count::get(),
2584  "Table walker walks initiated with long descriptors"),
2585  ADD_STAT(walksShortTerminatedAtLevel, statistics::units::Count::get(),
2586  "Level at which table walker walks with short descriptors "
2587  "terminate"),
2588  ADD_STAT(walksLongTerminatedAtLevel, statistics::units::Count::get(),
2589  "Level at which table walker walks with long descriptors "
2590  "terminate"),
2591  ADD_STAT(squashedBefore, statistics::units::Count::get(),
2592  "Table walks squashed before starting"),
2593  ADD_STAT(squashedAfter, statistics::units::Count::get(),
2594  "Table walks squashed after completion"),
2595  ADD_STAT(walkWaitTime, statistics::units::Tick::get(),
2596  "Table walker wait (enqueue to first request) latency"),
2597  ADD_STAT(walkServiceTime, statistics::units::Tick::get(),
2598  "Table walker service (enqueue to completion) latency"),
2599  ADD_STAT(pendingWalks, statistics::units::Tick::get(),
2600  "Table walker pending requests distribution"),
2601  ADD_STAT(pageSizes, statistics::units::Count::get(),
2602  "Table walker page sizes translated"),
2603  ADD_STAT(requestOrigin, statistics::units::Count::get(),
2604  "Table walker requests started/completed, data/inst")
2605 {
2608 
2611 
2613  .init(2)
2615 
2616  walksShortTerminatedAtLevel.subname(0, "Level1");
2617  walksShortTerminatedAtLevel.subname(1, "Level2");
2618 
2620  .init(4)
2622  walksLongTerminatedAtLevel.subname(0, "Level0");
2623  walksLongTerminatedAtLevel.subname(1, "Level1");
2624  walksLongTerminatedAtLevel.subname(2, "Level2");
2625  walksLongTerminatedAtLevel.subname(3, "Level3");
2626 
2629 
2632 
2633  walkWaitTime
2634  .init(16)
2636 
2638  .init(16)
2640 
2641  pendingWalks
2642  .init(16)
2645 
2646  pageSizes // see DDI 0487A D4-1661
2647  .init(10)
2650  pageSizes.subname(0, "4KiB");
2651  pageSizes.subname(1, "16KiB");
2652  pageSizes.subname(2, "64KiB");
2653  pageSizes.subname(3, "1MiB");
2654  pageSizes.subname(4, "2MiB");
2655  pageSizes.subname(5, "16MiB");
2656  pageSizes.subname(6, "32MiB");
2657  pageSizes.subname(7, "512MiB");
2658  pageSizes.subname(8, "1GiB");
2659  pageSizes.subname(9, "4TiB");
2660 
2662  .init(2,2) // Instruction/Data, requests/completed
2664  requestOrigin.subname(0,"Requested");
2665  requestOrigin.subname(1,"Completed");
2666  requestOrigin.ysubname(0,"Data");
2667  requestOrigin.ysubname(1,"Inst");
2668 }
2669 
2670 } // namespace gem5
#define DPRINTF(x,...)
Definition: trace.hh:186
const char data[]
FaultSource
Generic fault source enums used to index into {short/long/aarch64}DescFaultSources[] to get the actua...
Definition: faults.hh:96
virtual void annotate(AnnotationIDs id, uint64_t val)
Definition: faults.hh:239
static bool hasUnprivRegime(ExceptionLevel el, bool e2h)
Definition: mmu.cc:702
TlbEntry * lookup(Addr vpn, uint16_t asn, vmid_t vmid, bool hyp, bool secure, bool functional, bool ignore_asn, ExceptionLevel target_el, bool in_host, bool stage2, BaseMMU::Mode mode)
Lookup an entry in the TLB.
Definition: mmu.cc:1409
Fault testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode, TlbEntry::DomainType domain, LookupLevel lookup_level, bool stage2)
Definition: mmu.cc:1609
const ArmRelease * release() const
Definition: mmu.hh:382
void translateTiming(const RequestPtr &req, ThreadContext *tc, Translation *translation, Mode mode) override
Definition: mmu.hh:256
bool hasWalkCache() const
Definition: mmu.hh:384
static ExceptionLevel tranTypeEL(CPSR cpsr, ArmTranslationType type)
Determine the EL to use for the purpose of a translation given a specific translation type.
Definition: mmu.cc:1370
Fault translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode) override
Definition: mmu.hh:245
TranslationGenPtr translateFunctional(Addr start, Addr size, ThreadContext *tc, Mode mode, Request::Flags flags) override
Returns a translation generator for a region of virtual addresses, instead of directly translating a ...
Definition: mmu.hh:91
void multiInsert(TlbEntry &pte)
Insert a PTE in the current TLB and in the higher levels.
Definition: tlb.cc:270
virtual bool global(WalkerState *currState) const =0
virtual uint64_t getRawData() const =0
virtual std::string dbgHeader() const =0
virtual uint8_t offsetBits() const =0
LookupLevel lookupLevel
Current lookup level for this descriptor.
Definition: table_walker.hh:79
virtual TlbEntry::DomainType domain() const =0
virtual bool secure(bool have_security, WalkerState *currState) const =0
uint32_t data
The raw bits of the entry.
bool supersection() const
Is the page a Supersection (16 MiB)?
Addr l2Addr() const
Address of L2 descriptor if it exists.
uint8_t ap() const override
Three bit access protection flags.
TlbEntry::DomainType domain() const override
Domain Client/Manager: ARM DDI 0406B: B3-31.
uint8_t ap() const override
Three bit access protection flags.
uint32_t data
The raw bits of the entry.
bool invalid() const
Is the entry invalid.
Long-descriptor format (LPAE)
uint8_t sh() const
2-bit shareability field
uint8_t memAttr() const
Memory attributes, only used by stage 2 translations.
uint8_t rwTable() const
R/W protection flag for subsequent levels of lookup.
uint8_t offsetBits() const override
Return the bit width of the page/block offset.
bool pxn() const
Is privileged execution allowed on this mapping? (LPAE only)
bool af() const
Returns true if the access flag (AF) is set.
bool pxnTable() const
Is privileged execution allowed on subsequent lookup levels?
bool aarch64
True if the current lookup is performed in AArch64 state.
EntryType type() const
Return the descriptor type.
bool xn() const override
Is execution allowed on this mapping?
bool secure(bool have_security, WalkerState *currState) const override
Returns true if this entry targets the secure physical address map.
std::string dbgHeader() const override
Addr nextTableAddr() const
Return the address of the next page table.
GrainSize grainSize
Width of the granule size in bits.
uint8_t attrIndx() const
Attribute index.
uint8_t ap() const override
2-bit access protection flags
uint64_t data
The raw bits of the entry.
Addr nextDescAddr(Addr va) const
Return the address of the next descriptor.
Addr paddr() const
Return the physical address of the entry.
uint8_t userTable() const
User/privileged mode protection flag for subsequent levels of lookup.
bool secureTable() const
Whether the subsequent levels of lookup are secure.
TlbEntry::DomainType domain() const override
uint64_t getRawData() const override
bool xnTable() const
Is execution allowed on subsequent lookup levels?
void sendFunctionalReq(Addr desc_addr, int size, uint8_t *data, Request::Flags flag)
void sendTimingReq(Addr desc_addr, int size, uint8_t *data, Request::Flags flag, Tick delay, Event *event)
PacketPtr createPacket(Addr desc_addr, int size, uint8_t *data, Request::Flags flag, Tick delay, Event *event)
void handleResp(TableWalkerState *state, Addr addr, Addr size, Tick delay=0)
void handleRespPacket(PacketPtr pkt, Tick delay=0)
void sendAtomicReq(Addr desc_addr, int size, uint8_t *data, Request::Flags flag, Tick delay)
Port(TableWalker *_walker, RequestorID id)
bool recvTimingResp(PacketPtr pkt) override
Receive a timing response from the peer.
This translation class is used to trigger the data fetch once a timing translation returns the transl...
void finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc, BaseMMU::Mode mode)
void setVirt(Addr vaddr, int size, Request::Flags flags, int requestorId)
Stage2Walk(TableWalker &_parent, uint8_t *_data, Event *_event, Addr vaddr, BaseMMU::Mode mode, MMU::ArmTranslationType tran_type)
void translateTiming(ThreadContext *tc)
bool isWrite
If the access is a write.
Addr vaddr_tainted
The virtual address that is being translated.
RequestPtr req
Request that is currently being serviced.
VTCR_t vtcr
Cached copy of the vtcr as it existed when translation began.
HCR hcr
Cached copy of the htcr as it existed when translation began.
Addr vaddr
The virtual address that is being translated with tagging removed.
bool functional
If the atomic mode should be functional.
bool secureLookup
Helper variables used to implement hierarchical access permissions when the long-desc.
bool isUncacheable
True if table walks are uncacheable (for table descriptors)
ThreadContext * tc
Thread context that we're doing the walk for.
bool hpd
Hierarchical access permission disable.
BaseMMU::Translation * transState
Translation state for delayed requests.
bool isSecure
If the access comes from the secure state.
BaseMMU::Mode mode
Save mode for use in delayed response.
HTCR htcr
Cached copy of the htcr as it existed when translation began.
ExceptionLevel el
Current exception level.
MMU::ArmTranslationType tranType
The translation type that has been requested.
SCTLR sctlr
Cached copy of the sctlr as it existed when translation began.
Fault fault
The fault that we are going to return.
Tick startTime
Timestamp for calculating elapsed time in service (for stats)
bool isFetch
If the access is a fetch (for execution, and no-exec) must be checked?
bool stage2Req
Flag indicating if a second stage of lookup is required.
TlbEntry walkEntry
Initial walk entry allowing to skip lookup levels.
bool timing
If the mode is timing or atomic.
LongDescriptor longDesc
Long-format descriptor (LPAE and AArch64)
int physAddrRange
Current physical address range in bits.
bool delayed
Whether the response is delayed in timing mode due to additional lookups.
uint16_t asid
ASID that we're servicing the request under.
L1Descriptor l1Desc
Short-format descriptors.
bool aarch64
If the access is performed in AArch64 state.
BaseMMU::Translation * stage2Tran
A pointer to the stage 2 translation that's in progress.
static LookupLevel toLookupLevel(uint8_t lookup_level_as_int)
enums::ArmLookupLevel LookupLevel
Definition: table_walker.hh:68
Fault testWalk(Addr pa, Addr size, TlbEntry::DomainType domain, LookupLevel lookup_level, bool stage2)
void memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr, uint8_t texcb, bool s)
const ArmRelease * release
Cached copies of system-level properties.
Fault generateLongDescFault(ArmFault::FaultSource src)
EventFunctionWrapper doL1DescEvent
EventFunctionWrapper doProcessEvent
static const unsigned REQUESTED
static const unsigned COMPLETED
void memAttrsAArch64(ThreadContext *tc, TlbEntry &te, LongDescriptor &lDescriptor)
void insertPartialTableEntry(LongDescriptor &descriptor)
void drainResume() override
Resume execution after a successful drain.
void doLongDescriptorWrapper(LookupLevel curr_lookup_level)
bool pending
If a timing translation is currently in progress.
Port * port
Port shared by the two table walkers.
std::tuple< Addr, Addr, LookupLevel > walkAddresses(Addr ttbr, GrainSize tg, int tsz, int pa_range)
Returns a tuple made of: 1) The address of the first page table 2) The address of the first descripto...
Fault readDataUntimed(ThreadContext *tc, Addr vaddr, Addr desc_addr, uint8_t *data, int num_bytes, Request::Flags flags, BaseMMU::Mode mode, MMU::ArmTranslationType tran_type, bool functional)
TableWalker(const Params &p)
Definition: table_walker.cc:62
void nextWalk(ThreadContext *tc)
void readDataTimed(ThreadContext *tc, Addr desc_addr, Stage2Walk *translation, int num_bytes, Request::Flags flags)
EventFunctionWrapper doL2DescEvent
bool checkVAddrSizeFaultAArch64(Addr addr, int top_bit, GrainSize granule, int tsz, bool low_range)
gem5::Port & getPort(const std::string &if_name, PortID idx=InvalidPortID) override
Get a port with a given name and index.
std::list< WalkerState * > pendingQueue
Queue of requests that are waiting because the walker is currently busy.
Fault walk(const RequestPtr &req, ThreadContext *tc, uint16_t asid, vmid_t _vmid, bool hyp, BaseMMU::Mode mode, BaseMMU::Translation *_trans, bool timing, bool functional, bool secure, MMU::ArmTranslationType tran_type, bool stage2, const TlbEntry *walk_entry)
MMU * mmu
The MMU to forward second stage lookups to.
RequestorID requestorId
Requestor id assigned by the MMU.
bool fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes, Request::Flags flags, int queueIndex, Event *event, void(TableWalker::*doDescriptor)())
gem5::ArmISA::TableWalker::TableWalkerStats stats
const bool isStage2
Indicates whether this table walker is part of the stage 2 mmu.
bool checkAddrSizeFaultAArch64(Addr addr, int pa_range)
Returns true if the address exceeds the range permitted by the system-wide setting or by the TCR_ELx ...
static uint8_t pageSizeNtoStatBin(uint8_t N)
void completeDrain()
Checks if all state is cleared and if so, completes drain.
void insertTableEntry(DescriptorBase &descriptor, bool longDescriptor)
DrainState drain() override
Draining is the process of clearing out the states of SimObjects.These are the SimObjects that are pa...
std::list< WalkerState * > stateQueues[LookupLevel::Num_ArmLookupLevel]
Queues of requests for all the different lookup levels.
unsigned numSquashable
The number of walks belonging to squashed instructions that can be removed from the pendingQueue per ...
TLB * tlb
TLB that is initiating these table walks.
void memAttrsLPAE(ThreadContext *tc, TlbEntry &te, LongDescriptor &lDescriptor)
SCTLR sctlr
Cached copy of the sctlr as it existed when translation began.
bool has(ArmExtension ext) const
Definition: system.hh:76
virtual bool squashed() const
This function is used by the page table walker to determine if it should translate the a pending requ...
Definition: mmu.hh:84
virtual void finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc, BaseMMU::Mode mode)=0
@ Execute
Definition: mmu.hh:56
The ClockedObject class extends the SimObject with a clock and accessor functions to relate ticks to ...
ClockedObjectParams Params
Parameters of ClockedObject.
Tick clockEdge(Cycles cycles=Cycles(0)) const
Determine the tick when a cycle begins, by default the current one, but the argument also enables the...
Tick clockPeriod() const
Cycles is a wrapper class for representing cycle counts, i.e.
Definition: types.hh:79
virtual std::string name() const
Definition: named.hh:47
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:294
Addr getAddr() const
Definition: packet.hh:805
bool isError() const
Definition: packet.hh:621
bool isResponse() const
Definition: packet.hh:597
void dataStatic(T *p)
Set the data pointer to the following value that should not be freed.
Definition: packet.hh:1162
SenderState * senderState
This packet's sender state.
Definition: packet.hh:544
RequestPtr req
A pointer to the original request.
Definition: packet.hh:376
bool cacheResponding() const
Definition: packet.hh:657
bool hasSharers() const
Definition: packet.hh:684
Ports are used to interface objects to each other.
Definition: port.hh:62
The QueuedRequestPort combines two queues, a request queue and a snoop response queue,...
Definition: qport.hh:110
Tick sendAtomic(PacketPtr pkt)
Send an atomic request packet, where the data is moved and the state is updated in zero time,...
Definition: port.hh:464
void sendFunctional(PacketPtr pkt) const
Send a functional request packet, where the data is instantly updated everywhere in the memory system...
Definition: port.hh:485
@ PT_WALK
The request is a page table walk.
Definition: request.hh:188
@ SECURE
The request targets the secure memory space.
Definition: request.hh:186
@ UNCACHEABLE
The request is to an uncacheable address.
Definition: request.hh:125
@ NO_ACCESS
The request should not cause a memory access.
Definition: request.hh:146
ThreadContext is the external interface to all thread state for anything outside of the CPU.
virtual RegVal readMiscReg(RegIndex misc_reg)=0
virtual BaseCPU * getCpuPtr()=0
Derived & ysubname(off_type index, const std::string &subname)
Definition: statistics.hh:490
Derived & subname(off_type index, const std::string &name)
Set the subfield name for the given index, and marks this stat to print at the end of simulation.
Definition: statistics.hh:402
Derived & flags(Flags _flags)
Set the flags and marks this stat to print at the end of simulation.
Definition: statistics.hh:358
void sample(const U &v, int n=1)
Add a value to the distribtion n times.
Definition: statistics.hh:1328
Statistics container.
Definition: group.hh:94
Histogram & init(size_type size)
Set the parameters of this histogram.
Definition: statistics.hh:2154
Derived & init(size_type _x, size_type _y)
Definition: statistics.hh:1174
Derived & init(size_type size)
Set this vector to have the given size.
Definition: statistics.hh:1040
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Definition: group.hh:75
constexpr T bits(T val, unsigned first, unsigned last)
Extract the bitfield from position 'first' to 'last' (inclusive) from 'val' and right justify it.
Definition: bitfield.hh:76
constexpr T mbits(T val, unsigned first, unsigned last)
Mask off the given bits in place like bits() but without shifting.
Definition: bitfield.hh:103
void signalDrainDone() const
Signal that an object is drained.
Definition: drain.hh:305
DrainState drainState() const
Return the current drain state of an object.
Definition: drain.hh:324
DrainState
Object drain/handover states.
Definition: drain.hh:75
@ Draining
Draining buffers pending serialization/handover.
@ Drained
Buffers drained, ready for serialization/handover.
void schedule(Event &event, Tick when)
Definition: eventq.hh:1019
void set(Type mask)
Set all flag's bits matching the given mask.
Definition: flags.hh:116
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:178
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
Definition: logging.hh:204
const Params & params() const
Definition: sim_object.hh:176
virtual Port & getPort(const std::string &if_name, PortID idx=InvalidPortID)
Get a port with a given name and index.
Definition: sim_object.cc:126
atomic_var_t state
Definition: helpers.cc:188
uint8_t flags
Definition: helpers.cc:66
#define warn_once(...)
Definition: logging.hh:250
#define warn_if(cond,...)
Conditional warning macro that checks the supplied condition and only prints a warning if the conditi...
Definition: logging.hh:273
ByteOrder byteOrder(const ThreadContext *tc)
Definition: utility.hh:357
Bitfield< 30 > te
Definition: misc_types.hh:344
const PageTableOps * getPageTableOps(GrainSize trans_granule)
Definition: pagetable.cc:476
Bitfield< 31 > n
Definition: misc_types.hh:462
Bitfield< 24 > hpd
Definition: misc_types.hh:540
Bitfield< 3, 0 > mask
Definition: pcstate.hh:63
Bitfield< 4, 0 > mode
Definition: misc_types.hh:74
bool isSecure(ThreadContext *tc)
Definition: utility.cc:74
Bitfield< 18, 16 > ps
Definition: misc_types.hh:520
ExceptionLevel s1TranslationRegime(ThreadContext *tc, ExceptionLevel el)
Definition: utility.cc:231
bool longDescFormatInUse(ThreadContext *tc)
Definition: utility.cc:131
Bitfield< 4 > s
Definition: misc_types.hh:568
Bitfield< 8, 7 > sh
Definition: misc_types.hh:667
Bitfield< 7 > i
Definition: misc_types.hh:67
Bitfield< 7, 4 > domain
Definition: misc_types.hh:430
Bitfield< 11 > z
Definition: misc_types.hh:381
bool ELIs64(ThreadContext *tc, ExceptionLevel el)
Definition: utility.cc:269
const GrainSize GrainMap_tg1[]
Definition: pagetable.cc:51
Bitfield< 33 > id
Definition: misc_types.hh:257
int computeAddrTop(ThreadContext *tc, bool selbit, bool is_instr, TCR tcr, ExceptionLevel el)
Definition: utility.cc:405
Bitfield< 21, 20 > stride
Definition: misc_types.hh:453
MiscRegIndex
Definition: misc.hh:64
@ MISCREG_HCR
Definition: misc.hh:253
@ MISCREG_VSTCR_EL2
Definition: misc.hh:613
@ MISCREG_SCTLR_EL2
Definition: misc.hh:589
@ MISCREG_TCR_EL2
Definition: misc.hh:609
@ MISCREG_MAIR_EL1
Definition: misc.hh:731
@ MISCREG_SCTLR
Definition: misc.hh:240
@ MISCREG_TTBCR
Definition: misc.hh:265
@ MISCREG_MAIR_EL2
Definition: misc.hh:735
@ MISCREG_TCR_EL3
Definition: misc.hh:615
@ MISCREG_TCR_EL1
Definition: misc.hh:606
@ MISCREG_SCTLR_EL1
Definition: misc.hh:584
@ MISCREG_PRRR
Definition: misc.hh:374
@ MISCREG_MAIR_EL3
Definition: misc.hh:737
@ MISCREG_CPSR
Definition: misc.hh:65
@ MISCREG_NMRR
Definition: misc.hh:380
@ MISCREG_TTBR1_EL1
Definition: misc.hh:604
@ MISCREG_MAIR1
Definition: misc.hh:383
@ MISCREG_TTBR1_EL2
Definition: misc.hh:825
@ MISCREG_HTTBR
Definition: misc.hh:452
@ MISCREG_HCR_EL2
Definition: misc.hh:591
@ MISCREG_TTBR1
Definition: misc.hh:262
@ MISCREG_VTCR_EL2
Definition: misc.hh:611
@ MISCREG_VTTBR
Definition: misc.hh:453
@ MISCREG_HTCR
Definition: misc.hh:268
@ MISCREG_TTBR0_EL3
Definition: misc.hh:614
@ MISCREG_VTCR
Definition: misc.hh:269
@ MISCREG_TTBR0
Definition: misc.hh:259
@ MISCREG_TTBR0_EL2
Definition: misc.hh:608
@ MISCREG_TTBR0_EL1
Definition: misc.hh:602
@ MISCREG_SCTLR_EL3
Definition: misc.hh:596
@ MISCREG_VSTTBR_EL2
Definition: misc.hh:612
@ MISCREG_VTTBR_EL2
Definition: misc.hh:610
@ MISCREG_MAIR0
Definition: misc.hh:377
Bitfield< 6 > f
Definition: misc_types.hh:68
Bitfield< 3, 2 > el
Definition: misc_types.hh:73
uint16_t vmid_t
Definition: types.hh:57
int decodePhysAddrRange64(uint8_t pa_enc)
Returns the number of physical address bits implied by the given PA-range encoding.
Definition: utility.cc:1281
const GrainSize GrainMap_tg0[]
Definition: pagetable.cc:49
int snsBankedIndex(MiscRegIndex reg, ThreadContext *tc)
Definition: misc.cc:670
Bitfield< 34 > aarch64
Definition: types.hh:81
bool HaveExt(ThreadContext *tc, ArmExtension ext)
Returns true if the provided ThreadContext supports the ArmExtension passed as a second argument.
Definition: utility.cc:224
Addr purifyTaggedAddr(Addr addr, ThreadContext *tc, ExceptionLevel el, TCR tcr, bool is_instr)
Removes the tag from tagged addresses if that mode is enabled.
Definition: utility.cc:466
Bitfield< 39, 12 > pa
Definition: misc_types.hh:663
Bitfield< 59, 56 > tlb
Definition: misc_types.hh:92
Bitfield< 10, 5 > event
constexpr RegId L2
Definition: int.hh:113
constexpr RegId L1
Definition: int.hh:112
constexpr RegId L0
Definition: int.hh:111
constexpr RegId L3
Definition: int.hh:114
Bitfield< 54 > p
Definition: pagetable.hh:70
Bitfield< 5, 3 > reg
Definition: types.hh:92
Bitfield< 3 > addr
Definition: types.hh:84
Bitfield< 7, 0 > L
Definition: int.hh:62
ProbePointArg< PacketInfo > Packet
Packet probe point.
Definition: mem.hh:109
const FlagsType pdf
Print the percent of the total that this entry represents.
Definition: info.hh:62
const FlagsType nonan
Don't print if this is NAN.
Definition: info.hh:70
const FlagsType nozero
Don't print if this is zero.
Definition: info.hh:68
const FlagsType total
Print the total.
Definition: info.hh:60
const FlagsType dist
Print the distribution.
Definition: info.hh:66
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
std::shared_ptr< FaultBase > Fault
Definition: types.hh:248
std::shared_ptr< Request > RequestPtr
Definition: request.hh:92
Tick curTick()
The universal simulation clock.
Definition: cur_tick.hh:46
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:147
int16_t PortID
Port index/ID type, and a symbolic name for an invalid port id.
Definition: types.hh:245
bool FullSystem
The FullSystem variable can be used to determine the current mode of simulation.
Definition: root.cc:220
uint64_t Tick
Tick count type.
Definition: types.hh:58
uint16_t RequestorID
Definition: request.hh:95
T htog(T value, ByteOrder guest_byte_order)
Definition: byteswap.hh:187
uint64_t RegVal
Definition: types.hh:173
constexpr decltype(nullptr) NoFault
Definition: types.hh:253
TableWalkerStats(statistics::Group *parent)
LookupLevel lookupLevel
Definition: pagetable.hh:215
const std::string & name()
Definition: trace.cc:49

Generated on Wed Dec 21 2022 10:22:27 for gem5 by doxygen 1.9.1