gem5  v22.1.0.0
smmu_v3_transl.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2013, 2018-2019, 2021 Arm Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions are
16  * met: redistributions of source code must retain the above copyright
17  * notice, this list of conditions and the following disclaimer;
18  * redistributions in binary form must reproduce the above copyright
19  * notice, this list of conditions and the following disclaimer in the
20  * documentation and/or other materials provided with the distribution;
21  * neither the name of the copyright holders nor the names of its
22  * contributors may be used to endorse or promote products derived from
23  * this software without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  */
37 
39 
40 #include "arch/arm/pagetable.hh"
41 #include "debug/SMMUv3.hh"
42 #include "debug/SMMUv3Hazard.hh"
43 #include "dev/arm/amba.hh"
44 #include "dev/arm/smmu_v3.hh"
45 #include "sim/system.hh"
46 
47 namespace gem5
48 {
49 
50 using namespace ArmISA;
51 
// Build an SMMUTranslRequest describing a translation demanded by an
// incoming bus packet.
// NOTE(review): this listing is a Doxygen text extraction — the signature
// line (original line 53, presumably
// "SMMUTranslRequest::fromPacket(PacketPtr pkt, bool ats)") and the local
// "SMMUTranslRequest req;" declaration (original line 55) were dropped.
// Confirm against the real smmu_v3_transl.cc before treating as compilable.
52 SMMUTranslRequest
54 {
56  req.addr = pkt->getAddr();
57  req.size = pkt->getSize();
58  req.sid = pkt->req->streamId();
// SubstreamID is optional on a Request; default to 0 when absent.
59  req.ssid = pkt->req->hasSubstreamId() ?
60  pkt->req->substreamId() : 0;
61  req.isWrite = pkt->isWrite();
// A packet-sourced request is a demand access, never a prefetch.
62  req.isPrefetch = false;
// 'ats' flags the request as a PCIe ATS translation request.
63  req.isAtsRequest = ats;
// Keep the originating packet so the response path can reuse it.
64  req.pkt = pkt;
65 
66  return req;
67 }
68 
// Build a synthetic, read-only prefetch request for the given address and
// stream/substream IDs.  There is no backing packet (pkt == NULL) and no
// payload (size == 0); the request exists only to warm the TLBs.
// NOTE(review): Doxygen extraction dropped original line 69 (the
// "SMMUTranslRequest" return type) and line 72 (the local
// "SMMUTranslRequest req;" declaration).
70 SMMUTranslRequest::prefetch(Addr addr, uint32_t sid, uint32_t ssid)
71 {
73  req.addr = addr;
74  req.size = 0;
75  req.sid = sid;
76  req.ssid = ssid;
77  req.isWrite = false;
78  req.isPrefetch = true;
79  req.isAtsRequest = false;
80  req.pkt = NULL;
81 
82  return req;
83 }
84 
86  SMMUv3 &_smmu, SMMUv3DeviceInterface &_ifc)
87  :
88  SMMUProcess(name, _smmu),
89  ifc(_ifc)
90 {
91  // Decrease number of pending translation slots on the device interface
92  assert(ifc.xlateSlotsRemaining > 0);
94 
96  reinit();
97 }
98 
100 {
101  // Increase number of pending translation slots on the device interface
102  assert(ifc.pendingMemAccesses > 0);
104 
105  // If no more SMMU memory accesses are pending,
106  // signal SMMU Device Interface as drained
107  if (ifc.pendingMemAccesses == 0) {
109  }
110 }
111 
112 void
114 {
115  request = req;
116 
117  reinit();
118 }
119 
120 void
122 {
123  assert(smmu.system.isTimingMode());
124 
125  assert(!"Stalls are broken");
126 
127  Tick resumeTick = curTick();
128 
129  (void) resumeTick;
130  DPRINTF(SMMUv3, "Resume at tick = %d. Fault duration = %d (%.3fus)\n",
131  resumeTick, resumeTick-faultTick, (resumeTick-faultTick) / 1e6);
132 
134 
136 }
137 
138 void
140 {
141  // Hack:
142  // The coroutine starts running as soon as it's created.
143  // But we need to wait for request data esp. in atomic mode.
144  SMMUAction a;
145  a.type = ACTION_INITIAL_NOP;
146  a.pkt = NULL;
147  yield(a);
148 
149  const Addr next4k = (request.addr + 0x1000ULL) & ~0xfffULL;
150 
151  if ((request.addr + request.size) > next4k)
152  panic("Transaction crosses 4k boundary (addr=%#x size=%#x)!\n",
154 
155 
156  unsigned numResponderBeats = request.isWrite ?
157  (request.size + (ifc.portWidth - 1)) / ifc.portWidth : 1;
158 
160  doDelay(yield, Cycles(numResponderBeats));
162 
163 
164  recvTick = curTick();
165 
166  if (!(smmu.regs.cr0 & CR0_SMMUEN_MASK)) {
167  // SMMU disabled
168  doDelay(yield, Cycles(1));
170  return;
171  }
172 
173  TranslResult tr;
174  bool wasPrefetched = false;
175 
176  if (request.isPrefetch) {
177  // Abort prefetch if:
178  // - there's already a transaction looking up the same 4k page, OR
179  // - requested address is already in the TLB.
180  if (hazard4kCheck() || ifcTLBLookup(yield, tr, wasPrefetched))
181  completePrefetch(yield); // this never returns
182 
184 
185  tr = smmuTranslation(yield);
186 
187  if (tr.fault == FAULT_NONE)
188  ifcTLBUpdate(yield, tr);
189 
190  hazard4kRelease();
191 
192  completePrefetch(yield);
193  } else {
195 
196  if (!microTLBLookup(yield, tr)) {
197  bool hit = ifcTLBLookup(yield, tr, wasPrefetched);
198  if (!hit) {
199  while (!hit && hazard4kCheck()) {
200  hazard4kHold(yield);
201  hit = ifcTLBLookup(yield, tr, wasPrefetched);
202  }
203  }
204 
205  // Issue prefetch if:
206  // - there was a TLB hit and the entry was prefetched, OR
207  // - TLB miss was successfully serviced
208  if (hit) {
209  if (wasPrefetched)
210  issuePrefetch(next4k);
211  } else {
213 
214  tr = smmuTranslation(yield);
215 
216  if (tr.fault == FAULT_NONE) {
217  ifcTLBUpdate(yield, tr);
218 
219  issuePrefetch(next4k);
220  }
221 
222  hazard4kRelease();
223  }
224 
225  if (tr.fault == FAULT_NONE)
226  microTLBUpdate(yield, tr);
227  }
228 
229  hazardIdHold(yield);
230  hazardIdRelease();
231 
232  if (tr.fault != FAULT_NONE)
233  panic("Translation Fault (addr=%#x, size=%#x, sid=%d, ssid=%d, "
234  "isWrite=%d, isPrefetch=%d, isAtsRequest=%d)\n",
237 
238  completeTransaction(yield, tr);
239  }
240 }
241 
// Identity ("bypass") translation: used when neither translation stage is
// enabled for the stream.  The output address equals the input address,
// there is no fault, the address mask is 0 and the result is writable.
// NOTE(review): Doxygen extraction dropped the signature lines (original
// 242-243, presumably "SMMUTranslationProcess::bypass(Addr addr) const").
244 {
245  TranslResult tr;
246  tr.fault = FAULT_NONE;
247  tr.addr = addr;
248  tr.addrMask = 0;
249  tr.writable = 1;
250 
251  return tr;
252 }
253 
256 {
257  TranslResult tr;
258 
259  // Need SMMU credit to proceed
260  doSemaphoreDown(yield, smmu.transSem);
261 
262  // Simulate pipelined IFC->SMMU link
264  doDelay(yield, Cycles(1)); // serialize transactions
266  doDelay(yield, smmu.ifcSmmuLat - Cycles(1)); // remaining pipeline delay
267 
268  bool haveConfig = true;
269  if (!configCacheLookup(yield, context)) {
270  if (findConfig(yield, context, tr)) {
271  configCacheUpdate(yield, context);
272  } else {
273  haveConfig = false;
274  }
275  }
276 
277  if (haveConfig && !smmuTLBLookup(yield, tr)) {
278  // SMMU main TLB miss
279 
280  // Need PTW slot to proceed
281  doSemaphoreDown(yield, smmu.ptwSem);
282 
283  // Page table walk
284  Tick ptwStartTick = curTick();
285 
286  if (context.stage1Enable) {
287  tr = translateStage1And2(yield, request.addr);
288  } else if (context.stage2Enable) {
289  tr = translateStage2(yield, request.addr, true);
290  } else {
291  tr = bypass(request.addr);
292  }
293 
295  smmu.stats.ptwTimeDist.sample(curTick() - ptwStartTick);
296 
297  // Free PTW slot
299 
300  if (tr.fault == FAULT_NONE)
301  smmuTLBUpdate(yield, tr);
302  }
303 
304  // Simulate pipelined SMMU->RESPONSE INTERFACE link
306  doDelay(yield, Cycles(1)); // serialize transactions
308  doDelay(yield, smmu.smmuIfcLat - Cycles(1)); // remaining pipeline delay
309 
310  // return SMMU credit
312 
313  return tr;
314 }
315 
316 bool
318 {
319  if (!ifc.microTLBEnable)
320  return false;
321 
323  doDelay(yield, ifc.microTLBLat);
324  const SMMUTLB::Entry *e =
327 
328  if (!e) {
329  DPRINTF(SMMUv3, "micro TLB miss vaddr=%#x sid=%#x ssid=%#x\n",
331 
332  return false;
333  }
334 
335  DPRINTF(SMMUv3,
336  "micro TLB hit vaddr=%#x amask=%#x sid=%#x ssid=%#x paddr=%#x\n",
337  request.addr, e->vaMask, request.sid, request.ssid, e->pa);
338 
339  tr.fault = FAULT_NONE;
340  tr.addr = e->pa + (request.addr & ~e->vaMask);;
341  tr.addrMask = e->vaMask;
342  tr.writable = e->permissions;
343 
344  return true;
345 }
346 
347 bool
349  bool &wasPrefetched)
350 {
351  if (!ifc.mainTLBEnable)
352  return false;
353 
355  doDelay(yield, ifc.mainTLBLat);
356  const SMMUTLB::Entry *e =
359 
360  if (!e) {
361  DPRINTF(SMMUv3,
362  "RESPONSE Interface TLB miss vaddr=%#x sid=%#x ssid=%#x\n",
364 
365  return false;
366  }
367 
368  DPRINTF(SMMUv3,
369  "RESPONSE Interface TLB hit vaddr=%#x amask=%#x sid=%#x ssid=%#x "
370  "paddr=%#x\n", request.addr, e->vaMask, request.sid,
371  request.ssid, e->pa);
372 
373  tr.fault = FAULT_NONE;
374  tr.addr = e->pa + (request.addr & ~e->vaMask);;
375  tr.addrMask = e->vaMask;
376  tr.writable = e->permissions;
377  wasPrefetched = e->prefetched;
378 
379  return true;
380 }
381 
382 bool
384 {
385  if (!smmu.tlbEnable)
386  return false;
387 
388  doSemaphoreDown(yield, smmu.tlbSem);
389  doDelay(yield, smmu.tlbLat);
390  const ARMArchTLB::Entry *e =
393 
394  if (!e) {
395  DPRINTF(SMMUv3, "SMMU TLB miss vaddr=%#x asid=%#x vmid=%#x\n",
397 
398  return false;
399  }
400 
401  DPRINTF(SMMUv3,
402  "SMMU TLB hit vaddr=%#x amask=%#x asid=%#x vmid=%#x paddr=%#x\n",
403  request.addr, e->vaMask, context.asid, context.vmid, e->pa);
404 
405  tr.fault = FAULT_NONE;
406  tr.addr = e->pa + (request.addr & ~e->vaMask);;
407  tr.addrMask = e->vaMask;
408  tr.writable = e->permissions;
409 
410  return true;
411 }
412 
413 void
415  const TranslResult &tr)
416 {
417  assert(tr.fault == FAULT_NONE);
418 
419  if (!ifc.microTLBEnable)
420  return;
421 
423  e.valid = true;
424  e.prefetched = false;
425  e.sid = request.sid;
426  e.ssid = request.ssid;
427  e.vaMask = tr.addrMask;
428  e.va = request.addr & e.vaMask;
429  e.pa = tr.addr & e.vaMask;
430  e.permissions = tr.writable;
431  e.asid = context.asid;
432  e.vmid = context.vmid;
433 
435 
436  DPRINTF(SMMUv3,
437  "micro TLB upd vaddr=%#x amask=%#x paddr=%#x sid=%#x ssid=%#x\n",
438  e.va, e.vaMask, e.pa, e.sid, e.ssid);
439 
441 
443 }
444 
445 void
447  const TranslResult &tr)
448 {
449  assert(tr.fault == FAULT_NONE);
450 
451  if (!ifc.mainTLBEnable)
452  return;
453 
455  e.valid = true;
456  e.prefetched = request.isPrefetch;
457  e.sid = request.sid;
458  e.ssid = request.ssid;
459  e.vaMask = tr.addrMask;
460  e.va = request.addr & e.vaMask;
461  e.pa = tr.addr & e.vaMask;
462  e.permissions = tr.writable;
463  e.asid = context.asid;
464  e.vmid = context.vmid;
465 
468  alloc = request.isPrefetch ?
470 
472 
473  DPRINTF(SMMUv3,
474  "RESPONSE Interface upd vaddr=%#x amask=%#x paddr=%#x sid=%#x "
475  "ssid=%#x\n", e.va, e.vaMask, e.pa, e.sid, e.ssid);
476 
477  ifc.mainTLB->store(e, alloc);
478 
480 }
481 
482 void
484  const TranslResult &tr)
485 {
486  assert(tr.fault == FAULT_NONE);
487 
488  if (!smmu.tlbEnable)
489  return;
490 
492  e.valid = true;
493  e.vaMask = tr.addrMask;
494  e.va = request.addr & e.vaMask;
495  e.asid = context.asid;
496  e.vmid = context.vmid;
497  e.pa = tr.addr & e.vaMask;
498  e.permissions = tr.writable;
499 
500  doSemaphoreDown(yield, smmu.tlbSem);
501 
502  DPRINTF(SMMUv3,
503  "SMMU TLB upd vaddr=%#x amask=%#x paddr=%#x asid=%#x vmid=%#x\n",
504  e.va, e.vaMask, e.pa, e.asid, e.vmid);
505 
506  smmu.tlb.store(e);
507 
509 }
510 
511 bool
513 {
514  if (!smmu.configCacheEnable)
515  return false;
516 
518  doDelay(yield, smmu.configLat);
519  const ConfigCache::Entry *e =
522 
523  if (!e) {
524  DPRINTF(SMMUv3, "Config miss sid=%#x ssid=%#x\n",
526 
527  return false;
528  }
529 
530  DPRINTF(SMMUv3, "Config hit sid=%#x ssid=%#x ttb=%#08x asid=%#x\n",
531  request.sid, request.ssid, e->ttb0, e->asid);
532 
533  tc.stage1Enable = e->stage1_en;
534  tc.stage2Enable = e->stage2_en;
535 
536  tc.ttb0 = e->ttb0;
537  tc.ttb1 = e->ttb1;
538  tc.asid = e->asid;
539  tc.httb = e->httb;
540  tc.vmid = e->vmid;
541 
542  tc.stage1TranslGranule = e->stage1_tg;
543  tc.stage2TranslGranule = e->stage2_tg;
544 
545  tc.t0sz = e->t0sz;
546  tc.s2t0sz = e->s2t0sz;
547 
548  return true;
549 }
550 
551 void
553  const TranslContext &tc)
554 {
555  if (!smmu.configCacheEnable)
556  return;
557 
559  e.valid = true;
560  e.sid = request.sid;
561  e.ssid = request.ssid;
562  e.stage1_en = tc.stage1Enable;
563  e.stage2_en = tc.stage2Enable;
564  e.ttb0 = tc.ttb0;
565  e.ttb1 = tc.ttb1;
566  e.asid = tc.asid;
567  e.httb = tc.httb;
568  e.vmid = tc.vmid;
569  e.stage1_tg = tc.stage1TranslGranule;
570  e.stage2_tg = tc.stage2TranslGranule;
571  e.t0sz = tc.t0sz;
572  e.s2t0sz = tc.s2t0sz;
573 
575 
576  DPRINTF(SMMUv3, "Config upd sid=%#x ssid=%#x\n", e.sid, e.ssid);
577 
579 
581 }
582 
583 bool
585  TranslContext &tc,
586  TranslResult &tr)
587 {
588  tc.stage1Enable = false;
589  tc.stage2Enable = false;
590 
591  StreamTableEntry ste;
592  doReadSTE(yield, ste, request.sid);
593 
594  switch (ste.dw0.config) {
595  case STE_CONFIG_BYPASS:
596  break;
597 
599  tc.stage1Enable = true;
600  break;
601 
603  tc.stage2Enable = true;
604  break;
605 
607  tc.stage1Enable = true;
608  tc.stage2Enable = true;
609  break;
610 
611  default:
612  panic("Bad or unimplemented STE config %d\n",
613  ste.dw0.config);
614  }
615 
616 
617  // Establish stage 2 context first since
618  // Context Descriptors can be in IPA space.
619  if (tc.stage2Enable) {
620  tc.httb = ste.dw3.s2ttb << STE_S2TTB_SHIFT;
621  tc.vmid = ste.dw2.s2vmid;
622  tc.stage2TranslGranule = ste.dw2.s2tg;
623  tc.s2t0sz = ste.dw2.s2t0sz;
624  } else {
625  tc.httb = 0xdeadbeef;
626  tc.vmid = 0;
628  tc.s2t0sz = 0;
629  }
630 
631 
632  // Now fetch stage 1 config.
633  if (context.stage1Enable) {
635  doReadCD(yield, cd, ste, request.sid, request.ssid);
636 
637  tc.ttb0 = cd.dw1.ttb0 << CD_TTB_SHIFT;
638  tc.ttb1 = cd.dw2.ttb1 << CD_TTB_SHIFT;
639  tc.asid = cd.dw0.asid;
640  tc.stage1TranslGranule = cd.dw0.tg0;
641  tc.t0sz = cd.dw0.t0sz;
642  } else {
643  tc.ttb0 = 0xcafebabe;
644  tc.ttb1 = 0xcafed00d;
645  tc.asid = 0;
647  tc.t0sz = 0;
648  }
649 
650  return true;
651 }
652 
653 void
655  Yield &yield,
656  const WalkCache::Entry *&walkEntry,
657  Addr addr, uint16_t asid, uint16_t vmid,
658  unsigned stage, unsigned level)
659 {
660  const char *indent = stage==2 ? " " : "";
661  (void) indent; // this is only used in DPRINTFs
662 
663  const auto tg = stage == 1 ?
666 
667  const auto *pt_ops = getPageTableOps(tg);
668 
669  unsigned walkCacheLevels =
671  (stage == 1 ? smmu.walkCacheS1Levels : smmu.walkCacheS2Levels) :
672  0;
673 
674  if ((1 << level) & walkCacheLevels) {
675  doSemaphoreDown(yield, smmu.walkSem);
676  doDelay(yield, smmu.walkLat);
677 
678  walkEntry = smmu.walkCache.lookup(addr, pt_ops->walkMask(level),
679  asid, vmid, stage, level);
680 
681  if (walkEntry) {
682  DPRINTF(SMMUv3, "%sWalkCache hit va=%#x asid=%#x vmid=%#x "
683  "base=%#x (S%d, L%d)\n",
684  indent, addr, asid, vmid, walkEntry->pa, stage, level);
685  } else {
686  DPRINTF(SMMUv3, "%sWalkCache miss va=%#x asid=%#x vmid=%#x "
687  "(S%d, L%d)\n",
688  indent, addr, asid, vmid, stage, level);
689  }
690 
692  }
693 }
694 
695 void
697  Addr vaMask, Addr pa,
698  unsigned stage, unsigned level,
699  bool leaf, uint8_t permissions)
700 {
701  unsigned walkCacheLevels =
703 
704  if (smmu.walkCacheEnable && ((1<<level) & walkCacheLevels)) {
706  e.valid = true;
707  e.va = va;
708  e.vaMask = vaMask;
709  e.asid = stage==1 ? context.asid : 0;
710  e.vmid = context.vmid;
711  e.stage = stage;
712  e.level = level;
713  e.leaf = leaf;
714  e.pa = pa;
715  e.permissions = permissions;
716 
717  doSemaphoreDown(yield, smmu.walkSem);
718 
719  DPRINTF(SMMUv3, "%sWalkCache upd va=%#x mask=%#x asid=%#x vmid=%#x "
720  "tpa=%#x leaf=%s (S%d, L%d)\n",
721  e.stage==2 ? " " : "",
722  e.va, e.vaMask, e.asid, e.vmid,
723  e.pa, e.leaf, e.stage, e.level);
724 
726 
728  }
729 }
730 
731 /*
732  * Please note:
733  * This does not deal with the case where stage 1 page size
734  * is larger than stage 2 page size.
735  */
738  const PageTableOps *pt_ops,
739  unsigned level, Addr walkPtr)
740 {
741  PageTableOps::pte_t pte = 0;
742 
743  doSemaphoreDown(yield, smmu.cycleSem);
744  doDelay(yield, Cycles(1));
746 
747  for (; level <= pt_ops->lastLevel(); level++) {
748  Addr pte_addr = walkPtr + pt_ops->index(
749  addr, level, 64 - context.t0sz);
750 
751  DPRINTF(SMMUv3, "Fetching S1 L%d PTE from pa=%#08x\n",
752  level, pte_addr);
753 
754  doReadPTE(yield, addr, pte_addr, &pte, 1, level);
755 
756  DPRINTF(SMMUv3, "Got S1 L%d PTE=%#x from pa=%#08x\n",
757  level, pte, pte_addr);
758 
759  doSemaphoreDown(yield, smmu.cycleSem);
760  doDelay(yield, Cycles(1));
762 
763  bool valid = pt_ops->isValid(pte, level);
764  bool leaf = pt_ops->isLeaf(pte, level);
765 
766  if (!valid) {
767  DPRINTF(SMMUv3, "S1 PTE not valid - fault\n");
768 
769  TranslResult tr;
771  return tr;
772  }
773 
774  if (valid && leaf && request.isWrite &&
775  !pt_ops->isWritable(pte, level, false))
776  {
777  DPRINTF(SMMUv3, "S1 page not writable - fault\n");
778 
779  TranslResult tr;
780  tr.fault = FAULT_PERMISSION;
781  return tr;
782  }
783 
784  walkPtr = pt_ops->nextLevelPointer(pte, level);
785 
786  if (leaf)
787  break;
788 
789  if (context.stage2Enable) {
790  TranslResult s2tr = translateStage2(yield, walkPtr, false);
791  if (s2tr.fault != FAULT_NONE)
792  return s2tr;
793 
794  walkPtr = s2tr.addr;
795  }
796 
797  walkCacheUpdate(yield, addr, pt_ops->walkMask(level), walkPtr,
798  1, level, leaf, 0);
799  }
800 
801  TranslResult tr;
802  tr.fault = FAULT_NONE;
803  tr.addrMask = pt_ops->pageMask(pte, level);
804  tr.addr = walkPtr + (addr & ~tr.addrMask);
805  tr.writable = pt_ops->isWritable(pte, level, false);
806 
807  if (context.stage2Enable) {
808  TranslResult s2tr = translateStage2(yield, tr.addr, true);
809  if (s2tr.fault != FAULT_NONE)
810  return s2tr;
811 
812  tr = combineTranslations(tr, s2tr);
813  }
814 
815  walkCacheUpdate(yield, addr, tr.addrMask, walkPtr,
816  1, level, true, tr.writable);
817 
818  return tr;
819 }
820 
823  const PageTableOps *pt_ops,
824  unsigned level, Addr walkPtr)
825 {
827 
828  doSemaphoreDown(yield, smmu.cycleSem);
829  doDelay(yield, Cycles(1));
831 
832  for (; level <= pt_ops->lastLevel(); level++) {
833  Addr pte_addr = walkPtr + pt_ops->index(
834  addr, level, 64 - context.s2t0sz);
835 
836  DPRINTF(SMMUv3, " Fetching S2 L%d PTE from pa=%#08x\n",
837  level, pte_addr);
838 
839  doReadPTE(yield, addr, pte_addr, &pte, 2, level);
840 
841  DPRINTF(SMMUv3, " Got S2 L%d PTE=%#x from pa=%#08x\n",
842  level, pte, pte_addr);
843 
844  doSemaphoreDown(yield, smmu.cycleSem);
845  doDelay(yield, Cycles(1));
847 
848  bool valid = pt_ops->isValid(pte, level);
849  bool leaf = pt_ops->isLeaf(pte, level);
850 
851  if (!valid) {
852  DPRINTF(SMMUv3, " S2 PTE not valid - fault\n");
853 
854  TranslResult tr;
856  return tr;
857  }
858 
859  if (valid && leaf && request.isWrite &&
860  !pt_ops->isWritable(pte, level, true))
861  {
862  DPRINTF(SMMUv3, " S2 PTE not writable = fault\n");
863 
864  TranslResult tr;
865  tr.fault = FAULT_PERMISSION;
866  return tr;
867  }
868 
869  walkPtr = pt_ops->nextLevelPointer(pte, level);
870 
871  if (final_tr || smmu.walkCacheNonfinalEnable)
872  walkCacheUpdate(yield, addr, pt_ops->walkMask(level), walkPtr,
873  2, level, leaf,
874  leaf ? pt_ops->isWritable(pte, level, true) : 0);
875  if (leaf)
876  break;
877  }
878 
879  TranslResult tr;
880  tr.fault = FAULT_NONE;
881  tr.addrMask = pt_ops->pageMask(pte, level);
882  tr.addr = walkPtr + (addr & ~tr.addrMask);
883  tr.writable = pt_ops->isWritable(pte, level, true);
884 
885  return tr;
886 }
887 
890 {
891  const auto tg = GrainMap_tg0[context.stage1TranslGranule];
892  const auto *pt_ops = getPageTableOps(tg);
893 
894  const WalkCache::Entry *walk_ep = NULL;
895  unsigned level;
896 
897  // Level here is actually (level+1) so we can count down
898  // to 0 using unsigned int.
899  for (level = pt_ops->lastLevel() + 1;
900  level > pt_ops->firstLevel(context.t0sz);
901  level--)
902  {
903  walkCacheLookup(yield, walk_ep, addr,
904  context.asid, context.vmid, 1, level-1);
905 
906  if (walk_ep)
907  break;
908  }
909 
910  // Correct level (see above).
911  level -= 1;
912 
913  TranslResult tr;
914  if (walk_ep) {
915  if (walk_ep->leaf) {
916  tr.fault = FAULT_NONE;
917  tr.addr = walk_ep->pa + (addr & ~walk_ep->vaMask);
918  tr.addrMask = walk_ep->vaMask;
919  tr.writable = walk_ep->permissions;
920  } else {
921  tr = walkStage1And2(yield, addr, pt_ops, level+1, walk_ep->pa);
922  }
923  } else {
924  Addr table_addr = context.ttb0;
925  if (context.stage2Enable) {
926  TranslResult s2tr = translateStage2(yield, table_addr, false);
927  if (s2tr.fault != FAULT_NONE)
928  return s2tr;
929 
930  table_addr = s2tr.addr;
931  }
932 
933  tr = walkStage1And2(yield, addr, pt_ops,
934  pt_ops->firstLevel(context.t0sz),
935  table_addr);
936  }
937 
938  if (tr.fault == FAULT_NONE)
939  DPRINTF(SMMUv3, "Translated vaddr %#x to paddr %#x\n", addr, tr.addr);
940 
941  return tr;
942 }
943 
946 {
947  const auto tg = GrainMap_tg0[context.stage2TranslGranule];
948  const auto *pt_ops = getPageTableOps(tg);
949 
950  const IPACache::Entry *ipa_ep = NULL;
951  if (smmu.ipaCacheEnable) {
952  doSemaphoreDown(yield, smmu.ipaSem);
953  doDelay(yield, smmu.ipaLat);
954  ipa_ep = smmu.ipaCache.lookup(addr, context.vmid);
956  }
957 
958  if (ipa_ep) {
959  TranslResult tr;
960  tr.fault = FAULT_NONE;
961  tr.addr = ipa_ep->pa + (addr & ~ipa_ep->ipaMask);
962  tr.addrMask = ipa_ep->ipaMask;
963  tr.writable = ipa_ep->permissions;
964 
965  DPRINTF(SMMUv3, " IPACache hit ipa=%#x vmid=%#x pa=%#x\n",
966  addr, context.vmid, tr.addr);
967 
968  return tr;
969  } else if (smmu.ipaCacheEnable) {
970  DPRINTF(SMMUv3, " IPACache miss ipa=%#x vmid=%#x\n",
971  addr, context.vmid);
972  }
973 
974  const WalkCache::Entry *walk_ep = NULL;
975  unsigned level = pt_ops->firstLevel(context.s2t0sz);
976 
977  if (final_tr || smmu.walkCacheNonfinalEnable) {
978  // Level here is actually (level+1) so we can count down
979  // to 0 using unsigned int.
980  for (level = pt_ops->lastLevel() + 1;
981  level > pt_ops->firstLevel(context.s2t0sz);
982  level--)
983  {
984  walkCacheLookup(yield, walk_ep, addr,
985  0, context.vmid, 2, level-1);
986 
987  if (walk_ep)
988  break;
989  }
990 
991  // Correct level (see above).
992  level -= 1;
993  }
994 
995  TranslResult tr;
996  if (walk_ep) {
997  if (walk_ep->leaf) {
998  tr.fault = FAULT_NONE;
999  tr.addr = walk_ep->pa + (addr & ~walk_ep->vaMask);
1000  tr.addrMask = walk_ep->vaMask;
1001  tr.writable = walk_ep->permissions;
1002  } else {
1003  tr = walkStage2(yield, addr, final_tr, pt_ops,
1004  level + 1, walk_ep->pa);
1005  }
1006  } else {
1007  tr = walkStage2(yield, addr, final_tr, pt_ops,
1008  pt_ops->firstLevel(context.s2t0sz),
1009  context.httb);
1010  }
1011 
1012  if (tr.fault == FAULT_NONE)
1013  DPRINTF(SMMUv3, " Translated %saddr %#x to paddr %#x\n",
1014  context.stage1Enable ? "ip" : "v", addr, tr.addr);
1015 
1016  if (smmu.ipaCacheEnable) {
1018  e.valid = true;
1019  e.ipaMask = tr.addrMask;
1020  e.ipa = addr & e.ipaMask;
1021  e.pa = tr.addr & tr.addrMask;
1022  e.permissions = tr.writable;
1023  e.vmid = context.vmid;
1024 
1025  doSemaphoreDown(yield, smmu.ipaSem);
1026  smmu.ipaCache.store(e);
1028  }
1029 
1030  return tr;
1031 }
1032 
// Merge a stage-1 and a stage-2 translation result into the final result:
//  - a stage-2 fault takes priority and is returned as-is;
//  - otherwise stage 1 must have succeeded (asserted);
//  - the final physical address comes from stage 2;
//  - the combined address mask is the OR of both stages' masks;
//  - the page is writable only if both stages permit writes.
// NOTE(review): Doxygen extraction dropped the head of the signature
// (original lines 1033-1034 with the return type, function name and the
// "const TranslResult &s1tr" parameter).
1035  const TranslResult &s2tr) const
1036 {
1037  if (s2tr.fault != FAULT_NONE)
1038  return s2tr;
1039 
1040  assert(s1tr.fault == FAULT_NONE);
1041 
1042  TranslResult tr;
1043  tr.fault = FAULT_NONE;
1044  tr.addr = s2tr.addr;
1045  tr.addrMask = s1tr.addrMask | s2tr.addrMask;
1046  tr.writable = s1tr.writable & s2tr.writable;
1047 
1048  return tr;
1049 }
1050 
// Return true if any other in-flight request registered on this device
// interface targets the same 4KB page as this request (a 4k hazard).
// NOTE(review): Doxygen extraction dropped the signature line (original
// line 1052, presumably "SMMUTranslationProcess::hazard4kCheck()").
1051 bool
1053 {
// Round the request address down to its 4KB page base.
1054  Addr addr4k = request.addr & ~0xfffULL;
1055 
1056  for (auto it = ifc.duplicateReqs.begin();
1057  it != ifc.duplicateReqs.end();
1058  ++it)
1059  {
1060  Addr other4k = (*it)->request.addr & ~0xfffULL;
1061  if (addr4k == other4k)
1062  return true;
1063  }
1064 
1065  return false;
1066 }
1067 
// Register this translation process in the interface's duplicate-request
// list so that later requests to the same 4KB page can detect the hazard
// (see hazard4kCheck / hazard4kHold).
// NOTE(review): Doxygen extraction dropped the signature line (original
// line 1069).
1068 void
1070 {
1071  DPRINTF(SMMUv3Hazard, "4kReg: p=%p a4k=%#x\n",
1072  this, request.addr & ~0xfffULL);
1073 
1074  ifc.duplicateReqs.push_back(this);
1075 }
1076 
1077 void
1079 {
1080  Addr addr4k = request.addr & ~0xfffULL;
1081 
1082  bool found_hazard;
1083 
1084  do {
1085  found_hazard = false;
1086 
1087  for (auto it = ifc.duplicateReqs.begin();
1088  it!=ifc.duplicateReqs.end() && *it!=this;
1089  ++it)
1090  {
1091  Addr other4k = (*it)->request.addr & ~0xfffULL;
1092 
1093  DPRINTF(SMMUv3Hazard, "4kHold: p=%p a4k=%#x Q: p=%p a4k=%#x\n",
1094  this, addr4k, *it, other4k);
1095 
1096  if (addr4k == other4k) {
1097  DPRINTF(SMMUv3Hazard,
1098  "4kHold: p=%p a4k=%#x WAIT on p=%p a4k=%#x\n",
1099  this, addr4k, *it, other4k);
1100 
1102 
1103  DPRINTF(SMMUv3Hazard, "4kHold: p=%p a4k=%#x RESUME\n",
1104  this, addr4k);
1105 
1106  // This is to avoid checking *it!=this after doWaitForSignal()
1107  // since it could have been deleted.
1108  found_hazard = true;
1109  break;
1110  }
1111  }
1112  } while (found_hazard);
1113 }
1114 
1115 void
1117 {
1118  DPRINTF(SMMUv3Hazard, "4kRel: p=%p a4k=%#x\n",
1119  this, request.addr & ~0xfffULL);
1120 
1122 
1123  for (it = ifc.duplicateReqs.begin(); it != ifc.duplicateReqs.end(); ++it)
1124  if (*it == this)
1125  break;
1126 
1127  if (it == ifc.duplicateReqs.end())
1128  panic("hazard4kRelease: request not found");
1129 
1130  ifc.duplicateReqs.erase(it);
1131 
1133 }
1134 
1135 void
1137 {
1138  auto orderId = AMBA::orderId(request.pkt);
1139 
1140  DPRINTF(SMMUv3Hazard, "IdReg: p=%p oid=%d\n", this, orderId);
1141 
1142  assert(orderId < SMMU_MAX_TRANS_ID);
1143 
1145  request.isWrite ?
1147  depReqs.push_back(this);
1148 }
1149 
1150 void
1152 {
1153  auto orderId = AMBA::orderId(request.pkt);
1154 
1155  DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d\n", this, orderId);
1156 
1158  request.isWrite ?
1161 
1162  bool found_hazard;
1163 
1164  do {
1165  found_hazard = false;
1166 
1167  for (auto it = depReqs.begin(); it!=depReqs.end() && *it!=this; ++it) {
1168  DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d Q: %p\n",
1169  this, orderId, *it);
1170 
1171  if (AMBA::orderId((*it)->request.pkt) == orderId) {
1172  DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d WAIT on=%p\n",
1173  this, orderId, *it);
1174 
1176 
1177  DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d RESUME\n",
1178  this, orderId);
1179 
1180  // This is to avoid checking *it!=this after doWaitForSignal()
1181  // since it could have been deleted.
1182  found_hazard = true;
1183  break;
1184  }
1185  }
1186  } while (found_hazard);
1187 }
1188 
1189 void
1191 {
1192  auto orderId = AMBA::orderId(request.pkt);
1193 
1194  DPRINTF(SMMUv3Hazard, "IdRel: p=%p oid=%d\n", this, orderId);
1195 
1197  request.isWrite ?
1200 
1201  for (it = depReqs.begin(); it != depReqs.end(); ++it) {
1202  if (*it == this)
1203  break;
1204  }
1205 
1206  if (it == depReqs.end())
1207  panic("hazardIdRelease: request not found");
1208 
1209  depReqs.erase(it);
1210 
1212 }
1213 
1214 void
1216 {
1217  if (!smmu.system.isTimingMode())
1218  return;
1219 
1221  return;
1222 
1223  std::string proc_name = csprintf("%sprf", name());
1224  SMMUTranslationProcess *proc =
1225  new SMMUTranslationProcess(proc_name, smmu, ifc);
1226 
1227  proc->beginTransaction(
1229  proc->scheduleWakeup(smmu.clockEdge(Cycles(1)));
1230 }
1231 
1232 void
1234  const TranslResult &tr)
1235 {
1236  assert(tr.fault == FAULT_NONE);
1237 
1238  unsigned numRequestorBeats = request.isWrite ?
1241  1;
1242 
1244  doDelay(yield, Cycles(numRequestorBeats));
1246 
1247 
1252  (request.size + (ifc.portWidth-1)) / ifc.portWidth;
1253 
1255 
1256 
1257  SMMUAction a;
1258 
1259  if (request.isAtsRequest) {
1260  a.type = ACTION_SEND_RESP_ATS;
1261 
1262  if (smmu.system.isAtomicMode()) {
1264  } else if (smmu.system.isTimingMode()) {
1266  } else {
1267  panic("Not in atomic or timing mode");
1268  }
1269  } else {
1270  a.type = ACTION_SEND_REQ_FINAL;
1271  a.ifc = &ifc;
1272  }
1273 
1274  a.pkt = request.pkt;
1275  a.delay = 0;
1276 
1277  a.pkt->setAddr(tr.addr);
1278  a.pkt->req->setPaddr(tr.addr);
1279 
1280  yield(a);
1281 
1282  if (!request.isAtsRequest) {
1283  PacketPtr pkt = yield.get();
1284  pkt->setAddr(request.addr);
1285 
1286  a.type = ACTION_SEND_RESP;
1287  a.pkt = pkt;
1288  a.ifc = &ifc;
1289  a.delay = 0;
1290  yield(a);
1291  }
1292 }
1293 
1294 void
1296 {
1298 
1299  SMMUAction a;
1300  a.type = ACTION_TERMINATE;
1301  a.pkt = NULL;
1302  a.ifc = &ifc;
1303  a.delay = 0;
1304  yield(a);
1305 }
1306 
1307 void
1309 {
1310  int sizeMask = mask(smmu.regs.eventq_base & Q_BASE_SIZE_MASK);
1311 
1312  if (((smmu.regs.eventq_prod+1) & sizeMask) ==
1313  (smmu.regs.eventq_cons & sizeMask))
1314  panic("Event queue full - aborting\n");
1315 
1316  Addr event_addr =
1318  (smmu.regs.eventq_prod & sizeMask) * sizeof(ev);
1319 
1320  DPRINTF(SMMUv3, "Sending event to addr=%#08x (pos=%d): type=%#x stag=%#x "
1321  "flags=%#x sid=%#x ssid=%#x va=%#08x ipa=%#x\n",
1322  event_addr, smmu.regs.eventq_prod, ev.type, ev.stag,
1323  ev.flags, ev.streamId, ev.substreamId, ev.va, ev.ipa);
1324 
1325  // This deliberately resets the overflow field in eventq_prod!
1326  smmu.regs.eventq_prod = (smmu.regs.eventq_prod + 1) & sizeMask;
1327 
1328  doWrite(yield, event_addr, &ev, sizeof(ev));
1329 
1331  panic("eventq msi not enabled\n");
1332 
1335 }
1336 
1337 void
1339  StreamTableEntry &ste,
1340  uint32_t sid)
1341 {
1342  unsigned max_sid = 1 << (smmu.regs.strtab_base_cfg & ST_CFG_SIZE_MASK);
1343  if (sid >= max_sid)
1344  panic("SID %#x out of range, max=%#x", sid, max_sid);
1345 
1346  Addr ste_addr;
1347 
1349  unsigned split =
1351 
1352  if (split!= 7 && split!=8 && split!=16)
1353  panic("Invalid stream table split %d", split);
1354 
1355  uint64_t l2_ptr;
1356  uint64_t l2_addr =
1358  bits(sid, 32, split) * sizeof(l2_ptr);
1359 
1360  DPRINTF(SMMUv3, "Read L1STE at %#x\n", l2_addr);
1361 
1362  doReadConfig(yield, l2_addr, &l2_ptr, sizeof(l2_ptr), sid, 0);
1363 
1364  DPRINTF(SMMUv3, "Got L1STE L1 at %#x: 0x%016x\n", l2_addr, l2_ptr);
1365 
1366  unsigned span = l2_ptr & ST_L2_SPAN_MASK;
1367  if (span == 0)
1368  panic("Invalid level 1 stream table descriptor");
1369 
1370  unsigned index = bits(sid, split-1, 0);
1371  if (index >= (1 << span))
1372  panic("StreamID %d out of level 1 descriptor range %d",
1373  sid, 1<<span);
1374 
1375  ste_addr = (l2_ptr & ST_L2_ADDR_MASK) + index * sizeof(ste);
1376 
1378  } else if ((smmu.regs.strtab_base_cfg & ST_CFG_FMT_MASK)
1379  == ST_CFG_FMT_LINEAR) {
1380  ste_addr =
1381  (smmu.regs.strtab_base & VMT_BASE_ADDR_MASK) + sid * sizeof(ste);
1382  } else {
1383  panic("Invalid stream table format");
1384  }
1385 
1386  DPRINTF(SMMUv3, "Read STE at %#x\n", ste_addr);
1387 
1388  doReadConfig(yield, ste_addr, &ste, sizeof(ste), sid, 0);
1389 
1390  DPRINTF(SMMUv3, "Got STE at %#x [0]: 0x%016x\n", ste_addr, ste.dw0);
1391  DPRINTF(SMMUv3, " STE at %#x [1]: 0x%016x\n", ste_addr, ste.dw1);
1392  DPRINTF(SMMUv3, " STE at %#x [2]: 0x%016x\n", ste_addr, ste.dw2);
1393  DPRINTF(SMMUv3, " STE at %#x [3]: 0x%016x\n", ste_addr, ste.dw3);
1394  DPRINTF(SMMUv3, " STE at %#x [4]: 0x%016x\n", ste_addr, ste._pad[0]);
1395  DPRINTF(SMMUv3, " STE at %#x [5]: 0x%016x\n", ste_addr, ste._pad[1]);
1396  DPRINTF(SMMUv3, " STE at %#x [6]: 0x%016x\n", ste_addr, ste._pad[2]);
1397  DPRINTF(SMMUv3, " STE at %#x [7]: 0x%016x\n", ste_addr, ste._pad[3]);
1398 
1399  if (!ste.dw0.valid)
1400  panic("STE @ %#x not valid\n", ste_addr);
1401 
1402  smmu.stats.steFetches++;
1403 }
1404 
1405 void
1408  const StreamTableEntry &ste,
1409  uint32_t sid, uint32_t ssid)
1410 {
1411  Addr cd_addr = 0;
1412 
1413  if (ste.dw0.s1cdmax == 0) {
1414  cd_addr = ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT;
1415  } else {
1416  unsigned max_ssid = 1 << ste.dw0.s1cdmax;
1417  if (ssid >= max_ssid)
1418  panic("SSID %#x out of range, max=%#x", ssid, max_ssid);
1419 
1420  if (ste.dw0.s1fmt==STAGE1_CFG_2L_4K ||
1421  ste.dw0.s1fmt==STAGE1_CFG_2L_64K)
1422  {
1423  unsigned split = ste.dw0.s1fmt==STAGE1_CFG_2L_4K ? 7 : 11;
1424 
1425  uint64_t l2_ptr;
1426  uint64_t l2_addr = (ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT) +
1427  bits(ssid, 24, split) * sizeof(l2_ptr);
1428 
1429  if (context.stage2Enable)
1430  l2_addr = translateStage2(yield, l2_addr, false).addr;
1431 
1432  DPRINTF(SMMUv3, "Read L1CD at %#x\n", l2_addr);
1433 
1434  doReadConfig(yield, l2_addr, &l2_ptr, sizeof(l2_ptr), sid, ssid);
1435 
1436  DPRINTF(SMMUv3, "Got L1CD at %#x: 0x%016x\n", l2_addr, l2_ptr);
1437 
1438  cd_addr = l2_ptr + bits(ssid, split-1, 0) * sizeof(cd);
1439 
1441  } else if (ste.dw0.s1fmt == STAGE1_CFG_1L) {
1442  cd_addr = (ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT) + ssid*sizeof(cd);
1443  }
1444  }
1445 
1446  if (context.stage2Enable)
1447  cd_addr = translateStage2(yield, cd_addr, false).addr;
1448 
1449  DPRINTF(SMMUv3, "Read CD at %#x\n", cd_addr);
1450 
1451  doReadConfig(yield, cd_addr, &cd, sizeof(cd), sid, ssid);
1452 
1453  DPRINTF(SMMUv3, "Got CD at %#x [0]: 0x%016x\n", cd_addr, cd.dw0);
1454  DPRINTF(SMMUv3, " CD at %#x [1]: 0x%016x\n", cd_addr, cd.dw1);
1455  DPRINTF(SMMUv3, " CD at %#x [2]: 0x%016x\n", cd_addr, cd.dw2);
1456  DPRINTF(SMMUv3, " CD at %#x [3]: 0x%016x\n", cd_addr, cd.mair);
1457  DPRINTF(SMMUv3, " CD at %#x [4]: 0x%016x\n", cd_addr, cd.amair);
1458  DPRINTF(SMMUv3, " CD at %#x [5]: 0x%016x\n", cd_addr, cd._pad[0]);
1459  DPRINTF(SMMUv3, " CD at %#x [6]: 0x%016x\n", cd_addr, cd._pad[1]);
1460  DPRINTF(SMMUv3, " CD at %#x [7]: 0x%016x\n", cd_addr, cd._pad[2]);
1461 
1462 
1463  if (!cd.dw0.valid)
1464  panic("CD @ %#x not valid\n", cd_addr);
1465 
1466  smmu.stats.cdFetches++;
1467 }
1468 
// Read configuration-space data (stream table / context descriptor
// fetches) from memory.  Currently modelled as a plain memory read; the
// sid/ssid parameters are accepted but unused in this body.
// NOTE(review): Doxygen extraction dropped original line 1470 (the
// qualified function name and the leading "Yield &yield, Addr addr"
// parameters).
1469 void
1471  void *ptr, size_t size,
1472  uint32_t sid, uint32_t ssid)
1473 {
1474  doRead(yield, addr, ptr, size);
1475 }
1476 
// Fetch a single page table entry from memory, aligning the address down
// to the PTE size before issuing the read.  The stage/level parameters are
// unused in this body (the read itself is stage/level agnostic).
// NOTE(review): Doxygen extraction dropped original line 1478 (the
// qualified function name and the leading "Yield &yield, Addr addr"
// parameters).
1477 void
1479  void *ptr, unsigned stage,
1480  unsigned level)
1481 {
1482  size_t pte_size = sizeof(PageTableOps::pte_t);
1483 
// Align down: PTEs are naturally aligned to their own size.
1484  Addr mask = pte_size - 1;
1485  Addr base = addr & ~mask;
1486 
1487  doRead(yield, base, ptr, pte_size);
1488 }
1489 
1490 } // namespace gem5
#define DPRINTF(x,...)
Definition: trace.hh:186
void store(const Entry &incoming)
const Entry * lookup(Addr va, uint16_t asid, uint16_t vmid, bool updStats=true)
Tick clockEdge(Cycles cycles=Cycles(0)) const
Determine the tick when a cycle begins, by default the current one, but the argument also enables the...
const Entry * lookup(uint32_t sid, uint32_t ssid, bool updStats=true)
void store(const Entry &incoming)
CallerType: A reference to an object of this class will be passed to the coroutine task.
Definition: coroutine.hh:86
Cycles is a wrapper class for representing cycle counts, i.e.
Definition: types.hh:79
void store(const Entry &incoming)
const Entry * lookup(Addr ipa, uint16_t vmid, bool updStats=true)
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:294
Addr getAddr() const
Definition: packet.hh:805
void makeTimingResponse()
Definition: packet.hh:1077
void setAddr(Addr _addr)
Update the address of this packet mid-transaction.
Definition: packet.hh:813
bool isWrite() const
Definition: packet.hh:593
RequestPtr req
A pointer to the original request.
Definition: packet.hh:376
unsigned getSize() const
Definition: packet.hh:815
void makeAtomicResponse()
Definition: packet.hh:1071
const std::string name() const
void doWrite(Yield &yield, Addr addr, const void *ptr, size_t size)
void doDelay(Yield &yield, Cycles cycles)
void doSemaphoreUp(SMMUSemaphore &sem)
void scheduleWakeup(Tick when)
void doBroadcastSignal(SMMUSignal &sig)
void doSemaphoreDown(Yield &yield, SMMUSemaphore &sem)
void doWaitForSignal(Yield &yield, SMMUSignal &sig)
void doRead(Yield &yield, Addr addr, void *ptr, size_t size)
Definition: smmu_v3_proc.cc:75
void store(const Entry &incoming, AllocPolicy alloc)
const Entry * lookup(uint32_t sid, uint32_t ssid, Addr va, bool updStats=true)
bool ifcTLBLookup(Yield &yield, TranslResult &tr, bool &wasPrefetched)
void configCacheUpdate(Yield &yield, const TranslContext &tc)
TranslResult translateStage1And2(Yield &yield, Addr addr)
void smmuTLBUpdate(Yield &yield, const TranslResult &tr)
SMMUTranslationProcess(const std::string &name, SMMUv3 &_smmu, SMMUv3DeviceInterface &_ifc)
TranslResult smmuTranslation(Yield &yield)
TranslResult bypass(Addr addr) const
void beginTransaction(const SMMUTranslRequest &req)
void doReadCD(Yield &yield, ContextDescriptor &cd, const StreamTableEntry &ste, uint32_t sid, uint32_t ssid)
bool findConfig(Yield &yield, TranslContext &tc, TranslResult &tr)
void walkCacheLookup(Yield &yield, const WalkCache::Entry *&walkEntry, Addr addr, uint16_t asid, uint16_t vmid, unsigned stage, unsigned level)
bool smmuTLBLookup(Yield &yield, TranslResult &tr)
virtual void main(Yield &yield)
void hazardIdRegister()
Used to force ordering on transactions with the same orderId.
TranslResult walkStage2(Yield &yield, Addr addr, bool final_tr, const ArmISA::PageTableOps *pt_ops, unsigned level, Addr walkPtr)
TranslResult translateStage2(Yield &yield, Addr addr, bool final_tr)
void doReadConfig(Yield &yield, Addr addr, void *ptr, size_t size, uint32_t sid, uint32_t ssid)
void hazard4kHold(Yield &yield)
void doReadSTE(Yield &yield, StreamTableEntry &ste, uint32_t sid)
void hazardIdHold(Yield &yield)
SMMUv3DeviceInterface & ifc
bool microTLBLookup(Yield &yield, TranslResult &tr)
bool configCacheLookup(Yield &yield, TranslContext &tc)
bool hazard4kCheck()
Used to force ordering on transactions with same (SID, SSID, 4k page) to avoid multiple identical pag...
void ifcTLBUpdate(Yield &yield, const TranslResult &tr)
TranslResult walkStage1And2(Yield &yield, Addr addr, const ArmISA::PageTableOps *pt_ops, unsigned level, Addr walkPtr)
void sendEvent(Yield &yield, const SMMUEvent &ev)
void microTLBUpdate(Yield &yield, const TranslResult &tr)
void doReadPTE(Yield &yield, Addr va, Addr addr, void *ptr, unsigned stage, unsigned level)
void walkCacheUpdate(Yield &yield, Addr va, Addr vaMask, Addr pa, unsigned stage, unsigned level, bool leaf, uint8_t permissions)
void completePrefetch(Yield &yield)
GEM5_CLASS_VAR_USED Tick faultTick
TranslResult combineTranslations(const TranslResult &s1tr, const TranslResult &s2tr) const
void completeTransaction(Yield &yield, const TranslResult &tr)
std::list< SMMUTranslationProcess * > dependentReads[SMMU_MAX_TRANS_ID]
std::list< SMMUTranslationProcess * > dependentWrites[SMMU_MAX_TRANS_ID]
std::list< SMMUTranslationProcess * > duplicateReqs
const System & system
Definition: smmu_v3.hh:93
const Cycles tlbLat
Definition: smmu_v3.hh:131
const bool ipaCacheEnable
Definition: smmu_v3.hh:109
const unsigned requestPortWidth
Definition: smmu_v3.hh:116
gem5::SMMUv3::SMMUv3Stats stats
SMMUSemaphore requestPortSem
Definition: smmu_v3.hh:124
SMMUSemaphore walkSem
Definition: smmu_v3.hh:123
const bool configCacheEnable
Definition: smmu_v3.hh:108
SMMUSemaphore ptwSem
Definition: smmu_v3.hh:127
SMMUSemaphore cycleSem
Definition: smmu_v3.hh:128
SMMUSemaphore tlbSem
Definition: smmu_v3.hh:118
ARMArchTLB tlb
Definition: smmu_v3.hh:102
SMMUSemaphore transSem
Definition: smmu_v3.hh:126
ConfigCache configCache
Definition: smmu_v3.hh:103
WalkCache walkCache
Definition: smmu_v3.hh:105
const Cycles configLat
Definition: smmu_v3.hh:134
const bool tlbEnable
Definition: smmu_v3.hh:107
const Cycles smmuIfcLat
Definition: smmu_v3.hh:133
const Cycles ifcSmmuLat
Definition: smmu_v3.hh:132
const bool walkCacheNonfinalEnable
Definition: smmu_v3.hh:113
const Cycles walkLat
Definition: smmu_v3.hh:136
const unsigned walkCacheS1Levels
Definition: smmu_v3.hh:114
const bool walkCacheEnable
Definition: smmu_v3.hh:110
void scheduleDeviceRetries()
Definition: smmu_v3.cc:217
const unsigned walkCacheS2Levels
Definition: smmu_v3.hh:115
IPACache ipaCache
Definition: smmu_v3.hh:104
SMMUSemaphore ifcSmmuSem
Definition: smmu_v3.hh:119
SMMUSemaphore smmuIfcSem
Definition: smmu_v3.hh:120
SMMURegs regs
Definition: smmu_v3.hh:155
SMMUAction runProcessTiming(SMMUProcess *proc, PacketPtr pkt)
Definition: smmu_v3.cc:286
const Cycles ipaLat
Definition: smmu_v3.hh:135
SMMUSemaphore configSem
Definition: smmu_v3.hh:121
SMMUSemaphore ipaSem
Definition: smmu_v3.hh:122
bool isAtomicMode() const
Is the system in atomic mode?
Definition: system.hh:261
bool isTimingMode() const
Is the system in timing mode?
Definition: system.hh:273
void store(const Entry &incoming)
const Entry * lookup(Addr va, Addr vaMask, uint16_t asid, uint16_t vmid, unsigned stage, unsigned level, bool updStats=true)
void sample(const U &v, int n=1)
Add a value to the distribtion n times.
Definition: statistics.hh:1328
STL list class.
Definition: stl.hh:51
constexpr T bits(T val, unsigned first, unsigned last)
Extract the bitfield from position 'first' to 'last' (inclusive) from 'val' and right justify it.
Definition: bitfield.hh:76
constexpr uint64_t mask(unsigned nbits)
Generate a 64-bit mask of 'nbits' 1s, right justified.
Definition: bitfield.hh:63
std::enable_if_t<!std::is_same_v< T, void >, T > get()
get() is the way we can extrapolate arguments from the coroutine caller.
Definition: coroutine.hh:142
void signalDrainDone() const
Signal that an object is drained.
Definition: drain.hh:305
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:178
static OrderID orderId(PacketPtr pkt)
Definition: amba.hh:52
const PageTableOps * getPageTableOps(GrainSize trans_granule)
Definition: pagetable.cc:476
Bitfield< 32 > cd
Definition: misc_types.hh:258
Bitfield< 9 > e
Definition: misc_types.hh:65
Bitfield< 23 > span
Definition: misc_types.hh:355
Bitfield< 8 > a
Definition: misc_types.hh:66
const GrainSize GrainMap_tg0[]
Definition: pagetable.cc:49
Bitfield< 39, 12 > pa
Definition: misc_types.hh:663
Bitfield< 8 > va
Definition: misc_types.hh:282
Bitfield< 30, 0 > index
Bitfield< 51, 12 > base
Definition: pagetable.hh:141
Bitfield< 20 > level
Definition: intmessage.hh:51
Bitfield< 3 > addr
Definition: types.hh:84
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
Tick curTick()
The universal simulation clock.
Definition: cur_tick.hh:46
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:147
@ STE_CONFIG_STAGE1_ONLY
Definition: smmu_v3_defs.hh:60
@ STE_CONFIG_STAGE2_ONLY
Definition: smmu_v3_defs.hh:61
@ STE_CONFIG_BYPASS
Definition: smmu_v3_defs.hh:59
@ STE_CONFIG_STAGE1_AND_2
Definition: smmu_v3_defs.hh:62
uint64_t Tick
Tick count type.
Definition: types.hh:58
@ TRANS_GRANULE_INVALID
Definition: smmu_v3_defs.hh:85
@ STAGE1_CFG_2L_4K
Definition: smmu_v3_defs.hh:68
@ STAGE1_CFG_1L
Definition: smmu_v3_defs.hh:67
@ STAGE1_CFG_2L_64K
Definition: smmu_v3_defs.hh:69
std::string csprintf(const char *format, const Args &...args)
Definition: cprintf.hh:161
@ ST_CFG_SPLIT_SHIFT
Definition: smmu_v3_defs.hh:74
@ STE_S2TTB_SHIFT
Definition: smmu_v3_defs.hh:77
@ ST_CD_ADDR_SHIFT
Definition: smmu_v3_defs.hh:75
@ CD_TTB_SHIFT
Definition: smmu_v3_defs.hh:76
@ CR0_SMMUEN_MASK
@ SMMU_MAX_TRANS_ID
@ ST_CFG_FMT_LINEAR
Definition: smmu_v3_defs.hh:94
@ ST_CFG_FMT_2LEVEL
Definition: smmu_v3_defs.hh:95
@ ST_CFG_SPLIT_MASK
Definition: smmu_v3_defs.hh:92
@ Q_BASE_ADDR_MASK
@ ST_L2_ADDR_MASK
Definition: smmu_v3_defs.hh:97
@ Q_BASE_SIZE_MASK
@ E_BASE_ENABLE_MASK
@ ST_CFG_SIZE_MASK
Definition: smmu_v3_defs.hh:91
@ ST_L2_SPAN_MASK
Definition: smmu_v3_defs.hh:96
@ ST_CFG_FMT_MASK
Definition: smmu_v3_defs.hh:93
@ VMT_BASE_ADDR_MASK
Definition: smmu_v3_defs.hh:99
@ E_BASE_ADDR_MASK
@ ACTION_SEND_RESP
Definition: smmu_v3_proc.hh:63
@ ACTION_TERMINATE
Definition: smmu_v3_proc.hh:67
@ ACTION_INITIAL_NOP
Definition: smmu_v3_proc.hh:60
@ ACTION_SEND_RESP_ATS
Definition: smmu_v3_proc.hh:64
@ ACTION_SEND_REQ_FINAL
Definition: smmu_v3_proc.hh:62
This is an implementation of the SMMUv3 architecture.
virtual bool isWritable(pte_t pte, unsigned level, bool stage2) const =0
Addr walkMask(unsigned level) const
Definition: pagetable.cc:55
virtual LookupLevel lastLevel() const =0
virtual Addr nextLevelPointer(pte_t pte, unsigned level) const =0
virtual bool isValid(pte_t pte, unsigned level) const =0
virtual Addr pageMask(pte_t pte, unsigned level) const =0
virtual Addr index(Addr va, unsigned level, int tsz) const =0
virtual bool isLeaf(pte_t pte, unsigned level) const =0
uint32_t substreamId
static SMMUTranslRequest fromPacket(PacketPtr pkt, bool ats=false)
static SMMUTranslRequest prefetch(Addr addr, uint32_t sid, uint32_t ssid)
statistics::Distribution ptwTimeDist
Definition: smmu_v3.hh:147
statistics::Scalar cdL1Fetches
Definition: smmu_v3.hh:144
statistics::Scalar steL1Fetches
Definition: smmu_v3.hh:142
statistics::Scalar steFetches
Definition: smmu_v3.hh:143
statistics::Distribution translationTimeDist
Definition: smmu_v3.hh:146
statistics::Scalar cdFetches
Definition: smmu_v3.hh:145
Bitfield< 51, 6 > s1ctxptr
Bitfield< 3, 1 > config
Bitfield< 5, 4 > s1fmt
Bitfield< 37, 32 > s2t0sz
Bitfield< 47, 46 > s2tg
Bitfield< 63, 59 > s1cdmax
const std::string & name()
Definition: trace.cc:49
uint64_t eventq_irq_cfg0
uint32_t eventq_prod
uint32_t eventq_irq_cfg1
uint64_t strtab_base
uint32_t eventq_cons
uint32_t strtab_base_cfg
uint64_t eventq_base

Generated on Wed Dec 21 2022 10:22:33 for gem5 by doxygen 1.9.1