gem5  v21.1.0.2
smmu_v3_transl.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2013, 2018-2019 ARM Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions are
16  * met: redistributions of source code must retain the above copyright
17  * notice, this list of conditions and the following disclaimer;
18  * redistributions in binary form must reproduce the above copyright
19  * notice, this list of conditions and the following disclaimer in the
20  * documentation and/or other materials provided with the distribution;
21  * neither the name of the copyright holders nor the names of its
22  * contributors may be used to endorse or promote products derived from
23  * this software without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  */
37 
39 
#include "dev/arm/smmu_v3_transl.hh"

#include <list>
#include <string>

#include "debug/SMMUv3.hh"
#include "debug/SMMUv3Hazard.hh"
#include "dev/arm/amba.hh"
#include "dev/arm/smmu_v3.hh"
#include "sim/system.hh"
45 
46 namespace gem5
47 {
48 
49 SMMUTranslRequest
51 {
53  req.addr = pkt->getAddr();
54  req.size = pkt->getSize();
55  req.sid = pkt->req->streamId();
56  req.ssid = pkt->req->hasSubstreamId() ?
57  pkt->req->substreamId() : 0;
58  req.isWrite = pkt->isWrite();
59  req.isPrefetch = false;
60  req.isAtsRequest = ats;
61  req.pkt = pkt;
62 
63  return req;
64 }
65 
67 SMMUTranslRequest::prefetch(Addr addr, uint32_t sid, uint32_t ssid)
68 {
70  req.addr = addr;
71  req.size = 0;
72  req.sid = sid;
73  req.ssid = ssid;
74  req.isWrite = false;
75  req.isPrefetch = true;
76  req.isAtsRequest = false;
77  req.pkt = NULL;
78 
79  return req;
80 }
81 
83  SMMUv3 &_smmu, SMMUv3DeviceInterface &_ifc)
84  :
85  SMMUProcess(name, _smmu),
86  ifc(_ifc)
87 {
88  // Decrease number of pending translation slots on the device interface
89  assert(ifc.xlateSlotsRemaining > 0);
91 
93  reinit();
94 }
95 
97 {
98  // Increase number of pending translation slots on the device interface
99  assert(ifc.pendingMemAccesses > 0);
101 
102  // If no more SMMU memory accesses are pending,
103  // signal SMMU Device Interface as drained
104  if (ifc.pendingMemAccesses == 0) {
106  }
107 }
108 
109 void
111 {
112  request = req;
113 
114  reinit();
115 }
116 
117 void
119 {
120  assert(smmu.system.isTimingMode());
121 
122  assert(!"Stalls are broken");
123 
124  Tick resumeTick = curTick();
125 
126  (void) resumeTick;
127  DPRINTF(SMMUv3, "Resume at tick = %d. Fault duration = %d (%.3fus)\n",
128  resumeTick, resumeTick-faultTick, (resumeTick-faultTick) / 1e6);
129 
131 
133 }
134 
135 void
137 {
138  // Hack:
139  // The coroutine starts running as soon as it's created.
140  // But we need to wait for request data esp. in atomic mode.
141  SMMUAction a;
142  a.type = ACTION_INITIAL_NOP;
143  a.pkt = NULL;
144  yield(a);
145 
146  const Addr next4k = (request.addr + 0x1000ULL) & ~0xfffULL;
147 
148  if ((request.addr + request.size) > next4k)
149  panic("Transaction crosses 4k boundary (addr=%#x size=%#x)!\n",
151 
152 
153  unsigned numResponderBeats = request.isWrite ?
154  (request.size + (ifc.portWidth - 1)) / ifc.portWidth : 1;
155 
157  doDelay(yield, Cycles(numResponderBeats));
159 
160 
161  recvTick = curTick();
162 
163  if (!(smmu.regs.cr0 & CR0_SMMUEN_MASK)) {
164  // SMMU disabled
165  doDelay(yield, Cycles(1));
167  return;
168  }
169 
170  TranslResult tr;
171  bool wasPrefetched = false;
172 
173  if (request.isPrefetch) {
174  // Abort prefetch if:
175  // - there's already a transaction looking up the same 4k page, OR
176  // - requested address is already in the TLB.
177  if (hazard4kCheck() || ifcTLBLookup(yield, tr, wasPrefetched))
178  completePrefetch(yield); // this never returns
179 
181 
182  tr = smmuTranslation(yield);
183 
184  if (tr.fault == FAULT_NONE)
185  ifcTLBUpdate(yield, tr);
186 
187  hazard4kRelease();
188 
189  completePrefetch(yield);
190  } else {
192 
193  if (!microTLBLookup(yield, tr)) {
194  bool hit = ifcTLBLookup(yield, tr, wasPrefetched);
195  if (!hit) {
196  while (!hit && hazard4kCheck()) {
197  hazard4kHold(yield);
198  hit = ifcTLBLookup(yield, tr, wasPrefetched);
199  }
200  }
201 
202  // Issue prefetch if:
203  // - there was a TLB hit and the entry was prefetched, OR
204  // - TLB miss was successfully serviced
205  if (hit) {
206  if (wasPrefetched)
207  issuePrefetch(next4k);
208  } else {
210 
211  tr = smmuTranslation(yield);
212 
213  if (tr.fault == FAULT_NONE) {
214  ifcTLBUpdate(yield, tr);
215 
216  issuePrefetch(next4k);
217  }
218 
219  hazard4kRelease();
220  }
221 
222  if (tr.fault == FAULT_NONE)
223  microTLBUpdate(yield, tr);
224  }
225 
226  hazardIdHold(yield);
227  hazardIdRelease();
228 
229  if (tr.fault != FAULT_NONE)
230  panic("Translation Fault (addr=%#x, size=%#x, sid=%d, ssid=%d, "
231  "isWrite=%d, isPrefetch=%d, isAtsRequest=%d)\n",
234 
235  completeTransaction(yield, tr);
236  }
237 }
238 
241 {
242  TranslResult tr;
243  tr.fault = FAULT_NONE;
244  tr.addr = addr;
245  tr.addrMask = 0;
246  tr.writable = 1;
247 
248  return tr;
249 }
250 
253 {
254  TranslResult tr;
255 
256  // Need SMMU credit to proceed
257  doSemaphoreDown(yield, smmu.transSem);
258 
259  // Simulate pipelined IFC->SMMU link
261  doDelay(yield, Cycles(1)); // serialize transactions
263  doDelay(yield, smmu.ifcSmmuLat - Cycles(1)); // remaining pipeline delay
264 
265  bool haveConfig = true;
266  if (!configCacheLookup(yield, context)) {
267  if (findConfig(yield, context, tr)) {
268  configCacheUpdate(yield, context);
269  } else {
270  haveConfig = false;
271  }
272  }
273 
274  if (haveConfig && !smmuTLBLookup(yield, tr)) {
275  // SMMU main TLB miss
276 
277  // Need PTW slot to proceed
278  doSemaphoreDown(yield, smmu.ptwSem);
279 
280  // Page table walk
281  Tick ptwStartTick = curTick();
282 
283  if (context.stage1Enable) {
284  tr = translateStage1And2(yield, request.addr);
285  } else if (context.stage2Enable) {
286  tr = translateStage2(yield, request.addr, true);
287  } else {
288  tr = bypass(request.addr);
289  }
290 
292  smmu.stats.ptwTimeDist.sample(curTick() - ptwStartTick);
293 
294  // Free PTW slot
296 
297  if (tr.fault == FAULT_NONE)
298  smmuTLBUpdate(yield, tr);
299  }
300 
301  // Simulate pipelined SMMU->RESPONSE INTERFACE link
303  doDelay(yield, Cycles(1)); // serialize transactions
305  doDelay(yield, smmu.smmuIfcLat - Cycles(1)); // remaining pipeline delay
306 
307  // return SMMU credit
309 
310  return tr;
311 }
312 
313 bool
315 {
316  if (!ifc.microTLBEnable)
317  return false;
318 
320  doDelay(yield, ifc.microTLBLat);
321  const SMMUTLB::Entry *e =
324 
325  if (!e) {
326  DPRINTF(SMMUv3, "micro TLB miss vaddr=%#x sid=%#x ssid=%#x\n",
328 
329  return false;
330  }
331 
332  DPRINTF(SMMUv3,
333  "micro TLB hit vaddr=%#x amask=%#x sid=%#x ssid=%#x paddr=%#x\n",
334  request.addr, e->vaMask, request.sid, request.ssid, e->pa);
335 
336  tr.fault = FAULT_NONE;
337  tr.addr = e->pa + (request.addr & ~e->vaMask);;
338  tr.addrMask = e->vaMask;
339  tr.writable = e->permissions;
340 
341  return true;
342 }
343 
344 bool
346  bool &wasPrefetched)
347 {
348  if (!ifc.mainTLBEnable)
349  return false;
350 
352  doDelay(yield, ifc.mainTLBLat);
353  const SMMUTLB::Entry *e =
356 
357  if (!e) {
358  DPRINTF(SMMUv3,
359  "RESPONSE Interface TLB miss vaddr=%#x sid=%#x ssid=%#x\n",
361 
362  return false;
363  }
364 
365  DPRINTF(SMMUv3,
366  "RESPONSE Interface TLB hit vaddr=%#x amask=%#x sid=%#x ssid=%#x "
367  "paddr=%#x\n", request.addr, e->vaMask, request.sid,
368  request.ssid, e->pa);
369 
370  tr.fault = FAULT_NONE;
371  tr.addr = e->pa + (request.addr & ~e->vaMask);;
372  tr.addrMask = e->vaMask;
373  tr.writable = e->permissions;
374  wasPrefetched = e->prefetched;
375 
376  return true;
377 }
378 
379 bool
381 {
382  if (!smmu.tlbEnable)
383  return false;
384 
385  doSemaphoreDown(yield, smmu.tlbSem);
386  doDelay(yield, smmu.tlbLat);
387  const ARMArchTLB::Entry *e =
390 
391  if (!e) {
392  DPRINTF(SMMUv3, "SMMU TLB miss vaddr=%#x asid=%#x vmid=%#x\n",
394 
395  return false;
396  }
397 
398  DPRINTF(SMMUv3,
399  "SMMU TLB hit vaddr=%#x amask=%#x asid=%#x vmid=%#x paddr=%#x\n",
400  request.addr, e->vaMask, context.asid, context.vmid, e->pa);
401 
402  tr.fault = FAULT_NONE;
403  tr.addr = e->pa + (request.addr & ~e->vaMask);;
404  tr.addrMask = e->vaMask;
405  tr.writable = e->permissions;
406 
407  return true;
408 }
409 
410 void
412  const TranslResult &tr)
413 {
414  assert(tr.fault == FAULT_NONE);
415 
416  if (!ifc.microTLBEnable)
417  return;
418 
420  e.valid = true;
421  e.prefetched = false;
422  e.sid = request.sid;
423  e.ssid = request.ssid;
424  e.vaMask = tr.addrMask;
425  e.va = request.addr & e.vaMask;
426  e.pa = tr.addr & e.vaMask;
427  e.permissions = tr.writable;
428  e.asid = context.asid;
429  e.vmid = context.vmid;
430 
432 
433  DPRINTF(SMMUv3,
434  "micro TLB upd vaddr=%#x amask=%#x paddr=%#x sid=%#x ssid=%#x\n",
435  e.va, e.vaMask, e.pa, e.sid, e.ssid);
436 
438 
440 }
441 
442 void
444  const TranslResult &tr)
445 {
446  assert(tr.fault == FAULT_NONE);
447 
448  if (!ifc.mainTLBEnable)
449  return;
450 
452  e.valid = true;
453  e.prefetched = request.isPrefetch;
454  e.sid = request.sid;
455  e.ssid = request.ssid;
456  e.vaMask = tr.addrMask;
457  e.va = request.addr & e.vaMask;
458  e.pa = tr.addr & e.vaMask;
459  e.permissions = tr.writable;
460  e.asid = context.asid;
461  e.vmid = context.vmid;
462 
465  alloc = request.isPrefetch ?
467 
469 
470  DPRINTF(SMMUv3,
471  "RESPONSE Interface upd vaddr=%#x amask=%#x paddr=%#x sid=%#x "
472  "ssid=%#x\n", e.va, e.vaMask, e.pa, e.sid, e.ssid);
473 
474  ifc.mainTLB->store(e, alloc);
475 
477 }
478 
479 void
481  const TranslResult &tr)
482 {
483  assert(tr.fault == FAULT_NONE);
484 
485  if (!smmu.tlbEnable)
486  return;
487 
489  e.valid = true;
490  e.vaMask = tr.addrMask;
491  e.va = request.addr & e.vaMask;
492  e.asid = context.asid;
493  e.vmid = context.vmid;
494  e.pa = tr.addr & e.vaMask;
495  e.permissions = tr.writable;
496 
497  doSemaphoreDown(yield, smmu.tlbSem);
498 
499  DPRINTF(SMMUv3,
500  "SMMU TLB upd vaddr=%#x amask=%#x paddr=%#x asid=%#x vmid=%#x\n",
501  e.va, e.vaMask, e.pa, e.asid, e.vmid);
502 
503  smmu.tlb.store(e);
504 
506 }
507 
508 bool
510 {
511  if (!smmu.configCacheEnable)
512  return false;
513 
515  doDelay(yield, smmu.configLat);
516  const ConfigCache::Entry *e =
519 
520  if (!e) {
521  DPRINTF(SMMUv3, "Config miss sid=%#x ssid=%#x\n",
523 
524  return false;
525  }
526 
527  DPRINTF(SMMUv3, "Config hit sid=%#x ssid=%#x ttb=%#08x asid=%#x\n",
528  request.sid, request.ssid, e->ttb0, e->asid);
529 
530  tc.stage1Enable = e->stage1_en;
531  tc.stage2Enable = e->stage2_en;
532 
533  tc.ttb0 = e->ttb0;
534  tc.ttb1 = e->ttb1;
535  tc.asid = e->asid;
536  tc.httb = e->httb;
537  tc.vmid = e->vmid;
538 
539  tc.stage1TranslGranule = e->stage1_tg;
540  tc.stage2TranslGranule = e->stage2_tg;
541 
542  tc.t0sz = e->t0sz;
543  tc.s2t0sz = e->s2t0sz;
544 
545  return true;
546 }
547 
548 void
550  const TranslContext &tc)
551 {
552  if (!smmu.configCacheEnable)
553  return;
554 
556  e.valid = true;
557  e.sid = request.sid;
558  e.ssid = request.ssid;
559  e.stage1_en = tc.stage1Enable;
560  e.stage2_en = tc.stage2Enable;
561  e.ttb0 = tc.ttb0;
562  e.ttb1 = tc.ttb1;
563  e.asid = tc.asid;
564  e.httb = tc.httb;
565  e.vmid = tc.vmid;
566  e.stage1_tg = tc.stage1TranslGranule;
567  e.stage2_tg = tc.stage2TranslGranule;
568  e.t0sz = tc.t0sz;
569  e.s2t0sz = tc.s2t0sz;
570 
572 
573  DPRINTF(SMMUv3, "Config upd sid=%#x ssid=%#x\n", e.sid, e.ssid);
574 
576 
578 }
579 
580 bool
582  TranslContext &tc,
583  TranslResult &tr)
584 {
585  tc.stage1Enable = false;
586  tc.stage2Enable = false;
587 
588  StreamTableEntry ste;
589  doReadSTE(yield, ste, request.sid);
590 
591  switch (ste.dw0.config) {
592  case STE_CONFIG_BYPASS:
593  break;
594 
596  tc.stage1Enable = true;
597  break;
598 
600  tc.stage2Enable = true;
601  break;
602 
604  tc.stage1Enable = true;
605  tc.stage2Enable = true;
606  break;
607 
608  default:
609  panic("Bad or unimplemented STE config %d\n",
610  ste.dw0.config);
611  }
612 
613 
614  // Establish stage 2 context first since
615  // Context Descriptors can be in IPA space.
616  if (tc.stage2Enable) {
617  tc.httb = ste.dw3.s2ttb << STE_S2TTB_SHIFT;
618  tc.vmid = ste.dw2.s2vmid;
619  tc.stage2TranslGranule = ste.dw2.s2tg;
620  tc.s2t0sz = ste.dw2.s2t0sz;
621  } else {
622  tc.httb = 0xdeadbeef;
623  tc.vmid = 0;
625  tc.s2t0sz = 0;
626  }
627 
628 
629  // Now fetch stage 1 config.
630  if (context.stage1Enable) {
632  doReadCD(yield, cd, ste, request.sid, request.ssid);
633 
634  tc.ttb0 = cd.dw1.ttb0 << CD_TTB_SHIFT;
635  tc.ttb1 = cd.dw2.ttb1 << CD_TTB_SHIFT;
636  tc.asid = cd.dw0.asid;
637  tc.stage1TranslGranule = cd.dw0.tg0;
638  tc.t0sz = cd.dw0.t0sz;
639  } else {
640  tc.ttb0 = 0xcafebabe;
641  tc.ttb1 = 0xcafed00d;
642  tc.asid = 0;
644  tc.t0sz = 0;
645  }
646 
647  return true;
648 }
649 
650 void
652  Yield &yield,
653  const WalkCache::Entry *&walkEntry,
654  Addr addr, uint16_t asid, uint16_t vmid,
655  unsigned stage, unsigned level)
656 {
657  const char *indent = stage==2 ? " " : "";
658  (void) indent; // this is only used in DPRINTFs
659 
660  const PageTableOps *pt_ops =
661  stage == 1 ?
664 
665  unsigned walkCacheLevels =
667  (stage == 1 ? smmu.walkCacheS1Levels : smmu.walkCacheS2Levels) :
668  0;
669 
670  if ((1 << level) & walkCacheLevels) {
671  doSemaphoreDown(yield, smmu.walkSem);
672  doDelay(yield, smmu.walkLat);
673 
674  walkEntry = smmu.walkCache.lookup(addr, pt_ops->walkMask(level),
675  asid, vmid, stage, level);
676 
677  if (walkEntry) {
678  DPRINTF(SMMUv3, "%sWalkCache hit va=%#x asid=%#x vmid=%#x "
679  "base=%#x (S%d, L%d)\n",
680  indent, addr, asid, vmid, walkEntry->pa, stage, level);
681  } else {
682  DPRINTF(SMMUv3, "%sWalkCache miss va=%#x asid=%#x vmid=%#x "
683  "(S%d, L%d)\n",
684  indent, addr, asid, vmid, stage, level);
685  }
686 
688  }
689 }
690 
691 void
693  Addr vaMask, Addr pa,
694  unsigned stage, unsigned level,
695  bool leaf, uint8_t permissions)
696 {
697  unsigned walkCacheLevels =
699 
700  if (smmu.walkCacheEnable && ((1<<level) & walkCacheLevels)) {
702  e.valid = true;
703  e.va = va;
704  e.vaMask = vaMask;
705  e.asid = stage==1 ? context.asid : 0;
706  e.vmid = context.vmid;
707  e.stage = stage;
708  e.level = level;
709  e.leaf = leaf;
710  e.pa = pa;
711  e.permissions = permissions;
712 
713  doSemaphoreDown(yield, smmu.walkSem);
714 
715  DPRINTF(SMMUv3, "%sWalkCache upd va=%#x mask=%#x asid=%#x vmid=%#x "
716  "tpa=%#x leaf=%s (S%d, L%d)\n",
717  e.stage==2 ? " " : "",
718  e.va, e.vaMask, e.asid, e.vmid,
719  e.pa, e.leaf, e.stage, e.level);
720 
722 
724  }
725 }
726 
727 /*
728  * Please note:
729  * This does not deal with the case where stage 1 page size
730  * is larger than stage 2 page size.
731  */
734  const PageTableOps *pt_ops,
735  unsigned level, Addr walkPtr)
736 {
737  PageTableOps::pte_t pte = 0;
738 
739  doSemaphoreDown(yield, smmu.cycleSem);
740  doDelay(yield, Cycles(1));
742 
743  for (; level <= pt_ops->lastLevel(); level++) {
744  Addr pte_addr = walkPtr + pt_ops->index(addr, level);
745 
746  DPRINTF(SMMUv3, "Fetching S1 L%d PTE from pa=%#08x\n",
747  level, pte_addr);
748 
749  doReadPTE(yield, addr, pte_addr, &pte, 1, level);
750 
751  DPRINTF(SMMUv3, "Got S1 L%d PTE=%#x from pa=%#08x\n",
752  level, pte, pte_addr);
753 
754  doSemaphoreDown(yield, smmu.cycleSem);
755  doDelay(yield, Cycles(1));
757 
758  bool valid = pt_ops->isValid(pte, level);
759  bool leaf = pt_ops->isLeaf(pte, level);
760 
761  if (!valid) {
762  DPRINTF(SMMUv3, "S1 PTE not valid - fault\n");
763 
764  TranslResult tr;
766  return tr;
767  }
768 
769  if (valid && leaf && request.isWrite &&
770  !pt_ops->isWritable(pte, level, false))
771  {
772  DPRINTF(SMMUv3, "S1 page not writable - fault\n");
773 
774  TranslResult tr;
775  tr.fault = FAULT_PERMISSION;
776  return tr;
777  }
778 
779  walkPtr = pt_ops->nextLevelPointer(pte, level);
780 
781  if (leaf)
782  break;
783 
784  if (context.stage2Enable) {
785  TranslResult s2tr = translateStage2(yield, walkPtr, false);
786  if (s2tr.fault != FAULT_NONE)
787  return s2tr;
788 
789  walkPtr = s2tr.addr;
790  }
791 
792  walkCacheUpdate(yield, addr, pt_ops->walkMask(level), walkPtr,
793  1, level, leaf, 0);
794  }
795 
796  TranslResult tr;
797  tr.fault = FAULT_NONE;
798  tr.addrMask = pt_ops->pageMask(pte, level);
799  tr.addr = walkPtr + (addr & ~tr.addrMask);
800  tr.writable = pt_ops->isWritable(pte, level, false);
801 
802  if (context.stage2Enable) {
803  TranslResult s2tr = translateStage2(yield, tr.addr, true);
804  if (s2tr.fault != FAULT_NONE)
805  return s2tr;
806 
807  tr = combineTranslations(tr, s2tr);
808  }
809 
810  walkCacheUpdate(yield, addr, tr.addrMask, walkPtr,
811  1, level, true, tr.writable);
812 
813  return tr;
814 }
815 
818  const PageTableOps *pt_ops,
819  unsigned level, Addr walkPtr)
820 {
822 
823  doSemaphoreDown(yield, smmu.cycleSem);
824  doDelay(yield, Cycles(1));
826 
827  for (; level <= pt_ops->lastLevel(); level++) {
828  Addr pte_addr = walkPtr + pt_ops->index(addr, level);
829 
830  DPRINTF(SMMUv3, " Fetching S2 L%d PTE from pa=%#08x\n",
831  level, pte_addr);
832 
833  doReadPTE(yield, addr, pte_addr, &pte, 2, level);
834 
835  DPRINTF(SMMUv3, " Got S2 L%d PTE=%#x from pa=%#08x\n",
836  level, pte, pte_addr);
837 
838  doSemaphoreDown(yield, smmu.cycleSem);
839  doDelay(yield, Cycles(1));
841 
842  bool valid = pt_ops->isValid(pte, level);
843  bool leaf = pt_ops->isLeaf(pte, level);
844 
845  if (!valid) {
846  DPRINTF(SMMUv3, " S2 PTE not valid - fault\n");
847 
848  TranslResult tr;
850  return tr;
851  }
852 
853  if (valid && leaf && request.isWrite &&
854  !pt_ops->isWritable(pte, level, true))
855  {
856  DPRINTF(SMMUv3, " S2 PTE not writable = fault\n");
857 
858  TranslResult tr;
859  tr.fault = FAULT_PERMISSION;
860  return tr;
861  }
862 
863  walkPtr = pt_ops->nextLevelPointer(pte, level);
864 
865  if (final_tr || smmu.walkCacheNonfinalEnable)
866  walkCacheUpdate(yield, addr, pt_ops->walkMask(level), walkPtr,
867  2, level, leaf,
868  leaf ? pt_ops->isWritable(pte, level, true) : 0);
869  if (leaf)
870  break;
871  }
872 
873  TranslResult tr;
874  tr.fault = FAULT_NONE;
875  tr.addrMask = pt_ops->pageMask(pte, level);
876  tr.addr = walkPtr + (addr & ~tr.addrMask);
877  tr.writable = pt_ops->isWritable(pte, level, true);
878 
879  return tr;
880 }
881 
884 {
885  const PageTableOps *pt_ops =
887 
888  const WalkCache::Entry *walk_ep = NULL;
889  unsigned level;
890 
891  // Level here is actually (level+1) so we can count down
892  // to 0 using unsigned int.
893  for (level = pt_ops->lastLevel() + 1;
894  level > pt_ops->firstLevel(context.t0sz);
895  level--)
896  {
897  walkCacheLookup(yield, walk_ep, addr,
898  context.asid, context.vmid, 1, level-1);
899 
900  if (walk_ep)
901  break;
902  }
903 
904  // Correct level (see above).
905  level -= 1;
906 
907  TranslResult tr;
908  if (walk_ep) {
909  if (walk_ep->leaf) {
910  tr.fault = FAULT_NONE;
911  tr.addr = walk_ep->pa + (addr & ~walk_ep->vaMask);
912  tr.addrMask = walk_ep->vaMask;
913  tr.writable = walk_ep->permissions;
914  } else {
915  tr = walkStage1And2(yield, addr, pt_ops, level+1, walk_ep->pa);
916  }
917  } else {
918  Addr table_addr = context.ttb0;
919  if (context.stage2Enable) {
920  TranslResult s2tr = translateStage2(yield, table_addr, false);
921  if (s2tr.fault != FAULT_NONE)
922  return s2tr;
923 
924  table_addr = s2tr.addr;
925  }
926 
927  tr = walkStage1And2(yield, addr, pt_ops,
928  pt_ops->firstLevel(context.t0sz),
929  table_addr);
930  }
931 
932  if (tr.fault == FAULT_NONE)
933  DPRINTF(SMMUv3, "Translated vaddr %#x to paddr %#x\n", addr, tr.addr);
934 
935  return tr;
936 }
937 
940 {
941  const PageTableOps *pt_ops =
943 
944  const IPACache::Entry *ipa_ep = NULL;
945  if (smmu.ipaCacheEnable) {
946  doSemaphoreDown(yield, smmu.ipaSem);
947  doDelay(yield, smmu.ipaLat);
948  ipa_ep = smmu.ipaCache.lookup(addr, context.vmid);
950  }
951 
952  if (ipa_ep) {
953  TranslResult tr;
954  tr.fault = FAULT_NONE;
955  tr.addr = ipa_ep->pa + (addr & ~ipa_ep->ipaMask);
956  tr.addrMask = ipa_ep->ipaMask;
957  tr.writable = ipa_ep->permissions;
958 
959  DPRINTF(SMMUv3, " IPACache hit ipa=%#x vmid=%#x pa=%#x\n",
960  addr, context.vmid, tr.addr);
961 
962  return tr;
963  } else if (smmu.ipaCacheEnable) {
964  DPRINTF(SMMUv3, " IPACache miss ipa=%#x vmid=%#x\n",
965  addr, context.vmid);
966  }
967 
968  const WalkCache::Entry *walk_ep = NULL;
969  unsigned level = pt_ops->firstLevel(context.s2t0sz);
970 
971  if (final_tr || smmu.walkCacheNonfinalEnable) {
972  // Level here is actually (level+1) so we can count down
973  // to 0 using unsigned int.
974  for (level = pt_ops->lastLevel() + 1;
975  level > pt_ops->firstLevel(context.s2t0sz);
976  level--)
977  {
978  walkCacheLookup(yield, walk_ep, addr,
979  0, context.vmid, 2, level-1);
980 
981  if (walk_ep)
982  break;
983  }
984 
985  // Correct level (see above).
986  level -= 1;
987  }
988 
989  TranslResult tr;
990  if (walk_ep) {
991  if (walk_ep->leaf) {
992  tr.fault = FAULT_NONE;
993  tr.addr = walk_ep->pa + (addr & ~walk_ep->vaMask);
994  tr.addrMask = walk_ep->vaMask;
995  tr.writable = walk_ep->permissions;
996  } else {
997  tr = walkStage2(yield, addr, final_tr, pt_ops,
998  level + 1, walk_ep->pa);
999  }
1000  } else {
1001  tr = walkStage2(yield, addr, final_tr, pt_ops,
1002  pt_ops->firstLevel(context.s2t0sz),
1003  context.httb);
1004  }
1005 
1006  if (tr.fault == FAULT_NONE)
1007  DPRINTF(SMMUv3, " Translated %saddr %#x to paddr %#x\n",
1008  context.stage1Enable ? "ip" : "v", addr, tr.addr);
1009 
1010  if (smmu.ipaCacheEnable) {
1012  e.valid = true;
1013  e.ipaMask = tr.addrMask;
1014  e.ipa = addr & e.ipaMask;
1015  e.pa = tr.addr & tr.addrMask;
1016  e.permissions = tr.writable;
1017  e.vmid = context.vmid;
1018 
1019  doSemaphoreDown(yield, smmu.ipaSem);
1020  smmu.ipaCache.store(e);
1022  }
1023 
1024  return tr;
1025 }
1026 
1029  const TranslResult &s2tr) const
1030 {
1031  if (s2tr.fault != FAULT_NONE)
1032  return s2tr;
1033 
1034  assert(s1tr.fault == FAULT_NONE);
1035 
1036  TranslResult tr;
1037  tr.fault = FAULT_NONE;
1038  tr.addr = s2tr.addr;
1039  tr.addrMask = s1tr.addrMask | s2tr.addrMask;
1040  tr.writable = s1tr.writable & s2tr.writable;
1041 
1042  return tr;
1043 }
1044 
1045 bool
1047 {
1048  Addr addr4k = request.addr & ~0xfffULL;
1049 
1050  for (auto it = ifc.duplicateReqs.begin();
1051  it != ifc.duplicateReqs.end();
1052  ++it)
1053  {
1054  Addr other4k = (*it)->request.addr & ~0xfffULL;
1055  if (addr4k == other4k)
1056  return true;
1057  }
1058 
1059  return false;
1060 }
1061 
1062 void
1064 {
1065  DPRINTF(SMMUv3Hazard, "4kReg: p=%p a4k=%#x\n",
1066  this, request.addr & ~0xfffULL);
1067 
1068  ifc.duplicateReqs.push_back(this);
1069 }
1070 
1071 void
1073 {
1074  Addr addr4k = request.addr & ~0xfffULL;
1075 
1076  bool found_hazard;
1077 
1078  do {
1079  found_hazard = false;
1080 
1081  for (auto it = ifc.duplicateReqs.begin();
1082  it!=ifc.duplicateReqs.end() && *it!=this;
1083  ++it)
1084  {
1085  Addr other4k = (*it)->request.addr & ~0xfffULL;
1086 
1087  DPRINTF(SMMUv3Hazard, "4kHold: p=%p a4k=%#x Q: p=%p a4k=%#x\n",
1088  this, addr4k, *it, other4k);
1089 
1090  if (addr4k == other4k) {
1091  DPRINTF(SMMUv3Hazard,
1092  "4kHold: p=%p a4k=%#x WAIT on p=%p a4k=%#x\n",
1093  this, addr4k, *it, other4k);
1094 
1096 
1097  DPRINTF(SMMUv3Hazard, "4kHold: p=%p a4k=%#x RESUME\n",
1098  this, addr4k);
1099 
1100  // This is to avoid checking *it!=this after doWaitForSignal()
1101  // since it could have been deleted.
1102  found_hazard = true;
1103  break;
1104  }
1105  }
1106  } while (found_hazard);
1107 }
1108 
1109 void
1111 {
1112  DPRINTF(SMMUv3Hazard, "4kRel: p=%p a4k=%#x\n",
1113  this, request.addr & ~0xfffULL);
1114 
1116 
1117  for (it = ifc.duplicateReqs.begin(); it != ifc.duplicateReqs.end(); ++it)
1118  if (*it == this)
1119  break;
1120 
1121  if (it == ifc.duplicateReqs.end())
1122  panic("hazard4kRelease: request not found");
1123 
1124  ifc.duplicateReqs.erase(it);
1125 
1127 }
1128 
1129 void
1131 {
1132  auto orderId = AMBA::orderId(request.pkt);
1133 
1134  DPRINTF(SMMUv3Hazard, "IdReg: p=%p oid=%d\n", this, orderId);
1135 
1136  assert(orderId < SMMU_MAX_TRANS_ID);
1137 
1139  request.isWrite ?
1141  depReqs.push_back(this);
1142 }
1143 
1144 void
1146 {
1147  auto orderId = AMBA::orderId(request.pkt);
1148 
1149  DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d\n", this, orderId);
1150 
1152  request.isWrite ?
1155 
1156  bool found_hazard;
1157 
1158  do {
1159  found_hazard = false;
1160 
1161  for (auto it = depReqs.begin(); it!=depReqs.end() && *it!=this; ++it) {
1162  DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d Q: %p\n",
1163  this, orderId, *it);
1164 
1165  if (AMBA::orderId((*it)->request.pkt) == orderId) {
1166  DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d WAIT on=%p\n",
1167  this, orderId, *it);
1168 
1170 
1171  DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d RESUME\n",
1172  this, orderId);
1173 
1174  // This is to avoid checking *it!=this after doWaitForSignal()
1175  // since it could have been deleted.
1176  found_hazard = true;
1177  break;
1178  }
1179  }
1180  } while (found_hazard);
1181 }
1182 
1183 void
1185 {
1186  auto orderId = AMBA::orderId(request.pkt);
1187 
1188  DPRINTF(SMMUv3Hazard, "IdRel: p=%p oid=%d\n", this, orderId);
1189 
1191  request.isWrite ?
1194 
1195  for (it = depReqs.begin(); it != depReqs.end(); ++it) {
1196  if (*it == this)
1197  break;
1198  }
1199 
1200  if (it == depReqs.end())
1201  panic("hazardIdRelease: request not found");
1202 
1203  depReqs.erase(it);
1204 
1206 }
1207 
1208 void
1210 {
1211  if (!smmu.system.isTimingMode())
1212  return;
1213 
1215  return;
1216 
1217  std::string proc_name = csprintf("%sprf", name());
1218  SMMUTranslationProcess *proc =
1219  new SMMUTranslationProcess(proc_name, smmu, ifc);
1220 
1221  proc->beginTransaction(
1223  proc->scheduleWakeup(smmu.clockEdge(Cycles(1)));
1224 }
1225 
1226 void
1228  const TranslResult &tr)
1229 {
1230  assert(tr.fault == FAULT_NONE);
1231 
1232  unsigned numRequestorBeats = request.isWrite ?
1235  1;
1236 
1238  doDelay(yield, Cycles(numRequestorBeats));
1240 
1241 
1246  (request.size + (ifc.portWidth-1)) / ifc.portWidth;
1247 
1249 
1250 
1251  SMMUAction a;
1252 
1253  if (request.isAtsRequest) {
1254  a.type = ACTION_SEND_RESP_ATS;
1255 
1256  if (smmu.system.isAtomicMode()) {
1258  } else if (smmu.system.isTimingMode()) {
1260  } else {
1261  panic("Not in atomic or timing mode");
1262  }
1263  } else {
1264  a.type = ACTION_SEND_REQ_FINAL;
1265  a.ifc = &ifc;
1266  }
1267 
1268  a.pkt = request.pkt;
1269  a.delay = 0;
1270 
1271  a.pkt->setAddr(tr.addr);
1272  a.pkt->req->setPaddr(tr.addr);
1273 
1274  yield(a);
1275 
1276  if (!request.isAtsRequest) {
1277  PacketPtr pkt = yield.get();
1278  pkt->setAddr(request.addr);
1279 
1280  a.type = ACTION_SEND_RESP;
1281  a.pkt = pkt;
1282  a.ifc = &ifc;
1283  a.delay = 0;
1284  yield(a);
1285  }
1286 }
1287 
1288 void
1290 {
1292 
1293  SMMUAction a;
1294  a.type = ACTION_TERMINATE;
1295  a.pkt = NULL;
1296  a.ifc = &ifc;
1297  a.delay = 0;
1298  yield(a);
1299 }
1300 
1301 void
1303 {
1304  int sizeMask = mask(smmu.regs.eventq_base & Q_BASE_SIZE_MASK);
1305 
1306  if (((smmu.regs.eventq_prod+1) & sizeMask) ==
1307  (smmu.regs.eventq_cons & sizeMask))
1308  panic("Event queue full - aborting\n");
1309 
1310  Addr event_addr =
1312  (smmu.regs.eventq_prod & sizeMask) * sizeof(ev);
1313 
1314  DPRINTF(SMMUv3, "Sending event to addr=%#08x (pos=%d): type=%#x stag=%#x "
1315  "flags=%#x sid=%#x ssid=%#x va=%#08x ipa=%#x\n",
1316  event_addr, smmu.regs.eventq_prod, ev.type, ev.stag,
1317  ev.flags, ev.streamId, ev.substreamId, ev.va, ev.ipa);
1318 
1319  // This deliberately resets the overflow field in eventq_prod!
1320  smmu.regs.eventq_prod = (smmu.regs.eventq_prod + 1) & sizeMask;
1321 
1322  doWrite(yield, event_addr, &ev, sizeof(ev));
1323 
1325  panic("eventq msi not enabled\n");
1326 
1329 }
1330 
1331 void
1333  StreamTableEntry &ste,
1334  uint32_t sid)
1335 {
1336  unsigned max_sid = 1 << (smmu.regs.strtab_base_cfg & ST_CFG_SIZE_MASK);
1337  if (sid >= max_sid)
1338  panic("SID %#x out of range, max=%#x", sid, max_sid);
1339 
1340  Addr ste_addr;
1341 
1343  unsigned split =
1345 
1346  if (split!= 7 && split!=8 && split!=16)
1347  panic("Invalid stream table split %d", split);
1348 
1349  uint64_t l2_ptr;
1350  uint64_t l2_addr =
1352  bits(sid, 32, split) * sizeof(l2_ptr);
1353 
1354  DPRINTF(SMMUv3, "Read L1STE at %#x\n", l2_addr);
1355 
1356  doReadConfig(yield, l2_addr, &l2_ptr, sizeof(l2_ptr), sid, 0);
1357 
1358  DPRINTF(SMMUv3, "Got L1STE L1 at %#x: 0x%016x\n", l2_addr, l2_ptr);
1359 
1360  unsigned span = l2_ptr & ST_L2_SPAN_MASK;
1361  if (span == 0)
1362  panic("Invalid level 1 stream table descriptor");
1363 
1364  unsigned index = bits(sid, split-1, 0);
1365  if (index >= (1 << span))
1366  panic("StreamID %d out of level 1 descriptor range %d",
1367  sid, 1<<span);
1368 
1369  ste_addr = (l2_ptr & ST_L2_ADDR_MASK) + index * sizeof(ste);
1370 
1372  } else if ((smmu.regs.strtab_base_cfg & ST_CFG_FMT_MASK)
1373  == ST_CFG_FMT_LINEAR) {
1374  ste_addr =
1375  (smmu.regs.strtab_base & VMT_BASE_ADDR_MASK) + sid * sizeof(ste);
1376  } else {
1377  panic("Invalid stream table format");
1378  }
1379 
1380  DPRINTF(SMMUv3, "Read STE at %#x\n", ste_addr);
1381 
1382  doReadConfig(yield, ste_addr, &ste, sizeof(ste), sid, 0);
1383 
1384  DPRINTF(SMMUv3, "Got STE at %#x [0]: 0x%016x\n", ste_addr, ste.dw0);
1385  DPRINTF(SMMUv3, " STE at %#x [1]: 0x%016x\n", ste_addr, ste.dw1);
1386  DPRINTF(SMMUv3, " STE at %#x [2]: 0x%016x\n", ste_addr, ste.dw2);
1387  DPRINTF(SMMUv3, " STE at %#x [3]: 0x%016x\n", ste_addr, ste.dw3);
1388  DPRINTF(SMMUv3, " STE at %#x [4]: 0x%016x\n", ste_addr, ste._pad[0]);
1389  DPRINTF(SMMUv3, " STE at %#x [5]: 0x%016x\n", ste_addr, ste._pad[1]);
1390  DPRINTF(SMMUv3, " STE at %#x [6]: 0x%016x\n", ste_addr, ste._pad[2]);
1391  DPRINTF(SMMUv3, " STE at %#x [7]: 0x%016x\n", ste_addr, ste._pad[3]);
1392 
1393  if (!ste.dw0.valid)
1394  panic("STE @ %#x not valid\n", ste_addr);
1395 
1396  smmu.stats.steFetches++;
1397 }
1398 
1399 void
1402  const StreamTableEntry &ste,
1403  uint32_t sid, uint32_t ssid)
1404 {
1405  Addr cd_addr = 0;
1406 
1407  if (ste.dw0.s1cdmax == 0) {
1408  cd_addr = ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT;
1409  } else {
1410  unsigned max_ssid = 1 << ste.dw0.s1cdmax;
1411  if (ssid >= max_ssid)
1412  panic("SSID %#x out of range, max=%#x", ssid, max_ssid);
1413 
1414  if (ste.dw0.s1fmt==STAGE1_CFG_2L_4K ||
1415  ste.dw0.s1fmt==STAGE1_CFG_2L_64K)
1416  {
1417  unsigned split = ste.dw0.s1fmt==STAGE1_CFG_2L_4K ? 7 : 11;
1418 
1419  uint64_t l2_ptr;
1420  uint64_t l2_addr = (ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT) +
1421  bits(ssid, 24, split) * sizeof(l2_ptr);
1422 
1423  if (context.stage2Enable)
1424  l2_addr = translateStage2(yield, l2_addr, false).addr;
1425 
1426  DPRINTF(SMMUv3, "Read L1CD at %#x\n", l2_addr);
1427 
1428  doReadConfig(yield, l2_addr, &l2_ptr, sizeof(l2_ptr), sid, ssid);
1429 
1430  DPRINTF(SMMUv3, "Got L1CD at %#x: 0x%016x\n", l2_addr, l2_ptr);
1431 
1432  cd_addr = l2_ptr + bits(ssid, split-1, 0) * sizeof(cd);
1433 
1435  } else if (ste.dw0.s1fmt == STAGE1_CFG_1L) {
1436  cd_addr = (ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT) + ssid*sizeof(cd);
1437  }
1438  }
1439 
1440  if (context.stage2Enable)
1441  cd_addr = translateStage2(yield, cd_addr, false).addr;
1442 
1443  DPRINTF(SMMUv3, "Read CD at %#x\n", cd_addr);
1444 
1445  doReadConfig(yield, cd_addr, &cd, sizeof(cd), sid, ssid);
1446 
1447  DPRINTF(SMMUv3, "Got CD at %#x [0]: 0x%016x\n", cd_addr, cd.dw0);
1448  DPRINTF(SMMUv3, " CD at %#x [1]: 0x%016x\n", cd_addr, cd.dw1);
1449  DPRINTF(SMMUv3, " CD at %#x [2]: 0x%016x\n", cd_addr, cd.dw2);
1450  DPRINTF(SMMUv3, " CD at %#x [3]: 0x%016x\n", cd_addr, cd.mair);
1451  DPRINTF(SMMUv3, " CD at %#x [4]: 0x%016x\n", cd_addr, cd.amair);
1452  DPRINTF(SMMUv3, " CD at %#x [5]: 0x%016x\n", cd_addr, cd._pad[0]);
1453  DPRINTF(SMMUv3, " CD at %#x [6]: 0x%016x\n", cd_addr, cd._pad[1]);
1454  DPRINTF(SMMUv3, " CD at %#x [7]: 0x%016x\n", cd_addr, cd._pad[2]);
1455 
1456 
1457  if (!cd.dw0.valid)
1458  panic("CD @ %#x not valid\n", cd_addr);
1459 
1460  smmu.stats.cdFetches++;
1461 }
1462 
1463 void
1465  void *ptr, size_t size,
1466  uint32_t sid, uint32_t ssid)
1467 {
1468  doRead(yield, addr, ptr, size);
1469 }
1470 
1471 void
1473  void *ptr, unsigned stage,
1474  unsigned level)
1475 {
1476  size_t pte_size = sizeof(PageTableOps::pte_t);
1477 
1478  Addr mask = pte_size - 1;
1479  Addr base = addr & ~mask;
1480 
1481  doRead(yield, base, ptr, pte_size);
1482 }
1483 
1484 } // namespace gem5
gem5::ACTION_INITIAL_NOP
@ ACTION_INITIAL_NOP
Definition: smmu_v3_proc.hh:60
gem5::X86ISA::level
Bitfield< 20 > level
Definition: intmessage.hh:51
gem5::curTick
Tick curTick()
The universal simulation clock.
Definition: cur_tick.hh:46
gem5::ConfigCache::lookup
const Entry * lookup(uint32_t sid, uint32_t ssid, bool updStats=true)
Definition: smmu_v3_caches.cc:843
gem5::StreamTableEntry::s2tg
Bitfield< 47, 46 > s2tg
Definition: smmu_v3_defs.hh:239
gem5::SMMUTranslationProcess::FAULT_TRANSLATION
@ FAULT_TRANSLATION
Definition: smmu_v3_transl.hh:85
gem5::PageTableOps::walkMask
virtual Addr walkMask(unsigned level) const =0
gem5::SMMUTranslationProcess::microTLBUpdate
void microTLBUpdate(Yield &yield, const TranslResult &tr)
Definition: smmu_v3_transl.cc:411
gem5::STE_CONFIG_STAGE2_ONLY
@ STE_CONFIG_STAGE2_ONLY
Definition: smmu_v3_defs.hh:61
gem5::WalkCache::lookup
const Entry * lookup(Addr va, Addr vaMask, uint16_t asid, uint16_t vmid, unsigned stage, unsigned level, bool updStats=true)
Definition: smmu_v3_caches.cc:1021
gem5::SMMUTranslRequest::sid
uint32_t sid
Definition: smmu_v3_transl.hh:54
gem5::SMMUv3DeviceInterface::microTLBEnable
const bool microTLBEnable
Definition: smmu_v3_deviceifc.hh:68
gem5::StreamTableEntry::s1ctxptr
Bitfield< 51, 6 > s1ctxptr
Definition: smmu_v3_defs.hh:200
gem5::SMMUTranslationProcess::hazardIdRegister
void hazardIdRegister()
Used to force ordering on transactions with the same orderId.
Definition: smmu_v3_transl.cc:1130
gem5::SMMUTranslRequest::ssid
uint32_t ssid
Definition: smmu_v3_transl.hh:55
gem5::STE_CONFIG_BYPASS
@ STE_CONFIG_BYPASS
Definition: smmu_v3_defs.hh:59
gem5::SMMUTLB::AllocPolicy
AllocPolicy
Definition: smmu_v3_caches.hh:102
gem5::WalkCache::Entry::leaf
bool leaf
Definition: smmu_v3_caches.hh:309
gem5::SMMUv3::walkCache
WalkCache walkCache
Definition: smmu_v3.hh:106
gem5::ARMArchTLB::store
void store(const Entry &incoming)
Definition: smmu_v3_caches.cc:497
gem5::SMMUv3::ifcSmmuSem
SMMUSemaphore ifcSmmuSem
Definition: smmu_v3.hh:120
gem5::ConfigCache::store
void store(const Entry &incoming)
Definition: smmu_v3_caches.cc:875
gem5::StreamTableEntry
Definition: smmu_v3_defs.hh:194
gem5::SMMUTranslationProcess::issuePrefetch
void issuePrefetch(Addr addr)
Definition: smmu_v3_transl.cc:1209
system.hh
gem5::SMMUTranslationProcess::hazardIdRelease
void hazardIdRelease()
Definition: smmu_v3_transl.cc:1184
gem5::SMMUv3::getPageTableOps
const PageTableOps * getPageTableOps(uint8_t trans_granule)
Definition: smmu_v3.cc:571
gem5::SMMUEvent::va
uint64_t va
Definition: smmu_v3_defs.hh:404
gem5::SMMUv3DeviceInterface::mainTLBLat
const Cycles mainTLBLat
Definition: smmu_v3_deviceifc.hh:76
gem5::ArmISA::span
Bitfield< 23 > span
Definition: misc_types.hh:348
gem5::Coroutine::CallerType::get
std::enable_if_t<!std::is_same< T, void >::value, T > get()
get() is the way we can extrapolate arguments from the coroutine caller.
Definition: coroutine.hh:142
gem5::SMMUTranslationProcess::TranslContext::ttb1
Addr ttb1
Definition: smmu_v3_transl.hh:73
gem5::SMMURegs::eventq_base
uint64_t eventq_base
Definition: smmu_v3_defs.hh:154
gem5::SMMUProcess
Definition: smmu_v3_proc.hh:97
gem5::SMMUEvent::substreamId
uint32_t substreamId
Definition: smmu_v3_defs.hh:403
gem5::SMMUv3::requestPortWidth
const unsigned requestPortWidth
Definition: smmu_v3.hh:117
gem5::SMMUv3::transSem
SMMUSemaphore transSem
Definition: smmu_v3.hh:127
gem5::SMMUProcess::doSemaphoreDown
void doSemaphoreDown(Yield &yield, SMMUSemaphore &sem)
Definition: smmu_v3_proc.cc:151
gem5::SMMUv3::runProcessTiming
SMMUAction runProcessTiming(SMMUProcess *proc, PacketPtr pkt)
Definition: smmu_v3.cc:286
gem5::SMMUTranslationProcess::smmuTLBLookup
bool smmuTLBLookup(Yield &yield, TranslResult &tr)
Definition: smmu_v3_transl.cc:380
gem5::MipsISA::index
Bitfield< 30, 0 > index
Definition: pra_constants.hh:47
gem5::SMMUv3::regs
SMMURegs regs
Definition: smmu_v3.hh:156
gem5::SMMUAction
Definition: smmu_v3_proc.hh:70
gem5::SMMUTranslationProcess::recvTick
Tick recvTick
Definition: smmu_v3_transl.hh:102
gem5::PageTableOps::isWritable
virtual bool isWritable(pte_t pte, unsigned level, bool stage2) const =0
gem5::SMMUTranslationProcess::hazard4kHold
void hazard4kHold(Yield &yield)
Definition: smmu_v3_transl.cc:1072
gem5::SMMUTranslationProcess::smmuTranslation
TranslResult smmuTranslation(Yield &yield)
Definition: smmu_v3_transl.cc:252
gem5::SMMUTranslationProcess::TranslContext::ttb0
Addr ttb0
Definition: smmu_v3_transl.hh:73
gem5::SMMUv3DeviceInterface::microTLB
SMMUTLB * microTLB
Definition: smmu_v3_deviceifc.hh:65
gem5::TRANS_GRANULE_INVALID
@ TRANS_GRANULE_INVALID
Definition: smmu_v3_defs.hh:85
gem5::Packet::req
RequestPtr req
A pointer to the original request.
Definition: packet.hh:366
gem5::SMMUTranslationProcess::translateStage1And2
TranslResult translateStage1And2(Yield &yield, Addr addr)
Definition: smmu_v3_transl.cc:883
gem5::SMMUTranslationProcess::SMMUTranslationProcess
SMMUTranslationProcess(const std::string &name, SMMUv3 &_smmu, SMMUv3DeviceInterface &_ifc)
Definition: smmu_v3_transl.cc:82
gem5::ArmISA::asid
asid
Definition: misc_types.hh:617
gem5::SMMUv3DeviceInterface::devicePortSem
SMMUSemaphore devicePortSem
Definition: smmu_v3_deviceifc.hh:71
gem5::WalkCache::Entry
Definition: smmu_v3_caches.hh:295
gem5::SMMUTranslationProcess::configCacheUpdate
void configCacheUpdate(Yield &yield, const TranslContext &tc)
Definition: smmu_v3_transl.cc:549
gem5::SMMUv3::ipaCacheEnable
const bool ipaCacheEnable
Definition: smmu_v3.hh:110
gem5::SMMUEvent::stag
uint16_t stag
Definition: smmu_v3_defs.hh:400
gem5::IPACache::Entry::permissions
uint8_t permissions
Definition: smmu_v3_caches.hh:220
gem5::SMMUv3::SMMUv3Stats::ptwTimeDist
statistics::Distribution ptwTimeDist
Definition: smmu_v3.hh:148
smmu_v3_transl.hh
gem5::System::isAtomicMode
bool isAtomicMode() const
Is the system in atomic mode?
Definition: system.hh:264
gem5::SMMUv3::tlbLat
const Cycles tlbLat
Definition: smmu_v3.hh:132
gem5::SMMURegs::eventq_prod
uint32_t eventq_prod
Definition: smmu_v3_defs.hh:185
gem5::SMMURegs::eventq_irq_cfg0
uint64_t eventq_irq_cfg0
Definition: smmu_v3_defs.hh:157
gem5::ST_CFG_SIZE_MASK
@ ST_CFG_SIZE_MASK
Definition: smmu_v3_defs.hh:91
gem5::SMMUv3DeviceInterface::prefetchEnable
const bool prefetchEnable
Definition: smmu_v3_deviceifc.hh:89
gem5::SMMUv3::walkCacheS1Levels
const unsigned walkCacheS1Levels
Definition: smmu_v3.hh:115
gem5::ArmISA::e
Bitfield< 9 > e
Definition: misc_types.hh:64
gem5::ArmISA::a
Bitfield< 8 > a
Definition: misc_types.hh:65
gem5::SMMUTranslRequest::prefetch
static SMMUTranslRequest prefetch(Addr addr, uint32_t sid, uint32_t ssid)
Definition: smmu_v3_transl.cc:67
gem5::PageTableOps::index
virtual Addr index(Addr va, unsigned level) const =0
gem5::SMMUTranslRequest::addr
Addr addr
Definition: smmu_v3_transl.hh:52
gem5::Packet::isWrite
bool isWrite() const
Definition: packet.hh:583
gem5::SMMUv3::ifcSmmuLat
const Cycles ifcSmmuLat
Definition: smmu_v3.hh:133
gem5::SMMUTranslRequest::isAtsRequest
bool isAtsRequest
Definition: smmu_v3_transl.hh:58
gem5::SMMUTranslationProcess::TranslResult::addrMask
Addr addrMask
Definition: smmu_v3_transl.hh:93
gem5::SMMURegs::strtab_base_cfg
uint32_t strtab_base_cfg
Definition: smmu_v3_defs.hh:149
gem5::SMMUv3::SMMUv3Stats::cdFetches
statistics::Scalar cdFetches
Definition: smmu_v3.hh:146
gem5::ACTION_SEND_RESP
@ ACTION_SEND_RESP
Definition: smmu_v3_proc.hh:63
gem5::SMMUTranslationProcess::context
TranslContext context
Definition: smmu_v3_transl.hh:100
gem5::StreamTableEntry::s2vmid
s2vmid
Definition: smmu_v3_defs.hh:233
gem5::SMMUv3::SMMUv3Stats::steFetches
statistics::Scalar steFetches
Definition: smmu_v3.hh:144
gem5::SMMUTranslationProcess::TranslContext::t0sz
uint8_t t0sz
Definition: smmu_v3_transl.hh:78
gem5::SMMUTranslationProcess::walkStage2
TranslResult walkStage2(Yield &yield, Addr addr, bool final_tr, const PageTableOps *pt_ops, unsigned level, Addr walkPtr)
Definition: smmu_v3_transl.cc:817
gem5::csprintf
std::string csprintf(const char *format, const Args &...args)
Definition: cprintf.hh:161
gem5::SMMUProcess::doSemaphoreUp
void doSemaphoreUp(SMMUSemaphore &sem)
Definition: smmu_v3_proc.cc:163
gem5::SMMUTranslationProcess::TranslContext
Definition: smmu_v3_transl.hh:69
gem5::Packet::makeAtomicResponse
void makeAtomicResponse()
Definition: packet.hh:1043
gem5::PageTableOps::pte_t
int64_t pte_t
Definition: smmu_v3_ptops.hh:50
gem5::CR0_SMMUEN_MASK
@ CR0_SMMUEN_MASK
Definition: smmu_v3_defs.hh:323
gem5::X86ISA::base
Bitfield< 51, 12 > base
Definition: pagetable.hh:141
gem5::SMMUv3DeviceInterface::mainTLBEnable
const bool mainTLBEnable
Definition: smmu_v3_deviceifc.hh:69
gem5::ConfigCache::Entry
Definition: smmu_v3_caches.hh:248
gem5::SMMUv3::configSem
SMMUSemaphore configSem
Definition: smmu_v3.hh:122
gem5::SMMUv3::SMMUv3Stats::steL1Fetches
statistics::Scalar steL1Fetches
Definition: smmu_v3.hh:143
gem5::SMMUv3::walkCacheEnable
const bool walkCacheEnable
Definition: smmu_v3.hh:111
gem5::SMMUTranslationProcess::hazardIdHold
void hazardIdHold(Yield &yield)
Definition: smmu_v3_transl.cc:1145
gem5::SMMUProcess::reinit
void reinit()
Definition: smmu_v3_proc.cc:67
gem5::SMMUTLB::lookup
const Entry * lookup(uint32_t sid, uint32_t ssid, Addr va, bool updStats=true)
Definition: smmu_v3_caches.cc:184
gem5::SMMUTranslationProcess::hazard4kRegister
void hazard4kRegister()
Definition: smmu_v3_transl.cc:1063
gem5::SMMUTranslationProcess::sendEvent
void sendEvent(Yield &yield, const SMMUEvent &ev)
Definition: smmu_v3_transl.cc:1302
gem5::StreamTableEntry::s1fmt
Bitfield< 5, 4 > s1fmt
Definition: smmu_v3_defs.hh:199
gem5::statistics::DistBase::sample
void sample(const U &v, int n=1)
Add a value to the distribtion n times.
Definition: statistics.hh:1325
gem5::SMMUTranslRequest::fromPacket
static SMMUTranslRequest fromPacket(PacketPtr pkt, bool ats=false)
Definition: smmu_v3_transl.cc:50
gem5::SMMUTranslationProcess::FAULT_PERMISSION
@ FAULT_PERMISSION
Definition: smmu_v3_transl.hh:86
gem5::SMMUTranslRequest::isPrefetch
bool isPrefetch
Definition: smmu_v3_transl.hh:57
gem5::SMMUTranslationProcess::doReadCD
void doReadCD(Yield &yield, ContextDescriptor &cd, const StreamTableEntry &ste, uint32_t sid, uint32_t ssid)
Definition: smmu_v3_transl.cc:1400
gem5::SMMUv3::walkCacheNonfinalEnable
const bool walkCacheNonfinalEnable
Definition: smmu_v3.hh:114
gem5::SMMUv3::SMMUv3Stats::translationTimeDist
statistics::Distribution translationTimeDist
Definition: smmu_v3.hh:147
gem5::SMMUv3
Definition: smmu_v3.hh:85
gem5::WalkCache::Entry::vaMask
Addr vaMask
Definition: smmu_v3_caches.hh:302
gem5::mask
constexpr uint64_t mask(unsigned nbits)
Generate a 64-bit mask of 'nbits' 1s, right justified.
Definition: bitfield.hh:63
gem5::SMMUTranslationProcess::configCacheLookup
bool configCacheLookup(Yield &yield, TranslContext &tc)
Definition: smmu_v3_transl.cc:509
gem5::STAGE1_CFG_1L
@ STAGE1_CFG_1L
Definition: smmu_v3_defs.hh:67
gem5::SMMUTranslationProcess::doReadPTE
void doReadPTE(Yield &yield, Addr va, Addr addr, void *ptr, unsigned stage, unsigned level)
Definition: smmu_v3_transl.cc:1472
gem5::SMMUv3::configCache
ConfigCache configCache
Definition: smmu_v3.hh:104
gem5::VMT_BASE_ADDR_MASK
@ VMT_BASE_ADDR_MASK
Definition: smmu_v3_defs.hh:99
gem5::Cycles
Cycles is a wrapper class for representing cycle counts, i.e.
Definition: types.hh:78
gem5::SMMUTranslRequest::isWrite
bool isWrite
Definition: smmu_v3_transl.hh:56
gem5::ContextDescriptor
Definition: smmu_v3_defs.hh:260
gem5::AMBA::orderId
static OrderID orderId(PacketPtr pkt)
Definition: amba.hh:52
gem5::ST_L2_ADDR_MASK
@ ST_L2_ADDR_MASK
Definition: smmu_v3_defs.hh:97
gem5::SMMUTranslationProcess::TranslResult::writable
bool writable
Definition: smmu_v3_transl.hh:94
gem5::SMMUv3DeviceInterface::mainTLBSem
SMMUSemaphore mainTLBSem
Definition: smmu_v3_deviceifc.hh:73
gem5::SMMUProcess::name
const std::string name() const
Definition: smmu_v3_proc.hh:135
gem5::SMMUv3DeviceInterface::dependentReads
std::list< SMMUTranslationProcess * > dependentReads[SMMU_MAX_TRANS_ID]
Definition: smmu_v3_deviceifc.hh:95
gem5::PageTableOps::firstLevel
virtual unsigned firstLevel(uint8_t tsz) const =0
gem5::SMMUv3DeviceInterface::portWidth
const unsigned portWidth
Definition: smmu_v3_deviceifc.hh:83
gem5::ArmISA::pa
Bitfield< 39, 12 > pa
Definition: misc_types.hh:656
gem5::PageTableOps::isValid
virtual bool isValid(pte_t pte, unsigned level) const =0
gem5::ACTION_SEND_RESP_ATS
@ ACTION_SEND_RESP_ATS
Definition: smmu_v3_proc.hh:64
gem5::Q_BASE_ADDR_MASK
@ Q_BASE_ADDR_MASK
Definition: smmu_v3_defs.hh:102
gem5::SMMUEvent::flags
uint32_t flags
Definition: smmu_v3_defs.hh:401
gem5::SMMUTranslationProcess::faultTick
GEM5_CLASS_VAR_USED Tick faultTick
Definition: smmu_v3_transl.hh:103
gem5::ARMArchTLB::Entry
Definition: smmu_v3_caches.hh:164
gem5::SMMUv3::cycleSem
SMMUSemaphore cycleSem
Definition: smmu_v3.hh:129
gem5::SMMUProcess::doBroadcastSignal
void doBroadcastSignal(SMMUSignal &sig)
Definition: smmu_v3_proc.cc:185
gem5::PageTableOps::isLeaf
virtual bool isLeaf(pte_t pte, unsigned level) const =0
gem5::SMMUTranslationProcess::~SMMUTranslationProcess
virtual ~SMMUTranslationProcess()
Definition: smmu_v3_transl.cc:96
gem5::ARMArchTLB::lookup
const Entry * lookup(Addr va, uint16_t asid, uint16_t vmid, bool updStats=true)
Definition: smmu_v3_caches.cc:464
gem5::ACTION_TERMINATE
@ ACTION_TERMINATE
Definition: smmu_v3_proc.hh:67
gem5::StreamTableEntry::config
Bitfield< 3, 1 > config
Definition: smmu_v3_defs.hh:198
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:186
gem5::SMMUv3::requestPortSem
SMMUSemaphore requestPortSem
Definition: smmu_v3.hh:125
gem5::Packet
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:283
gem5::Packet::setAddr
void setAddr(Addr _addr)
Update the address of this packet mid-transaction.
Definition: packet.hh:789
gem5::ST_CFG_SPLIT_SHIFT
@ ST_CFG_SPLIT_SHIFT
Definition: smmu_v3_defs.hh:74
gem5::SMMUv3DeviceInterface::dependentReqRemoved
SMMUSignal dependentReqRemoved
Definition: smmu_v3_deviceifc.hh:97
gem5::SMMUv3::ptwSem
SMMUSemaphore ptwSem
Definition: smmu_v3.hh:128
gem5::Tick
uint64_t Tick
Tick count type.
Definition: types.hh:58
gem5::StreamTableEntry::_pad
uint64_t _pad[4]
Definition: smmu_v3_defs.hh:257
gem5::IPACache::store
void store(const Entry &incoming)
Definition: smmu_v3_caches.cc:696
gem5::PageTableOps::nextLevelPointer
virtual Addr nextLevelPointer(pte_t pte, unsigned level) const =0
gem5::STAGE1_CFG_2L_64K
@ STAGE1_CFG_2L_64K
Definition: smmu_v3_defs.hh:69
gem5::SMMUTLB::store
void store(const Entry &incoming, AllocPolicy alloc)
Definition: smmu_v3_caches.cc:245
gem5::SMMUTranslRequest::pkt
PacketPtr pkt
Definition: smmu_v3_transl.hh:60
gem5::SMMUTranslationProcess::main
virtual void main(Yield &yield)
Definition: smmu_v3_transl.cc:136
gem5::SMMUTranslationProcess::TranslContext::httb
Addr httb
Definition: smmu_v3_transl.hh:73
gem5::SMMUTranslationProcess::walkStage1And2
TranslResult walkStage1And2(Yield &yield, Addr addr, const PageTableOps *pt_ops, unsigned level, Addr walkPtr)
Definition: smmu_v3_transl.cc:733
gem5::SMMU_MAX_TRANS_ID
@ SMMU_MAX_TRANS_ID
Definition: smmu_v3_defs.hh:410
gem5::SMMUTranslationProcess::hazard4kCheck
bool hazard4kCheck()
Used to force ordering on transactions with same (SID, SSID, 4k page) to avoid multiple identical pag...
Definition: smmu_v3_transl.cc:1046
gem5::SMMUv3::tlbSem
SMMUSemaphore tlbSem
Definition: smmu_v3.hh:119
gem5::SMMUv3::ipaLat
const Cycles ipaLat
Definition: smmu_v3.hh:136
gem5::SMMUv3::walkLat
const Cycles walkLat
Definition: smmu_v3.hh:137
gem5::SMMUTranslationProcess::TranslContext::stage1TranslGranule
uint8_t stage1TranslGranule
Definition: smmu_v3_transl.hh:76
gem5::ArmISA::cd
Bitfield< 32 > cd
Definition: misc_types.hh:251
gem5::ST_CFG_FMT_MASK
@ ST_CFG_FMT_MASK
Definition: smmu_v3_defs.hh:93
gem5::SMMUTranslationProcess::TranslResult
Definition: smmu_v3_transl.hh:89
gem5::ST_CFG_FMT_2LEVEL
@ ST_CFG_FMT_2LEVEL
Definition: smmu_v3_defs.hh:95
gem5::SMMUv3::tlbEnable
const bool tlbEnable
Definition: smmu_v3.hh:108
gem5::SMMUv3::smmuIfcSem
SMMUSemaphore smmuIfcSem
Definition: smmu_v3.hh:121
gem5::SMMUTranslationProcess::beginTransaction
void beginTransaction(const SMMUTranslRequest &req)
Definition: smmu_v3_transl.cc:110
gem5::SMMURegs::eventq_irq_cfg1
uint32_t eventq_irq_cfg1
Definition: smmu_v3_defs.hh:158
gem5::bits
constexpr T bits(T val, unsigned first, unsigned last)
Extract the bitfield from position 'first' to 'last' (inclusive) from 'val' and right justify it.
Definition: bitfield.hh:76
gem5::STE_CONFIG_STAGE1_AND_2
@ STE_CONFIG_STAGE1_AND_2
Definition: smmu_v3_defs.hh:62
gem5::SMMUv3DeviceInterface::dependentWrites
std::list< SMMUTranslationProcess * > dependentWrites[SMMU_MAX_TRANS_ID]
Definition: smmu_v3_deviceifc.hh:96
gem5::SMMUTranslationProcess::TranslContext::vmid
uint16_t vmid
Definition: smmu_v3_transl.hh:75
gem5::SMMUEvent::ipa
uint64_t ipa
Definition: smmu_v3_defs.hh:405
gem5::System::isTimingMode
bool isTimingMode() const
Is the system in timing mode?
Definition: system.hh:276
gem5::SMMUv3DeviceInterface::microTLBSem
SMMUSemaphore microTLBSem
Definition: smmu_v3_deviceifc.hh:72
gem5::SMMUv3DeviceInterface::pendingMemAccesses
unsigned pendingMemAccesses
Definition: smmu_v3_deviceifc.hh:87
gem5::SMMUv3::ipaCache
IPACache ipaCache
Definition: smmu_v3.hh:105
gem5::SMMUv3DeviceInterface
Definition: smmu_v3_deviceifc.hh:58
gem5::SMMUv3DeviceInterface::duplicateReqRemoved
SMMUSignal duplicateReqRemoved
Definition: smmu_v3_deviceifc.hh:93
gem5::WalkCache::Entry::permissions
uint8_t permissions
Definition: smmu_v3_caches.hh:311
gem5::StreamTableEntry::s1cdmax
Bitfield< 63, 59 > s1cdmax
Definition: smmu_v3_defs.hh:201
gem5::Addr
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:147
gem5::SMMUTranslRequest::size
unsigned size
Definition: smmu_v3_transl.hh:53
gem5::SMMUTLB::ALLOC_ANY_WAY
@ ALLOC_ANY_WAY
Definition: smmu_v3_caches.hh:104
gem5::STAGE1_CFG_2L_4K
@ STAGE1_CFG_2L_4K
Definition: smmu_v3_defs.hh:68
gem5::ST_CD_ADDR_SHIFT
@ ST_CD_ADDR_SHIFT
Definition: smmu_v3_defs.hh:75
gem5::SMMUProcess::scheduleWakeup
void scheduleWakeup(Tick when)
Definition: smmu_v3_proc.cc:200
name
const std::string & name()
Definition: trace.cc:49
gem5::SMMUTranslationProcess::request
SMMUTranslRequest request
Definition: smmu_v3_transl.hh:99
gem5::SMMUv3::smmuIfcLat
const Cycles smmuIfcLat
Definition: smmu_v3.hh:134
gem5::SMMUv3DeviceInterface::prefetchReserveLastWay
const bool prefetchReserveLastWay
Definition: smmu_v3_deviceifc.hh:90
gem5::ArmISA::va
Bitfield< 8 > va
Definition: misc_types.hh:275
gem5::SMMUEvent::type
uint16_t type
Definition: smmu_v3_defs.hh:399
gem5::Clocked::clockEdge
Tick clockEdge(Cycles cycles=Cycles(0)) const
Determine the tick when a cycle begins, by default the current one, but the argument also enables the...
Definition: clocked_object.hh:177
gem5::SMMUTranslationProcess::TranslResult::addr
Addr addr
Definition: smmu_v3_transl.hh:92
gem5::SMMUTranslationProcess::walkCacheUpdate
void walkCacheUpdate(Yield &yield, Addr va, Addr vaMask, Addr pa, unsigned stage, unsigned level, bool leaf, uint8_t permissions)
Definition: smmu_v3_transl.cc:692
gem5::SMMUTranslationProcess::ifcTLBUpdate
void ifcTLBUpdate(Yield &yield, const TranslResult &tr)
Definition: smmu_v3_transl.cc:443
gem5::SMMURegs::eventq_cons
uint32_t eventq_cons
Definition: smmu_v3_defs.hh:186
gem5::WalkCache::Entry::pa
Addr pa
Definition: smmu_v3_caches.hh:310
gem5::SMMUTranslationProcess::hazard4kRelease
void hazard4kRelease()
Definition: smmu_v3_transl.cc:1110
gem5::SMMUTranslationProcess::TranslResult::fault
FaultType fault
Definition: smmu_v3_transl.hh:91
gem5::SMMUProcess::doDelay
void doDelay(Yield &yield, Cycles cycles)
Definition: smmu_v3_proc.cc:131
gem5::ST_CFG_SPLIT_MASK
@ ST_CFG_SPLIT_MASK
Definition: smmu_v3_defs.hh:92
gem5::SMMUTranslationProcess::TranslContext::s2t0sz
uint8_t s2t0sz
Definition: smmu_v3_transl.hh:79
gem5::SMMUv3::system
const System & system
Definition: smmu_v3.hh:94
gem5::SMMUv3::walkCacheS2Levels
const unsigned walkCacheS2Levels
Definition: smmu_v3.hh:116
gem5::Drainable::signalDrainDone
void signalDrainDone() const
Signal that an object is drained.
Definition: drain.hh:305
gem5::SMMUTLB::ALLOC_LAST_WAY
@ ALLOC_LAST_WAY
Definition: smmu_v3_caches.hh:106
gem5::SMMUTranslationProcess::completeTransaction
void completeTransaction(Yield &yield, const TranslResult &tr)
Definition: smmu_v3_transl.cc:1227
gem5::SMMUv3DeviceInterface::mainTLB
SMMUTLB * mainTLB
Definition: smmu_v3_deviceifc.hh:66
gem5::SMMUv3DeviceInterface::xlateSlotsRemaining
unsigned xlateSlotsRemaining
Definition: smmu_v3_deviceifc.hh:86
gem5::SMMUTranslationProcess::smmuTLBUpdate
void smmuTLBUpdate(Yield &yield, const TranslResult &tr)
Definition: smmu_v3_transl.cc:480
gem5::STE_CONFIG_STAGE1_ONLY
@ STE_CONFIG_STAGE1_ONLY
Definition: smmu_v3_defs.hh:60
gem5::SMMUEvent
Definition: smmu_v3_defs.hh:397
gem5::SMMUEvent::streamId
uint32_t streamId
Definition: smmu_v3_defs.hh:402
smmu_v3.hh
gem5::SMMUv3DeviceInterface::duplicateReqs
std::list< SMMUTranslationProcess * > duplicateReqs
Definition: smmu_v3_deviceifc.hh:92
gem5::Packet::makeTimingResponse
void makeTimingResponse()
Definition: packet.hh:1049
gem5::WalkCache::store
void store(const Entry &incoming)
Definition: smmu_v3_caches.cc:1062
gem5::SMMUProcess::doRead
void doRead(Yield &yield, Addr addr, void *ptr, size_t size)
Definition: smmu_v3_proc.cc:75
gem5::ST_L2_SPAN_MASK
@ ST_L2_SPAN_MASK
Definition: smmu_v3_defs.hh:96
gem5::SMMUTranslationProcess::combineTranslations
TranslResult combineTranslations(const TranslResult &s1tr, const TranslResult &s2tr) const
Definition: smmu_v3_transl.cc:1028
gem5::SMMUv3::ipaSem
SMMUSemaphore ipaSem
Definition: smmu_v3.hh:123
gem5::SMMUTranslationProcess::walkCacheLookup
void walkCacheLookup(Yield &yield, const WalkCache::Entry *&walkEntry, Addr addr, uint16_t asid, uint16_t vmid, unsigned stage, unsigned level)
Definition: smmu_v3_transl.cc:651
gem5::SMMUTranslationProcess::TranslContext::asid
uint16_t asid
Definition: smmu_v3_transl.hh:74
gem5::PageTableOps::pageMask
virtual Addr pageMask(pte_t pte, unsigned level) const =0
gem5::ST_CFG_FMT_LINEAR
@ ST_CFG_FMT_LINEAR
Definition: smmu_v3_defs.hh:94
gem5::PageTableOps::lastLevel
virtual unsigned lastLevel() const =0
gem5::SMMUProcess::doWrite
void doWrite(Yield &yield, Addr addr, const void *ptr, size_t size)
Definition: smmu_v3_proc.cc:104
gem5::IPACache::Entry::pa
Addr pa
Definition: smmu_v3_caches.hh:219
gem5::IPACache::lookup
const Entry * lookup(Addr ipa, uint16_t vmid, bool updStats=true)
Definition: smmu_v3_caches.cc:663
gem5::SMMURegs::strtab_base
uint64_t strtab_base
Definition: smmu_v3_defs.hh:148
gem5::ACTION_SEND_REQ_FINAL
@ ACTION_SEND_REQ_FINAL
Definition: smmu_v3_proc.hh:62
gem5::SMMUTranslRequest
Definition: smmu_v3_transl.hh:50
gem5::SMMUv3::configCacheEnable
const bool configCacheEnable
Definition: smmu_v3.hh:109
gem5::SMMURegs::cr0
uint32_t cr0
Definition: smmu_v3_defs.hh:123
gem5::Coroutine::CallerType
CallerType: A reference to an object of this class will be passed to the coroutine task.
Definition: coroutine.hh:85
gem5::SMMUv3DeviceInterface::microTLBLat
const Cycles microTLBLat
Definition: smmu_v3_deviceifc.hh:75
gem5::SMMUTLB::ALLOC_ANY_BUT_LAST_WAY
@ ALLOC_ANY_BUT_LAST_WAY
Definition: smmu_v3_caches.hh:105
gem5::E_BASE_ENABLE_MASK
@ E_BASE_ENABLE_MASK
Definition: smmu_v3_defs.hh:105
gem5::SMMUTranslationProcess::ifcTLBLookup
bool ifcTLBLookup(Yield &yield, TranslResult &tr, bool &wasPrefetched)
Definition: smmu_v3_transl.cc:345
gem5::SMMUTranslationProcess::FAULT_NONE
@ FAULT_NONE
Definition: smmu_v3_transl.hh:84
gem5::SMMUProcess::doWaitForSignal
void doWaitForSignal(Yield &yield, SMMUSignal &sig)
Definition: smmu_v3_proc.cc:178
gem5::SMMUTranslationProcess::doReadConfig
void doReadConfig(Yield &yield, Addr addr, void *ptr, size_t size, uint32_t sid, uint32_t ssid)
Definition: smmu_v3_transl.cc:1464
gem5::SMMUv3::tlb
ARMArchTLB tlb
Definition: smmu_v3.hh:103
amba.hh
gem5::SMMUTranslationProcess::TranslContext::stage1Enable
bool stage1Enable
Definition: smmu_v3_transl.hh:71
gem5::SMMUTranslationProcess::findConfig
bool findConfig(Yield &yield, TranslContext &tc, TranslResult &tr)
Definition: smmu_v3_transl.cc:581
gem5::SMMUv3::configLat
const Cycles configLat
Definition: smmu_v3.hh:135
gem5::SMMUv3DeviceInterface::wrBufSlotsRemaining
unsigned wrBufSlotsRemaining
Definition: smmu_v3_deviceifc.hh:85
std::list
STL list class.
Definition: stl.hh:51
gem5::Packet::getAddr
Addr getAddr() const
Definition: packet.hh:781
gem5::SMMUv3::stats
gem5::SMMUv3::SMMUv3Stats stats
gem5::SMMUTranslationProcess::bypass
TranslResult bypass(Addr addr) const
Definition: smmu_v3_transl.cc:240
gem5::SMMUTranslationProcess::TranslContext::stage2Enable
bool stage2Enable
Definition: smmu_v3_transl.hh:72
gem5
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
Definition: decoder.cc:40
gem5::SMMUv3::scheduleDeviceRetries
void scheduleDeviceRetries()
Definition: smmu_v3.cc:217
gem5::IPACache::Entry::ipaMask
Addr ipaMask
Definition: smmu_v3_caches.hh:215
gem5::SMMUProcess::smmu
SMMUv3 & smmu
Definition: smmu_v3_proc.hh:110
gem5::SMMUTranslationProcess::microTLBLookup
bool microTLBLookup(Yield &yield, TranslResult &tr)
Definition: smmu_v3_transl.cc:314
gem5::SMMUv3::SMMUv3Stats::cdL1Fetches
statistics::Scalar cdL1Fetches
Definition: smmu_v3.hh:145
gem5::CD_TTB_SHIFT
@ CD_TTB_SHIFT
Definition: smmu_v3_defs.hh:76
gem5::SMMUTranslationProcess::doReadSTE
void doReadSTE(Yield &yield, StreamTableEntry &ste, uint32_t sid)
Definition: smmu_v3_transl.cc:1332
gem5::PageTableOps
Definition: smmu_v3_ptops.hh:48
gem5::SMMUTranslationProcess::translateStage2
TranslResult translateStage2(Yield &yield, Addr addr, bool final_tr)
Definition: smmu_v3_transl.cc:939
gem5::SMMUv3::walkSem
SMMUSemaphore walkSem
Definition: smmu_v3.hh:124
gem5::SMMUTLB::Entry
Definition: smmu_v3_caches.hh:109
gem5::SMMUTranslationProcess
Definition: smmu_v3_transl.hh:66
gem5::StreamTableEntry::s2ttb
s2ttb
Definition: smmu_v3_defs.hh:253
gem5::E_BASE_ADDR_MASK
@ E_BASE_ADDR_MASK
Definition: smmu_v3_defs.hh:106
gem5::STE_S2TTB_SHIFT
@ STE_S2TTB_SHIFT
Definition: smmu_v3_defs.hh:77
gem5::StreamTableEntry::s2t0sz
Bitfield< 37, 32 > s2t0sz
Definition: smmu_v3_defs.hh:234
gem5::SMMUTranslationProcess::TranslContext::stage2TranslGranule
uint8_t stage2TranslGranule
Definition: smmu_v3_transl.hh:77
gem5::SMMUTranslationProcess::completePrefetch
void completePrefetch(Yield &yield)
Definition: smmu_v3_transl.cc:1289
gem5::Packet::getSize
unsigned getSize() const
Definition: packet.hh:791
gem5::SMMUTranslationProcess::resumeTransaction
void resumeTransaction()
Definition: smmu_v3_transl.cc:118
gem5::SMMUTranslationProcess::ifc
SMMUv3DeviceInterface & ifc
Definition: smmu_v3_transl.hh:97
panic
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:177
gem5::Q_BASE_SIZE_MASK
@ Q_BASE_SIZE_MASK
Definition: smmu_v3_defs.hh:103
gem5::IPACache::Entry
Definition: smmu_v3_caches.hh:208
gem5::X86ISA::addr
Bitfield< 3 > addr
Definition: types.hh:84

Generated on Tue Sep 21 2021 12:25:15 for gem5 by doxygen 1.8.17