gem5  v19.0.0.0
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
smmu_v3_transl.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2013, 2018-2019 ARM Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions are
16  * met: redistributions of source code must retain the above copyright
17  * notice, this list of conditions and the following disclaimer;
18  * redistributions in binary form must reproduce the above copyright
19  * notice, this list of conditions and the following disclaimer in the
20  * documentation and/or other materials provided with the distribution;
21  * neither the name of the copyright holders nor the names of its
22  * contributors may be used to endorse or promote products derived from
23  * this software without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  *
37  * Authors: Stan Czerniawski
38  */
39 
41 
42 #include "debug/SMMUv3.hh"
43 #include "debug/SMMUv3Hazard.hh"
44 #include "dev/arm/amba.hh"
45 #include "dev/arm/smmu_v3.hh"
46 #include "sim/system.hh"
47 
50 {
52  req.addr = pkt->getAddr();
53  req.size = pkt->getSize();
54  req.sid = pkt->req->streamId();
55  req.ssid = pkt->req->hasSubstreamId() ?
56  pkt->req->substreamId() : 0;
57  req.isWrite = pkt->isWrite();
58  req.isPrefetch = false;
59  req.isAtsRequest = ats;
60  req.pkt = pkt;
61 
62  return req;
63 }
64 
67 {
69  req.addr = addr;
70  req.size = 0;
71  req.sid = sid;
72  req.ssid = ssid;
73  req.isWrite = false;
74  req.isPrefetch = true;
75  req.isAtsRequest = false;
76  req.pkt = NULL;
77 
78  return req;
79 }
80 
82  SMMUv3 &_smmu, SMMUv3SlaveInterface &_ifc)
83  :
84  SMMUProcess(name, _smmu),
85  ifc(_ifc)
86 {
87  // Decrease number of pending translation slots on the slave interface
88  assert(ifc.xlateSlotsRemaining > 0);
90 
92  reinit();
93 }
94 
96 {
97  // Increase number of pending translation slots on the slave interface
98  assert(ifc.pendingMemAccesses > 0);
100 
101  // If no more SMMU memory accesses are pending,
102  // signal SMMU Slave Interface as drained
103  if (ifc.pendingMemAccesses == 0) {
105  }
106 }
107 
108 void
110 {
111  request = req;
112 
113  reinit();
114 }
115 
116 void
118 {
119  assert(smmu.system.isTimingMode());
120 
121  assert(!"Stalls are broken");
122 
123  Tick resumeTick = curTick();
124 
125  (void) resumeTick;
126  DPRINTF(SMMUv3, "Resume at tick = %d. Fault duration = %d (%.3fus)\n",
127  resumeTick, resumeTick-faultTick, (resumeTick-faultTick) / 1e6);
128 
130 
132 }
133 
134 void
136 {
137  // Hack:
138  // The coroutine starts running as soon as it's created.
139  // But we need to wait for request data esp. in atomic mode.
140  SMMUAction a;
142  a.pkt = NULL;
143  yield(a);
144 
145  const Addr next4k = (request.addr + 0x1000ULL) & ~0xfffULL;
146 
147  if ((request.addr + request.size) > next4k)
148  panic("Transaction crosses 4k boundary (addr=%#x size=%#x)!\n",
150 
151 
152  unsigned numSlaveBeats = request.isWrite ?
153  (request.size + (ifc.portWidth - 1)) / ifc.portWidth : 1;
154 
156  doDelay(yield, Cycles(numSlaveBeats));
158 
159 
160  recvTick = curTick();
161 
162  if (!(smmu.regs.cr0 & CR0_SMMUEN_MASK)) {
163  // SMMU disabled
164  doDelay(yield, Cycles(1));
166  return;
167  }
168 
169  TranslResult tr;
170  bool wasPrefetched = false;
171 
172  if (request.isPrefetch) {
173  // Abort prefetch if:
174  // - there's already a transaction looking up the same 4k page, OR
175  // - requested address is already in the TLB.
176  if (hazard4kCheck() || ifcTLBLookup(yield, tr, wasPrefetched))
177  completePrefetch(yield); // this never returns
178 
180 
181  tr = smmuTranslation(yield);
182 
183  if (tr.fault == FAULT_NONE)
184  ifcTLBUpdate(yield, tr);
185 
186  hazard4kRelease();
187 
188  completePrefetch(yield);
189  } else {
191 
192  if (!microTLBLookup(yield, tr)) {
193  bool hit = ifcTLBLookup(yield, tr, wasPrefetched);
194  if (!hit) {
195  while (!hit && hazard4kCheck()) {
196  hazard4kHold(yield);
197  hit = ifcTLBLookup(yield, tr, wasPrefetched);
198  }
199  }
200 
201  // Issue prefetch if:
202  // - there was a TLB hit and the entry was prefetched, OR
203  // - TLB miss was successfully serviced
204  if (hit) {
205  if (wasPrefetched)
206  issuePrefetch(next4k);
207  } else {
209 
210  tr = smmuTranslation(yield);
211 
212  if (tr.fault == FAULT_NONE) {
213  ifcTLBUpdate(yield, tr);
214 
215  issuePrefetch(next4k);
216  }
217 
218  hazard4kRelease();
219  }
220 
221  if (tr.fault == FAULT_NONE)
222  microTLBUpdate(yield, tr);
223  }
224 
225  hazardIdHold(yield);
226  hazardIdRelease();
227 
228  if (tr.fault != FAULT_NONE)
229  panic("Translation Fault (addr=%#x, size=%#x, sid=%d, ssid=%d, "
230  "isWrite=%d, isPrefetch=%d, isAtsRequest=%d)\n",
233 
234  completeTransaction(yield, tr);
235  }
236 }
237 
// Build an identity ("bypass") translation result: output address equals the
// input address, no address mask, writable.  Used when the SMMU is disabled
// or the STE selects bypass (see the STE_CONFIG_BYPASS case in findConfig and
// the !stage1Enable/!stage2Enable path in smmuTranslation).
// NOTE(review): the Doxygen scrape dropped rendered line 239 (the function
// signature) — presumably `SMMUTranslationProcess::bypass(Addr addr) const`,
// given the call `tr = bypass(request.addr)`; confirm against upstream gem5.
240 {
241  TranslResult tr;
242  tr.fault = FAULT_NONE;
243  tr.addr = addr;
// addrMask = 0: no bits of the input address are masked off — 1:1 mapping.
244  tr.addrMask = 0;
245  tr.writable = 1;
246 
247  return tr;
248 }
249 
252 {
253  TranslResult tr;
254 
255  // Need SMMU credit to proceed
256  doSemaphoreDown(yield, smmu.transSem);
257 
258  // Simulate pipelined IFC->SMMU link
260  doDelay(yield, Cycles(1)); // serialize transactions
262  doDelay(yield, smmu.ifcSmmuLat - Cycles(1)); // remaining pipeline delay
263 
264  bool haveConfig = true;
265  if (!configCacheLookup(yield, context)) {
266  if(findConfig(yield, context, tr)) {
267  configCacheUpdate(yield, context);
268  } else {
269  haveConfig = false;
270  }
271  }
272 
273  if (haveConfig && !smmuTLBLookup(yield, tr)) {
274  // SMMU main TLB miss
275 
276  // Need PTW slot to proceed
277  doSemaphoreDown(yield, smmu.ptwSem);
278 
279  // Page table walk
280  Tick ptwStartTick = curTick();
281 
282  if (context.stage1Enable) {
283  tr = translateStage1And2(yield, request.addr);
284  } else if (context.stage2Enable) {
285  tr = translateStage2(yield, request.addr, true);
286  } else {
287  tr = bypass(request.addr);
288  }
289 
291  smmu.ptwTimeDist.sample(curTick() - ptwStartTick);
292 
293  // Free PTW slot
295 
296  if (tr.fault == FAULT_NONE)
297  smmuTLBUpdate(yield, tr);
298  }
299 
300  // Simulate pipelined SMMU->SLAVE INTERFACE link
302  doDelay(yield, Cycles(1)); // serialize transactions
304  doDelay(yield, smmu.smmuIfcLat - Cycles(1)); // remaining pipeline delay
305 
306  // return SMMU credit
308 
309  return tr;
310 }
311 
312 bool
314 {
315  if (!ifc.microTLBEnable)
316  return false;
317 
319  doDelay(yield, ifc.microTLBLat);
320  const SMMUTLB::Entry *e =
323 
324  if (!e) {
325  DPRINTF(SMMUv3, "micro TLB miss vaddr=%#x sid=%#x ssid=%#x\n",
327 
328  return false;
329  }
330 
331  DPRINTF(SMMUv3,
332  "micro TLB hit vaddr=%#x amask=%#x sid=%#x ssid=%#x paddr=%#x\n",
334 
335  tr.fault = FAULT_NONE;
336  tr.addr = e->pa + (request.addr & ~e->vaMask);;
337  tr.addrMask = e->vaMask;
338  tr.writable = e->permissions;
339 
340  return true;
341 }
342 
343 bool
345  bool &wasPrefetched)
346 {
347  if (!ifc.mainTLBEnable)
348  return false;
349 
351  doDelay(yield, ifc.mainTLBLat);
352  const SMMUTLB::Entry *e =
355 
356  if (!e) {
357  DPRINTF(SMMUv3,
358  "SLAVE Interface TLB miss vaddr=%#x sid=%#x ssid=%#x\n",
360 
361  return false;
362  }
363 
364  DPRINTF(SMMUv3,
365  "SLAVE Interface TLB hit vaddr=%#x amask=%#x sid=%#x ssid=%#x "
366  "paddr=%#x\n", request.addr, e->vaMask, request.sid,
367  request.ssid, e->pa);
368 
369  tr.fault = FAULT_NONE;
370  tr.addr = e->pa + (request.addr & ~e->vaMask);;
371  tr.addrMask = e->vaMask;
372  tr.writable = e->permissions;
373  wasPrefetched = e->prefetched;
374 
375  return true;
376 }
377 
378 bool
380 {
381  if (!smmu.tlbEnable)
382  return false;
383 
384  doSemaphoreDown(yield, smmu.tlbSem);
385  doDelay(yield, smmu.tlbLat);
386  const ARMArchTLB::Entry *e =
389 
390  if (!e) {
391  DPRINTF(SMMUv3, "SMMU TLB miss vaddr=%#x asid=%#x vmid=%#x\n",
393 
394  return false;
395  }
396 
397  DPRINTF(SMMUv3,
398  "SMMU TLB hit vaddr=%#x amask=%#x asid=%#x vmid=%#x paddr=%#x\n",
400 
401  tr.fault = FAULT_NONE;
402  tr.addr = e->pa + (request.addr & ~e->vaMask);;
403  tr.addrMask = e->vaMask;
404  tr.writable = e->permissions;
405 
406  return true;
407 }
408 
409 void
411  const TranslResult &tr)
412 {
413  assert(tr.fault == FAULT_NONE);
414 
415  if (!ifc.microTLBEnable)
416  return;
417 
419  e.valid = true;
420  e.prefetched = false;
421  e.sid = request.sid;
422  e.ssid = request.ssid;
423  e.vaMask = tr.addrMask;
424  e.va = request.addr & e.vaMask;
425  e.pa = tr.addr & e.vaMask;
426  e.permissions = tr.writable;
427  e.asid = context.asid;
428  e.vmid = context.vmid;
429 
431 
432  DPRINTF(SMMUv3,
433  "micro TLB upd vaddr=%#x amask=%#x paddr=%#x sid=%#x ssid=%#x\n",
434  e.va, e.vaMask, e.pa, e.sid, e.ssid);
435 
437 
439 }
440 
441 void
443  const TranslResult &tr)
444 {
445  assert(tr.fault == FAULT_NONE);
446 
447  if (!ifc.mainTLBEnable)
448  return;
449 
451  e.valid = true;
453  e.sid = request.sid;
454  e.ssid = request.ssid;
455  e.vaMask = tr.addrMask;
456  e.va = request.addr & e.vaMask;
457  e.pa = tr.addr & e.vaMask;
458  e.permissions = tr.writable;
459  e.asid = context.asid;
460  e.vmid = context.vmid;
461 
464  alloc = request.isPrefetch ?
466 
468 
469  DPRINTF(SMMUv3,
470  "SLAVE Interface upd vaddr=%#x amask=%#x paddr=%#x sid=%#x "
471  "ssid=%#x\n", e.va, e.vaMask, e.pa, e.sid, e.ssid);
472 
473  ifc.mainTLB->store(e, alloc);
474 
476 }
477 
478 void
480  const TranslResult &tr)
481 {
482  assert(tr.fault == FAULT_NONE);
483 
484  if (!smmu.tlbEnable)
485  return;
486 
488  e.valid = true;
489  e.vaMask = tr.addrMask;
490  e.va = request.addr & e.vaMask;
491  e.asid = context.asid;
492  e.vmid = context.vmid;
493  e.pa = tr.addr & e.vaMask;
494  e.permissions = tr.writable;
495 
496  doSemaphoreDown(yield, smmu.tlbSem);
497 
498  DPRINTF(SMMUv3,
499  "SMMU TLB upd vaddr=%#x amask=%#x paddr=%#x asid=%#x vmid=%#x\n",
500  e.va, e.vaMask, e.pa, e.asid, e.vmid);
501 
502  smmu.tlb.store(e);
503 
505 }
506 
507 bool
509 {
510  if (!smmu.configCacheEnable)
511  return false;
512 
514  doDelay(yield, smmu.configLat);
515  const ConfigCache::Entry *e =
518 
519  if (!e) {
520  DPRINTF(SMMUv3, "Config miss sid=%#x ssid=%#x\n",
522 
523  return false;
524  }
525 
526  DPRINTF(SMMUv3, "Config hit sid=%#x ssid=%#x ttb=%#08x asid=%#x\n",
527  request.sid, request.ssid, e->ttb0, e->asid);
528 
529  tc.stage1Enable = e->stage1_en;
530  tc.stage2Enable = e->stage2_en;
531 
532  tc.ttb0 = e->ttb0;
533  tc.ttb1 = e->ttb1;
534  tc.asid = e->asid;
535  tc.httb = e->httb;
536  tc.vmid = e->vmid;
537 
540 
541  tc.t0sz = e->t0sz;
542  tc.s2t0sz = e->s2t0sz;
543 
544  return true;
545 }
546 
547 void
549  const TranslContext &tc)
550 {
551  if (!smmu.configCacheEnable)
552  return;
553 
555  e.valid = true;
556  e.sid = request.sid;
557  e.ssid = request.ssid;
558  e.stage1_en = tc.stage1Enable;
559  e.stage2_en = tc.stage2Enable;
560  e.ttb0 = tc.ttb0;
561  e.ttb1 = tc.ttb1;
562  e.asid = tc.asid;
563  e.httb = tc.httb;
564  e.vmid = tc.vmid;
567  e.t0sz = tc.t0sz;
568  e.s2t0sz = tc.s2t0sz;
569 
571 
572  DPRINTF(SMMUv3, "Config upd sid=%#x ssid=%#x\n", e.sid, e.ssid);
573 
575 
577 }
578 
579 bool
581  TranslContext &tc,
582  TranslResult &tr)
583 {
584  tc.stage1Enable = false;
585  tc.stage2Enable = false;
586 
587  StreamTableEntry ste;
588  doReadSTE(yield, ste, request.sid);
589 
590  switch (ste.dw0.config) {
591  case STE_CONFIG_BYPASS:
592  break;
593 
595  tc.stage1Enable = true;
596  break;
597 
599  tc.stage2Enable = true;
600  break;
601 
603  tc.stage1Enable = true;
604  tc.stage2Enable = true;
605  break;
606 
607  default:
608  panic("Bad or unimplemented STE config %d\n",
609  ste.dw0.config);
610  }
611 
612 
613  // Establish stage 2 context first since
614  // Context Descriptors can be in IPA space.
615  if (tc.stage2Enable) {
616  tc.httb = ste.dw3.s2ttb << STE_S2TTB_SHIFT;
617  tc.vmid = ste.dw2.s2vmid;
618  tc.stage2TranslGranule = ste.dw2.s2tg;
619  tc.s2t0sz = ste.dw2.s2t0sz;
620  } else {
621  tc.httb = 0xdeadbeef;
622  tc.vmid = 0;
624  tc.s2t0sz = 0;
625  }
626 
627 
628  // Now fetch stage 1 config.
629  if (context.stage1Enable) {
631  doReadCD(yield, cd, ste, request.sid, request.ssid);
632 
633  tc.ttb0 = cd.dw1.ttb0 << CD_TTB_SHIFT;
634  tc.ttb1 = cd.dw2.ttb1 << CD_TTB_SHIFT;
635  tc.asid = cd.dw0.asid;
636  tc.stage1TranslGranule = cd.dw0.tg0;
637  tc.t0sz = cd.dw0.t0sz;
638  } else {
639  tc.ttb0 = 0xcafebabe;
640  tc.ttb1 = 0xcafed00d;
641  tc.asid = 0;
643  tc.t0sz = 0;
644  }
645 
646  return true;
647 }
648 
649 void
651  Yield &yield,
652  const WalkCache::Entry *&walkEntry,
653  Addr addr, uint16_t asid, uint16_t vmid,
654  unsigned stage, unsigned level)
655 {
656  const char *indent = stage==2 ? " " : "";
657  (void) indent; // this is only used in DPRINTFs
658 
659  const PageTableOps *pt_ops =
660  stage == 1 ?
663 
664  unsigned walkCacheLevels =
666  (stage == 1 ? smmu.walkCacheS1Levels : smmu.walkCacheS2Levels) :
667  0;
668 
669  if ((1 << level) & walkCacheLevels) {
670  doSemaphoreDown(yield, smmu.walkSem);
671  doDelay(yield, smmu.walkLat);
672 
673  walkEntry = smmu.walkCache.lookup(addr, pt_ops->walkMask(level),
674  asid, vmid, stage, level);
675 
676  if (walkEntry) {
677  DPRINTF(SMMUv3, "%sWalkCache hit va=%#x asid=%#x vmid=%#x "
678  "base=%#x (S%d, L%d)\n",
679  indent, addr, asid, vmid, walkEntry->pa, stage, level);
680  } else {
681  DPRINTF(SMMUv3, "%sWalkCache miss va=%#x asid=%#x vmid=%#x "
682  "(S%d, L%d)\n",
683  indent, addr, asid, vmid, stage, level);
684  }
685 
687  }
688 }
689 
690 void
692  Addr vaMask, Addr pa,
693  unsigned stage, unsigned level,
694  bool leaf, uint8_t permissions)
695 {
696  unsigned walkCacheLevels =
698 
699  if (smmu.walkCacheEnable && ((1<<level) & walkCacheLevels)) {
701  e.valid = true;
702  e.va = va;
703  e.vaMask = vaMask;
704  e.asid = stage==1 ? context.asid : 0;
705  e.vmid = context.vmid;
706  e.stage = stage;
707  e.level = level;
708  e.leaf = leaf;
709  e.pa = pa;
710  e.permissions = permissions;
711 
712  doSemaphoreDown(yield, smmu.walkSem);
713 
714  DPRINTF(SMMUv3, "%sWalkCache upd va=%#x mask=%#x asid=%#x vmid=%#x "
715  "tpa=%#x leaf=%s (S%d, L%d)\n",
716  e.stage==2 ? " " : "",
717  e.va, e.vaMask, e.asid, e.vmid,
718  e.pa, e.leaf, e.stage, e.level);
719 
720  smmu.walkCache.store(e);
721 
723  }
724 }
725 
726 /*
727  * Please note:
728  * This does not deal with the case where stage 1 page size
729  * is larger than stage 2 page size.
730  */
733  const PageTableOps *pt_ops,
734  unsigned level, Addr walkPtr)
735 {
736  PageTableOps::pte_t pte = 0;
737 
738  doSemaphoreDown(yield, smmu.cycleSem);
739  doDelay(yield, Cycles(1));
741 
742  for (; level <= pt_ops->lastLevel(); level++) {
743  Addr pte_addr = walkPtr + pt_ops->index(addr, level);
744 
745  DPRINTF(SMMUv3, "Fetching S1 L%d PTE from pa=%#08x\n",
746  level, pte_addr);
747 
748  doReadPTE(yield, addr, pte_addr, &pte, 1, level);
749 
750  DPRINTF(SMMUv3, "Got S1 L%d PTE=%#x from pa=%#08x\n",
751  level, pte, pte_addr);
752 
753  doSemaphoreDown(yield, smmu.cycleSem);
754  doDelay(yield, Cycles(1));
756 
757  bool valid = pt_ops->isValid(pte, level);
758  bool leaf = pt_ops->isLeaf(pte, level);
759 
760  if (!valid) {
761  DPRINTF(SMMUv3, "S1 PTE not valid - fault\n");
762 
763  TranslResult tr;
765  return tr;
766  }
767 
768  if (valid && leaf && request.isWrite &&
769  !pt_ops->isWritable(pte, level, false))
770  {
771  DPRINTF(SMMUv3, "S1 page not writable - fault\n");
772 
773  TranslResult tr;
774  tr.fault = FAULT_PERMISSION;
775  return tr;
776  }
777 
778  walkPtr = pt_ops->nextLevelPointer(pte, level);
779 
780  if (leaf)
781  break;
782 
783  if (context.stage2Enable) {
784  TranslResult s2tr = translateStage2(yield, walkPtr, false);
785  if (s2tr.fault != FAULT_NONE)
786  return s2tr;
787 
788  walkPtr = s2tr.addr;
789  }
790 
791  walkCacheUpdate(yield, addr, pt_ops->walkMask(level), walkPtr,
792  1, level, leaf, 0);
793  }
794 
795  TranslResult tr;
796  tr.fault = FAULT_NONE;
797  tr.addrMask = pt_ops->pageMask(pte, level);
798  tr.addr = walkPtr + (addr & ~tr.addrMask);
799  tr.writable = pt_ops->isWritable(pte, level, false);
800 
801  if (context.stage2Enable) {
802  TranslResult s2tr = translateStage2(yield, tr.addr, true);
803  if (s2tr.fault != FAULT_NONE)
804  return s2tr;
805 
806  tr = combineTranslations(tr, s2tr);
807  }
808 
809  walkCacheUpdate(yield, addr, tr.addrMask, walkPtr,
810  1, level, true, tr.writable);
811 
812  return tr;
813 }
814 
817  const PageTableOps *pt_ops,
818  unsigned level, Addr walkPtr)
819 {
821 
822  doSemaphoreDown(yield, smmu.cycleSem);
823  doDelay(yield, Cycles(1));
825 
826  for (; level <= pt_ops->lastLevel(); level++) {
827  Addr pte_addr = walkPtr + pt_ops->index(addr, level);
828 
829  DPRINTF(SMMUv3, " Fetching S2 L%d PTE from pa=%#08x\n",
830  level, pte_addr);
831 
832  doReadPTE(yield, addr, pte_addr, &pte, 2, level);
833 
834  DPRINTF(SMMUv3, " Got S2 L%d PTE=%#x from pa=%#08x\n",
835  level, pte, pte_addr);
836 
837  doSemaphoreDown(yield, smmu.cycleSem);
838  doDelay(yield, Cycles(1));
840 
841  bool valid = pt_ops->isValid(pte, level);
842  bool leaf = pt_ops->isLeaf(pte, level);
843 
844  if (!valid) {
845  DPRINTF(SMMUv3, " S2 PTE not valid - fault\n");
846 
847  TranslResult tr;
849  return tr;
850  }
851 
852  if (valid && leaf && request.isWrite &&
853  !pt_ops->isWritable(pte, level, true))
854  {
855  DPRINTF(SMMUv3, " S2 PTE not writable = fault\n");
856 
857  TranslResult tr;
858  tr.fault = FAULT_PERMISSION;
859  return tr;
860  }
861 
862  walkPtr = pt_ops->nextLevelPointer(pte, level);
863 
864  if (final_tr || smmu.walkCacheNonfinalEnable)
865  walkCacheUpdate(yield, addr, pt_ops->walkMask(level), walkPtr,
866  2, level, leaf,
867  leaf ? pt_ops->isWritable(pte, level, true) : 0);
868  if (leaf)
869  break;
870  }
871 
872  TranslResult tr;
873  tr.fault = FAULT_NONE;
874  tr.addrMask = pt_ops->pageMask(pte, level);
875  tr.addr = walkPtr + (addr & ~tr.addrMask);
876  tr.writable = pt_ops->isWritable(pte, level, true);
877 
878  return tr;
879 }
880 
883 {
884  const PageTableOps *pt_ops =
886 
887  const WalkCache::Entry *walk_ep = NULL;
888  unsigned level;
889 
890  // Level here is actually (level+1) so we can count down
891  // to 0 using unsigned int.
892  for (level = pt_ops->lastLevel() + 1;
893  level > pt_ops->firstLevel(context.t0sz);
894  level--)
895  {
896  walkCacheLookup(yield, walk_ep, addr,
897  context.asid, context.vmid, 1, level-1);
898 
899  if (walk_ep)
900  break;
901  }
902 
903  // Correct level (see above).
904  level -= 1;
905 
906  TranslResult tr;
907  if (walk_ep) {
908  if (walk_ep->leaf) {
909  tr.fault = FAULT_NONE;
910  tr.addr = walk_ep->pa + (addr & ~walk_ep->vaMask);
911  tr.addrMask = walk_ep->vaMask;
912  tr.writable = walk_ep->permissions;
913  } else {
914  tr = walkStage1And2(yield, addr, pt_ops, level+1, walk_ep->pa);
915  }
916  } else {
917  Addr table_addr = context.ttb0;
918  if (context.stage2Enable) {
919  TranslResult s2tr = translateStage2(yield, table_addr, false);
920  if (s2tr.fault != FAULT_NONE)
921  return s2tr;
922 
923  table_addr = s2tr.addr;
924  }
925 
926  tr = walkStage1And2(yield, addr, pt_ops,
927  pt_ops->firstLevel(context.t0sz),
928  table_addr);
929  }
930 
931  if (tr.fault == FAULT_NONE)
932  DPRINTF(SMMUv3, "Translated vaddr %#x to paddr %#x\n", addr, tr.addr);
933 
934  return tr;
935 }
936 
939 {
940  const PageTableOps *pt_ops =
942 
943  const IPACache::Entry *ipa_ep = NULL;
944  if (smmu.ipaCacheEnable) {
945  doSemaphoreDown(yield, smmu.ipaSem);
946  doDelay(yield, smmu.ipaLat);
947  ipa_ep = smmu.ipaCache.lookup(addr, context.vmid);
949  }
950 
951  if (ipa_ep) {
952  TranslResult tr;
953  tr.fault = FAULT_NONE;
954  tr.addr = ipa_ep->pa + (addr & ~ipa_ep->ipaMask);
955  tr.addrMask = ipa_ep->ipaMask;
956  tr.writable = ipa_ep->permissions;
957 
958  DPRINTF(SMMUv3, " IPACache hit ipa=%#x vmid=%#x pa=%#x\n",
959  addr, context.vmid, tr.addr);
960 
961  return tr;
962  } else if (smmu.ipaCacheEnable) {
963  DPRINTF(SMMUv3, " IPACache miss ipa=%#x vmid=%#x\n",
964  addr, context.vmid);
965  }
966 
967  const WalkCache::Entry *walk_ep = NULL;
968  unsigned level = pt_ops->firstLevel(context.s2t0sz);
969 
970  if (final_tr || smmu.walkCacheNonfinalEnable) {
971  // Level here is actually (level+1) so we can count down
972  // to 0 using unsigned int.
973  for (level = pt_ops->lastLevel() + 1;
974  level > pt_ops->firstLevel(context.s2t0sz);
975  level--)
976  {
977  walkCacheLookup(yield, walk_ep, addr,
978  0, context.vmid, 2, level-1);
979 
980  if (walk_ep)
981  break;
982  }
983 
984  // Correct level (see above).
985  level -= 1;
986  }
987 
988  TranslResult tr;
989  if (walk_ep) {
990  if (walk_ep->leaf) {
991  tr.fault = FAULT_NONE;
992  tr.addr = walk_ep->pa + (addr & ~walk_ep->vaMask);
993  tr.addrMask = walk_ep->vaMask;
994  tr.writable = walk_ep->permissions;
995  } else {
996  tr = walkStage2(yield, addr, final_tr, pt_ops,
997  level + 1, walk_ep->pa);
998  }
999  } else {
1000  tr = walkStage2(yield, addr, final_tr, pt_ops,
1001  pt_ops->firstLevel(context.s2t0sz),
1002  context.httb);
1003  }
1004 
1005  if (tr.fault == FAULT_NONE)
1006  DPRINTF(SMMUv3, " Translated %saddr %#x to paddr %#x\n",
1007  context.stage1Enable ? "ip" : "v", addr, tr.addr);
1008 
1009  if (smmu.ipaCacheEnable) {
1011  e.valid = true;
1012  e.ipaMask = tr.addrMask;
1013  e.ipa = addr & e.ipaMask;
1014  e.pa = tr.addr & tr.addrMask;
1015  e.permissions = tr.writable;
1016  e.vmid = context.vmid;
1017 
1018  doSemaphoreDown(yield, smmu.ipaSem);
1019  smmu.ipaCache.store(e);
1021  }
1022 
1023  return tr;
1024 }
1025 
// Merge a stage-1 and a stage-2 translation result into the final result of
// a nested (stage 1 + stage 2) translation.
// NOTE(review): the scrape dropped rendered line 1027 with the start of the
// signature (first parameter `const TranslResult &s1tr`) — confirm upstream.
1028  const TranslResult &s2tr) const
1029 {
// A stage-2 fault dominates: report it as-is.
1030  if (s2tr.fault != FAULT_NONE)
1031  return s2tr;
1032 
// Callers only combine after a successful stage-1 walk.
1033  assert(s1tr.fault == FAULT_NONE);
1034 
1035  TranslResult tr;
1036  tr.fault = FAULT_NONE;
// Final PA comes from stage 2 (stage 1 produced the IPA that stage 2 mapped).
1037  tr.addr = s2tr.addr;
// OR of the masks: the combined mapping is only valid over the smaller
// (more-masked) of the two page sizes.
1038  tr.addrMask = s1tr.addrMask | s2tr.addrMask;
// Writable only if both stages permit writes.
1039  tr.writable = s1tr.writable & s2tr.writable;
1040 
1041  return tr;
1042 }
1043 
// Return true if another in-flight request registered on this slave
// interface targets the same 4 KiB page as the current request.
// Used to suppress redundant prefetches/walks for the same page (see main()).
// NOTE(review): rendered line 1045 (the qualified signature) was dropped by
// the extraction — confirm `SMMUTranslationProcess::hazard4kCheck()` upstream.
1044 bool
1046 {
// Align the request address down to its 4 KiB page base.
1047  Addr addr4k = request.addr & ~0xfffULL;
1048 
1049  for (auto it = ifc.duplicateReqs.begin();
1050  it != ifc.duplicateReqs.end();
1051  ++it)
1052  {
1053  Addr other4k = (*it)->request.addr & ~0xfffULL;
1054  if (addr4k == other4k)
1055  return true;
1056  }
1057 
1058  return false;
1059 }
1060 
// Register this request in the interface's duplicate-request list so later
// requests to the same 4 KiB page can detect the hazard (hazard4kCheck) and
// wait on it (hazard4kHold).  Must be balanced by hazard4kRelease().
// NOTE(review): rendered line 1062 (the qualified signature) was dropped by
// the extraction.
1061 void
1063 {
1064  DPRINTF(SMMUv3Hazard, "4kReg: p=%p a4k=%#x\n",
1065  this, request.addr & ~0xfffULL);
1066 
1067  ifc.duplicateReqs.push_back(this);
1068 }
1069 
// Block (via the coroutine yield) while any request registered EARLIER in
// ifc.duplicateReqs targets the same 4 KiB page.  Re-scans the list from the
// start after every wakeup because the list may have changed while waiting.
// NOTE(review): rendered line 1071 (the qualified signature) was dropped.
1070 void
1072 {
1073  Addr addr4k = request.addr & ~0xfffULL;
1074 
1075  bool found_hazard;
1076 
1077  do {
1078  found_hazard = false;
1079 
// Only entries ahead of us in the list (*it != this) can be hazards;
// ordering in the list encodes arrival order.
1080  for (auto it = ifc.duplicateReqs.begin();
1081  it!=ifc.duplicateReqs.end() && *it!=this;
1082  ++it)
1083  {
1084  Addr other4k = (*it)->request.addr & ~0xfffULL;
1085 
1086  DPRINTF(SMMUv3Hazard, "4kHold: p=%p a4k=%#x Q: p=%p a4k=%#x\n",
1087  this, addr4k, *it, other4k);
1088 
1089  if (addr4k == other4k) {
1090  DPRINTF(SMMUv3Hazard,
1091  "4kHold: p=%p a4k=%#x WAIT on p=%p a4k=%#x\n",
1092  this, addr4k, *it, other4k);
1093 
// NOTE(review): the scrape dropped rendered line 1094 here — from the
// bracketing WAIT/RESUME DPRINTFs and the comment below, it is presumably
// a doWaitForSignal(yield, ifc.duplicateReqRemoved) call; confirm upstream.
1095 
1096  DPRINTF(SMMUv3Hazard, "4kHold: p=%p a4k=%#x RESUME\n",
1097  this, addr4k);
1098 
1099  // This is to avoid checking *it!=this after doWaitForSignal()
1100  // since it could have been deleted.
1101  found_hazard = true;
1102  break;
1103  }
1104  }
1105  } while (found_hazard);
1106 }
1107 
// Remove this request from ifc.duplicateReqs (registered by
// hazard4kRegister) so requests held in hazard4kHold can make progress.
// Panics if the entry is missing, i.e. on unbalanced register/release.
// NOTE(review): the scrape dropped rendered lines 1109 (signature), 1114
// (presumably the iterator declaration for `it` used below) and 1125
// (presumably a broadcast on ifc.duplicateReqRemoved that wakes holders —
// cf. the doWaitForSignal note in hazard4kHold); confirm upstream.
1108 void
1110 {
1111  DPRINTF(SMMUv3Hazard, "4kRel: p=%p a4k=%#x\n",
1112  this, request.addr & ~0xfffULL);
1113 
1115 
// Linear search for our own entry; list is small (in-flight requests only).
1116  for (it = ifc.duplicateReqs.begin(); it != ifc.duplicateReqs.end(); ++it)
1117  if (*it == this)
1118  break;
1119 
1120  if (it == ifc.duplicateReqs.end())
1121  panic("hazard4kRelease: request not found");
1122 
1123  ifc.duplicateReqs.erase(it);
1124 
1126 }
1127 
// Register this request in the per-order-ID dependency queue so that
// transactions with the same AMBA order ID complete in order (see
// hazardIdHold / hazardIdRelease).  Reads and writes use separate queues,
// selected by request.isWrite.
// NOTE(review): the scrape dropped rendered lines 1129 (signature) and
// 1137/1139 — presumably the declaration/selection of `depReqs` as a
// reference to ifc's write- or read-dependency queue indexed by orderId
// (the `request.isWrite ?` fragment below is its surviving middle line);
// confirm against upstream gem5.
1128 void
1130 {
1131  auto orderId = AMBA::orderId(request.pkt);
1132 
1133  DPRINTF(SMMUv3Hazard, "IdReg: p=%p oid=%d\n", this, orderId);
1134 
1135  assert(orderId < SMMU_MAX_TRANS_ID);
1136 
1138  request.isWrite ?
1140  depReqs.push_back(this);
1141 }
1142 
// Block while any EARLIER request in the same order-ID dependency queue has
// the same AMBA order ID, enforcing in-order completion per ID.  Mirrors the
// structure of hazard4kHold: re-scan from the head after every wakeup.
// NOTE(review): the scrape dropped rendered lines 1144 (signature),
// 1152/1153 (completion of the `depReqs` queue selection started by the
// `request.isWrite ?` fragment) and 1168 (presumably the
// doWaitForSignal(...) call bracketed by the WAIT/RESUME DPRINTFs — the
// trailing tooltip text mentions a `SMMUSignal dependentReqRemoved`);
// confirm against upstream gem5.
1143 void
1145 {
1146  auto orderId = AMBA::orderId(request.pkt);
1147 
1148  DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d\n", this, orderId);
1149 
1151  request.isWrite ?
1154 
1155  bool found_hazard;
1156 
1157  do {
1158  found_hazard = false;
1159 
// Only entries ahead of us (*it != this) are ordering hazards.
1160  for (auto it = depReqs.begin(); it!=depReqs.end() && *it!=this; ++it) {
1161  DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d Q: %p\n",
1162  this, orderId, *it);
1163 
1164  if (AMBA::orderId((*it)->request.pkt) == orderId) {
1165  DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d WAIT on=%p\n",
1166  this, orderId, *it);
1167 
1169 
1170  DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d RESUME\n",
1171  this, orderId);
1172 
1173  // This is to avoid checking *it!=this after doWaitForSignal()
1174  // since it could have been deleted.
1175  found_hazard = true;
1176  break;
1177  }
1178  }
1179  } while (found_hazard);
1180 }
1181 
// Remove this request from its order-ID dependency queue (registered by
// hazardIdRegister) so younger same-ID requests held in hazardIdHold can
// proceed.  Panics on unbalanced register/release.
// NOTE(review): the scrape dropped rendered lines 1183 (signature),
// 1191/1192 (completion of the `depReqs` queue selection and presumably the
// iterator declaration for `it`) and 1204 (presumably the broadcast that
// wakes holders); confirm against upstream gem5.
1182 void
1184 {
1185  auto orderId = AMBA::orderId(request.pkt);
1186 
1187  DPRINTF(SMMUv3Hazard, "IdRel: p=%p oid=%d\n", this, orderId);
1188 
1190  request.isWrite ?
1193 
1194  for (it = depReqs.begin(); it != depReqs.end(); ++it) {
1195  if (*it == this)
1196  break;
1197  }
1198 
1199  if (it == depReqs.end())
1200  panic("hazardIdRelease: request not found");
1201 
1202  depReqs.erase(it);
1203 
1205 }
1206 
// Spawn a child SMMUTranslationProcess that prefetches a translation
// (presumably for `next4k`, given the call sites in main() — the parameter
// list on dropped rendered line 1208 is not visible; confirm upstream).
// Only meaningful in timing mode.
// NOTE(review): rendered line 1213 was also dropped — it guarded the early
// `return;` on the line below (an additional enable/slot check); as rendered
// here that bare `return;` would make the rest unreachable. Line 1221
// (the argument to beginTransaction, presumably
// SMMUTranslRequest::prefetch(...) per the tooltip text) was dropped too.
1207 void
1209 {
1210  if (!smmu.system.isTimingMode())
1211  return;
1212 
1214  return;
1215 
1216  std::string proc_name = csprintf("%sprf", name());
1217  SMMUTranslationProcess *proc =
1218  new SMMUTranslationProcess(proc_name, smmu, ifc);
1219 
1220  proc->beginTransaction(
// Kick the new process on the next clock edge.
1222  proc->scheduleWakeup(smmu.clockEdge(Cycles(1)));
1223 }
1224 
1225 void
1227  const TranslResult &tr)
1228 {
1229  assert(tr.fault == FAULT_NONE);
1230 
1231  unsigned numMasterBeats = request.isWrite ?
1233  / smmu.masterPortWidth :
1234  1;
1235 
1237  doDelay(yield, Cycles(numMasterBeats));
1239 
1240 
1245  (request.size + (ifc.portWidth-1)) / ifc.portWidth;
1246 
1248 
1249 
1250  SMMUAction a;
1251 
1252  if (request.isAtsRequest) {
1254 
1255  if (smmu.system.isAtomicMode()) {
1257  } else if (smmu.system.isTimingMode()) {
1259  } else {
1260  panic("Not in atomic or timing mode");
1261  }
1262  } else {
1264  a.ifc = &ifc;
1265  }
1266 
1267  a.pkt = request.pkt;
1268  a.delay = 0;
1269 
1270  a.pkt->setAddr(tr.addr);
1271  a.pkt->req->setPaddr(tr.addr);
1272 
1273  yield(a);
1274 
1275  if (!request.isAtsRequest) {
1276  PacketPtr pkt = yield.get();
1277  pkt->setAddr(request.addr);
1278 
1279  a.type = ACTION_SEND_RESP;
1280  a.pkt = pkt;
1281  a.ifc = &ifc;
1282  a.delay = 0;
1283  yield(a);
1284  }
1285 }
1286 
// Finish a prefetch transaction: no response packet is owed to anyone, so
// simply yield an ACTION_TERMINATE so the process is torn down.  Call sites
// in main() note this never returns to the caller.
// NOTE(review): the scrape dropped rendered lines 1288 (signature) and 1290
// (first statement of the body — content not recoverable from this view;
// confirm upstream).
1287 void
1289 {
1291 
1292  SMMUAction a;
1293  a.type = ACTION_TERMINATE;
1294  a.pkt = NULL;
1295  a.ifc = &ifc;
1296  a.delay = 0;
1297  yield(a);
1298 }
1299 
1300 void
1302 {
1303  int sizeMask = mask(smmu.regs.eventq_base & Q_BASE_SIZE_MASK);
1304 
1305  if (((smmu.regs.eventq_prod+1) & sizeMask) ==
1306  (smmu.regs.eventq_cons & sizeMask))
1307  panic("Event queue full - aborting\n");
1308 
1309  Addr event_addr =
1311  (smmu.regs.eventq_prod & sizeMask) * sizeof(ev);
1312 
1313  DPRINTF(SMMUv3, "Sending event to addr=%#08x (pos=%d): type=%#x stag=%#x "
1314  "flags=%#x sid=%#x ssid=%#x va=%#08x ipa=%#x\n",
1315  event_addr, smmu.regs.eventq_prod, ev.type, ev.stag,
1316  ev.flags, ev.streamId, ev.substreamId, ev.va, ev.ipa);
1317 
1318  // This deliberately resets the overflow field in eventq_prod!
1319  smmu.regs.eventq_prod = (smmu.regs.eventq_prod + 1) & sizeMask;
1320 
1321  doWrite(yield, event_addr, &ev, sizeof(ev));
1322 
1324  panic("eventq msi not enabled\n");
1325 
1328 }
1329 
1330 void
1332  StreamTableEntry &ste,
1333  uint32_t sid)
1334 {
1335  unsigned max_sid = 1 << (smmu.regs.strtab_base_cfg & ST_CFG_SIZE_MASK);
1336  if (sid >= max_sid)
1337  panic("SID %#x out of range, max=%#x", sid, max_sid);
1338 
1339  Addr ste_addr;
1340 
1342  unsigned split =
1344 
1345  if (split!= 7 && split!=8 && split!=16)
1346  panic("Invalid stream table split %d", split);
1347 
1348  uint64_t l2_ptr;
1349  uint64_t l2_addr =
1351  bits(sid, 32, split) * sizeof(l2_ptr);
1352 
1353  DPRINTF(SMMUv3, "Read L1STE at %#x\n", l2_addr);
1354 
1355  doReadConfig(yield, l2_addr, &l2_ptr, sizeof(l2_ptr), sid, 0);
1356 
1357  DPRINTF(SMMUv3, "Got L1STE L1 at %#x: 0x%016x\n", l2_addr, l2_ptr);
1358 
1359  unsigned span = l2_ptr & ST_L2_SPAN_MASK;
1360  if (span == 0)
1361  panic("Invalid level 1 stream table descriptor");
1362 
1363  unsigned index = bits(sid, split-1, 0);
1364  if (index >= (1 << span))
1365  panic("StreamID %d out of level 1 descriptor range %d",
1366  sid, 1<<span);
1367 
1368  ste_addr = (l2_ptr & ST_L2_ADDR_MASK) + index * sizeof(ste);
1369 
1370  smmu.steL1Fetches++;
1372  ste_addr =
1373  (smmu.regs.strtab_base & VMT_BASE_ADDR_MASK) + sid * sizeof(ste);
1374  } else {
1375  panic("Invalid stream table format");
1376  }
1377 
1378  DPRINTF(SMMUv3, "Read STE at %#x\n", ste_addr);
1379 
1380  doReadConfig(yield, ste_addr, &ste, sizeof(ste), sid, 0);
1381 
1382  DPRINTF(SMMUv3, "Got STE at %#x [0]: 0x%016x\n", ste_addr, ste.dw0);
1383  DPRINTF(SMMUv3, " STE at %#x [1]: 0x%016x\n", ste_addr, ste.dw1);
1384  DPRINTF(SMMUv3, " STE at %#x [2]: 0x%016x\n", ste_addr, ste.dw2);
1385  DPRINTF(SMMUv3, " STE at %#x [3]: 0x%016x\n", ste_addr, ste.dw3);
1386  DPRINTF(SMMUv3, " STE at %#x [4]: 0x%016x\n", ste_addr, ste._pad[0]);
1387  DPRINTF(SMMUv3, " STE at %#x [5]: 0x%016x\n", ste_addr, ste._pad[1]);
1388  DPRINTF(SMMUv3, " STE at %#x [6]: 0x%016x\n", ste_addr, ste._pad[2]);
1389  DPRINTF(SMMUv3, " STE at %#x [7]: 0x%016x\n", ste_addr, ste._pad[3]);
1390 
1391  if (!ste.dw0.valid)
1392  panic("STE @ %#x not valid\n", ste_addr);
1393 
1394  smmu.steFetches++;
1395 }
1396 
1397 void
1400  const StreamTableEntry &ste,
1401  uint32_t sid, uint32_t ssid)
1402 {
1403  Addr cd_addr;
1404 
1405  if (ste.dw0.s1cdmax == 0) {
1406  cd_addr = ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT;
1407  } else {
1408  unsigned max_ssid = 1 << ste.dw0.s1cdmax;
1409  if (ssid >= max_ssid)
1410  panic("SSID %#x out of range, max=%#x", ssid, max_ssid);
1411 
1412  if (ste.dw0.s1fmt==STAGE1_CFG_2L_4K ||
1413  ste.dw0.s1fmt==STAGE1_CFG_2L_64K)
1414  {
1415  unsigned split = ste.dw0.s1fmt==STAGE1_CFG_2L_4K ? 7 : 11;
1416 
1417  uint64_t l2_ptr;
1418  uint64_t l2_addr = (ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT) +
1419  bits(ssid, 24, split) * sizeof(l2_ptr);
1420 
1421  if (context.stage2Enable)
1422  l2_addr = translateStage2(yield, l2_addr, false).addr;
1423 
1424  DPRINTF(SMMUv3, "Read L1CD at %#x\n", l2_addr);
1425 
1426  doReadConfig(yield, l2_addr, &l2_ptr, sizeof(l2_ptr), sid, ssid);
1427 
1428  DPRINTF(SMMUv3, "Got L1CD at %#x: 0x%016x\n", l2_addr, l2_ptr);
1429 
1430  cd_addr = l2_ptr + bits(ssid, split-1, 0) * sizeof(cd);
1431 
1432  smmu.cdL1Fetches++;
1433  } else if (ste.dw0.s1fmt == STAGE1_CFG_1L) {
1434  cd_addr = (ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT) + ssid*sizeof(cd);
1435  }
1436  }
1437 
1438  if (context.stage2Enable)
1439  cd_addr = translateStage2(yield, cd_addr, false).addr;
1440 
1441  DPRINTF(SMMUv3, "Read CD at %#x\n", cd_addr);
1442 
1443  doReadConfig(yield, cd_addr, &cd, sizeof(cd), sid, ssid);
1444 
1445  DPRINTF(SMMUv3, "Got CD at %#x [0]: 0x%016x\n", cd_addr, cd.dw0);
1446  DPRINTF(SMMUv3, " CD at %#x [1]: 0x%016x\n", cd_addr, cd.dw1);
1447  DPRINTF(SMMUv3, " CD at %#x [2]: 0x%016x\n", cd_addr, cd.dw2);
1448  DPRINTF(SMMUv3, " CD at %#x [3]: 0x%016x\n", cd_addr, cd.mair);
1449  DPRINTF(SMMUv3, " CD at %#x [4]: 0x%016x\n", cd_addr, cd.amair);
1450  DPRINTF(SMMUv3, " CD at %#x [5]: 0x%016x\n", cd_addr, cd._pad[0]);
1451  DPRINTF(SMMUv3, " CD at %#x [6]: 0x%016x\n", cd_addr, cd._pad[1]);
1452  DPRINTF(SMMUv3, " CD at %#x [7]: 0x%016x\n", cd_addr, cd._pad[2]);
1453 
1454 
1455  if (!cd.dw0.valid)
1456  panic("CD @ %#x not valid\n", cd_addr);
1457 
1458  smmu.cdFetches++;
1459 }
1460 
// Read SMMU configuration structures (STE / CD / L1 descriptors) from
// memory.  Currently a thin wrapper over doRead(); the sid/ssid parameters
// are accepted but unused here (kept so call sites can attribute the access).
// NOTE(review): rendered line 1462 (qualified name plus the leading
// `Yield &yield, Addr addr` parameters, per the tooltip text at the bottom
// of the page) was dropped by the extraction.
1461 void
1463  void *ptr, size_t size,
1464  uint32_t sid, uint32_t ssid)
1465 {
1466  doRead(yield, addr, ptr, size);
1467 }
1468 
// Read one page table entry during a walk.  Aligns the requested address
// down to the PTE size before issuing the read; stage/level are accepted
// but unused here (kept for call-site attribution).
// NOTE(review): rendered line 1470 (qualified name plus leading parameters,
// presumably `Yield &yield, Addr va, Addr addr` given callers pass
// (yield, addr, pte_addr, &pte, stage, level)) was dropped; confirm upstream.
1469 void
1471  void *ptr, unsigned stage,
1472  unsigned level)
1473 {
1474  size_t pte_size = sizeof(PageTableOps::pte_t);
1475 
// Natural alignment: clear the low log2(pte_size) bits of the address.
1476  Addr mask = pte_size - 1;
1477  Addr base = addr & ~mask;
1478 
1479  doRead(yield, base, ptr, pte_size);
1480 }
SMMUSemaphore ifcSmmuSem
Definition: smmu_v3.hh:116
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:167
static SMMUTranslRequest prefetch(Addr addr, uint32_t sid, uint32_t ssid)
const Cycles tlbLat
Definition: smmu_v3.hh:128
#define DPRINTF(x,...)
Definition: trace.hh:229
IPACache ipaCache
Definition: smmu_v3.hh:101
void sendEvent(Yield &yield, const SMMUEvent &ev)
void ifcTLBUpdate(Yield &yield, const TranslResult &tr)
void store(const Entry &incoming, AllocPolicy alloc)
Bitfield< 30, 0 > index
void doDelay(Yield &yield, Cycles cycles)
void walkCacheLookup(Yield &yield, const WalkCache::Entry *&walkEntry, Addr addr, uint16_t asid, uint16_t vmid, unsigned stage, unsigned level)
Bitfield< 51, 4 > ttb0
Cycles is a wrapper class for representing cycle counts, i.e.
Definition: types.hh:83
SMMUSemaphore tlbSem
Definition: smmu_v3.hh:115
void doReadSTE(Yield &yield, StreamTableEntry &ste, uint32_t sid)
void completePrefetch(Yield &yield)
Stats::Scalar steFetches
Definition: smmu_v3.hh:137
Stats::Distribution translationTimeDist
Definition: smmu_v3.hh:140
const std::string & name()
Definition: trace.cc:54
void hazard4kHold(Yield &yield)
virtual void main(Yield &yield)
const Entry * lookup(uint32_t sid, uint32_t ssid, Addr va, bool updStats=true)
void smmuTLBUpdate(Yield &yield, const TranslResult &tr)
SMMUSemaphore ptwSem
Definition: smmu_v3.hh:124
const Entry * lookup(Addr ipa, uint16_t vmid, bool updStats=true)
SMMUSemaphore ipaSem
Definition: smmu_v3.hh:119
void store(const Entry &incoming)
void makeTimingResponse()
Definition: packet.hh:955
const Cycles smmuIfcLat
Definition: smmu_v3.hh:130
uint64_t _pad[4]
const Cycles ifcSmmuLat
Definition: smmu_v3.hh:129
uint64_t eventq_base
Bitfield< 8 > a
void hazardIdHold(Yield &yield)
virtual Addr index(Addr va, unsigned level) const =0
void doReadConfig(Yield &yield, Addr addr, void *ptr, size_t size, uint32_t sid, uint32_t ssid)
SMMUSignal dependentReqRemoved
ip6_addr_t addr
Definition: inet.hh:335
Bitfield< 7, 6 > tg0
Bitfield< 23 > span
void scheduleSlaveRetries()
Definition: smmu_v3.cc:213
void signalDrainDone() const
Signal that an object is drained.
Definition: drain.hh:267
const unsigned walkCacheS1Levels
Definition: smmu_v3.hh:111
const unsigned masterPortWidth
Definition: smmu_v3.hh:113
SMMUv3SlaveInterface * ifc
Definition: smmu_v3_proc.hh:72
Stats::Scalar steL1Fetches
Definition: smmu_v3.hh:136
uint64_t ipa
TranslResult translateStage1And2(Yield &yield, Addr addr)
TranslResult bypass(Addr addr) const
virtual Addr walkMask(unsigned level) const =0
bool microTLBLookup(Yield &yield, TranslResult &tr)
bool isWrite() const
Definition: packet.hh:529
void reinit()
Definition: smmu_v3_proc.cc:63
const bool tlbEnable
Definition: smmu_v3.hh:104
virtual bool isWritable(pte_t pte, unsigned level, bool stage2) const =0
void doBroadcastSignal(SMMUSignal &sig)
void walkCacheUpdate(Yield &yield, Addr va, Addr vaMask, Addr pa, unsigned stage, unsigned level, bool leaf, uint8_t permissions)
Stats::Scalar cdFetches
Definition: smmu_v3.hh:139
std::list< SMMUTranslationProcess * > dependentReads[SMMU_MAX_TRANS_ID]
std::list< SMMUTranslationProcess * > dependentWrites[SMMU_MAX_TRANS_ID]
bool smmuTLBLookup(Yield &yield, TranslResult &tr)
SMMUSemaphore microTLBSem
virtual unsigned lastLevel() const =0
PacketPtr pkt
Definition: smmu_v3_proc.hh:71
CallerType: A reference to an object of this class will be passed to the coroutine task...
Definition: coroutine.hh:85
uint16_t type
Stats::Scalar cdL1Fetches
Definition: smmu_v3.hh:138
const bool prefetchReserveLastWay
void hazardIdRegister()
Used to force ordering on transactions with the same orderId.
RequestPtr req
A pointer to the original request.
Definition: packet.hh:327
Bitfield< 51, 6 > s1ctxptr
void doRead(Yield &yield, Addr addr, void *ptr, size_t size)
Definition: smmu_v3_proc.cc:71
unsigned getSize() const
Definition: packet.hh:736
TranslResult combineTranslations(const TranslResult &s1tr, const TranslResult &s2tr) const
bool isAtomicMode() const
Is the system in atomic mode?
Definition: system.hh:139
Tick curTick()
The current simulated tick.
Definition: core.hh:47
void beginTransaction(const SMMUTranslRequest &req)
std::string csprintf(const char *format, const Args &...args)
Definition: cprintf.hh:162
void doWrite(Yield &yield, Addr addr, const void *ptr, size_t size)
virtual Addr nextLevelPointer(pte_t pte, unsigned level) const =0
TranslResult smmuTranslation(Yield &yield)
void makeAtomicResponse()
Definition: packet.hh:949
uint32_t eventq_prod
SMMUSemaphore transSem
Definition: smmu_v3.hh:123
const Cycles ipaLat
Definition: smmu_v3.hh:132
uint64_t Tick
Tick count type.
Definition: types.hh:63
Stats::Distribution ptwTimeDist
Definition: smmu_v3.hh:141
uint64_t strtab_base
uint64_t va
SMMUActionType type
Definition: smmu_v3_proc.hh:70
void doSemaphoreUp(SMMUSemaphore &sem)
void scheduleWakeup(Tick when)
SMMUTranslationProcess(const std::string &name, SMMUv3 &_smmu, SMMUv3SlaveInterface &_ifc)
Bitfield< 51, 12 > base
Definition: pagetable.hh:142
SMMUSemaphore smmuIfcSem
Definition: smmu_v3.hh:117
Bitfield< 39, 12 > pa
virtual bool isValid(pte_t pte, unsigned level) const =0
const Entry * lookup(Addr va, uint16_t asid, uint16_t vmid, bool updStats=true)
Bitfield< 63, 48 > asid
const Cycles walkLat
Definition: smmu_v3.hh:133
Addr getAddr() const
Definition: packet.hh:726
static SMMUTranslRequest fromPacket(PacketPtr pkt, bool ats=false)
Bitfield< 5, 4 > s1fmt
void store(const Entry &incoming)
uint32_t strtab_base_cfg
STL list class.
Definition: stl.hh:54
void completeTransaction(Yield &yield, const TranslResult &tr)
WalkCache walkCache
Definition: smmu_v3.hh:102
Bitfield< 63, 59 > s1cdmax
Bitfield< 51, 4 > ttb1
virtual bool isLeaf(pte_t pte, unsigned level) const =0
SMMUSignal duplicateReqRemoved
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:142
SMMUSemaphore masterPortSem
Definition: smmu_v3.hh:121
static OrderID orderId(PacketPtr pkt)
Definition: amba.hh:51
#define ULL(N)
uint64_t constant
Definition: types.hh:50
void doReadCD(Yield &yield, ContextDescriptor &cd, const StreamTableEntry &ste, uint32_t sid, uint32_t ssid)
uint64_t eventq_irq_cfg0
Bitfield< 37, 32 > s2t0sz
A Packet is used to encapsulate a transfer between two objects in the memory system (e...
Definition: packet.hh:255
Bitfield< 8 > va
Tick clockEdge(Cycles cycles=Cycles(0)) const
Determine the tick when a cycle begins, by default the current one, but the argument also enables the...
void store(const Entry &incoming)
uint32_t substreamId
uint16_t stag
ARMArchTLB tlb
Definition: smmu_v3.hh:99
SMMUv3SlaveInterface & ifc
SMMUSemaphore configSem
Definition: smmu_v3.hh:118
Bitfield< 20 > level
Definition: intmessage.hh:49
uint32_t eventq_irq_cfg1
This is an implementation of the SMMUv3 architecture.
Bitfield< 9 > e
TranslResult walkStage1And2(Yield &yield, Addr addr, const PageTableOps *pt_ops, unsigned level, Addr walkPtr)
uint32_t eventq_cons
const Entry * lookup(uint32_t sid, uint32_t ssid, bool updStats=true)
bool ifcTLBLookup(Yield &yield, TranslResult &tr, bool &wasPrefetched)
SMMUv3 & smmu
void setAddr(Addr _addr)
Update the address of this packet mid-transaction.
Definition: packet.hh:734
TranslResult walkStage2(Yield &yield, Addr addr, bool final_tr, const PageTableOps *pt_ops, unsigned level, Addr walkPtr)
uint32_t cr0
SMMUAction runProcessTiming(SMMUProcess *proc, PacketPtr pkt)
Definition: smmu_v3.cc:282
SMMURegs regs
Definition: smmu_v3.hh:148
void doReadPTE(Yield &yield, Addr va, Addr addr, void *ptr, unsigned stage, unsigned level)
SMMUSemaphore walkSem
Definition: smmu_v3.hh:120
const System & system
Definition: smmu_v3.hh:92
void doWaitForSignal(Yield &yield, SMMUSignal &sig)
void store(const Entry &incoming)
const Entry * lookup(Addr va, Addr vaMask, uint16_t asid, uint16_t vmid, unsigned stage, unsigned level, bool updStats=true)
Bitfield< 47, 46 > s2tg
void issuePrefetch(Addr addr)
const unsigned portWidth
uint32_t streamId
const PageTableOps * getPageTableOps(uint8_t trans_granule)
Definition: smmu_v3.cc:566
const bool walkCacheNonfinalEnable
Definition: smmu_v3.hh:110
SMMUSemaphore cycleSem
Definition: smmu_v3.hh:125
std::enable_if<!std::is_same< T, void >::value, T >::type get()
get() is the way we can extrapolate arguments from the coroutine caller.
Definition: coroutine.hh:136
uint32_t flags
const bool walkCacheEnable
Definition: smmu_v3.hh:107
void doSemaphoreDown(Yield &yield, SMMUSemaphore &sem)
SMMUTranslRequest request
Bitfield< 3, 1 > config
Bitfield< 3, 0 > mask
Definition: types.hh:64
const bool ipaCacheEnable
Definition: smmu_v3.hh:106
void microTLBUpdate(Yield &yield, const TranslResult &tr)
const unsigned walkCacheS2Levels
Definition: smmu_v3.hh:112
Bitfield< 32 > cd
void configCacheUpdate(Yield &yield, const TranslContext &tc)
virtual ~SMMUTranslationProcess()
ConfigCache configCache
Definition: smmu_v3.hh:100
bool isTimingMode() const
Is the system in timing mode?
Definition: system.hh:150
bool configCacheLookup(Yield &yield, TranslContext &tc)
const std::string name() const
T bits(T val, int first, int last)
Extract the bitfield from position &#39;first&#39; to &#39;last&#39; (inclusive) from &#39;val&#39; and right justify it...
Definition: bitfield.hh:72
SMMUSemaphore mainTLBSem
Bitfield< 31 > valid
const bool configCacheEnable
Definition: smmu_v3.hh:105
virtual Addr pageMask(pte_t pte, unsigned level) const =0
uint64_t _pad[3]
bool findConfig(Yield &yield, TranslContext &tc, TranslResult &tr)
void sample(const U &v, int n=1)
Add a value to the distribtion n times.
Definition: statistics.hh:1899
std::list< SMMUTranslationProcess * > duplicateReqs
virtual unsigned firstLevel(uint8_t tsz) const =0
SMMUSemaphore slavePortSem
TranslResult translateStage2(Yield &yield, Addr addr, bool final_tr)
const Cycles configLat
Definition: smmu_v3.hh:131
bool hazard4kCheck()
Used to force ordering on transactions with same (SID, SSID, 4k page) to avoid multiple identical pag...

Generated on Fri Feb 28 2020 16:27:00 for gem5 by doxygen 1.8.13