gem5 v24.0.0.0
Loading...
Searching...
No Matches
smmu_v3_transl.cc
Go to the documentation of this file.
1/*
2 * Copyright (c) 2013, 2018-2019, 2021, 2024 Arm Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
39
40#include "arch/arm/pagetable.hh"
41#include "debug/SMMUv3.hh"
42#include "debug/SMMUv3Hazard.hh"
43#include "dev/arm/amba.hh"
44#include "dev/arm/base_gic.hh"
45#include "dev/arm/smmu_v3.hh"
46#include "sim/system.hh"
47
48namespace gem5
49{
50
51using namespace ArmISA;
52
53SMMUTranslRequest
55{
57 req.addr = pkt->getAddr();
58 req.size = pkt->getSize();
59 req.sid = pkt->req->streamId();
60 req.ssid = pkt->req->hasSubstreamId() ?
61 pkt->req->substreamId() : 0;
62 req.isWrite = pkt->isWrite();
63 req.isPrefetch = false;
64 req.isAtsRequest = ats;
65 req.pkt = pkt;
66
67 return req;
68}
69
71SMMUTranslRequest::prefetch(Addr addr, uint32_t sid, uint32_t ssid)
72{
74 req.addr = addr;
75 req.size = 0;
76 req.sid = sid;
77 req.ssid = ssid;
78 req.isWrite = false;
79 req.isPrefetch = true;
80 req.isAtsRequest = false;
81 req.pkt = NULL;
82
83 return req;
84}
85
87 SMMUv3 &_smmu, SMMUv3DeviceInterface &_ifc)
88 :
89 SMMUProcess(name, _smmu),
90 ifc(_ifc)
91{
92 // Decrease number of pending translation slots on the device interface
93 assert(ifc.xlateSlotsRemaining > 0);
95
97 reinit();
98}
99
101{
102 // Increase number of pending translation slots on the device interface
103 assert(ifc.pendingMemAccesses > 0);
105
106 // If no more SMMU memory accesses are pending,
107 // signal SMMU Device Interface as drained
108 if (ifc.pendingMemAccesses == 0) {
110 }
111}
112
113void
120
121void
123{
124 assert(smmu.system.isTimingMode());
125
126 assert(!"Stalls are broken");
127
128 Tick resumeTick = curTick();
129
130 (void) resumeTick;
131 DPRINTF(SMMUv3, "Resume at tick = %d. Fault duration = %d (%.3fus)\n",
132 resumeTick, resumeTick-faultTick, (resumeTick-faultTick) / 1e6);
133
135
137}
138
139void
141{
142 // Hack:
143 // The coroutine starts running as soon as it's created.
144 // But we need to wait for request data esp. in atomic mode.
147 a.pkt = NULL;
148 yield(a);
149
150 const Addr next4k = (request.addr + 0x1000ULL) & ~0xfffULL;
151
152 if ((request.addr + request.size) > next4k)
153 panic("Transaction crosses 4k boundary (addr=%#x size=%#x)!\n",
155
156
157 unsigned numResponderBeats = request.isWrite ?
158 (request.size + (ifc.portWidth - 1)) / ifc.portWidth : 1;
159
161 doDelay(yield, Cycles(numResponderBeats));
163
164
165 recvTick = curTick();
166
167 if (!(smmu.regs.cr0 & CR0_SMMUEN_MASK)) {
168 // SMMU disabled
169 doDelay(yield, Cycles(1));
171 return;
172 }
173
174 TranslResult tr;
175 bool wasPrefetched = false;
176
177 if (request.isPrefetch) {
178 // Abort prefetch if:
179 // - there's already a transaction looking up the same 4k page, OR
180 // - requested address is already in the TLB.
181 if (hazard4kCheck() || ifcTLBLookup(yield, tr, wasPrefetched))
182 completePrefetch(yield); // this never returns
183
185
186 tr = smmuTranslation(yield);
187
188 if (!tr.isFaulting())
189 ifcTLBUpdate(yield, tr);
190
192
193 completePrefetch(yield);
194 } else {
196
197 if (!microTLBLookup(yield, tr)) {
198 bool hit = ifcTLBLookup(yield, tr, wasPrefetched);
199 if (!hit) {
200 while (!hit && hazard4kCheck()) {
201 hazard4kHold(yield);
202 hit = ifcTLBLookup(yield, tr, wasPrefetched);
203 }
204 }
205
206 // Issue prefetch if:
207 // - there was a TLB hit and the entry was prefetched, OR
208 // - TLB miss was successfully serviced
209 if (hit) {
210 if (wasPrefetched)
211 issuePrefetch(next4k);
212 } else {
214
215 tr = smmuTranslation(yield);
216
217 if (!tr.isFaulting()) {
218 ifcTLBUpdate(yield, tr);
219
220 issuePrefetch(next4k);
221 }
222
224 }
225
226 if (!tr.isFaulting())
227 microTLBUpdate(yield, tr);
228 }
229
230 hazardIdHold(yield);
232
233 if (tr.isFaulting()) {
234 abortTransaction(yield, tr);
235 } else {
236 completeTransaction(yield, tr);
237 }
238 }
239}
240
243{
244 TranslResult tr;
245 tr.fault = Fault(FAULT_NONE);
246 tr.addr = addr;
247 tr.addrMask = 0;
248 tr.writable = 1;
249
250 return tr;
251}
252
255{
256 TranslResult tr;
257
258 // Need SMMU credit to proceed
260
261 // Simulate pipelined IFC->SMMU link
263 doDelay(yield, Cycles(1)); // serialize transactions
265 doDelay(yield, smmu.ifcSmmuLat - Cycles(1)); // remaining pipeline delay
266
267 bool haveConfig = true;
268 if (!configCacheLookup(yield, context)) {
269 if (findConfig(yield, context, tr)) {
271 } else {
272 haveConfig = false;
273 }
274 }
275
276 if (haveConfig && !smmuTLBLookup(yield, tr)) {
277 // SMMU main TLB miss
278
279 // Need PTW slot to proceed
281
282 // Page table walk
283 Tick ptwStartTick = curTick();
284
285 if (context.stage1Enable) {
286 tr = translateStage1And2(yield, request.addr);
287 } else if (context.stage2Enable) {
288 tr = translateStage2(yield, request.addr, true);
289 } else {
290 tr = bypass(request.addr);
291 }
292
294 smmu.stats.ptwTimeDist.sample(curTick() - ptwStartTick);
295
296 // Free PTW slot
298
299 if (!tr.isFaulting())
300 smmuTLBUpdate(yield, tr);
301 }
302
303 // Simulate pipelined SMMU->RESPONSE INTERFACE link
305 doDelay(yield, Cycles(1)); // serialize transactions
307 doDelay(yield, smmu.smmuIfcLat - Cycles(1)); // remaining pipeline delay
308
309 // return SMMU credit
311
312 return tr;
313}
314
315bool
317{
318 if (!ifc.microTLBEnable)
319 return false;
320
322 doDelay(yield, ifc.microTLBLat);
323 const SMMUTLB::Entry *e =
326
327 if (!e) {
328 DPRINTF(SMMUv3, "micro TLB miss vaddr=%#x sid=%#x ssid=%#x\n",
330
331 return false;
332 }
333
335 "micro TLB hit vaddr=%#x amask=%#x sid=%#x ssid=%#x paddr=%#x\n",
336 request.addr, e->vaMask, request.sid, request.ssid, e->pa);
337
338 tr.fault = Fault(FAULT_NONE);
339 tr.addr = e->pa + (request.addr & ~e->vaMask);;
340 tr.addrMask = e->vaMask;
341 tr.writable = e->permissions;
342
343 return true;
344}
345
346bool
348 bool &wasPrefetched)
349{
350 if (!ifc.mainTLBEnable)
351 return false;
352
354 doDelay(yield, ifc.mainTLBLat);
355 const SMMUTLB::Entry *e =
358
359 if (!e) {
361 "RESPONSE Interface TLB miss vaddr=%#x sid=%#x ssid=%#x\n",
363
364 return false;
365 }
366
368 "RESPONSE Interface TLB hit vaddr=%#x amask=%#x sid=%#x ssid=%#x "
369 "paddr=%#x\n", request.addr, e->vaMask, request.sid,
370 request.ssid, e->pa);
371
372 tr.fault = Fault(FAULT_NONE);
373 tr.addr = e->pa + (request.addr & ~e->vaMask);;
374 tr.addrMask = e->vaMask;
375 tr.writable = e->permissions;
376 wasPrefetched = e->prefetched;
377
378 return true;
379}
380
381bool
383{
384 if (!smmu.tlbEnable)
385 return false;
386
388 doDelay(yield, smmu.tlbLat);
389 const ARMArchTLB::Entry *e =
392
393 if (!e) {
394 DPRINTF(SMMUv3, "SMMU TLB miss vaddr=%#x asid=%#x vmid=%#x\n",
396
397 return false;
398 }
399
401 "SMMU TLB hit vaddr=%#x amask=%#x asid=%#x vmid=%#x paddr=%#x\n",
402 request.addr, e->vaMask, context.asid, context.vmid, e->pa);
403
404 tr.fault = Fault(FAULT_NONE);
405 tr.addr = e->pa + (request.addr & ~e->vaMask);;
406 tr.addrMask = e->vaMask;
407 tr.writable = e->permissions;
408
409 return true;
410}
411
412void
414 const TranslResult &tr)
415{
416 assert(!tr.isFaulting());
417
418 if (!ifc.microTLBEnable)
419 return;
420
422 e.valid = true;
423 e.prefetched = false;
424 e.sid = request.sid;
425 e.ssid = request.ssid;
426 e.vaMask = tr.addrMask;
427 e.va = request.addr & e.vaMask;
428 e.pa = tr.addr & e.vaMask;
429 e.permissions = tr.writable;
430 e.asid = context.asid;
431 e.vmid = context.vmid;
432
434
436 "micro TLB upd vaddr=%#x amask=%#x paddr=%#x sid=%#x ssid=%#x\n",
437 e.va, e.vaMask, e.pa, e.sid, e.ssid);
438
440
442}
443
444void
446 const TranslResult &tr)
447{
448 assert(!tr.isFaulting());
449
450 if (!ifc.mainTLBEnable)
451 return;
452
454 e.valid = true;
455 e.prefetched = request.isPrefetch;
456 e.sid = request.sid;
457 e.ssid = request.ssid;
458 e.vaMask = tr.addrMask;
459 e.va = request.addr & e.vaMask;
460 e.pa = tr.addr & e.vaMask;
461 e.permissions = tr.writable;
462 e.asid = context.asid;
463 e.vmid = context.vmid;
464
467 alloc = request.isPrefetch ?
469
471
473 "RESPONSE Interface upd vaddr=%#x amask=%#x paddr=%#x sid=%#x "
474 "ssid=%#x\n", e.va, e.vaMask, e.pa, e.sid, e.ssid);
475
476 ifc.mainTLB->store(e, alloc);
477
479}
480
481void
483 const TranslResult &tr)
484{
485 assert(!tr.isFaulting());
486
487 if (!smmu.tlbEnable)
488 return;
489
491 e.valid = true;
492 e.vaMask = tr.addrMask;
493 e.va = request.addr & e.vaMask;
494 e.asid = context.asid;
495 e.vmid = context.vmid;
496 e.pa = tr.addr & e.vaMask;
497 e.permissions = tr.writable;
498
500
502 "SMMU TLB upd vaddr=%#x amask=%#x paddr=%#x asid=%#x vmid=%#x\n",
503 e.va, e.vaMask, e.pa, e.asid, e.vmid);
504
505 smmu.tlb.store(e);
506
508}
509
510bool
512{
514 return false;
515
517 doDelay(yield, smmu.configLat);
518 const ConfigCache::Entry *e =
521
522 if (!e) {
523 DPRINTF(SMMUv3, "Config miss sid=%#x ssid=%#x\n",
525
526 return false;
527 }
528
529 DPRINTF(SMMUv3, "Config hit sid=%#x ssid=%#x ttb=%#08x asid=%#x\n",
530 request.sid, request.ssid, e->ttb0, e->asid);
531
532 tc.stage1Enable = e->stage1_en;
533 tc.stage2Enable = e->stage2_en;
534
535 tc.ttb0 = e->ttb0;
536 tc.ttb1 = e->ttb1;
537 tc.asid = e->asid;
538 tc.httb = e->httb;
539 tc.vmid = e->vmid;
540
541 tc.stage1TranslGranule = e->stage1_tg;
542 tc.stage2TranslGranule = e->stage2_tg;
543
544 tc.t0sz = e->t0sz;
545 tc.s2t0sz = e->s2t0sz;
546
547 return true;
548}
549
550void
552 const TranslContext &tc)
553{
555 return;
556
558 e.valid = true;
559 e.sid = request.sid;
560 e.ssid = request.ssid;
561 e.stage1_en = tc.stage1Enable;
562 e.stage2_en = tc.stage2Enable;
563 e.ttb0 = tc.ttb0;
564 e.ttb1 = tc.ttb1;
565 e.asid = tc.asid;
566 e.httb = tc.httb;
567 e.vmid = tc.vmid;
568 e.stage1_tg = tc.stage1TranslGranule;
569 e.stage2_tg = tc.stage2TranslGranule;
570 e.t0sz = tc.t0sz;
571 e.s2t0sz = tc.s2t0sz;
572
574
575 DPRINTF(SMMUv3, "Config upd sid=%#x ssid=%#x\n", e.sid, e.ssid);
576
578
580}
581
582bool
584 TranslContext &tc,
585 TranslResult &tr)
586{
587 tc.stage1Enable = false;
588 tc.stage2Enable = false;
589
591 doReadSTE(yield, ste, request.sid);
592
593 switch (ste.dw0.config) {
595 break;
596
598 tc.stage1Enable = true;
599 break;
600
602 tc.stage2Enable = true;
603 break;
604
606 tc.stage1Enable = true;
607 tc.stage2Enable = true;
608 break;
609
610 default:
611 panic("Bad or unimplemented STE config %d\n",
612 ste.dw0.config);
613 }
614
615
616 // Establish stage 2 context first since
617 // Context Descriptors can be in IPA space.
618 if (tc.stage2Enable) {
619 tc.httb = ste.dw3.s2ttb << STE_S2TTB_SHIFT;
620 tc.vmid = ste.dw2.s2vmid;
621 tc.stage2TranslGranule = ste.dw2.s2tg;
622 tc.s2t0sz = ste.dw2.s2t0sz;
623 } else {
624 tc.httb = 0xdeadbeef;
625 tc.vmid = 0;
627 tc.s2t0sz = 0;
628 }
629
630
631 // Now fetch stage 1 config.
632 if (context.stage1Enable) {
634 tr = doReadCD(yield, cd, ste, request.sid, request.ssid);
635
636 tc.ttb0 = cd.dw1.ttb0 << CD_TTB_SHIFT;
637 tc.ttb1 = cd.dw2.ttb1 << CD_TTB_SHIFT;
638 tc.asid = cd.dw0.asid;
639 tc.stage1TranslGranule = cd.dw0.tg0;
640 tc.t0sz = cd.dw0.t0sz;
641 } else {
642 tc.ttb0 = 0xcafebabe;
643 tc.ttb1 = 0xcafed00d;
644 tc.asid = 0;
646 tc.t0sz = 0;
647 }
648
649 return !tr.isFaulting();
650}
651
652void
654 Yield &yield,
655 const WalkCache::Entry *&walkEntry,
656 Addr addr, uint16_t asid, uint16_t vmid,
657 unsigned stage, unsigned level)
658{
659 const char *indent = stage==2 ? " " : "";
660 (void) indent; // this is only used in DPRINTFs
661
662 const auto tg = stage == 1 ?
665
666 const auto *pt_ops = getPageTableOps(tg);
667
668 unsigned walkCacheLevels =
671 0;
672
673 if ((1 << level) & walkCacheLevels) {
675 doDelay(yield, smmu.walkLat);
676
677 walkEntry = smmu.walkCache.lookup(addr, pt_ops->walkMask(level),
678 asid, vmid, stage, level);
679
680 if (walkEntry) {
681 DPRINTF(SMMUv3, "%sWalkCache hit va=%#x asid=%#x vmid=%#x "
682 "base=%#x (S%d, L%d)\n",
683 indent, addr, asid, vmid, walkEntry->pa, stage, level);
684 } else {
685 DPRINTF(SMMUv3, "%sWalkCache miss va=%#x asid=%#x vmid=%#x "
686 "(S%d, L%d)\n",
687 indent, addr, asid, vmid, stage, level);
688 }
689
691 }
692}
693
694void
696 Addr vaMask, Addr pa,
697 unsigned stage, unsigned level,
698 bool leaf, uint8_t permissions)
699{
700 unsigned walkCacheLevels =
702
703 if (smmu.walkCacheEnable && ((1<<level) & walkCacheLevels)) {
705 e.valid = true;
706 e.va = va;
707 e.vaMask = vaMask;
708 e.asid = stage==1 ? context.asid : 0;
709 e.vmid = context.vmid;
710 e.stage = stage;
711 e.level = level;
712 e.leaf = leaf;
713 e.pa = pa;
714 e.permissions = permissions;
715
717
718 DPRINTF(SMMUv3, "%sWalkCache upd va=%#x mask=%#x asid=%#x vmid=%#x "
719 "tpa=%#x leaf=%s (S%d, L%d)\n",
720 e.stage==2 ? " " : "",
721 e.va, e.vaMask, e.asid, e.vmid,
722 e.pa, e.leaf, e.stage, e.level);
723
725
727 }
728}
729
730/*
731 * Please note:
732 * This does not deal with the case where stage 1 page size
733 * is larger than stage 2 page size.
734 */
737 const PageTableOps *pt_ops,
738 unsigned level, Addr walkPtr)
739{
740 PageTableOps::pte_t pte = 0;
741
743 doDelay(yield, Cycles(1));
745
746 for (; level <= pt_ops->lastLevel(); level++) {
747 Addr pte_addr = walkPtr + pt_ops->index(
748 addr, level, 64 - context.t0sz);
749
750 DPRINTF(SMMUv3, "Fetching S1 L%d PTE from pa=%#08x\n",
751 level, pte_addr);
752
753 doReadPTE(yield, addr, pte_addr, &pte, 1, level);
754
755 DPRINTF(SMMUv3, "Got S1 L%d PTE=%#x from pa=%#08x\n",
756 level, pte, pte_addr);
757
759 doDelay(yield, Cycles(1));
761
762 bool valid = pt_ops->isValid(pte, level);
763 bool leaf = pt_ops->isLeaf(pte, level);
764
765 if (!valid) {
766 DPRINTF(SMMUv3, "S1 PTE not valid - fault\n");
767
768 TranslResult tr;
770 return tr;
771 }
772
773 if (valid && leaf && request.isWrite &&
774 !pt_ops->isWritable(pte, level, false))
775 {
776 DPRINTF(SMMUv3, "S1 page not writable - fault\n");
777
778 TranslResult tr;
780 return tr;
781 }
782
783 walkPtr = pt_ops->nextLevelPointer(pte, level);
784
785 if (leaf)
786 break;
787
788 if (context.stage2Enable) {
789 TranslResult s2tr = translateStage2(yield, walkPtr, false);
790 if (s2tr.isFaulting())
791 return s2tr;
792
793 walkPtr = s2tr.addr;
794 }
795
796 walkCacheUpdate(yield, addr, pt_ops->walkMask(level), walkPtr,
797 1, level, leaf, 0);
798 }
799
800 TranslResult tr;
801 tr.fault = Fault(FAULT_NONE);
802 tr.addrMask = pt_ops->pageMask(pte, level);
803 tr.addr = walkPtr + (addr & ~tr.addrMask);
804 tr.writable = pt_ops->isWritable(pte, level, false);
805
806 if (context.stage2Enable) {
807 TranslResult s2tr = translateStage2(yield, tr.addr, true);
808 if (s2tr.isFaulting()) {
810 return s2tr;
811 }
812
813 tr = combineTranslations(tr, s2tr);
814 }
815
816 walkCacheUpdate(yield, addr, tr.addrMask, walkPtr,
817 1, level, true, tr.writable);
818
819 return tr;
820}
821
824 const PageTableOps *pt_ops,
825 unsigned level, Addr walkPtr)
826{
828
830 doDelay(yield, Cycles(1));
832
833 for (; level <= pt_ops->lastLevel(); level++) {
834 Addr pte_addr = walkPtr + pt_ops->index(
835 addr, level, 64 - context.s2t0sz);
836
837 DPRINTF(SMMUv3, " Fetching S2 L%d PTE from pa=%#08x\n",
838 level, pte_addr);
839
840 doReadPTE(yield, addr, pte_addr, &pte, 2, level);
841
842 DPRINTF(SMMUv3, " Got S2 L%d PTE=%#x from pa=%#08x\n",
843 level, pte, pte_addr);
844
846 doDelay(yield, Cycles(1));
848
849 bool valid = pt_ops->isValid(pte, level);
850 bool leaf = pt_ops->isLeaf(pte, level);
851
852 if (!valid) {
853 DPRINTF(SMMUv3, " S2 PTE not valid - fault\n");
854
855 TranslResult tr;
857 return tr;
858 }
859
860 if (valid && leaf && request.isWrite &&
861 !pt_ops->isWritable(pte, level, true))
862 {
863 DPRINTF(SMMUv3, " S2 PTE not writable = fault\n");
864
865 TranslResult tr;
867 return tr;
868 }
869
870 walkPtr = pt_ops->nextLevelPointer(pte, level);
871
872 if (final_tr || smmu.walkCacheNonfinalEnable)
873 walkCacheUpdate(yield, addr, pt_ops->walkMask(level), walkPtr,
874 2, level, leaf,
875 leaf ? pt_ops->isWritable(pte, level, true) : 0);
876 if (leaf)
877 break;
878 }
879
880 TranslResult tr;
881 tr.fault = Fault(FAULT_NONE);
882 tr.addrMask = pt_ops->pageMask(pte, level);
883 tr.addr = walkPtr + (addr & ~tr.addrMask);
884 tr.writable = pt_ops->isWritable(pte, level, true);
885
886 return tr;
887}
888
891{
893 const auto *pt_ops = getPageTableOps(tg);
894
895 const WalkCache::Entry *walk_ep = NULL;
896 unsigned level;
897
898 // Level here is actually (level+1) so we can count down
899 // to 0 using unsigned int.
900 for (level = pt_ops->lastLevel() + 1;
901 level > pt_ops->firstLevel(context.t0sz);
902 level--)
903 {
904 walkCacheLookup(yield, walk_ep, addr,
905 context.asid, context.vmid, 1, level-1);
906
907 if (walk_ep)
908 break;
909 }
910
911 // Correct level (see above).
912 level -= 1;
913
914 TranslResult tr;
915 if (walk_ep) {
916 if (walk_ep->leaf) {
917 tr.fault = Fault(FAULT_NONE);
918 tr.addr = walk_ep->pa + (addr & ~walk_ep->vaMask);
919 tr.addrMask = walk_ep->vaMask;
920 tr.writable = walk_ep->permissions;
921 } else {
922 tr = walkStage1And2(yield, addr, pt_ops, level+1, walk_ep->pa);
923 }
924 } else {
925 Addr table_addr = context.ttb0;
926 if (context.stage2Enable) {
927 TranslResult s2tr = translateStage2(yield, table_addr, false);
928 if (s2tr.isFaulting()) {
929 return s2tr;
930 }
931
932 table_addr = s2tr.addr;
933 }
934
935 tr = walkStage1And2(yield, addr, pt_ops,
936 pt_ops->firstLevel(context.t0sz),
937 table_addr);
938 }
939
940 if (!tr.isFaulting())
941 DPRINTF(SMMUv3, "Translated vaddr %#x to paddr %#x\n", addr, tr.addr);
942
943 return tr;
944}
945
948{
950 const auto *pt_ops = getPageTableOps(tg);
951
952 const IPACache::Entry *ipa_ep = NULL;
953 if (smmu.ipaCacheEnable) {
955 doDelay(yield, smmu.ipaLat);
956 ipa_ep = smmu.ipaCache.lookup(addr, context.vmid);
958 }
959
960 if (ipa_ep) {
961 TranslResult tr;
962 tr.fault = Fault(FAULT_NONE);
963 tr.addr = ipa_ep->pa + (addr & ~ipa_ep->ipaMask);
964 tr.addrMask = ipa_ep->ipaMask;
965 tr.writable = ipa_ep->permissions;
966
967 DPRINTF(SMMUv3, " IPACache hit ipa=%#x vmid=%#x pa=%#x\n",
968 addr, context.vmid, tr.addr);
969
970 return tr;
971 } else if (smmu.ipaCacheEnable) {
972 DPRINTF(SMMUv3, " IPACache miss ipa=%#x vmid=%#x\n",
973 addr, context.vmid);
974 }
975
976 const WalkCache::Entry *walk_ep = NULL;
977 unsigned level = pt_ops->firstLevel(context.s2t0sz);
978
979 if (final_tr || smmu.walkCacheNonfinalEnable) {
980 // Level here is actually (level+1) so we can count down
981 // to 0 using unsigned int.
982 for (level = pt_ops->lastLevel() + 1;
983 level > pt_ops->firstLevel(context.s2t0sz);
984 level--)
985 {
986 walkCacheLookup(yield, walk_ep, addr,
987 0, context.vmid, 2, level-1);
988
989 if (walk_ep)
990 break;
991 }
992
993 // Correct level (see above).
994 level -= 1;
995 }
996
997 TranslResult tr;
998 if (walk_ep) {
999 if (walk_ep->leaf) {
1000 tr.fault = Fault(FAULT_NONE);
1001 tr.addr = walk_ep->pa + (addr & ~walk_ep->vaMask);
1002 tr.addrMask = walk_ep->vaMask;
1003 tr.writable = walk_ep->permissions;
1004 } else {
1005 tr = walkStage2(yield, addr, final_tr, pt_ops,
1006 level + 1, walk_ep->pa);
1007 }
1008 } else {
1009 tr = walkStage2(yield, addr, final_tr, pt_ops,
1010 pt_ops->firstLevel(context.s2t0sz),
1011 context.httb);
1012 }
1013
1014 if (!tr.isFaulting())
1015 DPRINTF(SMMUv3, " Translated %saddr %#x to paddr %#x\n",
1016 context.stage1Enable ? "ip" : "v", addr, tr.addr);
1017
1018 if (smmu.ipaCacheEnable) {
1020 e.valid = true;
1021 e.ipaMask = tr.addrMask;
1022 e.ipa = addr & e.ipaMask;
1023 e.pa = tr.addr & tr.addrMask;
1024 e.permissions = tr.writable;
1025 e.vmid = context.vmid;
1026
1027 doSemaphoreDown(yield, smmu.ipaSem);
1030 }
1031
1032 return tr;
1033}
1034
1037 const TranslResult &s2tr) const
1038{
1039 if (s2tr.isFaulting())
1040 return s2tr;
1041
1042 assert(!s1tr.isFaulting());
1043
1044 TranslResult tr;
1045 tr.fault = Fault(FAULT_NONE);
1046 tr.addr = s2tr.addr;
1047 tr.addrMask = s1tr.addrMask | s2tr.addrMask;
1048 tr.writable = s1tr.writable & s2tr.writable;
1049
1050 return tr;
1051}
1052
1053bool
1055{
1056 Addr addr4k = request.addr & ~0xfffULL;
1057
1058 for (auto it = ifc.duplicateReqs.begin();
1059 it != ifc.duplicateReqs.end();
1060 ++it)
1061 {
1062 Addr other4k = (*it)->request.addr & ~0xfffULL;
1063 if (addr4k == other4k)
1064 return true;
1065 }
1066
1067 return false;
1068}
1069
1070void
1072{
1073 DPRINTF(SMMUv3Hazard, "4kReg: p=%p a4k=%#x\n",
1074 this, request.addr & ~0xfffULL);
1075
1076 ifc.duplicateReqs.push_back(this);
1077}
1078
1079void
1081{
1082 Addr addr4k = request.addr & ~0xfffULL;
1083
1084 bool found_hazard;
1085
1086 do {
1087 found_hazard = false;
1088
1089 for (auto it = ifc.duplicateReqs.begin();
1090 it!=ifc.duplicateReqs.end() && *it!=this;
1091 ++it)
1092 {
1093 Addr other4k = (*it)->request.addr & ~0xfffULL;
1094
1095 DPRINTF(SMMUv3Hazard, "4kHold: p=%p a4k=%#x Q: p=%p a4k=%#x\n",
1096 this, addr4k, *it, other4k);
1097
1098 if (addr4k == other4k) {
1099 DPRINTF(SMMUv3Hazard,
1100 "4kHold: p=%p a4k=%#x WAIT on p=%p a4k=%#x\n",
1101 this, addr4k, *it, other4k);
1102
1104
1105 DPRINTF(SMMUv3Hazard, "4kHold: p=%p a4k=%#x RESUME\n",
1106 this, addr4k);
1107
1108 // This is to avoid checking *it!=this after doWaitForSignal()
1109 // since it could have been deleted.
1110 found_hazard = true;
1111 break;
1112 }
1113 }
1114 } while (found_hazard);
1115}
1116
1117void
1119{
1120 DPRINTF(SMMUv3Hazard, "4kRel: p=%p a4k=%#x\n",
1121 this, request.addr & ~0xfffULL);
1122
1124
1125 for (it = ifc.duplicateReqs.begin(); it != ifc.duplicateReqs.end(); ++it)
1126 if (*it == this)
1127 break;
1128
1129 if (it == ifc.duplicateReqs.end())
1130 panic("hazard4kRelease: request not found");
1131
1132 ifc.duplicateReqs.erase(it);
1133
1135}
1136
1137void
1139{
1140 auto orderId = AMBA::orderId(request.pkt);
1141
1142 DPRINTF(SMMUv3Hazard, "IdReg: p=%p oid=%d\n", this, orderId);
1143
1144 assert(orderId < SMMU_MAX_TRANS_ID);
1145
1148 ifc.dependentWrites[orderId] : ifc.dependentReads[orderId];
1149 depReqs.push_back(this);
1150}
1151
1152void
1154{
1155 auto orderId = AMBA::orderId(request.pkt);
1156
1157 DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d\n", this, orderId);
1158
1161 ifc.dependentWrites[orderId] : ifc.dependentReads[orderId];
1163
1164 bool found_hazard;
1165
1166 do {
1167 found_hazard = false;
1168
1169 for (auto it = depReqs.begin(); it!=depReqs.end() && *it!=this; ++it) {
1170 DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d Q: %p\n",
1171 this, orderId, *it);
1172
1173 if (AMBA::orderId((*it)->request.pkt) == orderId) {
1174 DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d WAIT on=%p\n",
1175 this, orderId, *it);
1176
1178
1179 DPRINTF(SMMUv3Hazard, "IdHold: p=%p oid=%d RESUME\n",
1180 this, orderId);
1181
1182 // This is to avoid checking *it!=this after doWaitForSignal()
1183 // since it could have been deleted.
1184 found_hazard = true;
1185 break;
1186 }
1187 }
1188 } while (found_hazard);
1189}
1190
1191void
1193{
1194 auto orderId = AMBA::orderId(request.pkt);
1195
1196 DPRINTF(SMMUv3Hazard, "IdRel: p=%p oid=%d\n", this, orderId);
1197
1200 ifc.dependentWrites[orderId] : ifc.dependentReads[orderId];
1202
1203 for (it = depReqs.begin(); it != depReqs.end(); ++it) {
1204 if (*it == this)
1205 break;
1206 }
1207
1208 if (it == depReqs.end())
1209 panic("hazardIdRelease: request not found");
1210
1211 depReqs.erase(it);
1212
1214}
1215
1216void
1218{
1219 if (!smmu.system.isTimingMode())
1220 return;
1221
1223 return;
1224
1225 std::string proc_name = csprintf("%sprf", name());
1227 new SMMUTranslationProcess(proc_name, smmu, ifc);
1228
1229 proc->beginTransaction(
1232}
1233
1234void
1236 const TranslResult &tr)
1237{
1238 DPRINTF(SMMUv3, "Translation Fault (addr=%#x, size=%#x, sid=%d, ssid=%d, "
1239 "isWrite=%d, isPrefetch=%d, isAtsRequest=%d)\n",
1242
1243 // If eventq is not enabled, silently discard event
1244 // TODO: Handle full queue (we are currently aborting
1245 // in send event)
1246 if (smmu.regs.cr0 & CR0_EVENTQEN_MASK) {
1247 SMMUEvent event = generateEvent(tr);
1248
1249 sendEvent(yield, event);
1250 }
1251
1254
1255 if (smmu.system.isAtomicMode()) {
1257 } else if (smmu.system.isTimingMode()) {
1259 } else {
1260 panic("Not in atomic or timing mode");
1261 }
1262
1264
1265 SMMUAction a;
1266 // Send the bad address response to the client device
1268 a.pkt = request.pkt;
1269 a.ifc = &ifc;
1270 a.delay = 0;
1271 yield(a);
1272}
1273
1274void
1276 const TranslResult &tr)
1277{
1278 assert(!tr.isFaulting());
1279
1280 unsigned numRequestorBeats = request.isWrite ?
1283 1;
1284
1286 doDelay(yield, Cycles(numRequestorBeats));
1288
1289
1295
1297
1298
1299 SMMUAction a;
1300
1301 if (request.isAtsRequest) {
1303
1304 if (smmu.system.isAtomicMode()) {
1306 } else if (smmu.system.isTimingMode()) {
1308 } else {
1309 panic("Not in atomic or timing mode");
1310 }
1311 } else {
1312 a.type = ACTION_SEND_REQ_FINAL;
1313 a.ifc = &ifc;
1314 }
1315
1316 a.pkt = request.pkt;
1317 a.delay = 0;
1318
1319 a.pkt->setAddr(tr.addr);
1320 a.pkt->req->setPaddr(tr.addr);
1321
1322 yield(a);
1323
1324 if (!request.isAtsRequest) {
1325 PacketPtr pkt = yield.get();
1326 pkt->setAddr(request.addr);
1327
1328 a.type = ACTION_SEND_RESP;
1329 a.pkt = pkt;
1330 a.ifc = &ifc;
1331 a.delay = 0;
1332 yield(a);
1333 }
1334}
1335
1336void
1338{
1340
1341 SMMUAction a;
1343 a.pkt = NULL;
1344 a.ifc = &ifc;
1345 a.delay = 0;
1346 yield(a);
1347}
1348
1351{
1353 switch (tr.fault.type) {
1354 case FAULT_PERMISSION:
1355 case FAULT_TRANSLATION:
1356 event.data.dw0.streamId = request.sid;
1357 event.data.dw0.substreamId = request.ssid;
1358 event.data.dw1.rnw = !request.isWrite;
1359 event.data.dw2.inputAddr = request.addr;
1360 event.data.dw1.s2 = tr.fault.stage2;
1361 if (tr.fault.stage2) {
1362 // Only support non-secure mode in the SMMU
1363 event.data.dw1.nsipa = true;
1364 event.data.dw3.ipa = tr.fault.ipa;
1365 }
1366 event.data.dw1.clss = tr.fault.clss;
1367 break;
1368 default:
1369 panic("Unsupported fault: %d\n", tr.fault.type);
1370 }
1371
1372 return event;
1373}
1374
1375void
1377{
1378 int sizeMask = mask(smmu.regs.eventq_base & Q_BASE_SIZE_MASK);
1379
1380 if (((smmu.regs.eventq_prod+1) & sizeMask) ==
1381 (smmu.regs.eventq_cons & sizeMask))
1382 panic("Event queue full - aborting\n");
1383
1384 Addr event_addr =
1385 (smmu.regs.eventq_base & Q_BASE_ADDR_MASK) +
1386 (smmu.regs.eventq_prod & sizeMask) * sizeof(ev.data);
1387
1388 DPRINTF(SMMUv3, "Sending event to addr=%#08x (pos=%d): %s\n",
1389 event_addr, smmu.regs.eventq_prod, ev.print());
1390
1391 bool empty_queue = (smmu.regs.eventq_prod & sizeMask) ==
1392 (smmu.regs.eventq_cons & sizeMask);
1393
1394 // This deliberately resets the overflow field in eventq_prod!
1395 smmu.regs.eventq_prod = (smmu.regs.eventq_prod + 1) & sizeMask;
1396
1397 doWrite(yield, event_addr, &ev.data, sizeof(ev.data));
1398
1399 // Send an event queue interrupt when transitioning from empty to
1400 // non empty queue
1401 if (IRQCtrl irq_ctrl = smmu.regs.irq_ctrl;
1402 irq_ctrl.eventqIrqEn && empty_queue) {
1403
1404 sendEventInterrupt(yield);
1405 }
1406}
1407
1408void
1410{
1411 Addr msi_addr = smmu.regs.eventq_irq_cfg0 & E_BASE_ADDR_MASK;
1412
1413 // Check if MSIs are enabled by inspecting the SMMU_IDR.MSI bit
1414 // According to the SMMUv3 spec, using an address equal to 0
1415 // disables the sending of the MSI
1416 if (IDR0 idr0 = smmu.regs.idr0; idr0.msi && msi_addr != 0) {
1417 DPRINTF(SMMUv3, "Raise Event queue MSI\n");
1418 doWrite(yield, msi_addr,
1419 &smmu.regs.eventq_irq_cfg1, sizeof(smmu.regs.eventq_irq_cfg1));
1420 }
1421 if (smmu.eventqInterrupt) {
1422 DPRINTF(SMMUv3, "Raise Event queue wired interrupt\n");
1424 }
1425}
1426
1427void
1429 StreamTableEntry &ste,
1430 uint32_t sid)
1431{
1432 unsigned max_sid = 1 << (smmu.regs.strtab_base_cfg & ST_CFG_SIZE_MASK);
1433 if (sid >= max_sid)
1434 panic("SID %#x out of range, max=%#x", sid, max_sid);
1435
1436 Addr ste_addr;
1437
1438 if ((smmu.regs.strtab_base_cfg & ST_CFG_FMT_MASK) == ST_CFG_FMT_2LEVEL) {
1439 unsigned split =
1440 (smmu.regs.strtab_base_cfg & ST_CFG_SPLIT_MASK) >> ST_CFG_SPLIT_SHIFT;
1441
1442 if (split!= 7 && split!=8 && split!=16)
1443 panic("Invalid stream table split %d", split);
1444
1445 uint64_t l2_ptr;
1446 uint64_t l2_addr =
1447 (smmu.regs.strtab_base & VMT_BASE_ADDR_MASK) +
1448 bits(sid, 32, split) * sizeof(l2_ptr);
1449
1450 DPRINTF(SMMUv3, "Read L1STE at %#x\n", l2_addr);
1451
1452 doReadConfig(yield, l2_addr, &l2_ptr, sizeof(l2_ptr), sid, 0);
1453
1454 DPRINTF(SMMUv3, "Got L1STE L1 at %#x: 0x%016x\n", l2_addr, l2_ptr);
1455
1456 unsigned span = l2_ptr & ST_L2_SPAN_MASK;
1457 if (span == 0)
1458 panic("Invalid level 1 stream table descriptor");
1459
1460 unsigned index = bits(sid, split-1, 0);
1461 if (index >= (1 << span))
1462 panic("StreamID %d out of level 1 descriptor range %d",
1463 sid, 1<<span);
1464
1465 ste_addr = (l2_ptr & ST_L2_ADDR_MASK) + index * sizeof(ste);
1466
1468 } else if ((smmu.regs.strtab_base_cfg & ST_CFG_FMT_MASK)
1469 == ST_CFG_FMT_LINEAR) {
1470 ste_addr =
1471 (smmu.regs.strtab_base & VMT_BASE_ADDR_MASK) + sid * sizeof(ste);
1472 } else {
1473 panic("Invalid stream table format");
1474 }
1475
1476 DPRINTF(SMMUv3, "Read STE at %#x\n", ste_addr);
1477
1478 doReadConfig(yield, ste_addr, &ste, sizeof(ste), sid, 0);
1479
1480 DPRINTF(SMMUv3, "Got STE at %#x [0]: 0x%016x\n", ste_addr, ste.dw0);
1481 DPRINTF(SMMUv3, " STE at %#x [1]: 0x%016x\n", ste_addr, ste.dw1);
1482 DPRINTF(SMMUv3, " STE at %#x [2]: 0x%016x\n", ste_addr, ste.dw2);
1483 DPRINTF(SMMUv3, " STE at %#x [3]: 0x%016x\n", ste_addr, ste.dw3);
1484 DPRINTF(SMMUv3, " STE at %#x [4]: 0x%016x\n", ste_addr, ste._pad[0]);
1485 DPRINTF(SMMUv3, " STE at %#x [5]: 0x%016x\n", ste_addr, ste._pad[1]);
1486 DPRINTF(SMMUv3, " STE at %#x [6]: 0x%016x\n", ste_addr, ste._pad[2]);
1487 DPRINTF(SMMUv3, " STE at %#x [7]: 0x%016x\n", ste_addr, ste._pad[3]);
1488
1489 if (!ste.dw0.valid)
1490 panic("STE @ %#x not valid\n", ste_addr);
1491
1493}
1494
1498 const StreamTableEntry &ste,
1499 uint32_t sid, uint32_t ssid)
1500{
1501 TranslResult tr;
1502 Addr cd_addr = 0;
1503
1504 if (ste.dw0.s1cdmax == 0) {
1505 cd_addr = ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT;
1506 } else {
1507 unsigned max_ssid = 1 << ste.dw0.s1cdmax;
1508 if (ssid >= max_ssid)
1509 panic("SSID %#x out of range, max=%#x", ssid, max_ssid);
1510
1511 if (ste.dw0.s1fmt==STAGE1_CFG_2L_4K ||
1512 ste.dw0.s1fmt==STAGE1_CFG_2L_64K)
1513 {
1514 unsigned split = ste.dw0.s1fmt==STAGE1_CFG_2L_4K ? 7 : 11;
1515
1516 uint64_t l2_ptr;
1517 uint64_t l2_addr = (ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT) +
1518 bits(ssid, 24, split) * sizeof(l2_ptr);
1519
1520 if (context.stage2Enable) {
1521 tr = translateStage2(yield, l2_addr, false);
1522 if (tr.isFaulting()) {
1524 return tr;
1525 }
1526
1527 l2_addr = tr.addr;
1528 }
1529
1530 DPRINTF(SMMUv3, "Read L1CD at %#x\n", l2_addr);
1531
1532 doReadConfig(yield, l2_addr, &l2_ptr, sizeof(l2_ptr), sid, ssid);
1533
1534 DPRINTF(SMMUv3, "Got L1CD at %#x: 0x%016x\n", l2_addr, l2_ptr);
1535
1536 cd_addr = l2_ptr + bits(ssid, split-1, 0) * sizeof(cd);
1537
1539 } else if (ste.dw0.s1fmt == STAGE1_CFG_1L) {
1540 cd_addr = (ste.dw0.s1ctxptr << ST_CD_ADDR_SHIFT) + ssid*sizeof(cd);
1541 }
1542 }
1543
1544 if (context.stage2Enable) {
1545 tr = translateStage2(yield, cd_addr, false);
1546 if (tr.isFaulting()) {
1548 return tr;
1549 }
1550
1551 cd_addr = tr.addr;
1552 }
1553
1554 DPRINTF(SMMUv3, "Read CD at %#x\n", cd_addr);
1555
1556 doReadConfig(yield, cd_addr, &cd, sizeof(cd), sid, ssid);
1557
1558 DPRINTF(SMMUv3, "Got CD at %#x [0]: 0x%016x\n", cd_addr, cd.dw0);
1559 DPRINTF(SMMUv3, " CD at %#x [1]: 0x%016x\n", cd_addr, cd.dw1);
1560 DPRINTF(SMMUv3, " CD at %#x [2]: 0x%016x\n", cd_addr, cd.dw2);
1561 DPRINTF(SMMUv3, " CD at %#x [3]: 0x%016x\n", cd_addr, cd.mair);
1562 DPRINTF(SMMUv3, " CD at %#x [4]: 0x%016x\n", cd_addr, cd.amair);
1563 DPRINTF(SMMUv3, " CD at %#x [5]: 0x%016x\n", cd_addr, cd._pad[0]);
1564 DPRINTF(SMMUv3, " CD at %#x [6]: 0x%016x\n", cd_addr, cd._pad[1]);
1565 DPRINTF(SMMUv3, " CD at %#x [7]: 0x%016x\n", cd_addr, cd._pad[2]);
1566
1567
1568 if (!cd.dw0.valid)
1569 panic("CD @ %#x not valid\n", cd_addr);
1570
1572 return tr;
1573}
1574
1575void
1577 void *ptr, size_t size,
1578 uint32_t sid, uint32_t ssid)
1579{
1580 doRead(yield, addr, ptr, size);
1581}
1582
1583void
1585 void *ptr, unsigned stage,
1586 unsigned level)
1587{
1588 size_t pte_size = sizeof(PageTableOps::pte_t);
1589
1590 Addr mask = pte_size - 1;
1591 Addr base = addr & ~mask;
1592
1593 doRead(yield, base, ptr, pte_size);
1594}
1595
1596} // namespace gem5
#define DPRINTF(x,...)
Definition trace.hh:210
Base class for ARM GIC implementations.
void store(const Entry &incoming)
const Entry * lookup(Addr va, uint16_t asid, uint16_t vmid, bool updStats=true)
virtual void raise()=0
Signal an interrupt.
Tick clockEdge(Cycles cycles=Cycles(0)) const
Determine the tick when a cycle begins, by default the current one, but the argument also enables the...
const Entry * lookup(uint32_t sid, uint32_t ssid, bool updStats=true)
void store(const Entry &incoming)
CallerType: A reference to an object of this class will be passed to the coroutine task.
Definition coroutine.hh:85
Cycles is a wrapper class for representing cycle counts, i.e.
Definition types.hh:79
void store(const Entry &incoming)
const Entry * lookup(Addr ipa, uint16_t vmid, bool updStats=true)
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition packet.hh:295
void setBadAddress()
Definition packet.hh:786
Addr getAddr() const
Definition packet.hh:807
void makeTimingResponse()
Definition packet.hh:1080
void setAddr(Addr _addr)
Update the address of this packet mid-transaction.
Definition packet.hh:815
bool isWrite() const
Definition packet.hh:594
RequestPtr req
A pointer to the original request.
Definition packet.hh:377
unsigned getSize() const
Definition packet.hh:817
void makeAtomicResponse()
Definition packet.hh:1074
const std::string name() const
void doWrite(Yield &yield, Addr addr, const void *ptr, size_t size)
void doDelay(Yield &yield, Cycles cycles)
void doSemaphoreUp(SMMUSemaphore &sem)
void scheduleWakeup(Tick when)
void doBroadcastSignal(SMMUSignal &sig)
void doSemaphoreDown(Yield &yield, SMMUSemaphore &sem)
void doWaitForSignal(Yield &yield, SMMUSignal &sig)
void doRead(Yield &yield, Addr addr, void *ptr, size_t size)
void store(const Entry &incoming, AllocPolicy alloc)
const Entry * lookup(uint32_t sid, uint32_t ssid, Addr va, bool updStats=true)
bool ifcTLBLookup(Yield &yield, TranslResult &tr, bool &wasPrefetched)
void configCacheUpdate(Yield &yield, const TranslContext &tc)
TranslResult translateStage1And2(Yield &yield, Addr addr)
void smmuTLBUpdate(Yield &yield, const TranslResult &tr)
SMMUTranslationProcess(const std::string &name, SMMUv3 &_smmu, SMMUv3DeviceInterface &_ifc)
TranslResult smmuTranslation(Yield &yield)
TranslResult bypass(Addr addr) const
void beginTransaction(const SMMUTranslRequest &req)
bool findConfig(Yield &yield, TranslContext &tc, TranslResult &tr)
void walkCacheLookup(Yield &yield, const WalkCache::Entry *&walkEntry, Addr addr, uint16_t asid, uint16_t vmid, unsigned stage, unsigned level)
bool smmuTLBLookup(Yield &yield, TranslResult &tr)
virtual void main(Yield &yield)
void abortTransaction(Yield &yield, const TranslResult &tr)
SMMUEvent generateEvent(const TranslResult &tr)
void hazardIdRegister()
Used to force ordering on transactions with the same orderId.
TranslResult walkStage2(Yield &yield, Addr addr, bool final_tr, const ArmISA::PageTableOps *pt_ops, unsigned level, Addr walkPtr)
TranslResult translateStage2(Yield &yield, Addr addr, bool final_tr)
void doReadConfig(Yield &yield, Addr addr, void *ptr, size_t size, uint32_t sid, uint32_t ssid)
void doReadSTE(Yield &yield, StreamTableEntry &ste, uint32_t sid)
SMMUv3DeviceInterface & ifc
bool microTLBLookup(Yield &yield, TranslResult &tr)
bool configCacheLookup(Yield &yield, TranslContext &tc)
TranslResult doReadCD(Yield &yield, ContextDescriptor &cd, const StreamTableEntry &ste, uint32_t sid, uint32_t ssid)
bool hazard4kCheck()
Used to force ordering on transactions with same (SID, SSID, 4k page) to avoid multiple identical pag...
void ifcTLBUpdate(Yield &yield, const TranslResult &tr)
TranslResult walkStage1And2(Yield &yield, Addr addr, const ArmISA::PageTableOps *pt_ops, unsigned level, Addr walkPtr)
void sendEvent(Yield &yield, const SMMUEvent &ev)
void microTLBUpdate(Yield &yield, const TranslResult &tr)
void doReadPTE(Yield &yield, Addr va, Addr addr, void *ptr, unsigned stage, unsigned level)
void walkCacheUpdate(Yield &yield, Addr va, Addr vaMask, Addr pa, unsigned stage, unsigned level, bool leaf, uint8_t permissions)
void completePrefetch(Yield &yield)
GEM5_CLASS_VAR_USED Tick faultTick
TranslResult combineTranslations(const TranslResult &s1tr, const TranslResult &s2tr) const
void completeTransaction(Yield &yield, const TranslResult &tr)
void sendEventInterrupt(Yield &yield)
std::list< SMMUTranslationProcess * > dependentReads[SMMU_MAX_TRANS_ID]
std::list< SMMUTranslationProcess * > dependentWrites[SMMU_MAX_TRANS_ID]
std::list< SMMUTranslationProcess * > duplicateReqs
const System & system
Definition smmu_v3.hh:95
const Cycles tlbLat
Definition smmu_v3.hh:135
const bool ipaCacheEnable
Definition smmu_v3.hh:113
const unsigned requestPortWidth
Definition smmu_v3.hh:120
gem5::SMMUv3::SMMUv3Stats stats
SMMUSemaphore requestPortSem
Definition smmu_v3.hh:128
SMMUSemaphore walkSem
Definition smmu_v3.hh:127
const bool configCacheEnable
Definition smmu_v3.hh:112
SMMUSemaphore ptwSem
Definition smmu_v3.hh:131
SMMUSemaphore cycleSem
Definition smmu_v3.hh:132
SMMUSemaphore tlbSem
Definition smmu_v3.hh:122
ARMArchTLB tlb
Definition smmu_v3.hh:106
SMMUSemaphore transSem
Definition smmu_v3.hh:130
ConfigCache configCache
Definition smmu_v3.hh:107
WalkCache walkCache
Definition smmu_v3.hh:109
const Cycles configLat
Definition smmu_v3.hh:138
const bool tlbEnable
Definition smmu_v3.hh:111
const Cycles smmuIfcLat
Definition smmu_v3.hh:137
ArmInterruptPin *const eventqInterrupt
Definition smmu_v3.hh:104
const Cycles ifcSmmuLat
Definition smmu_v3.hh:136
const bool walkCacheNonfinalEnable
Definition smmu_v3.hh:117
const Cycles walkLat
Definition smmu_v3.hh:140
const unsigned walkCacheS1Levels
Definition smmu_v3.hh:118
const bool walkCacheEnable
Definition smmu_v3.hh:114
void scheduleDeviceRetries()
Definition smmu_v3.cc:218
const unsigned walkCacheS2Levels
Definition smmu_v3.hh:119
IPACache ipaCache
Definition smmu_v3.hh:108
SMMUSemaphore ifcSmmuSem
Definition smmu_v3.hh:123
SMMUSemaphore smmuIfcSem
Definition smmu_v3.hh:124
SMMURegs regs
Definition smmu_v3.hh:159
SMMUAction runProcessTiming(SMMUProcess *proc, PacketPtr pkt)
Definition smmu_v3.cc:287
const Cycles ipaLat
Definition smmu_v3.hh:139
SMMUSemaphore configSem
Definition smmu_v3.hh:125
SMMUSemaphore ipaSem
Definition smmu_v3.hh:126
bool isAtomicMode() const
Is the system in atomic mode?
Definition system.hh:258
bool isTimingMode() const
Is the system in timing mode?
Definition system.hh:270
void store(const Entry &incoming)
const Entry * lookup(Addr va, Addr vaMask, uint16_t asid, uint16_t vmid, unsigned stage, unsigned level, bool updStats=true)
void sample(const U &v, int n=1)
Add a value to the distribution n times.
STL list class.
Definition stl.hh:51
constexpr T bits(T val, unsigned first, unsigned last)
Extract the bitfield from position 'first' to 'last' (inclusive) from 'val' and right justify it.
Definition bitfield.hh:79
std::enable_if_t<!std::is_same_v< T, void >, T > get()
get() is the way we can extrapolate arguments from the coroutine caller.
Definition coroutine.hh:141
void signalDrainDone() const
Signal that an object is drained.
Definition drain.hh:305
#define panic(...)
This implements a cprintf based panic() function.
Definition logging.hh:188
static OrderID orderId(PacketPtr pkt)
Definition amba.hh:52
const PageTableOps * getPageTableOps(GrainSize trans_granule)
Definition pagetable.cc:476
Bitfield< 32 > cd
Bitfield< 3, 0 > mask
Definition pcstate.hh:63
Bitfield< 9 > e
Definition misc_types.hh:65
Bitfield< 23 > span
Bitfield< 8 > a
Definition misc_types.hh:66
const GrainSize GrainMap_tg0[]
Definition pagetable.cc:49
Bitfield< 39, 12 > pa
Bitfield< 8 > va
Bitfield< 10, 5 > event
Bitfield< 30, 0 > index
Bitfield< 51, 12 > base
Definition pagetable.hh:141
Bitfield< 20 > level
Definition intmessage.hh:51
Bitfield< 3 > addr
Definition types.hh:84
Copyright (c) 2024 - Pranith Kumar Copyright (c) 2020 Inria All rights reserved.
Definition binary32.hh:36
std::shared_ptr< FaultBase > Fault
Definition types.hh:249
@ STE_CONFIG_STAGE1_ONLY
@ STE_CONFIG_STAGE2_ONLY
@ STE_CONFIG_BYPASS
@ STE_CONFIG_STAGE1_AND_2
@ STAGE1_CFG_2L_4K
@ STAGE1_CFG_1L
@ STAGE1_CFG_2L_64K
@ ST_CFG_FMT_LINEAR
@ ST_CFG_FMT_2LEVEL
@ ST_CFG_SPLIT_MASK
@ Q_BASE_ADDR_MASK
@ ST_L2_ADDR_MASK
@ Q_BASE_SIZE_MASK
@ ST_CFG_SIZE_MASK
@ ST_L2_SPAN_MASK
@ ST_CFG_FMT_MASK
@ VMT_BASE_ADDR_MASK
@ E_BASE_ADDR_MASK
@ CR0_EVENTQEN_MASK
@ CR0_SMMUEN_MASK
Tick curTick()
The universal simulation clock.
Definition cur_tick.hh:46
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition types.hh:147
uint64_t Tick
Tick count type.
Definition types.hh:58
Bitfield< 10 > ats
@ ST_CFG_SPLIT_SHIFT
@ STE_S2TTB_SHIFT
@ ST_CD_ADDR_SHIFT
@ CD_TTB_SHIFT
@ TRANS_GRANULE_INVALID
std::string csprintf(const char *format, const Args &...args)
Definition cprintf.hh:161
@ SMMU_MAX_TRANS_ID
@ ACTION_SEND_RESP
@ ACTION_TERMINATE
@ ACTION_INITIAL_NOP
@ ACTION_SEND_RESP_ATS
@ ACTION_SEND_REQ_FINAL
This is an implementation of the SMMUv3 architecture.
virtual bool isWritable(pte_t pte, unsigned level, bool stage2) const =0
Addr walkMask(unsigned level) const
Definition pagetable.cc:55
virtual LookupLevel lastLevel() const =0
virtual Addr nextLevelPointer(pte_t pte, unsigned level) const =0
virtual bool isValid(pte_t pte, unsigned level) const =0
virtual Addr pageMask(pte_t pte, unsigned level) const =0
virtual Addr index(Addr va, unsigned level, int tsz) const =0
virtual bool isLeaf(pte_t pte, unsigned level) const =0
SMMUActionType type
Bitfield< 63, 32 > streamId
struct gem5::SMMUEvent::Data data
std::string print() const
static SMMUTranslRequest fromPacket(PacketPtr pkt, bool ats=false)
static SMMUTranslRequest prefetch(Addr addr, uint32_t sid, uint32_t ssid)
statistics::Distribution ptwTimeDist
Definition smmu_v3.hh:151
statistics::Scalar cdL1Fetches
Definition smmu_v3.hh:148
statistics::Scalar steL1Fetches
Definition smmu_v3.hh:146
statistics::Scalar steFetches
Definition smmu_v3.hh:147
statistics::Distribution translationTimeDist
Definition smmu_v3.hh:150
statistics::Scalar cdFetches
Definition smmu_v3.hh:149
Bitfield< 51, 6 > s1ctxptr
Bitfield< 3, 1 > config
Bitfield< 5, 4 > s1fmt
Bitfield< 37, 32 > s2t0sz
Bitfield< 47, 46 > s2tg
Bitfield< 63, 59 > s1cdmax
const std::string & name()
Definition trace.cc:48

Generated on Tue Jun 18 2024 16:24:03 for gem5 by doxygen 1.11.0