gem5 [DEVELOP-FOR-25.1]
Loading...
Searching...
No Matches
smmu_v3.cc
Go to the documentation of this file.
1/*
2 * Copyright (c) 2013, 2018-2020, 2024 Arm Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38#include "dev/arm/smmu_v3.hh"
39
40#include <cstddef>
41#include <cstring>
42
43#include "base/bitfield.hh"
44#include "base/cast.hh"
45#include "base/compiler.hh"
46#include "base/logging.hh"
47#include "base/trace.hh"
48#include "base/types.hh"
49#include "debug/Checkpoint.hh"
50#include "debug/SMMUv3.hh"
51#include "dev/arm/base_gic.hh"
53#include "mem/packet_access.hh"
54#include "params/SMMUv3.hh"
55#include "sim/system.hh"
56
57namespace gem5
58{
59
// Constructor: builds the SMMUv3 model from its Python-side parameters --
// memory-side ports, TLB/config/IPA/walk caches, the throttling semaphores
// and per-structure latencies -- then zeroes the architectural register
// file and programs the read-only ID registers.
// NOTE(review): this is a doxygen-exported listing; a few original source
// lines (e.g. the base-class initializers on lines 61-62) are absent.
60SMMUv3::SMMUv3(const SMMUv3Params &params) :
63 requestorId(params.system->getRequestorId(this)),
64 requestPort(name() + ".request", *this),
65 tableWalkPort(name() + ".walker", *this),
66 controlPort(name() + ".control", *this, params.reg_map),
67 eventqInterrupt(params.eventq_irq ? params.eventq_irq->get() : nullptr),
68 tlb(params.tlb_entries, params.tlb_assoc, params.tlb_policy, this),
69 configCache(params.cfg_entries, params.cfg_assoc, params.cfg_policy, this),
70 ipaCache(params.ipa_entries, params.ipa_assoc, params.ipa_policy, this),
71 walkCache({ { params.walk_S1L0, params.walk_S1L1,
72 params.walk_S1L2, params.walk_S1L3,
73 params.walk_S2L0, params.walk_S2L1,
74 params.walk_S2L2, params.walk_S2L3 } },
75 params.walk_assoc, params.walk_policy, this),
76 tlbEnable(params.tlb_enable),
77 configCacheEnable(params.cfg_enable),
78 ipaCacheEnable(params.ipa_enable),
79 walkCacheEnable(params.walk_enable),
80 tableWalkPortEnable(false),
81 walkCacheNonfinalEnable(params.wc_nonfinal_enable),
82 walkCacheS1Levels(params.wc_s1_levels),
83 walkCacheS2Levels(params.wc_s2_levels),
84 requestPortWidth(params.request_port_width),
85 tlbSem(params.tlb_slots),
86 ifcSmmuSem(1),
87 smmuIfcSem(1),
88 configSem(params.cfg_slots),
89 ipaSem(params.ipa_slots),
90 walkSem(params.walk_slots),
91 requestPortSem(1),
92 transSem(params.xlate_slots),
93 ptwSem(params.ptw_slots),
94 cycleSem(1),
95 tlbLat(params.tlb_lat),
96 ifcSmmuLat(params.ifc_smmu_lat),
97 smmuIfcLat(params.smmu_ifc_lat),
98 configLat(params.cfg_lat),
99 ipaLat(params.ipa_lat),
100 walkLat(params.walk_lat),
101 stats(this),
102 deviceInterfaces(params.device_interfaces),
103 commandExecutor(name() + ".cmd_exec", *this),
104 regsMap(params.reg_map),
105 processCommandsEvent(*this)
106{
// The MMIO window handed down from Python must exactly cover the
// architectural register file, otherwise offsets computed in
// readControl()/writeControl() would be wrong.
107 fatal_if(regsMap.size() != SMMU_REG_SIZE,
108 "Invalid register map size: %#x different than SMMU_REG_SIZE = %#x\n",
109 regsMap.size(), SMMU_REG_SIZE);
110
111 // Init smmu registers to 0
112 memset(&regs, 0, sizeof(regs));
113
114 // Setup RO ID registers
115 regs.idr0 = params.smmu_idr0;
116 regs.idr1 = params.smmu_idr1;
117 regs.idr2 = params.smmu_idr2;
118 regs.idr3 = params.smmu_idr3;
119 regs.idr4 = params.smmu_idr4;
120 regs.idr5 = params.smmu_idr5;
121 regs.iidr = params.smmu_iidr;
122 regs.aidr = params.smmu_aidr;
123
124 // TODO: At the moment it is possible to set the ID registers to hold
125 // any possible value. It would be nice to have a sanity check here
126 // at construction time in case some IDx registers are programmed to
127 // store disallowed values or if there are configuration conflicts.
128 warn("SMMUv3 IDx register values unchecked\n");
129
// Give every attached device interface a back-pointer to this SMMU.
130 for (auto ifc : deviceInterfaces)
131 ifc->setSMMU(this);
132}
133
// Timing response arriving on the request (memory-side) port. The packet's
// sender state identifies the SMMUProcess that issued the request; resume
// it with the response. Always returns true (responses are never refused).
// NOTE(review): the line recovering `proc` from the packet's sender state
// (original line 144, presumably via safe_cast/popSenderState) is absent
// from this doxygen export.
134bool
136{
137 DPRINTF(SMMUv3, "[t] requestor resp addr=%#x size=%#x\n",
138 pkt->getAddr(), pkt->getSize());
139
140 // @todo: We need to pay for this and not just zero it out
141 pkt->headerDelay = pkt->payloadDelay = 0;
142
143 SMMUProcess *proc =
145
146 runProcessTiming(proc, pkt);
147
148 return true;
149}
150
// Retry callback from the downstream memory system on the request port:
// replay the queued packets in order until one is refused again.
// NOTE(review): the statement under the final `if` (original line 176,
// presumably the call unstalling the device interfaces) is absent from
// this doxygen export.
151void
153{
154 assert(!packetsToRetry.empty());
155
156 while (!packetsToRetry.empty()) {
157 SMMUAction a = packetsToRetry.front();
158
159 assert(a.type==ACTION_SEND_REQ || a.type==ACTION_SEND_REQ_FINAL);
160
161 DPRINTF(SMMUv3, "[t] requestor retr addr=%#x size=%#x\n",
162 a.pkt->getAddr(), a.pkt->getSize());
163
// Stop replaying as soon as the downstream port refuses a packet;
// it will send another retry later.
164 if (!requestPort.sendTimingReq(a.pkt))
165 break;
166
167 packetsToRetry.pop();
168
169 /*
170 * ACTION_SEND_REQ_FINAL means that we have just forwarded the packet
171 * on the requestor interface; this means that we no longer hold on to
172 * that transaction and therefore can accept a new one.
173 * If the response port was stalled then unstall it (send retry).
174 */
175 if (a.type == ACTION_SEND_REQ_FINAL)
177 }
178}
179
// Timing response arriving on the dedicated table-walker port; mirrors
// recvTimingResp() but for MMU-initiated (HWTW) requests.
// NOTE(review): the line recovering `proc` from the packet's sender state
// (original line 190) is absent from this doxygen export.
180bool
182{
183 DPRINTF(SMMUv3, "[t] requestor HWTW resp addr=%#x size=%#x\n",
184 pkt->getAddr(), pkt->getSize());
185
186 // @todo: We need to pay for this and not just zero it out
187 pkt->headerDelay = pkt->payloadDelay = 0;
188
189 SMMUProcess *proc =
191
192 runProcessTiming(proc, pkt);
193
194 return true;
195}
196
// Retry callback on the table-walker port: replay queued HWTW packets
// until one is refused again. Only meaningful when the walker port is
// enabled (i.e. connected).
// NOTE(review): the lines fetching the front element (original line 204)
// and popping it (original line 214) are absent from this doxygen export.
197void
199{
200 assert(tableWalkPortEnable);
201 assert(!packetsTableWalkToRetry.empty());
202
203 while (!packetsTableWalkToRetry.empty()) {
205
206 assert(a.type==ACTION_SEND_REQ);
207
208 DPRINTF(SMMUv3, "[t] requestor HWTW retr addr=%#x size=%#x\n",
209 a.pkt->getAddr(), a.pkt->getSize());
210
211 if (!tableWalkPort.sendTimingReq(a.pkt))
212 break;
213
215 }
216}
217
// Ask every attached device interface to schedule a retry towards its
// device, unstalling response paths that were blocked on this SMMU.
218void
220{
221 for (auto ifc : deviceInterfaces) {
222 ifc->scheduleDeviceRetry();
223 }
224}
225
// Dispatch a process to the atomic or timing execution path depending on
// the system's current memory mode; any other mode is a fatal error.
// NOTE(review): the signature line (SMMUAction SMMUv3::runProcess(...))
// is absent from this doxygen export.
228{
229 if (system.isAtomicMode()) {
230 return runProcessAtomic(proc, pkt);
231 } else if (system.isTimingMode()) {
232 return runProcessTiming(proc, pkt);
233 } else {
234 panic("Not in timing or atomic mode!");
235 }
236}
237
// Atomic-mode driver: repeatedly step the process, forwarding its
// requests immediately via sendAtomic() and accumulating the returned
// latencies, until the process produces a terminal action
// (response/sleep). The summed delay is stored into the final action.
// NOTE(review): the signature line, the `if (tableWalkPortEnable) {`
// guard (original line 253) and two case labels (ACTION_SEND_REQ_FINAL,
// ACTION_SEND_RESP_ATS) are absent from this doxygen export.
240{
241 SMMUAction action;
242 Tick delay = 0;
243 bool finished = false;
244
245 do {
246 action = proc->run(pkt);
247
248 switch (action.type) {
249 case ACTION_SEND_REQ:
250 // Send an MMU initiated request on the table walk port if
251 // it is enabled. Otherwise, fall through and handle same
252 // as the final ACTION_SEND_REQ_FINAL request.
254 delay += tableWalkPort.sendAtomic(action.pkt);
255 pkt = action.pkt;
256 break;
257 }
258 [[fallthrough]];
260 delay += requestPort.sendAtomic(action.pkt);
261 pkt = action.pkt;
262 break;
263
264 case ACTION_SEND_RESP:
266 case ACTION_SLEEP:
267 finished = true;
268 break;
269
270 case ACTION_DELAY:
// In atomic mode a requested stall simply adds to the latency.
271 delay += action.delay;
272 break;
273
274 case ACTION_TERMINATE:
275 panic("ACTION_TERMINATE in atomic mode\n");
276
277 default:
278 panic("Unknown action\n");
279 }
280 } while (!finished);
281
282 action.delay = delay;
283
284 return action;
285}
286
// Timing-mode driver: step the process once and act on the returned
// action. Outgoing requests carry the process as sender state so the
// response handlers can resume it; refused packets are queued for replay
// by the recvReqRetry()/tableWalkRecvReqRetry() callbacks. Responses are
// scheduled on the originating device interface and the process deleted.
// NOTE(review): the signature line, the `if (tableWalkPortEnable) {`
// guard, the ACTION_SEND_REQ_FINAL and ACTION_SEND_RESP_ATS case labels
// and two statements inside the success branches are absent from this
// doxygen export.
289{
290 SMMUAction action = proc->run(pkt);
291
292 switch (action.type) {
293 case ACTION_SEND_REQ:
294 // Send an MMU initiated request on the table walk port if it is
295 // enabled. Otherwise, fall through and handle same as the final
296 // ACTION_SEND_REQ_FINAL request.
298 action.pkt->pushSenderState(proc);
299
300 DPRINTF(SMMUv3, "[t] requestor HWTW req addr=%#x size=%#x\n",
301 action.pkt->getAddr(), action.pkt->getSize());
302
// Only try to send if nothing is already queued, to preserve
// packet ordering across retries.
303 if (packetsTableWalkToRetry.empty()
304 && tableWalkPort.sendTimingReq(action.pkt)) {
306 } else {
307 DPRINTF(SMMUv3, "[t] requestor HWTW req needs retry,"
308 " qlen=%d\n", packetsTableWalkToRetry.size());
309 packetsTableWalkToRetry.push(action);
310 }
311
312 break;
313 }
314 [[fallthrough]];
316 action.pkt->pushSenderState(proc);
317
318 DPRINTF(SMMUv3, "[t] requestor req addr=%#x size=%#x\n",
319 action.pkt->getAddr(), action.pkt->getSize());
320
321 if (packetsToRetry.empty() &&
322 requestPort.sendTimingReq(action.pkt)) {
324 } else {
325 DPRINTF(SMMUv3, "[t] requestor req needs retry, qlen=%d\n",
326 packetsToRetry.size());
327 packetsToRetry.push(action);
328 }
329
330 break;
331
332 case ACTION_SEND_RESP:
333 // @todo: We need to pay for this and not just zero it out
334 action.pkt->headerDelay = action.pkt->payloadDelay = 0;
335
336 DPRINTF(SMMUv3, "[t] responder resp addr=%#x size=%#x\n",
337 action.pkt->getAddr(),
338 action.pkt->getSize());
339
340 assert(action.ifc);
341 action.ifc->schedTimingResp(action.pkt);
342
// The translation is complete; the process object is no longer needed.
343 delete proc;
344 break;
345
347 // @todo: We need to pay for this and not just zero it out
348 action.pkt->headerDelay = action.pkt->payloadDelay = 0;
349
350 DPRINTF(SMMUv3, "[t] ATS responder resp addr=%#x size=%#x\n",
351 action.pkt->getAddr(), action.pkt->getSize());
352
353 assert(action.ifc);
354 action.ifc->schedAtsTimingResp(action.pkt);
355
356 delete proc;
357 break;
358
359 case ACTION_DELAY:
360 case ACTION_SLEEP:
// The process parked itself; it will be resumed by a later event
// or response, so nothing to do here.
361 break;
362
363 case ACTION_TERMINATE:
364 delete proc;
365 break;
366
367 default:
368 panic("Unknown action\n");
369 }
370
371 return action;
372}
373
// Drain the command queue: in atomic mode run the command executor to
// completion immediately; in timing mode kick it off only if it is not
// already busy.
// NOTE(review): the signature line and the two statements starting the
// executor (original lines 380 and 384) are absent from this doxygen
// export.
374void
376{
377 DPRINTF(SMMUv3, "processCommands()\n");
378
379 if (system.isAtomicMode()) {
381 (void) a;
382 } else if (system.isTimingMode()) {
383 if (!commandExecutor.isBusy())
385 } else {
386 panic("Not in timing or atomic mode!");
387 }
388}
389
// Execute a single SMMUv3 command-queue command. Prefetch commands are
// ignored; CFGI_* commands invalidate cached configuration (ConfigCache
// plus the per-device micro/main TLBs, which cache config-derived
// entries); TLBI_* commands invalidate translation state in the shared
// TLB, IPA cache and walk cache as appropriate. Unimplemented commands
// only warn.
// NOTE(review): the signature line (original line 391) is absent from
// this doxygen export.
390void
392{
393 switch (cmd.dw0.type) {
394 case CMD_PRF_CONFIG:
395 DPRINTF(SMMUv3, "CMD_PREFETCH_CONFIG - ignored\n");
396 break;
397
398 case CMD_PRF_ADDR:
399 DPRINTF(SMMUv3, "CMD_PREFETCH_ADDR - ignored\n");
400 break;
401
402 case CMD_CFGI_STE: {
403 DPRINTF(SMMUv3, "CMD_CFGI_STE sid=%#x\n", cmd.dw0.sid);
404 configCache.invalidateSID(cmd.dw0.sid);
405
406 for (auto dev_interface : deviceInterfaces) {
407 dev_interface->microTLB->invalidateSID(cmd.dw0.sid);
408 dev_interface->mainTLB->invalidateSID(cmd.dw0.sid);
409 }
410 break;
411 }
412
413 case CMD_CFGI_STE_RANGE: {
414 const auto range = cmd.dw1.range;
415 if (range == 31) {
416 // CMD_CFGI_ALL is an alias of CMD_CFGI_STE_RANGE with
417 // range = 31
418 DPRINTF(SMMUv3, "CMD_CFGI_ALL\n");
419 configCache.invalidateAll();
420
421 for (auto dev_interface : deviceInterfaces) {
422 dev_interface->microTLB->invalidateAll();
423 dev_interface->mainTLB->invalidateAll();
424 }
425 } else {
426 DPRINTF(SMMUv3, "CMD_CFGI_STE_RANGE\n");
// The command names a naturally aligned block of 2^(range+1)
// StreamIDs; align the base down and invalidate each SID in it.
427 const auto start_sid = cmd.dw0.sid & ~((1 << (range + 1)) - 1);
428 const auto end_sid = start_sid + (1 << (range + 1)) - 1;
429 for (auto sid = start_sid; sid <= end_sid; sid++) {
430 configCache.invalidateSID(sid);
431
432 for (auto dev_interface : deviceInterfaces) {
433 dev_interface->microTLB->invalidateSID(sid);
434 dev_interface->mainTLB->invalidateSID(sid);
435 }
436 }
437 }
438 break;
439 }
440
441 case CMD_CFGI_CD: {
442 DPRINTF(SMMUv3, "CMD_CFGI_CD sid=%#x ssid=%#x\n",
443 cmd.dw0.sid, cmd.dw0.ssid);
444 configCache.invalidateSSID(cmd.dw0.sid, cmd.dw0.ssid);
445
446 for (auto dev_interface : deviceInterfaces) {
447 dev_interface->microTLB->invalidateSSID(
448 cmd.dw0.sid, cmd.dw0.ssid);
449 dev_interface->mainTLB->invalidateSSID(
450 cmd.dw0.sid, cmd.dw0.ssid);
451 }
452 break;
453 }
454
455 case CMD_CFGI_CD_ALL: {
456 DPRINTF(SMMUv3, "CMD_CFGI_CD_ALL sid=%#x\n", cmd.dw0.sid);
457 configCache.invalidateSID(cmd.dw0.sid);
458
459 for (auto dev_interface : deviceInterfaces) {
460 dev_interface->microTLB->invalidateSID(cmd.dw0.sid);
461 dev_interface->mainTLB->invalidateSID(cmd.dw0.sid);
462 }
463 break;
464 }
465
466 case CMD_TLBI_NH_ALL: {
467 DPRINTF(SMMUv3, "CMD_TLBI_NH_ALL vmid=%#x\n", cmd.dw0.vmid);
468 for (auto dev_interface : deviceInterfaces) {
469 dev_interface->microTLB->invalidateVMID(cmd.dw0.vmid);
470 dev_interface->mainTLB->invalidateVMID(cmd.dw0.vmid);
471 }
472 tlb.invalidateVMID(cmd.dw0.vmid);
473 walkCache.invalidateVMID(cmd.dw0.vmid);
474 break;
475 }
476
477 case CMD_TLBI_NH_ASID: {
478 DPRINTF(SMMUv3, "CMD_TLBI_NH_ASID asid=%#x vmid=%#x\n",
479 cmd.dw0.asid, cmd.dw0.vmid);
480 for (auto dev_interface : deviceInterfaces) {
481 dev_interface->microTLB->invalidateASID(
482 cmd.dw0.asid, cmd.dw0.vmid);
483 dev_interface->mainTLB->invalidateASID(
484 cmd.dw0.asid, cmd.dw0.vmid);
485 }
486 tlb.invalidateASID(cmd.dw0.asid, cmd.dw0.vmid);
487 walkCache.invalidateASID(cmd.dw0.asid, cmd.dw0.vmid);
488 break;
489 }
490
491 case CMD_TLBI_NH_VAA: {
492 const Addr addr = cmd.addr();
493 DPRINTF(SMMUv3, "CMD_TLBI_NH_VAA va=%#08x vmid=%#x\n",
494 addr, cmd.dw0.vmid);
495 for (auto dev_interface : deviceInterfaces) {
496 dev_interface->microTLB->invalidateVAA(
497 addr, cmd.dw0.vmid);
498 dev_interface->mainTLB->invalidateVAA(
499 addr, cmd.dw0.vmid);
500 }
501 tlb.invalidateVAA(addr, cmd.dw0.vmid);
// The Leaf hint restricts walk-cache invalidation to last-level
// entries only.
502 const bool leaf_only = cmd.dw1.leaf ? true : false;
503 walkCache.invalidateVAA(addr, cmd.dw0.vmid, leaf_only);
504 break;
505 }
506
507 case CMD_TLBI_NH_VA: {
508 const Addr addr = cmd.addr();
509 DPRINTF(SMMUv3, "CMD_TLBI_NH_VA va=%#08x asid=%#x vmid=%#x\n",
510 addr, cmd.dw0.asid, cmd.dw0.vmid);
511 for (auto dev_interface : deviceInterfaces) {
512 dev_interface->microTLB->invalidateVA(
513 addr, cmd.dw0.asid, cmd.dw0.vmid);
514 dev_interface->mainTLB->invalidateVA(
515 addr, cmd.dw0.asid, cmd.dw0.vmid);
516 }
517 tlb.invalidateVA(addr, cmd.dw0.asid, cmd.dw0.vmid);
518 const bool leaf_only = cmd.dw1.leaf ? true : false;
519 walkCache.invalidateVA(addr, cmd.dw0.asid, cmd.dw0.vmid,
520 leaf_only);
521 break;
522 }
523
524 case CMD_TLBI_S2_IPA: {
525 const Addr addr = cmd.addr();
526 DPRINTF(SMMUv3, "CMD_TLBI_S2_IPA ipa=%#08x vmid=%#x\n",
527 addr, cmd.dw0.vmid);
528 // This does not invalidate TLBs containing
529 // combined Stage1 + Stage2 translations, as per the spec.
530 ipaCache.invalidateIPA(addr, cmd.dw0.vmid);
531
532 if (!cmd.dw1.leaf)
533 walkCache.invalidateVMID(cmd.dw0.vmid);
534 break;
535 }
536
537 case CMD_TLBI_S12_VMALL: {
538 DPRINTF(SMMUv3, "CMD_TLBI_S12_VMALL vmid=%#x\n", cmd.dw0.vmid);
539 for (auto dev_interface : deviceInterfaces) {
540 dev_interface->microTLB->invalidateVMID(cmd.dw0.vmid);
541 dev_interface->mainTLB->invalidateVMID(cmd.dw0.vmid);
542 }
543 tlb.invalidateVMID(cmd.dw0.vmid);
544 ipaCache.invalidateVMID(cmd.dw0.vmid);
545 walkCache.invalidateVMID(cmd.dw0.vmid);
546 break;
547 }
548
549 case CMD_TLBI_NSNH_ALL: {
550 DPRINTF(SMMUv3, "CMD_TLBI_NSNH_ALL\n");
551 for (auto dev_interface : deviceInterfaces) {
552 dev_interface->microTLB->invalidateAll();
553 dev_interface->mainTLB->invalidateAll();
554 }
555 tlb.invalidateAll();
556 ipaCache.invalidateAll();
557 walkCache.invalidateAll();
558 break;
559 }
560
561 case CMD_RESUME:
562 DPRINTF(SMMUv3, "CMD_RESUME\n");
563 panic("resume unimplemented");
564 break;
565
566 default:
567 warn("Unimplemented command %#x\n", cmd.dw0.type);
568 break;
569 }
570}
571
// MMIO read of an SMMU control register. Computes the register-file
// offset from the packet address, services 32- and 64-bit accesses from
// the raw register backing store, and completes the packet. Returns a
// latency of 0 ticks. Reads of the (unimplemented) secure block warn but
// still return the stored (zero) value.
// NOTE(review): the signature line (original line 573) is absent from
// this doxygen export.
572Tick
574{
575 DPRINTF(SMMUv3, "readControl: addr=%08x size=%d\n",
576 pkt->getAddr(), pkt->getSize());
577
578 int offset = pkt->getAddr() - regsMap.start();
579 assert(offset >= 0 && offset < SMMU_REG_SIZE);
580
581 if (inSecureBlock(offset)) {
582 warn("smmu: secure registers (0x%x) are not implemented\n",
583 offset);
584 }
585
586 auto reg_ptr = regs.data + offset;
587
// Only naturally sized 32/64-bit accesses are architecturally allowed.
588 switch (pkt->getSize()) {
589 case sizeof(uint32_t):
590 pkt->setLE<uint32_t>(*reinterpret_cast<uint32_t *>(reg_ptr));
591 break;
592 case sizeof(uint64_t):
593 pkt->setLE<uint64_t>(*reinterpret_cast<uint64_t *>(reg_ptr));
594 break;
595 default:
596 panic("smmu: unallowed access size: %d bytes\n", pkt->getSize());
597 break;
598 }
599
600 pkt->makeAtomicResponse();
601
602 return 0;
603}
604
// MMIO write of an SMMU control register. Dispatches on the register
// offset: enable/ack registers (CR0, IRQ_CTRL) mirror the written value
// into their ACK counterpart; queue base writes reset the matching
// producer/consumer indices; CMDQ registers are write-protected while the
// command queue is enabled; anything else warns (secure block or RO/
// undefined). Returns a latency of 0 ticks.
// NOTE(review): the signature line and the statement after the CMDQ_PROD
// update (original line 653, presumably scheduling command processing)
// are absent from this doxygen export.
605Tick
607{
608 int offset = pkt->getAddr() - regsMap.start();
609 assert(offset >= 0 && offset < SMMU_REG_SIZE);
610
611 DPRINTF(SMMUv3, "writeControl: addr=%08x size=%d data=%16x\n",
612 pkt->getAddr(), pkt->getSize(),
613 pkt->getSize() == sizeof(uint64_t) ?
614 pkt->getLE<uint64_t>() : pkt->getLE<uint32_t>());
615
616 switch (offset) {
617 case offsetof(SMMURegs, cr0):
618 assert(pkt->getSize() == sizeof(uint32_t));
// CR0ACK immediately reflects CR0: updates take effect at once.
619 regs.cr0 = regs.cr0ack = pkt->getLE<uint32_t>();
620 break;
621 case offsetof(SMMURegs, irq_ctrl):
622 assert(pkt->getSize() == sizeof(uint32_t));
623 warn("SMMUv3::%s No support for GERROR and PRI interrupt sources",
624 __func__);
625 regs.irq_ctrl = regs.irq_ctrlack = pkt->getLE<uint32_t>();
626 break;
627
628 case offsetof(SMMURegs, cr1):
629 case offsetof(SMMURegs, cr2):
630 case offsetof(SMMURegs, strtab_base_cfg):
631 case offsetof(SMMURegs, eventq_cons):
632 case offsetof(SMMURegs, eventq_irq_cfg1):
633 case offsetof(SMMURegs, priq_cons):
634 assert(pkt->getSize() == sizeof(uint32_t));
635 *reinterpret_cast<uint32_t *>(regs.data + offset) =
636 pkt->getLE<uint32_t>();
637 break;
638
639 case offsetof(SMMURegs, cmdq_cons):
640 assert(pkt->getSize() == sizeof(uint32_t));
// CMDQ_CONS is hardware-owned while the command queue is enabled.
641 if (regs.cr0 & CR0_CMDQEN_MASK) {
642 warn("CMDQ is enabled: ignoring write to CMDQ_CONS\n");
643 } else {
644 *reinterpret_cast<uint32_t *>(regs.data + offset) =
645 pkt->getLE<uint32_t>();
646 }
647 break;
648
649 case offsetof(SMMURegs, cmdq_prod):
650 assert(pkt->getSize() == sizeof(uint32_t));
651 *reinterpret_cast<uint32_t *>(regs.data + offset) =
652 pkt->getLE<uint32_t>();
654 break;
655
656 case offsetof(SMMURegs, strtab_base):
657 case offsetof(SMMURegs, eventq_irq_cfg0):
658 assert(pkt->getSize() == sizeof(uint64_t));
659 *reinterpret_cast<uint64_t *>(regs.data + offset) =
660 pkt->getLE<uint64_t>();
661 break;
662
663 case offsetof(SMMURegs, cmdq_base):
664 assert(pkt->getSize() == sizeof(uint64_t));
665 if (regs.cr0 & CR0_CMDQEN_MASK) {
666 warn("CMDQ is enabled: ignoring write to CMDQ_BASE\n");
667 } else {
668 *reinterpret_cast<uint64_t *>(regs.data + offset) =
669 pkt->getLE<uint64_t>();
// Re-basing a queue resets its indices.
670 regs.cmdq_cons = 0;
671 regs.cmdq_prod = 0;
672 }
673 break;
674
675 case offsetof(SMMURegs, eventq_base):
676 assert(pkt->getSize() == sizeof(uint64_t));
677 *reinterpret_cast<uint64_t *>(regs.data + offset) =
678 pkt->getLE<uint64_t>();
679 regs.eventq_cons = 0;
680 regs.eventq_prod = 0;
681 break;
682
683 case offsetof(SMMURegs, priq_base):
684 assert(pkt->getSize() == sizeof(uint64_t));
685 *reinterpret_cast<uint64_t *>(regs.data + offset) =
686 pkt->getLE<uint64_t>();
687 regs.priq_cons = 0;
688 regs.priq_prod = 0;
689 break;
690
691 default:
692 if (inSecureBlock(offset)) {
693 warn("smmu: secure registers (0x%x) are not implemented\n",
694 offset);
695 } else {
696 warn("smmu: write to read-only/undefined register at 0x%x\n",
697 offset);
698 }
699 }
700
701 pkt->makeAtomicResponse();
702
703 return 0;
704}
705
706bool
707SMMUv3::inSecureBlock(uint32_t offs) const
708{
709 if (offs >= offsetof(SMMURegs, _secure_regs) && offs < SMMU_SECURE_SZ)
710 return true;
711 else
712 return false;
713}
714
// Post-elaboration initialisation: verify the mandatory request port is
// connected, enable the dedicated table-walk path if its optional port is
// connected, and advertise address ranges on the device and control
// ports.
// NOTE(review): the signature line (SMMUv3::init(), original line 716)
// is absent from this doxygen export.
715void
717{
718 // make sure both sides are connected and have the same block size
719 if (!requestPort.isConnected())
720 fatal("Request port is not connected.\n");
721
722 // If the second request port is connected for the table walks, enable
723 // the mode to send table walks through this port instead
724 if (tableWalkPort.isConnected())
725 tableWalkPortEnable = true;
726
727 // notify the request side of our address ranges
728 for (auto ifc : deviceInterfaces) {
729 ifc->sendRange();
730 }
731
732 if (controlPort.isConnected())
733 controlPort.sendRangeChange();
734}
735
// Statistics group constructor: registers the STE/CD fetch counters and
// the translation/page-table-walk time distributions (0..2,000,000 ticks
// in 2000-tick buckets), all printed as PDFs.
// NOTE(review): the constructor signature line and the stat-name lines
// preceding each .flags()/.init() chain are absent from this doxygen
// export.
737 : statistics::Group(parent),
738 ADD_STAT(steL1Fetches, statistics::units::Count::get(), "STE L1 fetches"),
739 ADD_STAT(steFetches, statistics::units::Count::get(), "STE fetches"),
740 ADD_STAT(cdL1Fetches, statistics::units::Count::get(), "CD L1 fetches"),
741 ADD_STAT(cdFetches, statistics::units::Count::get(), "CD fetches"),
743 "Time to translate address"),
745 "Time to walk page tables")
746{
747 using namespace statistics;
748
750 .flags(pdf);
751
753 .flags(pdf);
754
756 .flags(pdf);
757
759 .flags(pdf);
760
762 .init(0, 2000000, 2000)
763 .flags(pdf);
764
766 .init(0, 2000000, 2000)
767 .flags(pdf);
768}
769
// Drain hook: the SMMU can be checkpointed only once the command
// executor has finished; otherwise report Draining (the statement on
// original line 775, presumably `return DrainState::Draining;`, is
// absent from this doxygen export, as is the signature line).
772{
773 // Wait until the Command Executor is not busy
774 if (commandExecutor.isBusy()) {
776 }
777 return DrainState::Drained;
778}
779
// Checkpoint the SMMU: only the raw architectural register file needs to
// be saved (caches/TLBs are reconstructible state).
// NOTE(review): the signature line (original line 781) is absent from
// this doxygen export.
780void
782{
783 DPRINTF(Checkpoint, "Serializing SMMUv3\n");
784
785 SERIALIZE_ARRAY(regs.data, sizeof(regs.data) / sizeof(regs.data[0]));
786}
787
// Restore the SMMU from a checkpoint: inverse of serialize(), reloading
// the raw architectural register file.
// NOTE(review): the signature line (original line 789) is absent from
// this doxygen export.
788void
790{
791 DPRINTF(Checkpoint, "Unserializing SMMUv3\n");
792
793 UNSERIALIZE_ARRAY(regs.data, sizeof(regs.data) / sizeof(regs.data[0]));
794}
795
796Port&
797SMMUv3::getPort(const std::string &name, PortID id)
798{
799 if (name == "request") {
800 return requestPort;
801 } else if (name == "walker") {
802 return tableWalkPort;
803 } else if (name == "control") {
804 return controlPort;
805 } else {
806 return ClockedObject::getPort(name, id);
807 }
808}
809
810} // namespace gem5
#define DPRINTF(x,...)
Definition trace.hh:209
Defines global host-dependent types: Counter, Tick, and (indirectly) {int,uint}{8,...
Base class for ARM GIC implementations.
ClockedObject(const ClockedObjectParams &p)
Tick nextCycle() const
Based on the clock of the object, determine the start tick of the first cycle that is at least one cy...
virtual std::string name() const
Definition named.hh:60
Addr getAddr() const
Definition packet.hh:807
void setLE(T v)
Set the value in the data pointer to v as little endian.
uint32_t payloadDelay
The extra pipelining delay from seeing the packet until the end of payload is transmitted by the comp...
Definition packet.hh:449
uint32_t headerDelay
The extra delay from seeing the packet until the header is transmitted.
Definition packet.hh:431
void pushSenderState(SenderState *sender_state)
Push a new sender state to the packet and make the current sender state the predecessor of the new on...
Definition packet.cc:334
SenderState * popSenderState()
Pop the top of the state stack and return a pointer to it.
Definition packet.cc:342
unsigned getSize() const
Definition packet.hh:817
void makeAtomicResponse()
Definition packet.hh:1074
T getLE() const
Get the data in the packet byte swapped from little endian to host endian.
Ports are used to interface objects to each other.
Definition port.hh:62
SMMUAction run(PacketPtr pkt)
void schedAtsTimingResp(PacketPtr pkt)
void schedTimingResp(PacketPtr pkt)
const System & system
Definition smmu_v3.hh:96
SMMUCommandExecProcess commandExecutor
Definition smmu_v3.hh:157
const AddrRange regsMap
Definition smmu_v3.hh:159
Tick readControl(PacketPtr pkt)
Definition smmu_v3.cc:573
void recvReqRetry()
Definition smmu_v3.cc:152
void serialize(CheckpointOut &cp) const override
Serialize an object.
Definition smmu_v3.cc:781
SMMUAction runProcess(SMMUProcess *proc, PacketPtr pkt)
Definition smmu_v3.cc:227
std::vector< SMMUv3DeviceInterface * > deviceInterfaces
Definition smmu_v3.hh:155
void unserialize(CheckpointIn &cp) override
Unserialize an object.
Definition smmu_v3.cc:789
ARMArchTLB tlb
Definition smmu_v3.hh:107
ConfigCache configCache
Definition smmu_v3.hh:108
SMMUControlPort controlPort
Definition smmu_v3.hh:101
SMMUTableWalkPort tableWalkPort
Definition smmu_v3.hh:100
virtual void init() override
init() is called after all C++ SimObjects have been created and all ports are connected.
Definition smmu_v3.cc:716
WalkCache walkCache
Definition smmu_v3.hh:110
SMMUAction runProcessAtomic(SMMUProcess *proc, PacketPtr pkt)
Definition smmu_v3.cc:239
bool recvTimingResp(PacketPtr pkt)
Definition smmu_v3.cc:135
bool inSecureBlock(uint32_t offs) const
Definition smmu_v3.cc:707
ArmInterruptPin *const eventqInterrupt
Definition smmu_v3.hh:105
virtual Port & getPort(const std::string &name, PortID id=InvalidPortID) override
Get a port with a given name and index.
Definition smmu_v3.cc:797
Tick writeControl(PacketPtr pkt)
Definition smmu_v3.cc:606
SMMURequestPort requestPort
Definition smmu_v3.hh:99
void scheduleDeviceRetries()
Definition smmu_v3.cc:219
std::queue< SMMUAction > packetsTableWalkToRetry
Definition smmu_v3.hh:165
DrainState drain() override
Provide a default implementation of the drain interface for objects that don't need draining.
Definition smmu_v3.cc:771
IPACache ipaCache
Definition smmu_v3.hh:109
bool tableWalkPortEnable
Definition smmu_v3.hh:116
const RequestorID requestorId
Definition smmu_v3.hh:97
std::queue< SMMUAction > packetsToRetry
Definition smmu_v3.hh:164
void processCommand(const SMMUCommand &cmd)
Definition smmu_v3.cc:391
void tableWalkRecvReqRetry()
Definition smmu_v3.cc:198
SMMURegs regs
Definition smmu_v3.hh:160
SMMUv3(const SMMUv3Params &p)
Definition smmu_v3.cc:60
MemberEventWrapper<&SMMUv3::processCommands > processCommandsEvent
Definition smmu_v3.hh:175
void processCommands()
Definition smmu_v3.cc:375
SMMUAction runProcessTiming(SMMUProcess *proc, PacketPtr pkt)
Definition smmu_v3.cc:288
bool tableWalkRecvTimingResp(PacketPtr pkt)
Definition smmu_v3.cc:181
friend class SMMUProcess
Definition smmu_v3.hh:91
Statistics container.
Definition group.hh:93
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Definition group.hh:75
DrainState
Object drain/handover states.
Definition drain.hh:76
@ Draining
Draining buffers pending serialization/handover.
Definition drain.hh:78
@ Drained
Buffers drained, ready for serialization/handover.
Definition drain.hh:79
void schedule(Event &event, Tick when)
Definition eventq.hh:1012
#define panic(...)
This implements a cprintf based panic() function.
Definition logging.hh:220
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condi...
Definition logging.hh:268
#define fatal(...)
This implements a cprintf based fatal() function.
Definition logging.hh:232
#define UNSERIALIZE_ARRAY(member, size)
Definition serialize.hh:618
#define SERIALIZE_ARRAY(member, size)
Definition serialize.hh:610
const Params & params() const
virtual Port & getPort(const std::string &if_name, PortID idx=InvalidPortID)
Get a port with a given name and index.
#define warn(...)
Definition logging.hh:288
Bitfield< 23, 0 > offset
Definition types.hh:144
Bitfield< 8 > a
Definition misc_types.hh:66
Bitfield< 3 > addr
Definition types.hh:84
Units for Stats.
Definition units.hh:113
const FlagsType pdf
Print the percent of the total that this entry represents.
Definition info.hh:61
Copyright (c) 2024 Arm Limited All rights reserved.
Definition binary32.hh:36
T safe_cast(U &&ref_or_ptr)
Definition cast.hh:74
@ CR0_CMDQEN_MASK
std::ostream CheckpointOut
Definition serialize.hh:66
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition types.hh:147
int16_t PortID
Port index/ID type, and a symbolic name for an invalid port id.
Definition types.hh:245
uint64_t Tick
Tick count type.
Definition types.hh:58
@ CMD_CFGI_STE
@ CMD_TLBI_S2_IPA
@ CMD_TLBI_S12_VMALL
@ CMD_CFGI_STE_RANGE
@ CMD_TLBI_NH_ASID
@ CMD_TLBI_NH_VA
@ CMD_CFGI_CD_ALL
@ CMD_PRF_CONFIG
@ CMD_TLBI_NSNH_ALL
@ CMD_CFGI_CD
@ CMD_TLBI_NH_ALL
@ CMD_PRF_ADDR
@ CMD_TLBI_NH_VAA
Packet * PacketPtr
@ SMMU_SECURE_SZ
@ SMMU_REG_SIZE
@ ACTION_SEND_RESP
@ ACTION_DELAY
@ ACTION_SLEEP
@ ACTION_TERMINATE
@ ACTION_SEND_REQ
@ ACTION_SEND_RESP_ATS
@ ACTION_SEND_REQ_FINAL
This is an implementation of the SMMUv3 architecture.
SMMUActionType type
SMMUv3DeviceInterface * ifc
Bitfield< 63, 48 > asid
Bitfield< 63, 32 > sid
Bitfield< 47, 32 > vmid
Bitfield< 31, 12 > ssid
Bitfield< 4, 0 > range
uint64_t addr() const
SMMUv3Stats(statistics::Group *parent)
Definition smmu_v3.cc:736
statistics::Distribution ptwTimeDist
Definition smmu_v3.hh:152
statistics::Scalar cdL1Fetches
Definition smmu_v3.hh:149
statistics::Scalar steL1Fetches
Definition smmu_v3.hh:147
statistics::Scalar steFetches
Definition smmu_v3.hh:148
statistics::Distribution translationTimeDist
Definition smmu_v3.hh:151
statistics::Scalar cdFetches
Definition smmu_v3.hh:150
const std::string & name()
Definition trace.cc:48

Generated on Mon Oct 27 2025 04:13:01 for gem5 by doxygen 1.14.0