gem5 [DEVELOP-FOR-25.0]
Loading...
Searching...
No Matches
amdgpu_vm.cc
Go to the documentation of this file.
1/*
2 * Copyright (c) 2021 Advanced Micro Devices, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
14 *
15 * 3. Neither the name of the copyright holder nor the names of its
16 * contributors may be used to endorse or promote products derived from this
17 * software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
33
36#include "arch/generic/mmu.hh"
37#include "base/trace.hh"
38#include "debug/AMDGPUDevice.hh"
41#include "mem/packet_access.hh"
42
43namespace gem5
44{
45
47{
48 // Zero out contexts
49 memset(&vmContext0, 0, sizeof(AMDGPUSysVMContext));
50
52 for (int i = 0; i < AMDGPU_VM_COUNT; ++i) {
53 memset(&vmContexts[0], 0, sizeof(AMDGPUVMContext));
54 }
55
56 for (int i = 0; i < NUM_MMIO_RANGES; ++i) {
58 }
59}
60
61void
63{
64 mmioRanges[mmio_aperture] = range;
65}
66
69{
70 return mmioRanges[mmio_aperture];
71}
72
73const AddrRange&
75{
76 for (int i = 0; i < NUM_MMIO_RANGES; ++i) {
77 if (mmioRanges[i].contains(offset)) {
78 return mmioRanges[i];
79 }
80 }
81
82 // Default to NBIO
84}
85
86Addr
88{
89 return vmContext0.ptBase;
90}
91
92Addr
94{
95 return vmContext0.ptEnd - vmContext0.ptStart;
96}
97
98void
100{
101 uint32_t value = pkt->getLE<uint32_t>();
102
103 switch (offset) {
104 // MMHUB MMIOs
106 DPRINTF(AMDGPUDevice, "Marking invalidate ENG17 SEM acquired\n");
107 pkt->setLE<uint32_t>(1);
108 break;
110 // This is only used by driver initialization and only expects an ACK
111 // for VMID 0 which is the first bit in the response.
112 DPRINTF(AMDGPUDevice, "Telling driver invalidate ENG17 is complete\n");
113 pkt->setLE<uint32_t>(1);
114 break;
116 mmhubBase = ((Addr)bits(value, 23, 0) << 24);
117 DPRINTF(AMDGPUDevice, "MMHUB FB base set to %#x\n", mmhubBase);
118 break;
120 mmhubTop = ((Addr)bits(value, 23, 0) << 24) | 0xFFFFFFULL;
121 DPRINTF(AMDGPUDevice, "MMHUB FB top set to %#x\n", mmhubTop);
122 break;
123 // GRBM MMIOs
126 DPRINTF(AMDGPUDevice, "Overwritting invalidation ENG17 ACK\n");
127 pkt->setLE<uint32_t>(1);
128 break;
129 default:
130 DPRINTF(AMDGPUDevice, "GPUVM read of unknown MMIO %#x\n", offset);
131 break;
132 }
133}
134
135void
137{
138 switch (offset) {
139 // VMID0 MMIOs
141 vmContext0.ptBaseL = pkt->getLE<uint32_t>();
142 // Clear extra bits not part of address
143 vmContext0.ptBaseL = insertBits(vmContext0.ptBaseL, 0, 0, 0);
144 break;
146 vmContext0.ptBaseH = pkt->getLE<uint32_t>();
147 break;
149 vmContext0.ptStartL = pkt->getLE<uint32_t>();
150 break;
152 vmContext0.ptStartH = pkt->getLE<uint32_t>();
153 break;
155 vmContext0.ptEndL = pkt->getLE<uint32_t>();
156 break;
158 vmContext0.ptEndH = pkt->getLE<uint32_t>();
159 break;
160 case mmMC_VM_AGP_TOP: {
161 uint32_t val = pkt->getLE<uint32_t>();
162 vmContext0.agpTop = (((Addr)bits(val, 23, 0)) << 24) | 0xffffff;
163 } break;
164 case mmMC_VM_AGP_BOT: {
165 uint32_t val = pkt->getLE<uint32_t>();
166 vmContext0.agpBot = ((Addr)bits(val, 23, 0)) << 24;
167 } break;
168 case mmMC_VM_AGP_BASE: {
169 uint32_t val = pkt->getLE<uint32_t>();
170 vmContext0.agpBase = ((Addr)bits(val, 23, 0)) << 24;
171 } break;
173 uint32_t val = pkt->getLE<uint32_t>();
174 vmContext0.fbTop = (((Addr)bits(val, 23, 0)) << 24) | 0xffffff;
175 } break;
177 uint32_t val = pkt->getLE<uint32_t>();
178 vmContext0.fbBase = ((Addr)bits(val, 23, 0)) << 24;
179 } break;
180 case mmMC_VM_FB_OFFSET: {
181 uint32_t val = pkt->getLE<uint32_t>();
182 vmContext0.fbOffset = ((Addr)bits(val, 23, 0)) << 24;
183 } break;
185 uint32_t val = pkt->getLE<uint32_t>();
186 vmContext0.sysAddrL = ((Addr)bits(val, 29, 0)) << 18;
187 } break;
189 uint32_t val = pkt->getLE<uint32_t>();
190 vmContext0.sysAddrH = ((Addr)bits(val, 29, 0)) << 18;
191 } break;
192 default:
193 break;
194 }
195}
196
197void
199{
200 switch (offset) {
201 // VMID0 MMIOs
203 vmContext0.ptBaseL = pkt->getLE<uint32_t>();
204 // Clear extra bits not part of address
205 vmContext0.ptBaseL = insertBits(vmContext0.ptBaseL, 0, 0, 0);
206 break;
208 vmContext0.ptBaseH = pkt->getLE<uint32_t>();
209 break;
211 vmContext0.ptStartL = pkt->getLE<uint32_t>();
212 break;
214 vmContext0.ptStartH = pkt->getLE<uint32_t>();
215 break;
217 vmContext0.ptEndL = pkt->getLE<uint32_t>();
218 break;
220 vmContext0.ptEndH = pkt->getLE<uint32_t>();
221 break;
222 case MI300X_VM_AGP_TOP: {
223 uint32_t val = pkt->getLE<uint32_t>();
224 vmContext0.agpTop = (((Addr)bits(val, 23, 0)) << 24) | 0xffffff;
225 } break;
226 case MI300X_VM_AGP_BOT: {
227 uint32_t val = pkt->getLE<uint32_t>();
228 vmContext0.agpBot = ((Addr)bits(val, 23, 0)) << 24;
229 } break;
230 case MI300X_VM_AGP_BASE: {
231 uint32_t val = pkt->getLE<uint32_t>();
232 vmContext0.agpBase = ((Addr)bits(val, 23, 0)) << 24;
233 } break;
235 uint32_t val = pkt->getLE<uint32_t>();
236 vmContext0.fbTop = (((Addr)bits(val, 23, 0)) << 24) | 0xffffff;
237 } break;
239 uint32_t val = pkt->getLE<uint32_t>();
240 vmContext0.fbBase = ((Addr)bits(val, 23, 0)) << 24;
241 } break;
242 case MI300X_VM_FB_OFFSET: {
243 uint32_t val = pkt->getLE<uint32_t>();
244 vmContext0.fbOffset = ((Addr)bits(val, 23, 0)) << 24;
245 } break;
247 uint32_t val = pkt->getLE<uint32_t>();
248 vmContext0.sysAddrL = ((Addr)bits(val, 29, 0)) << 18;
249 } break;
251 uint32_t val = pkt->getLE<uint32_t>();
252 vmContext0.sysAddrH = ((Addr)bits(val, 29, 0)) << 18;
253 } break;
254 default:
255 break;
256 }
257}
258
259void
261{
262 // There are multiple functions due to MMIO addresses being aliased to
263 // something different from a previous GFX version. So far this has not
264 // been the case for supported MMIO reads.
265 if (gpuDevice->getGfxVersion() == GfxVersion::gfx942) {
267 } else {
269 }
270}
271
272void
274{
275 DPRINTF(AMDGPUDevice, "Registered a TLB with device\n");
276 gpu_tlbs.push_back(tlb);
277}
278
279void
281{
282 DPRINTF(AMDGPUDevice, "Invalidating all TLBs\n");
283 for (auto &tlb : gpu_tlbs) {
284 tlb->invalidateAll();
285 DPRINTF(AMDGPUDevice, " ... TLB invalidated\n");
286 }
287}
288
void
// NOTE(review): the defining signature line was lost in extraction; per
// the doxygen index this is AMDGPUVM::serialize(CheckpointOut &cp) const
// (definition at cc:290). Recover the exact line from upstream gem5.
{
    // Serialization macros need plain lvalues, so copy the (packed) VMID0
    // context fields into locals first.
    Addr vm0PTBase = vmContext0.ptBase;
    Addr vm0PTStart = vmContext0.ptStart;
    Addr vm0PTEnd = vmContext0.ptEnd;
    uint64_t gartTableSize;
    SERIALIZE_SCALAR(vm0PTBase);
    SERIALIZE_SCALAR(vm0PTStart);
    SERIALIZE_SCALAR(vm0PTEnd);

    // NOTE(review): original lines 300-310 are missing from this
    // extraction — presumably serialization of the remaining vmContext0
    // fields (AGP/FB/system aperture). Do not assume this block is
    // complete; recover from upstream before editing.

    // Snapshot the per-VMID page table pointers into plain arrays so they
    // can be written as checkpoint arrays.
    Addr ptBase[AMDGPU_VM_COUNT];
    Addr ptStart[AMDGPU_VM_COUNT];
    Addr ptEnd[AMDGPU_VM_COUNT];
    for (int i = 0; i < AMDGPU_VM_COUNT; i++) {
        ptBase[i] = vmContexts[i].ptBase;
        ptStart[i] = vmContexts[i].ptStart;
        ptEnd[i] = vmContexts[i].ptEnd;
    }
    // NOTE(review): original lines 320-322 (presumably the
    // SERIALIZE_ARRAY calls for ptBase/ptStart/ptEnd) are missing here.

    // Flatten the GART unordered_map into parallel key/value arrays;
    // freed immediately after serialization.
    gartTableSize = gartTable.size();
    uint64_t* gartTableKey = new uint64_t[gartTableSize];
    uint64_t* gartTableValue = new uint64_t[gartTableSize];
    SERIALIZE_SCALAR(gartTableSize);
    int i = 0;
    for (auto it = gartTable.begin(); it != gartTable.end(); ++it) {
        gartTableKey[i] = it->first;
        gartTableValue[i] = it->second;
        i++;
    }
    SERIALIZE_ARRAY(gartTableKey, gartTableSize);
    SERIALIZE_ARRAY(gartTableValue, gartTableSize);
    delete[] gartTableKey;
    delete[] gartTableValue;
}
339
void
// NOTE(review): the defining signature line was lost in extraction; per
// the doxygen index this is AMDGPUVM::unserialize(CheckpointIn &cp)
// (definition at cc:341). Recover the exact line from upstream gem5.
{
    // Unserialize requires fields not be packed
    Addr vm0PTBase;
    Addr vm0PTStart;
    Addr vm0PTEnd;
    uint64_t gartTableSize, *gartTableKey, *gartTableValue;
    UNSERIALIZE_SCALAR(vm0PTBase);
    UNSERIALIZE_SCALAR(vm0PTStart);
    UNSERIALIZE_SCALAR(vm0PTEnd);
    // Copy the unpacked locals back into the packed VMID0 context.
    vmContext0.ptBase = vm0PTBase;
    vmContext0.ptStart = vm0PTStart;
    vmContext0.ptEnd = vm0PTEnd;

    // NOTE(review): original lines 355-365 are missing from this
    // extraction — presumably restoration of the remaining vmContext0
    // fields (AGP/FB/system aperture), mirroring serialize().

    Addr ptBase[AMDGPU_VM_COUNT];
    Addr ptStart[AMDGPU_VM_COUNT];
    Addr ptEnd[AMDGPU_VM_COUNT];
    // NOTE(review): original lines 370-372 (presumably the
    // UNSERIALIZE_ARRAY calls filling ptBase/ptStart/ptEnd) are missing;
    // as shown the arrays below would be read uninitialized.
    for (int i = 0; i < AMDGPU_VM_COUNT; i++) {
        vmContexts[i].ptBase = ptBase[i];
        vmContexts[i].ptStart = ptStart[i];
        vmContexts[i].ptEnd = ptEnd[i];
    }
    // Rebuild the GART map from the flattened key/value arrays written by
    // serialize(); temporaries are freed once the map is repopulated.
    UNSERIALIZE_SCALAR(gartTableSize);
    gartTableKey = new uint64_t[gartTableSize];
    gartTableValue = new uint64_t[gartTableSize];
    UNSERIALIZE_ARRAY(gartTableKey, gartTableSize);
    UNSERIALIZE_ARRAY(gartTableValue, gartTableSize);
    for (uint64_t i = 0; i < gartTableSize; i++) {
        gartTable[gartTableKey[i]] = gartTableValue[i];
    }
    delete[] gartTableKey;
    delete[] gartTableValue;
}
389
void
// NOTE(review): the qualified signature line was lost in extraction; the
// doxygen index places an AGP TranslationGen's
// "translate(Range &range) const override" at cc:391. The enclosing
// class name must be recovered from amdgpu_vm.hh.
{
    assert(vm->inAGP(range.vaddr));

    // Clamp the translation so it never crosses an AGP page boundary:
    // 'next' is the first page boundary strictly after vaddr.
    Addr next = roundUp(range.vaddr, AMDGPU_AGP_PAGE_SIZE);
    if (next == range.vaddr)
        next += AMDGPU_AGP_PAGE_SIZE;

    range.size = std::min(range.size, next - range.vaddr);
    // AGP is a flat window: rebase the offset within the AGP aperture
    // onto the AGP base address.
    range.paddr = range.vaddr - vm->getAGPBot() + vm->getAGPBase();

    DPRINTF(AMDGPUDevice, "AMDGPUVM: AGP translation %#lx -> %#lx\n",
            range.vaddr, range.paddr);
}
405
void
// NOTE(review): the qualified signature line was lost in extraction; the
// doxygen index places a GART TranslationGen's
// "translate(Range &range) const override" at cc:407.
{
    // NOTE(review): the declaration of 'next' (original line 409,
    // presumably "Addr next = roundUp(range.vaddr,
    // AMDGPU_GART_PAGE_SIZE);" by analogy with the AGP/MMHUB variants)
    // is missing from this extraction.
    if (next == range.vaddr)
        next += AMDGPU_GART_PAGE_SIZE;
    // Clamp the translation to a single GART page.
    range.size = std::min(range.size, next - range.vaddr);

    // Virtual page frame number: drop the 12-bit page offset.
    Addr gart_addr = bits(range.vaddr, 63, 12);

    // This table is a bit hard to iterate over. If we cross a page, the next
    // PTE is not necessarily the next entry but actually 7 entries away.
    Addr lsb = bits(gart_addr, 2, 0);
    gart_addr += lsb * 7;

    // GART is a single level translation, so the value at the "virtual" addr
    // is the PTE containing the physical address.
    auto result = vm->gartTable.find(gart_addr);
    if (result == vm->gartTable.end()) {
        // There is no reason to fault as there is no recovery mechanism for
        // invalid GART entries. Simply panic in this case
        // NOTE(review): comment says "panic" but the code only warns and
        // falls back to an identity mapping — see rationale below.
        warn("GART translation for %p not found", range.vaddr);

        // Some PM4 packets have register addresses which we ignore. In that
        // case just return the vaddr rather than faulting.
        range.paddr = range.vaddr;
    } else {
        // PTE bits 47:12 hold the physical frame; reattach the page offset.
        Addr pte = result->second;
        Addr lower_bits = bits(range.vaddr, 11, 0);
        range.paddr = (bits(pte, 47, 12) << 12) | lower_bits;
    }

    DPRINTF(AMDGPUDevice, "AMDGPUVM: GART translation %#lx -> %#lx\n",
            range.vaddr, range.paddr);
}
441
void
// NOTE(review): the qualified signature line was lost in extraction; the
// doxygen index places an MMHUB TranslationGen's
// "translate(Range &range) const override" at cc:443.
{
    assert(vm->inMMHUB(range.vaddr));

    // NOTE(review): original line 447 (presumably "Addr next =
    // roundUp(range.vaddr, AMDGPU_MMHUB_PAGE_SIZE);") and line 449
    // (presumably "next += AMDGPU_MMHUB_PAGE_SIZE;", the body of the if
    // below) are missing from this extraction.
    if (next == range.vaddr)

    // Clamp to one MMHUB page; MMHUB is a flat offset from its base.
    range.size = std::min(range.size, next - range.vaddr);
    range.paddr = range.vaddr - vm->getMMHUBBase();

    DPRINTF(AMDGPUDevice, "AMDGPUVM: MMHUB translation %#lx -> %#lx\n",
            range.vaddr, range.paddr);
}
457
void
// NOTE(review): the qualified signature line was lost in extraction; the
// doxygen index places a user TranslationGen's
// "translate(Range &range) const override" at cc:459. The walker member
// and vmid used below imply a per-VMID page-table walk.
{
    // Get base address of the page table for this vmid
    Addr base = vm->getPageTableBase(vmid);
    Addr start = vm->getPageTableStart(vmid);
    DPRINTF(AMDGPUDevice, "User tl base %#lx start %#lx walker %p\n",
            base, start, walker);

    // Walk the page table functionally (no timing); paddr is updated in
    // place, logBytes receives the page size, system_bit says whether the
    // mapping targets host memory.
    bool system_bit;
    unsigned logBytes;
    Addr paddr = range.vaddr;
    Fault fault = walker->startFunctional(base, paddr, logBytes,
                                          BaseMMU::Mode::Read, system_bit);
    if (fault != NoFault) {
        // No recovery path for a failed functional walk.
        fatal("User translation fault");
    }

    // GPU page size is variable. Use logBytes to determine size.
    const Addr page_size = 1 << logBytes;
    Addr next = roundUp(range.vaddr, page_size);
    if (next == range.vaddr) {
        // We don't know the size of the next page, use default.
        next += AMDGPU_USER_PAGE_SIZE;
    }

    // If we are not in system/host memory, change the address to the MMHUB
    // aperture. This is mapped to the same backing memory as device memory.
    if (!system_bit) {
        paddr += vm->getMMHUBBase();
        assert(vm->inMMHUB(paddr));
    }

    range.size = std::min(range.size, next - range.vaddr);
    range.paddr = paddr;
}
494
495} // namespace gem5
#define mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32
Definition amdgpu_vm.hh:58
#define mmMMHUB_VM_FB_LOCATION_BASE
Definition amdgpu_vm.hh:90
#define MI300X_VM_SYSTEM_APERTURE_LOW_ADDR
Definition amdgpu_vm.hh:84
#define mmMMHUB_VM_FB_LOCATION_TOP
Definition amdgpu_vm.hh:91
#define mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32
Definition amdgpu_vm.hh:59
#define MI300X_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32
Definition amdgpu_vm.hh:63
#define MI300X_VM_FB_LOCATION_TOP
Definition amdgpu_vm.hh:80
#define MI300X_VM_FB_OFFSET
Definition amdgpu_vm.hh:78
#define mmMC_VM_SYSTEM_APERTURE_LOW_ADDR
Definition amdgpu_vm.hh:75
#define mmMC_VM_FB_LOCATION_BASE
Definition amdgpu_vm.hh:70
#define MI300X_VM_SYSTEM_APERTURE_HIGH_ADDR
Definition amdgpu_vm.hh:85
#define MI300X_VM_INVALIDATE_ENG17_ACK
Definition amdgpu_vm.hh:107
#define MI300X_VM_AGP_TOP
Definition amdgpu_vm.hh:81
#define MI300X_CONTEXT0_PAGE_TABLE_START_ADDR_LO32
Definition amdgpu_vm.hh:64
#define MI300X_CONTEXT0_PAGE_TABLE_END_ADDR_LO32
Definition amdgpu_vm.hh:66
#define mmMC_VM_AGP_BASE
Definition amdgpu_vm.hh:74
static constexpr int AMDGPU_MMHUB_PAGE_SIZE
Definition amdgpu_vm.hh:115
#define MI300X_VM_FB_LOCATION_BASE
Definition amdgpu_vm.hh:79
#define mmMC_VM_AGP_BOT
Definition amdgpu_vm.hh:73
#define mmMMHUB_VM_INVALIDATE_ENG17_SEM
Definition amdgpu_vm.hh:87
#define mmMC_VM_AGP_TOP
Definition amdgpu_vm.hh:72
#define MI300X_CONTEXT0_PAGE_TABLE_END_ADDR_HI32
Definition amdgpu_vm.hh:67
#define MI300X_CONTEXT0_PAGE_TABLE_START_ADDR_HI32
Definition amdgpu_vm.hh:65
#define mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR
Definition amdgpu_vm.hh:76
#define mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32
Definition amdgpu_vm.hh:55
#define mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32
Definition amdgpu_vm.hh:60
#define mmMC_VM_FB_OFFSET
Definition amdgpu_vm.hh:69
#define mmMC_VM_FB_LOCATION_TOP
Definition amdgpu_vm.hh:71
#define MI300X_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32
Definition amdgpu_vm.hh:62
static constexpr int AMDGPU_GART_PAGE_SIZE
Definition amdgpu_vm.hh:114
static constexpr int AMDGPU_USER_PAGE_SIZE
Definition amdgpu_vm.hh:118
#define mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32
Definition amdgpu_vm.hh:57
#define mmVM_INVALIDATE_ENG17_ACK
MMIO offsets for graphics register bus manager (GRBM).
Definition amdgpu_vm.hh:54
#define MI300X_VM_AGP_BOT
Definition amdgpu_vm.hh:82
#define MI300X_VM_AGP_BASE
Definition amdgpu_vm.hh:83
#define mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32
Definition amdgpu_vm.hh:56
static constexpr int AMDGPU_AGP_PAGE_SIZE
Definition amdgpu_vm.hh:113
#define mmMMHUB_VM_INVALIDATE_ENG17_ACK
Definition amdgpu_vm.hh:89
#define DPRINTF(x,...)
Definition trace.hh:209
Device model for an AMD GPU.
void translate(Range &range) const override
Subclasses implement this function to complete TranslationGen.
Definition amdgpu_vm.cc:391
void translate(Range &range) const override
Subclasses implement this function to complete TranslationGen.
Definition amdgpu_vm.cc:407
void translate(Range &range) const override
Subclasses implement this function to complete TranslationGen.
Definition amdgpu_vm.cc:443
void translate(Range &range) const override
Subclasses implement this function to complete TranslationGen.
Definition amdgpu_vm.cc:459
uint64_t mmhubBase
Definition amdgpu_vm.hh:189
void writeMMIOGfx900(PacketPtr pkt, Addr offset)
Different MMIO implements for different GFX versions with overlapping MMIO addresses.
Definition amdgpu_vm.cc:136
void setMMIOAperture(mmio_range_t mmio_aperture, AddrRange range)
Definition amdgpu_vm.cc:62
std::vector< AMDGPUVMContext > vmContexts
Definition amdgpu_vm.hh:185
std::array< AddrRange, NUM_MMIO_RANGES > mmioRanges
Definition amdgpu_vm.hh:205
AddrRange getMMIORange(mmio_range_t mmio_aperture)
Definition amdgpu_vm.cc:68
void invalidateTLBs()
Definition amdgpu_vm.cc:280
struct gem5::AMDGPUVM::GEM5_PACKED AMDGPUVMContext
std::unordered_map< uint64_t, uint64_t > gartTable
Copy of GART table.
Definition amdgpu_vm.hh:231
std::vector< VegaISA::GpuTLB * > gpu_tlbs
List of TLBs associated with the GPU device.
Definition amdgpu_vm.hh:196
void readMMIO(PacketPtr pkt, Addr offset)
Definition amdgpu_vm.cc:99
const AddrRange & getMMIOAperture(Addr addr)
Definition amdgpu_vm.cc:74
void writeMMIO(PacketPtr pkt, Addr offset)
Definition amdgpu_vm.cc:260
uint64_t mmhubTop
Definition amdgpu_vm.hh:190
AMDGPUSysVMContext vmContext0
Definition amdgpu_vm.hh:184
AMDGPUDevice * gpuDevice
Definition amdgpu_vm.hh:138
Addr gartBase()
Return base address of GART table in framebuffer.
Definition amdgpu_vm.cc:87
void writeMMIOGfx940(PacketPtr pkt, Addr offset)
Definition amdgpu_vm.cc:198
void unserialize(CheckpointIn &cp) override
Unserialize an object.
Definition amdgpu_vm.cc:341
Addr gartSize()
Return size of GART in number of PTEs.
Definition amdgpu_vm.cc:93
void serialize(CheckpointOut &cp) const override
Serialize an object.
Definition amdgpu_vm.cc:290
void registerTLB(VegaISA::GpuTLB *tlb)
Control methods for TLBs associated with the GPU device.
Definition amdgpu_vm.cc:273
The AddrRange class encapsulates an address range, and supports a number of tests to check if two ran...
Definition addr_range.hh:82
void setLE(T v)
Set the value in the data pointer to v as little endian.
T getLE() const
Get the data in the packet byte swapped from little endian to host endian.
static constexpr T roundUp(const T &val, const U &align)
This function is used to align addresses in memory.
Definition intmath.hh:260
constexpr T bits(T val, unsigned first, unsigned last)
Extract the bitfield from position 'first' to 'last' (inclusive) from 'val' and right justify it.
Definition bitfield.hh:79
constexpr T insertBits(T val, unsigned first, unsigned last, B bit_val)
Returns val with bits first to last set to the LSBs of bit_val.
Definition bitfield.hh:185
#define fatal(...)
This implements a cprintf based fatal() function.
Definition logging.hh:232
#define UNSERIALIZE_ARRAY(member, size)
Definition serialize.hh:618
#define SERIALIZE_ARRAY(member, size)
Definition serialize.hh:610
#define warn(...)
Definition logging.hh:288
Bitfield< 7 > i
Definition misc_types.hh:67
Bitfield< 23, 0 > offset
Definition types.hh:144
Bitfield< 59, 56 > tlb
Bitfield< 63 > val
Definition misc.hh:804
Copyright (c) 2024 Arm Limited All rights reserved.
Definition binary32.hh:36
std::shared_ptr< FaultBase > Fault
Definition types.hh:249
std::ostream CheckpointOut
Definition serialize.hh:66
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition types.hh:147
Packet * PacketPtr
static constexpr int AMDGPU_VM_COUNT
mmio_range_t
Definition amdgpu_vm.hh:124
@ NUM_MMIO_RANGES
Definition amdgpu_vm.hh:130
@ NBIO_MMIO_RANGE
Definition amdgpu_vm.hh:125
constexpr decltype(nullptr) NoFault
Definition types.hh:253
#define UNSERIALIZE_SCALAR(scalar)
Definition serialize.hh:575
#define SERIALIZE_SCALAR(scalar)
Definition serialize.hh:568
This structure represents a single, contiguous translation, or carries information about whatever fau...

Generated on Mon May 26 2025 09:19:09 for gem5 by doxygen 1.13.2