gem5 [DEVELOP-FOR-25.0]
Loading...
Searching...
No Matches
fetch_unit.cc
Go to the documentation of this file.
1/*
2 * Copyright (c) 2014-2017 Advanced Micro Devices, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
14 *
15 * 3. Neither the name of the copyright holder nor the names of its
16 * contributors may be used to endorse or promote products derived from this
17 * software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
33
36#include "base/bitfield.hh"
37#include "debug/GPUFetch.hh"
38#include "debug/GPUPort.hh"
39#include "debug/GPUTLB.hh"
43#include "gpu-compute/shader.hh"
46
47namespace gem5
48{
49
51
// FetchUnit constructor: ties this fetch unit to its owning ComputeUnit.
// NOTE(review): the doc extractor dropped original line 53 (the start of the
// member-initializer list), so only the tail of the initialization is
// visible here — confirm the full list against the repository source.
52FetchUnit::FetchUnit(const ComputeUnitParams &p, ComputeUnit &cu)
54 waveList(nullptr), fetchDepth(p.fetch_depth)
55{
56}
57
// Destructor: clears the per-CU fetch bookkeeping containers.
// NOTE(review): original line 58 (the signature, presumably
// FetchUnit::~FetchUnit()) was dropped by the extractor — confirm.
59{
60 fetchQueue.clear();
61 fetchStatusQueue.clear();
62}
63
64void
// Set up per-wavefront fetch state: one status entry and one fetch buffer
// per wavefront slot, and bind the fetch queue to the fetch scheduler.
// NOTE(review): original line 65 (the signature, presumably
// FetchUnit::init()) was dropped by the extractor — confirm.
66{
67 timingSim = computeUnit.shader->timingSim;
68 fetchQueue.clear();
69 fetchStatusQueue.resize(computeUnit.shader->n_wf);
70 fetchBuf.resize(computeUnit.shader->n_wf, FetchBufDesc());
71
72 for (int i = 0; i < computeUnit.shader->n_wf; ++i) {
73 Wavefront *wf = waveList->at(i);
74 assert(wf->wfSlotId == i);
// second == false means the wave is not currently queued for fetch.
75 fetchStatusQueue[i] = std::make_pair(wf, false);
76 fetchBuf[i].allocateBuf(fetchDepth, computeUnit.cacheLineSize(), wf);
77 fetchBuf[i].decoder(&decoder);
78 decoder.setGfxVersion(computeUnit.shader->getGfxVersion());
79 }
80
81 fetchScheduler.bindList(&fetchQueue);
82}
83
84void
// Per-cycle fetch stage: service existing fetch buffers, re-arm waves that
// have become fetch-ready, and initiate at most one new fetch per call.
// NOTE(review): original line 85 (the signature, presumably
// FetchUnit::exec()) and the comment block on lines 87-94 were dropped by
// the extractor — confirm against the repository source.
86{
// First pass over every wave's fetch buffer: try to release entries when
// a buffer is full, and decode any buffered data that is ready.
95 for (auto &fetch_buf : fetchBuf) {
96 if (!fetch_buf.hasFreeSpace()) {
97 fetch_buf.checkWaveReleaseBuf();
98 }
99 if (fetch_buf.hasFetchDataToProcess()) {
100 fetch_buf.decodeInsts();
101 }
102 }
103
104 // re-evaluate waves which are marked as not ready for fetch
105 for (int j = 0; j < computeUnit.shader->n_wf; ++j) {
106 // Following code assumes 64-bit operation and all insts are
107 // represented by 64-bit pointers to inst objects.
108 Wavefront *curWave = fetchStatusQueue[j].first;
109 assert (curWave);
110
111 // The wavefront has to be active, the IB occupancy has to be
112 // 4 or less instructions and it can not have any branches to
113 // prevent speculative instruction fetches
114 if (!fetchStatusQueue[j].second) {
115 if ((curWave->getStatus() == Wavefront::S_RUNNING ||
116 curWave->getStatus() == Wavefront::S_WAITCNT) &&
117 fetchBuf[j].hasFreeSpace() &&
118 !curWave->stopFetch() &&
119 !curWave->pendingFetch) {
120 fetchQueue.push_back(curWave);
121 fetchStatusQueue[j].second = true;
122 }
123 }
124 }
125
126 // Fetch only if there is some wave ready to be fetched
127 // An empty fetchQueue will cause the scheduler to panic
128 if (fetchQueue.size()) {
129 Wavefront *waveToBeFetched = fetchScheduler.chooseWave();
130 waveToBeFetched->pendingFetch = true;
131 fetchStatusQueue[waveToBeFetched->wfSlotId].second = false;
132 initiateFetch(waveToBeFetched);
133 }
134}
135
136void
// Start a fetch for one wavefront: pick the next cache-line address from
// its fetch buffer, reserve a buffer entry, build the virtual request, and
// send it for address translation (timing) or translate+fetch immediately
// (functional).
// NOTE(review): the extractor dropped several original lines here — 137
// (the signature, presumably initiateFetch(Wavefront *wavefront)), 150
// (the head of the alignment assert whose tail is visible at 151), 174 and
// 178 (the SenderState allocations whose argument tails are visible at
// 179-180), 205, 210-214, and 220 (the head of the sender_state cast).
// Confirm all of these against the repository source.
138{
139 assert(fetchBuf.at(wavefront->wfSlotId).hasFreeSpace());
140
147 Addr vaddr = fetchBuf.at(wavefront->wfSlotId).nextFetchAddr();
148
149 // this should already be aligned to a cache line
151 computeUnit.getCacheLineBits()));
152
153 // shouldn't be fetching a line that is already buffered
154 assert(!fetchBuf.at(wavefront->wfSlotId).pcBuffered(vaddr));
155
156 fetchBuf.at(wavefront->wfSlotId).reserveBuf(vaddr);
157
158 DPRINTF(GPUFetch, "CU%d: WF[%d][%d]: Id%d: Initiate fetch "
159 "from pc: %d %#x\n", computeUnit.cu_id, wavefront->simdId,
160 wavefront->wfSlotId, wavefront->wfDynId, wavefront->pc(), vaddr);
161
162 DPRINTF(GPUTLB, "CU%d: WF[%d][%d]: Initiating fetch translation: %#x\n",
163 computeUnit.cu_id, wavefront->simdId, wavefront->wfSlotId, vaddr);
164
165 // set up virtual request
166 RequestPtr req = std::make_shared<Request>(
167 vaddr, computeUnit.cacheLineSize(), Request::INST_FETCH,
168 computeUnit.requestorId(), 0, 0, nullptr);
169
170 PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
171
172 if (timingSim) {
173 // SenderState needed on Return
175
176 // Sender State needed by TLB hierarchy
177 pkt->senderState =
179 computeUnit.shader->gpuTc,
180 false, pkt->senderState);
181
// If the TLB port is already stalled, queue behind the earlier retries.
182 if (computeUnit.sqcTLBPort.isStalled()) {
183 assert(computeUnit.sqcTLBPort.retries.size() > 0);
184
185 DPRINTF(GPUTLB, "Failed to send TLB req for FETCH addr %#x\n",
186 vaddr);
187
188 computeUnit.sqcTLBPort.retries.push_back(pkt);
189 } else if (!computeUnit.sqcTLBPort.sendTimingReq(pkt)) {
190 // Stall the data port;
191 // No more packet is issued till
192 // ruby indicates resources are freed by
193 // a recvReqRetry() call back on this port.
194 computeUnit.sqcTLBPort.stallPort();
195
196 DPRINTF(GPUTLB, "Failed to send TLB req for FETCH addr %#x\n",
197 vaddr);
198
199 computeUnit.sqcTLBPort.retries.push_back(pkt);
200 } else {
201 DPRINTF(GPUTLB, "sent FETCH translation request for %#x\n", vaddr);
202 }
203 } else {
// Functional mode: translate synchronously through the TLB port.
204 pkt->senderState =
206 computeUnit.shader->gpuTc);
207
208 computeUnit.sqcTLBPort.sendFunctional(pkt);
209
215 if (!pkt->req->systemReq()) {
216 pkt->req->requestorId(computeUnit.vramRequestorId());
217 }
218
219 GpuTranslationState *sender_state =
221
222 delete sender_state->tlbEntry;
223 delete sender_state;
224 // fetch the instructions from the SQC when we operate in
225 // functional mode only
226 fetch(pkt, wavefront);
227 }
228}
229
230void
// Issue the (now physically addressed) instruction fetch to the SQC: point
// the packet's data at the reserved fetch-buffer entry and send it timing
// or functional. Bails out early if the buffer entry was un-reserved in
// the meantime (e.g. flushed).
// NOTE(review): the extractor dropped original lines 231 (the signature,
// presumably fetch(PacketPtr pkt, Wavefront *wavefront)), the comment
// blocks at 239-245, 250-255, 262-266 and 271-275, line 280 (the new
// SenderState assignment under the comment at 279), and line 303 —
// confirm against the repository source.
232{
233 assert(pkt->req->hasPaddr());
234 assert(pkt->req->hasSize());
235
236 DPRINTF(GPUFetch, "CU%d: WF[%d][%d]: Fetch Access: %#x\n",
237 computeUnit.cu_id, wavefront->simdId, wavefront->wfSlotId,
238 pkt->req->getPaddr());
239
// Rebuild the packet so the memory request starts from a clean state.
246 PacketPtr oldPkt = pkt;
247 pkt = new Packet(oldPkt->req, oldPkt->cmd);
248 delete oldPkt;
249
// If the target line is no longer reserved, drop this fetch quietly.
256 if (!fetchBuf.at(wavefront->wfSlotId).isReserved(pkt->req->getVaddr())) {
257 wavefront->dropFetch = false;
258 wavefront->pendingFetch = false;
259 return;
260 }
261
267 if (!pkt->req->systemReq()) {
268 pkt->req->requestorId(computeUnit.vramRequestorId());
269 }
270
// The response data lands directly in the reserved fetch-buffer entry.
276 pkt->dataStatic(fetchBuf.at(wavefront->wfSlotId)
277 .reservedBuf(pkt->req->getVaddr()));
278
279 // New SenderState for the memory access
281
282 if (timingSim) {
283 // translation is done. Send the appropriate timing memory request.
284
285 if (pkt->req->systemReq()) {
286 SystemHubEvent *resp_event = new SystemHubEvent(pkt, this);
287 assert(computeUnit.shader->systemHub);
288 computeUnit.shader->systemHub->sendRequest(pkt, resp_event);
289 } else if (!computeUnit.sqcPort.sendTimingReq(pkt)) {
290 computeUnit.sqcPort.retries.push_back(std::make_pair(pkt,
291 wavefront));
292
293 DPRINTF(GPUPort, "CU%d: WF[%d][%d]: Fetch addr %#x failed!\n",
294 computeUnit.cu_id, wavefront->simdId, wavefront->wfSlotId,
295 pkt->req->getPaddr());
296 } else {
297 DPRINTF(GPUPort, "CU%d: WF[%d][%d]: Fetch addr %#x sent!\n",
298 computeUnit.cu_id, wavefront->simdId, wavefront->wfSlotId,
299 pkt->req->getPaddr());
300 }
301 } else {
302 computeUnit.sqcPort.sendFunctional(pkt);
304 }
305}
306
307void
// Handle a returning SQC fetch response: either discard it (the wave asked
// to drop the fetch, e.g. after a flush) or hand the data to the wave's
// fetch buffer, then release the packet and its sender state.
// NOTE(review): the extractor dropped original lines 308 (the signature,
// presumably processFetchReturn(PacketPtr pkt)) and 310-311 (presumably
// the cast of pkt->senderState into sender_state) — confirm against the
// repository source.
309{
312
313 Wavefront *wavefront = sender_state->wavefront;
314
315 DPRINTF(GPUFetch, "CU%d: WF[%d][%d]: Fetch addr %#x returned "
316 "%d bytes!\n", computeUnit.cu_id, wavefront->simdId,
317 wavefront->wfSlotId, pkt->req->getPaddr(), pkt->req->getSize());
318
// Dropped fetches must leave no decodable state behind.
319 if (wavefront->dropFetch) {
320 assert(wavefront->instructionBuffer.empty());
321 assert(!fetchBuf.at(wavefront->wfSlotId).hasFetchDataToProcess());
322 wavefront->dropFetch = false;
323 } else {
324 fetchBuf.at(wavefront->wfSlotId).fetchDone(pkt);
325 }
326
327 wavefront->pendingFetch = false;
328
329 delete pkt->senderState;
330 delete pkt;
331}
332
333void
// Flush the fetch buffer belonging to wavefront slot wfSlotId.
// NOTE(review): original line 334 (the signature; the member index shows
// flushBuf(int wfSlotId)) was dropped by the extractor — confirm.
335{
336 fetchBuf.at(wfSlotId).flushBuf();
337}
338
339void
// Cache a pointer to the CU's wavefront list; read later by init() to map
// wavefront slots to fetch-status entries.
// NOTE(review): original line 340 (the signature; the member index shows
// bindWaveList(std::vector<Wavefront *> *list)) was dropped — confirm.
341{
342 waveList = wave_list;
343}
344
346void
// Allocate the raw fetch buffer for one wavefront and carve it into
// fetchDepth cache-line-sized entries on the free list.
// NOTE(review): the extractor dropped original lines 354, 357, 359 and
// 362-363 — presumably among them the buffer-size computation, the
// panic_if(...) whose message is visible at 358, the floorLog2-based
// cache-line-bits computation, and the readPtr initialization that the
// freeList loop at 366 depends on. Confirm against the repository source.
347FetchUnit::FetchBufDesc::allocateBuf(int fetch_depth, int cache_line_size,
348 Wavefront *wf)
349{
350 wavefront = wf;
351 fetchDepth = fetch_depth;
352 maxIbSize = wavefront->maxIbSize;
353 cacheLineSize = cache_line_size;
355
356 // Calculate the number of bits to address a cache line
358 "Cache line size should be a power of two.");
360
361 bufStart = new uint8_t[maxFbSize];
364
365 for (int i = 0; i < fetchDepth; ++i) {
366 freeList.emplace_back(readPtr + i * cacheLineSize);
367 }
368}
369
370void
// Drop all buffered/reserved fetch lines and rebuild the free list from
// the start of the raw buffer; marks the buffer as restarting after a
// branch so the next fetch address is recomputed from the wave's PC.
// NOTE(review): the extractor dropped original lines 371 (the signature,
// presumably FetchBufDesc::flushBuf()), 374-377 and 381 — confirm against
// the repository source.
372{
373 restartFromBranch = true;
378 freeList.clear();
379 bufferedPCs.clear();
380 reservedPCs.clear();
382
383 for (int i = 0; i < fetchDepth; ++i) {
384 freeList.push_back(bufStart + i * cacheLineSize);
385 }
386
387 DPRINTF(GPUFetch, "WF[%d][%d]: Id%d Fetch dropped, flushing fetch "
388 "buffer\n", wavefront->simdId, wavefront->wfSlotId,
389 wavefront->wfDynId);
390}
391
392Addr
// Compute the next cache-line-aligned address this wave should fetch:
// the line after the most recently buffered/reserved line when lines are
// already held, otherwise (in the dropped else-branch) an address derived
// from the wave's PC, with readPtr adjusted if restarting after a branch.
// NOTE(review): this listing is heavily gapped — original lines 393 (the
// signature), 397 (presumably the `if` whose matching `} else {` is
// visible at 417), 400-403, 411-414, 418-427/429-434 (the else-branch
// body) and 438-439 (the byte_offset computation) were dropped by the
// extractor. The visible control flow is NOT complete; confirm against
// the repository source before relying on it.
394{
395 Addr next_line = 0;
396
398 Addr last_line_fetched = 0;
399 if (!reservedLines()) {
404 last_line_fetched = bufferedPCs.rbegin()->first;
405 } else {
406 last_line_fetched = reservedPCs.rbegin()->first;
407 }
408
409 next_line = last_line_fetched + cacheLineSize;
410
// The successor line must not already be buffered or in flight.
415 assert(bufferedPCs.find(next_line) == bufferedPCs.end());
416 assert(reservedPCs.find(next_line) == reservedPCs.end());
417 } else {
428
435 if (restartFromBranch) {
436 restartFromBranch = false;
437 int byte_offset
440 readPtr += byte_offset;
441 }
442 }
443
444 return next_line;
445}
446
447void
// Reserve a free fetch-buffer entry for the line at vaddr: take the next
// entry off the free list and record it in reservedPCs until the fetch
// response arrives (fetchDone moves it to bufferedPCs).
// NOTE(review): the extractor dropped original lines 448 (the signature;
// the member index shows reserveBuf(Addr vaddr)), 455 and 461-466 —
// confirm against the repository source.
449{
450 // we should have free buffer space, and the line
451 // at vaddr should not already be cached.
452 assert(hasFreeSpace());
453 assert(bufferedPCs.find(vaddr) == bufferedPCs.end());
454 assert(reservedPCs.find(vaddr) == reservedPCs.end());
456
457 DPRINTF(GPUFetch, "WF[%d][%d]: Id%d reserved fetch buffer entry "
458 "for PC = %#x\n", wavefront->simdId, wavefront->wfSlotId,
459 wavefront->wfDynId, vaddr);
460
467 uint8_t *inst_buf = freeList.front();
468 reservedPCs.emplace(vaddr, inst_buf);
469 freeList.pop_front();
470}
471
472void
// Complete an outstanding fetch: promote the line at the packet's vaddr
// from reservedPCs to bufferedPCs so it becomes decodable. MemSyncResp
// packets belong to SQC invalidations and instead flush the buffer.
// NOTE(review): the extractor dropped original lines 473 (the signature;
// the member index shows fetchDone(PacketPtr ptr)), 495-499, and 505 (the
// body of the `if (readPtr == bufEnd)` at 504, presumably a readPtr
// wrap-around) — confirm against the repository source.
474{
475 // If the return command is MemSyncResp, then it belongs to
476 // an SQC invalidation request. This request calls
477 // incLGKMInstsIssued() function in its execution path.
478 // Since there is no valid memory return response associated with
479 // this instruction, decLGKMInstsIssued() is not executed. Do this
480 // here to decrement the counter and invalidate all buffers
481 if (pkt->cmd == MemCmd::MemSyncResp) {
482 wavefront->decLGKMInstsIssued();
483 flushBuf();
484 restartFromBranch = false;
485 return;
486 }
487
488 Addr vaddr = pkt->req->getVaddr();
489
490 assert(bufferedPCs.find(vaddr) == bufferedPCs.end());
491 DPRINTF(GPUFetch, "WF[%d][%d]: Id%d done fetching for addr %#x\n",
492 wavefront->simdId, wavefront->wfSlotId,
493 wavefront->wfDynId, vaddr);
494
500 auto reserved_pc = reservedPCs.find(vaddr);
501 assert(reserved_pc != reservedPCs.end());
502 bufferedPCs.emplace(vaddr, reserved_pc->second);
503
504 if (readPtr == bufEnd) {
506 }
507
508 reserved_pc->second = nullptr;
509 reservedPCs.erase(reserved_pc);
510}
511
512bool
// True when at least one full raw machine instruction's worth of fetched
// bytes remains undecoded in the buffer.
// NOTE(review): original line 513 (the signature; the member index shows
// hasFetchDataToProcess() const) was dropped by the extractor — confirm.
514{
515 return fetchBytesRemaining() >= sizeof(TheGpuISA::RawMachInst);
516}
517
518void
// Try to release the oldest buffered fetch line back to the free list,
// as long as the wave's current PC is not still being fetched and does
// not live in that oldest line.
// NOTE(review): the extractor dropped original lines 519 (the signature;
// the member index shows checkWaveReleaseBuf()) and 553-559 (a comment
// block) — confirm against the repository source.
520{
521 Addr cur_wave_pc = roundDown(wavefront->pc(),
522 wavefront->computeUnit->cacheLineSize());
// If the line holding the current PC is still in flight, do nothing.
523 if (reservedPCs.find(cur_wave_pc) != reservedPCs.end()) {
524 DPRINTF(GPUFetch, "WF[%d][%d]: Id%d current wave PC(%#x) still "
525 "being fetched.\n", wavefront->simdId, wavefront->wfSlotId,
526 wavefront->wfDynId, cur_wave_pc);
527
528 // should be reserved, but not buffered yet
529 assert(bufferedPCs.find(cur_wave_pc) == bufferedPCs.end());
530
531 return;
532 }
533
534 auto current_buffered_pc = bufferedPCs.find(cur_wave_pc);
535 auto oldest_buffered_pc = bufferedPCs.begin();
536
537 DPRINTF(GPUFetch, "WF[%d][%d]: Id%d checking if PC block addr = %#x"
538 "(PC = %#x) can be released.\n", wavefront->simdId,
539 wavefront->wfSlotId, wavefront->wfDynId, cur_wave_pc,
540 wavefront->pc());
541
542#ifdef GEM5_DEBUG
543 int idx = 0;
544 for (const auto &buf_pc : bufferedPCs) {
545 DPRINTF(GPUFetch, "PC[%d] = %#x\n", idx, buf_pc.first);
546 ++idx;
547 }
548#endif
549
550 // if we haven't buffered data for this PC, we shouldn't
551 // be fetching from it.
552 assert(current_buffered_pc != bufferedPCs.end());
553
// Only release the oldest line when the wave has moved past it.
560 if (current_buffered_pc != oldest_buffered_pc) {
561 DPRINTF(GPUFetch, "WF[%d][%d]: Id%d done fetching for PC = %#x, "
562 "removing it from the fetch buffer.\n", wavefront->simdId,
563 wavefront->wfSlotId, wavefront->wfDynId,
564 oldest_buffered_pc->first);
565
566 freeList.emplace_back(oldest_buffered_pc->second);
567 oldest_buffered_pc->second = nullptr;
568 bufferedPCs.erase(oldest_buffered_pc);
569 DPRINTF(GPUFetch, "WF[%d][%d]: Id%d has %d lines buffered.\n",
570 wavefront->simdId, wavefront->wfSlotId, wavefront->wfDynId,
571 bufferedLines());
572 }
573}
574
575void
// Decode buffered fetch data into GPUDynInsts and push them onto the
// wave's instruction buffer, delegating to the split-instruction path when
// the next instruction straddles the end of the raw buffer.
// NOTE(review): the extractor dropped original lines 576 (the signature;
// the member index shows decodeInsts()), 581 and 587 (the bodies of the
// two `if (splitDecode())` branches, presumably calls to
// decodeSplitInst()), 585 (the remainder of the while-condition begun at
// 584), and 608 (the trailing DPRINTF argument). The visible loop is NOT
// complete; confirm against the repository source.
577{
578 assert(readPtr);
579
580 if (splitDecode()) {
582 }
583
584 while (wavefront->instructionBuffer.size() < maxIbSize
586 if (splitDecode()) {
588 } else {
589 TheGpuISA::MachInst mach_inst
590 = reinterpret_cast<TheGpuISA::MachInst>(readPtr);
591 GPUStaticInst *gpu_static_inst = _decoder->decode(mach_inst);
592 readPtr += gpu_static_inst->instSize();
593
594 assert(readPtr <= bufEnd);
595
596 GPUDynInstPtr gpu_dyn_inst
597 = std::make_shared<GPUDynInst>(wavefront->computeUnit,
598 wavefront, gpu_static_inst,
599 wavefront->computeUnit->
600 getAndIncSeqNum());
601 wavefront->instructionBuffer.push_back(gpu_dyn_inst);
602
603 DPRINTF(GPUFetch, "WF[%d][%d]: Id%ld decoded %s (%d bytes). "
604 "%d bytes remain.\n", wavefront->simdId,
605 wavefront->wfSlotId, wavefront->wfDynId,
606 gpu_static_inst->disassemble(),
607 gpu_static_inst->instSize(),
609 }
610 }
611}
612
613void
// Decode an instruction whose bytes straddle the end of the circular raw
// buffer: reassemble it dword-by-dword into split_inst, decode that, and
// advance readPtr past the portion that wrapped to the buffer start.
// NOTE(review): the extractor dropped original lines 614 (the signature;
// the member index shows a split-instruction decode helper) and 624 (the
// body of the `if (readPtr + dword_size >= bufEnd)` at 623, presumably
// the readPtr wrap/advance) — confirm against the repository source.
615{
616 TheGpuISA::RawMachInst split_inst = 0;
617 int dword_size = sizeof(uint32_t);
618 int num_dwords = sizeof(TheGpuISA::RawMachInst) / dword_size;
619
620 for (int i = 0; i < num_dwords; ++i) {
621 replaceBits(split_inst, 32*(i+1)-1, 32*i,
622 *reinterpret_cast<uint32_t*>(readPtr));
623 if (readPtr + dword_size >= bufEnd) {
625 }
626 }
627
// After reassembly readPtr must have wrapped to the buffer start.
628 assert(readPtr == bufStart);
629
630 TheGpuISA::MachInst mach_inst
631 = reinterpret_cast<TheGpuISA::MachInst>(&split_inst);
632 GPUStaticInst *gpu_static_inst = _decoder->decode(mach_inst);
633 readPtr += (gpu_static_inst->instSize() - dword_size);
634 assert(readPtr < bufEnd);
635
636 GPUDynInstPtr gpu_dyn_inst
637 = std::make_shared<GPUDynInst>(wavefront->computeUnit,
638 wavefront, gpu_static_inst,
639 wavefront->computeUnit->
640 getAndIncSeqNum());
641 wavefront->instructionBuffer.push_back(gpu_dyn_inst);
642
643 DPRINTF(GPUFetch, "WF[%d][%d]: Id%d decoded split inst %s (%#x) "
644 "(%d bytes). %d bytes remain in %d buffered lines.\n",
645 wavefront->simdId, wavefront->wfSlotId, wavefront->wfDynId,
646 gpu_static_inst->disassemble(), split_inst,
647 gpu_static_inst->instSize(), fetchBytesRemaining(),
648 bufferedLines());
649}
650
651bool
// True when the next raw machine instruction would run past the end of
// the buffer, i.e. it is split across the buffer's end/beginning.
// NOTE(review): original lines 652 (the signature; the member index shows
// splitDecode() const) and 654-657 (a comment block) were dropped by the
// extractor — confirm.
653{
658 bool is_split = (readPtr + sizeof(TheGpuISA::RawMachInst)) > bufEnd;
659
660 return is_split;
661}
662
663int
// Count the fetched-but-not-yet-decoded bytes: the distance from readPtr
// to the end of the last buffered line, plus (when readPtr sits past that
// line in the circular buffer) the wrapped-around buffered bytes.
// NOTE(review): original line 664 (the signature; the member index shows
// fetchBytesRemaining() const) was dropped by the extractor — confirm.
665{
666 int bytes_remaining = 0;
667
668 if (bufferedLines() && readPtr != bufEnd) {
669 auto last_buf_pc = bufferedPCs.rbegin();
670 uint8_t *end_ptr = last_buf_pc->second + cacheLineSize;
671 int byte_diff = end_ptr - readPtr;
672
673 if (end_ptr > readPtr) {
674 bytes_remaining = byte_diff;
675 } else if (end_ptr < readPtr) {
// byte_diff is negative here, so this subtracts the gap.
676 bytes_remaining = bufferedBytes() + byte_diff;
677 }
678 }
679
680 assert(bytes_remaining <= bufferedBytes());
681 return bytes_remaining;
682}
683
684void
// Callback for system-memory (system hub) fetches: turn the request packet
// into a response and route it through the CU's SQC return path.
// NOTE(review): original line 685 (the signature, presumably
// FetchUnit::SystemHubEvent::process()) was dropped by the extractor —
// confirm.
686{
687 reqPkt->makeResponse();
688 fetchUnit->computeUnit.handleSQCReturn(reqPkt);
689}
690
691} // namespace gem5
#define DPRINTF(x,...)
Definition trace.hh:209
fetch buffer descriptor.
Definition fetch_unit.hh:75
std::map< Addr, uint8_t * > reservedPCs
void reserveBuf(Addr vaddr)
reserve an entry in the fetch buffer for PC = vaddr,
uint8_t * readPtr
pointer that points to the next chunk of inst data to be decoded.
int fetchBytesRemaining() const
calculates the number of fetched bytes that have yet to be decoded.
void checkWaveReleaseBuf()
checks if the wavefront can release any of its fetch buffer entries.
bool hasFetchDataToProcess() const
checks if the buffer contains valid data.
std::map< Addr, uint8_t * > bufferedPCs
the set of PCs (fetch addresses) that are currently buffered.
void allocateBuf(int fetch_depth, int cache_line_size, Wavefront *wf)
allocate the fetch buffer space, and set the fetch depth (number of lines that may be buffered),...
void fetchDone(PacketPtr ptr)
bool splitDecode() const
check if the next instruction to be processed out of the fetch buffer is split across the end/beginni...
void decodeInsts()
each time the fetch stage is ticked, we check if there are any data in the fetch buffer that may be d...
uint8_t * bufStart
raw instruction buffer.
TheGpuISA::Decoder * _decoder
std::deque< uint8_t * > freeList
represents the fetch buffer free list.
static uint32_t globalFetchUnitID
Definition fetch_unit.hh:67
Scheduler fetchScheduler
std::vector< Wavefront * > * waveList
void bindWaveList(std::vector< Wavefront * > *list)
FetchUnit(const ComputeUnitParams &p, ComputeUnit &cu)
Definition fetch_unit.cc:52
void fetch(PacketPtr pkt, Wavefront *wavefront)
std::vector< Wavefront * > fetchQueue
void initiateFetch(Wavefront *wavefront)
int fetchDepth
number of cache lines we can fetch and buffer.
TheGpuISA::Decoder decoder
ComputeUnit & computeUnit
void processFetchReturn(PacketPtr pkt)
std::vector< FetchBufDesc > fetchBuf
void flushBuf(int wfSlotId)
std::vector< std::pair< Wavefront *, bool > > fetchStatusQueue
const std::string & disassemble()
virtual int instSize() const =0
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition packet.hh:295
void dataStatic(T *p)
Set the data pointer to the following value that should not be freed.
Definition packet.hh:1175
SenderState * senderState
This packet's sender state.
Definition packet.hh:545
RequestPtr req
A pointer to the original request.
Definition packet.hh:377
MemCmd cmd
The command field of the packet.
Definition packet.hh:372
@ INST_FETCH
The request was an instruction fetch.
Definition request.hh:115
Addr pc() const
const int simdId
Definition wavefront.hh:102
std::deque< GPUDynInstPtr > instructionBuffer
Definition wavefront.hh:112
status_e getStatus()
Definition wavefront.hh:142
const int wfSlotId
Definition wavefront.hh:99
@ S_WAITCNT
wavefront has unsatisfied wait counts
Definition wavefront.hh:89
uint64_t wfDynId
Definition wavefront.hh:235
STL vector class.
Definition stl.hh:37
static constexpr std::enable_if_t< std::is_integral_v< T >, int > floorLog2(T x)
Definition intmath.hh:59
static constexpr bool isPowerOf2(const T &n)
Definition intmath.hh:98
static constexpr T roundDown(const T &val, const U &align)
This function is used to align addresses in memory.
Definition intmath.hh:279
constexpr void replaceBits(T &val, unsigned first, unsigned last, B bit_val)
A convenience function to replace bits first to last of val with bit_val in place.
Definition bitfield.hh:216
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
Definition logging.hh:246
Bitfield< 7 > i
Definition misc_types.hh:67
Bitfield< 0 > p
Addr makeLineAddress(Addr addr, int cacheLineBits)
Definition Address.cc:61
Copyright (c) 2024 Arm Limited All rights reserved.
Definition binary32.hh:36
T safe_cast(U &&ref_or_ptr)
Definition cast.hh:74
std::shared_ptr< Request > RequestPtr
Definition request.hh:94
std::shared_ptr< GPUDynInst > GPUDynInstPtr
Definition misc.hh:49
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition types.hh:147
Packet * PacketPtr
SenderState is information carried along with the packet throughout the TLB hierarchy.
GPU TranslationState: this currently is a somewhat bastardization of the usage of SenderState,...

Generated on Mon May 26 2025 09:19:10 for gem5 by doxygen 1.13.2