gem5 v24.0.0.0
Loading...
Searching...
No Matches
gpu_command_processor.hh
Go to the documentation of this file.
1/*
2 * Copyright (c) 2018 Advanced Micro Devices, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
14 *
15 * 3. Neither the name of the copyright holder nor the names of its
16 * contributors may be used to endorse or promote products derived from this
17 * software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
43#ifndef __DEV_HSA_GPU_COMMAND_PROCESSOR_HH__
44#define __DEV_HSA_GPU_COMMAND_PROCESSOR_HH__
45
46#include <cstdint>
47#include <functional>
48
50#include "base/logging.hh"
51#include "base/trace.hh"
52#include "base/types.hh"
53#include "debug/GPUCommandProc.hh"
56#include "dev/hsa/hsa_signal.hh"
60#include "params/GPUCommandProcessor.hh"
61#include "sim/full_system.hh"
62
63namespace gem5
64{
65
66struct GPUCommandProcessorParams;
67class GPUComputeDriver;
68class GPUDispatcher;
69class Shader;
70
72{
73 public:
74 typedef GPUCommandProcessorParams Params;
75 typedef std::function<void(const uint64_t &)> HsaSignalCallbackFunction;
76
79
82
83 void setGPUDevice(AMDGPUDevice *gpu_device);
84 void setShader(Shader *shader);
85 Shader* shader();
87
89 {
90 Nop = 0,
91 Steal = 1
92 };
93
94 void submitAgentDispatchPkt(void *raw_pkt, uint32_t queue_id,
95 Addr host_pkt_addr);
96 void submitDispatchPkt(void *raw_pkt, uint32_t queue_id,
97 Addr host_pkt_addr);
98 void submitVendorPkt(void *raw_pkt, uint32_t queue_id,
99 Addr host_pkt_addr);
101
102 void dispatchKernelObject(AMDKernelCode *akc, void *raw_pkt,
103 uint32_t queue_id, Addr host_pkt_addr);
104 void dispatchPkt(HSAQueueEntry *task);
105 void signalWakeupEvent(uint32_t event_id);
106
107 Tick write(PacketPtr pkt) override { return 0; }
108 Tick read(PacketPtr pkt) override { return 0; }
109 AddrRangeList getAddrRanges() const override;
110 System *system();
111
112 void sendCompletionSignal(Addr signal_handle);
113 void updateHsaSignal(Addr signal_handle, uint64_t signal_value,
115 [] (const uint64_t &) { });
116 void updateHsaSignalAsync(Addr signal_handle, int64_t diff);
117 void updateHsaSignalData(Addr value_addr, int64_t diff,
118 uint64_t *prev_value);
119 void updateHsaSignalDone(uint64_t *signal_value);
120 void updateHsaMailboxData(Addr signal_handle, uint64_t *mailbox_value);
121 void updateHsaEventData(Addr signal_handle, uint64_t *event_value);
122 void updateHsaEventTs(Addr signal_handle, amd_event_t *event_value);
123
124 uint64_t functionalReadHsaSignal(Addr signal_handle);
125
127 {
128 return signal_handle + offsetof(amd_signal_t, value);
129 }
130
132 {
133 return signal_handle + offsetof(amd_signal_t, event_mailbox_ptr);
134 }
135
137 {
138 return signal_handle + offsetof(amd_signal_t, event_id);
139 }
140
141 private:
147
148 // Typedefing dmaRead and dmaWrite function pointer
149 typedef void (DmaDevice::*DmaFnPtr)(Addr, int, Event*, uint8_t*, Tick);
150 void initABI(HSAQueueEntry *task);
151 void sanityCheckAKC(AMDKernelCode *akc);
153 TranslationGenPtr translate(Addr vaddr, Addr size) override;
154
155 // Running counter of dispatched tasks
157
158 // Running counter of dispatched user (non-blit) kernels
160
161 // Skip all user (non-blit) kernels until reaching this kernel
163
164 // Keep track of start times for task dispatches.
165 std::unordered_map<Addr, Tick> dispatchStartTime;
166
179 void
181 const uint32_t &readDispIdOffset)
182 {
191 task->queueId())->hostReadIndexPtr - readDispIdOffset;
192
197 auto *mqdDmaEvent = new DmaVirtCallback<int>(
198 [ = ] (const int &) { MQDDmaEvent(task); });
199
201 sizeof(_amd_queue_t), mqdDmaEvent, &task->amdQueue);
202 }
203
211 void
213 {
228 // TODO: Raising this signal will potentially nuke scratch
229 // space for in-flight kernels that were launched from this
230 // queue. We need to drain all kernels and deschedule the
231 // queue before raising this signal. For now, just assert if
232 // there are any in-flight kernels and tell the user that this
233 // feature still needs to be implemented.
234 fatal_if(hsaPP->inFlightPkts(task->queueId()) > 1,
235 "Needed more scratch, but kernels are in flight for "
236 "this queue and it is unsafe to reallocate scratch. "
237 "We need to implement additional intelligence in the "
238 "hardware scheduling logic to support CP-driven "
239 "queue draining and scheduling.");
240 DPRINTF(GPUCommandProc, "Not enough scratch space to launch "
241 "kernel (%x available, %x requested bytes per "
242 "workitem). Asking host runtime to allocate more "
243 "space.\n",
245 task->privMemPerItem());
246
248 [ = ] (const uint64_t &dma_buffer)
249 { WaitScratchDmaEvent(task, dma_buffer); });
250
251 } else {
252 DPRINTF(GPUCommandProc, "Sufficient scratch space, launching "
253 "kernel (%x available, %x requested bytes per "
254 "workitem).\n",
256 task->privMemPerItem());
257 dispatchPkt(task);
258 }
259 }
260
265 void
266 WaitScratchDmaEvent(HSAQueueEntry *task, const uint64_t &dmaBuffer)
267 {
268 if (dmaBuffer == 0) {
269 DPRINTF(GPUCommandProc, "Host scratch allocation complete. "
270 "Attempting to re-read MQD\n");
279 auto cb = new DmaVirtCallback<int>(
280 [ = ] (const int &) { MQDDmaEvent(task); });
281
282 dmaReadVirt(task->hostAMDQueueAddr, sizeof(_amd_queue_t), cb,
283 &task->amdQueue);
284 } else {
289 Addr value_addr = getHsaSignalValueAddr(
291 DPRINTF(GPUCommandProc, "Polling queue inactive signal at "
292 "%p.\n", value_addr);
293 auto cb = new DmaVirtCallback<uint64_t>(
294 [ = ] (const uint64_t &dma_buffer)
295 { WaitScratchDmaEvent(task, dma_buffer); } );
296
304 dmaReadVirt(value_addr, sizeof(Addr), cb, &cb->dmaBuffer, 1e9);
305 }
306 }
307};
308
309} // namespace gem5
310
311#endif // __DEV_HSA_GPU_COMMAND_PROCESSOR_HH__
#define DPRINTF(x,...)
Definition trace.hh:210
Defines global host-dependent types: Counter, Tick, and (indirectly) {int,uint}{8,...
Device model for an AMD GPU.
Wraps a std::function object in a DmaCallback.
void dmaReadVirt(Addr host_addr, unsigned size, DmaCallback *cb, void *data, Tick delay=0)
Initiate a DMA read from virtual address host_addr.
void sendCompletionSignal(Addr signal_handle)
void submitDispatchPkt(void *raw_pkt, uint32_t queue_id, Addr host_pkt_addr)
submitDispatchPkt() is the entry point into the CP from the HSAPP and is only meant to be used with A...
void ReadDispIdOffsetDmaEvent(HSAQueueEntry *task, const uint32_t &readDispIdOffset)
Perform a DMA read of the read_dispatch_id_field_base_byte_offset field, which follows directly after...
RequestorID vramRequestorId()
Forward the VRAM requestor ID needed for device memory from GPU device.
Addr getHsaSignalMailboxAddr(Addr signal_handle)
void(DmaDevice::* DmaFnPtr)(Addr, int, Event *, uint8_t *, Tick)
void setGPUDevice(AMDGPUDevice *gpu_device)
TranslationGenPtr translate(Addr vaddr, Addr size) override
Function used to translate a range of addresses from virtual to physical addresses.
void signalWakeupEvent(uint32_t event_id)
void updateHsaSignal(Addr signal_handle, uint64_t signal_value, HsaSignalCallbackFunction function=[](const uint64_t &) { })
void updateHsaSignalDone(uint64_t *signal_value)
HSAPacketProcessor & hsaPacketProc()
void submitAgentDispatchPkt(void *raw_pkt, uint32_t queue_id, Addr host_pkt_addr)
submitAgentDispatchPkt() is for accepting agent dispatch packets.
Addr getHsaSignalValueAddr(Addr signal_handle)
void updateHsaEventTs(Addr signal_handle, amd_event_t *event_value)
void dispatchKernelObject(AMDKernelCode *akc, void *raw_pkt, uint32_t queue_id, Addr host_pkt_addr)
void MQDDmaEvent(HSAQueueEntry *task)
Perform a DMA read of the MQD that corresponds to a hardware queue descriptor (HQD).
void attachDriver(GPUComputeDriver *driver)
void initABI(HSAQueueEntry *task)
The CP is responsible for traversing all HSA-ABI-related data structures from memory and initializing...
void updateHsaSignalAsync(Addr signal_handle, int64_t diff)
std::unordered_map< Addr, Tick > dispatchStartTime
Addr getHsaSignalEventAddr(Addr signal_handle)
AddrRangeList getAddrRanges() const override
Every PIO device is obliged to provide an implementation that returns the address ranges the device r...
void submitVendorPkt(void *raw_pkt, uint32_t queue_id, Addr host_pkt_addr)
submitVendorPkt() is for accepting vendor-specific packets from the HSAPP.
void sanityCheckAKC(AMDKernelCode *akc)
Tick write(PacketPtr pkt) override
Pure virtual function that the device must implement.
GPUCommandProcessorParams Params
void dispatchPkt(HSAQueueEntry *task)
Once the CP has finished extracting all relevant information about a task and has initialized the ABI...
Tick read(PacketPtr pkt) override
Pure virtual function that the device must implement.
void updateHsaMailboxData(Addr signal_handle, uint64_t *mailbox_value)
void updateHsaEventData(Addr signal_handle, uint64_t *event_value)
std::function< void(const uint64_t &)> HsaSignalCallbackFunction
uint64_t functionalReadHsaSignal(Addr signal_handle)
void WaitScratchDmaEvent(HSAQueueEntry *task, const uint64_t &dmaBuffer)
Poll on queue_inactive signal until the runtime can get around to taking care of our lack of scratch ...
void updateHsaSignalData(Addr value_addr, int64_t diff, uint64_t *prev_value)
uint64_t inFlightPkts(uint32_t queId)
HSAQueueDescriptor * getQueueDesc(uint32_t queId)
_amd_queue_t amdQueue
Keep a copy of the AMD HSA queue because we need info from some of its fields to initialize register ...
uint32_t queueId() const
Addr hostAMDQueueAddr
Host-side addr of the amd_queue_t on which this task was queued.
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition packet.hh:295
Nop class.
Definition nop.hh:49
The GPUDispatcher is the component of the shader that is responsible for creating and dispatching WGs...
The GPUComputeDriver implements an HSADriver for an HSA AMD GPU agent.
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condi...
Definition logging.hh:236
HSAQueueEntry is the simulator's internal representation of an AQL queue entry (task).
Bitfield< 0 > p
const int NumVecElemPerVecReg(64)
Copyright (c) 2024 - Pranith Kumar Copyright (c) 2020 Inria All rights reserved.
Definition binary32.hh:36
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition types.hh:147
uint64_t Tick
Tick count type.
Definition types.hh:58
uint16_t RequestorID
Definition request.hh:95
std::unique_ptr< TranslationGen > TranslationGenPtr
PM4 packets.
_hsa_signal_t queue_inactive_signal
Definition hsa_queue.hh:87
uint32_t compute_tmpring_size_wavesize
Definition hsa_queue.hh:79

Generated on Tue Jun 18 2024 16:24:04 for gem5 by doxygen 1.11.0