gpu_command_processor.hh
/*
 * Copyright (c) 2018 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __DEV_HSA_GPU_COMMAND_PROCESSOR_HH__
#define __DEV_HSA_GPU_COMMAND_PROCESSOR_HH__

#include <cstdint>
#include <functional>

#include "base/logging.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/GPUCommandProc.hh"
#include "dev/dma_virt_device.hh"
#include "dev/hsa/hsa_packet_processor.hh"
#include "dev/hsa/hsa_signal.hh"
#include "gpu-compute/dispatcher.hh"
#include "gpu-compute/gpu_compute_driver.hh"
#include "gpu-compute/hsa_queue_entry.hh"
#include "params/GPUCommandProcessor.hh"
#include "sim/full_system.hh"

namespace gem5
{

struct GPUCommandProcessorParams;
class GPUComputeDriver;
class GPUDispatcher;
class Shader;

class GPUCommandProcessor : public DmaVirtDevice
{
  public:
    typedef GPUCommandProcessorParams Params;
    typedef std::function<void(const uint64_t &)> HsaSignalCallbackFunction;

    GPUCommandProcessor() = delete;
    GPUCommandProcessor(const Params &p);

    HSAPacketProcessor& hsaPacketProc();
    RequestorID vramRequestorId();

    void setGPUDevice(AMDGPUDevice *gpu_device);
    void setShader(Shader *shader);
    Shader* shader();
    GPUComputeDriver* driver();

    enum AgentCmd
    {
        Nop = 0,
        Steal = 1
    };

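    // Packet-submission entry points. The HSA packet processor (HSAPP)
    // hands raw AQL packets to the CP through these calls: kernel dispatch
    // packets via submitDispatchPkt(), agent dispatch packets via
    // submitAgentDispatchPkt(), and vendor-specific packets via
    // submitVendorPkt().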
    void submitAgentDispatchPkt(void *raw_pkt, uint32_t queue_id,
                                Addr host_pkt_addr);
    void submitDispatchPkt(void *raw_pkt, uint32_t queue_id,
                           Addr host_pkt_addr);
    void submitVendorPkt(void *raw_pkt, uint32_t queue_id,
                         Addr host_pkt_addr);
    void attachDriver(GPUComputeDriver *driver);

    void dispatchPkt(HSAQueueEntry *task);
    void signalWakeupEvent(uint32_t event_id);

    Tick write(PacketPtr pkt) override { return 0; }
    Tick read(PacketPtr pkt) override { return 0; }
    AddrRangeList getAddrRanges() const override;
    System *system();

    void updateHsaSignal(Addr signal_handle, uint64_t signal_value,
                         HsaSignalCallbackFunction function =
                             [] (const uint64_t &) { });

    uint64_t functionalReadHsaSignal(Addr signal_handle);

    Addr getHsaSignalValueAddr(Addr signal_handle)
    {
        return signal_handle + offsetof(amd_signal_t, value);
    }

    Addr getHsaSignalMailboxAddr(Addr signal_handle)
    {
        return signal_handle + offsetof(amd_signal_t, event_mailbox_ptr);
    }

    Addr getHsaSignalEventAddr(Addr signal_handle)
    {
        return signal_handle + offsetof(amd_signal_t, event_id);
    }

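    // The getHsaSignal*Addr() helpers above turn an HSA signal handle,
    // which in this model is the host virtual address of an amd_signal_t
    // object, into the address of one of its fields. For example, the
    // 64-bit payload read by functionalReadHsaSignal() and written by
    // updateHsaSignal() lives at signal_handle + offsetof(amd_signal_t,
    // value); the exact offsets are fixed by the amd_signal_t layout in
    // hsa_signal.hh.
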
  private:
    Shader *_shader;
    GPUDispatcher &dispatcher;
    GPUComputeDriver *_driver;
    AMDGPUDevice *gpuDevice;
    VegaISA::Walker *walker;

    // Typedefing dmaRead and dmaWrite function pointer
    typedef void (DmaDevice::*DmaFnPtr)(Addr, int, Event*, uint8_t*, Tick);
    void initABI(HSAQueueEntry *task);
    HSAPacketProcessor *hsaPP;
    TranslationGenPtr translate(Addr vaddr, Addr size) override;

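    // The three private callbacks below form a chain of virtual-address
    // DMA reads that runs before a kernel is dispatched:
    // ReadDispIdOffsetDmaEvent() locates the memory queue descriptor
    // (MQD), MQDDmaEvent() copies it into the task and checks that the
    // queue has enough scratch space, and WaitScratchDmaEvent() polls the
    // runtime when more scratch has to be allocated first.
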
    /**
     * Perform a DMA read of the read_dispatch_id_field_base_byte_offset
     * field, which gives the read pointer's offset from the base of the
     * memory queue descriptor (MQD).
     */
    void
    ReadDispIdOffsetDmaEvent(HSAQueueEntry *task,
                             const uint32_t &readDispIdOffset)
    {
        // The MQD base address is the host address of the queue's read
        // pointer minus the read pointer's known offset within the MQD.
        task->hostAMDQueueAddr = hsaPP->getQueueDesc(
            task->queueId())->hostReadIndexPtr - readDispIdOffset;

        // DMA a copy of the MQD into the task; some of its fields are
        // needed to initialize register state for the dispatch.
        auto *mqdDmaEvent = new DmaVirtCallback<int>(
            [ = ] (const int &) { MQDDmaEvent(task); });

        dmaReadVirt(task->hostAMDQueueAddr,
                    sizeof(_amd_queue_t), mqdDmaEvent, &task->amdQueue);
    }

    /**
     * Perform a DMA read of the MQD that corresponds to a hardware
     * queue descriptor (HQD), then check whether the queue has enough
     * private segment (scratch) space for this kernel.
     */
    void
    MQDDmaEvent(HSAQueueEntry *task)
    {
        /**
         * The runtime may lazily under-allocate scratch space. If there
         * is not enough scratch to launch this kernel, raise the queue's
         * queue_inactive_signal so the runtime allocates more; the
         * runtime resets the signal when it is done, at which point we
         * re-check and launch.
         */
        if (task->privMemPerItem() >
            task->amdQueue.compute_tmpring_size_wavesize * 1024) {
            // TODO: Raising this signal will potentially nuke scratch
            // space for in-flight kernels that were launched from this
            // queue. We need to drain all kernels and deschedule the
            // queue before raising this signal. For now, just assert if
            // there are any in-flight kernels and tell the user that this
            // feature still needs to be implemented.
            fatal_if(hsaPP->inFlightPkts(task->queueId()) > 1,
                     "Needed more scratch, but kernels are in flight for "
                     "this queue and it is unsafe to reallocate scratch. "
                     "We need to implement additional intelligence in the "
                     "hardware scheduling logic to support CP-driven "
                     "queue draining and scheduling.");
            DPRINTF(GPUCommandProc, "Not enough scratch space to launch "
                    "kernel (%x available, %x requested bytes per "
                    "workitem). Asking host runtime to allocate more "
                    "space.\n",
                    task->amdQueue.compute_tmpring_size_wavesize,
                    task->privMemPerItem());

            updateHsaSignal(task->amdQueue.queue_inactive_signal.handle, 1,
                            [ = ] (const uint64_t &dma_buffer)
                            { WaitScratchDmaEvent(task, dma_buffer); });

        } else {
            DPRINTF(GPUCommandProc, "Sufficient scratch space, launching "
                    "kernel (%x available, %x requested bytes per "
                    "workitem).\n",
                    task->amdQueue.compute_tmpring_size_wavesize,
                    task->privMemPerItem());
            dispatchPkt(task);
        }
    }
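
    // A worked example of the scratch check above, assuming the WAVESIZE
    // field reports per-wave scratch in 1 KiB granules: if the MQD
    // advertises compute_tmpring_size_wavesize == 16 (16 KiB) and the
    // kernel requests privMemPerItem() == 24576 bytes, the comparison
    // 24576 > 16 * 1024 holds, so the CP raises queue_inactive_signal and
    // waits (via WaitScratchDmaEvent() below) for the runtime to allocate
    // more scratch instead of launching immediately.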

    /**
     * Poll on the queue_inactive signal until the runtime can get around
     * to taking care of our lack of scratch space.
     */
    void
    WaitScratchDmaEvent(HSAQueueEntry *task, const uint64_t &dmaBuffer)
    {
        if (dmaBuffer == 0) {
            DPRINTF(GPUCommandProc, "Host scratch allocation complete. "
                    "Attempting to re-read MQD\n");
            // The runtime has updated the MQD to give us more scratch
            // space; read it back out and retry the dispatch.
            auto cb = new DmaVirtCallback<int>(
                [ = ] (const int &) { MQDDmaEvent(task); });

            dmaReadVirt(task->hostAMDQueueAddr, sizeof(_amd_queue_t), cb,
                        &task->amdQueue);
        } else {
            // The signal is still set: keep polling until the runtime
            // clears queue_inactive_signal to indicate that scratch has
            // been allocated.
            Addr value_addr = getHsaSignalValueAddr(
                task->amdQueue.queue_inactive_signal.handle);
            DPRINTF(GPUCommandProc, "Polling queue inactive signal at "
                    "%p.\n", value_addr);
            auto cb = new DmaVirtCallback<uint64_t>(
                [ = ] (const uint64_t &dma_buffer)
                { WaitScratchDmaEvent(task, dma_buffer); } );

            // Re-read the signal value with a large delay (1e9 ticks) to
            // give the host time to allocate scratch; a non-zero delay
            // also keeps this self-rescheduling callback from spinning
            // in the event queue.
            dmaReadVirt(value_addr, sizeof(Addr), cb, &cb->dmaBuffer, 1e9);
        }
    }
};

} // namespace gem5

#endif // __DEV_HSA_GPU_COMMAND_PROCESSOR_HH__