#include "debug/GPUExec.hh"
#include "debug/GPUInitAbi.hh"
#include "debug/WavefrontStack.hh"
Wavefront::Wavefront(const Params &p)
    : SimObject(p), wfSlotId(p.wf_slot_id), simdId(p.simdId),
      maxIbSize(p.max_ib_size), _gpuISA(*this),
      vmWaitCnt(-1), expWaitCnt(-1), lgkmWaitCnt(-1),
      vmemInstsIssued(0), expInstsIssued(0), lgkmInstsIssued(0),
      sleepCnt(0), barId(WFBarrier::InvalidID), stats(this)
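// --- Example sketch (not from wavefront.cc): the -1 initializers above
// follow the convention noted later on this page for vmWaitCnt: a count
// of -1 means no s_waitcnt is pending for that counter. A minimal,
// self-contained model of that sentinel logic (the in-flight counts are
// hypothetical inputs):
#include <cassert>

struct WaitCntState {
    int vmWaitCnt = -1;    // vector memory
    int expWaitCnt = -1;   // exports
    int lgkmWaitCnt = -1;  // LDS/GDS/constant/message

    // A counter only gates progress while it is armed (>= 0).
    bool satisfied(int vmemInFlight, int expInFlight, int lgkmInFlight) const
    {
        if (vmWaitCnt >= 0 && vmemInFlight > vmWaitCnt) return false;
        if (expWaitCnt >= 0 && expInFlight > expWaitCnt) return false;
        if (lgkmWaitCnt >= 0 && lgkmInFlight > lgkmWaitCnt) return false;
        return true;
    }
};

int main()
{
    WaitCntState w;
    assert(w.satisfied(3, 0, 1));  // nothing armed: never blocks
    w.vmWaitCnt = 0;               // wait for all vmem ops to retire
    assert(!w.satisfied(2, 0, 0));
    assert(w.satisfied(0, 0, 0));
}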
// ...
for (int i = 0; i < 3; ++i) {
// ...
uint32_t wiCount = 0;
uint32_t firstWave = 0;
int orderedAppendTerm = 0;
// ...
uint32_t finalValue = 0;
// ...
Addr hidden_priv_base(0);
// ... (the four scratch-resource-descriptor words are written to
// consecutive SGPRs; each write is followed by the same DPRINTF)
DPRINTF(GPUInitAbi, "CU%d: WF[%d][%d]: wave[%d] "
        "Setting PrivateSegBuffer: s[%d] = %x\n", ...);
// ...
        bits(host_disp_pkt_addr, 31, 0));
DPRINTF(GPUInitAbi, "CU%d: WF[%d][%d]: wave[%d] "
        "Setting DispatchPtr: s[%d] = %x\n", ...,
        bits(host_disp_pkt_addr, 31, 0));
// ...
        bits(host_disp_pkt_addr, 63, 32));
DPRINTF(GPUInitAbi, "CU%d: WF[%d][%d]: wave[%d] "
        "Setting DispatchPtr: s[%d] = %x\n", ...,
        bits(host_disp_pkt_addr, 63, 32));
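// --- Example sketch (not from wavefront.cc): the dispatch-packet pointer
// is 64 bits but each SGPR holds 32, so the listing writes it as a lo/hi
// pair via bits(). A stand-alone re-implementation of that helper (the
// real template lives in gem5's base/bitfield.hh); it assumes the
// extracted field is narrower than the full word:
#include <cassert>
#include <cstdint>

template <typename T>
constexpr T bits(T val, unsigned first, unsigned last)
{
    // 'first' is the high bit, 'last' the low bit, both inclusive.
    return (val >> last) & ((static_cast<T>(1) << (first - last + 1)) - 1);
}

int main()
{
    uint64_t host_disp_pkt_addr = 0x123456789abcdef0ull;
    uint32_t lo = bits(host_disp_pkt_addr, 31u, 0u);
    uint32_t hi = bits(host_disp_pkt_addr, 63u, 32u);
    assert(lo == 0x9abcdef0u && hi == 0x12345678u);
}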
// ...
DPRINTF(GPUInitAbi, "CU%d: WF[%d][%d]: wave[%d] "
        "Setting QueuePtr: s[%d] = %x\n", ...);
// ...
DPRINTF(GPUInitAbi, "CU%d: WF[%d][%d]: wave[%d] "
        "Setting QueuePtr: s[%d] = %x\n", ...);
// ...
        bits(kernarg_addr, 31, 0));
DPRINTF(GPUInitAbi, "CU%d: WF[%d][%d]: wave[%d] "
        "Setting KernargSegPtr: s[%d] = %x\n", ...,
        bits(kernarg_addr, 31, 0));
// ...
        bits(kernarg_addr, 63, 32));
DPRINTF(GPUInitAbi, "CU%d: WF[%d][%d]: wave[%d] "
        "Setting KernargSegPtr: s[%d] = %x\n", ...,
        bits(kernarg_addr, 63, 32));
// ...
DPRINTF(GPUInitAbi, "CU%d: WF[%d][%d]: wave[%d] "
        "Setting FlatScratch Addr: s[%d] = %x\n", ...);
// ...
DPRINTF(GPUInitAbi, "CU%d: WF[%d][%d]: wave[%d] "
        "Setting FlatScratch size: s[%d] = %x\n", ...);
// ... (tail of the hidden_priv_base computation)
        & 0x000000000000ffff) << 32);
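// --- Example sketch (not from wavefront.cc): the mask-and-shift above is
// consistent with rebuilding a wide base address from 32-bit descriptor
// words, where word 0 supplies bits [31:0] and the low 16 bits of word 1
// supply bits [47:32]. The descriptor values here are hypothetical:
#include <cassert>
#include <cstdint>

int main()
{
    uint32_t desc[4] = {0x89abcdefu, 0xdead4567u, 0u, 0u};

    uint64_t base = static_cast<uint64_t>(desc[0]) |
        ((static_cast<uint64_t>(desc[1]) & 0x000000000000ffffull) << 32);

    assert(base == 0x456789abcdefull);
}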
// ...
DPRINTF(GPUInitAbi, "CU%d: WF[%d][%d]: wave[%d] "
        "Setting num WG X: s[%d] = %x\n", ...);
// ...
DPRINTF(GPUInitAbi, "CU%d: WF[%d][%d]: wave[%d] "
        "Setting num WG Y: s[%d] = %x\n", ...);
// ...
DPRINTF(GPUInitAbi, "CU%d: WF[%d][%d]: wave[%d] "
        "Setting num WG Z: s[%d] = %x\n", ...);
// ...
DPRINTF(GPUInitAbi, "CU%d: WF[%d][%d]: wave[%d] "
        "Setting WG ID X: s[%d] = %x\n", ...);
// ...
DPRINTF(GPUInitAbi, "CU%d: WF[%d][%d]: wave[%d] "
        "Setting WG ID Y: s[%d] = %x\n", ...);
// ...
DPRINTF(GPUInitAbi, "CU%d: WF[%d][%d]: wave[%d] "
        "Setting WG ID Z: s[%d] = %x\n", ...);
// ...
DPRINTF(GPUInitAbi, "CU%d: WF[%d][%d]: wave[%d] "
        "Setting Private Seg Offset: s[%d] = %x\n", ...);
// ...
firstWave = (wfId == 0) ? 1 : 0;
numWfsInWg = divCeil(wgSizeInWorkItems, ...);
// ...
finalValue = firstWave << ((sizeof(uint32_t) * 8) - 1);
finalValue |= (orderedAppendTerm << 6);
finalValue |= numWfsInWg;
// ...
write(physSgprIdx, finalValue);
// ...
DPRINTF(GPUInitAbi, "CU%d: WF[%d][%d]: wave[%d] "
        "Setting WG Info: s[%d] = %x\n", ...);
// ...
fatal("SGPR enable bit %i not supported\n", en_bit);
// ...
uint32_t physVgprIdx = 0;
// ...
for (int lane = 0; lane < workItemId[0].size(); ++lane) {
// ...
for (int lane = 0; lane < workItemId[1].size(); ++lane) {
// ...
        mapVgpr(this, regInitIdx);
// ...
for (int lane = 0; lane < workItemId[2].size(); ++lane) {
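// --- Example sketch (not from wavefront.cc): the three loops above fill
// one X/Y/Z id per lane. One plausible decomposition of a flattened lane
// id into 3-D work-item ids (the WG dimensions are hypothetical):
#include <cassert>
#include <cstdint>
#include <vector>

int main()
{
    const int wgSize[3] = {8, 4, 2};
    const int numLanes = 64;

    std::vector<uint32_t> workItemId[3];
    for (int i = 0; i < 3; ++i)
        workItemId[i].resize(numLanes);

    for (int lane = 0; lane < numLanes; ++lane) {
        workItemId[0][lane] = lane % wgSize[0];                // X fastest
        workItemId[1][lane] = (lane / wgSize[0]) % wgSize[1];  // then Y
        workItemId[2][lane] = lane / (wgSize[0] * wgSize[1]);  // Z slowest
    }

    assert(workItemId[0][11] == 3 && workItemId[1][11] == 1 &&
           workItemId[2][11] == 0);
}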
545 "CU%d has been idle for %d ticks at tick %d",
// ...
if (ii->isGlobalMem() ||
    (ii->isFlat() && ii->executedAs() == enums::SC_GLOBAL)) {
// ...
if (ii->isLocalMem() ||
    (ii->isFlat() && ii->executedAs() == enums::SC_GROUP)) {
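// --- Example sketch (not from wavefront.cc): FLAT instructions only
// count as global or local memory ops once their resolved segment is
// known; the enum and struct below are stand-ins for gem5's GPUDynInst
// interface:
#include <cassert>

enum class Segment { Global, Group, Private };

struct Inst {
    bool globalMem = false, localMem = false, flat = false;
    Segment executedAs = Segment::Global;
};

bool isGmInstruction(const Inst &ii)
{
    return ii.globalMem || (ii.flat && ii.executedAs == Segment::Global);
}

bool isLmInstruction(const Inst &ii)
{
    return ii.localMem || (ii.flat && ii.executedAs == Segment::Group);
}

int main()
{
    Inst flatToLds;
    flatToLds.flat = true;
    flatToLds.executedAs = Segment::Group;
    assert(!isGmInstruction(flatToLds) && isLmInstruction(flatToLds));
}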
// ...
if (ii->isWaitcnt()) {
    // ...
    assert(ii->isScalar());
// ...
if (status != S_STOPPED && ii->isScalar() && (ii->isNop() || ii->isReturn()
    || ii->isEndOfKernel() || ii->isBranch() || ii->isALU() ||
    (ii->isKernArgSeg() && ii->isLoad()))) {
// ...
    ii->isReturn() || ii->isBranch() || ii->isALU() || ii->isEndOfKernel()
    || (ii->isKernArgSeg() && ii->isLoad()))) {
// ...
if (ii->isReturn() || ii->isBranch() ||
    ii->isEndOfKernel()) {
753 "Negative requests in pipe for WF%d for slot%d"
754 " and SIMD%d: Rd GlobalMem Reqs=%d, Wr GlobalMem Reqs=%d,"
755 " Rd LocalMem Reqs=%d, Wr LocalMem Reqs=%d,"
756 " Outstanding Reqs=%d\n",
// reserveGmResource(ii):
if (!ii->isScalar()) {
    if (ii->isLoad()) {
        // ...
    } else if (ii->isStore()) {
        // ...
    } else if (ii->isAtomic() || ii->isMemSync()) {
        // ...
    } else {
        panic("Invalid memory operation!\n");
    }
} else {
    if (ii->isLoad()) {
        // ...
    } else if (ii->isStore()) {
        // ...
    } else if (ii->isAtomic() || ii->isMemSync()) {
        // ...
    } else {
        panic("Invalid memory operation!\n");
    }
}
// reserveLmResource(ii):
fatal_if(ii->isScalar(),
         "Scalar instructions can not access Shared memory!!!");
if (ii->isLoad()) {
    // ...
} else if (ii->isStore()) {
    // ...
} else if (ii->isAtomic() || ii->isMemSync()) {
    // ...
} else {
    panic("Invalid memory operation!\n");
}
// reserveResources():
if (ii->isALU() || ii->isSpecialOp() ||
    ii->isBranch() || ii->isNop() ||
    (ii->isKernArgSeg() && ii->isLoad()) || ii->isArgSeg() ||
    ii->isReturn() || ii->isEndOfKernel()) {
    if (!ii->isScalar()) {
        // ...
    }
    // ...
} else if (ii->isBarrier()) {
    // ...
} else if (ii->isFlat()) {
    assert(!ii->isScalar());
    // ...
} else if (ii->isGlobalMem()) {
    // ...
} else if (ii->isLocalMem()) {
    // ...
} else if (ii->isPrivateSeg()) {
    fatal_if(ii->isScalar(),
             "Scalar instructions can not access Private memory!!!");
    // ...
} else {
    panic("reserveResources -> Couldn't process op!\n");
}
// ...
assert(execUnitIds.size());
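// --- Example sketch (not from wavefront.cc): reserveResources() returns
// the execution-unit ids an instruction needs; the ids, fields, and the
// rule that FLAT ops reserve both memory pipes are assumptions of this
// sketch:
#include <cassert>
#include <vector>

enum UnitId { ScalarAlu, VectorAlu, GlobalMemPipe, LocalMemPipe };

struct Inst {
    bool alu = false, scalar = false, flat = false;
    bool globalMem = false, localMem = false;
};

std::vector<int> reserveResources(const Inst &ii)
{
    std::vector<int> execUnitIds;
    if (ii.alu) {
        execUnitIds.push_back(ii.scalar ? ScalarAlu : VectorAlu);
    } else if (ii.flat) {
        execUnitIds.push_back(GlobalMemPipe);  // segment not yet resolved
        execUnitIds.push_back(LocalMemPipe);
    } else if (ii.globalMem) {
        execUnitIds.push_back(GlobalMemPipe);
    } else if (ii.localMem) {
        execUnitIds.push_back(LocalMemPipe);
    }
    assert(execUnitIds.size());
    return execUnitIds;
}

int main()
{
    Inst flat;
    flat.flat = true;
    assert(reserveResources(flat).size() == 2);
}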
// ...
DPRINTF(GPUExec, "CU%d: WF[%d][%d]: wave[%d] Executing inst: %s "
        "(pc: %#x; seqNum: %d)\n", computeUnit->cu_id, simdId, wfSlotId,
        wfDynId, ii->disassemble(), old_pc, ii->seqNum());
// ...
if (!ii->isScalar()) {
// ...
for (const auto& srcVecOp : ii->srcVecRegOperands()) {
    for (const auto& virtIdx : srcVecOp.virtIndices()) {
// ...
for (const auto& dstVecOp : ii->dstVecRegOperands()) {
    for (const auto& virtIdx : dstVecOp.virtIndices()) {
// ...
if (pc() == old_pc) {
    // ...
} else {
    DPRINTF(GPUExec, "CU%d: WF[%d][%d]: wave%d %s taken branch\n", ...);
}
// ...
DPRINTF(GPUExec, "CU%d: WF[%d][%d]: wave[%d] (pc: %#x)\n", ...);
// ...
const int num_active_lanes = execMask().count();
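// --- Example sketch (not from wavefront.cc): VectorMask (declared later
// on this page) is a std::bitset with one bit per lane; count() yields
// the number of active lanes exactly as used above:
#include <bitset>
#include <cassert>
#include <limits>

using VectorMask =
    std::bitset<std::numeric_limits<unsigned long long>::digits>;

int main()
{
    VectorMask execMask;
    execMask.set();      // all 64 lanes active
    execMask.reset(0);   // mask off lane 0 (hypothetical divergence)

    const int num_active_lanes = static_cast<int>(execMask.count());
    assert(num_active_lanes == 63);
}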
if (ii->isF16() && ii->isALU()) {
    if (ii->isF32() || ii->isF64()) {
        fatal("Instruction is tagged as both (1) F16, and (2) "
              "either F32 or F64.");
    } else if (ii->isFMA()) {
        stats.numVecOpsExecutedFMA16 += num_active_lanes;
        // ...
    } else if (ii->isMAC()) {
        stats.numVecOpsExecutedMAC16 += num_active_lanes;
        // ...
    } else if (ii->isMAD()) {
        stats.numVecOpsExecutedMAD16 += num_active_lanes;
        // ...
    }
}
if (ii->isF32() && ii->isALU()) {
    if (ii->isF16() || ii->isF64()) {
        fatal("Instruction is tagged as both (1) F32, and (2) "
              "either F16 or F64.");
    } else if (ii->isFMA()) {
        stats.numVecOpsExecutedFMA32 += num_active_lanes;
        // ...
    } else if (ii->isMAC()) {
        stats.numVecOpsExecutedMAC32 += num_active_lanes;
        // ...
    } else if (ii->isMAD()) {
        stats.numVecOpsExecutedMAD32 += num_active_lanes;
        // ...
    }
}
if (ii->isF64() && ii->isALU()) {
    if (ii->isF16() || ii->isF32()) {
        fatal("Instruction is tagged as both (1) F64, and (2) "
              "either F16 or F32.");
    } else if (ii->isFMA()) {
        stats.numVecOpsExecutedFMA64 += num_active_lanes;
        // ...
    } else if (ii->isMAC()) {
        stats.numVecOpsExecutedMAC64 += num_active_lanes;
        // ...
    } else if (ii->isMAD()) {
        stats.numVecOpsExecutedMAD64 += num_active_lanes;
        // ...
    }
}
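// --- Example sketch (not from wavefront.cc): the fatal() calls above
// enforce that an ALU op carries at most one FP precision tag; the same
// check in isolation, with fatal() mocked:
#include <cstdio>
#include <cstdlib>

#define fatal(...) \
    do { std::fprintf(stderr, __VA_ARGS__); std::exit(1); } while (0)

void checkPrecisionTags(bool f16, bool f32, bool f64)
{
    // True exactly when two or more of the three tags are set.
    if ((f16 && (f32 || f64)) || (f32 && f64)) {
        fatal("Instruction carries more than one FP precision tag.\n");
    }
}

int main()
{
    checkPrecisionTags(true, false, false);  // ok: F16 only
}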
bool flat_as_gm = false;
bool flat_as_lm = false;
if (ii->isFlat()) {
    flat_as_gm = (ii->executedAs() == enums::SC_GLOBAL) ||
                 (ii->executedAs() == enums::SC_PRIVATE);
    flat_as_lm = (ii->executedAs() == enums::SC_GROUP);
}
// updateInstStats(ii):
if (ii->isALU() || ii->isSpecialOp() ||
    ii->isBranch() || ii->isNop() ||
    (ii->isKernArgSeg() && ii->isLoad()) ||
    ii->isArgSeg() || ii->isEndOfKernel() || ii->isReturn()) {
    // ...
    if (!ii->isScalar()) {
        // ...
    }
    // ...
} else if (ii->isBarrier()) {
    // ...
} else if (ii->isLoad() && (ii->isGlobalMem() || flat_as_gm)) {
    if (!ii->isScalar()) {
        // ...
    }
    // ...
} else if (ii->isStore() && (ii->isGlobalMem() || flat_as_gm)) {
    if (!ii->isScalar()) {
        // ...
    }
    // ...
} else if ((ii->isAtomic() || ii->isMemSync()) &&
           (ii->isGlobalMem() || flat_as_gm)) {
    if (!ii->isScalar()) {
        // ...
    }
    // ...
} else if (ii->isLoad() && (ii->isLocalMem() || flat_as_lm)) {
    // ...
} else if (ii->isStore() && (ii->isLocalMem() || flat_as_lm)) {
    // ...
} else if ((ii->isAtomic() || ii->isMemSync()) &&
           (ii->isLocalMem() || flat_as_lm)) {
    // ...
} else {
    panic("Bad instruction type!\n");
}
// setWaitCnts(vm_wait_cnt, exp_wait_cnt, lgkm_wait_cnt):
assert(vm_wait_cnt >= 0);
assert(exp_wait_cnt >= 0);
assert(lgkm_wait_cnt >= 0);
// ...
assert(vm_wait_cnt <= 0xf);
assert(exp_wait_cnt <= 0x7);
assert(lgkm_wait_cnt <= 0x1f);
// ...
if (vm_wait_cnt != 0xf)
    // ...
if (exp_wait_cnt != 0x7)
    // ...
if (lgkm_wait_cnt != 0x1f)
    // ...
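// --- Example sketch (not from wavefront.cc): the asserts above bound each
// count to its encoded field width (4 bits for vmcnt, 3 for expcnt, 5 for
// lgkmcnt), and the all-ones value means "do not wait" on that counter,
// matching the != 0xf / != 0x7 / != 0x1f guards. Decoding the fields from
// an s_waitcnt immediate, assuming a GCN3-style layout (vmcnt in [3:0],
// expcnt in [6:4], lgkmcnt in [12:8]):
#include <cassert>
#include <cstdint>

int main()
{
    // Wait for all vector-memory ops; leave exp and lgkm unguarded.
    uint16_t simm16 = (31u << 8) | (7u << 4) | 0u;

    int vm_wait_cnt = simm16 & 0xf;
    int exp_wait_cnt = (simm16 >> 4) & 0x7;
    int lgkm_wait_cnt = (simm16 >> 8) & 0x1f;

    assert(vm_wait_cnt == 0x0);     // armed: wait until none in flight
    assert(exp_wait_cnt == 0x7);    // all-ones: no export wait
    assert(lgkm_wait_cnt == 0x1f);  // all-ones: no LGKM wait
}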
// ...
assert(bar_id < computeUnit->numBarrierSlots());
Wavefront::WavefrontStats::WavefrontStats(statistics::Group *parent)
    : statistics::Group(parent),
      ADD_STAT(numInstrExecuted,
               "number of instructions executed by this WF slot"),
      ADD_STAT(schCycles, "number of cycles spent in schedule stage"),
      ADD_STAT(schStalls, "number of cycles WF is stalled in SCH stage"),
      ADD_STAT(schRfAccessStalls, "number of cycles wave selected in SCH but "
               "RF denied adding instruction"),
      ADD_STAT(schResourceStalls, "number of cycles stalled in sch by resource"
               /* ... */),
      ADD_STAT(schOpdNrdyStalls, "number of cycles stalled in sch waiting for "
               "RF reads to complete"),
      ADD_STAT(/* ... */,
               "number of cycles wave stalled due to LDS-VRF arbitration"),
      ADD_STAT(numTimesBlockedDueWAXDependencies, "number of times the wf's "
               "instructions are blocked due to WAW or WAR dependencies"),
      ADD_STAT(numTimesBlockedDueRAWDependencies, "number of times the wf's "
               "instructions are blocked due to RAW dependencies"),
      ADD_STAT(vecRawDistance,
               "Count of RAW distance in dynamic instructions for this WF"),
      ADD_STAT(readsPerWrite, "Count of Vector reads per write for this WF")
// Cross-referenced declarations (from the Doxygen page):
std::vector<uint32_t> workItemFlatId;
Tick curTick();  // The universal simulation clock.
#define fatal(...)  // Implements a cprintf-based fatal() function.
std::vector<int> vecReads;
std::vector<Addr> lastAddr;
bool isOldestInstFlatMem();
void computeActualWgSz(HSAQueueEntry *task);
statistics::VectorDistribution instInterleave;
statistics::Scalar numVecOpsExecutedTwoOpFP;
std::vector<uint64_t> lastExecCycle;
void flushBuf(int wfSlotId);
void setSleepTime(int sleep_time);
bool isOldestInstBarrier();
std::vector<ScalarRegisterFile *> srf;
void incVectorInstDstOperand(int num_operands);
std::vector<uint32_t> oldVgpr;
std::vector<uint64_t> oldDgpr;
statistics::Scalar numVecOpsExecutedF64;
typedef std::bitset<std::numeric_limits<unsigned long long>::digits>
    VectorMask;
gem5::ComputeUnit::ComputeUnitStats stats;
bool isOldestInstVectorALU();
void setWaitCnts(int vm_wait_cnt, int exp_wait_cnt, int lgkm_wait_cnt);
WaitClass srfToScalarMemPipeBus;
statistics::Vector instCyclesScMemPerSimd;
void initShHiddenPrivateBase(Addr queueBase, uint32_t offset);
WavefrontStats(statistics::Group *parent);
void initRegState(HSAQueueEntry *task, int wgSizeInWorkItems);
statistics::Scalar numVecOpsExecutedFMA32;
WaitClass vrfToGlobalMemPipeBus;
std::vector<PoolManager *> vrfPoolMgrs;
statistics::Distribution activeLanesPerLMemInstrDist;
TheGpuISA::GPUISA _gpuISA;
statistics::Scalar numVecOpsExecutedMAD64;
const FlagsType none;  // Nothing extra to print.
std::unordered_map<int, uint64_t> rawDist;
statistics::Scalar numVecOpsExecutedMAC32;
statistics::Scalar numVecOpsExecutedMAD32;
void sample(const U &v, int n = 1);  // Add a value to the distribution
                                     // n times.
std::vector<VectorRegisterFile *> vrf;
statistics::Vector instCyclesVMemPerSimd;
_amd_queue_t amdQueue;  // Copy of the AMD HSA queue; some of its fields
                        // are needed to initialize registers.
std::vector<uint32_t> workItemId[3];
class Cycles;  // Wrapper class for representing cycle counts.
bool isGmInstruction(GPUDynInstPtr ii);
void setStatus(status_e newStatus);
int mapSgpr(Wavefront *w, int sgprIndex);
void start(uint64_t _wfDynId, uint64_t _base_ptr);
void freeRegisterFile();  // Frees VRF space.
void validateRequestCounters();
Distribution &init(Counter min, Counter max, Counter bkt);  // Set the
                        // parameters of this distribution.
statistics::Scalar numVecOpsExecuted;
static const int InvalidID;
Tick cyclesToTicks(Cycles c) const;
WaitClass vectorSharedMemUnit;
statistics::Distribution activeLanesPerGMemInstrDist;
#define ADD_STAT(n, ...)  // Convenience macro to add a stat to a
                          // statistics group.
void resizeRegFiles(int num_vregs, int num_sregs);
void incLGKMInstsIssued();
bool vgprBitEnabled(int bit) const;
statistics::Scalar numVecOpsExecutedMAC64;
int scalarOutstandingReqsRdGm;
RegisterManager *registerManager;
statistics::Scalar numInstrExecuted;
S_BARRIER,  // WF is stalled at a barrier.
Addr hostDispPktAddr() const;
int scalarOutstandingReqsWrGm;
Cycles vrf_lm_bus_latency;
Wavefront(const Params &p);
int wgSize(int dim) const;
uint32_t scratch_workitem_byte_size;
uint32_t scratch_resource_descriptor[4];
bool isOldestInstWaitcnt();
constexpr T bits(T val, unsigned first, unsigned last);  // Extract the
                        // bitfield from position 'first' to 'last'
                        // (inclusive) from 'val' and right justify it.
int mapWaveToScalarMem(Wavefront *w) const;
int mapWaveToGlobalMem(Wavefront *w) const;
void deleteFromPipeMap(Wavefront *w);
class SimObject;  // Abstract superclass for simulation objects.
void reserveLmResource(GPUDynInstPtr ii);
int gridSize(int dim) const;
std::vector<WaitClass> scalarALUs;
GPUDynInstPtr nextInstr();
std::vector<uint64_t> instExecPerSimd;
Addr hostAMDQueueAddr;  // Host-side addr of the amd_queue_t on which
                        // this task was queued.
ComputeUnit *computeUnit;
typedef uint64_t Addr;  // Address type; will probably be moved elsewhere.
TokenManager *getTokenManager();
statistics::Scalar numVecOpsExecutedF16;
FetchUnit &fetchUnit(int simdId);
int mapVgpr(Wavefront *w, int vgprIndex);
int mapWaveToScalarAlu(Wavefront *w) const;
typedef std::shared_ptr<GPUDynInst> GPUDynInstPtr;
void decVMemInstsIssued();
std::unordered_set<uint64_t> pipeMap;
static constexpr T divCeil(const T &a, const U &b);
void updateInstStats(GPUDynInstPtr gpuDynInst);
statistics::Vector instCyclesLdsPerSimd;
#define panic_if(cond, ...)  // Conditional panic macro: panics only if
                             // the condition is true.
void decLGKMInstsIssued();
int vmWaitCnt;  // Used by waitcnt instructions: once set, wait until the
                // outstanding number of vector memory instructions drops
                // to the given count.
statistics::Scalar numVecOpsExecutedMAD16;
std::vector<int> reserveResources();
statistics::Scalar numVecOpsExecutedMAC16;
virtual void init();  // Called after all C++ SimObjects have been
                      // created and all ports are connected.
Cycles vrf_gm_bus_latency;
statistics::Distribution controlFlowDivergenceDist;
int mapWaveToLocalMem(Wavefront *w) const;
statistics::Scalar numVecOpsExecutedFMA64;
S_WAITCNT,  // Wavefront has unsatisfied wait counts.
bool sgprBitEnabled(int bit) const;
void recvTokens(int num_tokens);  // Increment the number of available
                                  // tokens by num_tokens.
WaitClass vrfToLocalMemPipeBus;
statistics::Distribution execRateDist;
statistics::Scalar numVecOpsExecutedF32;
std::vector<WaitClass> vectorALUs;
bool isOldestInstScalarALU();
statistics::Distribution readsPerWrite;
std::deque<GPUDynInstPtr> instructionBuffer;
bool isLmInstruction(GPUDynInstPtr ii);
gem5::Wavefront::WavefrontStats stats;
bool isOldestInstPrivMem();
void incVMemInstsIssued();
uint64_t scratch_backing_memory_location;
#define fatal_if(cond, ...)  // Conditional fatal macro: raises a fatal
                             // error only if the condition is true.
uint32_t compute_tmpring_size_wavesize;
WaitClass vectorGlobalMemUnit;
statistics::Scalar totalCycles;
int mapWaveToScalarAluGlobalIdx(Wavefront *w) const;
statistics::Distribution vecRawDistance;
void incVectorInstSrcOperand(int num_operands);
typedef VecRegContainer<sizeof(VecElemU32) * NumVecElemPerVecReg>
    VecRegContainerU32;
void reserveGmResource(GPUDynInstPtr ii);
Cycles srf_scm_bus_latency;
bool isOldestInstScalarMem();
statistics::Scalar numVecOpsExecutedFMA16;
#define panic(...)  // Implements a cprintf-based panic() function.
Counter value() const;  // Return the current value of this stat as its
                        // base type.