#include "debug/GPUDisp.hh"
#include "debug/GPUExec.hh"
#include "debug/GPUFetch.hh"
#include "debug/GPUMem.hh"
#include "debug/GPUPort.hh"
#include "debug/GPUPrefetch.hh"
#include "debug/GPUReg.hh"
#include "debug/GPURename.hh"
#include "debug/GPUSync.hh"
#include "debug/GPUTLB.hh"
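// The debug headers above define trace flags that can be toggled at runtime
// without recompiling. A usage sketch (binary path and config script are
// illustrative, not prescribed by this file):
//
//   build/GCN3_X86/gem5.opt --debug-flags=GPUDisp,GPUMem,GPUTLB \
//       configs/example/apu_se.py -c <benchmark>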
    numVectorGlobalMemUnits(p.num_global_mem_pipes),
    numVectorSharedMemUnits(p.num_shared_mem_pipes),
    numScalarMemUnits(p.num_scalar_mem_pipes),
    numVectorALUs(p.num_SIMDs),
    numScalarALUs(p.num_scalar_cores),
    vrfToCoalescerBusWidth(p.vrf_to_coalescer_bus_width),
    coalescerToVrfBusWidth(p.coalescer_to_vrf_bus_width),
    registerManager(p.register_manager),
    scoreboardCheckStage(p, *this, scoreboardCheckToSchedule),
    scheduleStage(p, *this, scoreboardCheckToSchedule, scheduleToExecute),
    execStage(p, *this, scheduleToExecute),
    globalMemoryPipe(p, *this),
    localMemoryPipe(p, *this),
    scalarMemoryPipe(p, *this),
    tickEvent([this]{ exec(); }, "Compute unit tick event",
              false, Event::CPU_Tick_Pri),
    vrf(p.vector_register_file), srf(p.scalar_register_file),
    rfc(p.register_file_cache),
    simdWidth(p.simd_width),
    spBypassPipeLength(p.spbypass_pipe_length),
    dpBypassPipeLength(p.dpbypass_pipe_length),
    rfcPipeLength(p.rfc_pipe_length),
    scalarPipeStages(p.scalar_pipe_length),
    operandNetworkLength(p.operand_network_length),
    issuePeriod(p.issue_period),
    vrf_gm_bus_latency(p.vrf_gm_bus_latency),
    srf_scm_bus_latency(p.srf_scm_bus_latency),
    vrf_lm_bus_latency(p.vrf_lm_bus_latency),
    perLaneTLB(p.perLaneTLB), prefetchDepth(p.prefetch_depth),
    prefetchStride(p.prefetch_stride), prefetchType(p.prefetch_prev_type),
    debugSegFault(p.debugSegFault),
    functionalTLB(p.functionalTLB), localMemBarrier(p.localMemBarrier),
    countPages(p.countPages),
    req_tick_latency(p.mem_req_latency * p.clk_domain->clockPeriod()),
    resp_tick_latency(p.mem_resp_latency * p.clk_domain->clockPeriod()),
    scalar_req_tick_latency(
        p.scalar_mem_req_latency * p.clk_domain->clockPeriod()),
    scalar_resp_tick_latency(
        p.scalar_mem_resp_latency * p.clk_domain->clockPeriod()),
    _requestorId(p.system->getRequestorId(this, "ComputeUnit")),
    lds(*p.localDataStore), gmTokenPort(name() + ".gmTokenPort", this),
    _cacheLineSize(p.system->cacheLineSize()),
    _numBarrierSlots(p.num_barrier_slots),
    globalSeqNum(0), wavefrontSize(p.wf_size),
    scoreboardCheckToSchedule(p),
    scheduleToExecute(p),
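    // The *_tick_latency members convert a latency given in cycles into
    // ticks once, at construction time. Worked example (values assumed,
    // not defaults): with mem_req_latency = 40 cycles and a 1 GHz
    // clk_domain, clockPeriod() is 1000 ticks at gem5's 1 THz tick
    // resolution, so req_tick_latency = 40 * 1000 = 40000 ticks.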
126 "Functional TLB not supported in full-system GPU simulation");
137 fatal_if(
p.wf_size > std::numeric_limits<unsigned long long>::digits ||
139 "WF size is larger than the host can support");
141 "Wavefront size should be a power of 2");
144 numCyclesPerStoreTransfer =
145 (uint32_t)ceil((
double)(wfSize() *
sizeof(uint32_t)) /
146 (double)vrfToCoalescerBusWidth);
148 numCyclesPerLoadTransfer = (wfSize() *
sizeof(uint32_t))
149 / coalescerToVrfBusWidth;
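    // Worked example with assumed (not mandated) parameters: wfSize() = 64
    // lanes and a 32-byte bus in each direction give
    //   numCyclesPerStoreTransfer = ceil((64 * 4) / 32.0) = 8 cycles,
    // and the same figure for loads; the store path rounds up with ceil()
    // so a transfer smaller than the bus width still costs a full cycle.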
    idleWfs = p.n_wf * numVectorALUs;
    lastVaddrWF.resize(numVectorALUs);
    wfList.resize(numVectorALUs);

    wfBarrierSlots.resize(p.num_barrier_slots, WFBarrier());

    for (int i = 0; i < p.num_barrier_slots; ++i) {
        freeBarrierIds.insert(i);
    }

    for (int j = 0; j < numVectorALUs; ++j) {
        lastVaddrWF[j].resize(p.n_wf);

        for (int i = 0; i < p.n_wf; ++i) {
            lastVaddrWF[j][i].resize(wfSize());

            wfList[j].push_back(p.wavefronts[j * p.n_wf + i]);
            wfList[j][i]->setParent(this);

            for (int k = 0; k < wfSize(); ++k) {
                lastVaddrWF[j][i][k] = 0;
            }
        }
    }
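    // The flat p.wavefronts vector is indexed as [simd * n_wf + slot];
    // e.g. with an assumed n_wf = 10, WF slot 3 of SIMD 2 is
    // p.wavefronts[2 * 10 + 3] = p.wavefronts[23].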
    lastVaddrSimd.resize(numVectorALUs);

    for (int i = 0; i < numVectorALUs; ++i) {
        lastVaddrSimd[i].resize(wfSize(), 0);
    }

    lastVaddrCU.resize(wfSize());

    if (p.execPolicy == "OLDEST-FIRST") {
        // ...
    } else if (p.execPolicy == "ROUND-ROBIN") {
        // ...
    } else {
        fatal("Invalid WF execution policy (CU)\n");
    }

    for (int i = 0; i < p.port_memory_port_connection_count; ++i) {
        // ...
    }

    for (int i = 0; i < p.port_translation_port_connection_count; ++i) {
        // ...
    }

    memPortTokens = new TokenManager(p.max_cu_tokens);

    lastExecCycle.resize(numVectorALUs, 0);

    for (int i = 0; i < vrf.size(); ++i) {
        vrf[i]->setParent(this);
        rfc[i]->setParent(this);
    }

    for (int i = 0; i < srf.size(); ++i) {
        srf[i]->setParent(this);
    }

    numVecRegsPerSimd = vrf[0]->numRegs();
    numScalarRegsPerSimd = srf[0]->numRegs();

    registerManager->setParent(this);

    instExecPerSimd.resize(numVectorALUs, 0);

    fatal_if(!isPowerOf2(_cacheLineSize),
             "Cache line size should be a power of two.");
    cacheLineBits = floorLog2(_cacheLineSize);
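    // With the common 64-byte line (an assumption; the value comes from
    // p.system->cacheLineSize()), cacheLineBits = floorLog2(64) = 6, so a
    // byte address reduces to its line address by dropping the low six
    // bits, e.g. addr & ~((Addr)_cacheLineSize - 1).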
    w->workGroupSz[0] = task->wgSize(0);
    w->workGroupSz[1] = task->wgSize(1);
    w->workGroupSz[2] = task->wgSize(2);
    w->wgSz = w->workGroupSz[0] * w->workGroupSz[1] * w->workGroupSz[2];

    w->computeActualWgSz(task);

    static int _n_wave = 0;

    for (int k = 0; k < wfSize(); ++k) {
        if (k + waveId * wfSize() < w->actualWgSzTotal)
            init_mask[k] = 1;
    }

    w->execMask() = init_mask;

    w->initMask = init_mask.to_ullong();

    if (bar_id > WFBarrier::InvalidID) {
        w->barrierId(bar_id);
    } else {
        assert(!w->hasBarrier());
    }

    for (int k = 0; k < wfSize(); ++k) {
        w->workItemId[0][k] = (k + waveId * wfSize()) % w->actualWgSz[0];
        w->workItemId[1][k] = ((k + waveId * wfSize()) / w->actualWgSz[0]) %
                              w->actualWgSz[1];
        w->workItemId[2][k] = (k + waveId * wfSize()) /
                              (w->actualWgSz[0] * w->actualWgSz[1]);

        w->workItemFlatId[k] = w->workItemId[2][k] * w->actualWgSz[0] *
            w->actualWgSz[1] + w->workItemId[1][k] * w->actualWgSz[0] +
            w->workItemId[0][k];
    }
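    // Sketch of the mapping for an assumed 8x4x2 workgroup (actualWgSz =
    // {8, 4, 2}) with a single 64-lane wave: lane k = 27 decomposes as
    // x = 27 % 8 = 3, y = (27 / 8) % 4 = 3, z = 27 / (8 * 4) = 0, and
    // recombining gives 0*(8*4) + 3*8 + 3 = 27, the flat ID again.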
    w->workGroupId[0] = w->wgId % task->numWg(0);
    w->workGroupId[1] = (w->wgId / task->numWg(0)) % task->numWg(1);
    w->workGroupId[2] = w->wgId / (task->numWg(0) * task->numWg(1));
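    // The same decomposition applies at workgroup granularity: with an
    // assumed 4x2 grid in x and y (task->numWg(0) == 4, task->numWg(1)
    // == 2), wgId = 6 maps to workGroupId = {6 % 4, (6 / 4) % 2, 6 / 8}
    // = {2, 1, 0}.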
    w->ldsChunk = ldsChunk;

    [[maybe_unused]] int32_t refCount =
        lds.increaseRefCounter(w->dispatchId, w->wgId);
    DPRINTF(GPUDisp, "CU%d: increase ref ctr wg[%d] to [%d]\n",
            cu_id, w->wgId, refCount);

    w->instructionBuffer.clear();

    DPRINTF(GPUDisp, "Scheduling wfDynId/barrier_id %d/%d on CU%d: "
            "WF[%d][%d]. Ref cnt:%d\n", _n_wave, w->barrierId(), cu_id,
            w->simdId, w->wfSlotId, refCount);

    w->initRegState(task, w->actualWgSzTotal);

    panic_if(w->wrGmReqsInPipe, "GM write counter for wavefront non-zero\n");
    panic_if(w->rdGmReqsInPipe, "GM read counter for wavefront non-zero\n");
    panic_if(w->wrLmReqsInPipe, "LM write counter for wavefront non-zero\n");
    panic_if(w->rdLmReqsInPipe, "LM read counter for wavefront non-zero\n");
    panic_if(w->outstandingReqs,
             "Outstanding reqs counter for wavefront non-zero\n");
    GPUDynInstPtr gpuDynInst
        = std::make_shared<GPUDynInst>(this, nullptr, /* ... */);

    gpuDynInst->kern_id = kernId;

    req->setContext(gpuDynInst->wfDynId);

    GPUDynInstPtr gpuDynInst
        = std::make_shared<GPUDynInst>(this, nullptr, /* ... */);

    gpuDynInst->kern_id = kernId;

    req->setContext(gpuDynInst->wfDynId);

    gpuDynInst->staticInstruction()->setFlag(GPUStaticInst::Scalar);
    DPRINTF(GPUDisp, "CU%d: Scheduling wakeup next cycle\n", cu_id);

    panic_if(!ldsChunk, "was not able to reserve space for this WG");
    if (num_wfs_in_wg > 1) {
        assert(!wf_barrier.maxBarrierCnt());
        assert(!wf_barrier.numAtBarrier());
        wf_barrier.setMaxBarrierCnt(num_wfs_in_wg);

        DPRINTF(GPUSync, "CU[%d] - Dispatching WG with barrier Id%d. "
                "%d waves using this barrier.\n", cu_id, barrier_id,
                num_wfs_in_wg);
    }

    DPRINTF(GPURename, "SIMD[%d] wfSlotId[%d] WF[%d] "
            "vregDemand[%d] sregDemand[%d]\n", i, j, w->wfDynId,
            vregDemand, sregDemand);

    panic_if(w->instructionBuffer.empty(),
             "Instruction Buffer of WF%d can't be empty", w->wgId);

    panic_if(w->instructionBuffer.empty(),
             "Instruction Buffer of WF%d can't be empty", w->wgId);

    auto it = pipeMap.find(ii->seqNum());
    int trueWgSizeTotal = 1;

    for (int d = 0; d < HSAQueueEntry::MAX_DIM; ++d) {
        trueWgSizeTotal *= trueWgSize[d];
        DPRINTF(GPUDisp, "trueWgSize[%d] = %d\n", d, trueWgSize[d]);
    }

    DPRINTF(GPUDisp, "trueWgSizeTotal = %d\n", trueWgSizeTotal);

    int numWfs = (trueWgSizeTotal + wfSize() - 1) / wfSize();
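    // Ceil-divide: an assumed 100-item workgroup with wfSize() = 64 needs
    // (100 + 63) / 64 = 2 wavefronts, the second of which launches with a
    // partially populated exec mask (see startWavefront above).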
    num_wfs_in_wg = numWfs;

    bool barrier_avail = true;

    if (freeBarrierIds.empty()) {
        barrier_avail = false;
    }

    fatal_if(/* ... */,
             "WG with %d WFs and %d VGPRs per WI cannot be allocated to CU "
             "that has %d VGPRs\n", /* ... */);
    fatal_if(/* ... */,
             "WG with %d WFs and %d SGPRs per WI cannot be scheduled to CU "
             /* ... */);

    int numMappedWfs = 0;

    if (numMappedWfs < numWfs && /* ... */) {
        // ...
    }

    assert(numMappedWfs <= numWfs);

    bool vregAvail = true;
    bool sregAvail = true;

    if (numMappedWfs < numWfs) {
        // ...
    }

    DPRINTF(GPUDisp, "Free WF slots = %d, Mapped WFs = %d, "
            "VGPR Availability = %d, SGPR Availability = %d\n",
            freeWfSlots, numMappedWfs, vregAvail, sregAvail);

    if (!barrier_avail) {
        // ...
    }

    bool can_dispatch = numMappedWfs == numWfs && vregAvail && sregAvail
        && ldsAvail && barrier_avail;
    return wf_barrier.numYetToReachBarrier();

    return wf_barrier.allAtBarrier();

    wf_barrier.incNumAtBarrier();

    return wf_barrier.numAtBarrier();

    return wf_barrier.maxBarrierCnt();

    wf_barrier.decMaxBarrierCnt();

    wf_barrier.release();
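    // Barrier slot lifecycle, as sketched by the accessors above:
    // dispWorkgroup() sizes a free slot with setMaxBarrierCnt(), each wave
    // reaching the barrier calls incNumAtBarrier(), allAtBarrier() gates
    // the release, and a wave that exits early shrinks the expected count
    // via decMaxBarrierCnt() before the slot is finally release()d.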
    for (auto &vecRegFile : vrf) {
        // ...
    }

    for (auto &scRegFile : srf) {
        // ...
    }

    fatal_if(numVectorGlobalMemUnits > 1,
             "No support for multiple Global Memory Pipelines exists!!!");

    fatal_if(numVectorSharedMemUnits > 1,
             "No support for multiple Local Memory Pipelines exists!!!");

    fatal_if(numScalarMemUnits > 1,
             "No support for multiple Scalar Memory Pipelines exists!!!");
    assert(cu != nullptr);

    if (pkt->req->isInvL2()) {
        // ...
    }

    panic("Unknown MemSyncResp not from an instruction");

    if (gpuDynInst->isKernelLaunch()) {
        assert(pkt->req->isKernel());
        assert(pkt->req->isInvL1());
    } else if (/* ... */ && gpuDynInst->isEndOfKernel()) {
        assert(pkt->req->isKernel());
        assert(pkt->req->isGL2CacheFlush());
    }

    DPRINTF(GPUDisp, "CU%d: WF[%d][%d][wv=%d]: WG %d completed\n",
            computeUnit->cu_id, w->simdId, w->wfSlotId,
            w->wfDynId, w->wgId);

    if (!pkt->req->isKernel()) {
        w = computeUnit->wfList[gpuDynInst->simdId][gpuDynInst->wfSlotId];
        DPRINTF(GPUExec, "MemSyncResp: WF[%d][%d] WV%d %s decrementing "
                "outstanding reqs %d => %d\n", gpuDynInst->simdId,
                gpuDynInst->wfSlotId, gpuDynInst->wfDynId,
                gpuDynInst->disassemble(), w->outstandingReqs,
                w->outstandingReqs - 1);
    }

    computeUnit->globalMemoryPipe.handleResponse(gpuDynInst);

    EventFunctionWrapper *mem_resp_event =
        computeUnit->memPort[index].createMemRespEvent(pkt);

    DPRINTF(GPUPort,
            "CU%d: WF[%d][%d]: gpuDynInst: %d, index %d, addr %#x received!\n",
            computeUnit->cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId,
            gpuDynInst->seqNum(), index, pkt->req->getPaddr());

    computeUnit->schedule(mem_resp_event,
                          curTick() + computeUnit->resp_tick_latency);
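    // Responses are not handled inline: the packet is wrapped in a
    // self-deleting event and re-scheduled resp_tick_latency ticks into
    // the future, modeling the fixed return latency from the memory
    // system back to the register files.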
    return handleResponse(pkt);

    if (pkt->req->isKernel()) {
        // ...
    }

    assert(!pkt->req->isKernel());

    assert(gpuDynInst->numScalarReqs > 0);

    gpuDynInst->numScalarReqs--;

    if (!gpuDynInst->numScalarReqs) {
        if (gpuDynInst->isLoad() || gpuDynInst->isAtomic()) {
            computeUnit->scalarMemoryPipe.getGMLdRespFIFO().push(
                gpuDynInst);
        } else {
            computeUnit->scalarMemoryPipe.getGMStRespFIFO().push(
                gpuDynInst);
        }
    }
    for (const auto &pkt : retries) {
        if (!sendTimingReq(pkt)) {
            break;
        } else {
            retries.pop_front();
        }
    }

    int len = retries.size();

    for (int i = 0; i < len; ++i) {
        PacketPtr pkt = retries.front().first;
        [[maybe_unused]] GPUDynInstPtr gpuDynInst = retries.front().second;
        DPRINTF(GPUMem, "CU%d: WF[%d][%d]: retry mem inst addr %#x\n",
                computeUnit->cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId,
                pkt->req->getPaddr());

        if (!sendTimingReq(pkt)) {
            DPRINTF(GPUMem, "failed again!\n");
            break;
        } else {
            DPRINTF(GPUMem, "successful!\n");
            retries.pop_front();
        }
    }
    if (sender_state->wavefront != nullptr) {
        computeUnit->handleSQCReturn(pkt);
    }

    int len = retries.size();

    for (int i = 0; i < len; ++i) {
        PacketPtr pkt = retries.front().first;
        [[maybe_unused]] Wavefront *wavefront = retries.front().second;
        DPRINTF(GPUFetch, "CU%d: WF[%d][%d]: retrying FETCH addr %#x\n",
                computeUnit->cu_id, wavefront->simdId, wavefront->wfSlotId,
                pkt->req->getPaddr());
        if (!sendTimingReq(pkt)) {
            DPRINTF(GPUFetch, "failed again!\n");
            break;
        } else {
            DPRINTF(GPUFetch, "successful!\n");
            retries.pop_front();
        }
    }

    return "ComputeUnit SQC memory request event";
    assert(!pkt->req->systemReq());

    Addr tmp_vaddr = pkt->req->getVaddr();

    pkt->req->setPC(gpuDynInst->wavefront()->pc());

    pkt->req->setReqInstSeqNum(gpuDynInst->seqNum());

    } else if (pkt->isRead()) {
        // ...
    } else {
        fatal("pkt is neither a read nor a write\n");
    }

    unsigned size = pkt->getSize();

    panic("CU%d: WF[%d][%d]: Access to addr %#x is unaligned!\n",
          cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, vaddr);

    if (!p->pTable->translate(vaddr, paddr)) {
        if (!p->fixupFault(vaddr)) {
            panic("CU%d: WF[%d][%d]: Fault on addr %#x!\n",
                  cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId,
                  vaddr);
        }
    }

    tlbPort[tlbPort_index].sendFunctional(pkt);

    int hit_level = translation_state->hitLevel;
    assert(hit_level != -1);

    delete sender_state->saved;
    delete sender_state;

    assert(pkt->req->hasPaddr());
    assert(pkt->req->hasSize());

    uint8_t *tmpData = oldPkt->getPtr<uint8_t>();

    gpuDynInst->memStatusVector[pkt->getAddr()].push_back(index);
    gpuDynInst->tlbHitLevel[index] = hit_level;

    DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x data "
            "scheduled\n", cu_id, gpuDynInst->simdId,
            gpuDynInst->wfSlotId, index, pkt->req->getPaddr());

    } else if (tlbPort[tlbPort_index].isStalled()) {
        assert(tlbPort[tlbPort_index].retries.size() > 0);

        DPRINTF(GPUTLB, "CU%d: WF[%d][%d]: Translation for addr %#x "
                "failed!\n", cu_id, gpuDynInst->simdId,
                gpuDynInst->wfSlotId, tmp_vaddr);

        tlbPort[tlbPort_index].retries.push_back(pkt);
    } else if (!tlbPort[tlbPort_index].sendTimingReq(pkt)) {
        tlbPort[tlbPort_index].stallPort();

        DPRINTF(GPUTLB, "CU%d: WF[%d][%d]: Translation for addr %#x "
                "failed!\n", cu_id, gpuDynInst->simdId,
                gpuDynInst->wfSlotId, tmp_vaddr);

        tlbPort[tlbPort_index].retries.push_back(pkt);
    } else {
        DPRINTF(GPUTLB, "CU%d: WF[%d][%d]: Translation for addr %#x from "
                "instruction %s sent!\n", cu_id, gpuDynInst->simdId,
                gpuDynInst->wfSlotId, tmp_vaddr,
                gpuDynInst->disassemble().c_str());
    }

    gpuDynInst->resetEntireStatusVector();

    gpuDynInst->decrementStatusVector(index);

    tlbPort[tlbPort_index].sendFunctional(pkt);

    memPort[0].sendFunctional(new_pkt);

    DPRINTF(GPUMem, "Functional sendRequest\n");
    DPRINTF(GPUMem, "CU%d: WF[%d][%d]: index %d: addr %#x\n", cu_id,
            gpuDynInst->simdId, gpuDynInst->wfSlotId, index,
            new_pkt->req->getPaddr());
    DPRINTF(GPUTLB, "sent scalar %s translation request for addr %#x\n",
            /* ... */, pkt->req->getVaddr());

    assert(gpuDynInst->isGlobalSeg() ||
           gpuDynInst->executedAs() == enums::SC_GLOBAL);

    req = std::make_shared<Request>(/* ... */);

    if (kernelMemSync) {
        if (gpuDynInst->isKernelLaunch()) {
            // ...
            req->setReqInstSeqNum(gpuDynInst->seqNum());

            EventFunctionWrapper *mem_req_event =
                memPort[0].createMemReqEvent(pkt);

            DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x scheduling "
                    "an acquire\n", cu_id, gpuDynInst->simdId,
                    gpuDynInst->wfSlotId, 0, pkt->req->getPaddr());
        } else {
            assert(gpuDynInst->isEndOfKernel());

            req->setReqInstSeqNum(gpuDynInst->seqNum());

            EventFunctionWrapper *mem_req_event =
                memPort[0].createMemReqEvent(pkt);

            DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x scheduling "
                    "a release\n", cu_id, gpuDynInst->simdId,
                    gpuDynInst->wfSlotId, 0, pkt->req->getPaddr());
        }
    } else {
        gpuDynInst->setRequestFlags(req);

        req->setReqInstSeqNum(gpuDynInst->seqNum());

        EventFunctionWrapper *mem_req_event =
            memPort[0].createMemReqEvent(pkt);

        DPRINTF(GPUPort,
                "CU%d: WF[%d][%d]: index %d, addr %#x sync scheduled\n",
                cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId, 0,
                pkt->req->getPaddr());
    }
    auto req = std::make_shared<Request>(paddr, 64, 0, vramRequestorId());

    pkt->pushSenderState(
        /* ... */);
    DPRINTF(GPUPort, "CU%d: WF[%d][%d]: Response for addr %#x, index %d\n",
            compute_unit->cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId,
            pkt->req->getPaddr(), id);

    Addr paddr = pkt->req->getPaddr();

    int index = gpuDynInst->memStatusVector[paddr].back();

    DPRINTF(GPUMem, "Response for addr %#x, index %d\n",
            pkt->req->getPaddr(), id);

    gpuDynInst->memStatusVector[paddr].pop_back();
    gpuDynInst->pAddr = pkt->req->getPaddr();

    gpuDynInst->decrementStatusVector(index);
    DPRINTF(GPUMem, "bitvector is now %s\n", gpuDynInst->printStatusVector());

    if (gpuDynInst->allLanesZero()) {
        auto iter = gpuDynInst->memStatusVector.begin();
        auto end = gpuDynInst->memStatusVector.end();

        while (iter != end) {
            assert(iter->second.empty());
            // ...
        }

        if (compute_unit->headTailMap.count(gpuDynInst)) {
            // ...
        }

        gpuDynInst->memStatusVector.clear();

        DPRINTF(GPUMem, "CU%d: WF[%d][%d]: packet totally complete\n",
                compute_unit->cu_id, gpuDynInst->simdId,
                gpuDynInst->wfSlotId);
    }

    if (!compute_unit->headTailMap.count(gpuDynInst)) {
        compute_unit->headTailMap
            .insert(std::make_pair(gpuDynInst, curTick()));
    }
    Addr line = pkt->req->getPaddr();

    DPRINTF(GPUTLB, "CU%d: DTLBPort received %#x->%#x\n", computeUnit->cu_id,
            pkt->req->getVaddr(), line);

    computeUnit->stats.tlbCycles += curTick();

    if (!translation_state->tlbEntry) {
        Wavefront *w = computeUnit->wfList[sender_state->_gpuDynInst->simdId]
            [sender_state->_gpuDynInst->wfSlotId];

        DPRINTFN("Wave %d couldn't translate vaddr %#x\n", w->wfDynId,
                 pkt->req->getVaddr());
    }

    int hit_level = translation_state->hitLevel;
    computeUnit->stats.hitsPerTLBLevel[hit_level]++;

    delete translation_state->tlbEntry;
    assert(!translation_state->ports.size());

    delete translation_state;

    gpuDynInst->memStatusVector[line].push_back(mp_index);
    gpuDynInst->tlbHitLevel[mp_index] = hit_level;

    panic("unsupported response to request conversion %s\n",
          /* ... */);

    if (computeUnit->prefetchDepth) {
        int simdId = gpuDynInst->simdId;
        int wfSlotId = gpuDynInst->wfSlotId;
        Addr last = 0;

        switch (computeUnit->prefetchType) {
          case enums::PF_CU:
            last = computeUnit->lastVaddrCU[mp_index];
            break;
          case enums::PF_PHASE:
            last = computeUnit->lastVaddrSimd[simdId][mp_index];
            break;
          case enums::PF_WF:
            last = computeUnit->lastVaddrWF[simdId][wfSlotId][mp_index];
          default:
            break;
        }

        DPRINTF(GPUPrefetch, "CU[%d][%d][%d][%d]: %#x was last\n",
                computeUnit->cu_id, simdId, wfSlotId, mp_index, last);

        // ...

        computeUnit->lastVaddrCU[mp_index] = vaddr;
        computeUnit->lastVaddrSimd[simdId][mp_index] = vaddr;
        computeUnit->lastVaddrWF[simdId][wfSlotId][mp_index] = vaddr;

        stride = (computeUnit->prefetchType == enums::PF_STRIDE) ?
            computeUnit->prefetchStride : stride;

        DPRINTF(GPUPrefetch, "%#x to: CU[%d][%d][%d][%d]\n", vaddr,
                computeUnit->cu_id, simdId, wfSlotId, mp_index);

        for (int pf = 1; pf <= computeUnit->prefetchDepth; ++pf) {
            // ...
            RequestPtr prefetch_req = std::make_shared<Request>(
                /* ... */,
                computeUnit->requestorId(),
                /* ... */);

            // ...
            computeUnit->shader->gpuTc, true);

            sendFunctional(prefetch_pkt);

            // ...
            delete prefetch_pkt;
        }
    }
    if (new_pkt->req->systemReq()) {
        if (!gpuDynInst->isSystemReq()) {
            computeUnit->getTokenManager()->recvTokens(1);
            gpuDynInst->setSystemReq();
        }
    } else {
        new_pkt->req->requestorId(computeUnit->vramRequestorId());
    }

    EventFunctionWrapper *mem_req_event =
        computeUnit->memPort[mp_index].createMemReqEvent(new_pkt);

    DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x data scheduled\n",
            computeUnit->cu_id, gpuDynInst->simdId,
            gpuDynInst->wfSlotId, mp_index, new_pkt->req->getPaddr());

    computeUnit->schedule(mem_req_event, curTick() +
                          computeUnit->req_tick_latency);

    return new EventFunctionWrapper(
        [this, pkt]{ processMemReqEvent(pkt); },
        "ComputeUnit memory request event", true);

    return new EventFunctionWrapper(
        [this, pkt]{ processMemRespEvent(pkt); },
        "ComputeUnit memory response event", true);
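    // Both factory methods pass 'true' as the wrapper's auto-delete flag,
    // so each one-shot event frees itself after process() runs; the
    // scheduling code above never retains ownership of these events.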
    [[maybe_unused]] ComputeUnit *compute_unit = computeUnit;

    if (pkt->req->systemReq()) {
        // ...
    } else if (!(sendTimingReq(pkt))) {
        retries.emplace_back(pkt, gpuDynInst);

        DPRINTF(GPUPort,
                "CU%d: WF[%d][%d]: index %d, addr %#x data req failed!\n",
                compute_unit->cu_id, gpuDynInst->simdId,
                gpuDynInst->wfSlotId, id, pkt->req->getPaddr());
    } else {
        DPRINTF(GPUPort,
                "CU%d: WF[%d][%d]: gpuDynInst: %d, index %d, addr %#x data"
                " req sent!\n", compute_unit->cu_id, gpuDynInst->simdId,
                gpuDynInst->wfSlotId, gpuDynInst->seqNum(), id,
                pkt->req->getPaddr());
    }

    return "ComputeUnit scalar memory request event";
    if (pkt->req->systemReq()) {
        // ...
    } else if (!(sendTimingReq(pkt))) {
        // ...
        DPRINTF(GPUPort, "CU%d: WF[%d][%d]: addr %#x data req failed!\n",
                compute_unit->cu_id, gpuDynInst->simdId,
                gpuDynInst->wfSlotId, pkt->req->getPaddr());
    } else {
        DPRINTF(GPUPort, "CU%d: WF[%d][%d]: gpuDynInst: %d, addr %#x data "
                "req sent!\n", compute_unit->cu_id, gpuDynInst->simdId,
                gpuDynInst->wfSlotId, gpuDynInst->seqNum(),
                pkt->req->getPaddr());
    }
    int len = retries.size();

    DPRINTF(GPUTLB, "CU%d: DTLB recvReqRetry - %d pending requests\n",
            computeUnit->cu_id, len);

    assert(isStalled());

    for (int i = 0; i < len; ++i) {
        // ...
        DPRINTF(GPUTLB, "CU%d: retrying D-translation for address %#x",
                computeUnit->cu_id, vaddr);

        if (!sendTimingReq(pkt)) {
            DPRINTF(GPUTLB, ": failed again\n");
            break;
        } else {
            DPRINTF(GPUTLB, ": successful\n");
            retries.pop_front();
        }
    }
1848 "Translation of vaddr %#x failed\n", pkt->
req->getVaddr());
1850 delete translation_state->
tlbEntry;
1851 assert(!translation_state->
ports.size());
1854 delete translation_state;
1862 [[maybe_unused]]
Wavefront *
w = gpuDynInst->wavefront();
1864 DPRINTF(GPUTLB,
"CU%d: WF[%d][%d][wv=%d]: scalar DTLB port received "
1865 "translation: PA %#x -> %#x\n", computeUnit->cu_id,
w->simdId,
1866 w->wfSlotId,
w->kernId, pkt->
req->getVaddr(), pkt->
req->getPaddr());
1875 fatal(
"Scalar DTLB receieved unexpected MemCmd response %s\n",
1894 if (req_pkt->
req->systemReq()) {
1895 gpuDynInst->setSystemReq();
1897 req_pkt->
req->requestorId(computeUnit->vramRequestorId());
1902 (computeUnit->scalarDataPort, req_pkt);
1903 computeUnit->schedule(scalar_mem_req_event,
curTick() +
1904 computeUnit->scalar_req_tick_latency);
    [[maybe_unused]] Addr line = pkt->req->getPaddr();
    DPRINTF(GPUTLB, "CU%d: ITLBPort received %#x->%#x\n",
            computeUnit->cu_id, pkt->req->getVaddr(), line);

    bool success = translation_state->tlbEntry != nullptr;
    delete translation_state->tlbEntry;
    assert(!translation_state->ports.size());

    delete translation_state;

    computeUnit->fetchStage.fetch(pkt, wavefront);
    int len = retries.size();
    DPRINTF(GPUTLB, "CU%d: ITLB recvReqRetry - %d pending requests\n",
            computeUnit->cu_id, len);

    assert(isStalled());

    for (int i = 0; i < len; ++i) {
        // ...
        DPRINTF(GPUTLB, "CU%d: retrying I-translation for address %#x",
                computeUnit->cu_id, vaddr);

        if (!sendTimingReq(pkt)) {
            DPRINTF(GPUTLB, ": failed again\n");
            break;
        } else {
            DPRINTF(GPUTLB, ": successful\n");
            retries.pop_front();
        }
    }
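    // Retry protocol sketch: a nacked sendTimingReq() leaves the packet
    // at the head of 'retries' with the port stalled; on recvReqRetry()
    // the port replays in FIFO order and stops at the first packet the
    // peer still rejects, since the peer will issue another retry for
    // whatever remains queued.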
    if (gpuDynInst->isScalar()) {
        if (gpuDynInst->isALU() && !gpuDynInst->isWaitcnt()) {
            // ...
        } else if (gpuDynInst->isLoad()) {
            // ...
        } else if (gpuDynInst->isStore()) {
            // ...
        }
    } else {
        if (gpuDynInst->isALU()) {
            // ...
            stats.threadCyclesVALU
                += gpuDynInst->wavefront()->execMask().count();
        } else if (gpuDynInst->isFlat()) {
            if (gpuDynInst->isLocalMem()) {
                // ...
            }
        } else if (gpuDynInst->isFlatGlobal()) {
            // ...
        } else if (gpuDynInst->isFlatScratch()) {
            // ...
        } else if (gpuDynInst->isLocalMem()) {
            // ...
        } else if (gpuDynInst->isLoad()) {
            // ...
        } else if (gpuDynInst->isStore()) {
            // ...
        }

        if (gpuDynInst->isLoad()) {
            switch (gpuDynInst->executedAs()) {
              case enums::SC_SPILL:
                // ...
              case enums::SC_GLOBAL:
                // ...
              case enums::SC_GROUP:
                // ...
              case enums::SC_PRIVATE:
                // ...
              case enums::SC_READONLY:
                // ...
              case enums::SC_KERNARG:
                // ...
              case enums::SC_NONE:
                // ...
              default:
                fatal("%s has no valid segment\n", gpuDynInst->disassemble());
            }
        } else if (gpuDynInst->isStore()) {
            switch (gpuDynInst->executedAs()) {
              case enums::SC_SPILL:
                // ...
              case enums::SC_GLOBAL:
                // ...
              case enums::SC_GROUP:
                // ...
              case enums::SC_PRIVATE:
                // ...
              case enums::SC_READONLY:
                // ...
              case enums::SC_KERNARG:
                // ...
              case enums::SC_NONE:
                // ...
              default:
                fatal("%s has no valid segment\n", gpuDynInst->disassemble());
            }
        }
    }
    *page_stat_file << "page, wavefront accesses, workitem accesses" <<
        std::endl;

    for (auto iter : pageAccesses) {
        *page_stat_file << std::hex << iter.first << ",";
        *page_stat_file << std::dec << iter.second.first << ",";
        *page_stat_file << std::dec << iter.second.second << std::endl;
    }
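    // Example of the resulting CSV (values illustrative only):
    //   page, wavefront accesses, workitem accesses
    //   7f8e4000,12,768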
int32_t
ComputeUnit::getRefCounter(const uint32_t dispatchId,
                           const uint32_t wgId) const

    for (int i_wf = 0; i_wf < shader->n_wf; ++i_wf) {

    RequestPtr newRequest = std::make_shared<Request>();
    newRequest->setPaddr(0x0);
    fatal_if(!senderState, "did not get the right sort of sender state");

    computeUnit->localMemoryPipe.getLMRespFIFO().push(gpuDynInst);

    fatal_if(!sender_state, "packet without a valid sender state");

    fatal_if(retries.empty(), "must have retries waiting to be stalled");

    DPRINTF(GPUPort, "CU%d: WF[%d][%d]: LDS send failed!\n",
            computeUnit->cu_id, gpuDynInst->simdId,
            gpuDynInst->wfSlotId);

    DPRINTF(GPUPort, "CU%d: WF[%d][%d]: addr %#x lds req failed!\n",
            computeUnit->cu_id, gpuDynInst->simdId,
            gpuDynInst->wfSlotId, pkt->req->getPaddr());

    DPRINTF(GPUPort, "CU%d: WF[%d][%d]: addr %#x lds req sent!\n",
            computeUnit->cu_id, gpuDynInst->simdId,
            gpuDynInst->wfSlotId, pkt->req->getPaddr());
    auto queueSize = retries.size();

    DPRINTF(GPUPort, "CU%d: LDSPort recvReqRetry - %d pending requests\n",
            computeUnit->cu_id, queueSize);

    fatal_if(retries.empty(),
             "why was there a recvReqRetry() with no pending reqs?");
    fatal_if(!isStalled(),
             "recvReqRetry() happened when the port was not stalled");

    while (!retries.empty()) {
        // ...
        DPRINTF(GPUPort, "CU%d: retrying LDS send\n", computeUnit->cu_id);

        // ...
        DPRINTF(GPUPort, ": LDS send failed again\n");

        // ...
        DPRINTF(GPUPort, ": LDS send successful\n");
    }
    : statistics::Group(parent),
    ADD_STAT(vALUInsts, "Number of vector ALU insts issued."),
    ADD_STAT(vALUInstsPerWF, "The avg. number of vector ALU insts issued "
             "per-wavefront."),
    ADD_STAT(sALUInsts, "Number of scalar ALU insts issued."),
    ADD_STAT(sALUInstsPerWF, "The avg. number of scalar ALU insts issued "
             "per-wavefront."),
    ADD_STAT(instCyclesVALU,
             "Number of cycles needed to execute VALU insts."),
    ADD_STAT(instCyclesSALU,
             "Number of cycles needed to execute SALU insts."),
    ADD_STAT(threadCyclesVALU, "Number of thread cycles used to execute "
             "vector ALU ops. Similar to instCyclesVALU but multiplied by "
             "the number of active threads."),
    ADD_STAT(vALUUtilization,
             "Percentage of active vector ALU threads in a wave."),
    ADD_STAT(ldsNoFlatInsts, "Number of LDS insts issued, not including FLAT"
             " accesses that resolve to LDS."),
    ADD_STAT(ldsNoFlatInstsPerWF, "The avg. number of LDS insts (not "
             "including FLAT accesses that resolve to LDS) per-wavefront."),
    ADD_STAT(flatVMemInsts,
             "The number of FLAT insts that resolve to vmem issued."),
    ADD_STAT(flatVMemInstsPerWF, "The average number of FLAT insts that "
             "resolve to vmem issued per-wavefront."),
    ADD_STAT(flatLDSInsts,
             "The number of FLAT insts that resolve to LDS issued."),
    ADD_STAT(flatLDSInstsPerWF, "The average number of FLAT insts that "
             "resolve to LDS issued per-wavefront."),
    ADD_STAT(vectorMemWrites,
             "Number of vector mem write insts (excluding FLAT insts)."),
    ADD_STAT(vectorMemWritesPerWF, "The average number of vector mem write "
             "insts (excluding FLAT insts) per-wavefront."),
    ADD_STAT(vectorMemReads,
             "Number of vector mem read insts (excluding FLAT insts)."),
    ADD_STAT(vectorMemReadsPerWF, "The avg. number of vector mem read insts "
             "(excluding FLAT insts) per-wavefront."),
    ADD_STAT(scalarMemWrites, "Number of scalar mem write insts."),
    ADD_STAT(scalarMemWritesPerWF,
             "The average number of scalar mem write insts per-wavefront."),
    ADD_STAT(scalarMemReads, "Number of scalar mem read insts."),
    ADD_STAT(scalarMemReadsPerWF,
             "The average number of scalar mem read insts per-wavefront."),
    ADD_STAT(vectorMemReadsPerKiloInst,
             "Number of vector mem reads per kilo-instruction"),
    ADD_STAT(vectorMemWritesPerKiloInst,
             "Number of vector mem writes per kilo-instruction"),
    ADD_STAT(vectorMemInstsPerKiloInst,
             "Number of vector mem insts per kilo-instruction"),
    ADD_STAT(scalarMemReadsPerKiloInst,
             "Number of scalar mem reads per kilo-instruction"),
    ADD_STAT(scalarMemWritesPerKiloInst,
             "Number of scalar mem writes per kilo-instruction"),
    ADD_STAT(scalarMemInstsPerKiloInst,
             "Number of scalar mem insts per kilo-instruction"),
    ADD_STAT(instCyclesVMemPerSimd, "Number of cycles to send address, "
             "command, data from VRF to vector memory unit, per SIMD"),
    ADD_STAT(instCyclesScMemPerSimd, "Number of cycles to send address, "
             "command, data from SRF to scalar memory unit, per SIMD"),
    ADD_STAT(instCyclesLdsPerSimd, "Number of cycles to send address, "
             "command, data from VRF to LDS unit, per SIMD"),
    ADD_STAT(globalReads, "Number of reads to the global segment"),
    ADD_STAT(globalWrites, "Number of writes to the global segment"),
    ADD_STAT(globalMemInsts,
             "Number of memory instructions sent to the global segment"),
    ADD_STAT(argReads, "Number of reads to the arg segment"),
    ADD_STAT(argWrites, "Number of writes to the arg segment"),
    ADD_STAT(argMemInsts,
             "Number of memory instructions sent to the arg segment"),
    ADD_STAT(spillReads, "Number of reads to the spill segment"),
    ADD_STAT(spillWrites, "Number of writes to the spill segment"),
    ADD_STAT(spillMemInsts,
             "Number of memory instructions sent to the spill segment"),
    ADD_STAT(groupReads, "Number of reads to the group segment"),
    ADD_STAT(groupWrites, "Number of writes to the group segment"),
    ADD_STAT(groupMemInsts,
             "Number of memory instructions sent to the group segment"),
    ADD_STAT(privReads, "Number of reads to the private segment"),
    ADD_STAT(privWrites, "Number of writes to the private segment"),
    ADD_STAT(privMemInsts,
             "Number of memory instructions sent to the private segment"),
    ADD_STAT(readonlyReads, "Number of reads to the readonly segment"),
    ADD_STAT(readonlyWrites, "Number of writes to the readonly segment"),
    ADD_STAT(readonlyMemInsts,
             "Number of memory instructions sent to the readonly segment"),
    ADD_STAT(kernargReads, "Number of reads sent to the kernarg segment"),
    ADD_STAT(kernargWrites, "Number of writes sent to the kernarg segment"),
    ADD_STAT(kernargMemInsts,
             "Number of memory instructions sent to the kernarg segment"),
    ADD_STAT(waveLevelParallelism,
             "wave level parallelism: count of active waves at wave launch"),
    ADD_STAT(tlbRequests, "number of uncoalesced requests"),
    ADD_STAT(tlbCycles,
             "total number of cycles for all uncoalesced requests"),
    ADD_STAT(tlbLatency, "Avg. translation latency for data translations"),
    ADD_STAT(hitsPerTLBLevel,
             "TLB hits distribution (0 for page table, x for Lx-TLB)"),
    ADD_STAT(ldsBankAccesses, "Total number of LDS bank accesses"),
    ADD_STAT(ldsBankConflictDist,
             "Number of bank conflicts per LDS memory packet"),
    ADD_STAT(pageDivergenceDist,
             "pages touched per wf (over all mem. instr.)"),
    ADD_STAT(dynamicGMemInstrCnt,
             "dynamic non-flat global memory instruction count"),
    ADD_STAT(dynamicFlatMemInstrCnt,
             "dynamic flat global memory instruction count"),
    ADD_STAT(dynamicLMemInstrCnt, "dynamic local memory instruction count"),
    ADD_STAT(wgBlockedDueBarrierAllocation,
             "WG dispatch was blocked due to lack of barrier resources"),
    ADD_STAT(wgBlockedDueLdsAllocation,
             "Workgroup blocked due to LDS capacity"),
    ADD_STAT(numInstrExecuted, "number of instructions executed"),
    ADD_STAT(execRateDist, "Instruction Execution Rate: Number of executed "
             "vector instructions per cycle"),
    ADD_STAT(numVecOpsExecuted,
             "number of vec ops executed (e.g. WF size/inst)"),
    ADD_STAT(numVecOpsExecutedF16,
             "number of f16 vec ops executed (e.g. WF size/inst)"),
    ADD_STAT(numVecOpsExecutedF32,
             "number of f32 vec ops executed (e.g. WF size/inst)"),
    ADD_STAT(numVecOpsExecutedF64,
             "number of f64 vec ops executed (e.g. WF size/inst)"),
    ADD_STAT(numVecOpsExecutedFMA16,
             "number of fma16 vec ops executed (e.g. WF size/inst)"),
    ADD_STAT(numVecOpsExecutedFMA32,
             "number of fma32 vec ops executed (e.g. WF size/inst)"),
    ADD_STAT(numVecOpsExecutedFMA64,
             "number of fma64 vec ops executed (e.g. WF size/inst)"),
    ADD_STAT(numVecOpsExecutedMAC16,
             "number of mac16 vec ops executed (e.g. WF size/inst)"),
    ADD_STAT(numVecOpsExecutedMAC32,
             "number of mac32 vec ops executed (e.g. WF size/inst)"),
    ADD_STAT(numVecOpsExecutedMAC64,
             "number of mac64 vec ops executed (e.g. WF size/inst)"),
    ADD_STAT(numVecOpsExecutedMAD16,
             "number of mad16 vec ops executed (e.g. WF size/inst)"),
    ADD_STAT(numVecOpsExecutedMAD32,
             "number of mad32 vec ops executed (e.g. WF size/inst)"),
    ADD_STAT(numVecOpsExecutedMAD64,
             "number of mad64 vec ops executed (e.g. WF size/inst)"),
    ADD_STAT(numVecOpsExecutedMFMA,
             "number of mfma vec ops executed (e.g. WF size/inst)"),
    ADD_STAT(numVecOpsExecutedMFMAI8,
             "number of i8 mfma vec ops executed (e.g. WF size/inst)"),
    ADD_STAT(numVecOpsExecutedMFMAF16,
             "number of f16 mfma vec ops executed (e.g. WF size/inst)"),
    ADD_STAT(numVecOpsExecutedMFMAF32,
             "number of f32 mfma vec ops executed (e.g. WF size/inst)"),
    ADD_STAT(numVecOpsExecutedMFMAF64,
             "number of f64 mfma vec ops executed (e.g. WF size/inst)"),
    ADD_STAT(numVecOpsExecutedTwoOpFP,
             "number of two op FP vec ops executed (e.g. WF size/inst)"),
    ADD_STAT(totalCycles, "number of cycles the CU ran for"),
    ADD_STAT(vpc, "Vector Operations per cycle (this CU only)"),
    ADD_STAT(vpc_f16, "F16 Vector Operations per cycle (this CU only)"),
    ADD_STAT(vpc_f32, "F32 Vector Operations per cycle (this CU only)"),
    ADD_STAT(vpc_f64, "F64 Vector Operations per cycle (this CU only)"),
    ADD_STAT(ipc, "Instructions per cycle (this CU only)"),
    ADD_STAT(controlFlowDivergenceDist, "number of lanes active per "
             "instruction (over all instructions)"),
    ADD_STAT(activeLanesPerGMemInstrDist,
             "number of active lanes per global memory instruction"),
    ADD_STAT(activeLanesPerLMemInstrDist,
             "number of active lanes per local memory instruction"),
    ADD_STAT(numALUInstsExecuted,
             "Number of dynamic non-GM memory insts executed"),
    ADD_STAT(numTimesWgBlockedDueVgprAlloc, "Number of times WGs are "
             "blocked due to VGPR allocation per SIMD"),
    ADD_STAT(numTimesWgBlockedDueSgprAlloc, "Number of times WGs are "
             "blocked due to SGPR allocation per SIMD"),
    ADD_STAT(numCASOps, "number of compare and swap operations"),
    ADD_STAT(numFailedCASOps,
             "number of compare and swap operations that failed"),
    ADD_STAT(completedWfs, "number of completed wavefronts"),
    ADD_STAT(completedWGs, "number of completed workgroups"),
    ADD_STAT(headTailLatency, "ticks between first and last cache block "
             "arrival at coalescer"),
    ADD_STAT(instInterleave, "Measure of instruction interleaving per SIMD")
    for (int i = 0; i < 4; ++i) {