#include "debug/GPUMem.hh"
#include "debug/GPUReg.hh"

// constructor initializer list: queue size and data-delivery mode come
// from the ComputeUnit parameters
    computeUnit(nullptr), gmQueueSize(p->global_mem_queue_size),
    outOfOrderDataDelivery(p->out_of_order_data_delivery), inflightStores(0),

// response side of exec(): a returning load (or the load component of an
// atomic that returns data) completes only once the VRF can accept it
    bool accessVrf = true;

    if ((m) && (m->isLoad() || m->isAtomicRet())) { /* check VRF readiness */ }

    // ... the completion condition also requires ...
        accessVrf && m->statusBitVector == VectorMask(0) &&

    if (m->isStore() || m->isAtomic()) { /* release a write slot */ }
    if (m->isLoad() || m->isAtomic()) { /* release a read slot */ }

// issue side of exec(): throttle on the number of in-flight loads/stores,
// then reserve an ordered-response-buffer entry (marked not yet done)
// before the request is sent to the memory system
    if (mp->isLoad() || mp->isAtomic()) {
        // ...
    } else if (mp->isStore()) {
        // ...
    }

    gmOrderedRespBuffer.insert(std::make_pair(mp->seqNum(),
                                              std::make_pair(mp, false)));

    DPRINTF(GPUMem, "CU%d: WF[%d][%d] Popping 0 mem_op = \n", ...);

// getNextReadyResp(): the oldest response is handed back only once it has
// been marked done
    if (mem_req->second.second) {
        return mem_req->second.first;
    }

// completeRequest() releases the in-flight slot for the matching access
// type; handleResponse() flags the buffered entry as done
    if (gpuDynInst->isLoad() || gpuDynInst->isAtomic()) {
        // ...
    } else if (gpuDynInst->isStore()) {
        // ...
    }

    mem_req->second.second = true;
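
The excerpts above all revolve around one mechanism: when out-of-order data delivery is disabled, every issued request reserves an entry in gmOrderedRespBuffer, keyed by its sequence number and paired with a "done" flag, and data is handed back strictly in program order. The following is a minimal, self-contained sketch of that bookkeeping; Inst, InstPtr, OrderedRespBuffer, and the method names are simplified stand-ins for illustration, not gem5's GPUDynInst or GlobalMemPipeline API.

// Standalone sketch of the in-order response bookkeeping (assumed names).
#include <cassert>
#include <cstdint>
#include <iostream>
#include <map>
#include <memory>
#include <utility>

struct Inst { uint64_t seq; };
using InstPtr = std::shared_ptr<Inst>;

class OrderedRespBuffer
{
  public:
    // issue time: reserve an entry, marked not yet done, keyed by the
    // instruction's unique sequence number (cf. gmOrderedRespBuffer).
    void reserve(const InstPtr &mp)
    { buf.insert(std::make_pair(mp->seq, std::make_pair(mp, false))); }

    // response time: the memory system answered; flag the entry as done.
    void markDone(const InstPtr &m)
    {
        auto it = buf.find(m->seq);
        assert(it != buf.end());
        it->second.second = true;
    }

    // Only the oldest entry may be serviced, and only once it is done.
    InstPtr nextReady() const
    {
        if (!buf.empty() && buf.begin()->second.second)
            return buf.begin()->second.first;
        return nullptr;
    }

    // completion: drop the oldest entry after its data was delivered.
    void complete(const InstPtr &m)
    {
        assert(!buf.empty() && buf.begin()->first == m->seq);
        buf.erase(m->seq);
    }

  private:
    std::map<uint64_t, std::pair<InstPtr, bool>> buf;
};

int main()
{
    OrderedRespBuffer b;
    auto older = std::make_shared<Inst>(Inst{1});
    auto younger = std::make_shared<Inst>(Inst{2});
    b.reserve(older);
    b.reserve(younger);

    b.markDone(younger);                 // younger request returns first...
    assert(b.nextReady() == nullptr);    // ...but is held to preserve order
    b.markDone(older);
    assert(b.nextReady() == older);      // oldest is now deliverable
    b.complete(older);
    assert(b.nextReady() == younger);    // then the younger one
    b.complete(younger);
    std::cout << "responses delivered in program order\n";
    return 0;
}

Keeping the buffer as a std::map ordered by sequence number means "oldest outstanding request" is simply begin(), which is why the ready check and the completion step only ever look at the front of the map.
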
// regStats(): register the VRF bank-conflict delay statistic
    loadVrfBankConflictCycles
        .name(name() + ".load_vrf_bank_conflict_cycles")
        .desc("total number of cycles GM data are delayed before updating "
              /* ... */);
GlobalMemPipeline(const ComputeUnitParams *params)
void init(ComputeUnit *cu)
void issueRequest(GPUDynInstPtr gpuDynInst)
    Issues a request to the pipeline, i.e., enqueues it in the request
    buffer (see the issue-side sketch after this list).
void handleResponse(GPUDynInstPtr gpuDynInst)
    This method handles responses sent to this GM pipeline by the CU.
GPUDynInstPtr getNextReadyResp()
    Find the next ready response to service.
void completeRequest(GPUDynInstPtr gpuDynInst)
    Once a memory request is finished, we remove it from the buffer.
bool isGMLdRespFIFOWrRdy() const
bool isGMStRespFIFOWrRdy() const

ComputeUnit * computeUnit
bool outOfOrderDataDelivery
std::queue< GPUDynInstPtr > gmIssuedRequests
std::queue< GPUDynInstPtr > gmReturnedLoads
std::queue< GPUDynInstPtr > gmReturnedStores
std::map< uint64_t, std::pair< GPUDynInstPtr, bool > > gmOrderedRespBuffer
Stats::Scalar loadVrfBankConflictCycles

std::shared_ptr< GPUDynInst > GPUDynInstPtr
std::bitset< std::numeric_limits< unsigned long long >::digits > VectorMask

uint32_t outstandingReqsRdGm
uint32_t outstandingReqsWrGm
std::vector< VectorRegisterFile * > vrf
std::vector< WaitClass > wfWait
void ScheduleAdd(uint32_t *val, Tick when, int x)

const std::string & name() const
virtual const std::string name() const
Derived & name(const std::string &name)
    Sets the name and marks this stat to print at the end of simulation.
Derived & desc(const std::string &_desc)
    Sets the description and marks this stat to print at the end of
    simulation.
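
Putting the issue-side members together (gmIssuedRequests above, plus the gmQueueSize and inflightStores members visible in the constructor excerpt), a hedged sketch of how requests could be queued and throttled follows; Req, IssueStage, and execOnce() are made-up names for illustration, not gem5 code.

// Sketch of FIFO issue with per-type in-flight limits (assumed names).
#include <iostream>
#include <queue>

struct Req { bool load; };

class IssueStage
{
  public:
    explicit IssueStage(int queueSize) : gmQueueSize(queueSize) {}

    // Plays the role of issueRequest(): enqueue in the request buffer.
    void issue(const Req &r) { issued.push(r); }

    // Plays the role of the issue half of exec(): send at most one request
    // per call, subject to the in-flight limit for its access type.
    bool execOnce()
    {
        if (issued.empty())
            return false;

        const Req &mp = issued.front();
        int &inflight = mp.load ? inflightLoads : inflightStores;
        if (inflight >= gmQueueSize)
            return false;       // stall: too many outstanding of this type

        ++inflight;
        // ... initiate the memory access here ...
        issued.pop();
        return true;
    }

    // Plays the role of completeRequest(): free an in-flight slot.
    void complete(const Req &r)
    { --(r.load ? inflightLoads : inflightStores); }

  private:
    std::queue<Req> issued;     // stands in for gmIssuedRequests
    int inflightLoads = 0;
    int inflightStores = 0;
    const int gmQueueSize;
};

int main()
{
    IssueStage gm(1);
    gm.issue({true});
    gm.issue({true});
    std::cout << gm.execOnce() << '\n';   // 1: first load issued
    std::cout << gm.execOnce() << '\n';   // 0: stalled, queue size reached
    gm.complete({true});
    std::cout << gm.execOnce() << '\n';   // 1: second load can now issue
    return 0;
}

Tracking loads and stores with separate counters mirrors the separate isLoad()/isStore() branches in the excerpts above.
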