fetch_unit.cc
/*
 * Copyright (c) 2014-2017 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * For use for simulation and test purposes only
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "gpu-compute/fetch_unit.hh"

#include "debug/GPUFetch.hh"
#include "debug/GPUPort.hh"
#include "debug/GPUTLB.hh"
#include "gpu-compute/compute_unit.hh"
#include "gpu-compute/gpu_dyn_inst.hh"
#include "gpu-compute/gpu_static_inst.hh"
#include "gpu-compute/shader.hh"
#include "gpu-compute/wavefront.hh"

uint32_t FetchUnit::globalFetchUnitID;
FetchUnit::FetchUnit(const ComputeUnitParams* params) :
    timingSim(true),
    computeUnit(nullptr),
    fetchScheduler(params),
    waveList(nullptr)
{
}

FetchUnit::~FetchUnit()
{
    fetchQueue.clear();
    fetchStatusQueue.clear();
}

void
FetchUnit::init(ComputeUnit *cu)
{
    computeUnit = cu;
    timingSim = computeUnit->shader->timingSim;
    fetchQueue.clear();
    fetchStatusQueue.resize(computeUnit->shader->n_wf);

    for (int j = 0; j < computeUnit->shader->n_wf; ++j) {
        fetchStatusQueue[j] = std::make_pair(waveList->at(j), false);
    }

    fetchScheduler.bindList(&fetchQueue);
}

void
FetchUnit::exec()
{
    // re-evaluate waves which are marked as not ready for fetch
    for (int j = 0; j < computeUnit->shader->n_wf; ++j) {
        // Following code assumes 64-bit operation and all insts are
        // represented by 64-bit pointers to inst objects.
        Wavefront *curWave = fetchStatusQueue[j].first;
        assert(curWave);

        // The wavefront has to be active, the IB occupancy has to be
        // 4 or fewer instructions, and it cannot have any branches, to
        // prevent speculative instruction fetches
        if (!fetchStatusQueue[j].second) {
            if (curWave->status == Wavefront::S_RUNNING &&
                curWave->instructionBuffer.size() <= 4 &&
                !curWave->instructionBufferHasBranch() &&
                !curWave->pendingFetch) {
                fetchQueue.push_back(curWave);
                fetchStatusQueue[j].second = true;
            }
        }
    }

    // Fetch only if there is some wave ready to be fetched
    // An empty fetchQueue will cause the scheduler to panic
    if (fetchQueue.size()) {
        Wavefront *waveToBeFetched = fetchScheduler.chooseWave();
        waveToBeFetched->pendingFetch = true;
        fetchStatusQueue[waveToBeFetched->wfSlotId].second = false;
        initiateFetch(waveToBeFetched);
    }
}

void
FetchUnit::initiateFetch(Wavefront *wavefront)
{
    // calculate the virtual address to fetch from the SQC
    Addr vaddr = wavefront->pc();

    // skip past the instructions that are already buffered: advance the
    // fetch address by the size of every instruction currently held in
    // this wavefront's instruction buffer, then add the kernel's base
    // pointer to form the final virtual address
    for (int i = 0; i < wavefront->instructionBuffer.size(); ++i) {
        vaddr +=
            wavefront->instructionBuffer.at(i)->staticInstruction()->instSize();
    }
    vaddr = wavefront->basePtr + vaddr;

    DPRINTF(GPUTLB, "CU%d: WF[%d][%d]: Initiating fetch translation: %#x\n",
            computeUnit->cu_id, wavefront->simdId, wavefront->wfSlotId, vaddr);

    // Since this is an instruction prefetch, if you're split then just finish
    // out the current line.
    int block_size = computeUnit->cacheLineSize();
    // check for split accesses
    Addr split_addr = roundDown(vaddr + block_size - 1, block_size);
    int size = block_size;

    if (split_addr > vaddr) {
        // misaligned access, just grab the rest of the line
        size = split_addr - vaddr;
    }
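
    // e.g., with a 64-byte cache line and vaddr == 0x1030:
    // split_addr = roundDown(0x1030 + 63, 64) = 0x1040 > 0x1030,
    // so size becomes 0x10 and the request stops at the line boundary;
    // a line-aligned vaddr yields split_addr == vaddr and a full line.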

    // set up virtual request
    RequestPtr req = std::make_shared<Request>(
        vaddr, size, Request::INST_FETCH,
        computeUnit->masterId(), 0, 0, nullptr);

    PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
    // This fetchBlock is kind of faux right now - because the translations so
    // far don't actually return data
    uint64_t fetchBlock;
    pkt->dataStatic(&fetchBlock);

    if (timingSim) {
        // SenderState needed on return
        pkt->senderState = new ComputeUnit::ITLBPort::SenderState(wavefront);

        // SenderState needed by the TLB hierarchy
        pkt->senderState =
            new TheISA::GpuTLB::TranslationState(BaseTLB::Execute,
                                                 computeUnit->shader->gpuTc,
                                                 false, pkt->senderState);
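
        // the TranslationState saves the previous senderState (the
        // ITLBPort::SenderState set just above), so the wavefront can be
        // recovered when the translation response returns on this port.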

        if (computeUnit->sqcTLBPort->isStalled()) {
            assert(computeUnit->sqcTLBPort->retries.size() > 0);

            DPRINTF(GPUTLB, "Failed to send TLB req for FETCH addr %#x\n",
                    vaddr);

            computeUnit->sqcTLBPort->retries.push_back(pkt);
        } else if (!computeUnit->sqcTLBPort->sendTimingReq(pkt)) {
            // Stall the port;
            // no more packets are issued until
            // ruby indicates resources are freed by
            // a recvReqRetry() callback on this port.
            computeUnit->sqcTLBPort->stallPort();

            DPRINTF(GPUTLB, "Failed to send TLB req for FETCH addr %#x\n",
                    vaddr);

            computeUnit->sqcTLBPort->retries.push_back(pkt);
        } else {
            DPRINTF(GPUTLB, "sent FETCH translation request for %#x\n", vaddr);
        }
    } else {
        pkt->senderState =
            new TheISA::GpuTLB::TranslationState(BaseTLB::Execute,
                                                 computeUnit->shader->gpuTc);

        computeUnit->sqcTLBPort->sendFunctional(pkt);

        TheISA::GpuTLB::TranslationState *sender_state =
            safe_cast<TheISA::GpuTLB::TranslationState*>(pkt->senderState);

        delete sender_state->tlbEntry;
        delete sender_state;
        // fetch the instructions from the SQC when we operate in
        // functional mode only
        fetch(pkt, wavefront);
    }
}

void
FetchUnit::fetch(PacketPtr pkt, Wavefront *wavefront)
{
    assert(pkt->req->hasPaddr());
    assert(pkt->req->hasSize());

    DPRINTF(GPUFetch, "CU%d: WF[%d][%d]: Fetch Access: %#x\n",
            computeUnit->cu_id, wavefront->simdId, wavefront->wfSlotId,
            pkt->req->getPaddr());

    // this is necessary because the GPU TLB receives packets instead of
    // requests. when the translation is complete, all relevant fields in the
    // request will be populated, but not in the packet. here we create the
    // new packet so we can set the size, addr, and proper flags.
    PacketPtr oldPkt = pkt;
    pkt = new Packet(oldPkt->req, oldPkt->cmd);
    delete oldPkt;

    // allocate a buffer large enough to hold the fetched instructions;
    // dataDynamic() hands ownership to the packet, which will delete[] it
    TheGpuISA::RawMachInst *data =
        new TheGpuISA::RawMachInst[pkt->req->getSize() /
        sizeof(TheGpuISA::RawMachInst)];

    pkt->dataDynamic<TheGpuISA::RawMachInst>(data);

    // New SenderState for the memory access
    pkt->senderState = new ComputeUnit::SQCPort::SenderState(wavefront);

    if (timingSim) {
        // translation is done. Send the appropriate timing memory request.

        if (!computeUnit->sqcPort->sendTimingReq(pkt)) {
            computeUnit->sqcPort->retries.push_back(std::make_pair(pkt,
                                                                   wavefront));

            DPRINTF(GPUPort, "CU%d: WF[%d][%d]: Fetch addr %#x failed!\n",
                    computeUnit->cu_id, wavefront->simdId, wavefront->wfSlotId,
                    pkt->req->getPaddr());
        } else {
            DPRINTF(GPUPort, "CU%d: WF[%d][%d]: Fetch addr %#x sent!\n",
                    computeUnit->cu_id, wavefront->simdId, wavefront->wfSlotId,
                    pkt->req->getPaddr());
        }
    } else {
        computeUnit->sqcPort->sendFunctional(pkt);
        processFetchReturn(pkt);
    }
}

void
FetchUnit::processFetchReturn(PacketPtr pkt)
{
    ComputeUnit::SQCPort::SenderState *sender_state =
        safe_cast<ComputeUnit::SQCPort::SenderState*>(pkt->senderState);

    Wavefront *wavefront = sender_state->wavefront;

    DPRINTF(GPUFetch, "CU%d: WF[%d][%d]: Fetch addr %#x returned "
            "%d bytes, %d instructions!\n", computeUnit->cu_id,
            wavefront->simdId, wavefront->wfSlotId, pkt->req->getPaddr(),
            pkt->req->getSize(), pkt->req->getSize() /
            sizeof(TheGpuISA::RawMachInst));

    if (wavefront->dropFetch) {
        assert(wavefront->instructionBuffer.empty());
        wavefront->dropFetch = false;
    } else {
        TheGpuISA::RawMachInst *inst_index_ptr =
            (TheGpuISA::RawMachInst*)pkt->getPtr<uint8_t>();
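
        // fetch is only initiated when the buffer holds at most four
        // entries (see exec()), and nothing is appended while
        // pendingFetch is set, so that bound still holds here.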
        assert(wavefront->instructionBuffer.size() <= 4);

        for (int i = 0; i < pkt->req->getSize() /
             sizeof(TheGpuISA::RawMachInst); ++i) {
            GPUStaticInst *inst_ptr = decoder.decode(inst_index_ptr[i]);

            assert(inst_ptr);

            if (inst_ptr->instSize() == 8) {
                // an 8-byte instruction occupies two consecutive entries
                // in the raw instruction stream, so skip the next entry
                ++i;
            }

            DPRINTF(GPUFetch, "CU%d: WF[%d][%d]: added %s\n",
                    computeUnit->cu_id, wavefront->simdId,
                    wavefront->wfSlotId, inst_ptr->disassemble());

            GPUDynInstPtr gpuDynInst =
                std::make_shared<GPUDynInst>(computeUnit, wavefront, inst_ptr,
                                             computeUnit->getAndIncSeqNum());

            wavefront->instructionBuffer.push_back(gpuDynInst);
        }
    }
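
    // clearing pendingFetch lets exec() consider this wavefront for
    // another fetch.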
    wavefront->pendingFetch = false;

    delete pkt->senderState;
    delete pkt;
}

void
FetchUnit::bindWaveList(std::vector<Wavefront*> *wave_list)
{
    waveList = wave_list;
}