gem5  v20.0.0.3
wavefront.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2011-2017 Advanced Micro Devices, Inc.
3  * All rights reserved.
4  *
5  * For use for simulation and test purposes only
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions are met:
9  *
10  * 1. Redistributions of source code must retain the above copyright notice,
11  * this list of conditions and the following disclaimer.
12  *
13  * 2. Redistributions in binary form must reproduce the above copyright notice,
14  * this list of conditions and the following disclaimer in the documentation
15  * and/or other materials provided with the distribution.
16  *
17  * 3. Neither the name of the copyright holder nor the names of its
18  * contributors may be used to endorse or promote products derived from this
19  * software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
25  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "gpu-compute/wavefront.hh"
35 
36 #include "debug/GPUExec.hh"
37 #include "debug/WavefrontStack.hh"
40 #include "gpu-compute/shader.hh"
42 
43 Wavefront*
44 WavefrontParams::create()
45 {
46  return new Wavefront(this);
47 }
48 
// Wavefront constructor.
// NOTE(review): this is a doxygen rendering with dropped lines — the
// signature line (doxygen 49, presumably
// "Wavefront::Wavefront(const Params *p)") and doxygen lines 56, 60-63 and
// 80 are missing; the surviving body is kept byte-for-byte. Initializes
// identifiers, run status, in-flight memory-request counters, barrier
// state, and per-lane vectors sized to the configured wavefront width.
50  : SimObject(p), callArgMem(nullptr), _gpuISA()
51 {
// Identification / tracing state from the parameter struct.
52  lastTrace = 0;
53  simdId = p->simdId;
54  wfSlotId = p->wf_slot_id;
55  status = S_STOPPED;
// VGPR allocation base and in-pipe memory request counters, split by
// read/write and local (LDS) vs. global memory.
57  startVgprIndex = 0;
58  outstandingReqs = 0;
59  memReqsInPipe = 0;
64  rdLmReqsInPipe = 0;
65  rdGmReqsInPipe = 0;
66  wrLmReqsInPipe = 0;
67  wrGmReqsInPipe = 0;
68 
// Barrier state: no barrier encountered yet, not stalled.
69  barrierCnt = 0;
70  oldBarrierCnt = 0;
71  stalledAtBarrier = false;
72 
// Context save/restore bookkeeping; the *Tcnt fields start at an
// all-ones sentinel value.
73  memTraceBusy = 0;
74  oldVgprTcnt = 0xffffffffffffffffll;
75  oldDgprTcnt = 0xffffffffffffffffll;
76  oldVgpr.resize(p->wfSize);
77 
78  pendingFetch = false;
79  dropFetch = false;
// Per-lane bookkeeping vectors, one slot per lane of the wavefront.
81  maxSpVgprs = 0;
82  maxDpVgprs = 0;
83  lastAddr.resize(p->wfSize);
84  workItemFlatId.resize(p->wfSize);
85  oldDgpr.resize(p->wfSize);
86  barCnt.resize(p->wfSize);
// Work-item ids have three dimensions (x, y, z).
87  for (int i = 0; i < 3; ++i) {
88  workItemId[i].resize(p->wfSize);
89  }
90 }
91 
92 void
// Wavefront::regStats() — registers this wavefront's statistics with the
// gem5 stats framework.
// NOTE(review): the signature line (doxygen 93) and the identifiers of the
// stats being configured (doxygen 95/97, 103, 111, 118, 125) are missing
// from this listing; per the cross-reference index they are srcRegOpDist,
// dstRegOpDist, numTimesBlockedDueWAXDependencies,
// numTimesBlockedDueRAWDependencies and numTimesBlockedDueVrfPortAvail.
94 {
96 
// Distribution of source-register operand counts per executed instruction.
98  .init(0, 4, 2)
99  .name(name() + ".src_reg_operand_dist")
100  .desc("number of executed instructions with N source register operands")
101  ;
102 
// Distribution of destination-register operand counts.
104  .init(0, 3, 2)
105  .name(name() + ".dst_reg_operand_dist")
106  .desc("number of executed instructions with N destination register "
107  "operands")
108  ;
109 
110  // FIXME: the name of the WF needs to be unique
112  .name(name() + ".timesBlockedDueWAXDependencies")
113  .desc("number of times the wf's instructions are blocked due to WAW "
114  "or WAR dependencies")
115  ;
116 
117  // FIXME: the name of the WF needs to be unique
119  .name(name() + ".timesBlockedDueRAWDependencies")
120  .desc("number of times the wf's instructions are blocked due to RAW "
121  "dependencies")
122  ;
123 
124  // FIXME: the name of the WF needs to be unique
126  .name(name() + ".timesBlockedDueVrfPortAvail")
127  .desc("number of times instructions are blocked due to VRF port "
128  "availability")
129  ;
130 }
131 
132 void
// Wavefront::init() — gem5 SimObject hook called after construction and
// port binding; resets this wavefront's VGPR allocation state.
// NOTE(review): the signature line (doxygen 133) is missing from this
// listing.
134 {
135  reservedVectorRegs = 0;
136  startVgprIndex = 0;
137 }
138 
139 void
140 Wavefront::resizeRegFiles(int num_cregs, int num_sregs, int num_dregs)
141 {
142  condRegState->init(num_cregs);
143  maxSpVgprs = num_sregs;
144  maxDpVgprs = num_dregs;
145 }
146 
// Wavefront destructor — releases the heap-owned call-argument memory
// (may be null) and the condition-register state.
// NOTE(review): the signature line (doxygen 147, "Wavefront::~Wavefront()")
// is missing from this listing.
148 {
149  if (callArgMem)
150  delete callArgMem;
151  delete condRegState;
152 }
153 
154 void
155 Wavefront::start(uint64_t _wf_dyn_id,uint64_t _base_ptr)
156 {
157  wfDynId = _wf_dyn_id;
158  basePtr = _base_ptr;
159  status = S_RUNNING;
160 }
161 
162 bool
// Wavefront::isGmInstruction(GPUDynInstPtr ii) — true for instructions that
// may touch global memory, including flat accesses (which can resolve to
// global at execution time).
// NOTE(review): the signature line (doxygen 163) is missing from this
// listing.
164 {
165  if (ii->isGlobalMem() || ii->isFlat())
166  return true;
167 
168  return false;
169 }
170 
171 bool
// Wavefront::isLmInstruction(GPUDynInstPtr ii) — true for local (LDS)
// memory instructions.
// NOTE(review): the signature line (doxygen 172) is missing from this
// listing.
173 {
174  if (ii->isLocalMem()) {
175  return true;
176  }
177 
178  return false;
179 }
180 
181 bool
// Wavefront::isOldestInstALU() — true when the wave is not stopped and the
// oldest (front) buffered instruction is handled by the ALU path: nop,
// return, branch, ALU op, or a kernel-argument-segment load (treated as an
// ALU operation elsewhere in this file).
// NOTE(review): the signature line (doxygen 182) is missing from this
// listing.
183 {
184  assert(!instructionBuffer.empty());
185  GPUDynInstPtr ii = instructionBuffer.front();
186 
187  if (status != S_STOPPED && (ii->isNop() ||
188  ii->isReturn() || ii->isBranch() ||
189  ii->isALU() || (ii->isKernArgSeg() && ii->isLoad()))) {
190  return true;
191  }
192 
193  return false;
194 }
195 
196 bool
// Wavefront::isOldestInstBarrier() — true when the wave is running and the
// front of the instruction buffer is a barrier.
// NOTE(review): the signature line (doxygen 197) is missing from this
// listing.
198 {
199  assert(!instructionBuffer.empty());
200  GPUDynInstPtr ii = instructionBuffer.front();
201 
202  if (status != S_STOPPED && ii->isBarrier()) {
203  return true;
204  }
205 
206  return false;
207 }
208 
209 bool
// Wavefront::isOldestInstGMem() — true when the wave is running and the
// front of the instruction buffer is a global-memory instruction.
// NOTE(review): the signature line (doxygen 210) is missing from this
// listing.
212 {
213  assert(!instructionBuffer.empty());
214  GPUDynInstPtr ii = instructionBuffer.front();
215 
216  if (status != S_STOPPED && ii->isGlobalMem()) {
217  return true;
218  }
219 
220  return false;
221 }
221 
222 bool
// Wavefront::isOldestInstLMem() — true when the wave is running and the
// front of the instruction buffer is a local (LDS) memory instruction.
// NOTE(review): the signature line (doxygen 223) is missing from this
// listing.
225 {
226  assert(!instructionBuffer.empty());
227  GPUDynInstPtr ii = instructionBuffer.front();
228 
229  if (status != S_STOPPED && ii->isLocalMem()) {
230  return true;
231  }
232 
233  return false;
234 }
234 
235 bool
// Wavefront::isOldestInstPrivMem() — true when the wave is running and the
// front of the instruction buffer accesses the private segment.
// NOTE(review): the signature line (doxygen 236) is missing from this
// listing.
238 {
239  assert(!instructionBuffer.empty());
240  GPUDynInstPtr ii = instructionBuffer.front();
241 
242  if (status != S_STOPPED && ii->isPrivateSeg()) {
243  return true;
244  }
245 
246  return false;
247 }
247 
248 bool
// Wavefront::isOldestInstFlatMem() — true when the wave is running and the
// front of the instruction buffer is a flat memory instruction.
// NOTE(review): the signature line (doxygen 249) is missing from this
// listing.
250 {
251  assert(!instructionBuffer.empty());
252  GPUDynInstPtr ii = instructionBuffer.front();
253 
254  if (status != S_STOPPED && ii->isFlat()) {
255  return true;
256  }
257 
258  return false;
259 }
260 
261 // Return true if the Wavefront's instruction
262 // buffer has branch instruction.
263 bool
// Wavefront::instructionBufferHasBranch() — unlike the isOldestInst*
// predicates above, this scans the ENTIRE buffer (not just the front) for
// a control-flow instruction (branch or return).
// NOTE(review): the signature line (doxygen 264) is missing from this
// listing.
265 {
266  for (auto it : instructionBuffer) {
267  GPUDynInstPtr ii = it;
268 
269  if (ii->isReturn() || ii->isBranch()) {
270  return true;
271  }
272  }
273 
274  return false;
275 }
276 
277 // Remap HSAIL register to physical VGPR.
278 // HSAIL register = virtual register assigned to an operand by HLC compiler
279 uint32_t
280 Wavefront::remap(uint32_t vgprIndex, uint32_t size, uint8_t mode)
281 {
282  assert((vgprIndex < reservedVectorRegs) && (reservedVectorRegs > 0));
283  // add the offset from where the VGPRs of the wavefront have been assigned
284  uint32_t physicalVgprIndex = startVgprIndex + vgprIndex;
285  // HSAIL double precision (DP) register: calculate the physical VGPR index
286  // assuming that DP registers are placed after SP ones in the VRF. The DP
287  // and SP VGPR name spaces in HSAIL mode are separate so we need to adjust
288  // the DP VGPR index before mapping it to the physical VRF address space
289  if (mode == 1 && size > 4) {
290  physicalVgprIndex = startVgprIndex + maxSpVgprs + (2 * vgprIndex);
291  }
292 
293  assert((startVgprIndex <= physicalVgprIndex) &&
294  (startVgprIndex + reservedVectorRegs - 1) >= physicalVgprIndex);
295 
296  // calculate absolute physical VGPR index
297  return physicalVgprIndex % computeUnit->vrf[simdId]->numRegs();
298 }
299 
300 // Return true if this wavefront is ready
301 // to execute an instruction of the specified type.
302 int
// Wavefront::ready(itype_e type) — scheduler readiness check: returns 1 if
// the front instruction can issue as the requested type this cycle, else 0.
// NOTE(review): this doxygen listing has dropped lines — the signature
// (doxygen 303) and doxygen 313-314, 318, 404, 423, 437, 444, 474, 481,
// 507, 513 and 520 are missing (mostly the leading halves of multi-line
// conditions, e.g. the globalMemoryPipe/localMemoryPipe FIFO checks and
// the VrfAccessType argument of vrfOperandAccessReady). The surviving
// lines are kept byte-for-byte.
304 {
305  // Check to make sure wave is running
306  if (status == S_STOPPED || status == S_RETURNING ||
307  instructionBuffer.empty()) {
308  return 0;
309  }
310 
311  // Is the wave waiting at a barrier
312  if (stalledAtBarrier) {
// NOTE(review): the condition guarding this early-out (doxygen 313-314 —
// presumably an AllAtBarrier() check) and the oldBarrierCnt update
// (doxygen 318) are missing here.
315  // Are all threads at barrier?
316  return 0;
317  }
319  stalledAtBarrier = false;
320  }
321 
322  // Read instruction
323  GPUDynInstPtr ii = instructionBuffer.front();
324 
// Pre-compute bus/issue-slot availability for the global and local memory
// pipelines; flat instructions need both.
325  bool ready_inst M5_VAR_USED = false;
326  bool glbMemBusRdy = false;
327  bool glbMemIssueRdy = false;
328  if (type == I_GLOBAL || type == I_FLAT || type == I_PRIVATE) {
329  for (int j=0; j < computeUnit->numGlbMemUnits; ++j) {
330  if (computeUnit->vrfToGlobalMemPipeBus[j].prerdy())
331  glbMemBusRdy = true;
332  if (computeUnit->wfWait[j].prerdy())
333  glbMemIssueRdy = true;
334  }
335  }
336  bool locMemBusRdy = false;
337  bool locMemIssueRdy = false;
338  if (type == I_SHARED || type == I_FLAT) {
339  for (int j=0; j < computeUnit->numLocMemUnits; ++j) {
340  if (computeUnit->vrfToLocalMemPipeBus[j].prerdy())
341  locMemBusRdy = true;
342  if (computeUnit->wfWait[j].prerdy())
343  locMemIssueRdy = true;
344  }
345  }
346 
347  // The following code is very error prone and the entire process for
348  // checking readiness will be fixed eventually. In the meantime, let's
349  // make sure that we do not silently let an instruction type slip
350  // through this logic and always return not ready.
351  if (!(ii->isBarrier() || ii->isNop() || ii->isReturn() || ii->isBranch() ||
352  ii->isALU() || ii->isLoad() || ii->isStore() || ii->isAtomic() ||
353  ii->isMemFence() || ii->isFlat())) {
354  panic("next instruction: %s is of unknown type\n", ii->disassemble());
355  }
356 
357  DPRINTF(GPUExec, "CU%d: WF[%d][%d]: Checking Read for Inst : %s\n",
358  computeUnit->cu_id, simdId, wfSlotId, ii->disassemble());
359 
360  if (type == I_ALU && ii->isBarrier()) {
361  // Here for ALU instruction (barrier)
362  if (!computeUnit->wfWait[simdId].prerdy()) {
363  // Is wave slot free?
364  return 0;
365  }
366 
367  // Are there in pipe or outstanding memory requests?
368  if ((outstandingReqs + memReqsInPipe) > 0) {
369  return 0;
370  }
371 
372  ready_inst = true;
373  } else if (type == I_ALU && ii->isNop()) {
374  // Here for ALU instruction (nop)
375  if (!computeUnit->wfWait[simdId].prerdy()) {
376  // Is wave slot free?
377  return 0;
378  }
379 
380  ready_inst = true;
381  } else if (type == I_ALU && ii->isReturn()) {
382  // Here for ALU instruction (return)
383  if (!computeUnit->wfWait[simdId].prerdy()) {
384  // Is wave slot free?
385  return 0;
386  }
387 
388  // Are there in pipe or outstanding memory requests?
389  if ((outstandingReqs + memReqsInPipe) > 0) {
390  return 0;
391  }
392 
393  ready_inst = true;
394  } else if (type == I_ALU && (ii->isBranch() ||
395  ii->isALU() ||
396  (ii->isKernArgSeg() && ii->isLoad()) ||
397  ii->isArgSeg())) {
398  // Here for ALU instruction (all others)
399  if (!computeUnit->wfWait[simdId].prerdy()) {
400  // Is alu slot free?
401  return 0;
402  }
// NOTE(review): the trailing VrfAccessType argument of this call
// (doxygen 404) is missing from the listing.
403  if (!computeUnit->vrf[simdId]->vrfOperandAccessReady(this, ii,
405  return 0;
406  }
407 
408  if (!computeUnit->vrf[simdId]->operandsReady(this, ii)) {
409  return 0;
410  }
411  ready_inst = true;
412  } else if (type == I_GLOBAL && ii->isGlobalMem()) {
413  // Here Global memory instruction
414  if (ii->isLoad() || ii->isAtomic() || ii->isMemFence()) {
415  // Are there in pipe or outstanding global memory write requests?
416  if ((outstandingReqsWrGm + wrGmReqsInPipe) > 0) {
417  return 0;
418  }
419  }
420 
421  if (ii->isStore() || ii->isAtomic() || ii->isMemFence()) {
// NOTE(review): the read-request condition line (doxygen 423, presumably
// checking outstandingReqsRdGm + rdGmReqsInPipe) is missing here.
422  // Are there in pipe or outstanding global memory read requests?
424  return 0;
425  }
426 
427  if (!glbMemIssueRdy) {
428  // Is WV issue slot free?
429  return 0;
430  }
431 
432  if (!glbMemBusRdy) {
433  // Is there an available VRF->Global memory read bus?
434  return 0;
435  }
436 
// NOTE(review): the leading half of this condition (doxygen 437, the
// globalMemoryPipe object expression) is missing.
438  isGMReqFIFOWrRdy(rdGmReqsInPipe + wrGmReqsInPipe)) {
439  // Can we insert a new request to the Global Mem Request FIFO?
440  return 0;
441  }
442  // can we schedule source & destination operands on the VRF?
443  if (!computeUnit->vrf[simdId]->vrfOperandAccessReady(this, ii,
445  return 0;
446  }
447  if (!computeUnit->vrf[simdId]->operandsReady(this, ii)) {
448  return 0;
449  }
450  ready_inst = true;
451  } else if (type == I_SHARED && ii->isLocalMem()) {
452  // Here for Shared memory instruction
453  if (ii->isLoad() || ii->isAtomic() || ii->isMemFence()) {
454  if ((outstandingReqsWrLm + wrLmReqsInPipe) > 0) {
455  return 0;
456  }
457  }
458 
459  if (ii->isStore() || ii->isAtomic() || ii->isMemFence()) {
460  if ((outstandingReqsRdLm + rdLmReqsInPipe) > 0) {
461  return 0;
462  }
463  }
464 
465  if (!locMemBusRdy) {
466  // Is there an available VRF->LDS read bus?
467  return 0;
468  }
469  if (!locMemIssueRdy) {
470  // Is wave slot free?
471  return 0;
472  }
473 
// NOTE(review): the leading half of this condition (doxygen 474, the
// localMemoryPipe object expression) is missing.
475  isLMReqFIFOWrRdy(rdLmReqsInPipe + wrLmReqsInPipe)) {
476  // Can we insert a new request to the LDS Request FIFO?
477  return 0;
478  }
479  // can we schedule source & destination operands on the VRF?
480  if (!computeUnit->vrf[simdId]->vrfOperandAccessReady(this, ii,
482  return 0;
483  }
484  if (!computeUnit->vrf[simdId]->operandsReady(this, ii)) {
485  return 0;
486  }
487  ready_inst = true;
488  } else if (type == I_FLAT && ii->isFlat()) {
// Flat accesses may resolve to either global or LDS, so both pipelines
// must have bus and issue-slot capacity available.
489  if (!glbMemBusRdy) {
490  // Is there an available VRF->Global memory read bus?
491  return 0;
492  }
493 
494  if (!locMemBusRdy) {
495  // Is there an available VRF->LDS read bus?
496  return 0;
497  }
498 
499  if (!glbMemIssueRdy) {
500  // Is wave slot free?
501  return 0;
502  }
503 
504  if (!locMemIssueRdy) {
505  return 0;
506  }
// NOTE(review): the leading halves of the two FIFO conditions below
// (doxygen 507 and 513) are missing.
508  isGMReqFIFOWrRdy(rdGmReqsInPipe + wrGmReqsInPipe)) {
509  // Can we insert a new request to the Global Mem Request FIFO?
510  return 0;
511  }
512 
514  isLMReqFIFOWrRdy(rdLmReqsInPipe + wrLmReqsInPipe)) {
515  // Can we insert a new request to the LDS Request FIFO?
516  return 0;
517  }
518  // can we schedule source & destination operands on the VRF?
519  if (!computeUnit->vrf[simdId]->vrfOperandAccessReady(this, ii,
521  return 0;
522  }
523  // are all the operands ready? (RAW, WAW and WAR depedencies met?)
524  if (!computeUnit->vrf[simdId]->operandsReady(this, ii)) {
525  return 0;
526  }
527  ready_inst = true;
528  } else {
529  return 0;
530  }
531 
532  assert(ready_inst);
533 
534  DPRINTF(GPUExec, "CU%d: WF[%d][%d]: Ready Inst : %s\n", computeUnit->cu_id,
535  simdId, wfSlotId, ii->disassemble());
536  return 1;
537 }
538 
539 void
// Wavefront::updateResources() — called at issue time: reserves VRF
// resources for the front instruction and bumps the in-pipe request
// counters / pipeline ready-time presets for the pipeline it will use.
// NOTE(review): this doxygen listing has dropped lines — the signature
// (doxygen 540) and the object expressions preceding each ".preset(...)"
// / "ticks(...)" continuation (doxygen 554, 557, 560, 567, 569-570, 572,
// 574-575, 582, 584-585, 587, 589-590, 595, 597-598, 602, 604-605, 610,
// 612-613, 617, 619-620, 624, 626-627, 632, 634-635) are missing. The
// surviving lines are kept byte-for-byte.
541 {
542  // Get current instruction
543  GPUDynInstPtr ii = instructionBuffer.front();
544  assert(ii);
545  computeUnit->vrf[simdId]->updateResources(this, ii);
546  // Single precision ALU or Branch or Return or Special instruction
547  if (ii->isALU() || ii->isSpecialOp() ||
548  ii->isBranch() ||
549  // FIXME: Kernel argument loads are currently treated as ALU operations
550  // since we don't send memory packets at execution. If we fix that then
551  // we should map them to one of the memory pipelines
552  (ii->isKernArgSeg() && ii->isLoad()) || ii->isArgSeg() ||
553  ii->isReturn()) {
555  ticks(computeUnit->spBypassLength()));
556  // this is to enforce a fixed number of cycles per issue slot per SIMD
558  ticks(computeUnit->issuePeriod));
559  } else if (ii->isBarrier()) {
561  ticks(computeUnit->issuePeriod));
562  } else if (ii->isLoad() && ii->isFlat()) {
// Flat accesses must already have been resolved to shared or global.
563  assert(Enums::SC_NONE != ii->executedAs());
564  memReqsInPipe++;
565  rdGmReqsInPipe++;
566  if ( Enums::SC_SHARED == ii->executedAs() ) {
568  preset(computeUnit->shader->ticks(4));
571  } else {
573  preset(computeUnit->shader->ticks(4));
576  }
577  } else if (ii->isStore() && ii->isFlat()) {
578  assert(Enums::SC_NONE != ii->executedAs());
579  memReqsInPipe++;
580  wrGmReqsInPipe++;
581  if (Enums::SC_SHARED == ii->executedAs()) {
583  preset(computeUnit->shader->ticks(8));
586  } else {
588  preset(computeUnit->shader->ticks(8));
591  }
592  } else if (ii->isLoad() && ii->isGlobalMem()) {
593  memReqsInPipe++;
594  rdGmReqsInPipe++;
596  preset(computeUnit->shader->ticks(4));
599  } else if (ii->isStore() && ii->isGlobalMem()) {
600  memReqsInPipe++;
601  wrGmReqsInPipe++;
603  preset(computeUnit->shader->ticks(8));
606  } else if ((ii->isAtomic() || ii->isMemFence()) && ii->isGlobalMem()) {
// Atomics/fences count as both a read and a write in flight.
607  memReqsInPipe++;
608  wrGmReqsInPipe++;
609  rdGmReqsInPipe++;
611  preset(computeUnit->shader->ticks(8));
614  } else if (ii->isLoad() && ii->isLocalMem()) {
615  memReqsInPipe++;
616  rdLmReqsInPipe++;
618  preset(computeUnit->shader->ticks(4));
621  } else if (ii->isStore() && ii->isLocalMem()) {
622  memReqsInPipe++;
623  wrLmReqsInPipe++;
625  preset(computeUnit->shader->ticks(8));
628  } else if ((ii->isAtomic() || ii->isMemFence()) && ii->isLocalMem()) {
629  memReqsInPipe++;
630  wrLmReqsInPipe++;
631  rdLmReqsInPipe++;
633  preset(computeUnit->shader->ticks(8));
636  }
637 }
638 
639 void
// Wavefront::exec() — executes the front instruction: runs it, updates the
// PC / reconvergence stack, samples per-instruction statistics, and charges
// the pipeline resources the instruction occupies.
// NOTE(review): this doxygen listing has dropped lines — the signature
// (doxygen 640) and doxygen 661, 666-669, 675, 684, 689, 691, 705,
// 709, 712, 718, 720-721, 723, 725-726, 731, 733-734, 736, 738-739, 742,
// 744-745, 747, 749-750, 752, 754-755, 757, 759-760, 762, 764-765, 767,
// 769-770 are missing (stat updates and the object expressions preceding
// the ".set(...)"/"ticks(...)" continuations). Surviving lines are kept
// byte-for-byte.
641 {
642  // ---- Exit if wavefront is inactive ----------------------------- //
643 
644  if (status == S_STOPPED || status == S_RETURNING ||
645  instructionBuffer.empty()) {
646  return;
647  }
648 
649  // Get current instruction
650 
651  GPUDynInstPtr ii = instructionBuffer.front();
652 
653  const uint32_t old_pc = pc();
654  DPRINTF(GPUExec, "CU%d: WF[%d][%d]: wave[%d] Executing inst: %s "
655  "(pc: %i)\n", computeUnit->cu_id, simdId, wfSlotId, wfDynId,
656  ii->disassemble(), old_pc);
657 
658  // update the instruction stats in the CU
659 
660  ii->execute(ii);
662  // access the VRF
663  computeUnit->vrf[simdId]->exec(ii, this);
664  srcRegOpDist.sample(ii->numSrcRegOperands());
665  dstRegOpDist.sample(ii->numDstRegOperands());
// If the instruction did not change the PC itself, advance to the next
// instruction; reaching the reconvergence PC means the current stack
// frame is done, so stale fetched instructions are discarded. A PC
// changed by the instruction (taken branch) also invalidates the buffer.
670  if (pc() == old_pc) {
671  uint32_t new_pc = _gpuISA.advancePC(old_pc, ii);
672  // PC not modified by instruction, proceed to next or pop frame
673  pc(new_pc);
674  if (new_pc == rpc()) {
676  discardFetch();
677  } else {
678  instructionBuffer.pop_front();
679  }
680  } else {
681  discardFetch();
682  }
683 
// Sample divergence statistics from the active-lane count.
// NOTE(review): the guard opening this scope (doxygen 684) and the
// per-GM/LM stat updates (doxygen 689, 691) are missing.
685  const int num_active_lanes = execMask().count();
686  computeUnit->controlFlowDivergenceDist.sample(num_active_lanes);
687  computeUnit->numVecOpsExecuted += num_active_lanes;
688  if (isGmInstruction(ii)) {
690  } else if (isLmInstruction(ii)) {
692  }
693  }
694 
695  // ---- Update Vector ALU pipeline and other resources ------------------ //
696  // Single precision ALU or Branch or Return or Special instruction
697  if (ii->isALU() || ii->isSpecialOp() ||
698  ii->isBranch() ||
699  // FIXME: Kernel argument loads are currently treated as ALU operations
700  // since we don't send memory packets at execution. If we fix that then
701  // we should map them to one of the memory pipelines
702  (ii->isKernArgSeg() && ii->isLoad()) ||
703  ii->isArgSeg() ||
704  ii->isReturn()) {
706  ticks(computeUnit->spBypassLength()));
707 
708  // this is to enforce a fixed number of cycles per issue slot per SIMD
710  ticks(computeUnit->issuePeriod));
711  } else if (ii->isBarrier()) {
713  ticks(computeUnit->issuePeriod));
714  } else if (ii->isLoad() && ii->isFlat()) {
715  assert(Enums::SC_NONE != ii->executedAs());
716 
717  if (Enums::SC_SHARED == ii->executedAs()) {
719  set(computeUnit->shader->ticks(4));
722  } else {
724  set(computeUnit->shader->ticks(4));
727  }
728  } else if (ii->isStore() && ii->isFlat()) {
729  assert(Enums::SC_NONE != ii->executedAs());
730  if (Enums::SC_SHARED == ii->executedAs()) {
732  set(computeUnit->shader->ticks(8));
735  } else {
737  set(computeUnit->shader->ticks(8));
740  }
741  } else if (ii->isLoad() && ii->isGlobalMem()) {
743  set(computeUnit->shader->ticks(4));
746  } else if (ii->isStore() && ii->isGlobalMem()) {
748  set(computeUnit->shader->ticks(8));
751  } else if ((ii->isAtomic() || ii->isMemFence()) && ii->isGlobalMem()) {
753  set(computeUnit->shader->ticks(8));
756  } else if (ii->isLoad() && ii->isLocalMem()) {
758  set(computeUnit->shader->ticks(4));
761  } else if (ii->isStore() && ii->isLocalMem()) {
763  set(computeUnit->shader->ticks(8));
766  } else if ((ii->isAtomic() || ii->isMemFence()) && ii->isLocalMem()) {
768  set(computeUnit->shader->ticks(8));
771  }
772 }
773 
774 bool
// Wavefront::waitingAtBarrier(int lane) — a lane is still waiting while its
// per-lane barrier count is below the wave's maximum barrier count.
// NOTE(review): the signature line (doxygen 775) is missing from this
// listing.
776 {
777  return barCnt[lane] < maxBarCnt;
778 }
779 
780 void
// Wavefront::pushToReconvergenceStack(pc, rpc, mask) — pushes a new
// control-flow divergence frame (PC, reconvergence PC, execution mask); the
// mask must have at least one active lane.
// NOTE(review): the first signature line (doxygen 781, carrying the pc and
// rpc parameters) is missing from this listing; only the second parameter
// line survives.
782  const VectorMask& mask)
783 {
784  assert(mask.count());
785  reconvergenceStack.emplace_back(new ReconvergenceStackEntry{pc, rpc, mask});
786 }
787 
788 void
// Wavefront::popFromReconvergenceStack() — pops the top reconvergence
// frame, tracing the PC and execution mask before and after the pop.
// NOTE(review): the signature line (doxygen 789) and part of the first
// DPRINTF argument list (doxygen 794) are missing from this listing.
790 {
791  assert(!reconvergenceStack.empty());
792 
793  DPRINTF(WavefrontStack, "[%2d, %2d, %2d, %2d] %s %3i => ",
795  execMask().to_string<char, std::string::traits_type,
796  std::string::allocator_type>().c_str(), pc());
797 
798  reconvergenceStack.pop_back();
799 
800  DPRINTF(WavefrontStack, "%3i %s\n", pc(),
801  execMask().to_string<char, std::string::traits_type,
802  std::string::allocator_type>().c_str());
803 
804 }
805 
806 void
// Wavefront::discardFetch() — clears the buffered instructions so that
// stale fetches (e.g. after a taken branch or a popped frame) are not
// executed.
// NOTE(review): the signature line (doxygen 807) and doxygen 810 are
// missing from this listing; per the cross-reference index the latter
// presumably updates dropFetch from pendingFetch — confirm upstream.
808 {
809  instructionBuffer.clear();
811 }
812 
813 uint32_t
// Wavefront::pc() const — PC of the active (top) reconvergence-stack frame.
// NOTE(review): the signature line (doxygen 814) is missing from this
// listing.
815 {
816  return reconvergenceStack.back()->pc;
817 }
818 
819 uint32_t
// Wavefront::rpc() const — reconvergence PC (the immediate post-dominator)
// of the active (top) reconvergence-stack frame.
// NOTE(review): the signature line (doxygen 820) is missing from this
// listing.
821 {
822  return reconvergenceStack.back()->rpc;
823 }
824 
// Wavefront::execMask() const — execution mask of the active (top)
// reconvergence-stack frame, returned by value.
// NOTE(review): the return-type and signature lines (doxygen 825-826) are
// missing from this listing.
827 {
828  return reconvergenceStack.back()->execMask;
829 }
830 
831 bool
832 Wavefront::execMask(int lane) const
833 {
834  return reconvergenceStack.back()->execMask[lane];
835 }
836 
837 
838 void
839 Wavefront::pc(uint32_t new_pc)
840 {
841  reconvergenceStack.back()->pc = new_pc;
842 }
843 
844 uint32_t
// Wavefront::getStaticContextSize() const — byte size of the fixed
// (non-register) portion of the saved hardware context; the field order
// must mirror getContext()/setContext().
// NOTE(review): the signature line (doxygen 845) is missing, and the final
// term(s) of the sum (doxygen 851) are cut off in this listing, so the
// expression below is visibly incomplete — recover from upstream.
846 {
847  return barCnt.size() * sizeof(int) + sizeof(wfId) + sizeof(maxBarCnt) +
848  sizeof(oldBarrierCnt) + sizeof(barrierCnt) + sizeof(wgId) +
849  sizeof(computeUnit->cu_id) + sizeof(barrierId) + sizeof(initMask) +
850  sizeof(privBase) + sizeof(spillBase) + sizeof(ldsChunk) +
852 }
853 
854 void
// Wavefront::getContext — serializes this wavefront's hardware context
// (barrier state, ids, reconvergence stack, SP/DP VGPRs, condition
// registers, LDS contents) into the caller-supplied buffer. Designed for
// HSAIL execution. Note: despite the "const void *out" signature the
// buffer IS written through a cast; the parameter type is misleading.
// The buffer must be at least getStaticContextSize() bytes plus the
// register/stack/LDS payload, and the field order must match setContext().
855 Wavefront::getContext(const void *out)
856 {
857  uint8_t *iter = (uint8_t *)out;
// Fixed-size scalar state first, in a fixed order.
858  for (int i = 0; i < barCnt.size(); i++) {
859  *(int *)iter = barCnt[i]; iter += sizeof(barCnt[i]);
860  }
861  *(int *)iter = wfId; iter += sizeof(wfId);
862  *(int *)iter = maxBarCnt; iter += sizeof(maxBarCnt);
863  *(int *)iter = oldBarrierCnt; iter += sizeof(oldBarrierCnt);
864  *(int *)iter = barrierCnt; iter += sizeof(barrierCnt);
865  *(int *)iter = computeUnit->cu_id; iter += sizeof(computeUnit->cu_id);
866  *(uint32_t *)iter = wgId; iter += sizeof(wgId);
867  *(uint32_t *)iter = barrierId; iter += sizeof(barrierId);
868  *(uint64_t *)iter = initMask.to_ullong(); iter += sizeof(initMask.to_ullong());
869  *(Addr *)iter = privBase; iter += sizeof(privBase);
870  *(Addr *)iter = spillBase; iter += sizeof(spillBase);
871 
// Save the reconvergence stack into a fixed number of slots (one per
// work-item column); unused slots are filled with an all-max sentinel
// entry that setContext() recognizes and skips. NOTE: this is
// destructive — entries are popped (top first) as they are saved.
872  int stackSize = reconvergenceStack.size();
873  ReconvergenceStackEntry empty = {std::numeric_limits<uint32_t>::max(),
874  std::numeric_limits<uint32_t>::max(),
875  std::numeric_limits<uint64_t>::max()};
876  for (int i = 0; i < workItemId[0].size(); i++) {
877  if (i < stackSize) {
878  *(ReconvergenceStackEntry *)iter = *reconvergenceStack.back();
879  iter += sizeof(ReconvergenceStackEntry);
880  reconvergenceStack.pop_back();
881  } else {
882  *(ReconvergenceStackEntry *)iter = empty;
883  iter += sizeof(ReconvergenceStackEntry);
884  }
885  }
886 
// Single precision VGPRs, remapped (mode 1 = HSAIL name space) to
// physical VRF indices, one value per lane.
887  int wf_size = computeUnit->wfSize();
888  for (int i = 0; i < maxSpVgprs; i++) {
889  uint32_t vgprIdx = remap(i, sizeof(uint32_t), 1);
890  for (int lane = 0; lane < wf_size; lane++) {
891  uint32_t regVal = computeUnit->vrf[simdId]->
892  read<uint32_t>(vgprIdx,lane);
893  *(uint32_t *)iter = regVal; iter += sizeof(regVal);
894  }
895  }
896 
// Double precision VGPRs.
897  for (int i = 0; i < maxDpVgprs; i++) {
898  uint32_t vgprIdx = remap(i, sizeof(uint64_t), 1);
899  for (int lane = 0; lane < wf_size; lane++) {
900  uint64_t regVal = computeUnit->vrf[simdId]->
901  read<uint64_t>(vgprIdx,lane);
902  *(uint64_t *)iter = regVal; iter += sizeof(regVal);
903  }
904  }
905 
// Condition registers, per lane.
906  for (int i = 0; i < condRegState->numRegs(); i++) {
907  for (int lane = 0; lane < wf_size; lane++) {
908  uint64_t regVal = condRegState->read<uint64_t>(i, lane);
909  *(uint64_t *)iter = regVal; iter += sizeof(regVal);
910  }
911  }
912 
913  /* saving LDS content */
914  if (ldsChunk)
915  for (int i = 0; i < ldsChunk->size(); i++) {
916  char val = ldsChunk->read<char>(i);
917  *(char *) iter = val; iter += sizeof(val);
918  }
919 }
920 
921 void
// Wavefront::setContext — restores this wavefront's hardware context from a
// byte stream previously produced by getContext() (field order must match).
// Designed for HSAIL execution.
// NOTE(review): this doxygen listing has dropped lines — doxygen 940 (the
// declaration reading a ReconvergenceStackEntry "newEntry" from the stream)
// and doxygen 971 (presumably the "restoring LDS" comment) are missing; the
// surviving lines are kept byte-for-byte.
922 Wavefront::setContext(const void *in)
923 {
924  uint8_t *iter = (uint8_t *)in;
// Fixed-size scalar state first, mirroring getContext().
925  for (int i = 0; i < barCnt.size(); i++) {
926  barCnt[i] = *(int *)iter; iter += sizeof(barCnt[i]);
927  }
928  wfId = *(int *)iter; iter += sizeof(wfId);
929  maxBarCnt = *(int *)iter; iter += sizeof(maxBarCnt);
930  oldBarrierCnt = *(int *)iter; iter += sizeof(oldBarrierCnt);
931  barrierCnt = *(int *)iter; iter += sizeof(barrierCnt);
932  computeUnit->cu_id = *(int *)iter; iter += sizeof(computeUnit->cu_id);
933  wgId = *(uint32_t *)iter; iter += sizeof(wgId);
934  barrierId = *(uint32_t *)iter; iter += sizeof(barrierId);
935  initMask = VectorMask(*(uint64_t *)iter); iter += sizeof(initMask);
936  privBase = *(Addr *)iter; iter += sizeof(privBase);
937  spillBase = *(Addr *)iter; iter += sizeof(spillBase);
938 
// Rebuild the reconvergence stack; sentinel entries (pc == uint32 max,
// written by getContext() for unused slots) are skipped.
939  for (int i = 0; i < workItemId[0].size(); i++) {
941  iter += sizeof(ReconvergenceStackEntry);
942  if (newEntry.pc != std::numeric_limits<uint32_t>::max()) {
943  pushToReconvergenceStack(newEntry.pc, newEntry.rpc,
944  newEntry.execMask);
945  }
946  }
947  int wf_size = computeUnit->wfSize();
948 
// Single precision VGPRs (mode 1 = HSAIL name-space remap), per lane.
949  for (int i = 0; i < maxSpVgprs; i++) {
950  uint32_t vgprIdx = remap(i, sizeof(uint32_t), 1);
951  for (int lane = 0; lane < wf_size; lane++) {
952  uint32_t regVal = *(uint32_t *)iter; iter += sizeof(regVal);
953  computeUnit->vrf[simdId]->write<uint32_t>(vgprIdx, regVal, lane);
954  }
955  }
956 
// Double precision VGPRs.
957  for (int i = 0; i < maxDpVgprs; i++) {
958  uint32_t vgprIdx = remap(i, sizeof(uint64_t), 1);
959  for (int lane = 0; lane < wf_size; lane++) {
960  uint64_t regVal = *(uint64_t *)iter; iter += sizeof(regVal);
961  computeUnit->vrf[simdId]->write<uint64_t>(vgprIdx, regVal, lane);
962  }
963  }
964 
// Condition registers, per lane.
965  for (int i = 0; i < condRegState->numRegs(); i++) {
966  for (int lane = 0; lane < wf_size; lane++) {
967  uint64_t regVal = *(uint64_t *)iter; iter += sizeof(regVal);
968  condRegState->write<uint64_t>(i, lane, regVal);
969  }
970  }
// Restore LDS contents byte by byte.
972  if (ldsChunk)
973  for (int i = 0; i < ldsChunk->size(); i++) {
974  char val = *(char *) iter; iter += sizeof(val);
975  ldsChunk->write<char>(i, val);
976  }
977 }
978 
979 void
// Wavefront::computeActualWgSz(NDRange *ndr) — clamps each work-group
// dimension so that a partial group at the grid edge does not extend past
// the remaining grid size.
// NOTE(review): the signature line (doxygen 980) is missing from this
// listing, as is doxygen 986 — per the index, actualWgSzTotal is the
// product accumulator, so the missing line is presumably
// "actualWgSzTotal *= actualWgSz[d];" — confirm upstream.
981 {
982  actualWgSzTotal = 1;
983  for (int d = 0; d < 3; ++d) {
984  actualWgSz[d] = std::min(workGroupSz[d],
985  gridSz[d] - ndr->wgId[d] * workGroupSz[d]);
987  }
988 }
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:163
std::vector< uint32_t > oldVgpr
Definition: wavefront.hh:234
#define DPRINTF(x,...)
Definition: trace.hh:225
uint32_t workGroupSz[3]
Definition: wavefront.hh:195
void discardFetch()
Definition: wavefront.cc:807
Addr spillBase
Definition: wavefront.hh:261
bool isOldestInstGMem()
Definition: wavefront.cc:210
std::vector< uint8_t >::size_type size() const
get the size of this chunk
Definition: lds_state.hh:96
Stats::Scalar numTimesBlockedDueRAWDependencies
Definition: wavefront.hh:290
Bitfield< 7 > i
void setContext(const void *in)
Sets the hardware context from a stream of bytes. This method is designed for HSAIL execution...
Definition: wavefront.cc:922
void write(int regIdx, int threadId, T value)
uint32_t barrierCnt
Definition: wavefront.hh:155
Stats::Scalar numTimesBlockedDueVrfPortAvail
Definition: wavefront.hh:284
std::deque< std::unique_ptr< ReconvergenceStackEntry > > reconvergenceStack
Stack containing Control Flow Graph nodes (i.e., kernel instructions) to be visited by the wavefront...
Definition: wavefront.hh:390
Stats::Distribution controlFlowDivergenceDist
int maxBarCnt
Definition: wavefront.hh:252
uint32_t getStaticContextSize() const
Returns the size of the static hardware context of a particular wavefront This should be updated ever...
Definition: wavefront.cc:845
uint32_t gridSz[3]
Definition: wavefront.hh:196
std::bitset< std::numeric_limits< unsigned long long >::digits > VectorMask
Definition: misc.hh:43
VectorMask initMask
Definition: wavefront.hh:248
int simdId
Definition: wavefront.hh:163
bool dropFetch
Definition: wavefront.hh:170
void pushToReconvergenceStack(uint32_t pc, uint32_t rpc, const VectorMask &exec_mask)
Definition: wavefront.cc:781
uint32_t dispatchId
Definition: wavefront.hh:206
T read(int regIdx, int threadId)
class ConditionRegisterState * condRegState
Definition: wavefront.hh:173
bool isOldestInstFlatMem()
Definition: wavefront.cc:249
Bitfield< 4, 0 > mode
bool isOldestInstPrivMem()
Definition: wavefront.cc:236
int wfSlotId
Definition: wavefront.hh:160
bool stalledAtBarrier
Definition: wavefront.hh:254
uint32_t maxSpVgprs
Definition: wavefront.hh:175
LdsChunk * ldsChunk
Definition: wavefront.hh:258
Stats::Scalar numTimesBlockedDueWAXDependencies
Definition: wavefront.hh:287
int spBypassLength()
uint32_t pc() const
Definition: wavefront.cc:814
Stats::Scalar numInstrExecuted
uint64_t lastTrace
Definition: wavefront.hh:226
Bitfield< 63 > val
Definition: misc.hh:769
std::vector< WaitClass > vrfToLocalMemPipeBus
bool instructionBufferHasBranch()
Definition: wavefront.cc:264
std::vector< WaitClass > aluPipe
uint64_t wfDynId
Definition: wavefront.hh:280
CallArgMem * callArgMem
Definition: wavefront.hh:298
GlobalMemPipeline globalMemoryPipe
Definition: compute_unit.hh:98
std::shared_ptr< GPUDynInst > GPUDynInstPtr
Definition: misc.hh:46
Stats::Distribution activeLanesPerLMemInstrDist
Stats::Distribution srcRegOpDist
Definition: wavefront.hh:293
uint8_t type
Definition: inet.hh:328
std::vector< uint32_t > workItemId[3]
Definition: wavefront.hh:191
uint32_t pc
PC of current instruction.
Definition: wavefront.hh:64
std::deque< GPUDynInstPtr > instructionBuffer
Definition: wavefront.hh:167
Stats::Distribution execRateDist
int ShrMemUnitId()
uint32_t actualWgSz[3]
Definition: wavefront.hh:200
uint32_t wfId
Definition: wavefront.hh:204
uint32_t rdLmReqsInPipe
Definition: wavefront.hh:220
void regStats()
Callback to set stat parameters.
Definition: wavefront.cc:93
Addr privBase
Definition: wavefront.hh:268
std::vector< uint32_t > workItemFlatId
Definition: wavefront.hh:192
Wavefront(const Params *p)
Definition: wavefront.cc:49
void updateResources()
Definition: wavefront.cc:540
std::vector< WaitClass > vrfToGlobalMemPipeBus
void updateInstStats(GPUDynInstPtr gpuDynInst)
std::vector< int > barCnt
Definition: wavefront.hh:251
Bitfield< 9 > d
ComputeUnit * computeUnit
Definition: wavefront.hh:165
uint32_t wgId
Definition: wavefront.hh:197
Stats::Distribution dstRegOpDist
Definition: wavefront.hh:294
uint32_t rdGmReqsInPipe
Definition: wavefront.hh:221
bool isLmInstruction(GPUDynInstPtr ii)
Definition: wavefront.cc:172
virtual void init()
init() is called after all C++ SimObjects have been created and all ports are connected.
Definition: wavefront.cc:133
uint32_t outstandingReqsWrLm
Definition: wavefront.hh:215
uint32_t actualWgSzTotal
Definition: wavefront.hh:201
int wfSize() const
void getContext(const void *out)
Returns the hardware context as a stream of bytes This method is designed for HSAIL execution...
Definition: wavefront.cc:855
uint32_t outstandingReqsRdGm
Definition: wavefront.hh:217
int ready(itype_e type)
Definition: wavefront.cc:303
int memTraceBusy
Definition: wavefront.hh:225
int AllAtBarrier(uint32_t _barrier_id, uint32_t bcnt, uint32_t bslots)
void computeActualWgSz(NDRange *ndr)
Definition: wavefront.cc:980
Distribution & init(Counter min, Counter max, Counter bkt)
Set the parameters of this distribution.
Definition: statistics.hh:2606
T read(const uint32_t index)
a read operation
Definition: lds_state.hh:71
WavefrontParams Params
Definition: wavefront.hh:319
void exec()
Definition: wavefront.cc:640
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:140
bool isOldestInstALU()
Definition: wavefront.cc:182
uint32_t outstandingReqsRdLm
Definition: wavefront.hh:219
uint64_t basePtr
Definition: wavefront.hh:152
uint32_t outstandingReqs
Definition: wavefront.hh:208
Bitfield< 24 > j
bool isOldestInstBarrier()
Definition: wavefront.cc:197
uint32_t outstandingReqsWrGm
Definition: wavefront.hh:213
bool isGmInstruction(GPUDynInstPtr ii)
Definition: wavefront.cc:163
Derived & name(const std::string &name)
Set the name and marks this stat to print at the end of simulation.
Definition: statistics.hh:276
std::vector< Addr > lastAddr
Definition: wavefront.hh:190
virtual const std::string name() const
Definition: sim_object.hh:129
Shader * shader
TheGpuISA::GPUISA _gpuISA
Definition: wavefront.hh:382
int32_t getRefCounter(const uint32_t dispatchId, const uint32_t wgId) const
Stats::Distribution activeLanesPerGMemInstrDist
uint32_t oldBarrierCnt
Definition: wavefront.hh:154
uint64_t oldDgprTcnt
Definition: wavefront.hh:245
bool pendingFetch
Definition: wavefront.hh:169
uint64_t oldVgprTcnt
Definition: wavefront.hh:238
uint32_t memReqsInPipe
Definition: wavefront.hh:211
bool isOldestInstLMem()
Definition: wavefront.cc:223
int reservedVectorRegs
Definition: wavefront.hh:228
uint32_t startVgprIndex
Definition: wavefront.hh:231
A reconvergence stack entry conveys the necessary state to implement control flow divergence...
Definition: wavefront.hh:60
Stats::Scalar numVecOpsExecuted
uint32_t wrGmReqsInPipe
Definition: wavefront.hh:223
std::vector< VectorRegisterFile * > vrf
void start(uint64_t _wfDynId, uint64_t _base_ptr)
Definition: wavefront.cc:155
uint32_t rpc() const
Definition: wavefront.cc:820
Tick ticks(int numCycles) const
Definition: shader.hh:91
std::vector< uint64_t > oldDgpr
Definition: wavefront.hh:241
VectorMask execMask() const
Definition: wavefront.cc:826
void popFromReconvergenceStack()
Definition: wavefront.cc:789
int nextLocRdBus()
void resizeRegFiles(int num_cregs, int num_sregs, int num_dregs)
Definition: wavefront.cc:140
hsail_mode_e hsail_mode
Definition: shader.hh:107
void write(const uint32_t index, const T value)
a write operation
Definition: lds_state.hh:84
uint32_t wrLmReqsInPipe
Definition: wavefront.hh:222
Bitfield< 3, 0 > mask
Definition: types.hh:62
Derived & desc(const std::string &_desc)
Set the description and marks this stat to print at the end of simulation.
Definition: statistics.hh:309
std::vector< uint64_t > lastExecCycle
uint32_t maxDpVgprs
Definition: wavefront.hh:177
std::vector< WaitClass > wfWait
LocalMemPipeline localMemoryPipe
Definition: compute_unit.hh:99
virtual void regStats()
Callback to set stat parameters.
Definition: group.cc:64
int GlbMemUnitId()
uint32_t remap(uint32_t vgprIndex, uint32_t size, uint8_t mode=0)
Definition: wavefront.cc:280
uint32_t barrierId
Definition: wavefront.hh:156
Stats::Scalar totalCycles
Bitfield< 0 > p
int wgId[3]
Definition: ndrange.hh:48
int nextGlbRdBus()
Counter value() const
Return the current value of this stat as its base type.
Definition: statistics.hh:700
Abstract superclass for simulation objects.
Definition: sim_object.hh:93
status_e status
Definition: wavefront.hh:158
void sample(const U &v, int n=1)
Add a value to the distribtion n times.
Definition: statistics.hh:1896
const std::string to_string(sc_enc enc)
Definition: sc_fxdefs.cc:60
uint32_t rpc
PC of the immediate post-dominator instruction, i.e., the value of pc for the first instruction that ...
Definition: wavefront.hh:70
bool waitingAtBarrier(int lane)
Definition: wavefront.cc:775
VectorMask execMask
Execution mask.
Definition: wavefront.hh:74

Generated on Fri Jul 3 2020 15:53:03 for gem5 by doxygen 1.8.13