gem5 v20.1.0.0
gpu_dyn_inst.cc
1 /*
2  * Copyright (c) 2015-2017 Advanced Micro Devices, Inc.
3  * All rights reserved.
4  *
5  * For use for simulation and test purposes only
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions are met:
9  *
10  * 1. Redistributions of source code must retain the above copyright notice,
11  * this list of conditions and the following disclaimer.
12  *
13  * 2. Redistributions in binary form must reproduce the above copyright notice,
14  * this list of conditions and the following disclaimer in the documentation
15  * and/or other materials provided with the distribution.
16  *
17  * 3. Neither the name of the copyright holder nor the names of its
18  * contributors may be used to endorse or promote products derived from this
19  * software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
25  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "gpu-compute/gpu_dyn_inst.hh"
35 
36 #include "debug/GPUMem.hh"
37 #include "gpu-compute/gpu_static_inst.hh"
38 #include "gpu-compute/scalar_register_file.hh"
39 #include "gpu-compute/shader.hh"
40 #include "gpu-compute/wavefront.hh"
41 
42 GPUDynInst::GPUDynInst(ComputeUnit *_cu, Wavefront *_wf,
43  GPUStaticInst *static_inst, InstSeqNum instSeqNum)
44  : GPUExecContext(_cu, _wf), scalarAddr(0), addr(computeUnit()->wfSize(),
45  (Addr)0), numScalarReqs(0), isSaveRestore(false),
46  _staticInst(static_inst), _seqNum(instSeqNum)
47 {
48  statusVector.assign(TheGpuISA::NumVecElemPerVecReg, 0);
49  tlbHitLevel.assign(computeUnit()->wfSize(), -1);
50  // vector instructions can have up to 4 source/destination operands
51  d_data = new uint8_t[computeUnit()->wfSize() * 4 * sizeof(double)];
52  a_data = new uint8_t[computeUnit()->wfSize() * 8];
53  x_data = new uint8_t[computeUnit()->wfSize() * 8];
54  // scalar loads can read up to 16 Dwords of data (see publicly
55  // available GCN3 ISA manual)
56  scalar_data = new uint8_t[16 * sizeof(uint32_t)];
57  for (int i = 0; i < (16 * sizeof(uint32_t)); ++i) {
58  scalar_data[i] = 0;
59  }
60  for (int i = 0; i < (computeUnit()->wfSize() * 8); ++i) {
61  a_data[i] = 0;
62  x_data[i] = 0;
63  }
64  for (int i = 0; i < (computeUnit()->wfSize() * 4 * sizeof(double)); ++i) {
65  d_data[i] = 0;
66  }
67  time = 0;
68 
69  cu_id = _cu->cu_id;
70  if (_wf) {
71  simdId = _wf->simdId;
72  wfDynId = _wf->wfDynId;
73  kern_id = _wf->kernId;
74  wg_id = _wf->wgId;
75  wfSlotId = _wf->wfSlotId;
76  } else {
77  simdId = -1;
78  wfDynId = -1;
79  kern_id = -1;
80  wg_id = -1;
81  wfSlotId = -1;
82  }
83 }
84 
85 GPUDynInst::~GPUDynInst()
86 {
87  delete[] d_data;
88  delete[] a_data;
89  delete[] x_data;
90  delete[] scalar_data;
91  delete _staticInst;
92 }
93 
94 void
95 GPUDynInst::execute(GPUDynInstPtr gpuDynInst)
96 {
97  _staticInst->execute(gpuDynInst);
98 }
99 
100 int
101 GPUDynInst::numSrcRegOperands()
102 {
103  return _staticInst->numSrcRegOperands();
104 }
105 
106 int
107 GPUDynInst::numDstRegOperands()
108 {
109  return _staticInst->numDstRegOperands();
110 }
111 
112 int
113 GPUDynInst::numSrcVecOperands()
114 {
115  return _staticInst->numSrcVecOperands();
116 }
117 
118 int
119 GPUDynInst::numDstVecOperands()
120 {
121  return _staticInst->numDstVecOperands();
122 }
123 
124 int
125 GPUDynInst::numSrcVecDWORDs()
126 {
127  return _staticInst->numSrcVecDWORDs();
128 }
129 
130 int
131 GPUDynInst::numDstVecDWORDs()
132 {
133  return _staticInst->numDstVecDWORDs();
134 }
135 
136 int
137 GPUDynInst::numOpdDWORDs(int operandIdx)
138 {
139  return _staticInst->numOpdDWORDs(operandIdx);
140 }
141 
142 int
143 GPUDynInst::getNumOperands()
144 {
145  return _staticInst->getNumOperands();
146 }
147 
148 bool
149 GPUDynInst::isVectorRegister(int operandIdx)
150 {
151  return _staticInst->isVectorRegister(operandIdx);
152 }
153 
154 bool
155 GPUDynInst::isScalarRegister(int operandIdx)
156 {
157  return _staticInst->isScalarRegister(operandIdx);
158 }
159 
160 int
161 GPUDynInst::getRegisterIndex(int operandIdx, GPUDynInstPtr gpuDynInst)
162 {
163  return _staticInst->getRegisterIndex(operandIdx, gpuDynInst);
164 }
165 
166 int
167 GPUDynInst::getOperandSize(int operandIdx)
168 {
169  return _staticInst->getOperandSize(operandIdx);
170 }
171 
172 bool
173 GPUDynInst::isDstOperand(int operandIdx)
174 {
175  return _staticInst->isDstOperand(operandIdx);
176 }
177 
178 bool
179 GPUDynInst::isSrcOperand(int operandIdx)
180 {
181  return _staticInst->isSrcOperand(operandIdx);
182 }
183 
184 bool
185 GPUDynInst::hasSourceSgpr() const
186 {
187  for (int i = 0; i < _staticInst->getNumOperands(); ++i) {
188  if (_staticInst->isScalarRegister(i) && _staticInst->isSrcOperand(i)) {
189  return true;
190  }
191  }
192  return false;
193 }
194 
195 bool
196 GPUDynInst::hasSourceVgpr() const
197 {
198  for (int i = 0; i < _staticInst->getNumOperands(); ++i) {
199  if (_staticInst->isVectorRegister(i) && _staticInst->isSrcOperand(i)) {
200  return true;
201  }
202  }
203  return false;
204 }
205 
206 bool
207 GPUDynInst::hasDestinationSgpr() const
208 {
209  for (int i = 0; i < _staticInst->getNumOperands(); ++i) {
210  if (_staticInst->isScalarRegister(i) && _staticInst->isDstOperand(i)) {
211  return true;
212  }
213  }
214  return false;
215 }
216 
217 bool
218 GPUDynInst::srcIsVgpr(int index) const
219 {
220  assert(index >= 0 && index < _staticInst->getNumOperands());
221  if (_staticInst->isVectorRegister(index) &&
222  _staticInst->isSrcOperand(index)) {
223  return true;
224  }
225  return false;
226 }
227 
228 bool
229 GPUDynInst::hasDestinationVgpr() const
230 {
231  for (int i = 0; i < _staticInst->getNumOperands(); ++i) {
232  if (_staticInst->isVectorRegister(i) && _staticInst->isDstOperand(i)) {
233  return true;
234  }
235  }
236  return false;
237 }
238 
239 bool
240 GPUDynInst::isOpcode(const std::string& opcodeStr,
241  const std::string& extStr) const
242 {
243  return _staticInst->opcode().find(opcodeStr) != std::string::npos &&
244  _staticInst->opcode().find(extStr) != std::string::npos;
245 }
246 
247 bool
248 GPUDynInst::isOpcode(const std::string& opcodeStr) const
249 {
250  return _staticInst->opcode().find(opcodeStr) != std::string::npos;
251 }
252 
253 const std::string&
254 GPUDynInst::disassemble() const
255 {
256  return _staticInst->disassemble();
257 }
258 
259 InstSeqNum
260 GPUDynInst::seqNum() const
261 {
262  return _seqNum;
263 }
264 
265 Enums::StorageClassType
266 GPUDynInst::executedAs()
267 {
268  return _staticInst->executed_as;
269 }
270 
271 bool
272 GPUDynInst::hasVgprRawDependence(GPUDynInstPtr s)
273 {
274  assert(s);
275  for (int i = 0; i < getNumOperands(); ++i) {
276  if (isVectorRegister(i) && isSrcOperand(i)) {
277  for (int j = 0; j < s->getNumOperands(); ++j) {
278  if (s->isVectorRegister(j) && s->isDstOperand(j)) {
279  if (i == j)
280  return true;
281  }
282  }
283  }
284  }
285  return false;
286 }
287 
288 bool
289 GPUDynInst::hasSgprRawDependence(GPUDynInstPtr s)
290 {
291  assert(s);
292  for (int i = 0; i < getNumOperands(); ++i) {
293  if (isScalarRegister(i) && isSrcOperand(i)) {
294  for (int j = 0; j < s->getNumOperands(); ++j) {
295  if (s->isScalarRegister(j) && s->isDstOperand(j)) {
296  if (i == j)
297  return true;
298  }
299  }
300  }
301  }
302  return false;
303 }
304 
305 // Process a memory instruction and (if necessary) submit timing request
306 void
307 GPUDynInst::initiateAcc(GPUDynInstPtr gpuDynInst)
308 {
309  DPRINTF(GPUMem, "CU%d: WF[%d][%d]: mempacket status bitvector=%#x\n",
310  cu->cu_id, simdId, wfSlotId, exec_mask.to_ullong());
311 
312  _staticInst->initiateAcc(gpuDynInst);
313 }
314 
315 void
316 GPUDynInst::completeAcc(GPUDynInstPtr gpuDynInst)
317 {
318  DPRINTF(GPUMem, "CU%d: WF[%d][%d]: mempacket status bitvector="
319  "%#x\n complete",
320  cu->cu_id, simdId, wfSlotId, exec_mask.to_ullong());
321 
322  _staticInst->completeAcc(gpuDynInst);
323 }
324 
325 /**
326  * accessor methods for the attributes of the
327  * underlying GPU static instruction
328  */
329 bool
330 GPUDynInst::isALU() const
331 {
332  return _staticInst->isALU();
333 }
334 
335 bool
336 GPUDynInst::isBranch() const
337 {
338  return _staticInst->isBranch();
339 }
340 
341 bool
342 GPUDynInst::isCondBranch() const
343 {
344  return _staticInst->isCondBranch();
345 }
346 
347 bool
348 GPUDynInst::isNop() const
349 {
350  return _staticInst->isNop();
351 }
352 
353 bool
354 GPUDynInst::isEndOfKernel() const
355 {
356  return _staticInst->isEndOfKernel();
357 }
358 
359 bool
360 GPUDynInst::isKernelLaunch() const
361 {
362  return _staticInst->isKernelLaunch();
363 }
364 
365 bool
366 GPUDynInst::isSDWAInst() const
367 {
368  return _staticInst->isSDWAInst();
369 }
370 
371 bool
372 GPUDynInst::isDPPInst() const
373 {
374  return _staticInst->isDPPInst();
375 }
376 
377 bool
378 GPUDynInst::isReturn() const
379 {
380  return _staticInst->isReturn();
381 }
382 
383 bool
384 GPUDynInst::isUnconditionalJump() const
385 {
386  return _staticInst->isUnconditionalJump();
387 }
388 
389 bool
390 GPUDynInst::isSpecialOp() const
391 {
392  return _staticInst->isSpecialOp();
393 }
394 
395 bool
396 GPUDynInst::isWaitcnt() const
397 {
398  return _staticInst->isWaitcnt();
399 }
400 
401 bool
402 GPUDynInst::isBarrier() const
403 {
404  return _staticInst->isBarrier();
405 }
406 
407 bool
408 GPUDynInst::isMemSync() const
409 {
410  return _staticInst->isMemSync();
411 }
412 
413 bool
414 GPUDynInst::isMemRef() const
415 {
416  return _staticInst->isMemRef();
417 }
418 
419 bool
420 GPUDynInst::isFlat() const
421 {
422  return _staticInst->isFlat();
423 }
424 
425 bool
426 GPUDynInst::isLoad() const
427 {
428  return _staticInst->isLoad();
429 }
430 
431 bool
432 GPUDynInst::isStore() const
433 {
434  return _staticInst->isStore();
435 }
436 
437 bool
438 GPUDynInst::isAtomic() const
439 {
440  return _staticInst->isAtomic();
441 }
442 
443 bool
444 GPUDynInst::isAtomicNoRet() const
445 {
446  return _staticInst->isAtomicNoRet();
447 }
448 
449 bool
450 GPUDynInst::isAtomicRet() const
451 {
452  return _staticInst->isAtomicRet();
453 }
454 
455 bool
456 GPUDynInst::isVector() const
457 {
458  return !_staticInst->isScalar();
459 }
460 
461 bool
462 GPUDynInst::isScalar() const
463 {
464  return _staticInst->isScalar();
465 }
466 
467 bool
468 GPUDynInst::readsSCC() const
469 {
470  return _staticInst->readsSCC();
471 }
472 
473 bool
474 GPUDynInst::writesSCC() const
475 {
476  return _staticInst->writesSCC();
477 }
478 
479 bool
480 GPUDynInst::readsVCC() const
481 {
482  return _staticInst->readsVCC();
483 }
484 
485 bool
486 GPUDynInst::writesVCC() const
487 {
488  return _staticInst->writesVCC();
489 }
490 
491 bool
492 GPUDynInst::readsMode() const
493 {
494  return _staticInst->readsMode();
495 }
496 
497 bool
498 GPUDynInst::writesMode() const
499 {
500  return _staticInst->writesMode();
501 }
502 
503 bool
504 GPUDynInst::readsEXEC() const
505 {
506  return _staticInst->readsEXEC();
507 }
508 
509 bool
510 GPUDynInst::writesEXEC() const
511 {
512  return _staticInst->writesEXEC();
513 }
514 
515 bool
516 GPUDynInst::ignoreExec() const
517 {
518  return _staticInst->ignoreExec();
519 }
520 
521 bool
522 GPUDynInst::writesExecMask() const
523 {
524  for (int i = 0; i < _staticInst->getNumOperands(); ++i) {
525  return _staticInst->isDstOperand(i) &&
526  _staticInst->isExecMaskRegister(i);
527  }
528  return false;
529 }
530 
531 bool
532 GPUDynInst::readsExecMask() const
533 {
534  for (int i = 0; i < _staticInst->getNumOperands(); ++i) {
535  return _staticInst->isSrcOperand(i) &&
536  _staticInst->isExecMaskRegister(i);
537  }
538  return false;
539 }
540 
541 bool
542 GPUDynInst::writesFlatScratch() const
543 {
544  for (int i = 0; i < _staticInst->getNumOperands(); ++i) {
545  if (_staticInst->isScalarRegister(i) && _staticInst->isDstOperand(i)) {
546  return _staticInst->isFlatScratchRegister(i);
547  }
548  }
549  return false;
550 }
551 
552 bool
553 GPUDynInst::readsFlatScratch() const
554 {
555  for (int i = 0; i < _staticInst->getNumOperands(); ++i) {
556  if (_staticInst->isScalarRegister(i) && _staticInst->isSrcOperand(i)) {
557  return _staticInst->isFlatScratchRegister(i);
558  }
559  }
560  return false;
561 }
562 
563 bool
564 GPUDynInst::isAtomicAnd() const
565 {
566  return _staticInst->isAtomicAnd();
567 }
568 
569 bool
570 GPUDynInst::isAtomicOr() const
571 {
572  return _staticInst->isAtomicOr();
573 }
574 
575 bool
576 GPUDynInst::isAtomicXor() const
577 {
578  return _staticInst->isAtomicXor();
579 }
580 
581 bool
582 GPUDynInst::isAtomicCAS() const
583 {
584  return _staticInst->isAtomicCAS();
585 }
586 
587 bool GPUDynInst::isAtomicExch() const
588 {
589  return _staticInst->isAtomicExch();
590 }
591 
592 bool
593 GPUDynInst::isAtomicAdd() const
594 {
595  return _staticInst->isAtomicAdd();
596 }
597 
598 bool
599 GPUDynInst::isAtomicSub() const
600 {
601  return _staticInst->isAtomicSub();
602 }
603 
604 bool
605 GPUDynInst::isAtomicInc() const
606 {
607  return _staticInst->isAtomicInc();
608 }
609 
610 bool
611 GPUDynInst::isAtomicDec() const
612 {
613  return _staticInst->isAtomicDec();
614 }
615 
616 bool
617 GPUDynInst::isAtomicMax() const
618 {
619  return _staticInst->isAtomicMax();
620 }
621 
622 bool
623 GPUDynInst::isAtomicMin() const
624 {
625  return _staticInst->isAtomicMin();
626 }
627 
628 bool
629 GPUDynInst::isArgLoad() const
630 {
631  return _staticInst->isArgLoad();
632 }
633 
634 bool
635 GPUDynInst::isGlobalMem() const
636 {
637  return _staticInst->isGlobalMem();
638 }
639 
640 bool
641 GPUDynInst::isLocalMem() const
642 {
643  return _staticInst->isLocalMem();
644 }
645 
646 bool
647 GPUDynInst::isArgSeg() const
648 {
649  return _staticInst->isArgSeg();
650 }
651 
652 bool
653 GPUDynInst::isGlobalSeg() const
654 {
655  return _staticInst->isGlobalSeg();
656 }
657 
658 bool
659 GPUDynInst::isGroupSeg() const
660 {
661  return _staticInst->isGroupSeg();
662 }
663 
664 bool
665 GPUDynInst::isKernArgSeg() const
666 {
667  return _staticInst->isKernArgSeg();
668 }
669 
670 bool
671 GPUDynInst::isPrivateSeg() const
672 {
673  return _staticInst->isPrivateSeg();
674 }
675 
676 bool
677 GPUDynInst::isReadOnlySeg() const
678 {
679  return _staticInst->isReadOnlySeg();
680 }
681 
682 bool
683 GPUDynInst::isSpillSeg() const
684 {
685  return _staticInst->isSpillSeg();
686 }
687 
688 bool
689 GPUDynInst::isGloballyCoherent() const
690 {
691  return _staticInst->isGloballyCoherent();
692 }
693 
694 bool
695 GPUDynInst::isSystemCoherent() const
696 {
697  return _staticInst->isSystemCoherent();
698 }
699 
700 bool
701 GPUDynInst::isF16() const
702 {
703  return _staticInst->isF16();
704 }
705 
706 bool
707 GPUDynInst::isF32() const
708 {
709  return _staticInst->isF32();
710 }
711 
712 bool
713 GPUDynInst::isF64() const
714 {
715  return _staticInst->isF64();
716 }
717 
718 bool
719 GPUDynInst::isFMA() const
720 {
721  return _staticInst->isFMA();
722 }
723 
724 bool
725 GPUDynInst::isMAC() const
726 {
727  return _staticInst->isMAC();
728 }
729 
730 bool
731 GPUDynInst::isMAD() const
732 {
733  return _staticInst->isMAD();
734 }
735 
736 void
737 GPUDynInst::doApertureCheck(const VectorMask &mask)
738 {
739  assert(mask.any());
740  // find the segment of the first active address, after
741  // that we check that all other active addresses also
742  // fall within the same APE
743  for (int lane = 0; lane < computeUnit()->wfSize(); ++lane) {
744  if (mask[lane]) {
745  if (computeUnit()->shader->isLdsApe(addr[lane])) {
746  // group segment
747  staticInstruction()->executed_as = Enums::SC_GROUP;
748  break;
749  } else if (computeUnit()->shader->isScratchApe(addr[lane])) {
750  // private segment
751  staticInstruction()->executed_as = Enums::SC_PRIVATE;
752  break;
753  } else if (computeUnit()->shader->isGpuVmApe(addr[lane])) {
754  // we won't support GPUVM
755  fatal("flat access is in GPUVM APE\n");
756  } else if (bits(addr[lane], 63, 47) != 0x1FFFF &&
757  bits(addr[lane], 63, 47)) {
758  // we are in the "hole", this is a memory violation
759  fatal("flat access at addr %#x has a memory violation\n",
760  addr[lane]);
761  } else {
762  // global memory segment
763  staticInstruction()->executed_as = Enums::SC_GLOBAL;
764  break;
765  }
766  }
767  }
768 
769  // we should have found the segment
770  assert(executedAs() != Enums::SC_NONE);
771 
772  // flat accesses should not straddle multiple APEs so we
773  // must check that all addresses fall within the same APE
774  if (executedAs() == Enums::SC_GROUP) {
775  for (int lane = 0; lane < computeUnit()->wfSize(); ++lane) {
776  if (mask[lane]) {
777  // if the first valid addr we found above was LDS,
778  // all the rest should be
779  assert(computeUnit()->shader->isLdsApe(addr[lane]));
780  }
781  }
782  } else if (executedAs() == Enums::SC_PRIVATE) {
783  for (int lane = 0; lane < computeUnit()->wfSize(); ++lane) {
784  if (mask[lane]) {
785  // if the first valid addr we found above was private,
786  // all the rest should be
787  assert(computeUnit()->shader->isScratchApe(addr[lane]));
788  }
789  }
790  } else {
791  for (int lane = 0; lane < computeUnit()->wfSize(); ++lane) {
792  if (mask[lane]) {
793  // if the first valid addr we found above was global,
794  // all the rest should be. because we don't have an
795  // explicit range of the global segment, we just make
796  // sure that the address fall in no other APE and that
797  // it is not a memory violation
798  assert(!computeUnit()->shader->isLdsApe(addr[lane]));
799  assert(!computeUnit()->shader->isScratchApe(addr[lane]));
800  assert(!computeUnit()->shader->isGpuVmApe(addr[lane]));
801  assert(!(bits(addr[lane], 63, 47) != 0x1FFFF
802  && bits(addr[lane], 63, 47)));
803  }
804  }
805  }
806 }
807 
808 void
809 GPUDynInst::resolveFlatSegment(const VectorMask &mask)
810 {
811  doApertureCheck(mask);
812 
813 
814  // Now that we know the aperature, do the following:
815  // 1. Transform the flat address to its segmented equivalent.
816  // 2. Set the execUnitId based an the aperture check.
817  // 3. Decrement any extra resources that were reserved. Other
818  // resources are released as normal, below.
819  if (executedAs() == Enums::SC_GLOBAL) {
820  // no transormation for global segment
821  wavefront()->execUnitId = wavefront()->flatGmUnitId;
822  wavefront()->decLGKMInstsIssued();
823  if (isLoad()) {
824  wavefront()->rdGmReqsInPipe--;
825  } else if (isStore()) {
826  wavefront()->wrGmReqsInPipe--;
827  } else if (isAtomic() || isMemSync()) {
828  wavefront()->wrGmReqsInPipe--;
829  wavefront()->rdGmReqsInPipe--;
830  } else {
831  panic("Invalid memory operation!\n");
832  }
833  } else if (executedAs() == Enums::SC_GROUP) {
834  for (int lane = 0; lane < wavefront()->computeUnit->wfSize(); ++lane) {
835  if (mask[lane]) {
836  // flat address calculation goes here.
837  // addr[lane] = segmented address
838  panic("Flat group memory operation is unimplemented!\n");
839  }
840  }
841  wavefront()->execUnitId = wavefront()->flatLmUnitId;
842  wavefront()->decVMemInstsIssued();
843  if (isLoad()) {
844  wavefront()->rdLmReqsInPipe--;
845  } else if (isStore()) {
846  wavefront()->wrLmReqsInPipe--;
847  } else if (isAtomic() || isMemSync()) {
848  wavefront()->wrLmReqsInPipe--;
849  wavefront()->rdLmReqsInPipe--;
850  } else {
851  panic("Invalid memory operation!\n");
852  }
853  } else if (executedAs() == Enums::SC_PRIVATE) {
883  uint32_t numSgprs = wavefront()->maxSgprs;
884  uint32_t physSgprIdx =
885  wavefront()->computeUnit->registerManager->mapSgpr(wavefront(),
886  numSgprs - 3);
887  uint32_t offset =
888  wavefront()->computeUnit->srf[simdId]->read(physSgprIdx);
889  physSgprIdx =
890  wavefront()->computeUnit->registerManager->mapSgpr(wavefront(),
891  numSgprs - 4);
892  uint32_t size =
893  wavefront()->computeUnit->srf[simdId]->read(physSgprIdx);
894  for (int lane = 0; lane < wavefront()->computeUnit->wfSize(); ++lane) {
895  if (mask[lane]) {
896  addr[lane] = addr[lane] + lane * size + offset +
897  wavefront()->computeUnit->shader->getHiddenPrivateBase() -
898  wavefront()->computeUnit->shader->getScratchBase();
899  }
900  }
901  wavefront()->execUnitId = wavefront()->flatGmUnitId;
902  wavefront()->decLGKMInstsIssued();
903  if (isLoad()) {
904  wavefront()->rdGmReqsInPipe--;
905  } else if (isStore()) {
906  wavefront()->wrGmReqsInPipe--;
907  } else if (isAtomic() || isMemSync()) {
908  wavefront()->wrGmReqsInPipe--;
909  wavefront()->rdGmReqsInPipe--;
910  } else {
911  panic("Invalid memory operation!\n");
912  }
913  } else {
914  for (int lane = 0; lane < wavefront()->computeUnit->wfSize(); ++lane) {
915  if (mask[lane]) {
916  panic("flat addr %#llx maps to bad segment %d\n",
917  addr[lane], executedAs());
918  }
919  }
920  }
921 }
922 
923 TheGpuISA::ScalarRegU32
924 GPUDynInst::srcLiteral() const
925 {
926  return _staticInst->srcLiteral();
927 }
928 
929 void
930 GPUDynInst::updateStats()
931 {
932  if (_staticInst->isLocalMem()) {
933  // access to LDS (shared) memory
934  cu->dynamicLMemInstrCnt++;
935  } else if (_staticInst->isFlat()) {
936  cu->dynamicFlatMemInstrCnt++;
937  } else {
938  // access to global memory
939 
940  // update PageDivergence histogram
941  int number_pages_touched = cu->pagesTouched.size();
942  assert(number_pages_touched);
943  cu->pageDivergenceDist.sample(number_pages_touched);
944 
945  std::pair<ComputeUnit::pageDataStruct::iterator, bool> ret;
946 
947  for (auto it : cu->pagesTouched) {
948  // see if this page has been touched before. if not, this also
949  // inserts the page into the table.
950  ret = cu->pageAccesses
951  .insert(ComputeUnit::pageDataStruct::value_type(it.first,
952  std::make_pair(1, it.second)));
953 
954  // if yes, then update the stats
955  if (!ret.second) {
956  ret.first->second.first++;
957  ret.first->second.second += it.second;
958  }
959  }
960 
961  cu->pagesTouched.clear();
962 
963  // total number of memory instructions (dynamic)
964  // Atomics are counted as a single memory instruction.
965  // this is # memory instructions per wavefronts, not per workitem
966  cu->dynamicGMemInstrCnt++;
967  }
968 }
969 
970 void
971 GPUDynInst::profileRoundTripTime(Tick currentTime, int hopId)
972 {
973  // Only take the first measurement in the case of coalescing
974  if (roundTripTime.size() > hopId)
975  return;
976 
977  roundTripTime.push_back(currentTime);
978 }
979 
980 void
981 GPUDynInst::profileLineAddressTime(Addr addr, Tick currentTime, int hopId)
982 {
983  if (lineAddressTime.count(addr)) {
984  if (lineAddressTime[addr].size() > hopId) {
985  return;
986  }
987 
988  lineAddressTime[addr].push_back(currentTime);
989  } else if (hopId == 0) {
990  auto addressTimeVec = std::vector<Tick> { currentTime };
991  lineAddressTime.insert(std::make_pair(addr, addressTimeVec));
992  }
993 }
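
The aperture check in doApertureCheck() above classifies each active lane's flat address as LDS (group), scratch (private), or global, and treats a hit in the GPUVM APE or in the non-canonical address "hole" as a fatal error. The short sketch below restates that decision logic in isolation. It is illustrative only: Aperture, Segment, and classifyFlatAddr are hypothetical names invented for this sketch, and the real code reads the APE bounds from the Shader object (isLdsApe, isScratchApe, isGpuVmApe) rather than taking them as parameters.

#include <cstdint>
#include <stdexcept>

// Hypothetical helper types for this sketch only; gem5 keeps the APE
// (aperture) bounds inside the Shader object.
struct Aperture { uint64_t base; uint64_t limit; };

enum class Segment { Group, Private, Global };

static inline bool inApe(const Aperture &ape, uint64_t addr)
{
    return addr >= ape.base && addr <= ape.limit;
}

// Mirrors the decision order in GPUDynInst::doApertureCheck(): LDS first,
// then scratch, then the unsupported GPUVM APE, then the non-canonical
// "hole", and finally global memory as the default.
Segment classifyFlatAddr(uint64_t addr, const Aperture &lds,
                         const Aperture &scratch, const Aperture &gpuVm)
{
    const uint64_t hi17 = addr >> 47;   // bits(addr, 63, 47) in the listing
    if (inApe(lds, addr)) {
        return Segment::Group;
    } else if (inApe(scratch, addr)) {
        return Segment::Private;
    } else if (inApe(gpuVm, addr)) {
        throw std::runtime_error("flat access is in GPUVM APE");
    } else if (hi17 != 0x1FFFF && hi17 != 0) {
        throw std::runtime_error("flat access falls in the address hole");
    }
    return Segment::Global;
}

In the listing itself the same checks run per lane under the execution mask, and a second pass asserts that every active lane resolved to the same segment before resolveFlatSegment() rewrites the addresses.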