32#ifndef __GPU_STATIC_INST_HH__
33#define __GPU_STATIC_INST_HH__
48#include "enums/GPUStaticInstFlags.hh"
49#include "enums/StorageClassType.hh"
virtual TheGpuISA::ScalarRegU32 srcLiteral() const { return 0; }
121 return _flags[UnconditionalJump];
fatal("calling initiateAcc() on a non-memory instruction.\n");
fatal("calling completeAcc() on a non-memory instruction.\n");
fatal("kernel launch instruction should not be executed\n");
Wrapper that groups a few flag bits under the same underlying container.
virtual int numDstRegOperands()=0
std::bitset< Num_Flags > _flags
virtual TheGpuISA::ScalarRegU32 srcLiteral() const
bool isGloballyCoherent() const
Coherence domain of a memory instruction.
std::vector< OperandInfo > srcOps
const std::string & disassemble()
const std::vector< OperandInfo > & dstVecRegOperands() const
const std::vector< OperandInfo > & srcScalarRegOperands() const
bool isReadOnlySeg() const
std::vector< OperandInfo > dstScalarRegOps
enums::StorageClassType executed_as
const std::string & opcode() const
virtual void generateDisassembly()=0
bool isPrivateSeg() const
virtual int getOperandSize(int operandIndex)=0
virtual bool isExecMaskRegister(int opIdx)=0
const std::vector< OperandInfo > & srcOperands() const
const std::vector< OperandInfo > & dstScalarRegOperands() const
GPUStaticInst(const std::string &opcode)
std::vector< OperandInfo > dstVecRegOps
int _ipdInstNum
Identifier of the immediate post-dominator instruction.
std::vector< OperandInfo > dstOps
static uint64_t dynamic_id_count
bool isKernArgSeg() const
bool isEndOfKernel() const
bool isCondBranch() const
virtual bool isFlatScratchRegister(int opIdx)=0
const std::vector< OperandInfo > & dstOperands() const
bool isKernelLaunch() const
bool isFlatGlobal() const
bool isSystemCoherent() const
virtual void initOperandInfo()=0
virtual void execute(GPUDynInstPtr gpuDynInst)=0
const std::vector< OperandInfo > & srcVecRegOperands() const
virtual void completeAcc(GPUDynInstPtr gpuDynInst)
std::vector< OperandInfo > srcVecRegOps
int numDstScalarOperands()
virtual int getNumOperands()=0
int numSrcScalarOperands()
void generateVirtToPhysMap(Wavefront *wf, ComputeUnit *cu, OperandInfo &op, std::vector< OperandInfo > &opVec, OpType opType)
virtual uint32_t getTargetPc()
virtual int instSize() const =0
std::vector< OperandInfo > srcScalarRegOps
bool isAtomicExch() const
const std::string _opcode
bool isAtomicNoRet() const
virtual int coalescerTokenCount() const
bool isUnconditionalJump() const
void instAddr(int inst_addr)
virtual void initiateAcc(GPUDynInstPtr gpuDynInst)
void initDynOperandInfo(Wavefront *wf, ComputeUnit *cu)
virtual int numSrcRegOperands()=0
int(RegisterManager::* MapRegFn)(Wavefront *, int)
bool isFlatScratch() const
bool isFlatScratchRegister(int opIdx) override
void generateDisassembly() override
int numDstRegOperands() override
int getOperandSize(int operandIndex) override
void execute(GPUDynInstPtr gpuDynInst) override
int getNumOperands() override
int instSize() const override
int numSrcRegOperands() override
bool isExecMaskRegister(int opIdx) override
void initOperandInfo() override
Base class for branch operations.
This is a simple scalar statistic, like a counter.
#define fatal(...)
This implements a cprintf based fatal() function.
Copyright (c) 2024 Pranith Kumar. Copyright (c) 2020 Inria. All rights reserved.
std::shared_ptr< GPUDynInst > GPUDynInstPtr
def format Nop(code, *opt_flags)