54 #include "debug/Drain.hh"
55 #include "debug/Fetch.hh"
56 #include "debug/HtmCpu.hh"
57 #include "debug/LSQ.hh"
58 #include "debug/Writeback.hh"
59 #include "params/O3CPU.hh"
68 _request(request), isLoad(is_load), needWB(is_load)
74 return inst->contextId();
90 params.smtLSQThreshold)),
92 params.smtLSQThreshold)),
103 if (lsqPolicy == SMTQueuePolicy::Dynamic) {
104 DPRINTF(LSQ, "LSQ sharing policy set to Dynamic\n");
105 } else if (lsqPolicy == SMTQueuePolicy::Partitioned) {
106 DPRINTF(Fetch, "LSQ sharing policy set to Partitioned: "
107 "%i entries per LQ | %i entries per SQ\n",
109 } else if (lsqPolicy == SMTQueuePolicy::Threshold) {
111 assert(params.smtLSQThreshold > params.LQEntries);
112 assert(params.smtLSQThreshold > params.SQEntries);
114 DPRINTF(LSQ, "LSQ sharing policy set to Threshold: "
115 "%i entries per LQ | %i entries per SQ\n",
118 panic("Invalid LSQ sharing policy. Options are: Dynamic, "
119 "Partitioned, Threshold");
125 thread[tid].init(cpu, iew_ptr, params, this, tid);
159 DPRINTF(Drain, "Not drained, LQ not empty.\n");
164 DPRINTF(Drain, "Not drained, SQ not empty.\n");
178 thread[tid].takeOverFrom();
231 ThreadID tid = load_inst->threadNumber;
233 thread[tid].insertLoad(load_inst);
239 ThreadID tid = store_inst->threadNumber;
241 thread[tid].insertStore(store_inst);
249 return thread[tid].executeLoad(inst);
257 return thread[tid].executeStore(inst);
263 thread.at(tid).commitLoads(youngest_inst);
269 thread.at(tid).commitStores(youngest_inst);
278 while (threads != end) {
282 DPRINTF(Writeback, "[tid:%i] Writing back stores. %i stores "
286 thread[tid].writebackStores();
293 thread.at(tid).squash(squashed_num);
303 while (threads != end) {
318 return thread.at(tid).getMemDepViolator();
324 return thread.at(tid).getLoadHead();
330 return thread.at(tid).getLoadHeadSeqNum();
336 return thread.at(tid).getStoreHead();
342 return thread.at(tid).getStoreHeadSeqNum();
357 return thread[tid].numHtmStarts();
365 return thread[tid].numHtmStops();
372 thread[tid].resetHtmStartsStops();
381 return thread[tid].getLatestHtmUid();
388 thread[tid].setLastRetiredHtmUid(htmUid);
414 DPRINTF(LSQ, "Got error packet back for address: %#X\n",
418 panic_if(!senderState, "Got packet back with unknown sender state\n");
434 DPRINTF(LSQ, "received invalidation with response for addr:%#x\n",
438 thread[tid].checkSnoop(pkt);
442 senderState->request()->packetReplied();
455 DPRINTF(LSQ, "received invalidation for addr:%#x\n",
458 thread[tid].checkSnoop(pkt);
471 while (threads != end) {
488 while (threads != end) {
505 while (threads != end) {
522 while (threads != end) {
539 while (threads != end) {
551 return thread[tid].numFreeLoadEntries();
557 return thread[tid].numFreeStoreEntries();
566 while (threads != end) {
581 if (lsqPolicy == SMTQueuePolicy::Dynamic)
599 while (threads != end) {
615 while (threads != end) {
631 while (threads != end) {
646 if (lsqPolicy == SMTQueuePolicy::Dynamic)
649 return thread[tid].lqFull();
658 while (threads != end) {
673 if (lsqPolicy == SMTQueuePolicy::Dynamic)
676 return thread[tid].sqFull();
685 while (threads != end) {
698 if (lsqPolicy == SMTQueuePolicy::Dynamic)
701 return thread[tid].isStalled();
710 while (threads != end) {
723 return thread.at(tid).hasStoresToWB();
729 return thread.at(tid).numStoresToWB();
738 while (threads != end) {
751 return thread.at(tid).willWB();
760 while (threads != end) {
770 thread.at(tid).dumpInsts();
781 GEM5_VAR_USED bool isAtomic = !isLoad && amo_op;
794 assert(!isAtomic || (isAtomic && !needs_burst));
798 if (inst->translationStarted()) {
799 req = inst->savedReq;
803 assert(addr == 0x0lu);
806 } else if (needs_burst) {
808 size, flags, data, res);
811 size, flags, data, res, std::move(amo_op));
829 inst->effSize = size;
830 inst->effAddrValid(true);
833 inst->reqToVerify = std::make_shared<Request>(*req->request());
837 fault = cpu->read(req, inst->lqIdx);
845 inst->getFault() = fault;
847 inst->setMemAccPredicate(false);
855 inst->traceData->setMem(addr, size, flags);
857 return inst->getFault();
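In the pushRequest path above, the choice between SplitDataRequest (line 806) and SingleDataRequest hinges on whether the access crosses an aligned cache-line boundary (transferNeedsBurst, described further below). A minimal sketch of that check, assuming the semantics stated in its brief description; needsBurstSketch is an illustrative name, not the gem5 helper.

    // Hedged sketch: true when [addr, addr + size) spills past the end of the
    // aligned block that contains addr, i.e. the access must be fragmented.
    bool
    needsBurstSketch(uint64_t addr, unsigned size, unsigned block_size)
    {
        return (addr % block_size) + size > block_size;
    }

For instance, with 64-byte lines an 8-byte access at address 60 spills 4 bytes into the next line and would take the SplitDataRequest path.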
869 if (_inst->isSquashed()) {
872 _inst->strictlyOrdered(req->isStrictlyOrdered());
874 flags.set(Flag::TranslationFinished);
876 _inst->physEffAddr = req->getPaddr();
877 _inst->memReqFlags = req->getFlags();
878 if (req->isCondSwap()) {
880 req->setExtraData(*_res);
897 for (i = 0; i < _requests.size() && _requests[i] != req; i++);
898 assert(i < _requests.size());
901 numInTranslationFragments--;
902 numTranslatedFragments++;
905 mainReq->setFlags(req->getFlags());
907 if (numTranslatedFragments == _requests.size()) {
908 if (_inst->isSquashed()) {
911 _inst->strictlyOrdered(mainReq->isStrictlyOrdered());
912 flags.set(Flag::TranslationFinished);
913 _inst->translationCompleted(true);
915 for (i = 0; i < _fault.size() && _fault[i] == NoFault; i++);
917 _inst->physEffAddr = request(0)->getPaddr();
918 _inst->memReqFlags = mainReq->getFlags();
919 if (mainReq->isCondSwap()) {
920 assert(i == _fault.size());
922 mainReq->setExtraData(*_res);
924 if (i == _fault.size()) {
926 setState(State::Request);
928 _inst->fault = _fault[i];
929 setState(State::PartialFault);
932 _inst->fault = _fault[0];
943 assert(_requests.size() == 0);
945 addRequest(_addr, _size, _byteEnable);
947 if (_requests.size() > 0) {
948 _requests.back()->setReqInstSeqNum(_inst->seqNum);
949 _requests.back()->taskId(_taskId);
950 _inst->translationStarted(true);
951 setState(State::Translation);
952 flags.set(Flag::TranslationStarted);
954 _inst->savedReq = this;
955 sendFragmentToTranslation(0);
957 _inst->setMemAccPredicate(false);
976 auto cacheLineSize = _port.cacheLineSize();
977 Addr base_addr = _addr;
980 uint32_t size_so_far = 0;
982 mainReq = std::make_shared<Request>(base_addr,
983 _size, _flags, _inst->requestorId(),
984 _inst->instAddr(), _inst->contextId());
985 mainReq->setByteEnable(_byteEnable);
991 mainReq->setPaddr(0);
994 auto it_start = _byteEnable.begin();
995 auto it_end = _byteEnable.begin() + (next_addr - base_addr);
996 addRequest(base_addr, next_addr - base_addr,
998 size_so_far = next_addr - base_addr;
1001 base_addr = next_addr;
1002 while (base_addr != final_addr) {
1003 auto it_start = _byteEnable.begin() + size_so_far;
1004 auto it_end = _byteEnable.begin() + size_so_far + cacheLineSize;
1005 addRequest(base_addr, cacheLineSize,
1007 size_so_far += cacheLineSize;
1008 base_addr += cacheLineSize;
1012 if (size_so_far < _size) {
1013 auto it_start = _byteEnable.begin() + size_so_far;
1014 auto it_end = _byteEnable.end();
1015 addRequest(base_addr, _size - size_so_far,
1019 if (_requests.size() > 0) {
1021 for (auto& r: _requests) {
1022 r->setReqInstSeqNum(_inst->seqNum);
1026 _inst->translationStarted(true);
1027 setState(State::Translation);
1028 flags.set(Flag::TranslationStarted);
1029 _inst->savedReq = this;
1030 numInTranslationFragments = 0;
1031 numTranslatedFragments = 0;
1032 _fault.resize(_requests.size());
1034 for (uint32_t i = 0; i < _requests.size(); i++) {
1035 sendFragmentToTranslation(i);
1038 _inst->setMemAccPredicate(false);
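The loop above (lines 976-1015) carves a burst access into per-cache-line fragments: a head fragment up to the first line boundary, whole-line fragments, and a possible tail, each paired with its slice of the byte-enable mask. Below is a self-contained sketch of that carving, ignoring byte enables; names and types are illustrative, not gem5's.

    #include <cstdint>
    #include <vector>

    struct Fragment { uint64_t addr; unsigned size; };

    // Hedged sketch of the split performed above: assumes the access is known
    // to cross at least one line boundary (the needs_burst case).
    std::vector<Fragment>
    splitByCacheLine(uint64_t addr, unsigned size, unsigned line)
    {
        std::vector<Fragment> frags;
        uint64_t base = addr;
        uint64_t next = (addr / line) * line + line;    // first boundary after addr
        uint64_t last = ((addr + size) / line) * line;  // boundary at or before the end
        unsigned so_far = 0;

        // Head fragment: from addr up to the next line boundary.
        frags.push_back({base, static_cast<unsigned>(next - base)});
        so_far = next - base;
        base = next;

        // Whole-line fragments.
        while (base != last) {
            frags.push_back({base, line});
            so_far += line;
            base += line;
        }

        // Tail fragment, if the access does not end exactly on a boundary.
        if (so_far < size)
            frags.push_back({base, size - so_far});
        return frags;
    }

For a 12-byte access at address 60 with 64-byte lines this yields {60, 4} and {64, 8}, mirroring how each fragment above becomes a separate sub-request that is sent to translation via sendFragmentToTranslation.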
1044 _state(State::NotIssued), _senderState(nullptr),
1045 _port(*port), _inst(inst), _data(nullptr),
1046 _res(nullptr), _addr(0), _size(0), _flags(0),
1047 _numOutstandingPackets(0), _amo_op(nullptr)
1051 _inst->isStoreConditional() || _inst->isAtomic());
1060 : _state(State::NotIssued), _senderState(nullptr),
1061 numTranslatedFragments(0),
1062 numInTranslationFragments(0),
1063 _port(*port), _inst(inst), _data(data),
1064 _res(res), _addr(addr), _size(size),
1066 _numOutstandingPackets(0),
1067 _amo_op(std::move(amo_op))
1071 _inst->isStoreConditional() || _inst->isAtomic());
1080 _port.loadQueue[_inst->lqIdx].setRequest(this);
1084 _port.storeQueue[_inst->sqIdx].setRequest(this);
1095 auto request = std::make_shared<Request>(
1096 addr, size, _flags, _inst->requestorId(),
1097 _inst->instAddr(), _inst->contextId(),
1098 std::move(_amo_op));
1099 request->setByteEnable(byte_enable);
1100 _requests.push_back(request);
1106 assert(!isAnyOutstandingRequest());
1107 _inst->savedReq = nullptr;
1109 delete _senderState;
1111 for (auto r: _packets)
1118 numInTranslationFragments++;
1119 _port.getMMUPtr()->translateTiming(request(i), _inst->thread->getTC(),
1126 assert(_numOutstandingPackets == 1);
1130 assert(pkt == _packets.front());
1131 _port.completeDataAccess(pkt);
1139 uint32_t pktIdx = 0;
1140 while (pktIdx < _packets.size() && pkt != _packets[pktIdx])
1142 assert(pktIdx < _packets.size());
1143 numReceivedPackets++;
1144 state->outstanding--;
1145 if (numReceivedPackets == _packets.size()) {
1156 _port.completeDataAccess(resp);
1165 assert(_senderState);
1167 if (_packets.size() == 0) {
1172 _packets.back()->dataStatic(_inst->memData);
1173 _packets.back()->senderState = _senderState;
1178 if (_inst->inHtmTransactionalState()) {
1179 _packets.back()->setHtmTransactional(
1180 _inst->getHtmTransactionUid());
1183 "HTM %s pc=0x%lx - vaddr=0x%lx - paddr=0x%lx - htmUid=%u\n",
1184 isLoad() ? "LD" : "ST",
1186 _packets.back()->req->hasVaddr() ?
1187 _packets.back()->req->getVaddr() : 0lu,
1188 _packets.back()->getAddr(),
1189 _inst->getHtmTransactionUid());
1192 assert(_packets.size() == 1);
1199 Addr base_address = _addr;
1201 if (_packets.size() == 0) {
1205 _mainPacket->dataStatic(_inst->memData);
1210 if (_inst->inHtmTransactionalState()) {
1211 _mainPacket->setHtmTransactional(
1212 _inst->getHtmTransactionUid());
1214 "HTM LD.0 pc=0x%lx-vaddr=0x%lx-paddr=0x%lx-htmUid=%u\n",
1216 _mainPacket->req->hasVaddr() ?
1217 _mainPacket->req->getVaddr() : 0lu,
1218 _mainPacket->getAddr(),
1219 _inst->getHtmTransactionUid());
1222 for (int i = 0; i < _requests.size() && _fault[i] == NoFault; i++) {
1226 ptrdiff_t offset = r->getVaddr() - base_address;
1230 uint8_t* req_data = new uint8_t[r->getSize()];
1231 std::memcpy(req_data,
1237 _packets.push_back(pkt);
1242 if (_inst->inHtmTransactionalState()) {
1243 _packets.back()->setHtmTransactional(
1244 _inst->getHtmTransactionUid());
1246 "HTM %s.%d pc=0x%lx-vaddr=0x%lx-paddr=0x%lx-htmUid=%u\n",
1247 isLoad() ? "LD" : "ST",
1250 _packets.back()->req->hasVaddr() ?
1251 _packets.back()->req->getVaddr() : 0lu,
1252 _packets.back()->getAddr(),
1253 _inst->getHtmTransactionUid());
1257 assert(_packets.size() > 0);
1263 assert(_numOutstandingPackets == 0);
1264 if (lsqUnit()->trySendPacket(isLoad(), _packets.at(0)))
1265 _numOutstandingPackets = 1;
1272 while (numReceivedPackets + _numOutstandingPackets < _packets.size() &&
1273 lsqUnit()->trySendPacket(isLoad(),
1274 _packets.at(numReceivedPackets + _numOutstandingPackets))) {
1275 _numOutstandingPackets++;
1283 return pkt->req->localAccessor(thread, pkt);
1293 for (auto r: _requests) {
1330 bool is_hit = false;
1331 for (auto &r: _requests) {
1341 if (r->hasPaddr() && (r->getPaddr() & blockMask) == blockAddr) {
1352 return lsq->recvTimingResp(pkt);
1363 lsq->recvTimingSnoopReq(pkt);
1369 lsq->recvReqRetry();
1375 nullptr, nullptr, nullptr)
1385 _requests.back()->setInstCount(_inst->getCpuPtr()->totalInsts());
1391 _inst->savedReq = this;
1395 panic("unexpected behaviour");
1407 flags.set(Flag::TranslationStarted);
1408 flags.set(Flag::TranslationFinished);
1410 _inst->translationStarted(true);
1411 _inst->translationCompleted(true);
1413 setState(State::Request);
1420 panic("unexpected behaviour");
1428 return thread.at(tid).read(req, load_idx);
1436 return thread.at(tid).write(req, data, store_idx);
unsigned LQEntries
Total Size of LQ Entries.
const std::string & cmdString() const
Return the string name of the cmd field (for debugging and tracing).
SMTQueuePolicy lsqPolicy
The LSQ policy for SMT mode.
void insertStore(const DynInstPtr &store_inst)
Inserts a store into the LSQ.
Memory operation metadata.
virtual void initiateTranslation()
constexpr decltype(nullptr) NoFault
void commitLoads(InstSeqNum &youngest_inst, ThreadID tid)
Commits loads up until the given sequence number for a specific thread.
int numStoresToWB(ThreadID tid)
Returns the number of stores a specific thread has to write back.
bool cacheBlocked() const
Is D-cache blocked?
bool isTranslationComplete()
std::vector< bool > _byteEnable
ThreadID numThreads
Number of Threads.
void taskId(const uint32_t &v)
void dumpInsts() const
Debugging function to print out all instructions.
int getCount()
Returns the number of instructions in all of the queues.
virtual void buildPackets()
void writebackStores()
Attempts to write back stores until all cache ports are used or the interface becomes blocked.
void setActiveThreads(std::list< ThreadID > *at_ptr)
Sets the pointer to the list of active threads.
Fault read(LSQRequest *req, int load_idx)
Executes a read operation, using the load specified at the load index.
bool lqEmpty() const
Returns if all of the LQs are empty.
void set(Type mask)
Set all flag's bits matching the given mask.
RequestPtr req
A pointer to the original request.
bool _cacheBlocked
D-cache is blocked.
virtual void buildPackets()
void install()
Install the request in the LQ/SQ.
virtual void initiateTranslation()
unsigned int cacheLineSize() const
Get the cache line size of the system.
void setState(const State &newState)
bool doMonitor(PacketPtr pkt)
virtual Cycles handleLocalAccess(gem5::ThreadContext *thread, PacketPtr pkt)
Memory mapped IPR accesses.
bool isMemAccessRequired()
InstSeqNum getStoreHeadSeqNum(ThreadID tid)
Returns the sequence number of the head of the store queue.
int numLoads()
Returns the total number of loads in the load queue.
virtual bool isCacheBlockHit(Addr blockAddr, Addr cacheBlockMask)
Caches may probe into the load-store queue to enforce memory ordering guarantees.
bool cachePortAvailable(bool is_load) const
Is any store port available to use?
uint8_t outstanding
Number of outstanding packets to complete.
static const FlagsType HTM_CMD
bool squashed() const override
This function is used by the page table walker to determine if it should translate a pending requ...
void recvTimingSnoopReq(PacketPtr pkt)
static PacketPtr createWrite(const RequestPtr &req)
int getStoreHead(ThreadID tid)
Returns the head index of the store queue.
Fetch class handles both single threaded and SMT fetch.
Addr getVaddr(int idx=0) const
virtual void sendPacketToCache()
std::string name() const
Returns the name of the LSQ.
virtual ~LSQRequest()
Destructor.
DcachePort dcachePort
Data port.
virtual void recvTimingSnoopReq(PacketPtr pkt)
Receive a timing snoop request from the peer.
bool isEmpty() const
Returns if the LSQ is empty (both LQ and SQ are empty).
DcachePort(LSQ *_lsq, CPU *_cpu)
Default constructor.
std::list< ThreadID > * activeThreads
List of Active Threads in System.
virtual bool recvTimingResp(PacketPtr pkt)
virtual void sendPacketToCache()
bool isStalled()
Returns if the LSQ is stalled due to a memory operation that must be replayed.
Fault executeStore(const DynInstPtr &inst)
Executes a store.
LSQSenderState(LSQRequest *request, bool is_load)
Default constructor.
int cacheStorePorts
The number of cache ports available each cycle (stores only).
bool sqFull()
Returns if any of the SQs are full.
void resetHtmStartsStops(ThreadID tid)
gem5::Checker< DynInstPtr > * checker
Pointer to the checker, which can dynamically verify instruction results at run time.
A RequestPort is a specialisation of a Port, which implements the default protocol for the three diff...
bool violation()
Returns whether or not there was a memory ordering violation.
Cycles is a wrapper class for representing cycle counts, i.e.
ThreadID numThreads
Number of threads we're actually simulating (<= SMT_MAX_THREADS).
void dataStatic(T *p)
Set the data pointer to the following value that should not be freed.
void completeDataAccess(PacketPtr pkt)
int numHtmStarts(ThreadID tid) const
int numStores()
Returns the total number of stores in the store queue.
Class that implements the actual LQ and SQ for each specific thread.
HtmCmdRequest(LSQUnit *port, const DynInstPtr &inst, const Request::Flags &flags_)
unsigned SQEntries
Total Size of SQ Entries.
uint32_t taskId() const
Get cpu task id.
void tick()
Ticks the LSQ.
virtual void wakeup(ThreadID tid) override
O3CPU class, has each of the stages (fetch through commit) within it, as well as all of the time buff...
Fault write(LSQRequest *req, uint8_t *data, int store_idx)
Executes a store operation, using the store specified at the store index.
unsigned maxSQEntries
Max SQ Size - Used to Enforce Sharing Policies.
ThreadContext is the external interface to all thread state for anything outside of the CPU.
std::shared_ptr< FaultBase > Fault
Addr addrBlockAlign(Addr addr, Addr block_size)
Returns the address of the closest aligned fixed-size block to the given address.
bool isDrained() const
Has the LSQ drained?
IEW * iewStage
The IEW stage pointer.
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
bool transferNeedsBurst(Addr addr, unsigned int size, unsigned int block_size)
Returns true if the given memory access (address, size) needs to be fragmented across aligned fixed-s...
ProbePointArg< PacketInfo > Packet
Packet probe point.
IEW handles both single threaded and SMT IEW (issue/execute/writeback).
std::shared_ptr< Request > RequestPtr
uint64_t getLatestHtmUid(ThreadID tid) const
bool willWB()
Returns if the LSQ will write back to memory this cycle.
unsigned numFreeLoadEntries()
Returns the number of free load entries.
bool isAnyActiveElement(const std::vector< bool >::const_iterator &it_start, const std::vector< bool >::const_iterator &it_end)
Test if there is any active element in an enablement range.
const ThreadID InvalidThreadID
void cachePortBusy(bool is_load)
Another store port is in use.
virtual void recvReqRetry()
Handles doing a retry of the previous send.
ThreadID contextToThread(ContextID cid)
Convert ContextID to threadID.
RequestPtr request(int idx=0)
virtual PacketPtr mainPacket()
void setLastRetiredHtmUid(ThreadID tid, uint64_t htmUid)
AddressMonitor * getCpuAddrMonitor(ThreadID tid)
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
virtual bool isCacheBlockHit(Addr blockAddr, Addr cacheBlockMask)
Test if the request accesses a particular cache line.
SenderState * senderState
This packet's sender state.
DynInstPtr getMemDepViolator(ThreadID tid)
Gets the instruction that caused the memory ordering violation.
void squash(const InstSeqNum &squashed_num, ThreadID tid)
Squash instructions from a thread until the specified sequence number.
bool sqEmpty() const
Returns if all of the SQs are empty.
void drainSanityCheck() const
Perform sanity checks after a drain.
static uint32_t maxLSQAllocation(SMTQueuePolicy pol, uint32_t entries, uint32_t numThreads, uint32_t SMTThreshold)
Auxiliary function to calculate per-thread max LSQ allocation limit.
virtual bool recvTimingResp(PacketPtr pkt)
void recvReqRetry()
Retry the previous send that failed.
virtual Cycles handleLocalAccess(gem5::ThreadContext *thread, PacketPtr pkt)
Memory mapped IPR accesses.
static constexpr int MaxThreads
virtual void initiateTranslation()
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
Fault write(LSQRequest *req, uint8_t *data, int store_idx)
CPU write function, forwards write to LSQ.
void insertLoad(const DynInstPtr &load_inst)
Inserts a load into the LSQ.
Derived class to hold any sender state the LSQ needs.
LSQRequest(LSQUnit *port, const DynInstPtr &inst, bool isLoad)
CPU * cpu
The CPU pointer.
bool hasStoresToWB()
Returns whether or not there are any stores to write back to memory.
Overload hash function for BasicBlockRange type.
int getLoadHead(ThreadID tid)
Returns the head index of the load queue for a specific thread.
virtual void finish(const Fault &fault, const RequestPtr &req, gem5::ThreadContext *tc, BaseMMU::Mode mode)
std::vector< RequestPtr > _requests
unsigned maxLQEntries
Max LQ Size - Used to Enforce Sharing Policies.
virtual RequestPtr mainRequest()
void cacheUnblocked()
Notifies that the cache has become unblocked.
InstSeqNum getLoadHeadSeqNum(ThreadID tid)
Returns the sequence number of the head of the load queue.
virtual void finish(const Fault &fault, const RequestPtr &req, gem5::ThreadContext *tc, BaseMMU::Mode mode)
void dataDynamic(T *p)
Set the data pointer to a value that should have delete [] called on it.
int ContextID
Globally unique thread context ID.
unsigned numFreeStoreEntries()
Returns the number of free store entries.
virtual void initiateTranslation()=0
int usedStorePorts
The number of used cache ports in this cycle by stores.
LSQ(CPU *cpu_ptr, IEW *iew_ptr, const O3CPUParams ¶ms)
Constructs an LSQ with the given parameters.
virtual void finish(const Fault &fault, const RequestPtr &req, gem5::ThreadContext *tc, BaseMMU::Mode mode)
Fault pushRequest(const DynInstPtr &inst, bool isLoad, uint8_t *data, unsigned int size, Addr addr, Request::Flags flags, uint64_t *res, AtomicOpFunctorPtr amo_op, const std::vector< bool > &byte_enable)
uint32_t numTranslatedFragments
virtual bool recvTimingResp(PacketPtr pkt)
Timing version of receive.
void takeOverFrom()
Takes over execution from another CPU's thread.
void commitStores(InstSeqNum &youngest_inst, ThreadID tid)
Commits stores up until the given sequence number for a specific thread.
std::unique_ptr< AtomicOpFunctor > AtomicOpFunctorPtr
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
static PacketPtr createRead(const RequestPtr &req)
Constructor-like methods that return Packets based on Request objects.
const FlagsType total
Print the total.
bool lqFull()
Returns if any of the LQs are full.
int numHtmStops(ThreadID tid) const
void sendFragmentToTranslation(int i)
std::vector< Fault > _fault
void addRequest(Addr addr, unsigned size, const std::vector< bool > &byte_enable)
Helper function used to add a (sub)request, given its address addr, size size and byte-enable mask by...
Fault executeLoad(const DynInstPtr &inst)
Executes a load.
uint32_t numInTranslationFragments
Fault read(LSQRequest *req, int load_idx)
CPU read function, forwards read to LSQ.
int usedLoadPorts
The number of used cache ports in this cycle by loads.
std::vector< LSQUnit > thread
The LSQ units for individual threads.
bool isFull()
Returns if the LSQ is full (either LQ or SQ is full).
int16_t ThreadID
Thread index/ID type.
bool recvTimingResp(PacketPtr pkt)
Handles writing back and completing the load or store that has returned from memory.
std::string name() const
Returns the name of the IEW stage.
bool isInvalidate() const
#define panic(...)
This implements a cprintf based panic() function.
T * getPtr()
get a pointer to the data ptr.
int cacheLoadPorts
The number of cache ports available each cycle (loads only).
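The addRequest and isAnyActiveElement entries above describe how split fragments are filtered by the byte-enable mask: a fragment is only worth creating if at least one byte in its slice of the mask is active. A hedged sketch of that test, using the standard library rather than the gem5 helper; anyActiveSketch is an illustrative name.

    #include <algorithm>
    #include <vector>

    // Illustrative equivalent of isAnyActiveElement: true if any byte in the
    // [it_start, it_end) slice of the byte-enable vector is set.
    bool
    anyActiveSketch(std::vector<bool>::const_iterator it_start,
                    std::vector<bool>::const_iterator it_end)
    {
        return std::find(it_start, it_end, true) != it_end;
    }

In the split loop above each fragment's slice starts at _byteEnable.begin() + size_so_far; a slice with no active bytes presumably yields no sub-request, which is why this helper exists.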