53 instRequestorID(params.system->getRequestorId(this, "inst")),
54 dataRequestorID(params.system->getRequestorId(this, "data")),
55 instTraceFile(params.instTraceFile),
56 dataTraceFile(params.dataTraceFile),
57 icacheGen(*this, ".iside", icachePort, instRequestorID, instTraceFile),
58 dcacheGen(*this, ".dside", dcachePort, dataRequestorID, dataTraceFile,
61 dcacheNextEvent([this]{ schedDcacheNext(); }, name()),
62 oneTraceComplete(false),
64 execCompleteEvent(nullptr),
65 enableEarlyExit(params.enableEarlyExit),
66 progressMsgInterval(params.progressMsgInterval),
67 progressMsgThreshold(params.progressMsgInterval), traceStats(this)
74 fatal_if(params.sizeROB > UINT16_MAX,
75 "ROB size set to %d exceeds the max. value of %d.",
76 params.sizeROB, UINT16_MAX);
77 fatal_if(params.sizeStoreBuffer > UINT16_MAX,
78 "Store buffer size set to %d exceeds the max. value of %d.",
79 params.sizeStoreBuffer, UINT16_MAX);
80 fatal_if(params.sizeLoadBuffer > UINT16_MAX,
81 "Load buffer size set to %d exceeds the max. value of %d.",
82 params.sizeLoadBuffer, UINT16_MAX);
107 DPRINTF(TraceCPUInst, "Instruction fetch request trace file is \"%s\".\n",
109 DPRINTF(TraceCPUData, "Data memory request trace file is \"%s\".\n",
121 traceOffset = std::min(first_icache_tick, first_dcache_tick);
122 inform("%s: Time offset (tick) found as min of both traces is %lli.",
150 DPRINTF(TraceCPUInst, "IcacheGen event.\n");
157 "Scheduling next icacheGen event at %d.\n",
176 DPRINTF(TraceCPUData, "DcacheGen event.\n");
179 baseStats.numCycles = clockEdge() / clockPeriod();
208 statistics::Group(trace),
209 ADD_STAT(numSchedDcacheEvent, statistics::units::Count::get(),
210 "Number of events scheduled to trigger data request generator"),
211 ADD_STAT(numSchedIcacheEvent, statistics::units::Count::get(),
212 "Number of events scheduled to trigger instruction request "
214 ADD_STAT(numOps, statistics::units::Count::get(),
215 "Number of micro-ops simulated by the Trace CPU"),
216 ADD_STAT(cpi, statistics::units::Rate<
217 statistics::units::Cycle, statistics::units::Count>::get(),
218 "Cycles per micro-op used as a proxy for CPI",
219 trace->baseStats.numCycles / numOps)
226 const std::string& _name) :
227 statistics::Group(parent, _name.c_str()),
228 ADD_STAT(maxDependents, statistics::units::Count::get(),
229 "Max number of dependents observed on a node"),
230 ADD_STAT(maxReadyListSize, statistics::units::Count::get(),
231 "Max size of the ready list observed"),
232 ADD_STAT(numSendAttempted, statistics::units::Count::get(),
233 "Number of first attempts to send a request"),
234 ADD_STAT(numSendSucceeded, statistics::units::Count::get(),
235 "Number of successful first attempts"),
236 ADD_STAT(numSendFailed, statistics::units::Count::get(),
237 "Number of failed first attempts"),
238 ADD_STAT(numRetrySucceeded, statistics::units::Count::get(),
239 "Number of successful retries"),
240 ADD_STAT(numSplitReqs, statistics::units::Count::get(),
241 "Number of split requests"),
242 ADD_STAT(numSOLoads, statistics::units::Count::get(),
243 "Number of strictly ordered loads"),
244 ADD_STAT(numSOStores, statistics::units::Count::get(),
245 "Number of strictly ordered stores"),
247 "Last tick simulated from the elastic data trace")
254 DPRINTF(TraceCPUData, "Initializing data memory request generator "
255 "DcacheGen: elastic issue with retry.\n");
258 "Trace has %d elements. It must have at least %d elements.",
260 DPRINTF(TraceCPUData, "After 1st read, depGraph size:%d.\n",
264 "Trace has %d elements. It must have at least %d elements.",
266 DPRINTF(TraceCPUData, "After 2nd read, depGraph size:%d.\n",
270 if (debug::TraceCPUData) {
275 "Execute tick of the first dependency free node %lli is %d.\n",
276 free_itr->seqNum, free_itr->execTick);
279 return (free_itr->execTick);
286 free_node.execTick -= offset;
300 DPRINTF(TraceCPUData, "Reading next window from file.\n");
308 DPRINTF(TraceCPUData, "Start read: Size of depGraph is %d.\n",
311 uint32_t num_read = 0;
321 DPRINTF(TraceCPUData, "\tTrace complete!\n");
335 if (new_node->robDep.empty() && new_node->regDep.empty()) {
343 DPRINTF(TraceCPUData, "End read: Size of depGraph is %d.\n",
352 auto dep_it = dep_list.begin();
353 while (dep_it != dep_list.end()) {
355 auto parent_itr = depGraph.find(*dep_it);
360 parent_itr->second->dependents.push_back(new_node);
361 auto num_depts = parent_itr->second->dependents.size();
369 dep_it = dep_list.erase(dep_it);
377 DPRINTF(TraceCPUData, "Execute start occupancy:\n");
378 DPRINTFR(TraceCPUData, "\tdepGraph = %d, readyList = %d, "
396 "Removing from depFreeQueue: seq. num %lli.\n",
411 graph_itr = depGraph.find(free_itr->seqNum);
412 assert(graph_itr != depGraph.end());
420 panic("Retry packet's sequence number does not match "
421 "the first node in the readyList.\n");
451 "Node seq. num %lli sent. Waking up dependents..\n",
454 auto child_itr = (node_ptr->dependents).begin();
455 while (child_itr != (node_ptr->dependents).end()) {
458 if (!(*child_itr)->isStore() &&
459 (*child_itr)->removeRobDep(node_ptr->seqNum)) {
462 if ((*child_itr)->robDep.empty() &&
463 (*child_itr)->regDep.empty()) {
471 child_itr = node_ptr->dependents.erase(child_itr);
482 DPRINTF(TraceCPUData, "Node seq. num %lli done. Waking"
483 " up dependents..\n", node_ptr->seqNum);
488 if (child->removeDepOnInst(node_ptr->seqNum)) {
522 if (debug::TraceCPUData) {
524 DPRINTF(TraceCPUData, "Execute end occupancy:\n");
525 DPRINTFR(TraceCPUData, "\tdepGraph = %d, readyList = %d, "
532 DPRINTF(TraceCPUData, "Not scheduling an event as expecting a retry "
533 "event from the cache for seq. num %lli.\n",
550 DPRINTF(TraceCPUData, "Attempting to schedule @%lli.\n",
555 DPRINTF(TraceCPUData, "Attempting to schedule @%lli.\n",
564 DPRINTF(TraceCPUData, "\tExecution Complete!\n");
573 DPRINTF(TraceCPUData, "Executing memory request %lli (phys addr %d, "
574 "virt addr %d, pc %#x, size %d, flags %d).\n",
583 DPRINTF(TraceCPUData, "Skipping strictly ordered request %lli.\n",
594 unsigned blk_size = owner.cacheLineSize();
596 if (!(blk_offset + node_ptr->size <= blk_size)) {
597 node_ptr->size = blk_size - blk_offset;
602 auto req = std::make_shared<Request>(
604 req->setReqInstSeqNum(node_ptr->seqNum);
609 req->setPC(node_ptr->pc);
616 req->setReqInstSeqNum(node_ptr->seqNum);
620 uint8_t* pkt_data = new uint8_t[req->getSize()];
625 memset(pkt_data, 0xA, req->getSize());
637 DPRINTF(TraceCPUData, "Send failed. Saving packet for retry.\n");
650 assert(node_ptr->robDep.empty() && node_ptr->regDep.empty());
654 DPRINTFR(TraceCPUData, "\t\tseq. num %lli(%s) with rob num %lli is now"
662 DPRINTFR(TraceCPUData, "\t\tResources available for seq. num %lli. "
663 "Adding to readyList, occupying resources.\n",
675 DPRINTFR(TraceCPUData, "\t\tResources unavailable for seq. num "
676 "%lli. Adding to depFreeQueue.\n", node_ptr->seqNum);
679 DPRINTFR(TraceCPUData, "\t\tResources unavailable for seq. num "
680 "%lli. Still pending issue.\n", node_ptr->seqNum);
700 auto graph_itr = depGraph.find(pkt->req->getReqInstSeqNum());
701 assert(graph_itr != depGraph.end());
707 DPRINTF(TraceCPUData, "Load seq. num %lli response received. Waking up"
708 " dependents..\n", node_ptr->seqNum);
711 if (child->removeDepOnInst(node_ptr->seqNum)) {
726 if (debug::TraceCPUData) {
747 DPRINTF(TraceCPUData, "Attempting to schedule @%lli.\n",
758 ready_node.seqNum = seq_num;
778 if (retryPkt->req->getReqInstSeqNum() == itr->seqNum)
785 while (!found && itr != readyList.end()) {
788 if (exec_tick < itr->execTick) {
792 } else if (exec_tick == itr->execTick) {
795 if (seq_num < itr->seqNum) {
818 DPRINTF(TraceCPUData, "readyList is empty.\n");
821 DPRINTF(TraceCPUData, "Printing readyList:\n");
823 auto graph_itr = depGraph.find(itr->seqNum);
824 [[maybe_unused]] GraphNode* node_ptr = graph_itr->second;
825 DPRINTFR(TraceCPUData, "\t%lld(%s), %lld\n", itr->seqNum,
832 uint16_t max_rob, uint16_t max_stores, uint16_t max_loads) :
834 sizeStoreBuffer(max_stores),
835 sizeLoadBuffer(max_loads),
836 oldestInFlightRobNum(UINT64_MAX),
848 oldestInFlightRobNum = inFlightNodes.begin()->second;
853 } else if (new_node->isStore()) {
863 assert(!inFlightNodes.empty());
865 "\tClearing done seq. num %d from inFlightNodes..\n",
868 assert(inFlightNodes.find(done_node->seqNum) != inFlightNodes.end());
869 inFlightNodes.erase(done_node->seqNum);
871 if (inFlightNodes.empty()) {
874 oldestInFlightRobNum = UINT64_MAX;
878 oldestInFlightRobNum = inFlightNodes.begin()->second;
882 "\tCleared. inFlightNodes.size() = %d, "
883 "oldestInFlightRobNum = %d\n", inFlightNodes.size(),
884 oldestInFlightRobNum);
890 if (done_node->isLoad()) {
891 assert(numInFlightLoads != 0);
899 releaseStoreBuffer();
906 assert(numInFlightStores != 0);
914 uint16_t num_in_flight_nodes;
915 if (inFlightNodes.empty()) {
916 num_in_flight_nodes = 0;
917 DPRINTFR(TraceCPUData, "\t\tChecking resources to issue seq. num %lli:"
918 " #in-flight nodes = 0", new_node->seqNum);
919 } else if (new_node->robNum > oldestInFlightRobNum) {
923 num_in_flight_nodes = new_node->robNum - oldestInFlightRobNum;
924 DPRINTFR(TraceCPUData, "\t\tChecking resources to issue seq. num %lli:"
925 " #in-flight nodes = %d - %d = %d", new_node->seqNum,
926 new_node->robNum, oldestInFlightRobNum, num_in_flight_nodes);
933 num_in_flight_nodes = 0;
934 DPRINTFR(TraceCPUData, "\t\tChecking resources to issue seq. num %lli:"
935 " new oldestInFlightRobNum = %d, #in-flight nodes ignored",
938 DPRINTFR(TraceCPUData, ", LQ = %d/%d, SQ = %d/%d.\n",
939 numInFlightLoads, sizeLoadBuffer,
940 numInFlightStores, sizeStoreBuffer);
942 if (num_in_flight_nodes >= sizeROB) {
945 if (new_node->isLoad() && numInFlightLoads >= sizeLoadBuffer) {
948 if (new_node->isStore() && numInFlightStores >= sizeStoreBuffer) {
958 return (numInFlightStores != 0 || numInFlightLoads != 0);
964 DPRINTFR(TraceCPUData, "oldestInFlightRobNum = %d, "
965 "LQ = %d/%d, SQ = %d/%d.\n",
966 oldestInFlightRobNum,
967 numInFlightLoads, sizeLoadBuffer,
968 numInFlightStores, sizeStoreBuffer);
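The checks in the fragments above gate issue on three resources: the ROB window measured against the oldest in-flight node, the load buffer, and the store buffer. A condensed, illustrative version of that decision (not the gem5 implementation; the inFlightNodes map is reduced to a flag):

#include <cstdint>

struct HardwareResourceSketch
{
    uint16_t sizeROB, sizeLoadBuffer, sizeStoreBuffer;
    uint16_t numInFlightLoads = 0, numInFlightStores = 0;
    uint64_t oldestInFlightRobNum = UINT64_MAX;
    bool anyInFlight = false;

    bool isAvailable(uint64_t rob_num, bool is_load, bool is_store) const
    {
        // Occupancy is the distance from the oldest outstanding ROB number.
        uint64_t in_flight = (!anyInFlight || rob_num <= oldestInFlightRobNum)
                                 ? 0 : rob_num - oldestInFlightRobNum;
        if (in_flight >= sizeROB)
            return false;
        if (is_load && numInFlightLoads >= sizeLoadBuffer)
            return false;
        if (is_store && numInFlightStores >= sizeStoreBuffer)
            return false;
        return true;
    }
};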
973 statistics::Group(parent, _name.c_str()),
974 ADD_STAT(numSendAttempted, statistics::units::Count::get(),
975 "Number of first attempts to send a request"),
976 ADD_STAT(numSendSucceeded, statistics::units::Count::get(),
977 "Number of successful first attempts"),
978 ADD_STAT(numSendFailed, statistics::units::Count::get(),
979 "Number of failed first attempts"),
980 ADD_STAT(numRetrySucceeded, statistics::units::Count::get(),
981 "Number of successful retries"),
983 "Last tick simulated from the fixed inst trace")
991 DPRINTF(TraceCPUInst, "Initializing instruction fetch request generator"
992 " IcacheGen: fixed issue with retry.\n");
998 panic("Read of first message in the trace failed.\n");
1008 DPRINTF(TraceCPUInst, "Trying to send retry packet.\n");
1012 DPRINTF(TraceCPUInst, "Retry packet sending failed.\n");
1017 DPRINTF(TraceCPUInst, "Trying to send packet for currElement.\n");
1026 DPRINTF(TraceCPUInst, "currElement sending failed.\n");
1037 DPRINTF(TraceCPUInst, "Packet sent successfully, trying to read next "
1077 DPRINTF(TraceCPUInst, "inst fetch: %c addr %d pc %#x size %d tick %d\n",
1102 uint8_t* pkt_data = new uint8_t[req->getSize()];
1106 memset(pkt_data, 0xA, req->getSize());
1124 DPRINTF(TraceCPUInst, "Icache retry received. Scheduling next IcacheGen"
1125 " event @%lli.\n", curTick());
1134 DPRINTF(TraceCPUData, "Dcache retry received. Scheduling next DcacheGen"
1135 " event @%lli.\n", curTick());
1143 DPRINTF(TraceCPUData, "Scheduling next DcacheGen event at %lli.\n",
1148 DPRINTF(TraceCPUData, "Re-scheduling next dcache event from %lli"
1174 DPRINTF(TraceCPUData, "Received timing response from Dcache.\n");
1198 const std::string& filename, const double time_multiplier) :
1200 timeMultiplier(time_multiplier),
1204 ProtoMessage::InstDepRecordHeader header_msg;
1206 panic("Failed to read packet header from %s\n", filename);
1209 panic("Trace %s was recorded with a different tick frequency %d\n",
1210 header_msg.tick_freq());
1228 ProtoMessage::InstDepRecord pkt_msg;
1231 element->seqNum = pkt_msg.seq_num();
1232 element->type = pkt_msg.type();
1234 element->compDelay = pkt_msg.comp_delay() * timeMultiplier;
1238 for (int i = 0; i < (pkt_msg.rob_dep()).size(); i++) {
1239 element->robDep.push_back(pkt_msg.rob_dep(i));
1244 for (int i = 0; i < (pkt_msg.reg_dep()).size(); i++) {
1248 bool duplicate = false;
1249 for (auto &dep: element->robDep) {
1250 duplicate |= (pkt_msg.reg_dep(i) == dep);
1253 element->regDep.push_back(pkt_msg.reg_dep(i));
1257 if (pkt_msg.has_p_addr())
1258 element->physAddr = pkt_msg.p_addr();
1262 if (pkt_msg.has_v_addr())
1263 element->virtAddr = pkt_msg.v_addr();
1267 if (pkt_msg.has_size())
1268 element->size = pkt_msg.size();
1272 if (pkt_msg.has_flags())
1273 element->flags = pkt_msg.flags();
1277 if (pkt_msg.has_pc())
1278 element->pc = pkt_msg.pc();
1284 if (pkt_msg.has_weight()) {
1285 microOpCount += pkt_msg.weight();
1287 element->robNum = microOpCount;
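While filling a GraphNode from an elastic-trace record (the loop over reg_dep above), a register dependency on a producer that is already tracked as an order (ROB) dependency is skipped, since the order dependency already covers it. A small illustrative version of that filter, using std::vector in place of the real dependency list types:

#include <cstdint>
#include <vector>

// Record a register dependency only if the producer is not already an order
// (ROB) dependency of the same node.
void addRegDep(std::vector<uint64_t> &regDep,
               const std::vector<uint64_t> &robDep, uint64_t producer)
{
    for (uint64_t dep : robDep) {
        if (dep == producer)
            return;  // duplicate of an existing ROB dependency; skip
    }
    regDep.push_back(producer);
}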
1298 for (auto it = regDep.begin(); it != regDep.end(); it++) {
1299 if (*it == reg_dep) {
1303 "\tFor %lli: Marking register dependency %lli done.\n",
1316 for (auto it = robDep.begin(); it != robDep.end(); it++) {
1317 if (*it == rob_dep) {
1321 "\tFor %lli: Marking ROB dependency %lli done.\n",
1333 if (!removeRobDep(done_seq_num)) {
1337 [[maybe_unused]] bool regdep_found = removeRegDep(done_seq_num);
1338 assert(regdep_found);
1341 return robDep.empty() && regDep.empty();
1348 DPRINTFR(TraceCPUData, "%lli", seqNum);
1349 DPRINTFR(TraceCPUData, ",%s", typeToStr());
1350 if (isLoad() || isStore()) {
1352 DPRINTFR(TraceCPUData, ",%i", size);
1353 DPRINTFR(TraceCPUData, ",%i", flags);
1355 DPRINTFR(TraceCPUData, ",%lli", compDelay);
1357 for (auto &dep: robDep) {
1358 DPRINTFR(TraceCPUData, ",%lli", dep);
1361 for (auto &dep: regDep) {
1362 DPRINTFR(TraceCPUData, ",%lli", dep);
1364 auto child_itr = dependents.begin();
1365 DPRINTFR(TraceCPUData, "dependents:");
1366 while (child_itr != dependents.end()) {
1367 DPRINTFR(TraceCPUData, ":%lli", (*child_itr)->seqNum);
1372 #endif // TRACING_ON
1378 return Record::RecordType_Name(type);
1385 ProtoMessage::PacketHeader header_msg;
1387 panic("Failed to read packet header from %s\n", filename);
1390 panic("Trace %s was recorded with a different tick frequency %d\n",
1391 header_msg.tick_freq());
1407 element->cmd = pkt_msg.cmd();
1408 element->addr = pkt_msg.addr();
1410 element->tick = pkt_msg.tick();
1411 element->flags = pkt_msg.has_flags() ? pkt_msg.flags() : 0;
1412 element->pc = pkt_msg.has_pc() ? pkt_msg.pc() : 0;
bool isLoad() const
Is the node a load.
void dcacheRecvTimingResp(PacketPtr pkt)
When data cache port receives a response, this calls the dcache generator method handle to complete t...
Tick curTick()
The universal simulation clock.
void addToSortedReadyList(NodeSeqNum seq_num, Tick exec_tick)
Add a ready node to the readyList.
TraceStats(TraceCPU *trace)
bool removeRegDep(NodeSeqNum reg_dep)
Remove completed instruction from register dependency array.
Tick when() const
Get the time that the event is scheduled.
gem5::TraceCPU::TraceStats traceStats
bool sendTimingReq(PacketPtr pkt)
Attempt to send a timing request to the responder port by calling its corresponding receive function.
void checkAndSchedExitEvent()
This is called when either generator finishes executing from the trace.
bool removeRobDep(NodeSeqNum rob_dep)
Remove completed instruction from order dependency array.
std::string instTraceFile
File names for input instruction and data traces.
statistics::Scalar dataLastTick
Tick when ElasticDataGen completes execution.
Tick init()
Called from TraceCPU init().
PacketPtr retryPkt
PacketPtr used to store the packet to retry.
statistics::Scalar maxReadyListSize
gem5::TraceCPU::ElasticDataGen::ElasticDataGenStatGroup elasticStats
void schedDcacheNextEvent(Tick when)
Schedule event dcacheNextEvent at the given tick.
const uint32_t windowSize
Window size within which to check for dependencies.
NodeSeqNum seqNum
Instruction sequence number.
Port & getInstPort()
Used to get a reference to the icache port.
void occupy(const GraphNode *new_node)
Occupy appropriate structures for an issued node.
uint64_t NodeSeqNum
Node sequence number type.
void takeOverFrom(BaseCPU *oldCPU)
void exit()
Exit the FixedRetryGen.
bool removeDepOnInst(NodeSeqNum done_seq_num)
Check for all dependencies on completed inst.
MemCmd cmd
Specifies if the request is to be a read or a write.
InputStream trace
Input stream used for reading the input trace file.
RequestPtr req
A pointer to the original request.
statistics::Scalar numSOStores
bool recvTimingResp(PacketPtr pkt)
Receive the timing response and call dcacheRecvTimingResp() method of the dcacheGen to handle completi...
std::list< ReadyNode > readyList
List of nodes that are ready to execute.
std::string typeToStr() const
Return string specifying the type of the node.
Addr addr
The address for the request.
Tick Frequency
The simulated frequency of curTick(). (In ticks per second)
bool isValid() const
Check validity of this element.
statistics::Scalar numSplitReqs
static PacketPtr createWrite(const RequestPtr &req)
PacketPtr retryPkt
PacketPtr used to store the packet to retry.
NodeSeqNum seqNum
The sequence number of the ready node.
void completeMemAccess(PacketPtr pkt)
When a load writeback is received, that is when the load completes, release the dependents on it.
void takeOverFrom(Port *old)
A utility function to make it easier to swap out ports.
void addDepsOnParent(GraphNode *new_node, T &dep_list)
Iterate over the dependencies of a new node and add the new node to the list of dependents of the par...
Addr blocksize
The size of the access for the request.
bool isExecComplete() const
Returns the execComplete variable which is set when the last node is executed.
void exit()
Exit the ElasticDataGen.
void icacheRetryRecvd()
When instruction cache port receives a retry, schedule event icacheNextEvent.
void exitSimLoop(const std::string &message, int exit_code, Tick when, Tick repeat, bool serialize)
Schedule an event to exit the simulation loop (returning to Python) at the end of the current cycle (...
bool isAvailable(const GraphNode *new_node) const
Check if structures required to issue a node are free.
statistics::Scalar numSendSucceeded
void schedIcacheNext()
This is the control flow that uses the functionality of the icacheGen to replay the trace.
Tick traceOffset
This stores the time offset in the trace, which is taken away from the ready times of requests.
Cycles is a wrapper class for representing cycle counts, i.e.
statistics::Formula cpi
Stat for the CPI.
TraceCPU & owner
Reference of the TraceCPU.
HardwareResource hwResource
Hardware resources required to contain in-flight nodes and to throttle issuing of new nodes when reso...
EventFunctionWrapper icacheNextEvent
Event for the control flow method schedIcacheNext()
bool traceComplete
Set to true when end of trace is reached.
bool nextExecute()
Reads a line of the trace file.
statistics::Scalar numRetrySucceeded
ElasticDataGen dcacheGen
Instance of ElasticDataGen to replay data read and write requests.
statistics::Scalar numSendFailed
Addr virtAddr
The virtual address for the request if any.
void release(const GraphNode *done_node)
Release appropriate structures for a completed node.
PacketPtr executeMemReq(GraphNode *node_ptr)
Creates a new request for a load or store assigning the request parameters.
FixedRetryGen icacheGen
Instance of FixedRetryGen to replay instruction read requests.
statistics::Scalar maxDependents
Stats for data memory accesses replayed.
void writeElementAsTrace() const
Write out element in trace-compatible format using debug flag TraceCPUData.
The struct GraphNode stores an instruction in the trace file.
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
NodeRobNum robNum
ROB occupancy number.
Struct to store a ready-to-execute node and its execution tick.
FixedRetryGenStatGroup(statistics::Group *parent, const std::string &_name)
name is the extension to the name for these stats
ProbePointArg< PacketInfo > Packet
Packet probe point.
RecordType type
Type of the node corresponding to the instruction modeled by it.
uint64_t Tick
Tick count type.
RequestPort & port
Reference of the port to be used to issue memory requests.
statistics::Scalar numSendSucceeded
void adjustInitTraceOffset(Tick &offset)
Adjust traceOffset based on what TraceCPU init() determines on comparing the offsets in the fetch req...
bool readNextWindow()
Reads a line of the trace file.
void updateNumOps(uint64_t rob_num)
void recvReqRetry()
Handle a retry signalled by the cache if data access failed in the first attempt.
uint64_t compDelay
Computational delay.
void releaseStoreBuffer()
Release store buffer entry for a completed store.
Tick execTick
The tick at which the ready node must be executed.
statistics::Scalar numOps
Stat for number of simulated micro-ops.
bool isStrictlyOrdered() const
Return true if node has a request which is strictly ordered.
InputStream trace
Input stream used for reading the input trace file.
bool tryNext()
This tries to send the current or retry packet and returns true if successful.
const RequestorID requestorId
RequestorID used for the requests being sent.
statistics::Scalar instLastTick
Last simulated tick by the FixedRetryGen.
void clear()
Make this element invalid.
This struct stores a line in the trace file.
bool recvTimingResp(PacketPtr pkt)
Receive the timing response and simply delete the packet since instruction fetch requests are issued a...
std::unordered_map< NodeSeqNum, GraphNode * > depGraph
Store the depGraph of GraphNodes.
gem5::TraceCPU::FixedRetryGen::FixedRetryGenStatGroup fixedStats
Request::Flags flags
Request flags if any.
Derived & precision(int _precision)
Set the precision and marks this stat to print at the end of simulation.
TraceElement currElement
Store an element read from the trace to send as the next packet.
Port & getDataPort()
Used to get a reference to the dcache port.
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Request::FlagsType flags
Potential request flags to use.
const std::string & name()
Tick init()
Called from TraceCPU init().
bool traceComplete
Set to true when end of trace is reached.
statistics::Scalar numSchedIcacheEvent
statistics::Scalar numSOLoads
bool isTraceComplete()
Returns the traceComplete variable which is set when end of the input trace file is reached.
statistics::Scalar numSendAttempted
Stats for instruction accesses replayed.
const bool enableEarlyExit
Exit when any one Trace CPU completes its execution.
bool nextRead
Set to true when the next window of instructions need to be read.
statistics::Scalar numSendFailed
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
EventFunctionWrapper dcacheNextEvent
Event for the control flow method schedDcacheNext()
std::vector< GraphNode * > dependents
A vector of nodes dependent (outgoing) on this node.
static int numTraceCPUs
Number of Trace CPUs in the system used as a shared variable and passed to the CountedExitEvent event...
std::queue< const GraphNode * > depFreeQueue
Queue of dependency-free nodes that are pending issue because resources are not available.
void schedDcacheNext()
This is the control flow that uses the functionality of the dcacheGen to replay the trace.
TraceCPU & owner
Reference of the TraceCPU.
statistics::Scalar numSendAttempted
Tick tick
The time at which the request should be sent.
void dataDynamic(T *p)
Set the data pointer to a value that should have delete [] called on it.
int ContextID
Globally unique thread context ID.
bool awaitingResponse() const
Check if there are any outstanding requests, i.e.
const RequestorID requestorId
RequestorID used for the requests being sent.
CountedExitEvent * execCompleteEvent
A CountedExitEvent which when serviced decrements the counter.
const FlagsType init
This Stat is Initialized.
void execute()
This is the main execute function which consumes nodes from the sorted readyList.
uint64_t progressMsgThreshold
void dcacheRetryRecvd()
When data cache port receives a retry, schedule event dcacheNextEvent.
const uint64_t progressMsgInterval
Interval of committed instructions specified by the user at which a progress info message is printed.
HardwareResource(uint16_t max_rob, uint16_t max_stores, uint16_t max_loads)
Constructor that initializes the sizes of the structures.
void printOccupancy()
Print resource occupancy for debugging.
bool oneTraceComplete
Set to true when one of the generators finishes replaying its trace.
bool send(Addr addr, unsigned size, const MemCmd &cmd, Request::FlagsType flags, Addr pc)
Creates a new request assigning the request parameters passed by the arguments.
void recvReqRetry()
Handle a retry signalled by the cache if instruction read failed in the first attempt.
The trace cpu replays traces generated using the elastic trace probe attached to the O3 CPU model.
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condi...
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
static PacketPtr createRead(const RequestPtr &req)
Constructor-like methods that return Packets based on Request objects.
bool checkAndIssue(const GraphNode *node_ptr, bool first=true)
Attempts to issue a node once the node's source dependencies are complete.
statistics::Scalar numSchedDcacheEvent
RegDepList regDep
List of register dependencies (incoming) if any.
TraceCPU(const TraceCPUParams ¶ms)
std::string dataTraceFile
uint32_t size
Size of request if any.
RequestPort & port
Reference of the port to be used to issue memory requests.
ElasticDataGenStatGroup(statistics::Group *parent, const std::string &_name)
name is the extension to the name for these stats
bool execComplete
Set true when execution of trace is complete.
statistics::Scalar numRetrySucceeded
RobDepList robDep
List of order dependencies.
bool scheduled() const
Determine if the current event is scheduled.
void printReadyList()
Print readyList for debugging using debug flag TraceCPUData.
Addr physAddr
The address for the request if any.
bool isStore() const
Is the node a store.
#define panic(...)
This implements a cprintf based panic() function.
int64_t delta
Stores the difference in the send ticks of the current and last packets.
Counter value() const
Return the current value of this stat as its base type.
Generated on Wed May 4 2022 12:13:55 for gem5 by doxygen 1.8.17