36 #include "debug/MemoryAccess.hh" 37 #include "debug/ProtocolTrace.hh" 38 #include "debug/RubySequencer.hh" 39 #include "debug/RubyStats.hh" 42 #include "mem/ruby/protocol/PrefetchBit.hh" 43 #include "mem/ruby/protocol/RubyAccessMode.hh" 51 RubySequencerParams::create()
57 :
RubyPort(p), m_IncompleteTimes(MachineType_NUM),
58 deadlockCheckEvent([this]{
wakeup(); },
"Sequencer deadlock check")
89 int total_outstanding = 0;
92 for (
const auto seq_req : table_entry.second) {
96 panic(
"Possible Deadlock detected. Aborting!\n version: %d " 97 "request.paddr: 0x%x m_readRequestTable: %d current time: " 98 "%u issue_time: %d difference: %d\n",
m_version,
99 seq_req.pkt->getAddr(), table_entry.second.size(),
104 total_outstanding += table_entry.second.size();
121 for (
int i = 0;
i < RubyRequestType_NUM;
i++) {
125 for (
int j = 0;
j < MachineType_NUM;
j++) {
131 for (
int i = 0;
i < MachineType_NUM;
i++) {
148 RubyRequestType secondary_type)
160 seq_req_list.emplace_back(pkt, primary_type, secondary_type,
curCycle());
163 if (seq_req_list.size() > 1) {
164 return RequestStatus_Aliased;
169 return RequestStatus_Ready;
200 if (request->
m_type == RubyRequestType_Store_Conditional) {
206 request->
pkt->
req->setExtraData(0);
213 request->
pkt->
req->setExtraData(1);
219 }
else if (request->
m_type == RubyRequestType_Load_Linked) {
236 const MachineType respondingMach,
237 bool isExternalHit,
Cycles initialRequestTime,
238 Cycles forwardRequestTime,
246 Cycles total_lat = completion_time - issued_time;
248 if (initialRequestTime < issued_time) {
257 DPRINTFR(ProtocolTrace,
"%15s %3s %10s%20s %6s>%-6s %s %d cycles\n",
268 if (respondingMach != MachineType_NUM) {
272 if ((issued_time <= initialRequestTime) &&
273 (initialRequestTime <= forwardRequestTime) &&
274 (forwardRequestTime <= firstResponseTime) &&
275 (firstResponseTime <= completion_time)) {
278 initialRequestTime - issued_time);
280 forwardRequestTime - initialRequestTime);
282 firstResponseTime - forwardRequestTime);
284 completion_time - firstResponseTime);
293 if (respondingMach != MachineType_NUM) {
302 const bool externalHit,
const MachineType mach,
303 const Cycles initialRequestTime,
304 const Cycles forwardRequestTime,
305 const Cycles firstResponseTime)
318 bool ruby_request =
true;
319 int aliased_stores = 0;
320 int aliased_loads = 0;
321 while (!seq_req_list.empty()) {
324 assert(seq_req.
m_type != RubyRequestType_LD);
325 assert(seq_req.
m_type != RubyRequestType_IFETCH);
329 if ((seq_req.
m_type != RubyRequestType_LD) &&
330 (seq_req.
m_type != RubyRequestType_IFETCH)) {
344 if (seq_req.
m_type == RubyRequestType_Locked_RMW_Read) {
352 }
else if (seq_req.
m_type == RubyRequestType_Locked_RMW_Write) {
358 initialRequestTime, forwardRequestTime,
363 hitCallback(&seq_req, data, success, mach, externalHit,
364 initialRequestTime, forwardRequestTime,
368 assert(!ruby_request);
370 hitCallback(&seq_req, data,
true, mach, externalHit,
371 initialRequestTime, forwardRequestTime,
374 seq_req_list.pop_front();
376 ruby_request =
false;
380 if (seq_req_list.empty()) {
387 bool externalHit,
const MachineType mach,
388 Cycles initialRequestTime,
389 Cycles forwardRequestTime,
403 bool ruby_request =
true;
404 int aliased_loads = 0;
405 while (!seq_req_list.empty()) {
408 assert((seq_req.
m_type == RubyRequestType_LD) ||
409 (seq_req.
m_type == RubyRequestType_IFETCH));
413 if ((seq_req.
m_type != RubyRequestType_LD) &&
414 (seq_req.
m_type != RubyRequestType_IFETCH)) {
421 initialRequestTime, forwardRequestTime,
424 hitCallback(&seq_req, data,
true, mach, externalHit,
425 initialRequestTime, forwardRequestTime,
427 seq_req_list.pop_front();
429 ruby_request =
false;
433 if (seq_req_list.empty()) {
441 const MachineType mach,
const bool externalHit,
442 const Cycles initialRequestTime,
443 const Cycles forwardRequestTime,
444 const Cycles firstResponseTime)
446 warn_once(
"Replacement policy updates recently became the responsibility " 447 "of SLICC state machines. Make sure to setMRU() near callbacks " 459 if ((type == RubyRequestType_LD) ||
460 (type == RubyRequestType_IFETCH) ||
461 (type == RubyRequestType_RMW_Read) ||
462 (type == RubyRequestType_Locked_RMW_Read) ||
463 (type == RubyRequestType_Load_Linked)) {
466 DPRINTF(RubySequencer,
"read data %s\n", data);
467 }
else if (pkt->
req->isSwap()) {
472 data.
setData(&overwrite_val[0],
474 DPRINTF(RubySequencer,
"swap data %s\n", data);
475 }
else if (type != RubyRequestType_Store_Conditional || llscSuccess) {
480 DPRINTF(RubySequencer,
"set data %s\n", data);
488 DPRINTF(RubySequencer,
"hitCallback %s 0x%x using RubyTester\n",
492 assert(testerSenderState);
493 testerSenderState->subBlock.mergeFrom(data);
520 return RequestStatus_BufferFull;
523 RubyRequestType primary_type = RubyRequestType_NULL;
524 RubyRequestType secondary_type = RubyRequestType_NULL;
536 DPRINTF(RubySequencer,
"Issuing SC\n");
537 primary_type = RubyRequestType_Store_Conditional;
539 DPRINTF(RubySequencer,
"Issuing LL\n");
541 primary_type = RubyRequestType_Load_Linked;
543 secondary_type = RubyRequestType_ATOMIC;
544 }
else if (pkt->
req->isLockedRMW()) {
552 DPRINTF(RubySequencer,
"Issuing Locked RMW Write\n");
553 primary_type = RubyRequestType_Locked_RMW_Write;
555 DPRINTF(RubySequencer,
"Issuing Locked RMW Read\n");
557 primary_type = RubyRequestType_Locked_RMW_Read;
559 secondary_type = RubyRequestType_ST;
570 primary_type = secondary_type = RubyRequestType_ST;
571 }
else if (pkt->
isRead()) {
572 if (pkt->
req->isInstFetch()) {
573 primary_type = secondary_type = RubyRequestType_IFETCH;
575 bool storeCheck =
false;
578 uint32_t flags = pkt->
req->getFlags();
583 primary_type = RubyRequestType_RMW_Read;
584 secondary_type = RubyRequestType_ST;
586 primary_type = secondary_type = RubyRequestType_LD;
590 primary_type = secondary_type = RubyRequestType_FLUSH;
592 panic(
"Unsupported ruby packet type\n");
598 (primary_type != RubyRequestType_Locked_RMW_Write)) {
602 return RequestStatus_Aliased;
608 if (status != RequestStatus_Ready && status != RequestStatus_Aliased)
612 if (status != RequestStatus_Aliased)
616 return RequestStatus_Issued;
630 if (pkt->
req->hasPC()) {
631 pc = pkt->
req->getPC();
636 std::shared_ptr<RubyRequest> msg =
639 nullptr : pkt->
getPtr<uint8_t>(),
641 RubyAccessMode_Supervisor, pkt,
642 PrefetchBit_No, proc_id, core_id);
644 DPRINTFR(ProtocolTrace,
"%15s %3s %10s%20s %6s>%-6s %#x %s\n",
647 RubyRequestType_to_string(secondary_type));
657 template <
class KEY,
class VALUE>
659 operator<<(ostream &out, const std::unordered_map<KEY, VALUE> &map)
661 for (
const auto &table_entry : map) {
662 out <<
"[ " << table_entry.first <<
" =";
663 for (
const auto &seq_req : table_entry.second) {
664 out <<
" " << RubyRequestType_to_string(seq_req.m_second_type);
691 DPRINTF(RubyStats,
"Recorded statistic: %s\n",
692 SequencerRequestType_to_string(requestType));
714 for (
int i = 0;
i < RubyRequestType_NUM;
i++) {
725 for (
int i = 0;
i < MachineType_NUM;
i++) {
745 for (
int i = 0;
i < RubyRequestType_NUM;
i++) {
749 for (
int j = 0;
j < MachineType_NUM;
j++) {
void enqueueNextFetchRequest()
Function for fetching and warming up the memory and the caches.
#define panic(...)
This implements a cprintf based panic() function.
int m_max_outstanding_requests
void resetStats()
Callback to reset stats.
const uint8_t * getData(int offset, int len) const
std::unordered_map< Addr, std::list< SequencerRequest > > m_RequestTable
Cycles is a wrapper class for representing cycle counts, i.e.
T * findNextSenderState() const
Go through the sender state stack and return the first instance that is of type T (as determined by a...
void recordMissLatency(SequencerRequest *srequest, bool llscSuccess, const MachineType respondingMach, bool isExternalHit, Cycles initialRequestTime, Cycles forwardRequestTime, Cycles firstResponseTime)
std::vector< Stats::Histogram * > m_missTypeLatencyHist
void print(std::ostream &out) const
bool isLocked(int context) const
CacheRecorder * m_cache_recorder
EventFunctionWrapper deadlockCheckEvent
AbstractController * m_controller
DrainState drainState() const
Return the current drain state of an object.
virtual void regStats()
Callback to set stat parameters.
std::vector< Stats::Histogram * > m_FirstResponseToCompletionDelayHist
void recordRequestType(SequencerRequestType requestType)
Histogram & init(size_type size)
Set the parameters of this histogram.
Overload hash function for BasicBlockRange type.
Stats::Histogram m_missLatencyHist
Histogram for holding the latency profile of all requests that miss in the controller connected to this sequencer.
Stats::Histogram m_latencyHist
Histogram for holding latency profile of all requests.
CacheMemory * m_dataCache_ptr
std::vector< Stats::Histogram * > m_typeLatencyHist
T * getPtr()
get a pointer to the data ptr.
RequestStatus insertRequest(PacketPtr pkt, RubyRequestType primary_type, RubyRequestType secondary_type)
RubySystem * m_ruby_system
Stats::Histogram m_outstandReqHist
Histogram for number of outstanding requests per cycle.
std::vector< Stats::Histogram * > m_ForwardToFirstResponseDelayHist
CacheMemory * m_instCache_ptr
RequestPtr req
A pointer to the original request.
std::vector< Stats::Histogram * > m_InitialToForwardDelayHist
static bool getWarmupEnabled()
Tick cyclesToTicks(Cycles c) const
Tick curTick()
The current simulated tick.
void enqueueNextFlushRequest()
Function for flushing the memory contents of the caches to the main memory.
bool scheduled() const
Determine if the current event is scheduled.
void ruby_eviction_callback(Addr address)
void invalidateSC(Addr address)
void setData(const uint8_t *p)
Copy data into the packet from the provided pointer.
uint64_t Tick
Tick count type.
void evictionCallback(Addr address)
std::vector< Stats::Histogram * > m_hitTypeLatencyHist
std::vector< Stats::Histogram * > m_IssueToInitialDelayHist
Histograms for recording the breakdown of miss latency.
bool handleLlsc(Addr address, SequencerRequest *request)
Cycles curCycle() const
Determine the current cycle, corresponding to a tick aligned to a clock edge.
void writeData(uint8_t *p) const
Copy data from the packet to the memory at the provided pointer.
Addr getOffset(Addr addr)
static bool getCooldownEnabled()
RubyRequestType m_second_type
std::vector< Stats::Histogram * > m_missMachLatencyHist
Histograms for profiling the latencies for requests that required external messages.
void ruby_hit_callback(PacketPtr pkt)
void setLocked(int context)
uint64_t Addr
Address type. This will probably be moved somewhere else in the near future.
void issueRequest(PacketPtr pkt, RubyRequestType type)
A Packet is used to encapsulate a transfer between two objects in the memory system (e...
Addr makeLineAddress(Addr addr)
Tick clockEdge(Cycles cycles=Cycles(0)) const
Determine the tick when a cycle begins, by default the current one, but the argument also enables the...
std::string printAddress(Addr addr)
void reset()
Reset stat value to default.
void blockOnQueue(Addr, MessageBuffer *)
MessageBuffer * m_mandatory_q_ptr
void hitCallback(SequencerRequest *srequest, DataBlock &data, bool llscSuccess, const MachineType mach, const bool externalHit, const Cycles initialRequestTime, const Cycles forwardRequestTime, const Cycles firstResponseTime)
std::vector< std::vector< Stats::Histogram * > > m_missTypeMachLatencyHist
std::vector< Stats::Counter > m_IncompleteTimes
void regStats()
Callback to set stat parameters.
Declaration of the Packet class.
virtual Cycles mandatoryQueueLatency(const RubyRequestType ¶m_type)
bool m_runningGarnetStandalone
const ContextID InvalidContextID
void schedule(Event &event, Tick when)
const T * getConstPtr() const
void setData(const uint8_t *data, int offset, int len)
std::vector< Stats::Histogram * > m_hitMachLatencyHist
Histograms for profiling the latencies for requests that did not require external messages.
Cycles m_deadlock_threshold
const std::string & cmdString() const
Return the string name of the cmd field (for debugging and tracing).
void readCallback(Addr address, DataBlock &data, const bool externalHit=false, const MachineType mach=MachineType_NUM, const Cycles initialRequestTime=Cycles(0), const Cycles forwardRequestTime=Cycles(0), const Cycles firstResponseTime=Cycles(0))
RequestStatus makeRequest(PacketPtr pkt)
AbstractCacheEntry * lookup(Addr address)
bool isBlocked(Addr) const
void writeCallback(Addr address, DataBlock &data, const bool externalHit=false, const MachineType mach=MachineType_NUM, const Cycles initialRequestTime=Cycles(0), const Cycles forwardRequestTime=Cycles(0), const Cycles firstResponseTime=Cycles(0))
void enqueue(MsgPtr message, Tick curTime, Tick delta)
Stats::Histogram m_hitLatencyHist
Histogram for holding the latency profile of all requests that hit in the controller connected to this sequencer.
int ContextID
Globally unique thread context ID.
void sample(const U &v, int n=1)
Add a value to the distribution n times.
void checkCoherence(Addr address)
std::vector< std::vector< Stats::Histogram * > > m_hitTypeMachLatencyHist
Arch getArch() const
Get the architecture.
Sequencer(const Params *)