#include "debug/LLSC.hh"
#include "debug/MemoryAccess.hh"
#include "debug/ProtocolTrace.hh"
#include "debug/RubySequencer.hh"
#include "debug/RubyStats.hh"
// ...
#include "mem/ruby/protocol/PrefetchBit.hh"
#include "mem/ruby/protocol/RubyAccessMode.hh"
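
// Factory hook and constructor: the Sequencer is built from its Python
// params, caching the I/D cache pointers plus the outstanding-request and
// deadlock-check limits, and registering an event that periodically calls
// wakeup() to look for stuck requests.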
Sequencer *
RubySequencerParams::create()
{
    return new Sequencer(this);
}

Sequencer::Sequencer(const Params *p)
    : RubyPort(p), m_IncompleteTimes(MachineType_NUM),
      deadlockCheckEvent([this]{ wakeup(); }, "Sequencer deadlock check")
{
    m_outstanding_count = 0;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;

    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);
    assert(m_instCache_ptr != NULL);
    assert(m_dataCache_ptr != NULL);

    m_runningGarnetStandalone = p->garnet_standalone;
}
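
// LL/SC monitor tracing: a reservation is noted when a load-linked executes
// and is cleared either by an ordinary store to the line or by the store
// conditional itself (excerpted trace statements below).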
    DPRINTF(LLSC, "LLSC Monitor - inserting load linked - "
                  "addr=0x%lx - cpu=%u\n", claddr, m_version);
// ...
    DPRINTF(LLSC, "LLSC Monitor - clearing due to store - "
                  "addr=0x%lx - cpu=%u\n", claddr, m_version);
// ...
    DPRINTF(LLSC, "LLSC Monitor - clearing due to "
                  "store conditional - "
                  "addr=0x%lx - cpu=%u\n",
                  claddr, m_version);
    // deadlock check over all outstanding requests
    Cycles current_time = curCycle();
    int total_outstanding = 0;

    for (const auto &table_entry : m_RequestTable) {
        for (const auto &seq_req : table_entry.second) {
            if (current_time - seq_req.issue_time < m_deadlock_threshold)
                continue;

            panic("Possible Deadlock detected. Aborting!\n version: %d "
                  "request.paddr: 0x%x m_readRequestTable: %d current time: "
                  "%u issue_time: %d difference: %d\n", m_version,
                  seq_req.pkt->getAddr(), table_entry.second.size(),
                  current_time * clockPeriod(), seq_req.issue_time
                  * clockPeriod(), (current_time * clockPeriod())
                  - (seq_req.issue_time * clockPeriod()));
        }
        total_outstanding += table_entry.second.size();
    }
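
// Functional writes are forwarded to every queued SequencerRequest so the
// data carried by in-flight packets reflects the write.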
    for (const auto &table_entry : m_RequestTable) {
        for (const auto &seq_req : table_entry.second) {
            if (seq_req.functionalWrite(func_pkt))
                ++num_written;
        }
    }
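
// Latency statistics are kept per request type and per responding machine
// type; the loops below walk those two enums (bodies elided).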
    for (int i = 0; i < RubyRequestType_NUM; i++) {
        // ...
        for (int j = 0; j < MachineType_NUM; j++) {
            // ...
        }
    }

    for (int i = 0; i < MachineType_NUM; i++) {
        // ...
    }
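
// insertRequest() appends the packet to the per-line request list. If the
// list already had an entry, an older request to the same line is still
// outstanding and the new one is reported as aliased rather than ready.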
RequestStatus
Sequencer::insertRequest(PacketPtr pkt, RubyRequestType primary_type,
                         RubyRequestType secondary_type)
{
    // ...
    seq_req_list.emplace_back(pkt, primary_type, secondary_type, curCycle());
    if (seq_req_list.size() > 1) {
        return RequestStatus_Aliased;
    }
    return RequestStatus_Ready;
}
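
// recordMissLatency() profiles the total latency of a completed request and,
// when the protocol supplied consistently ordered timestamps, also the
// issue->initial, initial->forward, forward->first-response and
// first-response->completion components per responding machine.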
void
Sequencer::recordMissLatency(SequencerRequest* srequest, bool llscSuccess,
                             const MachineType respondingMach,
                             bool isExternalHit, Cycles initialRequestTime,
                             Cycles forwardRequestTime,
                             Cycles firstResponseTime)
{
    // ...
    Cycles total_lat = completion_time - issued_time;

    if (initialRequestTime < issued_time) {
        // ...
    }

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n",
             curTick(), m_version, "Seq", llscSuccess ? "Done" : "SC_Failed",
             "", "", printAddress(srequest->pkt->getAddr()), total_lat);
    // ...
    if (respondingMach != MachineType_NUM) {
        // ...
        if ((issued_time <= initialRequestTime) &&
            (initialRequestTime <= forwardRequestTime) &&
            (forwardRequestTime <= firstResponseTime) &&
            (firstResponseTime <= completion_time)) {
            m_IssueToInitialDelayHist[respondingMach]->sample(
                initialRequestTime - issued_time);
            m_InitialToForwardDelayHist[respondingMach]->sample(
                forwardRequestTime - initialRequestTime);
            m_ForwardToFirstResponseDelayHist[respondingMach]->sample(
                firstResponseTime - forwardRequestTime);
            m_FirstResponseToCompletionDelayHist[respondingMach]->sample(
                completion_time - firstResponseTime);
        }
    }
    // ...
    if (respondingMach != MachineType_NUM) {
        // ...
    }
}
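
// writeCallback() drains the whole per-line list, since the line is held
// exclusively when the store response returns: every queued store (and any
// aliased loads behind it) is completed through hitCallback(). Store
// conditionals consult the LL/SC monitor and record success in the request's
// extra data, while Locked_RMW reads/writes block and unblock the mandatory
// queue around the read-modify-write pair.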
void
Sequencer::writeCallback(Addr address, DataBlock& data,
                         const bool externalHit, const MachineType mach,
                         const Cycles initialRequestTime,
                         const Cycles forwardRequestTime,
                         const Cycles firstResponseTime)
{
    // ...
    bool ruby_request = true;
    int aliased_stores = 0;
    int aliased_loads = 0;
    while (!seq_req_list.empty()) {
        SequencerRequest &seq_req = seq_req_list.front();
        if (ruby_request) {
            assert(seq_req.m_type != RubyRequestType_LD);
            assert(seq_req.m_type != RubyRequestType_Load_Linked);
            assert(seq_req.m_type != RubyRequestType_IFETCH);
        }

        if ((seq_req.m_type != RubyRequestType_LD) &&
            (seq_req.m_type != RubyRequestType_Load_Linked) &&
            (seq_req.m_type != RubyRequestType_IFETCH)) {
            // LL/SC handling
            bool success = true;
            if (seq_req.m_type != RubyRequestType_Store_Conditional) {
                // an ordinary store clears any monitor on this line
                llscClearMonitor(address);
            } else {
                // a store conditional succeeds only if the monitor is set
                success = llscStoreConditional(address);
                seq_req.pkt->req->setExtraData(success ? 1 : 0);
            }

            if (seq_req.m_type == RubyRequestType_Locked_RMW_Read) {
                m_controller->blockOnQueue(address, m_mandatory_q_ptr);
            } else if (seq_req.m_type == RubyRequestType_Locked_RMW_Write) {
                m_controller->unblock(address);
            }

            if (ruby_request) {
                recordMissLatency(&seq_req, success, mach, externalHit,
                                  initialRequestTime, forwardRequestTime,
                                  firstResponseTime);
            } else {
                aliased_stores++;
            }
            markRemoved();
            ruby_request = false;
            hitCallback(&seq_req, data, success, mach, externalHit,
                        initialRequestTime, forwardRequestTime,
                        firstResponseTime);
        } else {
            // aliased load behind the store
            assert(!ruby_request);
            markRemoved();
            ruby_request = false;
            aliased_loads++;
            hitCallback(&seq_req, data, true, mach, externalHit,
                        initialRequestTime, forwardRequestTime,
                        firstResponseTime);
        }
        seq_req_list.pop_front();
    }

    // all requests for this line are done; drop the table entry
    if (seq_req_list.empty()) {
        m_RequestTable.erase(address);
    }
}
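
// readCallback() completes queued LD/Load_Linked/IFETCH requests for the
// line; only the first (Ruby) request is profiled, and draining stops if a
// write is reached.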
void
Sequencer::readCallback(Addr address, DataBlock& data,
                        bool externalHit, const MachineType mach,
                        Cycles initialRequestTime,
                        Cycles forwardRequestTime,
                        Cycles firstResponseTime)
{
    // ...
    bool ruby_request = true;
    int aliased_loads = 0;
    while (!seq_req_list.empty()) {
        SequencerRequest &seq_req = seq_req_list.front();
        if (ruby_request) {
            assert((seq_req.m_type == RubyRequestType_LD) ||
                   (seq_req.m_type == RubyRequestType_Load_Linked) ||
                   (seq_req.m_type == RubyRequestType_IFETCH));
        } else {
            aliased_loads++;
        }
        if ((seq_req.m_type != RubyRequestType_LD) &&
            (seq_req.m_type != RubyRequestType_Load_Linked) &&
            (seq_req.m_type != RubyRequestType_IFETCH)) {
            // a write is queued behind the reads; stop draining here
            // ...
            break;
        }
        if (ruby_request) {
            recordMissLatency(&seq_req, true, mach, externalHit,
                              initialRequestTime, forwardRequestTime,
                              firstResponseTime);
        }
        markRemoved();
        ruby_request = false;
        hitCallback(&seq_req, data, true, mach, externalHit,
                    initialRequestTime, forwardRequestTime,
                    firstResponseTime);
        seq_req_list.pop_front();
    }

    if (seq_req_list.empty()) {
        m_RequestTable.erase(address);
    }
}
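
// hitCallback() moves data between the packet and the cache DataBlock based
// on the request type: reads copy data out of the block, swaps exchange it,
// and stores write it in unless a store conditional failed. With the
// RubyTester attached, the packet's sender state is retrieved so the tester
// can observe the returned data; during warmup/cooldown the packet is
// consumed by the cache recorder instead of being returned to the core.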
void
Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data,
                       bool llscSuccess,
                       const MachineType mach, const bool externalHit,
                       const Cycles initialRequestTime,
                       const Cycles forwardRequestTime,
                       const Cycles firstResponseTime)
{
    warn_once("Replacement policy updates recently became the responsibility "
              "of SLICC state machines. Make sure to setMRU() near callbacks "
              "in .sm files!");

    // ...
    if (type == RubyRequestType_Load_Linked) {
        // set the LL/SC monitor for this line
        // ...
    }

    // update the data unless it is a non-data-carrying flush
    // ...
        if ((type == RubyRequestType_LD) ||
            (type == RubyRequestType_IFETCH) ||
            (type == RubyRequestType_RMW_Read) ||
            (type == RubyRequestType_Locked_RMW_Read) ||
            (type == RubyRequestType_Load_Linked)) {
            // read: copy the block's data into the packet
            // ...
        } else if (pkt->req->isSwap()) {
            // swap: exchange the packet data with the block's data
            // ...
            data.setData(&overwrite_val[0],
                         getOffset(request_address), pkt->getSize());
        } else if (type != RubyRequestType_Store_Conditional || llscSuccess) {
            // store (unless a failed store conditional): write into the block
            // ...
        }
    // ...

    if (m_usingRubyTester) {
        DPRINTF(RubySequencer, "hitCallback %s 0x%x using RubyTester\n",
                pkt->cmdString(), pkt->getAddr());
        // ...
        assert(testerSenderState);
        // ...
    }

    RubySystem *rs = m_ruby_system;
    if (RubySystem::getWarmupEnabled()) {
        // ...
        rs->m_cache_recorder->enqueueNextFetchRequest();
    } else if (RubySystem::getCooldownEnabled()) {
        // ...
        rs->m_cache_recorder->enqueueNextFlushRequest();
    } else {
        // ...
    }
}
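
// makeRequest() is the external entry point: it rejects new packets when the
// sequencer is full (HTM aborts are exempt), classifies the packet into
// primary and secondary RubyRequestTypes (LL/SC, locked RMW, plain
// load/store, ifetch, flush, HTM command), and then inserts and, if it does
// not alias an outstanding request, issues it.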
RequestStatus
Sequencer::makeRequest(PacketPtr pkt)
{
    // HTM abort signals must be allowed through even when the sequencer is full
    if ((m_outstanding_count >= m_max_outstanding_requests) &&
        !pkt->req->isHTMAbort()) {
        return RequestStatus_BufferFull;
    }

    RubyRequestType primary_type = RubyRequestType_NULL;
    RubyRequestType secondary_type = RubyRequestType_NULL;

    if (pkt->isLLSC()) {
        // ...
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing SC\n");
            primary_type = RubyRequestType_Store_Conditional;
#if defined (PROTOCOL_MESI_Three_Level) || defined (PROTOCOL_MESI_Three_Level_HTM)
            secondary_type = RubyRequestType_Store_Conditional;
#else
            secondary_type = RubyRequestType_ST;
#endif
        } else {
            DPRINTF(RubySequencer, "Issuing LL\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Load_Linked;
            secondary_type = RubyRequestType_LD;
        }
    } else if (pkt->req->isLockedRMW()) {
        // ...
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
            primary_type = RubyRequestType_Locked_RMW_Write;
        } else {
            DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Locked_RMW_Read;
        }
        secondary_type = RubyRequestType_ST;
    } else {
        if (pkt->isWrite()) {
            // M5 packets do not differentiate ST from RMW_Write
            primary_type = secondary_type = RubyRequestType_ST;
        } else if (pkt->isRead()) {
            if (pkt->req->isHTMCmd()) {
                // ...
            } else if (pkt->req->isInstFetch()) {
                primary_type = secondary_type = RubyRequestType_IFETCH;
            } else {
                bool storeCheck = false;
                // only X86 needs the store check
                // ...
                uint32_t flags = pkt->req->getFlags();
                // ...
                if (storeCheck) {
                    primary_type = RubyRequestType_RMW_Read;
                    secondary_type = RubyRequestType_ST;
                } else {
                    primary_type = secondary_type = RubyRequestType_LD;
                }
            }
        } else if (pkt->isFlush()) {
            primary_type = secondary_type = RubyRequestType_FLUSH;
        } else {
            panic("Unsupported ruby packet type\n");
        }
    }

    // Check if the line is blocked for a Locked_RMW
    if (m_controller->isBlocked(makeLineAddress(pkt->getAddr())) &&
        (primary_type != RubyRequestType_Locked_RMW_Write)) {
        // ...
        return RequestStatus_Aliased;
    }

    RequestStatus status = insertRequest(pkt, primary_type, secondary_type);
    if (status != RequestStatus_Ready && status != RequestStatus_Aliased)
        return status;
    // a request that does not alias an outstanding one is issued immediately
    if (status != RequestStatus_Aliased)
        issueRequest(pkt, secondary_type);

    return RequestStatus_Issued;
}
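
// issueRequest() packages the packet into a RubyRequest message (flush
// requests carry no data pointer), traces it via ProtocolTrace, and marks it
// as transactional when the packet originates inside a hardware transaction.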
void
Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
{
    // ...
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    std::shared_ptr<RubyRequest> msg =
        std::make_shared<RubyRequest>(clockEdge(), pkt->getAddr(),
                                      pkt->isFlush() ?
                                      nullptr : pkt->getPtr<uint8_t>(),
                                      pkt->getSize(), pc, secondary_type,
                                      RubyAccessMode_Supervisor, pkt,
                                      PrefetchBit_No, proc_id, core_id);

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %s\n",
             curTick(), m_version, "Seq", "Begin", "", "",
             printAddress(msg->getPhysicalAddress()),
             RubyRequestType_to_string(secondary_type));

    if (pkt->isHtmTransactional()) {
        msg->m_htmFromTransaction = true;
        // ...
    }
    // ...
}
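
// Debug helper: prints, for every line in a request table, the secondary
// type of each request queued on that line.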
template <class KEY, class VALUE>
std::ostream &
operator<<(ostream &out, const std::unordered_map<KEY, VALUE> &map)
{
    for (const auto &table_entry : map) {
        out << "[ " << table_entry.first << " =";
        for (const auto &seq_req : table_entry.second) {
            out << " " << RubyRequestType_to_string(seq_req.m_second_type);
        }
        // ...
    }
    // ...
    return out;
}
void
Sequencer::recordRequestType(SequencerRequestType requestType)
{
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            SequencerRequestType_to_string(requestType));
}
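
// The statistics setup below iterates the request-type and machine-type
// enums to create the per-type and per-machine structures sampled above
// (loop bodies elided).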
    for (int i = 0; i < RubyRequestType_NUM; i++) {
        // ...
    }

    for (int i = 0; i < MachineType_NUM; i++) {
        // ...
    }

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        // ...
        for (int j = 0; j < MachineType_NUM; j++) {
            // ...
        }
    }