49#include "debug/LLSC.hh"
50#include "debug/MemoryAccess.hh"
51#include "debug/ProtocolTrace.hh"
52#include "debug/RubyHitMiss.hh"
53#include "debug/RubySequencer.hh"
54#include "debug/RubyStats.hh"
57#include "mem/ruby/protocol/PrefetchBit.hh"
58#include "mem/ruby/protocol/RubyAccessMode.hh"
Sequencer::Sequencer(const Params &p)
    : RubyPort(p), m_IncompleteTimes(MachineType_NUM),
      deadlockCheckEvent([this]{ wakeup(); }, "Sequencer deadlock check")
{
    m_outstanding_count = 0;

    m_ruby_system = p.ruby_system;
    m_dataCache_ptr = p.dcache;
    m_max_outstanding_requests = p.max_outstanding_requests;
    m_deadlock_threshold = p.deadlock_threshold;

    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);

    m_unaddressedTransactionCnt = 0;

    m_runningGarnetStandalone = p.garnet_standalone;

    m_num_pending_invs = 0;
    m_cache_inv_pkt = nullptr;

    m_outstandReqHist.init(10);
    m_latencyHist.init(10);
    m_hitLatencyHist.init(10);
    m_missLatencyHist.init(10);
    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist.push_back(new statistics::Histogram());
        m_typeLatencyHist[i]->init(10);

        m_hitTypeLatencyHist.push_back(new statistics::Histogram());
        m_hitTypeLatencyHist[i]->init(10);

        m_missTypeLatencyHist.push_back(new statistics::Histogram());
        m_missTypeLatencyHist[i]->init(10);
    }
    for (int i = 0; i < MachineType_NUM; i++) {
        m_hitMachLatencyHist.push_back(new statistics::Histogram());
        m_hitMachLatencyHist[i]->init(10);

        m_missMachLatencyHist.push_back(new statistics::Histogram());
        m_missMachLatencyHist[i]->init(10);

        m_IssueToInitialDelayHist.push_back(new statistics::Histogram());
        m_IssueToInitialDelayHist[i]->init(10);

        m_InitialToForwardDelayHist.push_back(new statistics::Histogram());
        m_InitialToForwardDelayHist[i]->init(10);

        m_ForwardToFirstResponseDelayHist.push_back(
            new statistics::Histogram());
        m_ForwardToFirstResponseDelayHist[i]->init(10);

        m_FirstResponseToCompletionDelayHist.push_back(
            new statistics::Histogram());
        m_FirstResponseToCompletionDelayHist[i]->init(10);
    }
    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_hitTypeMachLatencyHist.push_back(
            std::vector<statistics::Histogram *>());
        m_missTypeMachLatencyHist.push_back(
            std::vector<statistics::Histogram *>());

        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i].push_back(
                new statistics::Histogram());
            m_hitTypeMachLatencyHist[i][j]->init(10);

            m_missTypeMachLatencyHist[i].push_back(
                new statistics::Histogram());
            m_missTypeMachLatencyHist[i][j]->init(10);
        }
    }
}
160 "%s must have a dcache object to support LLSC requests.",
name());
164 DPRINTF(LLSC,
"LLSC Monitor - inserting load linked - "
165 "addr=0x%lx - cpu=%u\n", claddr,
m_version);
178 DPRINTF(LLSC,
"LLSC Monitor - clearing due to store - "
179 "addr=0x%lx - cpu=%u\n", claddr,
m_version);
187 "%s must have a dcache object to support LLSC requests.",
name());
192 DPRINTF(LLSC,
"LLSC Monitor - clearing due to "
193 "store conditional - "
194 "addr=0x%lx - cpu=%u\n",
    Cycles current_time = curCycle();
    [[maybe_unused]] int total_outstanding = 0;

    for (const auto &table_entry : m_RequestTable) {
        for (const auto &seq_req : table_entry.second) {
            if (current_time - seq_req.issue_time < m_deadlock_threshold)
                continue;

            panic("Possible Deadlock detected. Aborting!\n version: %d "
                  "request.paddr: 0x%x m_readRequestTable: %d current time: "
                  "%u issue_time: %d difference: %d\n", m_version,
                  seq_req.pkt->getAddr(), table_entry.second.size(),
                  current_time, seq_req.issue_time,
                  current_time - seq_req.issue_time);
        }
        total_outstanding += table_entry.second.size();
    }
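A standalone sketch of this watchdog pattern, with simplified types that are not gem5's:

#include <cstdint>
#include <list>
#include <stdexcept>
#include <unordered_map>

using Addr = std::uint64_t;
using Cycles = std::uint64_t;

struct Request { Cycles issue_time; };

// Called periodically: abort if any outstanding request is older than the
// deadlock threshold.
void
deadlockCheck(const std::unordered_map<Addr, std::list<Request>> &table,
              Cycles now, Cycles threshold)
{
    for (const auto &entry : table) {
        for (const auto &req : entry.second) {
            if (now - req.issue_time >= threshold)
                throw std::runtime_error("possible deadlock detected");
        }
    }
    // ...the real sequencer reschedules the check event here.
}

int
main()
{
    std::unordered_map<Addr, std::list<Request>> table;
    table[0x40].push_back({/*issue_time=*/10});
    deadlockCheck(table, /*now=*/100, /*threshold=*/500); // no throw
    return 0;
}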
    for (const auto &table_entry : m_RequestTable) {
        for (const auto &seq_req : table_entry.second) {
            if (seq_req.functionalWrite(func_pkt))
                ++num_written;
        }
    }
    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist[i]->reset();
        m_hitTypeLatencyHist[i]->reset();
        m_missTypeLatencyHist[i]->reset();
        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i][j]->reset();
            m_missTypeMachLatencyHist[i][j]->reset();
        }
    }
    for (int i = 0; i < MachineType_NUM; i++) {
        m_hitMachLatencyHist[i]->reset();
        m_missMachLatencyHist[i]->reset();
    }
RequestStatus
Sequencer::insertRequest(PacketPtr pkt, RubyRequestType primary_type,
                         RubyRequestType secondary_type)
{
    assert(primary_type == secondary_type);

    switch (primary_type) {
      case RubyRequestType_TLBI_EXT_SYNC_COMP:
        // No data to store for this request.
        break;
      case RubyRequestType_TLBI:
      case RubyRequestType_TLBI_SYNC:
        {
            incrementUnaddressedTransactionCnt();
            [[maybe_unused]] auto insert_data =
                m_UnaddressedRequestTable.emplace(
                    getCurrentUnaddressedTransactionID(),
                    SequencerRequest(pkt, primary_type, secondary_type,
                                     curCycle()));
            assert(insert_data.second &&
                   "Another TLBI request with the same ID exists");

            DPRINTF(RubySequencer, "Inserting TLBI request %016x\n",
                    getCurrentUnaddressedTransactionID());
        }
        break;
      default:
        panic("Unexpected TLBI RubyRequestType");
    }

    return RequestStatus_Ready;

    Addr line_addr = makeLineAddress(pkt->getAddr());
    // Check if there is an outstanding request for the same cache line.
    auto &seq_req_list = m_RequestTable[line_addr];
    seq_req_list.emplace_back(pkt, primary_type,
                              secondary_type, curCycle());

    if (seq_req_list.size() > 1) {
        return RequestStatus_Aliased;
    }

    return RequestStatus_Ready;
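The per-line coalescing above is easy to reproduce standalone: the first request on a line is issued ("Ready"), younger ones queue behind it ("Aliased") and are serviced when the line's response drains the list. A minimal sketch with illustrative types:

#include <cstdint>
#include <iostream>
#include <list>
#include <unordered_map>

using Addr = std::uint64_t;

enum class Status { Ready, Aliased };

struct Req { int id; };

// Illustrative per-cache-line request table.
class RequestTable
{
    std::unordered_map<Addr, std::list<Req>> table;

  public:
    Status insert(Addr lineAddr, Req req)
    {
        auto &list = table[lineAddr];
        list.push_back(req);
        return list.size() > 1 ? Status::Aliased : Status::Ready;
    }
};

int
main()
{
    RequestTable t;
    std::cout << (t.insert(0x80, {1}) == Status::Ready)   // prints 1
              << (t.insert(0x80, {2}) == Status::Aliased) // prints 1
              << std::endl;
    return 0;
}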
void
Sequencer::recordMissLatency(SequencerRequest *srequest, bool llscSuccess,
                             const MachineType respondingMach,
                             bool isExternalHit, Cycles initialRequestTime,
                             Cycles forwardRequestTime,
                             Cycles firstResponseTime)
{
    Cycles issued_time = srequest->issue_time;
    Cycles completion_time = curCycle();
    Cycles total_lat = completion_time - issued_time;

    if ((initialRequestTime != 0) && (initialRequestTime < issued_time)) {
        // Combined in the protocol with an earlier request for the same
        // address; its timestamps belong to that request, so skip stats.
        return;
    }

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n",
             curTick(), m_version, "Seq", llscSuccess ? "Done" : "SC_Failed",
             "", "", printAddress(srequest->pkt->getAddr()), total_lat);

    if (isExternalHit && respondingMach != MachineType_NUM) {
        // Record the miss breakdown only if the timestamps are monotonic.
        if ((issued_time <= initialRequestTime) &&
            (initialRequestTime <= forwardRequestTime) &&
            (forwardRequestTime <= firstResponseTime) &&
            (firstResponseTime <= completion_time)) {
            m_IssueToInitialDelayHist[respondingMach]->sample(
                initialRequestTime - issued_time);
            m_InitialToForwardDelayHist[respondingMach]->sample(
                forwardRequestTime - initialRequestTime);
            m_ForwardToFirstResponseDelayHist[respondingMach]->sample(
                firstResponseTime - forwardRequestTime);
            m_FirstResponseToCompletionDelayHist[respondingMach]->sample(
                completion_time - firstResponseTime);
        }
    } else if (respondingMach != MachineType_NUM) {
        m_hitMachLatencyHist[respondingMach]->sample(total_lat);
    }
}
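The four recorded segments partition the total miss latency exactly, which a small worked example can check (timestamps are hypothetical, in cycles):

#include <cassert>
#include <cstdint>
#include <iostream>

using Cycles = std::uint64_t;

int
main()
{
    // Hypothetical timestamps along one miss.
    Cycles issued = 100, initial = 104, forward = 120,
           firstResp = 150, completion = 155;

    assert(issued <= initial && initial <= forward &&
           forward <= firstResp && firstResp <= completion);

    // The four segments sum to the total latency.
    Cycles segments = (initial - issued) + (forward - initial) +
                      (firstResp - forward) + (completion - firstResp);
    std::cout << segments << " == " << completion - issued << std::endl;
    return 0;
}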
void
Sequencer::writeCallback(Addr address, DataBlock &data,
                         const bool externalHit, const MachineType mach,
                         const Cycles initialRequestTime,
                         const Cycles forwardRequestTime,
                         const Cycles firstResponseTime,
                         const bool noCoales)
{
    assert(address == makeLineAddress(address));
    auto &seq_req_list = m_RequestTable[address];

    // Perform hitCallback on every cpu request made to this cache block
    // while the ruby request was outstanding. Since only one ruby request
    // was made, profile the ruby latency once.
    bool ruby_request = true;
    while (!seq_req_list.empty()) {
        SequencerRequest &seq_req = seq_req_list.front();

        bool atomic_req =
            ((seq_req.m_type == RubyRequestType_ATOMIC_RETURN) ||
             (seq_req.m_type == RubyRequestType_ATOMIC_NO_RETURN));

        if ((noCoales || atomic_req) && !ruby_request) {
            // Do not process follow-up requests; reissue to the hierarchy.
            issueRequest(seq_req.pkt, seq_req.m_second_type);
            break;
        }

        if (ruby_request) {
            assert(seq_req.m_type != RubyRequestType_LD);
            assert(seq_req.m_type != RubyRequestType_Load_Linked);
            assert(seq_req.m_type != RubyRequestType_IFETCH);
            assert(seq_req.m_type != RubyRequestType_ATOMIC_RETURN);
            assert(seq_req.m_type != RubyRequestType_ATOMIC_NO_RETURN);
        }

        if ((seq_req.m_type != RubyRequestType_LD) &&
            (seq_req.m_type != RubyRequestType_Load_Linked) &&
            (seq_req.m_type != RubyRequestType_IFETCH)) {
            // LL/SC support: regular stores clear the monitor; store
            // conditionals must first check whether they succeed.
            bool success = true;
            if (seq_req.m_type != RubyRequestType_Store_Conditional) {
                llscClearMonitor(address);
            } else {
                success = llscStoreConditional(address);
                seq_req.pkt->req->setExtraData(success ? 1 : 0);
            }

            // Handle SLICC block_on behavior for Locked_RMW accesses.
            if (seq_req.m_type == RubyRequestType_Locked_RMW_Read) {
                m_controller->blockOnQueue(address, m_mandatory_q_ptr);
            } else if (seq_req.m_type == RubyRequestType_Locked_RMW_Write) {
                m_controller->unblock(address);
            }

            if (ruby_request) {
                recordMissLatency(&seq_req, success, mach, externalHit,
                                  initialRequestTime, forwardRequestTime,
                                  firstResponseTime);
            }
            hitCallback(&seq_req, data, success, mach, externalHit,
                        initialRequestTime, forwardRequestTime,
                        firstResponseTime, !ruby_request);
            ruby_request = false;
        } else {
            assert(!ruby_request);
            hitCallback(&seq_req, data, true, mach, externalHit,
                        initialRequestTime, forwardRequestTime,
                        firstResponseTime, !ruby_request);
        }
        seq_req_list.pop_front();
    }

    // Free all outstanding requests corresponding to this address.
    if (seq_req_list.empty()) {
        m_RequestTable.erase(address);
    }
}
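The drain loop's shape is worth isolating: the front request is the one Ruby actually issued, and every younger coalesced request completes from the same response. A toy sketch (types are illustrative):

#include <iostream>
#include <list>

struct Req { int id; };

int
main()
{
    std::list<Req> reqs = {{1}, {2}, {3}};
    bool ruby_request = true;
    while (!reqs.empty()) {
        const Req &r = reqs.front();
        if (ruby_request) // only the issued request profiles latency
            std::cout << "req " << r.id << ": profiled miss latency\n";
        std::cout << "req " << r.id << ": hit callback\n";
        ruby_request = false;
        reqs.pop_front();
    }
    return 0;
}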
bool
Sequencer::processReadCallback(SequencerRequest &seq_req, DataBlock &data,
                               const bool rubyRequest, bool externalHit,
                               const MachineType mach,
                               Cycles initialRequestTime,
                               Cycles forwardRequestTime,
                               Cycles firstResponseTime)
{
    if (rubyRequest) {
        // The front request that Ruby issued must itself be a read.
        assert((seq_req.m_type == RubyRequestType_LD) ||
               (seq_req.m_type == RubyRequestType_Load_Linked) ||
               (seq_req.m_type == RubyRequestType_IFETCH));
    }

    if ((seq_req.m_type != RubyRequestType_LD) &&
        (seq_req.m_type != RubyRequestType_Load_Linked) &&
        (seq_req.m_type != RubyRequestType_IFETCH) &&
        (seq_req.m_type != RubyRequestType_REPLACEMENT)) {
        // An aliased write cannot be serviced by this read response;
        // stop draining the list here.
        return true;
    }
    return false;
}
void
Sequencer::readCallback(Addr address, DataBlock &data, bool externalHit,
                        const MachineType mach, Cycles initialRequestTime,
                        Cycles forwardRequestTime, Cycles firstResponseTime)
{
    assert(address == makeLineAddress(address));
    auto &seq_req_list = m_RequestTable[address];

    // Free read requests until the first write request or the end of the
    // list; only the first (ruby) request profiles latency.
    bool ruby_request = true;
    while (!seq_req_list.empty()) {
        SequencerRequest &seq_req = seq_req_list.front();

        if (processReadCallback(seq_req, data, ruby_request, externalHit,
                                mach, initialRequestTime, forwardRequestTime,
                                firstResponseTime)) {
            break;
        }

        if (ruby_request) {
            recordMissLatency(&seq_req, true, mach, externalHit,
                              initialRequestTime, forwardRequestTime,
                              firstResponseTime);
        }
        hitCallback(&seq_req, data, true, mach, externalHit,
                    initialRequestTime, forwardRequestTime,
                    firstResponseTime, !ruby_request);
        ruby_request = false;
        seq_req_list.pop_front();
    }

    if (seq_req_list.empty()) {
        m_RequestTable.erase(address);
    }
}
void
Sequencer::atomicCallback(Addr address, DataBlock &data,
                          const bool externalHit, const MachineType mach,
                          const Cycles initialRequestTime,
                          const Cycles forwardRequestTime,
                          const Cycles firstResponseTime)
{
    assert(address == makeLineAddress(address));
    auto &seq_req_list = m_RequestTable[address];

    // Perform hitCallback only on the first request, the one that issued
    // the ruby request; requests behind it are reissued because the
    // line's state may have changed.
    bool ruby_request = true;
    while (!seq_req_list.empty()) {
        SequencerRequest &seq_req = seq_req_list.front();

        if (ruby_request) {
            assert((seq_req.m_type == RubyRequestType_ATOMIC_RETURN) ||
                   (seq_req.m_type == RubyRequestType_ATOMIC_NO_RETURN));
            recordMissLatency(&seq_req, true, mach, externalHit,
                              initialRequestTime, forwardRequestTime,
                              firstResponseTime);
        } else {
            issueRequest(seq_req.pkt, seq_req.m_second_type);
            break;
        }

        ruby_request = false;
        hitCallback(&seq_req, data, true, mach, externalHit,
                    initialRequestTime, forwardRequestTime,
                    firstResponseTime, false);
        seq_req_list.pop_front();
    }

    if (seq_req_list.empty()) {
        m_RequestTable.erase(address);
    }
}
void
Sequencer::hitCallback(SequencerRequest *srequest, DataBlock &data,
                       bool llscSuccess,
                       const MachineType mach, const bool externalHit,
                       const Cycles initialRequestTime,
                       const Cycles forwardRequestTime,
                       const Cycles firstResponseTime,
                       const bool was_coalesced)
{
    warn_once("Replacement policy updates recently became the responsibility "
              "of SLICC state machines. Make sure to setMRU() near callbacks "
              "in .sm files!");

    PacketPtr pkt = srequest->pkt;
    Addr request_address(pkt->getAddr());
    RubyRequestType type = srequest->m_type;

    if (was_coalesced) {
        // Notify the controller about a coalesced request so it can
        // account for it in its hit/miss stats and/or train prefetchers.
        m_controller->notifyCoalesced(request_address, type, pkt->req,
                                      data, externalHit);
    }

    // Load-linked handling
    if (type == RubyRequestType_Load_Linked) {
        llscLoadLinked(makeLineAddress(request_address));
    }

    DPRINTF(RubyHitMiss, "Cache %s at %#x\n",
            externalHit ? "miss" : "hit",
            printAddress(request_address));

    // Update the packet or line data, unless this is a flush.
    if (!pkt->isFlush()) {
        if ((type == RubyRequestType_LD) ||
            (type == RubyRequestType_IFETCH) ||
            (type == RubyRequestType_RMW_Read) ||
            (type == RubyRequestType_Locked_RMW_Read) ||
            (type == RubyRequestType_Load_Linked) ||
            (type == RubyRequestType_ATOMIC_RETURN)) {
            pkt->setData(
                data.getData(getOffset(request_address), pkt->getSize()));
            if (type == RubyRequestType_ATOMIC_RETURN) {
                DPRINTF(RubySequencer, "ATOMIC RETURN data %s\n", data);
            }
        } else if (pkt->req->isSwap()) {
            assert(!pkt->isMaskedWrite());
            std::vector<uint8_t> overwrite_val(pkt->getSize());
            pkt->writeData(&overwrite_val[0]);
            pkt->setData(
                data.getData(getOffset(request_address), pkt->getSize()));
            data.setData(&overwrite_val[0],
                         getOffset(request_address), pkt->getSize());
            DPRINTF(RubySequencer, "AMO original data %s\n", data);
        } else if (type != RubyRequestType_Store_Conditional || llscSuccess) {
            // Stores update the line data unless this is a failed SC.
            data.setData(pkt);
        }
    }

    // If using the RubyTester, update the tester sender state's subBlock
    // with the received data.
    if (m_usingRubyTester) {
        DPRINTF(RubySequencer, "hitCallback %s 0x%x using RubyTester\n",
                pkt->cmdString(), pkt->getAddr());
        RubyTester::SenderState *testerSenderState =
            pkt->findNextSenderState<RubyTester::SenderState>();
        assert(testerSenderState);
        testerSenderState->subBlock.mergeFrom(data);
    }

    RubySystem *rs = m_ruby_system;
    if (rs->getWarmupEnabled()) {
        delete pkt;
        rs->m_cache_recorder->enqueueNextFetchRequest();
    } else if (rs->getCooldownEnabled()) {
        delete pkt;
        rs->m_cache_recorder->enqueueNextFlushRequest();
    } else {
        ruby_hit_callback(pkt);
    }
}
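The swap branch above exchanges packet and line data: the packet receives the old bytes (the AMO "original data") while the line receives the new value. A standalone illustration with plain arrays standing in for gem5's Packet and DataBlock:

#include <algorithm>
#include <array>
#include <cstdint>
#include <iostream>

int
main()
{
    std::array<std::uint8_t, 8> line{1, 2, 3, 4, 5, 6, 7, 8}; // cache line
    std::array<std::uint8_t, 4> pktData{0xAA, 0xBB, 0xCC, 0xDD};
    std::size_t offset = 2; // packet's offset within the line

    std::array<std::uint8_t, 4> overwrite_val = pktData; // value to store
    // The packet gets the original memory contents...
    std::copy_n(line.begin() + offset, pktData.size(), pktData.begin());
    // ...and the line gets the new value.
    std::copy_n(overwrite_val.begin(), overwrite_val.size(),
                line.begin() + offset);

    std::cout << "pkt holds old bytes, line holds new bytes\n";
    return 0;
}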
void
Sequencer::unaddressedCallback(Addr unaddressedReqId,
                               RubyRequestType reqType,
                               const MachineType mach,
                               const Cycles initialRequestTime,
                               const Cycles forwardRequestTime,
                               const Cycles firstResponseTime)
{
    DPRINTF(RubySequencer, "unaddressedCallback ID:%08x type:%d\n",
            unaddressedReqId, reqType);

    switch (reqType) {
      case RubyRequestType_TLBI_EXT_SYNC:
        // This sync was not requested by this sequencer, so it has no
        // entry in the request table.
        ruby_stale_translation_callback(unaddressedReqId);
        break;
      case RubyRequestType_TLBI:
      case RubyRequestType_TLBI_SYNC:
        {
            // A TLBI operation initiated by this core has finished.
            SequencerRequest &seq_req =
                m_UnaddressedRequestTable.at(unaddressedReqId);
            assert(seq_req.m_type == reqType);
            ruby_unaddressed_callback(seq_req.pkt);
            m_UnaddressedRequestTable.erase(unaddressedReqId);
        }
        break;
      default:
        panic("Unexpected TLBI RubyRequestType");
    }
}
    for (auto &pkt : mylist) {
        RubyPort::SenderState *ss =
            safe_cast<RubyPort::SenderState *>(pkt->senderState);
        MemResponsePort *port = ss->port;
        assert(port != NULL);
        pkt->senderState = ss->predecessor;
        delete ss;
        port->hitCallback(pkt);
    }

    RubySystem *rs = m_ruby_system;
    if (rs->getCooldownEnabled())
        rs->m_cache_recorder->enqueueNextFlushRequest();
907 "There are %d Invalidations outstanding before Cache Walk\n",
910 for (
int i = 0;
i < size;
i++) {
913 RubyRequestType request_type = RubyRequestType_REPLACEMENT;
914 std::shared_ptr<RubyRequest> msg = std::make_shared<RubyRequest>(
916 addr, 0, 0, request_type, RubyAccessMode_Supervisor,
918 DPRINTF(RubySequencer,
"Evicting addr 0x%x\n",
addr);
928 "There are %d Invalidations outstanding after Cache Walk\n",
RequestStatus
Sequencer::makeRequest(PacketPtr pkt)
{
    // HTM abort signals must reach the Sequencer the same cycle they are
    // issued; they cannot be retried.
    if ((m_outstanding_count >= m_max_outstanding_requests) &&
        !pkt->req->isHTMAbort()) {
        return RequestStatus_BufferFull;
    }

    RubyRequestType primary_type = RubyRequestType_NULL;
    RubyRequestType secondary_type = RubyRequestType_NULL;

    if (pkt->isLLSC()) {
        // LL/SC instructions need to be handled carefully by the cache
        // coherence protocol to ensure they follow the proper semantics.
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing SC\n");
            primary_type = RubyRequestType_Store_Conditional;
            if (m_ruby_system->getProtocolInfo()
                    .getUseSecondaryStoreConditional()) {
                secondary_type = RubyRequestType_Store_Conditional;
            } else {
                secondary_type = RubyRequestType_ST;
            }
        } else {
            DPRINTF(RubySequencer, "Issuing LL\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Load_Linked;
            if (m_ruby_system->getProtocolInfo()
                    .getUseSecondaryLoadLinked()) {
                secondary_type = RubyRequestType_Load_Linked;
            } else {
                secondary_type = RubyRequestType_LD;
            }
        }
    } else if (pkt->req->isLockedRMW()) {
        // x86 locked instructions are translated to store cache coherence
        // requests so they are treated as read-exclusive operations.
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
            primary_type = RubyRequestType_Locked_RMW_Write;
        } else {
            DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Locked_RMW_Read;
        }
        secondary_type = RubyRequestType_ST;
    } else if (pkt->req->isTlbiCmd()) {
        primary_type = secondary_type = tlbiCmdToRubyRequestType(pkt);
        DPRINTF(RubySequencer, "Issuing TLBI\n");
#if defined (PROTOCOL_CHI)
    } else if (pkt->isAtomicOp()) {
        if (pkt->req->isAtomicReturn()) {
            DPRINTF(RubySequencer, "Issuing ATOMIC RETURN\n");
            primary_type = secondary_type =
                RubyRequestType_ATOMIC_RETURN;
        } else {
            DPRINTF(RubySequencer, "Issuing ATOMIC NO RETURN\n");
            primary_type = secondary_type =
                RubyRequestType_ATOMIC_NO_RETURN;
        }
#endif
    } else if (pkt->req->hasNoAddr()) {
        primary_type = secondary_type = RubyRequestType_hasNoAddr;
    } else {
        // A SwapReq sets both isWrite() and isRead(), so check isWrite()
        // first to treat it like a write.
        if (pkt->isWrite()) {
            // Note: M5 packets do not differentiate ST from RMW_Write.
            primary_type = secondary_type = RubyRequestType_ST;
        } else if (pkt->isRead()) {
            if (pkt->req->isHTMCmd()) {
                primary_type = secondary_type = htmCmdToRubyRequestType(pkt);
            } else if (pkt->req->isInstFetch()) {
                primary_type = secondary_type = RubyRequestType_IFETCH;
            } else if (pkt->req->isReadModifyWrite()) {
                primary_type = RubyRequestType_RMW_Read;
                secondary_type = RubyRequestType_ST;
            } else {
                primary_type = secondary_type = RubyRequestType_LD;
            }
        } else if (pkt->isFlush()) {
            primary_type = secondary_type = RubyRequestType_FLUSH;
        } else if (pkt->cmd == MemCmd::MemSyncReq) {
            primary_type = secondary_type = RubyRequestType_REPLACEMENT;
        } else {
            panic("Cannot convert packet [%s] to ruby request\n",
                  pkt->print());
        }
    }

    // A line locked by a Locked_RMW_Read aliases every later request to it
    // until the matching Locked_RMW_Write unlocks it.
    if (!pkt->req->isMemMgmt() &&
        m_controller->isBlocked(makeLineAddress(pkt->getAddr())) &&
        (primary_type != RubyRequestType_Locked_RMW_Write)) {
        return RequestStatus_Aliased;
    }

    RequestStatus status = insertRequest(pkt, primary_type, secondary_type);

    // Aliased requests are not issued now; they complete when the
    // outstanding request for the same line returns.
    if (status != RequestStatus_Ready && status != RequestStatus_Aliased)
        return status;
    if (status != RequestStatus_Aliased)
        issueRequest(pkt, secondary_type);

    return RequestStatus_Issued;
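The decision order in makeRequest matters, in particular checking writes before reads so swap packets (which set both flags) classify as writes. A simplified standalone classifier mirroring that order (flags and names are illustrative, not gem5's Packet API):

#include <iostream>

enum class ReqType { StoreCond, LoadLinked, LockedRMW, Store, Ifetch, Load,
                     Flush, Unknown };

struct Pkt {
    bool llsc = false, write = false, read = false, lockedRMW = false,
         instFetch = false, flush = false;
};

ReqType
classify(const Pkt &p)
{
    if (p.llsc)
        return p.write ? ReqType::StoreCond : ReqType::LoadLinked;
    if (p.lockedRMW)
        return ReqType::LockedRMW;
    if (p.write)                 // check writes before reads: a swap
        return ReqType::Store;   // packet sets both flags
    if (p.read)
        return p.instFetch ? ReqType::Ifetch : ReqType::Load;
    if (p.flush)
        return ReqType::Flush;
    return ReqType::Unknown;
}

int
main()
{
    Pkt swapPkt;
    swapPkt.write = swapPkt.read = true; // SwapReq-like packet
    std::cout << (classify(swapPkt) == ReqType::Store) << std::endl; // 1
    return 0;
}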
void
Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
{
    assert(pkt != NULL);
    ContextID proc_id = pkt->req->hasContextId() ?
        pkt->req->contextId() : InvalidContextID;
    ContextID core_id = coreId();

    // If valid, copy the pc to the ruby request.
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    uint32_t blk_size = m_ruby_system->getBlockSizeBytes();

    std::shared_ptr<RubyRequest> msg;
    if (pkt->req->isMemMgmt()) {
        msg = std::make_shared<RubyRequest>(clockEdge(), blk_size,
                                            pc, secondary_type,
                                            RubyAccessMode_Supervisor, pkt,
                                            PrefetchBit_No, proc_id, core_id);

        DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s\n",
                 curTick(), m_version, "Seq", "Begin", "", "",
                 RubyRequestType_to_string(secondary_type));

        if (pkt->req->isTlbiCmd()) {
            msg->m_isTlbi = true;
            switch (secondary_type) {
              case RubyRequestType_TLBI_EXT_SYNC_COMP:
                msg->m_tlbiTransactionUid = pkt->req->getExtraData();
                break;
              case RubyRequestType_TLBI:
              case RubyRequestType_TLBI_SYNC:
                msg->m_tlbiTransactionUid =
                    getCurrentUnaddressedTransactionID();
                break;
              default:
                panic("Unexpected TLBI RubyRequestType");
            }
            DPRINTF(RubySequencer, "Issuing TLBI %016x\n",
                    msg->m_tlbiTransactionUid);
        }
    } else {
        msg = std::make_shared<RubyRequest>(clockEdge(), blk_size,
                                            pkt->getAddr(), pkt->getSize(),
                                            pc, secondary_type,
                                            RubyAccessMode_Supervisor, pkt,
                                            PrefetchBit_No, proc_id, core_id);

        if (pkt->isAtomicOp() &&
            ((secondary_type == RubyRequestType_ATOMIC_RETURN) ||
             (secondary_type == RubyRequestType_ATOMIC_NO_RETURN))) {
            // Hand the packet's atomic op to the request, tagged with its
            // offset within the line.
            std::vector<std::pair<int, AtomicOpFunctor *>> atomicOps;
            atomicOps.push_back(std::make_pair<int, AtomicOpFunctor *>(
                getOffset(pkt->getAddr()), pkt->getAtomicOp()));
            msg->setAtomicOps(atomicOps);
        }

        DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %s\n",
                 curTick(), m_version, "Seq", "Begin", "", "",
                 printAddress(msg->getPhysicalAddress()),
                 RubyRequestType_to_string(secondary_type));
    }

    // If the request originates in a transaction, mark the message.
    if (pkt->isHtmTransactional()) {
        msg->m_htmFromTransaction = true;
        msg->m_htmTransactionUid = pkt->getHtmTransactionUid();
    }

    Tick latency = cyclesToTicks(
        m_controller->mandatoryQueueLatency(secondary_type));
    assert(latency > 0);
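The (offset, AtomicOpFunctor*) pairs hand the packet's atomic operation to the memory side, where it is applied to the line's bytes; ATOMIC_RETURN additionally hands the old value back. A toy sketch of that shape (std::function standing in for gem5's AtomicOpFunctor):

#include <cstdint>
#include <functional>
#include <iostream>

using AtomicOp = std::function<void(std::uint32_t *)>;

int
main()
{
    std::uint32_t line_word = 10;   // value resident in the cache line
    std::uint32_t old = line_word;  // what ATOMIC_RETURN would hand back
    AtomicOp fetchAdd = [](std::uint32_t *p) { *p += 5; };
    fetchAdd(&line_word);           // op applied at the memory side
    std::cout << old << " -> " << line_word << std::endl; // 10 -> 15
    return 0;
}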
template <class KEY, class VALUE>
std::ostream &
operator<<(std::ostream &out, const std::unordered_map<KEY, VALUE> &map)
{
    for (const auto &table_entry : map) {
        out << "[ " << table_entry.first << " =";
        for (const auto &seq_req : table_entry.second) {
            out << " " << RubyRequestType_to_string(seq_req.m_second_type);
        }
    }
    out << " ]";
    return out;
}
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            SequencerRequestType_to_string(requestType));
    uint64_t aligned_txid =
        m_unaddressedTransactionCnt << m_ruby_system->getBlockSizeBits();
    if (aligned_txid > 0xFFFFFFFFull) {
        m_unaddressedTransactionCnt = 0;
    }

    return (uint64_t(m_version & 0xFFFFFFFF) << 32) |
           (m_unaddressedTransactionCnt << m_ruby_system->getBlockSizeBits());
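The ID layout packs the sequencer's version into the upper 32 bits and the shifted counter into the lower 32, resetting the counter before it spills past 32 bits. A worked example (block-size bits assumed to be 6, i.e. 64-byte lines):

#include <cstdint>
#include <iostream>

int
main()
{
    std::uint64_t version = 3;
    std::uint64_t counter = 2;
    const unsigned blockBits = 6; // assumed log2 of the line size

    std::uint64_t id = (std::uint64_t(version & 0xFFFFFFFF) << 32) |
                       (counter << blockBits);
    std::cout << std::hex << id << std::endl; // 0x300000080

    // Wraparound: once the shifted counter would exceed 32 bits, it
    // resets to zero so IDs stay within the per-version namespace.
    std::uint64_t aligned = (0x4000000ull << blockBits); // 0x100000000
    if (aligned > 0xFFFFFFFFull)
        counter = 0;
    std::cout << std::dec << counter << std::endl; // 0
    return 0;
}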
Cycles curCycle() const
Determine the current cycle, corresponding to a tick aligned to a clock edge.
Tick clockEdge(Cycles cycles=Cycles(0)) const
Determine the tick when a cycle begins, by default the current one, but the argument also enables the...
Tick cyclesToTicks(Cycles c) const
Cycles is a wrapper class for representing cycle counts, i.e.
virtual std::string name() const
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
const std::string & cmdString() const
Return the string name of the cmd field (for debugging and tracing).
void print(std::ostream &o, int verbosity=0, const std::string &prefix="") const
T * findNextSenderState() const
Go through the sender state stack and return the first instance that is of type T (as determined by a...
bool isHtmTransactional() const
Returns whether or not this packet/request originates in the CPU executing in transactional mode,...
void setData(const uint8_t *p)
Copy data into the packet from the provided pointer.
uint64_t getHtmTransactionUid() const
If a packet/request originates in a CPU executing in transactional mode, i.e.
RequestPtr req
A pointer to the original request.
AtomicOpFunctor * getAtomicOp() const
Accessor function to atomic op.
MemCmd cmd
The command field of the packet.
bool isMaskedWrite() const
void writeData(uint8_t *p) const
Copy data from the packet to the memory at the provided pointer.
bool isLocked(int context) const
void setLocked(int context)
virtual void notifyCoalesced(const Addr &addr, const RubyRequestType &type, const RequestPtr &req, const DataBlock &data_blk, const bool &was_miss)
Notifies controller of a request coalesced at the sequencer.
virtual Cycles mandatoryQueueLatency(const RubyRequestType &param_type)
bool isBlocked(Addr) const
void blockOnQueue(Addr, MessageBuffer *)
Addr getAddressAtIdx(int idx) const
void clearLockedAll(int context)
AbstractCacheEntry * lookup(Addr address)
void enqueueNextFetchRequest()
Function for fetching requests to warm up the memory and the caches.
void enqueue(MsgPtr message, Tick curTime, Tick delta, bool ruby_is_random, bool ruby_warmup, bool bypassStrictFIFO=false)
bool getUseSecondaryStoreConditional() const
bool getUseSecondaryLoadLinked() const
void hitCallback(PacketPtr pkt)
void ruby_hit_callback(PacketPtr pkt)
Addr makeLineAddress(Addr addr) const
void ruby_unaddressed_callback(PacketPtr pkt)
void ruby_stale_translation_callback(Addr txnId)
std::string printAddress(Addr addr) const
virtual int functionalWrite(Packet *func_pkt)
AbstractController * m_controller
void ruby_eviction_callback(Addr address)
MessageBuffer * m_mandatory_q_ptr
Addr getOffset(Addr addr) const
uint32_t getBlockSizeBits()
bool getCooldownEnabled()
uint32_t getBlockSizeBytes()
CacheRecorder * m_cache_recorder
const ProtocolInfo & getProtocolInfo()
statistics::Histogram m_outstandReqHist
Histogram for number of outstanding requests per cycle.
void resetStats() override
Callback to reset stats.
Cycles m_deadlock_threshold
std::unordered_map< Addr, std::list< SequencerRequest > > m_RequestTable
virtual bool empty() const
void writeCallback(Addr address, DataBlock &data, const bool externalHit=false, const MachineType mach=MachineType_NUM, const Cycles initialRequestTime=Cycles(0), const Cycles forwardRequestTime=Cycles(0), const Cycles firstResponseTime=Cycles(0), const bool noCoales=false)
std::vector< statistics::Counter > m_IncompleteTimes
bool llscCheckMonitor(const Addr)
Searches for cache line address in the global monitor tagged with this Sequencer object's version id.
virtual int functionalWrite(Packet *func_pkt) override
std::vector< statistics::Histogram * > m_InitialToForwardDelayHist
RubySystem * m_ruby_system
std::vector< statistics::Histogram * > m_hitMachLatencyHist
Histograms for profiling the latencies for requests that did not required external messages.
void recordMissLatency(SequencerRequest *srequest, bool llscSuccess, const MachineType respondingMach, bool isExternalHit, Cycles initialRequestTime, Cycles forwardRequestTime, Cycles firstResponseTime)
void completeHitCallback(std::vector< PacketPtr > &list)
std::vector< statistics::Histogram * > m_typeLatencyHist
PacketPtr m_cache_inv_pkt
void atomicCallback(Addr address, DataBlock &data, const bool externalHit=false, const MachineType mach=MachineType_NUM, const Cycles initialRequestTime=Cycles(0), const Cycles forwardRequestTime=Cycles(0), const Cycles firstResponseTime=Cycles(0))
void writeCallbackScFail(Addr address, DataBlock &data)
Proxy function to writeCallback that first invalidates the line address in the local monitor.
CacheMemory * m_dataCache_ptr
std::vector< statistics::Histogram * > m_FirstResponseToCompletionDelayHist
void incrementUnaddressedTransactionCnt()
Increment the unaddressed transaction counter.
virtual void hitCallback(SequencerRequest *srequest, DataBlock &data, bool llscSuccess, const MachineType mach, const bool externalHit, const Cycles initialRequestTime, const Cycles forwardRequestTime, const Cycles firstResponseTime, const bool was_coalesced)
void llscLoadLinked(const Addr)
Places the cache line address into the global monitor tagged with this Sequencer object's version id.
uint64_t getCurrentUnaddressedTransactionID() const
Generate the current unaddressed transaction ID based on the counter and the Sequencer object's versi...
Sequencer(const Params &)
statistics::Histogram m_latencyHist
Histogram for holding latency profile of all requests.
void issueRequest(PacketPtr pkt, RubyRequestType type)
bool llscStoreConditional(const Addr)
Searches for cache line address in the global monitor tagged with this Sequencer object's version id.
void unaddressedCallback(Addr unaddressedReqId, RubyRequestType requestType, const MachineType mach=MachineType_NUM, const Cycles initialRequestTime=Cycles(0), const Cycles forwardRequestTime=Cycles(0), const Cycles firstResponseTime=Cycles(0))
virtual RequestStatus insertRequest(PacketPtr pkt, RubyRequestType primary_type, RubyRequestType secondary_type)
EventFunctionWrapper deadlockCheckEvent
std::unordered_map< uint64_t, SequencerRequest > m_UnaddressedRequestTable
uint64_t m_unaddressedTransactionCnt
std::vector< statistics::Histogram * > m_hitTypeLatencyHist
statistics::Histogram m_hitLatencyHist
Histogram for holding latency profile of all requests that hit in the controller connected to this se...
RequestStatus makeRequest(PacketPtr pkt) override
int m_max_outstanding_requests
void llscClearLocalMonitor()
Removes all addresses from the local monitor.
void recordRequestType(SequencerRequestType requestType)
std::vector< statistics::Histogram * > m_IssueToInitialDelayHist
Histograms for recording the breakdown of miss latency.
virtual void print(std::ostream &out) const
virtual bool processReadCallback(SequencerRequest &seq_req, DataBlock &data, const bool rubyRequest, bool externalHit, const MachineType mach, Cycles initialRequestTime, Cycles forwardRequestTime, Cycles firstResponseTime)
RubySequencerParams Params
std::vector< std::vector< statistics::Histogram * > > m_hitTypeMachLatencyHist
void llscClearMonitor(const Addr)
Removes the cache line address from the global monitor.
void evictionCallback(Addr address)
std::vector< statistics::Histogram * > m_ForwardToFirstResponseDelayHist
void readCallback(Addr address, DataBlock &data, const bool externalHit=false, const MachineType mach=MachineType_NUM, const Cycles initialRequestTime=Cycles(0), const Cycles forwardRequestTime=Cycles(0), const Cycles firstResponseTime=Cycles(0))
std::vector< statistics::Histogram * > m_missMachLatencyHist
Histograms for profiling the latencies for requests that required external messages.
std::vector< statistics::Histogram * > m_missTypeLatencyHist
statistics::Histogram m_missLatencyHist
Histogram for holding latency profile of all requests that miss in the controller connected to this s...
std::vector< std::vector< statistics::Histogram * > > m_missTypeMachLatencyHist
void mergeFrom(const DataBlock &data)
void sample(const U &v, int n=1)
Add a value to the distribution n times.
void reset()
Reset stat value to default.
DrainState drainState() const
Return the current drain state of an object.
@ Draining
Draining buffers pending serialization/handover.
bool scheduled() const
Determine if the current event is scheduled.
void schedule(Event &event, Tick when)
#define panic(...)
This implements a cprintf based panic() function.
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condi...
RubyRequestType tlbiCmdToRubyRequestType(const Packet *pkt)
bool isTlbiCmdRequest(RubyRequestType type)
RubyRequestType htmCmdToRubyRequestType(const Packet *pkt)
Tick curTick()
The universal simulation clock.
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
uint64_t Tick
Tick count type.
std::ostream & operator<<(std::ostream &os, const BaseSemihosting::InPlaceArg &ipa)
int ContextID
Globally unique thread context ID.
const ContextID InvalidContextID
Declaration of the Packet class.
RubyRequestType m_second_type