#include "debug/LLSC.hh"
#include "debug/MemoryAccess.hh"
#include "debug/ProtocolTrace.hh"
#include "debug/RubyHitMiss.hh"
#include "debug/RubySequencer.hh"
#include "debug/RubyStats.hh"
#include "mem/ruby/protocol/PrefetchBit.hh"
#include "mem/ruby/protocol/RubyAccessMode.hh"
Sequencer::Sequencer(const Params &p)
    : RubyPort(p), m_IncompleteTimes(MachineType_NUM),
      deadlockCheckEvent([this]{ wakeup(); }, "Sequencer deadlock check")
{
    m_outstanding_count = 0;

    m_dataCache_ptr = p.dcache;
    m_max_outstanding_requests = p.max_outstanding_requests;
    m_deadlock_threshold = p.deadlock_threshold;

    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);

    m_unaddressedTransactionCnt = 0;

    m_runningGarnetStandalone = p.garnet_standalone;

    m_num_pending_invs = 0;
    m_cache_inv_pkt = nullptr;

    // Aggregate latency histograms; each init(10) sets up ten buckets.
    m_outstandReqHist.init(10);
    m_latencyHist.init(10);
    m_hitLatencyHist.init(10);
    m_missLatencyHist.init(10);
    // Per-request-type latency histograms.
    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist.push_back(new statistics::Histogram());
        m_typeLatencyHist[i]->init(10);

        m_hitTypeLatencyHist.push_back(new statistics::Histogram());
        m_hitTypeLatencyHist[i]->init(10);

        m_missTypeLatencyHist.push_back(new statistics::Histogram());
        m_missTypeLatencyHist[i]->init(10);
    }
    // Per-machine-type histograms, including the four-interval miss
    // latency breakdown.
    for (int i = 0; i < MachineType_NUM; i++) {
        m_hitMachLatencyHist.push_back(new statistics::Histogram());
        m_hitMachLatencyHist[i]->init(10);

        m_missMachLatencyHist.push_back(new statistics::Histogram());
        m_missMachLatencyHist[i]->init(10);

        m_IssueToInitialDelayHist.push_back(new statistics::Histogram());
        m_IssueToInitialDelayHist[i]->init(10);

        m_InitialToForwardDelayHist.push_back(new statistics::Histogram());
        m_InitialToForwardDelayHist[i]->init(10);

        m_ForwardToFirstResponseDelayHist.push_back(
            new statistics::Histogram());
        m_ForwardToFirstResponseDelayHist[i]->init(10);

        m_FirstResponseToCompletionDelayHist.push_back(
            new statistics::Histogram());
        m_FirstResponseToCompletionDelayHist[i]->init(10);
    }
    // Per-type, per-machine hit and miss latency histograms.
    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_hitTypeMachLatencyHist.push_back(
            std::vector<statistics::Histogram *>());
        m_missTypeMachLatencyHist.push_back(
            std::vector<statistics::Histogram *>());

        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i].push_back(
                new statistics::Histogram());
            m_hitTypeMachLatencyHist[i][j]->init(10);

            m_missTypeMachLatencyHist[i].push_back(
                new statistics::Histogram());
            m_missTypeMachLatencyHist[i][j]->init(10);
        }
    }
}
158 "%s must have a dcache object to support LLSC requests.",
name());
162 DPRINTF(LLSC,
"LLSC Monitor - inserting load linked - "
163 "addr=0x%lx - cpu=%u\n", claddr,
m_version);
176 DPRINTF(LLSC,
"LLSC Monitor - clearing due to store - "
177 "addr=0x%lx - cpu=%u\n", claddr,
m_version);
185 "%s must have a dcache object to support LLSC requests.",
name());
190 DPRINTF(LLSC,
"LLSC Monitor - clearing due to "
191 "store conditional - "
192 "addr=0x%lx - cpu=%u\n",
void
Sequencer::wakeup()
{
    // Check for deadlock: any outstanding request that has been waiting
    // longer than m_deadlock_threshold aborts the simulation.
    [[maybe_unused]] int total_outstanding = 0;

    for (const auto &table_entry : m_RequestTable) {
        for (const auto &seq_req : table_entry.second) {
            // ... skip requests younger than the threshold ...
            panic("Possible Deadlock detected. Aborting!\n version: %d "
                  "request.paddr: 0x%x m_readRequestTable: %d current time: "
                  "%u issue_time: %d difference: %d\n", m_version,
                  seq_req.pkt->getAddr(), table_entry.second.size()
                  /* ... current time, issue time, and difference ... */);
        }
        total_outstanding += table_entry.second.size();
    }
    // ...
}
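// A standalone sketch (assumed plain types, not gem5's) of the same
// age-based deadlock check: walk every per-line request list and abort
// once any request's age reaches the threshold.
#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <list>
#include <unordered_map>

struct PendingReq { std::uint64_t paddr; std::uint64_t issue_cycle; };

void
checkForDeadlock(const std::unordered_map<std::uint64_t,
                                          std::list<PendingReq>> &table,
                 std::uint64_t now, std::uint64_t threshold)
{
    for (const auto &entry : table) {
        for (const auto &req : entry.second) {
            if (now - req.issue_cycle >= threshold) {
                std::fprintf(stderr,
                             "Possible deadlock: paddr 0x%" PRIx64
                             " waited %" PRIu64 " cycles\n",
                             req.paddr, now - req.issue_cycle);
                std::abort();
            }
        }
    }
}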
int
Sequencer::functionalWrite(Packet *func_pkt)
{
    int num_written = RubyPort::functionalWrite(func_pkt);

    // Apply the functional write to data held by outstanding requests.
    for (const auto &table_entry : m_RequestTable) {
        for (const auto &seq_req : table_entry.second) {
            if (seq_req.functionalWrite(func_pkt))
                ++num_written;
        }
    }
    // ...
    return num_written;
}
void
Sequencer::resetStats()
{
    // ... reset the aggregate histograms ...
    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist[i]->reset();
        m_hitTypeLatencyHist[i]->reset();
        m_missTypeLatencyHist[i]->reset();
        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i][j]->reset();
            m_missTypeMachLatencyHist[i][j]->reset();
        }
    }

    for (int i = 0; i < MachineType_NUM; i++) {
        // ... reset the per-machine latency and delay histograms ...
    }
}
RequestStatus
Sequencer::insertRequest(PacketPtr pkt, RubyRequestType primary_type,
                         RubyRequestType secondary_type)
{
    // ... schedule the deadlock check event if needed ...

    if (isTlbiCmdRequest(primary_type)) {
        assert(primary_type == secondary_type);

        switch (primary_type) {
          case RubyRequestType_TLBI_EXT_SYNC_COMP:
            // No data needs to be recorded for this request.
            break;
          case RubyRequestType_TLBI:
          case RubyRequestType_TLBI_SYNC:
            {
                incrementUnaddressedTransactionCnt();

                // emplace returns a pair<iterator, bool>; the bool is
                // false if an entry with the same key already existed.
                [[maybe_unused]] auto insert_data = \
                    m_UnaddressedRequestTable.emplace(
                        getCurrentUnaddressedTransactionID(),
                        SequencerRequest(
                            pkt, primary_type, secondary_type, curCycle()));

                assert(insert_data.second &&
                       "Another TLBI request with the same ID exists");

                DPRINTF(RubySequencer, "Inserting TLBI request %016x\n",
                        getCurrentUnaddressedTransactionID());
            }
            break;
          default:
            panic("Unexpected TLBI RubyRequestType");
        }

        return RequestStatus_Ready;
    }

    // Cache-invalidation walks are issued separately, so no new entry is
    // created for the sync packet itself.
    if (pkt->cmd == MemCmd::MemSyncReq) {
        return RequestStatus_Aliased;
    }

    // Queue the request behind any outstanding request to the same line.
    auto &seq_req_list = m_RequestTable[makeLineAddress(pkt->getAddr())];
    seq_req_list.emplace_back(pkt, primary_type,
                              secondary_type, curCycle());
    // ...

    if (seq_req_list.size() > 1) {
        return RequestStatus_Aliased;
    }
    // ...
    return RequestStatus_Ready;
}
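// A minimal standalone model (illustrative names, not gem5's) of the
// aliasing rule above: requests queue per cache line, only the head of a
// line's list is issued to the protocol, and later arrivals coalesce
// behind it as "Aliased".
#include <cstdint>
#include <list>
#include <unordered_map>

enum class Status { Ready, Aliased };
struct Req { int id; };

struct RequestTableModel
{
    std::unordered_map<std::uint64_t, std::list<Req>> table;

    Status insert(std::uint64_t line_addr, Req r)
    {
        auto &list = table[line_addr];
        list.push_back(r);
        // More than one entry means an earlier request to this line is
        // still outstanding: coalesce instead of issuing again.
        return list.size() > 1 ? Status::Aliased : Status::Ready;
    }
};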
void
Sequencer::recordMissLatency(SequencerRequest *srequest, bool llscSuccess,
                             const MachineType respondingMach,
                             bool isExternalHit, Cycles initialRequestTime,
                             Cycles forwardRequestTime,
                             Cycles firstResponseTime)
{
    // ...
    Cycles total_lat = completion_time - issued_time;

    if ((initialRequestTime != 0) && (initialRequestTime < issued_time)) {
        // The request was combined in the protocol with an earlier request
        // to the same address, so initialRequestTime may predate this
        // request's issue. Cycles is unsigned, so clamp to zero rather
        // than underflow.
        total_lat = Cycles(0);
    }

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n",
             curTick(), m_version, "Seq", llscSuccess ? "Done" : "SC_Failed",
             "", "", printAddress(srequest->pkt->getAddr()), total_lat);

    // ... sample the aggregate and per-type latency histograms ...

    if (respondingMach != MachineType_NUM) {
        // Record the four-interval breakdown only when the timestamps are
        // monotonic; otherwise count the profile as incomplete.
        if ((issued_time <= initialRequestTime) &&
            (initialRequestTime <= forwardRequestTime) &&
            (forwardRequestTime <= firstResponseTime) &&
            (firstResponseTime <= completion_time)) {

            m_IssueToInitialDelayHist[respondingMach]->sample(
                initialRequestTime - issued_time);
            m_InitialToForwardDelayHist[respondingMach]->sample(
                forwardRequestTime - initialRequestTime);
            m_ForwardToFirstResponseDelayHist[respondingMach]->sample(
                firstResponseTime - forwardRequestTime);
            m_FirstResponseToCompletionDelayHist[respondingMach]->sample(
                completion_time - firstResponseTime);
        } else {
            m_IncompleteTimes[respondingMach]++;
        }
    }
    // ... the hit-side histograms are sampled analogously when
    //     respondingMach is valid ...
}
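// Worked example (standalone, not part of Sequencer.cc) of the breakdown
// recorded above: the four sampled intervals tile the total latency
// exactly when the timestamps are monotonic.
#include <cassert>
#include <cstdint>
#include <iostream>

int main()
{
    std::uint64_t issued = 100, initial = 104, forward = 110,
                  first_resp = 130, completion = 135;

    assert(issued <= initial && initial <= forward &&
           forward <= first_resp && first_resp <= completion);

    std::uint64_t issue_to_initial  = initial - issued;        // 4
    std::uint64_t initial_to_fwd    = forward - initial;       // 6
    std::uint64_t fwd_to_first_resp = first_resp - forward;    // 20
    std::uint64_t first_resp_to_end = completion - first_resp; // 5

    // The pieces sum back to the total latency (completion - issued = 35).
    std::cout << issue_to_initial + initial_to_fwd +
                 fwd_to_first_resp + first_resp_to_end << '\n'; // 35
}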
void
Sequencer::writeCallback(Addr address, DataBlock &data,
                         const bool externalHit, const MachineType mach,
                         const Cycles initialRequestTime,
                         const Cycles forwardRequestTime,
                         const Cycles firstResponseTime,
                         const bool noCoales)
{
    // ...
    auto &seq_req_list = m_RequestTable[address];

    // Perform hitCallback on every cpu request made to this cache block
    // while the ruby request was outstanding. Since only one ruby request
    // was made, profile the ruby latency once.
    bool ruby_request = true;
    while (!seq_req_list.empty()) {
        SequencerRequest &seq_req = seq_req_list.front();

        bool atomic_req =
            ((seq_req.m_type == RubyRequestType_ATOMIC_RETURN) ||
             (seq_req.m_type == RubyRequestType_ATOMIC_NO_RETURN));

        if ((noCoales || atomic_req) && !ruby_request) {
            // Do not process follow-up requests; reissue this request to
            // the cache hierarchy instead.
            issueRequest(seq_req.pkt, seq_req.m_second_type);
            break;
        }

        if (ruby_request) {
            assert(seq_req.m_type != RubyRequestType_LD);
            assert(seq_req.m_type != RubyRequestType_Load_Linked);
            assert(seq_req.m_type != RubyRequestType_IFETCH);
            assert(seq_req.m_type != RubyRequestType_ATOMIC_RETURN);
            assert(seq_req.m_type != RubyRequestType_ATOMIC_NO_RETURN);
        }

        // Handle the write request.
        if ((seq_req.m_type != RubyRequestType_LD) &&
            (seq_req.m_type != RubyRequestType_Load_Linked) &&
            (seq_req.m_type != RubyRequestType_IFETCH)) {
            bool success = true;
            if (seq_req.m_type != RubyRequestType_Store_Conditional) {
                // Regular stores to a monitored address clear the monitor.
                llscClearMonitor(address);
            } else {
                // Store conditionals must first check the monitor to see
                // whether they succeed.
                success = llscStoreConditional(address);
                seq_req.pkt->req->setExtraData(success ? 1 : 0);
            }

            if (seq_req.m_type == RubyRequestType_Locked_RMW_Read) {
                // Block the mandatory queue until the matching
                // Locked_RMW_Write arrives.
                m_controller->blockOnQueue(address, m_mandatory_q_ptr);
            } else if (seq_req.m_type == RubyRequestType_Locked_RMW_Write) {
                m_controller->unblock(address);
            }

            if (ruby_request) {
                recordMissLatency(&seq_req, success, mach, externalHit,
                                  initialRequestTime, forwardRequestTime,
                                  firstResponseTime);
            }
            // ...
            hitCallback(&seq_req, data, success, mach, externalHit,
                        initialRequestTime, forwardRequestTime,
                        firstResponseTime, !ruby_request);
            ruby_request = false;
        } else {
            // Read requests coalesced behind this write.
            assert(!ruby_request);
            // ...
            hitCallback(&seq_req, data, true, mach, externalHit,
                        initialRequestTime, forwardRequestTime,
                        firstResponseTime, !ruby_request);
        }
        seq_req_list.pop_front();
    }

    // Free all outstanding requests corresponding to this address.
    if (seq_req_list.empty()) {
        m_RequestTable.erase(address);
    }
}
void
Sequencer::readCallback(Addr address, DataBlock &data,
                        bool externalHit, const MachineType mach,
                        Cycles initialRequestTime,
                        Cycles forwardRequestTime,
                        Cycles firstResponseTime)
{
    // ...
    auto &seq_req_list = m_RequestTable[address];

    // Satisfy read requests until the first write request or the end of
    // the list; profile the ruby latency only for the first request.
    bool ruby_request = true;
    while (!seq_req_list.empty()) {
        SequencerRequest &seq_req = seq_req_list.front();
        if (ruby_request) {
            assert((seq_req.m_type == RubyRequestType_LD) ||
                   (seq_req.m_type == RubyRequestType_Load_Linked) ||
                   (seq_req.m_type == RubyRequestType_IFETCH));
        }
        if ((seq_req.m_type != RubyRequestType_LD) &&
            (seq_req.m_type != RubyRequestType_Load_Linked) &&
            (seq_req.m_type != RubyRequestType_IFETCH) &&
            (seq_req.m_type != RubyRequestType_REPLACEMENT)) {
            // A write request: reissue it to the cache hierarchy and stop
            // draining the list here.
            issueRequest(seq_req.pkt, seq_req.m_second_type);
            break;
        }
        if (ruby_request) {
            recordMissLatency(&seq_req, true, mach, externalHit,
                              initialRequestTime, forwardRequestTime,
                              firstResponseTime);
        }
        // ...
        hitCallback(&seq_req, data, true, mach, externalHit,
                    initialRequestTime, forwardRequestTime,
                    firstResponseTime, !ruby_request);
        ruby_request = false;
        seq_req_list.pop_front();
    }

    // Free all outstanding requests corresponding to this address.
    if (seq_req_list.empty()) {
        m_RequestTable.erase(address);
    }
}
void
Sequencer::atomicCallback(Addr address, DataBlock &data,
                          const bool externalHit, const MachineType mach,
                          const Cycles initialRequestTime,
                          const Cycles forwardRequestTime,
                          const Cycles firstResponseTime)
{
    // ...
    auto &seq_req_list = m_RequestTable[address];

    bool ruby_request = true;
    while (!seq_req_list.empty()) {
        SequencerRequest &seq_req = seq_req_list.front();
        if (ruby_request) {
            // The atomic request must be at the head of the list.
            assert((seq_req.m_type == RubyRequestType_ATOMIC_RETURN) ||
                   (seq_req.m_type == RubyRequestType_ATOMIC_NO_RETURN));
            recordMissLatency(&seq_req, true, mach, externalHit,
                              initialRequestTime, forwardRequestTime,
                              firstResponseTime);
            // ...
        }
        // ...
        ruby_request = false;
        hitCallback(&seq_req, data, true, mach, externalHit,
                    initialRequestTime, forwardRequestTime,
                    firstResponseTime, false);
        seq_req_list.pop_front();
    }

    if (seq_req_list.empty()) {
        m_RequestTable.erase(address);
    }
}
void
Sequencer::hitCallback(SequencerRequest *srequest, DataBlock &data,
                       bool llscSuccess,
                       const MachineType mach, const bool externalHit,
                       const Cycles initialRequestTime,
                       const Cycles forwardRequestTime,
                       const Cycles firstResponseTime,
                       const bool was_coalesced)
{
    warn_once("Replacement policy updates recently became the responsibility "
              "of SLICC state machines. Make sure to setMRU() near callbacks "
              "in .sm files!");

    PacketPtr pkt = srequest->pkt;
    Addr request_address(pkt->getAddr());
    RubyRequestType type = srequest->m_type;

    // ...

    // Load-linked requests set the monitor on completion.
    if (type == RubyRequestType_Load_Linked) {
        llscLoadLinked(makeLineAddress(request_address));
    }

    DPRINTF(RubyHitMiss, "Cache %s at %#x\n",
            externalHit ? "miss" : "hit",
            printAddress(request_address));

    // Update the packet or block data, unless this is a flush.
    if (!pkt->isFlush()) {
        if ((type == RubyRequestType_LD) ||
            (type == RubyRequestType_IFETCH) ||
            (type == RubyRequestType_RMW_Read) ||
            (type == RubyRequestType_Locked_RMW_Read) ||
            (type == RubyRequestType_Load_Linked) ||
            (type == RubyRequestType_ATOMIC_RETURN)) {
            // Read-like requests copy data out of the block.
            pkt->setData(
                data.getData(getOffset(request_address), pkt->getSize()));
            if (type == RubyRequestType_ATOMIC_RETURN) {
                DPRINTF(RubySequencer, "ATOMIC RETURN data %s\n", data);
            }
        } else if (pkt->req->isSwap()) {
            // Swaps return the old block contents and then write the
            // packet's value into the block.
            std::vector<uint8_t> overwrite_val(pkt->getSize());
            pkt->writeData(&overwrite_val[0]);
            pkt->setData(
                data.getData(getOffset(request_address), pkt->getSize()));
            data.setData(&overwrite_val[0],
                         getOffset(request_address), pkt->getSize());
        } else if (pkt->isAtomicOp()) {
            // ...
            DPRINTF(RubySequencer, "AMO original data %s\n", data);
            // ...
        } else if (type != RubyRequestType_Store_Conditional || llscSuccess) {
            // All store types except failed store conditionals write the
            // packet's data into the block.
            data.setData(pkt);
        }
    }

    // If using the RubyTester, update the sender state's subBlock with
    // the received data; the tester will access this state later.
    if (m_usingRubyTester) {
        DPRINTF(RubySequencer, "hitCallback %s 0x%x using RubyTester\n",
                pkt->cmdString(), pkt->getAddr());
        RubyTester::SenderState *testerSenderState =
            pkt->findNextSenderState<RubyTester::SenderState>();
        assert(testerSenderState);
        testerSenderState->subBlock.mergeFrom(data);
    }

    RubySystem *rs = m_ruby_system;
    if (RubySystem::getWarmupEnabled()) {
        delete pkt;
        rs->m_cache_recorder->enqueueNextFetchRequest();
    } else if (RubySystem::getCooldownEnabled()) {
        delete pkt;
        rs->m_cache_recorder->enqueueNextFlushRequest();
    } else {
        ruby_hit_callback(pkt);
        // ...
    }
}
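// A standalone sketch of the swap handling above: the packet's write
// value is saved, the old line contents are returned to the packet, and
// the saved value is then committed to the line, so the swap returns the
// pre-swap data. (Plain vectors stand in for gem5's Packet/DataBlock.)
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

int main()
{
    std::vector<std::uint8_t> line(64, 0xAA); // cache line contents
    std::vector<std::uint8_t> pkt_data{0x11, 0x22, 0x33, 0x44};
    std::size_t offset = 8;

    std::vector<std::uint8_t> overwrite_val = pkt_data; // save write value
    // Return the old data to the requester...
    std::copy(line.begin() + offset, line.begin() + offset + pkt_data.size(),
              pkt_data.begin());
    // ...then commit the saved value to the line.
    std::copy(overwrite_val.begin(), overwrite_val.end(),
              line.begin() + offset);

    std::cout << std::hex << int(pkt_data[0]) << ' '
              << int(line[offset]) << '\n'; // aa 11
}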
void
Sequencer::unaddressedCallback(Addr unaddressedReqId,
                               RubyRequestType reqType,
                               const MachineType mach,
                               const Cycles initialRequestTime,
                               const Cycles forwardRequestTime,
                               const Cycles firstResponseTime)
{
    DPRINTF(RubySequencer, "unaddressedCallback ID:%08x type:%d\n",
            unaddressedReqId, reqType);

    switch (reqType) {
      case RubyRequestType_TLBI_EXT_SYNC:
        // ... hand the external sync off to the CPU ...
        break;
      case RubyRequestType_TLBI:
      case RubyRequestType_TLBI_SYNC:
        {
            // Look up the pending request by its transaction ID and
            // complete it.
            SequencerRequest &seq_req =
                m_UnaddressedRequestTable.at(unaddressedReqId);
            assert(seq_req.m_type == reqType);
            // ...
        }
        break;
      default:
        panic("Unexpected TLBI RubyRequestType");
    }
}
void
Sequencer::completeHitCallback(std::vector<PacketPtr> &mylist)
{
    for (auto &pkt : mylist) {
        // Restore the sender state and return the response through the
        // port the request arrived on.
        RubyPort::SenderState *ss =
            safe_cast<RubyPort::SenderState *>(pkt->senderState);
        MemResponsePort *port = ss->port;
        assert(port != NULL);

        pkt->senderState = ss->predecessor;
        // ...
    }

    // During cooldown the cache recorder drives the next flush request.
    // ...
    RubySystem *rs = m_ruby_system;
    rs->m_cache_recorder->enqueueNextFlushRequest();
    // ...
}
883 "There are %d Invalidations outstanding before Cache Walk\n",
886 for (
int i = 0;
i < size;
i++) {
889 RubyRequestType request_type = RubyRequestType_REPLACEMENT;
890 std::shared_ptr<RubyRequest> msg = std::make_shared<RubyRequest>(
892 request_type, RubyAccessMode_Supervisor,
894 DPRINTF(RubySequencer,
"Evicting addr 0x%x\n",
addr);
902 "There are %d Invalidations outstanding after Cache Walk\n",
RequestStatus
Sequencer::makeRequest(PacketPtr pkt)
{
    // HTM abort signals must reach the Sequencer the same cycle they are
    // issued; they cannot be retried, so they bypass the
    // outstanding-request limit.
    if ((m_outstanding_count >= m_max_outstanding_requests) &&
        !pkt->req->isHTMAbort()) {
        return RequestStatus_BufferFull;
    }

    RubyRequestType primary_type = RubyRequestType_NULL;
    RubyRequestType secondary_type = RubyRequestType_NULL;

    if (pkt->isLLSC()) {
        // LL/SC instructions are flagged as atomic so the protocol does
        // not apply migratory-sharing optimizations between the LL and
        // the SC.
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing SC\n");
            primary_type = RubyRequestType_Store_Conditional;
#if defined (PROTOCOL_MESI_Three_Level) || defined (PROTOCOL_MESI_Three_Level_HTM)
            secondary_type = RubyRequestType_Store_Conditional;
#else
            secondary_type = RubyRequestType_ST;
#endif
        } else {
            DPRINTF(RubySequencer, "Issuing LL\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Load_Linked;
            secondary_type = RubyRequestType_LD;
        }
    } else if (pkt->req->isLockedRMW()) {
        // x86 locked instructions are issued as stores so they are
        // treated as read-exclusive operations.
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
            primary_type = RubyRequestType_Locked_RMW_Write;
        } else {
            DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Locked_RMW_Read;
        }
        secondary_type = RubyRequestType_ST;
    } else if (pkt->req->isTlbiCmd()) {
        primary_type = secondary_type = tlbiCmdToRubyRequestType(pkt);
        DPRINTF(RubySequencer, "Issuing TLBI\n");
#if defined (PROTOCOL_CHI)
    } else if (pkt->isAtomicOp()) {
        if (pkt->req->isAtomicReturn()) {
            DPRINTF(RubySequencer, "Issuing ATOMIC RETURN\n");
            primary_type = secondary_type =
                RubyRequestType_ATOMIC_RETURN;
        } else {
            DPRINTF(RubySequencer, "Issuing ATOMIC NO RETURN\n");
            primary_type = secondary_type =
                RubyRequestType_ATOMIC_NO_RETURN;
        }
#endif
    } else if (pkt->isWrite()) {
        // Note: gem5 packets do not distinguish ST from RMW_Write.
        primary_type = secondary_type = RubyRequestType_ST;
    } else if (pkt->isRead()) {
        if (pkt->req->isHTMCmd()) {
            primary_type = secondary_type = htmCmdToRubyRequestType(pkt);
        } else if (pkt->req->isInstFetch()) {
            primary_type = secondary_type = RubyRequestType_IFETCH;
        } else if (pkt->req->isReadModifyWrite()) {
            primary_type = RubyRequestType_RMW_Read;
            secondary_type = RubyRequestType_ST;
        } else {
            primary_type = secondary_type = RubyRequestType_LD;
        }
    } else if (pkt->isFlush()) {
        primary_type = secondary_type = RubyRequestType_FLUSH;
    } else if (pkt->cmd == MemCmd::MemSyncReq) {
        // A full-cache invalidation: the individual evictions are issued
        // as replacements.
        primary_type = secondary_type = RubyRequestType_REPLACEMENT;
        // ...
    } else {
        panic("Unsupported ruby packet type\n");
    }

    // A line locked by an in-flight Locked_RMW can only accept the
    // matching Locked_RMW_Write; everything else must wait.
    if (!pkt->req->isMemMgmt() &&
        m_controller->isBlocked(makeLineAddress(pkt->getAddr())) &&
        (primary_type != RubyRequestType_Locked_RMW_Write)) {
        return RequestStatus_Aliased;
    }

    // Create a new entry and issue the memory request if not aliased.
    RequestStatus status = insertRequest(pkt, primary_type, secondary_type);

    if (status != RequestStatus_Ready && status != RequestStatus_Aliased)
        return status;
    // Aliased requests wait behind the outstanding request to the same
    // line; only non-aliased requests are issued to the cache.
    if (status != RequestStatus_Aliased)
        issueRequest(pkt, secondary_type);

    return RequestStatus_Issued;
}
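// A condensed standalone model of the classification above: flags on the
// request pick a primary type (what this sequencer tracks) and a
// secondary type (what is issued to the protocol). Plain enums and flags
// stand in for gem5's packet and request types.
#include <utility>

enum class Type { LD, ST, IFETCH, LoadLinked, StoreConditional,
                  LockedRMWRead, LockedRMWWrite, Flush };

struct Flags { bool llsc, lockedRMW, write, read, instFetch, flush; };

std::pair<Type, Type>  // {primary, secondary}
classify(const Flags &f)
{
    if (f.llsc)
        return f.write ? std::pair{Type::StoreConditional, Type::ST}
                       : std::pair{Type::LoadLinked, Type::LD};
    if (f.lockedRMW)
        return f.write ? std::pair{Type::LockedRMWWrite, Type::ST}
                       : std::pair{Type::LockedRMWRead, Type::ST};
    if (f.write)     return {Type::ST, Type::ST};
    if (f.instFetch) return {Type::IFETCH, Type::IFETCH};
    if (f.read)      return {Type::LD, Type::LD};
    if (f.flush)     return {Type::Flush, Type::Flush};
    return {Type::ST, Type::ST}; // unreachable in this sketch
}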
void
Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
{
    assert(pkt != NULL);
    ContextID proc_id = pkt->req->hasContextId() ?
        pkt->req->contextId() : InvalidContextID;
    ContextID core_id = coreId();

    // If valid, copy the pc to the ruby request.
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    // Memory-management commands (e.g. TLBIs) build an unaddressed
    // request; everything else builds a normal addressed request.
    std::shared_ptr<RubyRequest> msg;
    if (pkt->req->isMemMgmt()) {
        msg = std::make_shared<RubyRequest>(clockEdge(),
                                            pc, secondary_type,
                                            RubyAccessMode_Supervisor, pkt,
                                            proc_id, core_id);

        DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s\n",
                 curTick(), m_version, "Seq", "Begin", "", "",
                 RubyRequestType_to_string(secondary_type));

        if (pkt->req->isTlbiCmd()) {
            msg->m_isTlbi = true;
            switch (secondary_type) {
              case RubyRequestType_TLBI_EXT_SYNC_COMP:
                msg->m_tlbiTransactionUid = pkt->req->getExtraData();
                break;
              case RubyRequestType_TLBI:
              case RubyRequestType_TLBI_SYNC:
                msg->m_tlbiTransactionUid = \
                    getCurrentUnaddressedTransactionID();
                break;
              default:
                panic("Unexpected TLBI RubyRequestType");
            }
            DPRINTF(RubySequencer, "Issuing TLBI %016x\n",
                    msg->m_tlbiTransactionUid);
        }
    } else {
        msg = std::make_shared<RubyRequest>(clockEdge(), pkt->getAddr(),
                                            pkt->getSize(), pc,
                                            secondary_type,
                                            RubyAccessMode_Supervisor, pkt,
                                            PrefetchBit_No, proc_id, core_id);

        if (pkt->isAtomicOp() &&
            ((secondary_type == RubyRequestType_ATOMIC_RETURN) ||
             (secondary_type == RubyRequestType_ATOMIC_NO_RETURN))) {
            // Hand the atomic-op functor to the protocol along with its
            // offset within the line.
            std::vector<std::pair<int,AtomicOpFunctor*>> atomicOps;
            atomicOps.push_back(std::make_pair<int,AtomicOpFunctor*>
                                (getOffset(pkt->getAddr()),
                                 pkt->getAtomicOp()));
            // ...
        }

        DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %s\n",
                 curTick(), m_version, "Seq", "Begin", "", "",
                 printAddress(msg->getPhysicalAddress()),
                 RubyRequestType_to_string(secondary_type));
    }

    // If the request originates in a transaction, mark the Ruby message
    // as transactional.
    if (pkt->isHtmTransactional()) {
        msg->m_htmFromTransaction = true;
        msg->m_htmTransactionUid = pkt->getHtmTransactionUid();
    }

    Tick latency = cyclesToTicks(
        m_controller->mandatoryQueueLatency(secondary_type));
    assert(latency > 0);

    m_mandatory_q_ptr->enqueue(msg, clockEdge(), latency);
}
template <class KEY, class VALUE>
std::ostream &
operator<<(std::ostream &out, const std::unordered_map<KEY, VALUE> &map)
{
    for (const auto &table_entry : map) {
        out << "[ " << table_entry.first << " =";
        for (const auto &seq_req : table_entry.second) {
            out << " " << RubyRequestType_to_string(seq_req.m_second_type);
        }
    }
    out << " ]";
    return out;
}
void
Sequencer::recordRequestType(SequencerRequestType requestType)
{
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            SequencerRequestType_to_string(requestType));
}
void
Sequencer::incrementUnaddressedTransactionCnt()
{
    m_unaddressedTransactionCnt++;
    // Limit the shifted counter to 32 bits; the top 32 bits of the
    // transaction ID are reserved for the sequencer's version.
    uint64_t aligned_txid = \
        m_unaddressedTransactionCnt << RubySystem::getBlockSizeBits();

    if (aligned_txid > 0xFFFFFFFFull) {
        m_unaddressedTransactionCnt = 0;
    }
}

uint64_t
Sequencer::getCurrentUnaddressedTransactionID() const
{
    return (
        uint64_t(m_version & 0xFFFFFFFF) << 32) |
        (m_unaddressedTransactionCnt << RubySystem::getBlockSizeBits());
}
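// Worked example (standalone) of the transaction ID layout above: the
// sequencer version occupies the upper 32 bits and the block-aligned
// counter the lower 32, so IDs are unique per sequencer until the counter
// wraps, which the increment path guards against. The block size here is
// an assumed 64 bytes.
#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main()
{
    std::uint64_t version = 3;    // this sequencer's id
    std::uint64_t count = 7;      // unaddressed transaction counter
    unsigned block_size_bits = 6; // assuming 64-byte lines

    std::uint64_t txid = ((version & 0xFFFFFFFF) << 32) |
                         (count << block_size_bits);
    std::printf("%016" PRIx64 "\n", txid); // 00000003000001c0

    // The increment path zeroes the counter before the low half overflows.
    if ((count << block_size_bits) > 0xFFFFFFFFull)
        count = 0;
}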