#include "debug/RubyCacheTrace.hh"
#include "debug/RubySystem.hh"
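// Controllers register themselves with the RubySystem. Every controller must
// report the same protocol, since a single RubySystem cannot mix coherence
// protocols.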
113 "All controllers in a system must use the same protocol. %s != %s",
114 protocolInfo->getName().c_str(), cntl_protocol->getName().c_str()
int network_id = -1;
for (int idx = 0; idx < m_networks.size(); ++idx) {
    if (m_networks[idx].get() == network) {
        network_id = idx;
    }
}

fatal_if(network_id < 0,
         "Could not add MachineID %s. Network not found",
         MachineIDToString(mach_id).c_str());
MachineID mach_id = cntrl->getMachineID();

// These are set up in the Network constructor and should exist.
fatal_if(!machineToNetwork.count(mach_id),
         "No machineID %s. Does not belong to a Ruby network?",
         MachineIDToString(mach_id).c_str());
for (auto id = 0; id < params().system->maxRequestors(); ++id) {
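// makeCacheRecorder() pairs each controller with a RubyPort (its CPU
// sequencer when present, otherwise its DMA sequencer) and builds the
// CacheRecorder used to record and replay cache traces.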
void
RubySystem::makeCacheRecorder(uint8_t *uncompressed_trace,
                              uint64_t cache_trace_size,
                              uint64_t block_size_bytes)
{
    std::vector<RubyPort*> ruby_port_map;
    RubyPort *ruby_port_ptr = NULL;

    for (int cntrl = 0; cntrl < m_abs_cntrl_vec.size(); cntrl++) {
        if (m_abs_cntrl_vec[cntrl]->getCPUSequencer()) {
            ruby_port_map.push_back(
                (RubyPort*)m_abs_cntrl_vec[cntrl]->getCPUSequencer());
        } else {
            ruby_port_map.push_back(
                (RubyPort*)m_abs_cntrl_vec[cntrl]->getDMASequencer());
        }
        if (ruby_port_ptr == NULL) {
            ruby_port_ptr = ruby_port_map[cntrl];
        }
    }

    assert(ruby_port_ptr != NULL);

    for (int cntrl = 0; cntrl < m_abs_cntrl_vec.size(); cntrl++) {
        // Controllers without a sequencer of their own reuse the first port.
        if (ruby_port_map[cntrl] == NULL) {
            ruby_port_map[cntrl] = ruby_port_ptr;
        } else {
            ruby_port_ptr = ruby_port_map[cntrl];
        }
    }

    m_cache_recorder = new CacheRecorder(uncompressed_trace, cache_trace_size,
                                         ruby_port_map, block_size_bytes,
                                         m_block_size_bytes);
}
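// memWriteback() records a trace of all dirty blocks and then flushes them
// to memory: it deschedules every pending event, simulates a cache
// "cooldown" until the flush completes, and finally restores the original
// tick and event queue.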
DPRINTF(RubyCacheTrace, "Recording Cache Trace\n");
DPRINTF(RubyCacheTrace, "Cache Trace Complete\n");
// Save the current tick so it can be restored after the flush.
Tick curtick_original = curTick();
DPRINTF(RubyCacheTrace, "Recording current tick %ld\n", curtick_original);
// Deschedule every event on the queue, remembering the tick at which each
// was scheduled so it can be restored afterwards.
std::list<std::pair<Event*, Tick>> original_events;
while (!eventq->empty()) {
    Event *curr_head = eventq->getHead();
    if (curr_head->isAutoDelete()) {
        DPRINTF(RubyCacheTrace, "Event %s auto-deletes when descheduled,"
                " not recording\n", curr_head->name());
    } else {
        original_events.push_back(
            std::make_pair(curr_head, curr_head->when()));
    }
    eventq->deschedule(curr_head);
}
// Schedule an event to start the cache flush and simulate until it is done.
DPRINTF(RubyCacheTrace, "Starting cache flush\n");
enqueueRubyEvent(curTick());
simulate();
DPRINTF(RubyCacheTrace, "Cache flush complete\n");
// Deschedule any events left over from the flush.
while (!eventq->empty()) {
    eventq->deschedule(eventq->getHead());
}
// Restore the original events at their recorded ticks.
while (!original_events.empty()) {
    std::pair<Event*, Tick> event = original_events.back();
    eventq->schedule(event.first, event.second);
    original_events.pop_back();
}
warn_once("Ruby memory writeback is experimental. Continuing simulation "
          "afterwards may not always work as intended.");
void
RubySystem::writeCompressedTrace(uint8_t *raw_data, std::string filename,
                                 uint64_t uncompressed_trace_size)
{
    std::string thefile = CheckpointIn::dir() + "/" + filename.c_str();

    int fd = creat(thefile.c_str(), 0664);
    if (fd < 0) {
        perror("creat");
        fatal("Can't open memory trace file '%s'\n", filename);
    }
    gzFile compressedMemory = gzdopen(fd, "wb");
    if (compressedMemory == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    // gzwrite returns the number of uncompressed bytes actually written.
    if (gzwrite(compressedMemory, raw_data, uncompressed_trace_size) !=
        uncompressed_trace_size) {
        fatal("Write failed on memory trace file '%s'\n", filename);
    }

    if (gzclose(compressedMemory)) {
        fatal("Close failed on memory trace file '%s'\n", filename);
    }
if (m_cache_recorder == NULL) {
    fatal("Call memWriteback() before serialize() to create "
          "ruby trace");
}

// Aggregate the trace entries together into a single array.
uint8_t *raw_data = new uint8_t[4096];
uint64_t cache_trace_size =
    m_cache_recorder->aggregateRecords(&raw_data, 4096);
std::string cache_trace_file = name() + ".cache.gz";
void
RubySystem::readCompressedTrace(std::string filename, uint8_t *&raw_data,
                                uint64_t &uncompressed_trace_size)
{
    gzFile compressedTrace;

    int fd = open(filename.c_str(), O_RDONLY);
    if (fd < 0) {
        perror("open");
        fatal("Unable to open trace file %s", filename);
    }

    compressedTrace = gzdopen(fd, "rb");
    if (compressedTrace == NULL) {
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);
    }

    raw_data = new uint8_t[uncompressed_trace_size];
    // gzread returns the number of uncompressed bytes read.
    if (gzread(compressedTrace, raw_data, uncompressed_trace_size) <
            uncompressed_trace_size) {
        fatal("Unable to read complete trace from file %s\n", filename);
    }

    if (gzclose(compressedTrace)) {
        fatal("Failed to close cache trace file '%s'\n", filename);
    }
}
uint8_t *uncompressed_trace = NULL;

std::string cache_trace_file;
uint64_t cache_trace_size = 0;

UNSERIALIZE_SCALAR(cache_trace_file);
UNSERIALIZE_SCALAR(cache_trace_size);
cache_trace_file = cp.getCptDir() + "/" + cache_trace_file;
DPRINTF(RubyCacheTrace, "Starting ruby cache warmup\n");
eventq->replaceHead(eventq_head);
network->resetStats();
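// simpleFunctionalRead(): scan every controller on the requestor's network,
// count how many hold the line in each access-permission state, and then
// pick the most authoritative copy to service the functional read.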
AccessPermission access_perm = AccessPermission_NotPresent;

unsigned int num_ro = 0;
unsigned int num_rw = 0;
unsigned int num_busy = 0;
unsigned int num_maybe_stale = 0;
unsigned int num_backing_store = 0;
unsigned int num_invalid = 0;
for (auto& cntrl : netCntrls[request_net_id]) {
    access_perm = cntrl->getAccessPermission(line_address);
    if (access_perm == AccessPermission_Read_Only) {
        num_ro++;
        if (ctrl_ro == nullptr) ctrl_ro = cntrl;
    }
    else if (access_perm == AccessPermission_Read_Write) {
        num_rw++;
        if (ctrl_rw == nullptr) ctrl_rw = cntrl;
    }
    else if (access_perm == AccessPermission_Busy)
        num_busy++;
    else if (access_perm == AccessPermission_Maybe_Stale) {
        // Prefer the Maybe_Stale controller with the lowest (best)
        // functional-read priority; equal priorities are ambiguous.
        int priority = cntrl->functionalReadPriority();
        if (ctrl_ms == nullptr) {
            ctrl_ms = cntrl;
            current_priority = priority;
        } else {
            if (ctrl_ms == nullptr || priority < current_priority) {
                ctrl_ms = cntrl;
                current_priority = priority;
            } else if (priority == current_priority) {
                warn("More than one Abstract Controller with "
                     "Maybe_Stale permission and same priority (%d) "
                     "for addr: %#x on cacheline: %#x.", priority,
                     address, line_address);
            }
        }
        num_maybe_stale++;
    }
    else if (access_perm == AccessPermission_Backing_Store) {
        num_backing_store++;
        if (ctrl_backing_store == nullptr)
            ctrl_backing_store = cntrl;
    }
    else if (access_perm == AccessPermission_Invalid ||
             access_perm == AccessPermission_NotPresent)
        num_invalid++;
}
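// Backing_Store models memory in broadcast/snooping protocols, where the
// memory controller cannot know whether its copy is up to date. Read from it
// only when no cache holds the line; otherwise prefer an RO/RW copy, since
// the backing store may be stale.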
int num_controllers = netCntrls[request_net_id].size();
if (num_invalid == (num_controllers - 1) && num_backing_store == 1) {
    DPRINTF(RubySystem,
            "only copy in Backing_Store memory, read from it\n");
    ctrl_backing_store->functionalRead(line_address, pkt);
    return true;
} else if (num_ro > 0 || num_rw >= 1) {
    if (num_rw > 1) {
        // Multiple writable copies: the first one found is returned.
        warn("More than one Abstract Controller with RW permission for "
             "addr: %#x on cacheline: %#x.", address, line_address);
    }
    // Any valid RO or RW copy suffices for a functional read.
    DPRINTF(RubySystem, "num_maybe_stale=%d, num_busy=%d, num_ro=%d, "
            "num_rw=%d\n",
            num_maybe_stale, num_busy, num_ro, num_rw);
} else if ((num_busy + num_maybe_stale) > 0) {
    // No stable copy exists, so the line may be in transit: check the
    // controllers' message buffers first, then the network.
    DPRINTF(RubySystem, "Controllers functionalRead lookup "
            "(num_maybe_stale=%d, num_busy = %d)\n",
            num_maybe_stale, num_busy);
    for (auto& cntrl : netCntrls[request_net_id]) {
        if (cntrl->functionalReadBuffers(pkt))
            return true;
    }
    DPRINTF(RubySystem, "Network functionalRead lookup "
            "(num_maybe_stale=%d, num_busy = %d)\n",
            num_maybe_stale, num_busy);
    if (network->functionalRead(pkt))
        return true;
}
// Fall back to the best Maybe_Stale copy if nothing better was found.
if (ctrl_ms != nullptr) {
    ctrl_ms->functionalRead(line_address, pkt);
    return true;
}
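// partialFunctionalRead(): for protocols where a line may exist only as
// partial and/or dirty copies. Classify every controller by access
// permission, read the stable copies first, and keep merging bytes (tracked
// by a WriteMask) until the whole line has been assembled.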
switch (ctrl->getAccessPermission(line_address)) {
  case AccessPermission_Read_Only:
    ctrl_ro.push_back(ctrl);
    break;
  case AccessPermission_Busy:
    ctrl_busy.push_back(ctrl);
    break;
  case AccessPermission_Read_Write:
    assert(ctrl_rw == nullptr);
    ctrl_rw = ctrl;
    break;
  case AccessPermission_Backing_Store:
    assert(ctrl_bs == nullptr);
    ctrl_bs = ctrl;
    break;
  case AccessPermission_Backing_Store_Busy:
    assert(ctrl_bs == nullptr);
    ctrl_bs = ctrl;
    ctrl_busy.push_back(ctrl);
    break;
  default:
    ctrl_others.push_back(ctrl);
    break;
}

DPRINTF(RubySystem, "num_ro=%d, num_busy=%d, has_rw=%d, "
        "backing_store=%d\n",
        ctrl_ro.size(), ctrl_busy.size(),
        ctrl_rw != nullptr, ctrl_bs != nullptr);
// Tracks which bytes of the line have been filled in so far.
WriteMask bytes;
if (ctrl_rw != nullptr) {
    ctrl_rw->functionalRead(line_address, pkt, bytes);
    // A RW copy holds the whole line; nothing else is needed if it is full.
    if (bytes.isFull())
        return true;
}

for (auto ctrl : ctrl_ro)
    ctrl->functionalRead(line_address, pkt, bytes);
// If any controller is busy, or bytes are still missing, a partial and/or
// dirty copy of the line may be in a message buffer or in the network.
if (!ctrl_busy.empty() || !bytes.isFull()) {
    DPRINTF(RubySystem, "Reading message buffers and networks\n");
    if (ctrl_rw != nullptr)
        ctrl_rw->functionalReadBuffers(pkt, bytes);
    for (auto ctrl : ctrl_ro)
        ctrl->functionalReadBuffers(pkt, bytes);
    if (ctrl_bs != nullptr)
        ctrl_bs->functionalReadBuffers(pkt, bytes);
    for (auto ctrl : ctrl_busy) {
        ctrl->functionalRead(line_address, pkt, bytes);
        ctrl->functionalReadBuffers(pkt, bytes);
    }
    network->functionalRead(pkt, bytes);
    for (auto ctrl : ctrl_others) {
        ctrl->functionalRead(line_address, pkt, bytes);
        ctrl->functionalReadBuffers(pkt, bytes);
    }
}

panic_if(!(bytes.isFull() || bytes.isEmpty()),
         "Inconsistent state on functional read for %#x %s\n",
         address, pkt->print());
AccessPermission access_perm = AccessPermission_NotPresent;

[[maybe_unused]] uint32_t num_functional_writes = 0;
for (auto& cntrl : netCntrls[request_net_id]) {
    num_functional_writes += cntrl->functionalWriteBuffers(pkt);

    access_perm = cntrl->getAccessPermission(line_addr);
    if (access_perm != AccessPermission_Invalid &&
        access_perm != AccessPermission_NotPresent) {
        num_functional_writes +=
            cntrl->functionalWrite(line_addr, pkt);
    }

    // Also update requests pending in any sequencer associated with the
    // controller.
    if (cntrl->getCPUSequencer()) {
        num_functional_writes +=
            cntrl->getCPUSequencer()->functionalWrite(pkt);
    }
    if (cntrl->getDMASequencer()) {
        num_functional_writes +=
            cntrl->getDMASequencer()->functionalWrite(pkt);
    }
}

num_functional_writes += network->functionalWrite(pkt);