// Excerpts from gem5's src/mem/ruby/system/RubySystem.cc
// (Copyright (c) 2024 Pranith Kumar; Copyright (c) 2020 Inria)

#include "debug/RubyCacheTrace.hh"
#include "debug/RubySystem.hh"
// RubySystem::RubySystem(const Params &p) -- constructor initializer excerpt
      m_cache_recorder(NULL)
// RubySystem::registerMachineID(const MachineID &mach_id, Network *network)
    for (int idx = 0; idx < m_networks.size(); ++idx) {
        // ... set network_id = idx when m_networks[idx] matches `network` ...
    }

    fatal_if(network_id < 0,
             "Could not add MachineID %s. Network not found",
             MachineIDToString(mach_id).c_str());
// RubySystem::registerAbstractController(AbstractController *cntrl) excerpt
    MachineID mach_id = cntrl->getMachineID();

    // The machine must already have been registered with a network
    fatal_if(machineToNetwork.find(mach_id) == machineToNetwork.end(),
             "No machineID %s. Does not belong to a Ruby network?",
             MachineIDToString(mach_id).c_str());
// RubySystem::registerRequestorIDs() -- runs once the requestor list is final
    for (auto id = 0; id < params().system->maxRequestors(); ++id) {
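        // The loop body (elided here) populates the requestorToNetwork map
        // declared on this class, so functionalRead()/functionalWrite() can
        // route a packet's requestorId() to the Ruby network that services
        // it. This is an inference from the surrounding members, not the
        // verbatim body.
    }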
void
RubySystem::makeCacheRecorder(uint8_t *uncompressed_trace,
                              uint64_t cache_trace_size,
                              uint64_t block_size_bytes)
    // One RubyPort per controller, so the recorder can issue requests
    ruby_port_map.push_back(/* ... CPU sequencer port ... */);
    // ...
    ruby_port_map.push_back(/* ... alternate sequencer port ... */);

    if (ruby_port_ptr == NULL) {
        ruby_port_ptr = ruby_port_map[cntrl];
    }
    // ...
    assert(ruby_port_ptr != NULL);

    if (ruby_port_map[cntrl] == NULL) {
        ruby_port_map[cntrl] = ruby_port_ptr;
    } else {
        ruby_port_ptr = ruby_port_map[cntrl];
    }
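    // Net effect of the excerpt above: every entry of ruby_port_map ends up
    // non-NULL -- a controller that lacks its own sequencer port borrows the
    // most recently seen valid RubyPort, which is sufficient for replaying a
    // cache trace.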
// RubySystem::memWriteback() excerpt
    DPRINTF(RubyCacheTrace, "Recording Cache Trace\n");
    // ... record each controller's dirty lines into m_cache_recorder ...
    DPRINTF(RubyCacheTrace, "Cache Trace Complete\n");
    // ...
    DPRINTF(RubyCacheTrace, "Recording current tick %ld\n", curtick_original);
    // ...
    DPRINTF(RubyCacheTrace, "Event %s auto-deletes when descheduled,"
            " not recording\n", curr_head->name());
    // ...
    original_events.push_back(
        std::make_pair(curr_head, curr_head->when()));
    // ...
    DPRINTF(RubyCacheTrace, "Starting cache flush\n");
    // ...
    DPRINTF(RubyCacheTrace, "Cache flush complete\n");
    // Restore the events that were descheduled before the flush
    while (!original_events.empty()) {
        // ...
        original_events.pop_back();
        // ...
    }
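    // A sketch of that restore loop in full, assuming the (Event *, Tick)
    // pairs saved earlier and the schedule(Event *, Tick, bool global)
    // overload declared on this class:
    //     while (!original_events.empty()) {
    //         std::pair<Event *, Tick> event = original_events.back();
    //         original_events.pop_back();
    //         schedule(event.first, event.second, true);
    //     }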
    warn_once("Ruby memory writeback is experimental. Continuing simulation "
              "afterwards may not always work as intended.");
void
RubySystem::writeCompressedTrace(uint8_t *raw_data, std::string filename,
                                 uint64_t uncompressed_trace_size)
{
    // ...
    int fd = creat(thefile.c_str(), 0664);
    if (fd < 0) {
        // ...
        fatal("Can't open memory trace file '%s'\n", filename);
    }

    gzFile compressedMemory = gzdopen(fd, "wb");
    if (compressedMemory == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    if (gzwrite(compressedMemory, raw_data, uncompressed_trace_size) !=
        uncompressed_trace_size) {
        fatal("Write failed on memory trace file '%s'\n", filename);
    }

    if (gzclose(compressedMemory)) {
        fatal("Close failed on memory trace file '%s'\n", filename);
    }
}
// RubySystem::serialize(CheckpointOut &cp) excerpt
    if (m_cache_recorder == NULL) {
        fatal("Call memWriteback() before serialize() to create "
              "ruby trace");
    }

    // Aggregate the trace entries together into a single buffer
    uint8_t *raw_data = new uint8_t[4096];
    // ...
    std::string cache_trace_file = name() + ".cache.gz";
void
RubySystem::readCompressedTrace(std::string filename, uint8_t *&raw_data,
                                uint64_t &uncompressed_trace_size)
{
    gzFile compressedTrace;

    int fd = open(filename.c_str(), O_RDONLY);
    if (fd < 0) {
        // ...
        fatal("Unable to open trace file %s", filename);
    }

    compressedTrace = gzdopen(fd, "rb");
    if (compressedTrace == NULL) {
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);
    }

    raw_data = new uint8_t[uncompressed_trace_size];
    if (gzread(compressedTrace, raw_data, uncompressed_trace_size) <
        uncompressed_trace_size) {
        fatal("Unable to read complete trace from file %s\n", filename);
    }

    if (gzclose(compressedTrace)) {
        fatal("Failed to close cache trace file '%s'\n", filename);
    }
}
// RubySystem::unserialize(CheckpointIn &cp) excerpt
    uint8_t *uncompressed_trace = NULL;
    // ...
    std::string cache_trace_file;
    uint64_t cache_trace_size = 0;
    // ...
    cache_trace_file = cp.getCptDir() + "/" + cache_trace_file;
    DPRINTF(RubyCacheTrace, "Starting ruby cache warmup\n");
    network->resetStats();
#ifndef PARTIAL_FUNC_READS
// RubySystem::functionalRead(PacketPtr) -- whole-line variant
    AccessPermission access_perm = AccessPermission_NotPresent;
    // ...
    // Census of each controller's permission for the requested line
    unsigned int num_ro = 0;
    unsigned int num_rw = 0;
    unsigned int num_busy = 0;
    unsigned int num_maybe_stale = 0;
    unsigned int num_backing_store = 0;
    unsigned int num_invalid = 0;
    for (auto& cntrl : netCntrls[request_net_id]) {
        access_perm = cntrl->getAccessPermission(line_address);
        if (access_perm == AccessPermission_Read_Only) {
            num_ro++;
            if (ctrl_ro == nullptr) ctrl_ro = cntrl;
        }
        else if (access_perm == AccessPermission_Read_Write) {
            num_rw++;
            if (ctrl_rw == nullptr) ctrl_rw = cntrl;
        }
        else if (access_perm == AccessPermission_Busy)
            num_busy++;
        else if (access_perm == AccessPermission_Maybe_Stale)
            num_maybe_stale++;
        else if (access_perm == AccessPermission_Backing_Store) {
            num_backing_store++;
            if (ctrl_backing_store == nullptr)
                ctrl_backing_store = cntrl;
        }
        else if (access_perm == AccessPermission_Invalid ||
                 access_perm == AccessPermission_NotPresent)
            num_invalid++;
    }
    int num_controllers = netCntrls[request_net_id].size();
    if (num_invalid == (num_controllers - 1) && num_backing_store == 1) {
        // The only copy is in the backing store: read from it
        // ...
    } else if (num_ro > 0 || num_rw >= 1) {
        if (num_rw > 1) {
            warn("More than one Abstract Controller with RW permission for "
                 "addr: %#x on cacheline: %#x.", address, line_address);
        }
        // ...
        DPRINTF(RubySystem, "..." /* format elided */,
                num_maybe_stale, num_busy, num_ro, num_rw);
        // ... read from a RO or RW controller found above ...
    } else if ((num_busy + num_maybe_stale) > 0) {
        // A valid copy may be in transit: check controller buffers first
        DPRINTF(RubySystem, "..." /* head elided */
                "(num_maybe_stale=%d, num_busy = %d)\n",
                num_maybe_stale, num_busy);
        for (auto& cntrl : netCntrls[request_net_id]) {
            if (cntrl->functionalReadBuffers(pkt))
                return true;
        }
        // ... then the network buffers ...
        DPRINTF(RubySystem, "..." /* head elided */
                "(num_maybe_stale=%d, num_busy = %d)\n",
                num_maybe_stale, num_busy);
        if (network->functionalRead(pkt))
            return true;
    }
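    // Summary of the whole-line read policy above: prefer the unique backing
    // store copy, then any Read_Only/Read_Write holder, and only when every
    // controller is busy or stale go hunting through controller and network
    // message buffers for an in-flight copy.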
// PARTIAL_FUNC_READS variant of functionalRead: classify the controllers
    switch (ctrl->getAccessPermission(line_address)) {
      case AccessPermission_Read_Only:
        ctrl_ro.push_back(ctrl);
        break;
      case AccessPermission_Busy:
        ctrl_busy.push_back(ctrl);
        break;
      case AccessPermission_Read_Write:
        assert(ctrl_rw == nullptr);
        ctrl_rw = ctrl;
        break;
      case AccessPermission_Backing_Store:
        assert(ctrl_bs == nullptr);
        ctrl_bs = ctrl;
        break;
      case AccessPermission_Backing_Store_Busy:
        assert(ctrl_bs == nullptr);
        ctrl_bs = ctrl;
        ctrl_busy.push_back(ctrl);
        break;
      default:
        ctrl_others.push_back(ctrl);
        break;
    }

    DPRINTF(RubySystem, "..." /* head elided */
            "backing_store=%d\n",
            ctrl_ro.size(), ctrl_busy.size(),
            ctrl_rw != nullptr, ctrl_bs != nullptr);
    // Ask the owner (RW) controller first
    if (ctrl_rw != nullptr) {
        // ...
    }
    // ...
    for (auto ctrl : ctrl_ro)
        ctrl->functionalRead(line_address, pkt, bytes);

    // A busy controller or an incomplete line means a partial and/or dirty
    // copy may still be sitting in message buffers or the network
    if (!ctrl_busy.empty() || !bytes.isFull()) {
        DPRINTF(RubySystem, "..." /* head elided */
                "buffers and networks\n");
        if (ctrl_rw != nullptr)
            ctrl_rw->functionalReadBuffers(pkt, bytes);
        for (auto ctrl : ctrl_ro)
            ctrl->functionalReadBuffers(pkt, bytes);
        if (ctrl_bs != nullptr)
            ctrl_bs->functionalReadBuffers(pkt, bytes);
        for (auto ctrl : ctrl_busy) {
            ctrl->functionalRead(line_address, pkt, bytes);
            ctrl->functionalReadBuffers(pkt, bytes);
        }
        // ...
        network->functionalRead(pkt, bytes);
    }

    // Lastly, check the remaining controllers
    for (auto ctrl : ctrl_others) {
        ctrl->functionalRead(line_address, pkt, bytes);
        ctrl->functionalReadBuffers(pkt, bytes);
    }

    panic_if(!(bytes.isFull() || bytes.isEmpty()),
             "Inconsistent state on functional read for %#x %s\n",
             address, pkt->print());

    return bytes.isFull();
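    // Design note on this variant: `bytes` acts as a byte-granularity mask
    // of what has been supplied so far, so each functionalRead(line_address,
    // pkt, bytes) call may contribute only part of the line. The priority
    // order is owner (RW) -> RO sharers -> backing store and in-flight
    // message/network buffers -> everything else, and the panic_if above
    // enforces the all-or-nothing invariant at the end.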
// RubySystem::functionalWrite(Packet *pkt)
    AccessPermission access_perm = AccessPermission_NotPresent;
    // ...
    [[maybe_unused]] uint32_t num_functional_writes = 0;
    // ...
    for (auto& cntrl : netCntrls[request_net_id]) {
        num_functional_writes += cntrl->functionalWriteBuffers(pkt);

        access_perm = cntrl->getAccessPermission(line_addr);
        if (access_perm != AccessPermission_Invalid &&
            access_perm != AccessPermission_NotPresent) {
            num_functional_writes +=
                cntrl->functionalWrite(line_addr, pkt);
        }

        // Also update requests pending in the controller's sequencers
        if (cntrl->getCPUSequencer()) {
            num_functional_writes +=
                cntrl->getCPUSequencer()->functionalWrite(pkt);
        }
        if (cntrl->getDMASequencer()) {
            num_functional_writes +=
                cntrl->getDMASequencer()->functionalWrite(pkt);
        }
    }
    // ...
    num_functional_writes += network->functionalWrite(pkt);
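    // Unlike the read path, writes are applied unconditionally: every
    // controller with any valid permission, every CPU/DMA sequencer, and
    // every network buffer receives the new data, so no stale copy of the
    // line can survive a functional write.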