52#include "debug/RubyCacheTrace.hh"
53#include "debug/RubySystem.hh"
81 m_cache_recorder(NULL)
119 for (
int idx = 0; idx <
m_networks.size(); ++idx) {
125 fatal_if(network_id < 0,
"Could not add MachineID %s. Network not found",
148 MachineID mach_id = cntrl->getMachineID();
152 "No machineID %s. Does not belong to a Ruby network?",
163 for (
auto id = 0;
id <
params().system->maxRequestors(); ++
id) {
177 uint64_t cache_trace_size,
178 uint64_t block_size_bytes)
185 if (sequencer_ptr == NULL) {
186 sequencer_ptr = sequencer_map[cntrl];
190 assert(sequencer_ptr != NULL);
193 if (sequencer_map[cntrl] == NULL) {
194 sequencer_map[cntrl] = sequencer_ptr;
205 sequencer_map, block_size_bytes);
214 DPRINTF(RubyCacheTrace,
"Recording Cache Trace\n");
219 DPRINTF(RubyCacheTrace,
"Cache Trace Complete\n");
230 DPRINTF(RubyCacheTrace,
"Recording current tick %ld\n", curtick_original);
238 DPRINTF(RubyCacheTrace,
"Event %s auto-deletes when descheduled,"
239 " not recording\n", curr_head->
name());
241 original_events.push_back(
242 std::make_pair(curr_head, curr_head->
when()));
248 DPRINTF(RubyCacheTrace,
"Starting cache flush\n");
251 DPRINTF(RubyCacheTrace,
"Cache flush complete\n");
264 while (!original_events.empty()) {
267 original_events.pop_back();
281 warn_once(
"Ruby memory writeback is experimental. Continuing simulation "
282 "afterwards may not always work as intended.");
290 uint64_t uncompressed_trace_size)
295 int fd = creat(thefile.c_str(), 0664);
298 fatal(
"Can't open memory trace file '%s'\n", filename);
301 gzFile compressedMemory = gzdopen(
fd,
"wb");
302 if (compressedMemory == NULL)
303 fatal(
"Insufficient memory to allocate compression state for %s\n",
306 if (gzwrite(compressedMemory, raw_data, uncompressed_trace_size) !=
307 uncompressed_trace_size) {
308 fatal(
"Write failed on memory trace file '%s'\n", filename);
311 if (gzclose(compressedMemory)) {
312 fatal(
"Close failed on memory trace file '%s'\n", filename);
330 fatal(
"Call memWriteback() before serialize() to create"
335 uint8_t *raw_data =
new uint8_t[4096];
338 std::string cache_trace_file =
name() +
".cache.gz";
358 uint64_t &uncompressed_trace_size)
361 gzFile compressedTrace;
364 int fd = open(filename.c_str(), O_RDONLY);
367 fatal(
"Unable to open trace file %s", filename);
370 compressedTrace = gzdopen(
fd,
"rb");
371 if (compressedTrace == NULL) {
372 fatal(
"Insufficient memory to allocate compression state for %s\n",
376 raw_data =
new uint8_t[uncompressed_trace_size];
377 if (gzread(compressedTrace, raw_data, uncompressed_trace_size) <
378 uncompressed_trace_size) {
379 fatal(
"Unable to read complete trace from file %s\n", filename);
382 if (gzclose(compressedTrace)) {
383 fatal(
"Failed to close cache trace file '%s'\n", filename);
390 uint8_t *uncompressed_trace = NULL;
398 std::string cache_trace_file;
399 uint64_t cache_trace_size = 0;
403 cache_trace_file = cp.
getCptDir() +
"/" + cache_trace_file;
441 DPRINTF(RubyCacheTrace,
"Starting ruby cache warmup\n");
486 network->resetStats();
491#ifndef PARTIAL_FUNC_READS
498 AccessPermission access_perm = AccessPermission_NotPresent;
502 unsigned int num_ro = 0;
503 unsigned int num_rw = 0;
504 unsigned int num_busy = 0;
505 unsigned int num_maybe_stale = 0;
506 unsigned int num_backing_store = 0;
507 unsigned int num_invalid = 0;
520 for (
auto& cntrl :
netCntrls[request_net_id]) {
521 access_perm = cntrl-> getAccessPermission(line_address);
522 if (access_perm == AccessPermission_Read_Only){
524 if (ctrl_ro ==
nullptr) ctrl_ro = cntrl;
526 else if (access_perm == AccessPermission_Read_Write){
528 if (ctrl_rw ==
nullptr) ctrl_rw = cntrl;
530 else if (access_perm == AccessPermission_Busy)
532 else if (access_perm == AccessPermission_Maybe_Stale)
534 else if (access_perm == AccessPermission_Backing_Store) {
540 if (ctrl_backing_store ==
nullptr)
541 ctrl_backing_store = cntrl;
543 else if (access_perm == AccessPermission_Invalid ||
544 access_perm == AccessPermission_NotPresent)
556 int num_controllers =
netCntrls[request_net_id].size();
557 if (num_invalid == (num_controllers - 1) && num_backing_store == 1) {
561 }
else if (num_ro > 0 || num_rw >= 1) {
566 warn(
"More than one Abstract Controller with RW permission for "
567 "addr: %#x on cacheline: %#x.", address, line_address);
575 num_maybe_stale, num_busy, num_ro, num_rw);
585 }
else if ((num_busy + num_maybe_stale) > 0) {
590 "(num_maybe_stale=%d, num_busy = %d)\n",
591 num_maybe_stale, num_busy);
592 for (
auto& cntrl :
netCntrls[request_net_id]) {
593 if (cntrl->functionalReadBuffers(pkt))
597 "(num_maybe_stale=%d, num_busy = %d)\n",
598 num_maybe_stale, num_busy);
600 if (network->functionalRead(pkt))
624 switch(ctrl->getAccessPermission(line_address)) {
625 case AccessPermission_Read_Only:
626 ctrl_ro.push_back(ctrl);
628 case AccessPermission_Busy:
629 ctrl_busy.push_back(ctrl);
631 case AccessPermission_Read_Write:
632 assert(ctrl_rw ==
nullptr);
635 case AccessPermission_Backing_Store:
636 assert(ctrl_bs ==
nullptr);
639 case AccessPermission_Backing_Store_Busy:
640 assert(ctrl_bs ==
nullptr);
642 ctrl_busy.push_back(ctrl);
645 ctrl_others.push_back(ctrl);
650 DPRINTF(RubySystem,
"num_ro=%d, num_busy=%d , has_rw=%d, "
651 "backing_store=%d\n",
652 ctrl_ro.size(), ctrl_busy.size(),
653 ctrl_rw !=
nullptr, ctrl_bs !=
nullptr);
658 if (ctrl_rw !=
nullptr) {
666 for (
auto ctrl : ctrl_ro)
675 if (!ctrl_busy.empty() || !bytes.isFull()) {
676 DPRINTF(RubySystem,
"Reading from remaining controllers, "
677 "buffers and networks\n");
678 if (ctrl_rw !=
nullptr)
680 for (
auto ctrl : ctrl_ro)
681 ctrl->functionalReadBuffers(pkt, bytes);
682 if (ctrl_bs !=
nullptr)
684 for (
auto ctrl : ctrl_busy) {
685 ctrl->functionalRead(line_address, pkt, bytes);
686 ctrl->functionalReadBuffers(pkt, bytes);
689 network->functionalRead(pkt, bytes);
691 for (
auto ctrl : ctrl_others) {
692 ctrl->functionalRead(line_address, pkt, bytes);
693 ctrl->functionalReadBuffers(pkt, bytes);
697 panic_if(!(bytes.isFull() || bytes.isEmpty()),
698 "Inconsistent state on functional read for %#x %s\n",
701 return bytes.isFull();
714 AccessPermission access_perm = AccessPermission_NotPresent;
718 [[maybe_unused]] uint32_t num_functional_writes = 0;
725 for (
auto& cntrl :
netCntrls[request_net_id]) {
726 num_functional_writes += cntrl->functionalWriteBuffers(pkt);
728 access_perm = cntrl->getAccessPermission(line_addr);
729 if (access_perm != AccessPermission_Invalid &&
730 access_perm != AccessPermission_NotPresent) {
731 num_functional_writes +=
732 cntrl->functionalWrite(line_addr, pkt);
737 if (cntrl->getCPUSequencer()) {
738 num_functional_writes +=
739 cntrl->getCPUSequencer()->functionalWrite(pkt);
741 if (cntrl->getDMASequencer()) {
742 num_functional_writes +=
743 cntrl->getDMASequencer()->functionalWrite(pkt);
748 num_functional_writes += network->functionalWrite(pkt);
The ClockedObject class extends the SimObject with a clock and accessor functions to relate ticks to cycles.
ClockedObjectParams Params
Parameters of ClockedObject.
Cycles curCycle() const
Determine the current cycle, corresponding to a tick aligned to a clock edge.
void resetClock() const
Reset the object's clock using the current global tick value.
void setCurTick(Tick newVal)
EventQueue * eventq
A pointer to this object's event queue.
Event * replaceHead(Event *s)
function for replacing the head of the event queue, so that a different set of events can run without disturbing the original set of events.
virtual std::string name() const
A Packet is used to encapsulate a transfer between two objects in the memory system (e.g. the L1 and L2 cache).
RequestorID requestorId() const
MachineID getMachineID() const
virtual bool functionalReadBuffers(PacketPtr &)=0
These functions are used by the ruby system to read/write the data blocks that exist within the controller.
virtual void functionalRead(const Addr &addr, PacketPtr)
uint64_t aggregateRecords(uint8_t **data, uint64_t size)
uint64_t getNumRecords() const
void enqueueNextFetchRequest()
Function for fetching warming up the memory and the caches.
void enqueueNextFlushRequest()
Function for flushing the memory contents of the caches to the main memory.
void enqueueRubyEvent(Tick tick)
static bool m_warmup_enabled
static bool getWarmupEnabled()
bool functionalWrite(Packet *ptr)
static uint32_t getBlockSizeBytes()
void unserialize(CheckpointIn &cp) override
Unserialize an object.
void init() override
init() is called after all C++ SimObjects have been created and all ports are connected.
static bool m_cooldown_enabled
std::vector< std::map< uint32_t, AbstractController * > > m_abstract_controls
void registerMachineID(const MachineID &mach_id, Network *network)
static bool m_randomization
static unsigned m_systems_to_warmup
std::unordered_map< RequestorID, unsigned > requestorToNetwork
static void writeCompressedTrace(uint8_t *raw_data, std::string file, uint64_t uncompressed_trace_size)
RubySystem(const Params &p)
void registerNetwork(Network *)
void startup() override
startup() is the final initialization call before simulation.
std::unordered_map< MachineID, unsigned > machineToNetwork
static void readCompressedTrace(std::string filename, uint8_t *&raw_data, uint64_t &uncompressed_trace_size)
void registerAbstractController(AbstractController *)
CacheRecorder * m_cache_recorder
static uint32_t m_block_size_bits
std::vector< std::unique_ptr< Network > > m_networks
void drainResume() override
Resume execution after a successful drain.
static uint32_t m_block_size_bytes
std::vector< AbstractController * > m_abs_cntrl_vec
void resetStats() override
Callback to reset stats.
memory::SimpleMemory * m_phys_mem
std::unordered_map< unsigned, std::vector< AbstractController * > > netCntrls
static uint32_t m_memory_size_bits
bool functionalRead(Packet *ptr)
void makeCacheRecorder(uint8_t *uncompressed_trace, uint64_t cache_trace_size, uint64_t block_size_bytes)
void serialize(CheckpointOut &cp) const override
Serialize an object.
void registerRequestorIDs()
void memWriteback() override
Write back dirty buffers to memory using functional writes.
static bool getCooldownEnabled()
static constexpr std::enable_if_t< std::is_integral_v< T >, int > floorLog2(T x)
static constexpr bool isPowerOf2(const T &n)
void schedule(Event *event, Tick when, bool global=false)
Schedule the given event on this queue.
void deschedule(Event *event)
Deschedule the specified event.
bool isAutoDelete() const
The function returns true if the object is automatically deleted after the event is processed.
virtual const std::string name() const
bool empty() const
Returns true if no events are queued.
Tick when() const
Get the time that the event is scheduled.
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condition is true and allows the programmer to specify diagnostic printout.
#define fatal(...)
This implements a cprintf based fatal() function.
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true and allows the programmer to specify diagnostic printout.
#define UNSERIALIZE_OPT_SCALAR(scalar)
const std::string getCptDir()
static std::string dir()
Get the current checkout directory name.
const Params & params() const
virtual void resetStats()
Callback to reset stats.
std::string MachineIDToString(MachineID machine)
Addr makeLineAddress(Addr addr)
void registerDumpCallback(const std::function< void()> &callback)
Register a callback that should be called whenever statistics are about to be dumped.
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
Tick curTick()
The universal simulation clock.
std::ostream CheckpointOut
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
uint64_t Tick
Tick count type.
GlobalSimLoopExitEvent * simulate(Tick num_cycles)
#define UNSERIALIZE_SCALAR(scalar)
#define SERIALIZE_SCALAR(scalar)
SimpleMemory declaration.
Declaration of Statistics objects.