#include "debug/RubyCacheTrace.hh"
#include "debug/RubySystem.hh"

RubySystem::RubySystem(const Params *p)
    : ClockedObject(p), m_access_backing_store(p->access_backing_store),
      m_cache_recorder(NULL)
void
RubySystem::makeCacheRecorder(uint8_t *uncompressed_trace,
                              uint64_t cache_trace_size,
                              uint64_t block_size_bytes)
{
    // Record each controller's sequencer and remember the first valid one;
    // controllers without a sequencer of their own borrow that one.
    if (sequencer_ptr == NULL) {
        sequencer_ptr = sequencer_map[cntrl];
    }
    ...
    assert(sequencer_ptr != NULL);
    if (sequencer_map[cntrl] == NULL) {
        sequencer_map[cntrl] = sequencer_ptr;
    }
    ...
    m_cache_recorder = new CacheRecorder(uncompressed_trace, cache_trace_size,
                                         sequencer_map, block_size_bytes);
}
void
RubySystem::memWriteback()
{
    m_cooldown_enabled = true;

    // Record a trace of the current cache contents so we know what to flush.
    DPRINTF(RubyCacheTrace, "Recording Cache Trace\n");
    makeCacheRecorder(NULL, 0, getBlockSizeBytes());
    ...
    DPRINTF(RubyCacheTrace, "Cache Trace Complete\n");

    // Save the current tick so it can be restored after the flush.
    Tick curtick_original = curTick();
    DPRINTF(RubyCacheTrace, "Recording current tick %ld\n", curtick_original);

    // Deschedule every pending event, recording when it was scheduled so it
    // can be put back later; auto-delete events cannot be restored.
    list<pair<Event*, Tick> > original_events;
    while (!eventq->empty()) {
        Event *curr_head = eventq->getHead();
        if (curr_head->isAutoDelete()) {
            DPRINTF(RubyCacheTrace, "Event %s auto-deletes when descheduled,"
                    " not recording\n", curr_head->name());
        } else {
            original_events.push_back(make_pair(curr_head, curr_head->when()));
        }
        eventq->deschedule(curr_head);
    }

    // Flush the caches with only the flush events on the queue.
    DPRINTF(RubyCacheTrace, "Starting cache flush\n");
    enqueueRubyEvent(curTick());
    simulate();
    DPRINTF(RubyCacheTrace, "Cache flush complete\n");
    ...
    // Restore curTick and re-schedule the events that were originally queued.
    setCurTick(curtick_original);
    while (!original_events.empty()) {
        pair<Event*, Tick> event = original_events.back();
        eventq->schedule(event.first, event.second);
        original_events.pop_back();
    }
    ...
    warn_once("Ruby memory writeback is experimental. Continuing simulation "
              "afterwards may not always work as intended.");
}
void
RubySystem::writeCompressedTrace(uint8_t *raw_data, string filename,
                                 uint64_t uncompressed_trace_size)
{
    // Create the trace file inside the checkpoint directory.
    string thefile = CheckpointIn::dir() + "/" + filename;

    int fd = creat(thefile.c_str(), 0664);
    if (fd < 0) {
        fatal("Can't open memory trace file '%s'\n", filename);
    }

    gzFile compressedMemory = gzdopen(fd, "wb");
    if (compressedMemory == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    if (gzwrite(compressedMemory, raw_data, uncompressed_trace_size) !=
        uncompressed_trace_size) {
        fatal("Write failed on memory trace file '%s'\n", filename);
    }

    if (gzclose(compressedMemory)) {
        fatal("Close failed on memory trace file '%s'\n", filename);
    }
}
void
RubySystem::serialize(CheckpointOut &cp) const
{
    ...
    // A valid trace must already exist, otherwise memory will not be
    // up to date when the checkpoint is restored.
    if (m_cache_recorder == NULL) {
        fatal("Call memWriteback() before serialize() to create ruby trace");
    }

    // Aggregate the trace records into one buffer and write it compressed.
    uint8_t *raw_data = new uint8_t[4096];
    ...
    string cache_trace_file = name() + ".cache.gz";
    ...
    SERIALIZE_SCALAR(cache_trace_file);
    SERIALIZE_SCALAR(cache_trace_size);
}
void
RubySystem::readCompressedTrace(string filename, uint8_t *&raw_data,
                                uint64_t &uncompressed_trace_size)
{
    // Open the trace file and attach a gzip stream to it.
    gzFile compressedTrace;
    int fd = open(filename.c_str(), O_RDONLY);
    if (fd < 0) {
        fatal("Unable to open trace file %s", filename);
    }

    compressedTrace = gzdopen(fd, "rb");
    if (compressedTrace == NULL) {
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);
    }

    raw_data = new uint8_t[uncompressed_trace_size];
    if (gzread(compressedTrace, raw_data, uncompressed_trace_size) <
            uncompressed_trace_size) {
        fatal("Unable to read complete trace from file %s\n", filename);
    }

    if (gzclose(compressedTrace)) {
        fatal("Failed to close cache trace file '%s'\n", filename);
    }
}
void
RubySystem::unserialize(CheckpointIn &cp)
{
    uint8_t *uncompressed_trace = NULL;
    ...
    string cache_trace_file;
    uint64_t cache_trace_size = 0;

    UNSERIALIZE_SCALAR(cache_trace_file);
    UNSERIALIZE_SCALAR(cache_trace_size);
    cache_trace_file = cp.getCptDir() + "/" + cache_trace_file;
    ...
}
    // In startup(), when restoring state from a checkpoint:
    DPRINTF(RubyCacheTrace, "Starting ruby cache warmup\n");
bool
RubySystem::functionalRead(PacketPtr pkt)
{
    Addr address(pkt->getAddr());
    Addr line_address = makeLineAddress(address);
    AccessPermission access_perm = AccessPermission_NotPresent;
    int num_controllers = m_abs_cntrl_vec.size();

    unsigned int num_ro = 0;
    unsigned int num_rw = 0;
    unsigned int num_busy = 0;
    unsigned int num_maybe_stale = 0;
    unsigned int num_backing_store = 0;
    unsigned int num_invalid = 0;
    AbstractController *ctrl_backing_store = nullptr;
    ...
    // Count how many controllers hold the line, and with what permission.
    for (unsigned int i = 0; i < num_controllers; ++i) {
        access_perm = m_abs_cntrl_vec[i]->getAccessPermission(line_address);
        if (access_perm == AccessPermission_Read_Only) {
            num_ro++;
            ...
        }
        else if (access_perm == AccessPermission_Read_Write) {
            num_rw++;
            ...
        }
        else if (access_perm == AccessPermission_Busy)
            num_busy++;
        else if (access_perm == AccessPermission_Maybe_Stale)
            num_maybe_stale++;
        else if (access_perm == AccessPermission_Backing_Store) {
            // Backing_Store marks the copy held by memory itself.
            num_backing_store++;
            if (ctrl_backing_store == nullptr)
                ctrl_backing_store = m_abs_cntrl_vec[i];
        }
        else if (access_perm == AccessPermission_Invalid ||
                 access_perm == AccessPermission_NotPresent)
            num_invalid++;
    }

    // Read from the Backing_Store only when no cache holds the line; a
    // cached RO/RW copy is always preferred because memory may be stale.
    if (num_invalid == (num_controllers - 1) && num_backing_store == 1) {
        ctrl_backing_store->functionalRead(line_address, pkt);
        return true;
    } else if (num_ro > 0 || num_rw >= 1) {
        if (num_rw > 1) {
            warn("More than one Abstract Controller with RW permission for "
                 "addr: %#x on cacheline: %#x.", address, line_address);
        }
        DPRINTF(RubySystem, "num_maybe_stale=%d, num_busy=%d, num_ro=%d, "
                "num_rw=%d\n",
                num_maybe_stale, num_busy, num_ro, num_rw);
        // Any valid RO or RW copy will satisfy the functional read.
        ...
        return true;
    } else if ((num_busy + num_maybe_stale) > 0) {
        // Only transient or possibly-stale copies exist; probe the
        // controllers' and the network's message buffers instead.
        DPRINTF(RubySystem, "Controllers functionalRead lookup "
                "(num_maybe_stale=%d, num_busy = %d)\n",
                num_maybe_stale, num_busy);
        for (unsigned int i = 0; i < num_controllers; ++i) {
            ...
        }
        DPRINTF(RubySystem, "network functionalRead lookup "
                "(num_maybe_stale=%d, num_busy = %d)\n",
                num_maybe_stale, num_busy);
        ...
    }

    return false;
}
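As a quick sanity check of the branch conditions above, the backing-store path is taken only when every other controller reports the line as Invalid or NotPresent. A toy standalone version of just that predicate (not gem5 code):

#include <cassert>

// With N controllers, read from the Backing_Store copy only when the
// remaining N-1 controllers do not hold the line at all.
bool readFromBackingStore(int num_controllers, int num_invalid,
                          int num_backing_store)
{
    return num_invalid == (num_controllers - 1) && num_backing_store == 1;
}

int main()
{
    assert(readFromBackingStore(4, 3, 1));     // line only in memory
    assert(!readFromBackingStore(4, 2, 1));    // some cache still holds it
    return 0;
}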
bool
RubySystem::functionalWrite(PacketPtr pkt)
{
    Addr addr(pkt->getAddr());
    Addr line_addr = makeLineAddress(addr);
    AccessPermission access_perm = AccessPermission_NotPresent;
    int num_controllers = m_abs_cntrl_vec.size();
    uint32_t M5_VAR_USED num_functional_writes = 0;

    // A write must reach every copy: each controller's message buffers,
    // plus its cached copy if the line is present there.
    for (unsigned int i = 0; i < num_controllers; ++i) {
        num_functional_writes +=
            m_abs_cntrl_vec[i]->functionalWriteBuffers(pkt);
        access_perm = m_abs_cntrl_vec[i]->getAccessPermission(line_addr);
        if (access_perm != AccessPermission_Invalid &&
            access_perm != AccessPermission_NotPresent) {
            num_functional_writes +=
                m_abs_cntrl_vec[i]->functionalWrite(line_addr, pkt);
        }
        ...
    }

    // Messages in flight in the network are updated as well.
    num_functional_writes += m_network->functionalWrite(pkt);
    ...
    return true;
}
RubySystem *
RubySystemParams::create()
{
    return new RubySystem(this);
}