#include "debug/RubyCacheTrace.hh"
#include "debug/RubySystem.hh"

RubySystem::RubySystem(const Params *p)
    : ClockedObject(p), m_access_backing_store(p->access_backing_store),
      m_cache_recorder(NULL)
{
    // ...
}
void
RubySystem::makeCacheRecorder(uint8_t *uncompressed_trace,
                              uint64_t cache_trace_size,
                              uint64_t block_size_bytes)
{
    // ...
    if (sequencer_ptr == NULL) {
        sequencer_ptr = sequencer_map[cntrl];
    }
    // ...
    assert(sequencer_ptr != NULL);
    // ...
    if (sequencer_map[cntrl] == NULL) {
        sequencer_map[cntrl] = sequencer_ptr;
    }
    // ...
    m_cache_recorder = new CacheRecorder(uncompressed_trace, cache_trace_size,
                                         sequencer_map, block_size_bytes);
}
void
RubySystem::memWriteback()
{
    // Make a trace of the current cache contents so we know what to flush.
    DPRINTF(RubyCacheTrace, "Recording Cache Trace\n");
    // ...
    DPRINTF(RubyCacheTrace, "Cache Trace Complete\n");

    // Save the current tick so it can be restored after the flush.
    Tick curtick_original = curTick();
    DPRINTF(RubyCacheTrace, "Recording current tick %ld\n", curtick_original);

    // Deschedule all prior events, recording the tick each was scheduled at
    // so it can be rescheduled afterwards.  Auto-delete events cannot be
    // restored, so they are only descheduled.
    list<pair<Event *, Tick> > original_events;
    // ...
        if (curr_head->isAutoDelete()) {
            DPRINTF(RubyCacheTrace, "Event %s auto-deletes when descheduled,"
                    " not recording\n", curr_head->name());
        } else {
            original_events.push_back(make_pair(curr_head, curr_head->when()));
        }
    // ...

    DPRINTF(RubyCacheTrace, "Starting cache flush\n");
    // ...
    DPRINTF(RubyCacheTrace, "Cache flush complete\n");

    // Restore curTick and put the saved events back on the queue.
    while (!original_events.empty()) {
        // ...
        original_events.pop_back();
    }

    warn_once("Ruby memory writeback is experimental. Continuing simulation "
              "afterwards may not always work as intended.");
}
void
RubySystem::writeCompressedTrace(uint8_t *raw_data, string filename,
                                 uint64_t uncompressed_trace_size)
{
    // Create the checkpoint file for the memory
    string thefile = filename;

    int fd = creat(thefile.c_str(), 0664);
    if (fd < 0) {
        perror("creat");
        fatal("Can't open memory trace file '%s'\n", filename);
    }

    gzFile compressedMemory = gzdopen(fd, "wb");
    if (compressedMemory == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    if (gzwrite(compressedMemory, raw_data, uncompressed_trace_size) !=
        uncompressed_trace_size) {
        fatal("Write failed on memory trace file '%s'\n", filename);
    }

    if (gzclose(compressedMemory)) {
        fatal("Close failed on memory trace file '%s'\n", filename);
    }
    delete [] raw_data;
}
void
RubySystem::serialize(CheckpointOut &cp) const
{
    // ...
    // A valid trace must already exist; otherwise restored memory is stale.
    if (m_cache_recorder == NULL) {
        fatal("Call memWriteback() before serialize() to create ruby trace");
    }

    // Aggregate the trace entries into a single array and compress it.
    uint8_t *raw_data = new uint8_t[4096];
    // ...
    string cache_trace_file = name() + ".cache.gz";
    // ...
}
void
RubySystem::readCompressedTrace(string filename, uint8_t *&raw_data,
                                uint64_t &uncompressed_trace_size)
{
    // Read the trace file
    gzFile compressedTrace;

    int fd = open(filename.c_str(), O_RDONLY);
    if (fd < 0) {
        perror("open");
        fatal("Unable to open trace file %s", filename);
    }

    compressedTrace = gzdopen(fd, "rb");
    if (compressedTrace == NULL) {
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);
    }

    raw_data = new uint8_t[uncompressed_trace_size];
    if (gzread(compressedTrace, raw_data, uncompressed_trace_size) <
            uncompressed_trace_size) {
        fatal("Unable to read complete trace from file %s\n", filename);
    }

    if (gzclose(compressedTrace)) {
        fatal("Failed to close cache trace file '%s'\n", filename);
    }
}
void
RubySystem::unserialize(CheckpointIn &cp)
{
    uint8_t *uncompressed_trace = NULL;
    // ...
    string cache_trace_file;
    uint64_t cache_trace_size = 0;

    UNSERIALIZE_SCALAR(cache_trace_file);
    UNSERIALIZE_SCALAR(cache_trace_size);
    cache_trace_file = cp.cptDir + "/" + cache_trace_file;
    // ...
}

void
RubySystem::startup()
{
    // ...
    DPRINTF(RubyCacheTrace, "Starting ruby cache warmup\n");
    // ...
}
bool
RubySystem::functionalRead(PacketPtr pkt)
{
    Addr address(pkt->getAddr());
    Addr line_address = makeLineAddress(address);

    AccessPermission access_perm = AccessPermission_NotPresent;
    int num_controllers = m_abs_cntrl_vec.size();

    unsigned int num_ro = 0;
    unsigned int num_rw = 0;
    unsigned int num_busy = 0;
    unsigned int num_backing_store = 0;
    unsigned int num_invalid = 0;

    // Count how many controllers hold the line in each permission state.
    for (unsigned int i = 0; i < num_controllers; ++i) {
        access_perm = m_abs_cntrl_vec[i]->getAccessPermission(line_address);
        if (access_perm == AccessPermission_Read_Only)
            num_ro++;
        else if (access_perm == AccessPermission_Read_Write)
            num_rw++;
        else if (access_perm == AccessPermission_Busy)
            num_busy++;
        else if (access_perm == AccessPermission_Backing_Store)
            // Backing_Store marks the copy held in memory for Broadcast/Snoop
            // protocols, where memory does not know whether its copy is
            // exclusive.
            num_backing_store++;
        else if (access_perm == AccessPermission_Invalid ||
                 access_perm == AccessPermission_NotPresent)
            num_invalid++;
    }

    // Read from the Backing_Store only when no cached copy exists, since the
    // backing store may be stale with respect to the caches.
    if (num_invalid == (num_controllers - 1) && num_backing_store == 1) {
        DPRINTF(RubySystem, "only copy in Backing_Store memory, read from it\n");
        for (unsigned int i = 0; i < num_controllers; ++i) {
            access_perm = m_abs_cntrl_vec[i]->getAccessPermission(line_address);
            if (access_perm == AccessPermission_Backing_Store) {
                m_abs_cntrl_vec[i]->functionalRead(line_address, pkt);
                return true;
            }
        }
    } else if (num_ro > 0 || num_rw >= 1) {
        if (num_rw > 1) {
            warn("More than one Abstract Controller with RW permission for "
                 "addr: %#x on cacheline: %#x.", address, line_address);
        }
        DPRINTF(RubySystem, "num_busy = %d, num_ro = %d, num_rw = %d\n",
                num_busy, num_ro, num_rw);
        // Any valid RO or RW copy suffices for a functional read.
        for (unsigned int i = 0; i < num_controllers; ++i) {
            access_perm = m_abs_cntrl_vec[i]->getAccessPermission(line_address);
            if (access_perm == AccessPermission_Read_Only ||
                access_perm == AccessPermission_Read_Write) {
                m_abs_cntrl_vec[i]->functionalRead(line_address, pkt);
                return true;
            }
        }
    }

    return false;
}
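A small worked example of the counting logic above, for a hypothetical system with four controllers:

// perms = {Invalid, NotPresent, Invalid, Backing_Store}
//   -> num_invalid == 3 == num_controllers - 1 and num_backing_store == 1,
//      so the read is serviced from the Backing_Store copy.
// perms = {Invalid, Read_Write, Invalid, Backing_Store}
//   -> num_rw == 1, so the read comes from the cached Read_Write copy and
//      the possibly stale Backing_Store copy is ignored.
// perms = {Invalid, Busy, Invalid, Backing_Store}
//   -> neither branch matches, so functionalRead() returns false and the
//      access cannot be completed functionally.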
bool
RubySystem::functionalWrite(PacketPtr pkt)
{
    // ...
    AccessPermission access_perm = AccessPermission_NotPresent;
    // ...
    for (unsigned int i = 0; i < num_controllers; ++i) {
        num_functional_writes +=
            m_abs_cntrl_vec[i]->functionalWriteBuffers(pkt);

        access_perm = m_abs_cntrl_vec[i]->getAccessPermission(line_addr);
        if (access_perm != AccessPermission_Invalid &&
            access_perm != AccessPermission_NotPresent) {
            num_functional_writes +=
                m_abs_cntrl_vec[i]->functionalWrite(line_addr, pkt);
        }
    }
    // ...
}
RubySystem *
RubySystemParams::create()
{
    return new RubySystem(this);
}