32 #include "debug/RubyCacheTrace.hh"
// Fragment (header not visible in this chunk): streams a one-line,
// human-readable summary of a trace record — controller id, data address,
// PC address, request type, and timestamp — bracketed as "[TraceRecord: ...]".
// NOTE(review): the stray "41"/"42"/"43" tokens below look like original
// source line numbers fused in by extraction, not program text — confirm
// against the canonical file before building.
41 out <<
"[TraceRecord: Node, " << m_cntrl_id <<
", "
42 << m_data_address <<
", " << m_pc_address <<
", "
43 << m_type <<
", Time: " << m_time <<
"]";
// Initializer-list fragment of (presumably) the CacheRecorder default
// constructor — the signature line is outside this chunk.
// Starts with no recorded trace buffer, zero trace size, and the cache
// block size taken from the global RubySystem configuration.
47 : m_uncompressed_trace(NULL),
48 m_uncompressed_trace_size(0),
49 m_block_size_bytes(
RubySystem::getBlockSizeBytes())
// Fragment of the parameterized constructor: adopts an existing
// uncompressed trace buffer (and its size), a sequencer map, and an
// explicit block size; read/flush counters start at zero.
// NOTE(review): at least one parameter line (between original lines 54
// and 56) is missing from this extraction — the seq_map parameter is
// referenced in the initializer list but not visible above it.
54 uint64_t uncompressed_trace_size,
56 uint64_t block_size_bytes)
57 : m_uncompressed_trace(uncompressed_trace),
58 m_uncompressed_trace_size(uncompressed_trace_size),
59 m_seq_map(seq_map), m_bytes_read(0), m_records_read(0),
60 m_records_flushed(0), m_block_size_bytes(block_size_bytes)
// Sanity check fragment: aborts the simulation if the block size used
// when the trace was recorded is smaller than the current configured
// block size (replaying would then straddle block boundaries).
// The panic() argument list is cut off by the extraction; the format
// string expects two integer arguments (%d, %d).
67 panic(
"Recorded cache block size (%d) < current block size (%d) !!",
// Fragments from the flush/fetch enqueue paths (likely two different
// methods — original lines 95-111 are non-contiguous here).
// Guard: a sequencer must have been selected before issuing requests.
95 assert(m_sequencer_ptr != NULL);
// Debug trace (RubyCacheTrace flag): log each record as it is flushed.
98 DPRINTF(RubyCacheTrace,
"Flushing %s\n", *rec);
// Debug trace: log each record as it is re-issued during warmup/replay.
111 DPRINTF(RubyCacheTrace,
"Issuing %s\n", *traceRecord);
// Dispatch fragment: builds a Request whose construction depends on the
// recorded Ruby request type — data load (LD), instruction fetch
// (IFETCH), or a fallback for other types. All three make_shared
// argument lists are truncated by the extraction (original lines
// 119-141 mostly missing), so the Request flags/masters used per branch
// cannot be confirmed from this chunk.
118 if (traceRecord->
m_type == RubyRequestType_LD) {
120 req = std::make_shared<Request>(
124 }
else if (traceRecord->
m_type == RubyRequestType_IFETCH) {
126 req = std::make_shared<Request>(
// Fallback branch (condition not visible in this chunk).
132 req = std::make_shared<Request>(
// Guard from a later method (original line 142): sequencer must exist.
142 assert(m_sequencer_ptr != NULL);
// Fragment of the record-aggregation loop: serializes each in-memory
// record into a caller-provided byte buffer (*buf), doubling the buffer
// capacity whenever the next record would not fit, and copying the
// already-written prefix into the new allocation.
// NOTE(review): fatal()'s format string uses "%s" but the (truncated)
// argument appears to be the numeric buffer size — likely should be
// "%d"/"%llu"; confirm against the full file before changing.
// NOTE(review): NULL / raw new(nothrow) predate nullptr/RAII style;
// left untouched because the surrounding code is incomplete here.
176 uint64_t current_size = 0;
// Loop header split across lines by extraction: plain index loop over
// the record count 'size'.
179 for (
int i = 0;
i < size; ++
i) {
// Grow-by-doubling when the next record overflows current capacity.
181 if (current_size + record_size > total_size) {
182 uint8_t* new_buf =
new (nothrow) uint8_t[total_size * 2];
183 if (new_buf == NULL) {
184 fatal(
"Unable to allocate buffer of size %s\n",
187 total_size = total_size * 2;
// Preserve the bytes already serialized into the old buffer.
188 uint8_t* old_buf = *buf;
189 memcpy(new_buf, old_buf, current_size);
// Append the i-th record's bytes at the current write offset.
195 memcpy(&((*buf)[current_size]),
m_records[
i], record_size);
196 current_size += record_size;