52 #include "debug/RubyCacheTrace.hh"
53 #include "debug/RubySystem.hh"
// NOTE(review): fragment of a constructor initializer list — the cache-trace
// recorder starts out null until a checkpoint/warmup operation creates it.
81 m_cache_recorder(NULL)
// Fragment: scan all registered networks by index; the fatal_if below rejects
// a MachineID whose network lookup failed (network_id stayed negative).
119 for (
int idx = 0; idx <
m_networks.size(); ++idx) {
// NOTE(review): loop index is int vs. size() (unsigned) — benign here but a
// sign-compare warning candidate; confirm against full source.
125 fatal_if(network_id < 0,
"Could not add MachineID %s. Network not found",
// Fragment: a controller being registered must already belong to some Ruby
// network; its MachineID is the lookup key.
148 MachineID mach_id = cntrl->getMachineID();
152 "No machineID %s. Does not belong to a Ruby network?",
// Fragment: iterate over every possible requestor ID in the enclosing System.
// NOTE(review): `auto id = 0` deduces int while maxRequestors() is presumably
// unsigned — confirm type against the full file.
163 for (
auto id = 0;
id <
params().system->maxRequestors(); ++
id) {
// Fragment: cache-recorder construction. Picks the first controller that has
// a sequencer as the default, then back-fills map entries for controllers
// that lack one, so every controller can replay trace accesses.
177 uint64_t cache_trace_size,
178 uint64_t block_size_bytes)
// Adopt the first non-null sequencer found as the fallback.
185 if (sequencer_ptr == NULL) {
186 sequencer_ptr = sequencer_map[cntrl];
// At least one controller must provide a sequencer.
190 assert(sequencer_ptr != NULL);
// Controllers without their own sequencer share the fallback.
193 if (sequencer_map[cntrl] == NULL) {
194 sequencer_map[cntrl] = sequencer_ptr;
205 sequencer_map, block_size_bytes);
// Fragment: memory writeback via cache-trace recording. The visible flow:
// record the trace, save the current tick and every pending event (so the
// event queue can be restored afterwards), flush the caches, then restore.
214 DPRINTF(RubyCacheTrace,
"Recording Cache Trace\n");
219 DPRINTF(RubyCacheTrace,
"Cache Trace Complete\n");
// Remember the simulated tick so it can be restored after the flush.
230 DPRINTF(RubyCacheTrace,
"Recording current tick %ld\n", curtick_original);
// Auto-delete events would be destroyed by descheduling, so they are
// deliberately not saved for re-scheduling.
238 DPRINTF(RubyCacheTrace,
"Event %s auto-deletes when descheduled,"
239 " not recording\n", curr_head->
name());
// Save (event, scheduled-tick) pairs for restoration below.
241 original_events.push_back(
242 std::make_pair(curr_head, curr_head->
when()));
248 DPRINTF(RubyCacheTrace,
"Starting cache flush\n");
251 DPRINTF(RubyCacheTrace,
"Cache flush complete\n");
// Re-schedule every event recorded before the flush (reverse order).
264 while (!original_events.empty()) {
267 original_events.pop_back();
// The flush perturbs simulator state; warn the user once.
281 warn_once(
"Ruby memory writeback is experimental. Continuing simulation "
282 "afterwards may not always work as intended.");
// Fragment: write raw_data as a gzip stream. POSIX creat() supplies the fd,
// zlib's gzdopen() wraps it; every failure path is fatal().
290 uint64_t uncompressed_trace_size)
295 int fd = creat(thefile.c_str(), 0664);
298 fatal(
"Can't open memory trace file '%s'\n", filename);
301 gzFile compressedMemory = gzdopen(
fd,
"wb");
302 if (compressedMemory == NULL)
303 fatal(
"Insufficient memory to allocate compression state for %s\n",
// NOTE(review): gzwrite() returns int; comparing it against a uint64_t size
// promotes the int — traces >= 2 GiB would misbehave. TODO confirm upstream.
306 if (gzwrite(compressedMemory, raw_data, uncompressed_trace_size) !=
307 uncompressed_trace_size) {
308 fatal(
"Write failed on memory trace file '%s'\n", filename);
// gzclose() returns non-zero on error (it also closes the underlying fd).
311 if (gzclose(compressedMemory)) {
312 fatal(
"Close failed on memory trace file '%s'\n", filename);
// Fragment: checkpoint serialization. Requires that memWriteback() has
// already produced a cache trace; output goes to "<name>.cache.gz".
330 fatal(
"Call memWriteback() before serialize() to create"
// NOTE(review): raw owning new[] — presumably freed after the compressed
// write completes; confirm a matching delete[] exists in the full function.
335 uint8_t *raw_data =
new uint8_t[4096];
338 std::string cache_trace_file =
name() +
".cache.gz";
// Fragment: read a gzip-compressed trace back into a freshly allocated
// buffer (raw_data is an out-parameter; caller takes ownership).
358 uint64_t &uncompressed_trace_size)
361 gzFile compressedTrace;
// Open read-only; gzdopen() takes over the descriptor.
364 int fd = open(filename.c_str(), O_RDONLY);
367 fatal(
"Unable to open trace file %s", filename);
370 compressedTrace = gzdopen(
fd,
"rb");
371 if (compressedTrace == NULL) {
372 fatal(
"Insufficient memory to allocate compression state for %s\n",
376 raw_data =
new uint8_t[uncompressed_trace_size];
// NOTE(review): gzread() returns int; '<' against a uint64_t size promotes
// the int — short-read detection breaks for >= 2 GiB traces. TODO confirm.
377 if (gzread(compressedTrace, raw_data, uncompressed_trace_size) <
378 uncompressed_trace_size) {
379 fatal(
"Unable to read complete trace from file %s\n", filename);
382 if (gzclose(compressedTrace)) {
383 fatal(
"Failed to close cache trace file '%s'\n", filename);
// Fragment: checkpoint restore — the trace filename stored in the checkpoint
// is made absolute by prefixing the checkpoint directory.
390 uint8_t *uncompressed_trace = NULL;
398 std::string cache_trace_file;
399 uint64_t cache_trace_size = 0;
403 cache_trace_file = cp.
getCptDir() +
"/" + cache_trace_file;
// Fragment: startup-time cache warmup from a restored trace begins here.
441 DPRINTF(RubyCacheTrace,
"Starting ruby cache warmup\n");
// Fragment: statistics reset is delegated to the network object(s).
486 network->resetStats();
// Fragment: functional (timing-less) read, non-partial variant. Strategy
// visible here: take a census of every controller's access permission for
// the cache line, then read from the best holder (RW > RO > backing store),
// falling back to message buffers and the network for in-flight data.
491 #ifndef PARTIAL_FUNC_READS
498 AccessPermission access_perm = AccessPermission_NotPresent;
// Census counters, one per permission class.
502 unsigned int num_ro = 0;
503 unsigned int num_rw = 0;
504 unsigned int num_busy = 0;
505 unsigned int num_maybe_stale = 0;
506 unsigned int num_backing_store = 0;
507 unsigned int num_invalid = 0;
// First pass: classify every controller on the requestor's network and
// remember one representative controller per interesting class.
520 for (
auto& cntrl :
netCntrls[request_net_id]) {
521 access_perm = cntrl-> getAccessPermission(line_address);
522 if (access_perm == AccessPermission_Read_Only){
524 if (ctrl_ro ==
nullptr) ctrl_ro = cntrl;
526 else if (access_perm == AccessPermission_Read_Write){
528 if (ctrl_rw ==
nullptr) ctrl_rw = cntrl;
530 else if (access_perm == AccessPermission_Busy)
532 else if (access_perm == AccessPermission_Maybe_Stale)
534 else if (access_perm == AccessPermission_Backing_Store) {
// Backing store is the reader of last resort, hence tracked separately.
540 if (ctrl_backing_store ==
nullptr)
541 ctrl_backing_store = cntrl;
543 else if (access_perm == AccessPermission_Invalid ||
544 access_perm == AccessPermission_NotPresent)
556 int num_controllers =
netCntrls[request_net_id].size();
// Only the backing store holds the line: read from it directly.
557 if (num_invalid == (num_controllers - 1) && num_backing_store == 1) {
561 }
else if (num_ro > 0 || num_rw >= 1) {
// More than one RW holder is a coherence anomaly worth flagging.
566 warn(
"More than one Abstract Controller with RW permission for "
567 "addr: %#x on cacheline: %#x.", address, line_address);
575 num_maybe_stale, num_busy, num_ro, num_rw);
585 }
else if ((num_busy + num_maybe_stale) > 0) {
// Line is in transit: search controller message buffers, then the network.
590 "(num_maybe_stale=%d, num_busy = %d)\n",
591 num_maybe_stale, num_busy);
592 for (
auto& cntrl :
netCntrls[request_net_id]) {
593 if (cntrl->functionalReadBuffers(pkt))
597 "(num_maybe_stale=%d, num_busy = %d)\n",
598 num_maybe_stale, num_busy);
600 if (network->functionalRead(pkt))
// Fragment: functional read, PARTIAL_FUNC_READS variant. Controllers are
// bucketed by permission via a switch, then consulted in decreasing
// authority (RW, RO, buffers, backing store, busy, others) while a byte
// mask ("bytes") accumulates which bytes of the packet have been satisfied.
624 switch(ctrl->getAccessPermission(line_address)) {
625 case AccessPermission_Read_Only:
626 ctrl_ro.push_back(ctrl);
628 case AccessPermission_Busy:
629 ctrl_busy.push_back(ctrl);
// Coherence invariant: at most one RW holder and one backing store.
631 case AccessPermission_Read_Write:
632 assert(ctrl_rw ==
nullptr);
635 case AccessPermission_Backing_Store:
636 assert(ctrl_bs ==
nullptr);
// A busy backing store is both the store and a busy participant.
639 case AccessPermission_Backing_Store_Busy:
640 assert(ctrl_bs ==
nullptr);
642 ctrl_busy.push_back(ctrl);
645 ctrl_others.push_back(ctrl);
651 "backing_store=%d\n",
652 ctrl_ro.size(), ctrl_busy.size(),
653 ctrl_rw !=
nullptr, ctrl_bs !=
nullptr);
// Prefer the (unique) RW holder; RO holders may each contribute bytes.
658 if (ctrl_rw !=
nullptr) {
666 for (
auto ctrl : ctrl_ro)
667 ctrl->functionalRead(line_address, pkt, bytes);
// If bytes remain unsatisfied, trawl message buffers and the network.
675 if (!ctrl_busy.empty() || !bytes.isFull()) {
677 "buffers and networks\n");
678 if (ctrl_rw !=
nullptr)
680 for (
auto ctrl : ctrl_ro)
681 ctrl->functionalReadBuffers(pkt, bytes);
682 if (ctrl_bs !=
nullptr)
684 for (
auto ctrl : ctrl_busy) {
685 ctrl->functionalRead(line_address, pkt, bytes);
686 ctrl->functionalReadBuffers(pkt, bytes);
689 network->functionalRead(pkt, bytes);
691 for (
auto ctrl : ctrl_others) {
692 ctrl->functionalRead(line_address, pkt, bytes);
693 ctrl->functionalReadBuffers(pkt, bytes);
// A read must end all-or-nothing: a partially-filled mask means the
// protocol state is inconsistent.
697 panic_if(!(bytes.isFull() || bytes.isEmpty()),
698 "Inconsistent state on functional read for %#x %s\n",
701 return bytes.isFull();
// Fragment: functional write — unlike the read path, the write is broadcast:
// every controller holding the line (any non-invalid permission), every
// message buffer, every sequencer, and the network all get the new data so
// no stale copy survives anywhere.
714 AccessPermission access_perm = AccessPermission_NotPresent;
// Counter kept only for debugging; hence [[maybe_unused]].
718 [[maybe_unused]] uint32_t num_functional_writes = 0;
725 for (
auto& cntrl :
netCntrls[request_net_id]) {
// In-flight messages first, then the cache array itself.
726 num_functional_writes += cntrl->functionalWriteBuffers(pkt);
728 access_perm = cntrl->getAccessPermission(line_addr);
729 if (access_perm != AccessPermission_Invalid &&
730 access_perm != AccessPermission_NotPresent) {
731 num_functional_writes +=
732 cntrl->functionalWrite(line_addr, pkt);
// Sequencers (CPU and DMA) may also hold buffered copies of the data.
737 if (cntrl->getCPUSequencer()) {
738 num_functional_writes +=
739 cntrl->getCPUSequencer()->functionalWrite(pkt);
741 if (cntrl->getDMASequencer()) {
742 num_functional_writes +=
743 cntrl->getDMASequencer()->functionalWrite(pkt);
// Finally, messages currently traversing the interconnect.
748 num_functional_writes += network->functionalWrite(pkt);