52 #include "debug/RubyCacheTrace.hh"
53 #include "debug/RubySystem.hh"
// NOTE(review): constructor member-initializer fragment — the rest of the
// init list is outside this view. The cache recorder starts out NULL and is
// presumably created later, on demand — TODO confirm against full file.
81 m_cache_recorder(NULL)
// Fragment: scan the registered networks by index (only scattered lines of
// this registration logic are visible; comments describe just what is shown).
119 for (
int idx = 0; idx <
m_networks.size(); ++idx) {
// Hard-stop if the lookup above produced no network for this controller.
125 fatal_if(network_id < 0,
"Could not add MachineID %s. Network not found",
// Cache the controller's MachineID for the checks/messages below.
148 MachineID mach_id = cntrl->getMachineID();
152 "No machineID %s. Does not belong to a Ruby network?",
// Walk every possible requestor ID known to the enclosing System.
// NOTE(review): `auto id = 0` deduces plain int while maxRequestors()
// is presumably unsigned — TODO confirm signedness in the full file.
163 for (
auto id = 0;
id <
params().system->maxRequestors(); ++
id) {
// Fragment of cache-recorder construction: tail of the parameter list, then
// the logic that gives sequencer-less controllers a shared fallback sequencer.
177 uint64_t cache_trace_size,
178 uint64_t block_size_bytes)
// Remember the first non-NULL sequencer encountered in the map.
185 if (sequencer_ptr == NULL) {
186 sequencer_ptr = sequencer_map[cntrl];
// At least one controller must have supplied a sequencer.
190 assert(sequencer_ptr != NULL);
// Controllers without their own sequencer reuse the fallback pointer.
193 if (sequencer_map[cntrl] == NULL) {
194 sequencer_map[cntrl] = sequencer_ptr;
// The populated map (and block size) feed the recorder's constructor.
205 sequencer_map, block_size_bytes);
// Fragment of the memory-writeback path: record a cache trace, save the
// event queue, flush the caches, then restore the saved events.
214 DPRINTF(RubyCacheTrace,
"Recording Cache Trace\n");
219 DPRINTF(RubyCacheTrace,
"Cache Trace Complete\n");
// Remember the tick we started at so it can be restored afterwards.
223 DPRINTF(RubyCacheTrace,
"Recording current tick %ld\n", curtick_original);
// Events that auto-delete on deschedule cannot be safely stashed and
// re-scheduled, so they are skipped (and logged) rather than recorded.
231 DPRINTF(RubyCacheTrace,
"Event %s auto-deletes when descheduled,"
232 " not recording\n", curr_head->
name());
// Save (event, scheduled-tick) pairs so the queue can be rebuilt later.
234 original_events.push_back(
235 std::make_pair(curr_head, curr_head->
when()));
241 DPRINTF(RubyCacheTrace,
"Starting cache flush\n");
244 DPRINTF(RubyCacheTrace,
"Cache flush complete\n");
// Drain the saved list, re-scheduling each event (restore loop body is
// outside this view).
257 while (!original_events.empty()) {
260 original_events.pop_back();
274 warn_once(
"Ruby memory writeback is experimental. Continuing simulation "
275 "afterwards may not always work as intended.");
// Fragment of the compressed-trace writer: create the output file, wrap the
// descriptor in a gzip stream, write the raw buffer, and close. Every
// failure path is fatal (checkpointing cannot proceed without the trace).
283 uint64_t uncompressed_trace_size)
// 0664: owner/group read-write, world read.
288 int fd = creat(thefile.c_str(), 0664);
291 fatal(
"Can't open memory trace file '%s'\n", filename);
// gzdopen takes ownership of fd; gzclose below releases it.
294 gzFile compressedMemory = gzdopen(
fd,
"wb");
295 if (compressedMemory == NULL)
296 fatal(
"Insufficient memory to allocate compression state for %s\n",
// NOTE(review): gzwrite returns int, but it is compared against a
// uint64_t — for traces >= 2 GiB this comparison truncates/converts;
// TODO confirm whether trace sizes can ever reach that range.
299 if (gzwrite(compressedMemory, raw_data, uncompressed_trace_size) !=
300 uncompressed_trace_size) {
301 fatal(
"Write failed on memory trace file '%s'\n", filename);
// Nonzero gzclose return indicates a flush/close error.
304 if (gzclose(compressedMemory)) {
305 fatal(
"Close failed on memory trace file '%s'\n", filename);
// Fragment of serialization: the cache trace must already exist (created by
// memWriteback()) or checkpointing aborts.
323 fatal(
"Call memWriteback() before serialize() to create ruby trace");
// NOTE(review): a fixed 4096-byte buffer is allocated here with raw new[];
// its use and deallocation are outside this view — TODO confirm it is
// freed (or handed off) on every path.
327 uint8_t *raw_data =
new uint8_t[4096];
// The trace file is named after this SimObject, with a .cache.gz suffix.
330 std::string cache_trace_file =
name() +
".cache.gz";
// Fragment of the compressed-trace reader: open the file, wrap it in a gzip
// stream, allocate the destination buffer, read, and close. All failures
// are fatal (an unreadable trace makes the checkpoint unusable).
350 uint64_t &uncompressed_trace_size)
353 gzFile compressedTrace;
// Raw POSIX open; the fd is handed to gzdopen below, which owns it.
356 int fd = open(filename.c_str(), O_RDONLY);
359 fatal(
"Unable to open trace file %s", filename);
362 compressedTrace = gzdopen(
fd,
"rb");
363 if (compressedTrace == NULL) {
364 fatal(
"Insufficient memory to allocate compression state for %s\n",
// Caller receives ownership of this buffer via the raw_data out-param.
368 raw_data =
new uint8_t[uncompressed_trace_size];
// NOTE(review): gzread returns int, compared `<` against a uint64_t —
// same >= 2 GiB signedness concern as the writer; TODO confirm.
369 if (gzread(compressedTrace, raw_data, uncompressed_trace_size) <
370 uncompressed_trace_size) {
371 fatal(
"Unable to read complete trace from file %s\n", filename);
// Nonzero gzclose return indicates an error closing the stream.
374 if (gzclose(compressedTrace)) {
375 fatal(
"Failed to close cache trace file '%s'\n", filename);
// Fragment of unserialization: locals for the trace buffer/metadata, then
// the trace path is made relative to the checkpoint directory.
382 uint8_t *uncompressed_trace = NULL;
390 std::string cache_trace_file;
391 uint64_t cache_trace_size = 0;
// Prefix the (checkpointed) file name with the checkpoint directory.
395 cache_trace_file = cp.
getCptDir() +
"/" + cache_trace_file;
// Fragment: announce cache warmup, and (later, in a separate routine per the
// line gap) reset the network's statistics.
433 DPRINTF(RubyCacheTrace,
"Starting ruby cache warmup\n");
478 network->resetStats();
// Fragment of the legacy (non-PARTIAL_FUNC_READS) functional-read path:
// classify every controller on the requesting network by its access
// permission for the line, then pick a source for the data.
483 #ifndef PARTIAL_FUNC_READS
490 AccessPermission access_perm = AccessPermission_NotPresent;
// Tallies of controllers per permission class, used to decide below.
494 unsigned int num_ro = 0;
495 unsigned int num_rw = 0;
496 unsigned int num_busy = 0;
497 unsigned int num_maybe_stale = 0;
498 unsigned int num_backing_store = 0;
499 unsigned int num_invalid = 0;
// First pass: count permissions and remember one representative
// controller for each of the RO / RW / backing-store classes.
512 for (
auto& cntrl :
netCntrls[request_net_id]) {
513 access_perm = cntrl-> getAccessPermission(line_address);
514 if (access_perm == AccessPermission_Read_Only){
516 if (ctrl_ro ==
nullptr) ctrl_ro = cntrl;
518 else if (access_perm == AccessPermission_Read_Write){
520 if (ctrl_rw ==
nullptr) ctrl_rw = cntrl;
522 else if (access_perm == AccessPermission_Busy)
524 else if (access_perm == AccessPermission_Maybe_Stale)
526 else if (access_perm == AccessPermission_Backing_Store) {
532 if (ctrl_backing_store ==
nullptr)
533 ctrl_backing_store = cntrl;
535 else if (access_perm == AccessPermission_Invalid ||
536 access_perm == AccessPermission_NotPresent)
// NOTE(review): int vs. size_t here — benign unless a network ever has
// more controllers than INT_MAX, but worth noting.
548 int num_controllers =
netCntrls[request_net_id].size();
// Case 1: only the backing store holds the line (everyone else invalid).
549 if (num_invalid == (num_controllers - 1) && num_backing_store == 1) {
553 }
// Case 2: some cache holds it RO/RW; more than one RW is suspicious.
else if (num_ro > 0 || num_rw >= 1) {
558 warn(
"More than one Abstract Controller with RW permission for "
559 "addr: %#x on cacheline: %#x.", address, line_address);
567 num_maybe_stale, num_busy, num_ro, num_rw);
577 }
// Case 3: line is in flight (busy / maybe-stale) — fall back to probing
// controller message buffers, then the network itself.
else if ((num_busy + num_maybe_stale) > 0) {
582 "(num_maybe_stale=%d, num_busy = %d)\n",
583 num_maybe_stale, num_busy);
584 for (
auto& cntrl :
netCntrls[request_net_id]) {
585 if (cntrl->functionalReadBuffers(pkt))
589 "(num_maybe_stale=%d, num_busy = %d)\n",
590 num_maybe_stale, num_busy);
592 if (network->functionalRead(pkt))
// Fragment of the PARTIAL_FUNC_READS functional-read path: bucket the
// controllers by permission, then read from the strongest source first,
// filling `bytes` incrementally until the packet is satisfied.
616 switch(ctrl->getAccessPermission(line_address)) {
617 case AccessPermission_Read_Only:
618 ctrl_ro.push_back(ctrl);
620 case AccessPermission_Busy:
621 ctrl_busy.push_back(ctrl);
// At most one controller may hold the line Read_Write.
623 case AccessPermission_Read_Write:
624 assert(ctrl_rw ==
nullptr);
// Likewise at most one backing store (busy or not).
627 case AccessPermission_Backing_Store:
628 assert(ctrl_bs ==
nullptr);
631 case AccessPermission_Backing_Store_Busy:
632 assert(ctrl_bs ==
nullptr);
// A busy backing store is treated as both backing store and busy.
634 ctrl_busy.push_back(ctrl);
// Everything else (invalid / not-present) still gets probed last.
637 ctrl_others.push_back(ctrl);
643 "backing_store=%d\n",
644 ctrl_ro.size(), ctrl_busy.size(),
645 ctrl_rw !=
nullptr, ctrl_bs !=
nullptr);
// Prefer the (unique) RW owner as the primary data source.
650 if (ctrl_rw !=
nullptr) {
// RO sharers can each contribute bytes they hold.
658 for (
auto ctrl : ctrl_ro)
659 ctrl->functionalRead(line_address, pkt, bytes);
// If anything is still missing or in flight, probe message buffers,
// busy controllers, and finally the network.
667 if (!ctrl_busy.empty() || !bytes.isFull()) {
669 "buffers and networks\n");
670 if (ctrl_rw !=
nullptr)
672 for (
auto ctrl : ctrl_ro)
673 ctrl->functionalReadBuffers(pkt, bytes);
674 if (ctrl_bs !=
nullptr)
676 for (
auto ctrl : ctrl_busy) {
677 ctrl->functionalRead(line_address, pkt, bytes);
678 ctrl->functionalReadBuffers(pkt, bytes);
681 network->functionalRead(pkt, bytes);
683 for (
auto ctrl : ctrl_others) {
684 ctrl->functionalRead(line_address, pkt, bytes);
685 ctrl->functionalReadBuffers(pkt, bytes);
// Invariant: a functional read either fully succeeds or finds nothing;
// a partially-filled packet means protocol state is inconsistent.
689 panic_if(!(bytes.isFull() || bytes.isEmpty()),
690 "Inconsistent state on functional read for %#x %s\n",
693 return bytes.isFull();
// Fragment of the functional-write path: a write must reach every copy of
// the line, so all controllers, their buffers, their sequencers, and the
// network are updated unconditionally.
706 AccessPermission access_perm = AccessPermission_NotPresent;
// Counter kept only for debugging; [[maybe_unused]] silences the
// warning in builds where it is never read.
710 [[maybe_unused]] uint32_t num_functional_writes = 0;
717 for (
auto& cntrl :
netCntrls[request_net_id]) {
// In-flight messages inside the controller's buffers get the write too.
718 num_functional_writes += cntrl->functionalWriteBuffers(pkt);
720 access_perm = cntrl->getAccessPermission(line_addr);
// Only controllers that actually hold (some state for) the line are
// asked to update their cached copy.
721 if (access_perm != AccessPermission_Invalid &&
722 access_perm != AccessPermission_NotPresent) {
723 num_functional_writes +=
724 cntrl->functionalWrite(line_addr, pkt);
// Sequencers (CPU and DMA) may hold outstanding requests for the line.
729 if (cntrl->getCPUSequencer()) {
730 num_functional_writes +=
731 cntrl->getCPUSequencer()->functionalWrite(pkt);
733 if (cntrl->getDMASequencer()) {
734 num_functional_writes +=
735 cntrl->getDMASequencer()->functionalWrite(pkt);
// Finally, messages currently traversing the interconnect.
740 num_functional_writes += network->functionalWrite(pkt);