gem5 v23.0.0.0
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
RubySystem.cc
Go to the documentation of this file.
1/*
2 * Copyright (c) 2019,2021 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 1999-2011 Mark D. Hill and David A. Wood
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 */
40
42
43#include <fcntl.h>
44#include <zlib.h>
45
46#include <cstdio>
47#include <list>
48
49#include "base/compiler.hh"
50#include "base/intmath.hh"
51#include "base/statistics.hh"
52#include "debug/RubyCacheTrace.hh"
53#include "debug/RubySystem.hh"
58#include "mem/simple_mem.hh"
59#include "sim/eventq.hh"
60#include "sim/simulate.hh"
61#include "sim/system.hh"
62
63namespace gem5
64{
65
66namespace ruby
67{
68
74// To look forward to allowing multiple RubySystem instances, track the number
75// of RubySystems that need to be warmed up on checkpoint restore.
78
80 : ClockedObject(p), m_access_backing_store(p.access_backing_store),
81 m_cache_recorder(NULL)
82{
83 m_randomization = p.randomization;
84
85 m_block_size_bytes = p.block_size_bytes;
88 m_memory_size_bits = p.memory_size_bits;
89
90 // Resize to the size of different machine types
91 m_abstract_controls.resize(MachineType_NUM);
92
93 // Collate the statistics before they are printed.
95 // Create the profiler
96 m_profiler = new Profiler(p, this);
97 m_phys_mem = p.phys_mem;
98}
99
100void
102{
103 m_networks.emplace_back(network_ptr);
104}
105
106void
108{
109 m_abs_cntrl_vec.push_back(cntrl);
110
111 MachineID id = cntrl->getMachineID();
112 m_abstract_controls[id.getType()][id.getNum()] = cntrl;
113}
114
115void
117{
118 int network_id = -1;
119 for (int idx = 0; idx < m_networks.size(); ++idx) {
120 if (m_networks[idx].get() == network) {
121 network_id = idx;
122 }
123 }
124
125 fatal_if(network_id < 0, "Could not add MachineID %s. Network not found",
126 MachineIDToString(mach_id).c_str());
127
128 machineToNetwork.insert(std::make_pair(mach_id, network_id));
129}
130
131// This registers all requestor IDs in the system for functional reads. This
132// should be called in init() since requestor IDs are obtained in a SimObject's
133// constructor and there are functional reads/writes between init() and
134// startup().
135void
137{
138 // Create the map for RequestorID to network node. This is done in init()
139 // because all RequestorIDs must be obtained in the constructor and
140 // AbstractControllers are registered in their constructor. This is done
141 // in two steps: (1) Add all of the AbstractControllers. Since we don't
142 // have a mapping of RequestorID to MachineID this is the easiest way to
143 // filter out AbstractControllers from non-Ruby requestors. (2) Go through
144 // the system's list of RequestorIDs and add missing RequestorIDs to
145 // network 0 (the default).
146 for (auto& cntrl : m_abs_cntrl_vec) {
147 RequestorID id = cntrl->getRequestorId();
148 MachineID mach_id = cntrl->getMachineID();
149
150 // These are setup in Network constructor and should exist
151 fatal_if(!machineToNetwork.count(mach_id),
152 "No machineID %s. Does not belong to a Ruby network?",
153 MachineIDToString(mach_id).c_str());
154
155 auto network_id = machineToNetwork[mach_id];
156 requestorToNetwork.insert(std::make_pair(id, network_id));
157
158 // Create helper vectors for each network to iterate over.
159 netCntrls[network_id].push_back(cntrl);
160 }
161
162 // Default all other requestor IDs to network 0
163 for (auto id = 0; id < params().system->maxRequestors(); ++id) {
164 if (!requestorToNetwork.count(id)) {
165 requestorToNetwork.insert(std::make_pair(id, 0));
166 }
167 }
168}
169
171{
172 delete m_profiler;
173}
174
175void
176RubySystem::makeCacheRecorder(uint8_t *uncompressed_trace,
177 uint64_t cache_trace_size,
178 uint64_t block_size_bytes)
179{
180 std::vector<Sequencer*> sequencer_map;
181 Sequencer* sequencer_ptr = NULL;
182
183 for (int cntrl = 0; cntrl < m_abs_cntrl_vec.size(); cntrl++) {
184 sequencer_map.push_back(m_abs_cntrl_vec[cntrl]->getCPUSequencer());
185 if (sequencer_ptr == NULL) {
186 sequencer_ptr = sequencer_map[cntrl];
187 }
188 }
189
190 assert(sequencer_ptr != NULL);
191
192 for (int cntrl = 0; cntrl < m_abs_cntrl_vec.size(); cntrl++) {
193 if (sequencer_map[cntrl] == NULL) {
194 sequencer_map[cntrl] = sequencer_ptr;
195 }
196 }
197
198 // Remove the old CacheRecorder if it's still hanging about.
199 if (m_cache_recorder != NULL) {
200 delete m_cache_recorder;
201 }
202
203 // Create the CacheRecorder and record the cache trace
204 m_cache_recorder = new CacheRecorder(uncompressed_trace, cache_trace_size,
205 sequencer_map, block_size_bytes);
206}
207
208void
210{
211 m_cooldown_enabled = true;
212
213 // Make the trace so we know what to write back.
214 DPRINTF(RubyCacheTrace, "Recording Cache Trace\n");
216 for (int cntrl = 0; cntrl < m_abs_cntrl_vec.size(); cntrl++) {
217 m_abs_cntrl_vec[cntrl]->recordCacheTrace(cntrl, m_cache_recorder);
218 }
219 DPRINTF(RubyCacheTrace, "Cache Trace Complete\n");
220
221 // If there is no dirty block, we don't need to flush the cache
223 {
224 m_cooldown_enabled = false;
225 return;
226 }
227
228 // save the current tick value
229 Tick curtick_original = curTick();
230 DPRINTF(RubyCacheTrace, "Recording current tick %ld\n", curtick_original);
231
232 // Deschedule all prior events on the event queue, but record the tick they
233 // were scheduled at so they can be restored correctly later.
234 std::list<std::pair<Event*, Tick> > original_events;
235 while (!eventq->empty()) {
236 Event *curr_head = eventq->getHead();
237 if (curr_head->isAutoDelete()) {
238 DPRINTF(RubyCacheTrace, "Event %s auto-deletes when descheduled,"
239 " not recording\n", curr_head->name());
240 } else {
241 original_events.push_back(
242 std::make_pair(curr_head, curr_head->when()));
243 }
244 eventq->deschedule(curr_head);
245 }
246
247 // Schedule an event to start cache cooldown
248 DPRINTF(RubyCacheTrace, "Starting cache flush\n");
250 simulate();
251 DPRINTF(RubyCacheTrace, "Cache flush complete\n");
252
253 // Deschedule any events left on the event queue.
254 while (!eventq->empty()) {
256 }
257
258 // Restore curTick
259 setCurTick(curtick_original);
260
261 // Restore all events that were originally on the event queue. This is
262 // done after setting curTick back to its original value so that events do
263 // not seem to be scheduled in the past.
264 while (!original_events.empty()) {
265 std::pair<Event*, Tick> event = original_events.back();
266 eventq->schedule(event.first, event.second);
267 original_events.pop_back();
268 }
269
270 // No longer flushing back to memory.
271 m_cooldown_enabled = false;
272
273 // There are several issues with continuing simulation after calling
274 // memWriteback() at the moment, that stem from taking events off the
275 // queue, simulating again, and then putting them back on, whilst
276 // pretending that no time has passed. One is that some events will have
277 // been deleted, so can't be put back. Another is that any object
278 // recording the tick something happens may end up storing a tick in the
279 // future. A simple warning here alerts the user that things may not work
280 // as expected.
281 warn_once("Ruby memory writeback is experimental. Continuing simulation "
282 "afterwards may not always work as intended.");
283
284 // Keep the cache recorder around so that we can dump the trace if a
285 // checkpoint is immediately taken.
286}
287
void
RubySystem::writeCompressedTrace(uint8_t *raw_data, std::string filename,
                                 uint64_t uncompressed_trace_size)
{
    // Write raw_data gzip-compressed into the current checkpoint directory.
    // Takes ownership of raw_data and frees it once the file is written.

    // Create the checkpoint file for the memory. (Dropped the redundant
    // filename.c_str() -- std::string concatenation works directly.)
    std::string thefile = CheckpointIn::dir() + "/" + filename;

    int fd = creat(thefile.c_str(), 0664);
    if (fd < 0) {
        perror("creat");
        fatal("Can't open memory trace file '%s'\n", filename);
    }

    gzFile compressedMemory = gzdopen(fd, "wb");
    if (compressedMemory == NULL)
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);

    // gzwrite returns the number of uncompressed bytes consumed, or 0 on
    // error. Compare via a signed local to avoid the int-vs-uint64_t
    // promotion in the original. Note gzwrite takes the length as an
    // unsigned int, so traces of 4 GiB or more would need chunked writes.
    int written = gzwrite(compressedMemory, raw_data,
                          uncompressed_trace_size);
    if (written < 0 ||
        static_cast<uint64_t>(written) != uncompressed_trace_size) {
        fatal("Write failed on memory trace file '%s'\n", filename);
    }

    if (gzclose(compressedMemory)) {
        fatal("Close failed on memory trace file '%s'\n", filename);
    }
    delete[] raw_data;
}
316
317void
319{
320 // Store the cache-block size, so we are able to restore on systems
321 // with a different cache-block size. CacheRecorder depends on the
322 // correct cache-block size upon unserializing.
323 uint64_t block_size_bytes = getBlockSizeBytes();
324 SERIALIZE_SCALAR(block_size_bytes);
325
326 // Check that there's a valid trace to use. If not, then memory won't
327 // be up-to-date and the simulation will probably fail when restoring
328 // from the checkpoint.
329 if (m_cache_recorder == NULL) {
330 fatal("Call memWriteback() before serialize() to create"
331 "ruby trace");
332 }
333
334 // Aggregate the trace entries together into a single array
335 uint8_t *raw_data = new uint8_t[4096];
336 uint64_t cache_trace_size = m_cache_recorder->aggregateRecords(
337 &raw_data, 4096);
338 std::string cache_trace_file = name() + ".cache.gz";
339 writeCompressedTrace(raw_data, cache_trace_file, cache_trace_size);
340
341 SERIALIZE_SCALAR(cache_trace_file);
342 SERIALIZE_SCALAR(cache_trace_size);
343}
344
345void
347{
348 // Delete the cache recorder if it was created in memWriteback()
349 // to checkpoint the current cache state.
350 if (m_cache_recorder) {
351 delete m_cache_recorder;
352 m_cache_recorder = NULL;
353 }
354}
355
void
RubySystem::readCompressedTrace(std::string filename, uint8_t *&raw_data,
                                uint64_t &uncompressed_trace_size)
{
    // Read a gzip-compressed trace file into a freshly allocated buffer.
    // raw_data is an out-parameter; the caller owns the allocation.
    gzFile compressedTrace;

    // trace file
    int fd = open(filename.c_str(), O_RDONLY);
    if (fd < 0) {
        perror("open");
        fatal("Unable to open trace file %s", filename);
    }

    compressedTrace = gzdopen(fd, "rb");
    if (compressedTrace == NULL) {
        fatal("Insufficient memory to allocate compression state for %s\n",
              filename);
    }

    raw_data = new uint8_t[uncompressed_trace_size];
    // gzread returns -1 on error. The original compared the signed return
    // value directly against the unsigned size, so -1 was promoted to a
    // huge unsigned value and read errors slipped through undetected.
    int bytes_read = gzread(compressedTrace, raw_data,
                            uncompressed_trace_size);
    if (bytes_read < 0 ||
        static_cast<uint64_t>(bytes_read) < uncompressed_trace_size) {
        fatal("Unable to read complete trace from file %s\n", filename);
    }

    if (gzclose(compressedTrace)) {
        fatal("Failed to close cache trace file '%s'\n", filename);
    }
}
386
387void
389{
390 uint8_t *uncompressed_trace = NULL;
391
392 // This value should be set to the checkpoint-system's block-size.
393 // Optional, as checkpoints without it can be run if the
394 // checkpoint-system's block-size == current block-size.
395 uint64_t block_size_bytes = getBlockSizeBytes();
396 UNSERIALIZE_OPT_SCALAR(block_size_bytes);
397
398 std::string cache_trace_file;
399 uint64_t cache_trace_size = 0;
400
401 UNSERIALIZE_SCALAR(cache_trace_file);
402 UNSERIALIZE_SCALAR(cache_trace_size);
403 cache_trace_file = cp.getCptDir() + "/" + cache_trace_file;
404
405 readCompressedTrace(cache_trace_file, uncompressed_trace,
406 cache_trace_size);
407 m_warmup_enabled = true;
409
410 // Create the cache recorder that will hang around until startup.
411 makeCacheRecorder(uncompressed_trace, cache_trace_size, block_size_bytes);
412}
413
414void
416{
418}
419
420void
422{
423
424 // Ruby restores state from a checkpoint by resetting the clock to 0 and
425 // playing the requests that can possibly re-generate the cache state.
426 // The clock value is set to the actual checkpointed value once all the
427 // requests have been executed.
428 //
429 // This way of restoring state is pretty finicky. For example, if a
430 // Ruby component reads time before the state has been restored, it would
431 // cache this value and hence its clock would not be reset to 0, when
432 // Ruby resets the global clock. This can potentially result in a
433 // deadlock.
434 //
435 // The solution is that no Ruby component should read time before the
436 // simulation starts. And then one also needs to hope that the time
437 // Ruby finishes restoring the state is less than the time when the
438 // state was checkpointed.
439
440 if (m_warmup_enabled) {
441 DPRINTF(RubyCacheTrace, "Starting ruby cache warmup\n");
442 // save the current tick value
443 Tick curtick_original = curTick();
444 // save the event queue head
445 Event* eventq_head = eventq->replaceHead(NULL);
446 // set curTick to 0 and reset Ruby System's clock
447 setCurTick(0);
448 resetClock();
449
450 // Schedule an event to start cache warmup
452 simulate();
453
454 delete m_cache_recorder;
455 m_cache_recorder = NULL;
457 if (m_systems_to_warmup == 0) {
458 m_warmup_enabled = false;
459 }
460
461 // Restore eventq head
462 eventq->replaceHead(eventq_head);
463 // Restore curTick and Ruby System's clock
464 setCurTick(curtick_original);
465 resetClock();
466 }
467
468 resetStats();
469}
470
471void
473{
474 if (getWarmupEnabled()) {
476 } else if (getCooldownEnabled()) {
478 }
479}
480
481void
483{
485 for (auto& network : m_networks) {
486 network->resetStats();
487 }
489}
490
491#ifndef PARTIAL_FUNC_READS
492bool
494{
495 Addr address(pkt->getAddr());
496 Addr line_address = makeLineAddress(address);
497
498 AccessPermission access_perm = AccessPermission_NotPresent;
499
500 DPRINTF(RubySystem, "Functional Read request for %#x\n", address);
501
502 unsigned int num_ro = 0;
503 unsigned int num_rw = 0;
504 unsigned int num_busy = 0;
505 unsigned int num_maybe_stale = 0;
506 unsigned int num_backing_store = 0;
507 unsigned int num_invalid = 0;
508
509 // Only send functional requests within the same network.
510 assert(requestorToNetwork.count(pkt->requestorId()));
511 int request_net_id = requestorToNetwork[pkt->requestorId()];
512 assert(netCntrls.count(request_net_id));
513
514 AbstractController *ctrl_ro = nullptr;
515 AbstractController *ctrl_rw = nullptr;
516 AbstractController *ctrl_backing_store = nullptr;
517
518 // In this loop we count the number of controllers that have the given
519 // address in read only, read write and busy states.
520 for (auto& cntrl : netCntrls[request_net_id]) {
521 access_perm = cntrl-> getAccessPermission(line_address);
522 if (access_perm == AccessPermission_Read_Only){
523 num_ro++;
524 if (ctrl_ro == nullptr) ctrl_ro = cntrl;
525 }
526 else if (access_perm == AccessPermission_Read_Write){
527 num_rw++;
528 if (ctrl_rw == nullptr) ctrl_rw = cntrl;
529 }
530 else if (access_perm == AccessPermission_Busy)
531 num_busy++;
532 else if (access_perm == AccessPermission_Maybe_Stale)
533 num_maybe_stale++;
534 else if (access_perm == AccessPermission_Backing_Store) {
535 // See RubySlicc_Exports.sm for details, but Backing_Store is meant
536 // to represent blocks in memory *for Broadcast/Snooping protocols*,
537 // where memory has no idea whether it has an exclusive copy of data
538 // or not.
539 num_backing_store++;
540 if (ctrl_backing_store == nullptr)
541 ctrl_backing_store = cntrl;
542 }
543 else if (access_perm == AccessPermission_Invalid ||
544 access_perm == AccessPermission_NotPresent)
545 num_invalid++;
546 }
547
548 // This if case is meant to capture what happens in a Broadcast/Snoop
549 // protocol where the block does not exist in the cache hierarchy. You
550 // only want to read from the Backing_Store memory if there is no copy in
551 // the cache hierarchy, otherwise you want to try to read the RO or RW
552 // copies existing in the cache hierarchy (covered by the else statement).
553 // The reason is because the Backing_Store memory could easily be stale, if
554 // there are copies floating around the cache hierarchy, so you want to read
555 // it only if it's not in the cache hierarchy at all.
556 int num_controllers = netCntrls[request_net_id].size();
557 if (num_invalid == (num_controllers - 1) && num_backing_store == 1) {
558 DPRINTF(RubySystem, "only copy in Backing_Store memory, read from it\n");
559 ctrl_backing_store->functionalRead(line_address, pkt);
560 return true;
561 } else if (num_ro > 0 || num_rw >= 1) {
562 if (num_rw > 1) {
563 // We iterate over the vector of abstract controllers, and return
564 // the first copy found. If we have more than one cache with block
565 // in writable permission, the first one found would be returned.
566 warn("More than one Abstract Controller with RW permission for "
567 "addr: %#x on cacheline: %#x.", address, line_address);
568 }
569 // In Broadcast/Snoop protocols, this covers if you know the block
570 // exists somewhere in the caching hierarchy, then you want to read any
571 // valid RO or RW block. In directory protocols, same thing, you want
572 // to read any valid readable copy of the block.
573 DPRINTF(RubySystem, "num_maybe_stale=%d, num_busy = %d, num_ro = %d, "
574 "num_rw = %d\n",
575 num_maybe_stale, num_busy, num_ro, num_rw);
576 // Use the copy from the controller with read/write permission (if
577 // any), otherwise use get the first read only found
578 if (ctrl_rw) {
579 ctrl_rw->functionalRead(line_address, pkt);
580 } else {
581 assert(ctrl_ro);
582 ctrl_ro->functionalRead(line_address, pkt);
583 }
584 return true;
585 } else if ((num_busy + num_maybe_stale) > 0) {
586 // No controller has a valid copy of the block, but a transient or
587 // stale state indicates a valid copy should be in transit in the
588 // network or in a message buffer waiting to be handled
589 DPRINTF(RubySystem, "Controllers functionalRead lookup "
590 "(num_maybe_stale=%d, num_busy = %d)\n",
591 num_maybe_stale, num_busy);
592 for (auto& cntrl : netCntrls[request_net_id]) {
593 if (cntrl->functionalReadBuffers(pkt))
594 return true;
595 }
596 DPRINTF(RubySystem, "Network functionalRead lookup "
597 "(num_maybe_stale=%d, num_busy = %d)\n",
598 num_maybe_stale, num_busy);
599 for (auto& network : m_networks) {
600 if (network->functionalRead(pkt))
601 return true;
602 }
603 }
604
605 return false;
606}
607#else
608bool
610{
611 Addr address(pkt->getAddr());
612 Addr line_address = makeLineAddress(address);
613
614 DPRINTF(RubySystem, "Functional Read request for %#x\n", address);
615
619 AbstractController *ctrl_rw = nullptr;
620 AbstractController *ctrl_bs = nullptr;
621
622 // Build lists of controllers that have line
623 for (auto ctrl : m_abs_cntrl_vec) {
624 switch(ctrl->getAccessPermission(line_address)) {
625 case AccessPermission_Read_Only:
626 ctrl_ro.push_back(ctrl);
627 break;
628 case AccessPermission_Busy:
629 ctrl_busy.push_back(ctrl);
630 break;
631 case AccessPermission_Read_Write:
632 assert(ctrl_rw == nullptr);
633 ctrl_rw = ctrl;
634 break;
635 case AccessPermission_Backing_Store:
636 assert(ctrl_bs == nullptr);
637 ctrl_bs = ctrl;
638 break;
639 case AccessPermission_Backing_Store_Busy:
640 assert(ctrl_bs == nullptr);
641 ctrl_bs = ctrl;
642 ctrl_busy.push_back(ctrl);
643 break;
644 default:
645 ctrl_others.push_back(ctrl);
646 break;
647 }
648 }
649
650 DPRINTF(RubySystem, "num_ro=%d, num_busy=%d , has_rw=%d, "
651 "backing_store=%d\n",
652 ctrl_ro.size(), ctrl_busy.size(),
653 ctrl_rw != nullptr, ctrl_bs != nullptr);
654
655 // Issue functional reads to all controllers found in a stable state
656 // until we get a full copy of the line
657 WriteMask bytes;
658 if (ctrl_rw != nullptr) {
659 ctrl_rw->functionalRead(line_address, pkt, bytes);
660 // if a RW controllter has the full line that's all uptodate
661 if (bytes.isFull())
662 return true;
663 }
664
665 // Get data from RO and BS
666 for (auto ctrl : ctrl_ro)
667 ctrl->functionalRead(line_address, pkt, bytes);
668
669 if (ctrl_bs)
670 ctrl_bs->functionalRead(line_address, pkt, bytes);
671
672 // if there is any busy controller or bytes still not set, then a partial
673 // and/or dirty copy of the line might be in a message buffer or the
674 // network
675 if (!ctrl_busy.empty() || !bytes.isFull()) {
676 DPRINTF(RubySystem, "Reading from remaining controllers, "
677 "buffers and networks\n");
678 if (ctrl_rw != nullptr)
679 ctrl_rw->functionalReadBuffers(pkt, bytes);
680 for (auto ctrl : ctrl_ro)
681 ctrl->functionalReadBuffers(pkt, bytes);
682 if (ctrl_bs != nullptr)
683 ctrl_bs->functionalReadBuffers(pkt, bytes);
684 for (auto ctrl : ctrl_busy) {
685 ctrl->functionalRead(line_address, pkt, bytes);
686 ctrl->functionalReadBuffers(pkt, bytes);
687 }
688 for (auto& network : m_networks) {
689 network->functionalRead(pkt, bytes);
690 }
691 for (auto ctrl : ctrl_others) {
692 ctrl->functionalRead(line_address, pkt, bytes);
693 ctrl->functionalReadBuffers(pkt, bytes);
694 }
695 }
696 // we either got the full line or couldn't find anything at this point
697 panic_if(!(bytes.isFull() || bytes.isEmpty()),
698 "Inconsistent state on functional read for %#x %s\n",
699 address, bytes);
700
701 return bytes.isFull();
702}
703#endif
704
705// The function searches through all the buffers that exist in different
706// cache, directory and memory controllers, and in the network components
707// and writes the data portion of those that hold the address specified
708// in the packet.
709bool
711{
712 Addr addr(pkt->getAddr());
713 Addr line_addr = makeLineAddress(addr);
714 AccessPermission access_perm = AccessPermission_NotPresent;
715
716 DPRINTF(RubySystem, "Functional Write request for %#x\n", addr);
717
718 [[maybe_unused]] uint32_t num_functional_writes = 0;
719
720 // Only send functional requests within the same network.
721 assert(requestorToNetwork.count(pkt->requestorId()));
722 int request_net_id = requestorToNetwork[pkt->requestorId()];
723 assert(netCntrls.count(request_net_id));
724
725 for (auto& cntrl : netCntrls[request_net_id]) {
726 num_functional_writes += cntrl->functionalWriteBuffers(pkt);
727
728 access_perm = cntrl->getAccessPermission(line_addr);
729 if (access_perm != AccessPermission_Invalid &&
730 access_perm != AccessPermission_NotPresent) {
731 num_functional_writes +=
732 cntrl->functionalWrite(line_addr, pkt);
733 }
734
735 // Also updates requests pending in any sequencer associated
736 // with the controller
737 if (cntrl->getCPUSequencer()) {
738 num_functional_writes +=
739 cntrl->getCPUSequencer()->functionalWrite(pkt);
740 }
741 if (cntrl->getDMASequencer()) {
742 num_functional_writes +=
743 cntrl->getDMASequencer()->functionalWrite(pkt);
744 }
745 }
746
747 for (auto& network : m_networks) {
748 num_functional_writes += network->functionalWrite(pkt);
749 }
750 DPRINTF(RubySystem, "Messages written = %u\n", num_functional_writes);
751
752 return true;
753}
754
755} // namespace ruby
756} // namespace gem5
#define DPRINTF(x,...)
Definition trace.hh:210
The ClockedObject class extends the SimObject with a clock and accessor functions to relate ticks to ...
ClockedObjectParams Params
Parameters of ClockedObject.
Cycles curCycle() const
Determine the current cycle, corresponding to a tick aligned to a clock edge.
void resetClock() const
Reset the object's clock using the current global tick value.
void setCurTick(Tick newVal)
Definition eventq.hh:1073
EventQueue * eventq
A pointer to this object's event queue.
Definition eventq.hh:984
Event * getHead() const
Definition eventq.hh:851
Event * replaceHead(Event *s)
function for replacing the head of the event queue, so that a different set of events can run without...
Definition eventq.cc:382
virtual std::string name() const
Definition named.hh:47
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition packet.hh:295
Addr getAddr() const
Definition packet.hh:807
RequestorID requestorId() const
Definition packet.hh:780
virtual bool functionalReadBuffers(PacketPtr &)=0
These functions are used by ruby system to read/write the data blocks that exist with in the controll...
virtual void functionalRead(const Addr &addr, PacketPtr)
uint64_t aggregateRecords(uint8_t **data, uint64_t size)
uint64_t getNumRecords() const
void enqueueNextFetchRequest()
Function for fetching warming up the memory and the caches.
void enqueueNextFlushRequest()
Function for flushing the memory contents of the caches to the main memory.
void enqueueRubyEvent(Tick tick)
static bool m_warmup_enabled
static bool getWarmupEnabled()
Definition RubySystem.hh:75
bool functionalWrite(Packet *ptr)
static uint32_t getBlockSizeBytes()
Definition RubySystem.hh:72
void unserialize(CheckpointIn &cp) override
Unserialize an object.
void init() override
init() is called after all C++ SimObjects have been created and all ports are connected.
static bool m_cooldown_enabled
std::vector< std::map< uint32_t, AbstractController * > > m_abstract_controls
void registerMachineID(const MachineID &mach_id, Network *network)
static bool m_randomization
static unsigned m_systems_to_warmup
std::unordered_map< RequestorID, unsigned > requestorToNetwork
static void writeCompressedTrace(uint8_t *raw_data, std::string file, uint64_t uncompressed_trace_size)
RubySystem(const Params &p)
Definition RubySystem.cc:79
void registerNetwork(Network *)
void startup() override
startup() is the final initialization call before simulation.
std::unordered_map< MachineID, unsigned > machineToNetwork
static void readCompressedTrace(std::string filename, uint8_t *&raw_data, uint64_t &uncompressed_trace_size)
void registerAbstractController(AbstractController *)
CacheRecorder * m_cache_recorder
static uint32_t m_block_size_bits
std::vector< std::unique_ptr< Network > > m_networks
void drainResume() override
Resume execution after a successful drain.
static uint32_t m_block_size_bytes
std::vector< AbstractController * > m_abs_cntrl_vec
void resetStats() override
Callback to reset stats.
memory::SimpleMemory * m_phys_mem
std::unordered_map< unsigned, std::vector< AbstractController * > > netCntrls
static uint32_t m_memory_size_bits
bool functionalRead(Packet *ptr)
void makeCacheRecorder(uint8_t *uncompressed_trace, uint64_t cache_trace_size, uint64_t block_size_bytes)
void serialize(CheckpointOut &cp) const override
Serialize an object.
void memWriteback() override
Write back dirty buffers to memory using functional writes.
static bool getCooldownEnabled()
Definition RubySystem.hh:76
STL list class.
Definition stl.hh:51
STL pair class.
Definition stl.hh:58
STL vector class.
Definition stl.hh:37
static constexpr std::enable_if_t< std::is_integral_v< T >, int > floorLog2(T x)
Definition intmath.hh:59
static constexpr bool isPowerOf2(const T &n)
Definition intmath.hh:98
void schedule(Event *event, Tick when, bool global=false)
Schedule the given event on this queue.
Definition eventq.hh:757
void deschedule(Event *event)
Deschedule the specified event.
Definition eventq.hh:790
bool isAutoDelete() const
The function returns true if the object is automatically deleted after the event is processed.
Definition eventq.hh:494
virtual const std::string name() const
Definition eventq.cc:84
bool empty() const
Returns true if no events are queued.
Definition eventq.hh:891
Tick when() const
Get the time that the event is scheduled.
Definition eventq.hh:501
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condi...
Definition logging.hh:236
#define fatal(...)
This implements a cprintf based fatal() function.
Definition logging.hh:200
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
Definition logging.hh:214
#define UNSERIALIZE_OPT_SCALAR(scalar)
Definition serialize.hh:582
const std::string getCptDir()
Definition serialize.hh:85
static std::string dir()
Get the current checkout directory name.
Definition serialize.cc:154
const Params & params() const
virtual void resetStats()
Callback to reset stats.
Definition group.cc:86
#define warn(...)
Definition logging.hh:256
#define warn_once(...)
Definition logging.hh:260
Bitfield< 14, 12 > fd
Definition types.hh:150
Bitfield< 33 > id
Bitfield< 10, 5 > event
Bitfield< 0 > p
Bitfield< 3 > addr
Definition types.hh:84
std::string MachineIDToString(MachineID machine)
Definition MachineID.hh:73
Addr makeLineAddress(Addr addr)
Definition Address.cc:60
void registerDumpCallback(const std::function< void()> &callback)
Register a callback that should be called whenever statistics are about to be dumped.
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
Tick curTick()
The universal simulation clock.
Definition cur_tick.hh:46
std::ostream CheckpointOut
Definition serialize.hh:66
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition types.hh:147
uint64_t Tick
Tick count type.
Definition types.hh:58
uint16_t RequestorID
Definition request.hh:95
GlobalSimLoopExitEvent * simulate(Tick num_cycles)
Definition simulate.cc:188
#define UNSERIALIZE_SCALAR(scalar)
Definition serialize.hh:575
#define SERIALIZE_SCALAR(scalar)
Definition serialize.hh:568
SimpleMemory declaration.
Declaration of Statistics objects.

Generated on Mon Jul 10 2023 14:24:33 for gem5 by doxygen 1.9.7