gem5 v20.0.0.2
RubySystem.cc
1 /*
2  * Copyright (c) 2019 ARM Limited
3  * All rights reserved.
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Copyright (c) 1999-2011 Mark D. Hill and David A. Wood
15  * All rights reserved.
16  *
17  * Redistribution and use in source and binary forms, with or without
18  * modification, are permitted provided that the following conditions are
19  * met: redistributions of source code must retain the above copyright
20  * notice, this list of conditions and the following disclaimer;
21  * redistributions in binary form must reproduce the above copyright
22  * notice, this list of conditions and the following disclaimer in the
23  * documentation and/or other materials provided with the distribution;
24  * neither the name of the copyright holders nor the names of its
25  * contributors may be used to endorse or promote products derived from
26  * this software without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39  */
40 
41 #include "mem/ruby/system/RubySystem.hh"
42 
43 #include <fcntl.h>
44 #include <zlib.h>
45 
46 #include <cstdio>
47 #include <list>
48 
49 #include "base/intmath.hh"
50 #include "base/statistics.hh"
51 #include "debug/RubyCacheTrace.hh"
52 #include "debug/RubySystem.hh"
53 #include "mem/ruby/common/Address.hh"
54 #include "mem/ruby/network/Network.hh"
55 #include "mem/ruby/system/DMASequencer.hh"
56 #include "mem/ruby/system/Sequencer.hh"
57 #include "mem/simple_mem.hh"
58 #include "sim/eventq.hh"
59 #include "sim/simulate.hh"
60 
61 using namespace std;
62 
63 bool RubySystem::m_randomization;
64 uint32_t RubySystem::m_block_size_bytes;
65 uint32_t RubySystem::m_block_size_bits;
66 uint32_t RubySystem::m_memory_size_bits;
67 bool RubySystem::m_warmup_enabled = false;
68 // To look forward to allowing multiple RubySystem instances, track the number
69 // of RubySystems that need to be warmed up on checkpoint restore.
70 unsigned RubySystem::m_systems_to_warmup = 0;
71 bool RubySystem::m_cooldown_enabled = false;
72 
73 RubySystem::RubySystem(const Params *p)
74  : ClockedObject(p), m_access_backing_store(p->access_backing_store),
75  m_cache_recorder(NULL)
76 {
77  m_randomization = p->randomization;
78 
79  m_block_size_bytes = p->block_size_bytes;
80  assert(isPowerOf2(m_block_size_bytes));
81  m_block_size_bits = floorLog2(m_block_size_bytes);
82  m_memory_size_bits = p->memory_size_bits;
83 
84  // Resize to the size of different machine types
85  m_abstract_controls.resize(MachineType_NUM);
86 
87  // Collate the statistics before they are printed.
88  Stats::registerDumpCallback(new RubyStatsCallback(this));
89  // Create the profiler
90  m_profiler = new Profiler(p, this);
91  m_phys_mem = p->phys_mem;
92 }
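
The constructor asserts that the configured cache-line size is a power of two and derives m_block_size_bits from it, i.e. the number of low address bits that select a byte within a line. Below is a minimal standalone sketch of that arithmetic, with illustrative helpers standing in for gem5's isPowerOf2()/floorLog2() from base/intmath.hh:

    // Standalone sketch of the block-size arithmetic above (not gem5 code).
    #include <cassert>
    #include <cstdint>
    #include <iostream>

    static bool isPow2(uint64_t n) { return n != 0 && (n & (n - 1)) == 0; }

    static uint32_t log2Floor(uint64_t n)
    {
        uint32_t bits = 0;
        while (n >>= 1)
            ++bits;
        return bits;
    }

    int main()
    {
        uint64_t block_size_bytes = 64;   // e.g. the block_size_bytes parameter
        assert(isPow2(block_size_bytes));
        uint32_t block_size_bits = log2Floor(block_size_bytes);
        // 64-byte lines -> 6 offset bits; the bits above them identify the line.
        std::cout << "block_size_bits = " << block_size_bits << "\n";
        return 0;
    }
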
93 
94 void
95 RubySystem::registerNetwork(Network *network_ptr)
96 {
97  m_networks.emplace_back(network_ptr);
98 }
99 
100 void
101 RubySystem::registerAbstractController(AbstractController *cntrl)
102 {
103  m_abs_cntrl_vec.push_back(cntrl);
104 
105  MachineID id = cntrl->getMachineID();
106  m_abstract_controls[id.getType()][id.getNum()] = cntrl;
107 }
108 
109 RubySystem::~RubySystem()
110 {
111  delete m_profiler;
112 }
113 
114 void
115 RubySystem::makeCacheRecorder(uint8_t *uncompressed_trace,
116  uint64_t cache_trace_size,
117  uint64_t block_size_bytes)
118 {
119  vector<Sequencer*> sequencer_map;
120  Sequencer* sequencer_ptr = NULL;
121 
122  for (int cntrl = 0; cntrl < m_abs_cntrl_vec.size(); cntrl++) {
123  sequencer_map.push_back(m_abs_cntrl_vec[cntrl]->getCPUSequencer());
124  if (sequencer_ptr == NULL) {
125  sequencer_ptr = sequencer_map[cntrl];
126  }
127  }
128 
129  assert(sequencer_ptr != NULL);
130 
131  for (int cntrl = 0; cntrl < m_abs_cntrl_vec.size(); cntrl++) {
132  if (sequencer_map[cntrl] == NULL) {
133  sequencer_map[cntrl] = sequencer_ptr;
134  }
135  }
136 
137  // Remove the old CacheRecorder if it's still hanging about.
138  if (m_cache_recorder != NULL) {
139  delete m_cache_recorder;
140  }
141 
142  // Create the CacheRecorder and record the cache trace
143  m_cache_recorder = new CacheRecorder(uncompressed_trace, cache_trace_size,
144  sequencer_map, block_size_bytes);
145 }
146 
147 void
148 RubySystem::memWriteback()
149 {
150  m_cooldown_enabled = true;
151 
152  // Make the trace so we know what to write back.
153  DPRINTF(RubyCacheTrace, "Recording Cache Trace\n");
154  makeCacheRecorder(NULL, 0, getBlockSizeBytes());
155  for (int cntrl = 0; cntrl < m_abs_cntrl_vec.size(); cntrl++) {
156  m_abs_cntrl_vec[cntrl]->recordCacheTrace(cntrl, m_cache_recorder);
157  }
158  DPRINTF(RubyCacheTrace, "Cache Trace Complete\n");
159 
160  // save the current tick value
161  Tick curtick_original = curTick();
162  DPRINTF(RubyCacheTrace, "Recording current tick %ld\n", curtick_original);
163 
164  // Deschedule all prior events on the event queue, but record the tick they
165  // were scheduled at so they can be restored correctly later.
166  list<pair<Event*, Tick> > original_events;
167  while (!eventq->empty()) {
168  Event *curr_head = eventq->getHead();
169  if (curr_head->isAutoDelete()) {
170  DPRINTF(RubyCacheTrace, "Event %s auto-deletes when descheduled,"
171  " not recording\n", curr_head->name());
172  } else {
173  original_events.push_back(make_pair(curr_head, curr_head->when()));
174  }
175  eventq->deschedule(curr_head);
176  }
177 
178  // Schedule an event to start cache cooldown
179  DPRINTF(RubyCacheTrace, "Starting cache flush\n");
180  enqueueRubyEvent(curTick());
181  simulate();
182  DPRINTF(RubyCacheTrace, "Cache flush complete\n");
183 
184  // Deschedule any events left on the event queue.
185  while (!eventq->empty()) {
186  eventq->deschedule(eventq->getHead());
187  }
188 
189  // Restore curTick
190  setCurTick(curtick_original);
191 
192  // Restore all events that were originally on the event queue. This is
193  // done after setting curTick back to its original value so that events do
194  // not seem to be scheduled in the past.
195  while (!original_events.empty()) {
196  pair<Event*, Tick> event = original_events.back();
197  eventq->schedule(event.first, event.second);
198  original_events.pop_back();
199  }
200 
201  // No longer flushing back to memory.
202  m_cooldown_enabled = false;
203 
204  // There are several issues with continuing simulation after calling
205  // memWriteback() at the moment, that stem from taking events off the
206  // queue, simulating again, and then putting them back on, whilst
207  // pretending that no time has passed. One is that some events will have
208  // been deleted, so can't be put back. Another is that any object
209  // recording the tick something happens may end up storing a tick in the
210  // future. A simple warning here alerts the user that things may not work
211  // as expected.
212  warn_once("Ruby memory writeback is experimental. Continuing simulation "
213  "afterwards may not always work as intended.");
214 
215  // Keep the cache recorder around so that we can dump the trace if a
216  // checkpoint is immediately taken.
217 }
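
memWriteback() works by temporarily pulling every pending event off the queue, simulating only the flush, and then re-scheduling the saved events at their original ticks once the clock has been rolled back. Below is a minimal standalone sketch of that save/run/restore pattern, using a std::multimap as a stand-in for gem5's EventQueue (all names are illustrative, not gem5 API):

    // Minimal sketch (not gem5 code) of the save/run/restore pattern above,
    // with a std::multimap keyed by tick standing in for the event queue.
    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <map>
    #include <utility>
    #include <vector>

    using Tick = uint64_t;
    using Queue = std::multimap<Tick, std::function<void()>>;

    int main()
    {
        Queue q;
        q.emplace(100, [] { std::cout << "normal event @100\n"; });
        q.emplace(200, [] { std::cout << "normal event @200\n"; });

        // 1. Pull every pending event off the queue, remembering its tick.
        std::vector<std::pair<Tick, std::function<void()>>> saved(q.begin(), q.end());
        q.clear();

        // 2. Run only the temporary "flush" work.
        q.emplace(0, [] { std::cout << "flush event\n"; });
        while (!q.empty()) {
            auto it = q.begin();
            it->second();
            q.erase(it);
        }

        // 3. Put the original events back at their original ticks, as if
        //    no time had passed while the flush ran.
        for (auto &e : saved)
            q.emplace(e.first, e.second);

        std::cout << "restored " << q.size() << " events\n";
        return 0;
    }
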
218 
219 void
220 RubySystem::writeCompressedTrace(uint8_t *raw_data, string filename,
221  uint64_t uncompressed_trace_size)
222 {
223  // Create the checkpoint file for the memory
224  string thefile = CheckpointIn::dir() + "/" + filename.c_str();
225 
226  int fd = creat(thefile.c_str(), 0664);
227  if (fd < 0) {
228  perror("creat");
229  fatal("Can't open memory trace file '%s'\n", filename);
230  }
231 
232  gzFile compressedMemory = gzdopen(fd, "wb");
233  if (compressedMemory == NULL)
234  fatal("Insufficient memory to allocate compression state for %s\n",
235  filename);
236 
237  if (gzwrite(compressedMemory, raw_data, uncompressed_trace_size) !=
238  uncompressed_trace_size) {
239  fatal("Write failed on memory trace file '%s'\n", filename);
240  }
241 
242  if (gzclose(compressedMemory)) {
243  fatal("Close failed on memory trace file '%s'\n", filename);
244  }
245  delete[] raw_data;
246 }
247 
248 void
249 RubySystem::serialize(CheckpointOut &cp) const
250 {
251  // Store the cache-block size, so we are able to restore on systems with a
252  // different cache-block size. CacheRecorder depends on the correct
253  // cache-block size upon unserializing.
254  uint64_t block_size_bytes = getBlockSizeBytes();
255  SERIALIZE_SCALAR(block_size_bytes);
256 
257  // Check that there's a valid trace to use. If not, then memory won't be
258  // up-to-date and the simulation will probably fail when restoring from the
259  // checkpoint.
260  if (m_cache_recorder == NULL) {
261  fatal("Call memWriteback() before serialize() to create ruby trace");
262  }
263 
264  // Aggregate the trace entries together into a single array
265  uint8_t *raw_data = new uint8_t[4096];
266  uint64_t cache_trace_size = m_cache_recorder->aggregateRecords(&raw_data,
267  4096);
268  string cache_trace_file = name() + ".cache.gz";
269  writeCompressedTrace(raw_data, cache_trace_file, cache_trace_size);
270 
271  SERIALIZE_SCALAR(cache_trace_file);
272  SERIALIZE_SCALAR(cache_trace_size);
273 }
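
serialize() and unserialize() are symmetric: the scalars written here (block_size_bytes, cache_trace_file, cache_trace_size) are exactly what the restore path reads back, with block_size_bytes treated as optional. Below is a standalone sketch of that key/value pairing, using a plain std::map as a stand-in checkpoint; the stored values are made up and this is not gem5's serialization API:

    // Standalone sketch (not gem5 code): the checkpoint as a simple key/value
    // store; restore falls back to a default for the optional key.
    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <string>

    using Checkpoint = std::map<std::string, std::string>;

    int main()
    {
        Checkpoint cp;

        // "serialize": store the scalars the restore path will need.
        cp["block_size_bytes"] = "64";                     // illustrative values
        cp["cache_trace_file"] = "system.ruby.cache.gz";
        cp["cache_trace_size"] = "123456";

        // "unserialize": block_size_bytes is optional, the trace entries are not.
        uint64_t block_size_bytes = 64;                    // current system's value
        if (auto it = cp.find("block_size_bytes"); it != cp.end())
            block_size_bytes = std::stoull(it->second);
        std::string cache_trace_file = cp.at("cache_trace_file");
        uint64_t cache_trace_size = std::stoull(cp.at("cache_trace_size"));

        std::cout << cache_trace_file << " (" << cache_trace_size << " bytes), "
                  << block_size_bytes << "-byte lines\n";
        return 0;
    }
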
274 
275 void
276 RubySystem::drainResume()
277 {
278  // Delete the cache recorder if it was created in memWriteback()
279  // to checkpoint the current cache state.
280  if (m_cache_recorder) {
281  delete m_cache_recorder;
282  m_cache_recorder = NULL;
283  }
284 }
285 
286 void
287 RubySystem::readCompressedTrace(string filename, uint8_t *&raw_data,
288  uint64_t &uncompressed_trace_size)
289 {
290  // Read the trace file
291  gzFile compressedTrace;
292 
293  // trace file
294  int fd = open(filename.c_str(), O_RDONLY);
295  if (fd < 0) {
296  perror("open");
297  fatal("Unable to open trace file %s", filename);
298  }
299 
300  compressedTrace = gzdopen(fd, "rb");
301  if (compressedTrace == NULL) {
302  fatal("Insufficient memory to allocate compression state for %s\n",
303  filename);
304  }
305 
306  raw_data = new uint8_t[uncompressed_trace_size];
307  if (gzread(compressedTrace, raw_data, uncompressed_trace_size) <
308  uncompressed_trace_size) {
309  fatal("Unable to read complete trace from file %s\n", filename);
310  }
311 
312  if (gzclose(compressedTrace)) {
313  fatal("Failed to close cache trace file '%s'\n", filename);
314  }
315 }
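
writeCompressedTrace() and readCompressedTrace() together form a gzip round trip over a raw byte buffer. Below is a self-contained sketch of the same round trip using zlib's gzopen()/gzwrite()/gzread() directly (gzopen replaces the creat()/open() plus gzdopen() pairing used above, for brevity); the file name is arbitrary:

    // Standalone round-trip sketch (not gem5 code) of the gz-compressed trace
    // I/O above. Build with: g++ gz_roundtrip.cc -lz
    #include <zlib.h>
    #include <cstdint>
    #include <cstring>
    #include <iostream>

    int main()
    {
        const char *path = "trace.gz";
        uint8_t out[16] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};

        gzFile w = gzopen(path, "wb");
        if (!w || gzwrite(w, out, sizeof(out)) != (int)sizeof(out)) {
            std::cerr << "write failed\n";
            return 1;
        }
        gzclose(w);

        uint8_t in[16] = {};
        gzFile r = gzopen(path, "rb");
        if (!r || gzread(r, in, sizeof(in)) != (int)sizeof(in)) {
            std::cerr << "read failed\n";
            return 1;
        }
        gzclose(r);

        std::cout << (std::memcmp(out, in, sizeof(in)) == 0 ? "match\n" : "mismatch\n");
        return 0;
    }
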
316 
317 void
318 RubySystem::unserialize(CheckpointIn &cp)
319 {
320  uint8_t *uncompressed_trace = NULL;
321 
322  // This value should be set to the checkpoint-system's block-size.
323  // Optional, as checkpoints without it can be run if the
324  // checkpoint-system's block-size == current block-size.
325  uint64_t block_size_bytes = getBlockSizeBytes();
326  UNSERIALIZE_OPT_SCALAR(block_size_bytes);
327 
328  string cache_trace_file;
329  uint64_t cache_trace_size = 0;
330 
331  UNSERIALIZE_SCALAR(cache_trace_file);
332  UNSERIALIZE_SCALAR(cache_trace_size);
333  cache_trace_file = cp.getCptDir() + "/" + cache_trace_file;
334 
335  readCompressedTrace(cache_trace_file, uncompressed_trace,
336  cache_trace_size);
337  m_warmup_enabled = true;
338  m_systems_to_warmup++;
339 
340  // Create the cache recorder that will hang around until startup.
341  makeCacheRecorder(uncompressed_trace, cache_trace_size, block_size_bytes);
342 }
343 
344 void
345 RubySystem::startup()
346 {
347 
348  // Ruby restores state from a checkpoint by resetting the clock to 0 and
349  // playing the requests that can possibly re-generate the cache state.
350  // The clock value is set to the actual checkpointed value once all the
351  // requests have been executed.
352  //
353  // This way of restoring state is pretty finicky. For example, if a
354  // Ruby component reads time before the state has been restored, it would
355  // cache this value and hence its clock would not be reset to 0, when
356  // Ruby resets the global clock. This can potentially result in a
357  // deadlock.
358  //
359  // The solution is that no Ruby component should read time before the
360  // simulation starts. And then one also needs to hope that the time
361  // Ruby finishes restoring the state is less than the time when the
362  // state was checkpointed.
363 
364  if (m_warmup_enabled) {
365  DPRINTF(RubyCacheTrace, "Starting ruby cache warmup\n");
366  // save the current tick value
367  Tick curtick_original = curTick();
368  // save the event queue head
369  Event* eventq_head = eventq->replaceHead(NULL);
370  // set curTick to 0 and reset Ruby System's clock
371  setCurTick(0);
372  resetClock();
373 
374  // Schedule an event to start cache warmup
375  enqueueRubyEvent(curTick());
376  simulate();
377 
378  delete m_cache_recorder;
379  m_cache_recorder = NULL;
380  m_systems_to_warmup--;
381  if (m_systems_to_warmup == 0) {
382  m_warmup_enabled = false;
383  }
384 
385  // Restore eventq head
386  eventq->replaceHead(eventq_head);
387  // Restore curTick and Ruby System's clock
388  setCurTick(curtick_original);
389  resetClock();
390  }
391 
392  resetStats();
393 }
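
startup() rewinds the clock to zero, replays the recorded trace to warm the caches, and only then jumps forward to the checkpointed tick; as the comment above notes, this only works if the replay finishes before the time at which the checkpoint was taken. A toy sketch of that rewind/replay/restore sequencing and its timing constraint (all values below are made up):

    // Standalone sketch (not gem5 code) of the warmup clock handling above.
    #include <cassert>
    #include <cstdint>
    #include <iostream>

    using Tick = uint64_t;

    int main()
    {
        Tick checkpoint_tick = 1000000;   // tick at which the checkpoint was taken
        Tick cur_tick = checkpoint_tick;  // restored before warmup begins

        // Rewind to zero and replay the recorded accesses to rebuild cache state.
        Tick saved_tick = cur_tick;
        cur_tick = 0;
        const Tick warmup_ticks = 400000; // simulated time the replay consumes
        cur_tick += warmup_ticks;

        // The replay must finish before the checkpointed time; otherwise
        // restoring the saved tick would move the clock backwards.
        assert(cur_tick <= saved_tick);
        cur_tick = saved_tick;

        std::cout << "warmup done, resuming at tick " << cur_tick << "\n";
        return 0;
    }
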
394 
395 void
396 RubySystem::processRubyEvent()
397 {
398  if (getWarmupEnabled()) {
399  m_cache_recorder->enqueueNextFetchRequest();
400  } else if (getCooldownEnabled()) {
401  m_cache_recorder->enqueueNextFlushRequest();
402  }
403 }
404 
405 void
406 RubySystem::resetStats()
407 {
408  m_start_cycle = curCycle();
409  for (auto& network : m_networks) {
410  network->resetStats();
411  }
412 }
413 
414 bool
415 RubySystem::functionalRead(PacketPtr pkt)
416 {
417  Addr address(pkt->getAddr());
418  Addr line_address = makeLineAddress(address);
419 
420  AccessPermission access_perm = AccessPermission_NotPresent;
421  int num_controllers = m_abs_cntrl_vec.size();
422 
423  DPRINTF(RubySystem, "Functional Read request for %#x\n", address);
424 
425  unsigned int num_ro = 0;
426  unsigned int num_rw = 0;
427  unsigned int num_busy = 0;
428  unsigned int num_maybe_stale = 0;
429  unsigned int num_backing_store = 0;
430  unsigned int num_invalid = 0;
431 
432  AbstractController *ctrl_ro = nullptr;
433  AbstractController *ctrl_rw = nullptr;
434  AbstractController *ctrl_backing_store = nullptr;
435 
436  // In this loop we count the number of controllers that have the given
437  // address in read only, read write and busy states.
438  for (unsigned int i = 0; i < num_controllers; ++i) {
439  access_perm = m_abs_cntrl_vec[i]-> getAccessPermission(line_address);
440  if (access_perm == AccessPermission_Read_Only){
441  num_ro++;
442  if (ctrl_ro == nullptr) ctrl_ro = m_abs_cntrl_vec[i];
443  }
444  else if (access_perm == AccessPermission_Read_Write){
445  num_rw++;
446  if (ctrl_rw == nullptr) ctrl_rw = m_abs_cntrl_vec[i];
447  }
448  else if (access_perm == AccessPermission_Busy)
449  num_busy++;
450  else if (access_perm == AccessPermission_Maybe_Stale)
451  num_maybe_stale++;
452  else if (access_perm == AccessPermission_Backing_Store) {
453  // See RubySlicc_Exports.sm for details, but Backing_Store is meant
454  // to represent blocks in memory *for Broadcast/Snooping protocols*,
455  // where memory has no idea whether it has an exclusive copy of data
456  // or not.
457  num_backing_store++;
458  if (ctrl_backing_store == nullptr)
459  ctrl_backing_store = m_abs_cntrl_vec[i];
460  }
461  else if (access_perm == AccessPermission_Invalid ||
462  access_perm == AccessPermission_NotPresent)
463  num_invalid++;
464  }
465 
466  // This if case is meant to capture what happens in a Broadcast/Snoop
467  // protocol where the block does not exist in the cache hierarchy. You
468  // only want to read from the Backing_Store memory if there is no copy in
469  // the cache hierarchy, otherwise you want to try to read the RO or RW
470  // copies existing in the cache hierarchy (covered by the else statement).
471  // The reason is because the Backing_Store memory could easily be stale, if
472  // there are copies floating around the cache hierarchy, so you want to read
473  // it only if it's not in the cache hierarchy at all.
474  if (num_invalid == (num_controllers - 1) && num_backing_store == 1) {
475  DPRINTF(RubySystem, "only copy in Backing_Store memory, read from it\n");
476  ctrl_backing_store->functionalRead(line_address, pkt);
477  return true;
478  } else if (num_ro > 0 || num_rw >= 1) {
479  if (num_rw > 1) {
480  // We iterate over the vector of abstract controllers, and return
481  // the first copy found. If we have more than one cache with block
482  // in writable permission, the first one found would be returned.
483  warn("More than one Abstract Controller with RW permission for "
484  "addr: %#x on cacheline: %#x.", address, line_address);
485  }
486  // In Broadcast/Snoop protocols, this covers if you know the block
487  // exists somewhere in the caching hierarchy, then you want to read any
488  // valid RO or RW block. In directory protocols, same thing, you want
489  // to read any valid readable copy of the block.
490  DPRINTF(RubySystem, "num_maybe_stale=%d, num_busy = %d, num_ro = %d, "
491  "num_rw = %d\n",
492  num_maybe_stale, num_busy, num_ro, num_rw);
493  // Use the copy from the controller with read/write permission (if
494  // any), otherwise use the first read-only copy found
495  if (ctrl_rw) {
496  ctrl_rw->functionalRead(line_address, pkt);
497  } else {
498  assert(ctrl_ro);
499  ctrl_ro->functionalRead(line_address, pkt);
500  }
501  return true;
502  } else if ((num_busy + num_maybe_stale) > 0) {
503  // No controller has a valid copy of the block, but a transient or
504  // stale state indicates a valid copy should be in transit in the
505  // network or in a message buffer waiting to be handled
506  DPRINTF(RubySystem, "Controllers functionalRead lookup "
507  "(num_maybe_stale=%d, num_busy = %d)\n",
508  num_maybe_stale, num_busy);
509  for (unsigned int i = 0; i < num_controllers;++i) {
510  if (m_abs_cntrl_vec[i]->functionalReadBuffers(pkt))
511  return true;
512  }
513  DPRINTF(RubySystem, "Network functionalRead lookup "
514  "(num_maybe_stale=%d, num_busy = %d)\n",
515  num_maybe_stale, num_busy);
516  for (auto& network : m_networks) {
517  if (network->functionalRead(pkt))
518  return true;
519  }
520  }
521 
522  return false;
523 }
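
functionalRead() applies a fixed priority: trust the Backing_Store copy only when no cache holds the line at all; otherwise prefer a read-write copy, then a read-only copy; and fall back to searching in-flight messages when only transient states remain. A small standalone sketch of that selection over a plain vector of per-controller permissions (illustrative only, not gem5 code):

    // Standalone sketch of the source-selection priority used by functionalRead.
    #include <iostream>
    #include <string>
    #include <vector>

    enum class Perm { NotPresent, Invalid, Busy, MaybeStale,
                      ReadOnly, ReadWrite, BackingStore };

    std::string pickSource(const std::vector<Perm> &perms)
    {
        int ro = 0, rw = 0, busy = 0, stale = 0, backing = 0, invalid = 0;
        for (Perm p : perms) {
            switch (p) {
              case Perm::ReadOnly:     ++ro; break;
              case Perm::ReadWrite:    ++rw; break;
              case Perm::Busy:         ++busy; break;
              case Perm::MaybeStale:   ++stale; break;
              case Perm::BackingStore: ++backing; break;
              default:                 ++invalid; break;   // Invalid / NotPresent
            }
        }
        // Backing store is trusted only when no cache holds any copy at all.
        if (backing == 1 && invalid == (int)perms.size() - 1)
            return "backing store";
        // Otherwise prefer a writable copy, then any read-only copy.
        if (rw > 0)
            return "read-write copy";
        if (ro > 0)
            return "read-only copy";
        // A transient state means the data is in flight: search buffers/network.
        if (busy + stale > 0)
            return "search message buffers and network";
        return "not found";
    }

    int main()
    {
        std::cout << pickSource({Perm::Invalid, Perm::ReadOnly, Perm::BackingStore}) << "\n";
        std::cout << pickSource({Perm::Invalid, Perm::Invalid, Perm::BackingStore}) << "\n";
        std::cout << pickSource({Perm::Busy, Perm::Invalid, Perm::Invalid}) << "\n";
        return 0;
    }
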
524 
525 // The function searches through all the buffers that exist in different
526 // cache, directory and memory controllers, and in the network components
527 // and writes the data portion of those that hold the address specified
528 // in the packet.
529 bool
530 RubySystem::functionalWrite(PacketPtr pkt)
531 {
532  Addr addr(pkt->getAddr());
533  Addr line_addr = makeLineAddress(addr);
534  AccessPermission access_perm = AccessPermission_NotPresent;
535  int num_controllers = m_abs_cntrl_vec.size();
536 
537  DPRINTF(RubySystem, "Functional Write request for %#x\n", addr);
538 
539  uint32_t M5_VAR_USED num_functional_writes = 0;
540 
541  for (unsigned int i = 0; i < num_controllers;++i) {
542  num_functional_writes +=
543  m_abs_cntrl_vec[i]->functionalWriteBuffers(pkt);
544 
545  access_perm = m_abs_cntrl_vec[i]->getAccessPermission(line_addr);
546  if (access_perm != AccessPermission_Invalid &&
547  access_perm != AccessPermission_NotPresent) {
548  num_functional_writes +=
549  m_abs_cntrl_vec[i]->functionalWrite(line_addr, pkt);
550  }
551 
552  // Also updates requests pending in any sequencer associated
553  // with the controller
554  if (m_abs_cntrl_vec[i]->getCPUSequencer()) {
555  num_functional_writes +=
556  m_abs_cntrl_vec[i]->getCPUSequencer()->functionalWrite(pkt);
557  }
558  if (m_abs_cntrl_vec[i]->getDMASequencer()) {
559  num_functional_writes +=
560  m_abs_cntrl_vec[i]->getDMASequencer()->functionalWrite(pkt);
561  }
562  }
563 
564  for (auto& network : m_networks) {
565  num_functional_writes += network->functionalWrite(pkt);
566  }
567  DPRINTF(RubySystem, "Messages written = %u\n", num_functional_writes);
568 
569  return true;
570 }
571 
572 RubySystem *
573 RubySystemParams::create()
574 {
575  return new RubySystem(this);
576 }