gem5  v21.1.0.2
Sequencer.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2019-2021 ARM Limited
3  * All rights reserved.
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
15  * Copyright (c) 2013 Advanced Micro Devices, Inc.
16  * All rights reserved.
17  *
18  * Redistribution and use in source and binary forms, with or without
19  * modification, are permitted provided that the following conditions are
20  * met: redistributions of source code must retain the above copyright
21  * notice, this list of conditions and the following disclaimer;
22  * redistributions in binary form must reproduce the above copyright
23  * notice, this list of conditions and the following disclaimer in the
24  * documentation and/or other materials provided with the distribution;
25  * neither the name of the copyright holders nor the names of its
26  * contributors may be used to endorse or promote products derived from
27  * this software without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40  */
41 
43 
44 #include "arch/x86/ldstflags.hh"
45 #include "base/logging.hh"
46 #include "base/str.hh"
48 #include "debug/LLSC.hh"
49 #include "debug/MemoryAccess.hh"
50 #include "debug/ProtocolTrace.hh"
51 #include "debug/RubySequencer.hh"
52 #include "debug/RubyStats.hh"
53 #include "mem/packet.hh"
55 #include "mem/ruby/protocol/PrefetchBit.hh"
56 #include "mem/ruby/protocol/RubyAccessMode.hh"
60 #include "sim/system.hh"
61 
62 namespace gem5
63 {
64 
65 namespace ruby
66 {
67 
// Sequencer constructor: wires the sequencer to its data cache, copies the
// outstanding-request / deadlock-threshold limits from the params, and
// allocates+initializes all latency-profiling histograms.
// NOTE(review): the signature line (source line 68, presumably
// "Sequencer::Sequencer(const RubySequencerParams &p)") was dropped by the
// documentation extraction — confirm against the real Sequencer.cc.
 69  : RubyPort(p), m_IncompleteTimes(MachineType_NUM),
 70  deadlockCheckEvent([this]{ wakeup(); }, "Sequencer deadlock check")
 71 {
 72  m_outstanding_count = 0;
 73 
 74  m_dataCache_ptr = p.dcache;
 75  m_max_outstanding_requests = p.max_outstanding_requests;
 76  m_deadlock_threshold = p.deadlock_threshold;
 77 
 78  m_coreId = p.coreid; // for tracking the two CorePair sequencers
 79  assert(m_max_outstanding_requests > 0);
 80  assert(m_deadlock_threshold > 0);
 81 
 82  m_runningGarnetStandalone = p.garnet_standalone;
 83 
 84 
 85  // These statistical variables are not for display.
 86  // The profiler will collate these across different
 87  // sequencers and display those collated statistics.
 88  m_outstandReqHist.init(10);
 89  m_latencyHist.init(10);
 90  m_hitLatencyHist.init(10);
 91  m_missLatencyHist.init(10);
 92 
// Per-request-type latency histograms (overall / hit / miss).
 93  for (int i = 0; i < RubyRequestType_NUM; i++) {
 94  m_typeLatencyHist.push_back(new statistics::Histogram());
 95  m_typeLatencyHist[i]->init(10);
 96 
 97  m_hitTypeLatencyHist.push_back(new statistics::Histogram());
 98  m_hitTypeLatencyHist[i]->init(10);
 99 
 100  m_missTypeLatencyHist.push_back(new statistics::Histogram());
 101  m_missTypeLatencyHist[i]->init(10);
 102  }
 103 
// Per-responding-machine histograms, including the four phase-delay
// histograms sampled in recordMissLatency().
 104  for (int i = 0; i < MachineType_NUM; i++) {
 105  m_hitMachLatencyHist.push_back(new statistics::Histogram());
 106  m_hitMachLatencyHist[i]->init(10);
 107 
 108  m_missMachLatencyHist.push_back(new statistics::Histogram());
 109  m_missMachLatencyHist[i]->init(10);
 110 
 111  m_IssueToInitialDelayHist.push_back(new statistics::Histogram());
 112  m_IssueToInitialDelayHist[i]->init(10);
 113 
 114  m_InitialToForwardDelayHist.push_back(new statistics::Histogram());
 115  m_InitialToForwardDelayHist[i]->init(10);
 116 
 117  m_ForwardToFirstResponseDelayHist.push_back(
 118  new statistics::Histogram());
 119  m_ForwardToFirstResponseDelayHist[i]->init(10);
 120 
 121  m_FirstResponseToCompletionDelayHist.push_back(
 122  new statistics::Histogram());
 123  m_FirstResponseToCompletionDelayHist[i]->init(10);
 124  }
 125 
// Two-dimensional [type][machine] histograms.
// NOTE(review): source lines 128 and 130-131 (the push_back arguments,
// presumably "std::vector<statistics::Histogram *>()") are missing from
// this extraction.
 126  for (int i = 0; i < RubyRequestType_NUM; i++) {
 127  m_hitTypeMachLatencyHist.push_back(
 129  m_missTypeMachLatencyHist.push_back(
 131 
 132  for (int j = 0; j < MachineType_NUM; j++) {
 133  m_hitTypeMachLatencyHist[i].push_back(new statistics::Histogram());
 134  m_hitTypeMachLatencyHist[i][j]->init(10);
 135 
 136  m_missTypeMachLatencyHist[i].push_back(
 137  new statistics::Histogram());
 138  m_missTypeMachLatencyHist[i][j]->init(10);
 139  }
 140  }
 141 
 142 }
143 
// Destructor (empty). NOTE(review): the signature line (source line 144,
// "Sequencer::~Sequencer()") is missing from this extraction. Histograms
// allocated with new in the constructor are not deleted here; presumably
// intentional for simulator-lifetime objects — confirm upstream.
 145 {
 146 }
147 
 148 void
// Register a Load-Linked: lock the cache line for this sequencer's version
// (hardware context) in the local LL/SC monitor.
// NOTE(review): the signature line (source line 149, presumably
// "Sequencer::llscLoadLinked(const Addr claddr)") is missing.
 150 {
 151  fatal_if(m_dataCache_ptr == NULL,
 152  "%s must have a dcache object to support LLSC requests.", name());
 153  AbstractCacheEntry *line = m_dataCache_ptr->lookup(claddr);
 154  if (line) {
 155  line->setLocked(m_version);
 156  DPRINTF(LLSC, "LLSC Monitor - inserting load linked - "
 157  "addr=0x%lx - cpu=%u\n", claddr, m_version);
 158  }
 159 }
160 
 161 void
// Clear this sequencer's LL/SC monitor entry for the line, if it holds one.
// Unlike llscLoadLinked, a missing dcache is tolerated (early return) since
// this is invoked for all stores and evictions.
// NOTE(review): the signature line (source line 162, presumably
// "Sequencer::llscClearMonitor(const Addr claddr)") is missing.
 163 {
 164  // clear monitor is called for all stores and evictions
 165  if (m_dataCache_ptr == NULL)
 166  return;
 167  AbstractCacheEntry *line = m_dataCache_ptr->lookup(claddr);
 168  if (line && line->isLocked(m_version)) {
 169  line->clearLocked();
 170  DPRINTF(LLSC, "LLSC Monitor - clearing due to store - "
 171  "addr=0x%lx - cpu=%u\n", claddr, m_version);
 172  }
 173 }
174 
 175 bool
// Attempt a Store-Conditional: returns true iff this sequencer's monitor
// still holds the line locked. Either way the lock is cleared (SC always
// consumes the reservation).
// NOTE(review): the signature line (source line 176, presumably
// "Sequencer::llscStoreConditional(const Addr claddr)") is missing.
 177 {
 178  fatal_if(m_dataCache_ptr == NULL,
 179  "%s must have a dcache object to support LLSC requests.", name());
 180  AbstractCacheEntry *line = m_dataCache_ptr->lookup(claddr);
 181  if (!line)
 182  return false;
 183 
 184  DPRINTF(LLSC, "LLSC Monitor - clearing due to "
 185  "store conditional - "
 186  "addr=0x%lx - cpu=%u\n",
 187  claddr, m_version);
 188 
 189  if (line->isLocked(m_version)) {
 190  line->clearLocked();
 191  return true;
 192  } else {
 193  line->clearLocked();
 194  return false;
 195  }
 196 }
197 
 198 bool
// Non-destructive query: is `address`'s line currently locked by this
// sequencer's monitor? Does not clear the lock.
// NOTE(review): the signature line (source line 199, presumably
// "Sequencer::llscCheckMonitor(const Addr address)") is missing.
 200 {
 201  assert(m_dataCache_ptr != NULL);
 202  const Addr claddr = makeLineAddress(address);
 203  AbstractCacheEntry *line = m_dataCache_ptr->lookup(claddr);
 204  if (!line)
 205  return false;
 206 
 207  if (line->isLocked(m_version)) {
 208  return true;
 209  } else {
 210  return false;
 211  }
 212 }
213 
214 void
216 {
218 }
219 
 220 void
// Periodic deadlock check (deadlockCheckEvent handler): panics if any
// outstanding request has been pending longer than m_deadlock_threshold,
// and cross-checks m_outstanding_count against the request table.
// NOTE(review): the signature line (source line 221, "Sequencer::wakeup()")
// is missing.
 222 {
 223  assert(drainState() != DrainState::Draining);
 224 
 225  // Check for deadlock of any of the requests
 226  Cycles current_time = curCycle();
 227 
 228  // Check across all outstanding requests
 229  int total_outstanding = 0;
 230 
 231  for (const auto &table_entry : m_RequestTable) {
 232  for (const auto &seq_req : table_entry.second) {
 233  if (current_time - seq_req.issue_time < m_deadlock_threshold)
 234  continue;
 235 
 236  panic("Possible Deadlock detected. Aborting!\n version: %d "
 237  "request.paddr: 0x%x m_readRequestTable: %d current time: "
 238  "%u issue_time: %d difference: %d\n", m_version,
 239  seq_req.pkt->getAddr(), table_entry.second.size(),
 240  current_time * clockPeriod(), seq_req.issue_time
 241  * clockPeriod(), (current_time * clockPeriod())
 242  - (seq_req.issue_time * clockPeriod()));
 243  }
 244  total_outstanding += table_entry.second.size();
 245  }
 246 
 247  assert(m_outstanding_count == total_outstanding);
 248 
 249  if (m_outstanding_count > 0) {
 250  // If there are still outstanding requests, keep checking
// NOTE(review): source line 251 (presumably the schedule() call that
// re-arms deadlockCheckEvent) is missing from this extraction.
 252  }
 253 }
254 
 255 int
// Functional (debug) write: forwards to RubyPort::functionalWrite, then also
// patches the packets of all queued outstanding requests so in-flight data
// observes the write. Returns the total number of bytes/targets written.
// NOTE(review): the signature line (source line 256, presumably
// "Sequencer::functionalWrite(Packet *func_pkt)") is missing.
 257 {
 258  int num_written = RubyPort::functionalWrite(func_pkt);
 259 
 260  for (const auto &table_entry : m_RequestTable) {
 261  for (const auto& seq_req : table_entry.second) {
 262  if (seq_req.functionalWrite(func_pkt))
 263  ++num_written;
 264  }
 265  }
 266 
 267  return num_written;
 268 }
269 
// Reset all profiling histograms and the per-machine incomplete-time
// counters.
// NOTE(review): the signature (around source line 270,
// "Sequencer::resetStats()") and lines 272-275 (presumably resets of the
// scalar histograms m_outstandReqHist/m_latencyHist/m_hitLatencyHist/
// m_missLatencyHist) are missing, as are lines 292-293 (presumably resets
// of m_ForwardToFirstResponseDelayHist/m_FirstResponseToCompletionDelayHist).
 271 {
 276  for (int i = 0; i < RubyRequestType_NUM; i++) {
 277  m_typeLatencyHist[i]->reset();
 278  m_hitTypeLatencyHist[i]->reset();
 279  m_missTypeLatencyHist[i]->reset();
 280  for (int j = 0; j < MachineType_NUM; j++) {
 281  m_hitTypeMachLatencyHist[i][j]->reset();
 282  m_missTypeMachLatencyHist[i][j]->reset();
 283  }
 284  }
 285 
 286  for (int i = 0; i < MachineType_NUM; i++) {
 287  m_missMachLatencyHist[i]->reset();
 288  m_hitMachLatencyHist[i]->reset();
 289 
 290  m_IssueToInitialDelayHist[i]->reset();
 291  m_InitialToForwardDelayHist[i]->reset();
 294 
 295  m_IncompleteTimes[i] = 0;
 296  }
 297 }
298 
 299 // Insert the request in the request table. Return RequestStatus_Aliased
 300 // if the entry was already present.
 301 RequestStatus
 302 Sequencer::insertRequest(PacketPtr pkt, RubyRequestType primary_type,
 303  RubyRequestType secondary_type)
 304 {
 305  // See if we should schedule a deadlock check
 306  if (!deadlockCheckEvent.scheduled() &&
// NOTE(review): source lines 307-308 (the remainder of this condition and
// the schedule() of deadlockCheckEvent) are missing from this extraction.
 309  }
 310 
 311  Addr line_addr = makeLineAddress(pkt->getAddr());
 312  // Check if there is any outstanding request for the same cache line.
 313  auto &seq_req_list = m_RequestTable[line_addr];
 314  // Create a default entry
 315  seq_req_list.emplace_back(pkt, primary_type,
 316  secondary_type, curCycle());
// NOTE(review): source line 317 (presumably "m_outstanding_count++;") is
// missing — markRemoved() later decrements a counter incremented here.
 318 
// More than one entry on the per-line list means this request aliases an
// older outstanding request for the same cache line.
 319  if (seq_req_list.size() > 1) {
 320  return RequestStatus_Aliased;
 321  }
 322 
// NOTE(review): source line 323 (presumably sampling m_outstandReqHist) is
// missing from this extraction.
 324 
 325  return RequestStatus_Ready;
 326 }
327 
 328 void
// NOTE(review): the signature (source line 329, "Sequencer::markRemoved()")
// and the body statement (source line 331, presumably
// "m_outstanding_count--;") are missing from this extraction — this is the
// counterpart to the increment performed in insertRequest().
 330 {
 332 }
333 
 334 void
// Profile the end-to-end latency of a completed ruby request into the
// overall / per-type / hit-vs-miss / per-machine histograms, plus the four
// phase-delay histograms when the protocol supplied monotonically ordered
// timestamps. Also emits the ProtocolTrace "Done"/"SC_Failed" record.
 335 Sequencer::recordMissLatency(SequencerRequest* srequest, bool llscSuccess,
 336  const MachineType respondingMach,
 337  bool isExternalHit, Cycles initialRequestTime,
 338  Cycles forwardRequestTime,
 339  Cycles firstResponseTime)
 340 {
 341  RubyRequestType type = srequest->m_type;
 342  Cycles issued_time = srequest->issue_time;
 343  Cycles completion_time = curCycle();
 344 
 345  assert(curCycle() >= issued_time);
 346  Cycles total_lat = completion_time - issued_time;
 347 
 348  if ((initialRequestTime != 0) && (initialRequestTime < issued_time)) {
 349  // if the request was combined in the protocol with an earlier request
 350  // for the same address, it is possible that it will return an
 351  // initialRequestTime corresponding the earlier request. Since Cycles
 352  // is unsigned, we can't let this request get profiled below.
 353 
 354  total_lat = Cycles(0);
 355  }
 356 
 357  DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n",
 358  curTick(), m_version, "Seq", llscSuccess ? "Done" : "SC_Failed",
 359  "", "", printAddress(srequest->pkt->getAddr()), total_lat);
 360 
 361  m_latencyHist.sample(total_lat);
 362  m_typeLatencyHist[type]->sample(total_lat);
 363 
 364  if (isExternalHit) {
 365  m_missLatencyHist.sample(total_lat);
 366  m_missTypeLatencyHist[type]->sample(total_lat);
 367 
 368  if (respondingMach != MachineType_NUM) {
 369  m_missMachLatencyHist[respondingMach]->sample(total_lat);
 370  m_missTypeMachLatencyHist[type][respondingMach]->sample(total_lat);
 371 
// Only break the latency into phases when the four timestamps are
// consistent (non-decreasing); otherwise count it as incomplete.
 372  if ((issued_time <= initialRequestTime) &&
 373  (initialRequestTime <= forwardRequestTime) &&
 374  (forwardRequestTime <= firstResponseTime) &&
 375  (firstResponseTime <= completion_time)) {
 376 
 377  m_IssueToInitialDelayHist[respondingMach]->sample(
 378  initialRequestTime - issued_time);
 379  m_InitialToForwardDelayHist[respondingMach]->sample(
 380  forwardRequestTime - initialRequestTime);
 381  m_ForwardToFirstResponseDelayHist[respondingMach]->sample(
 382  firstResponseTime - forwardRequestTime);
 383  m_FirstResponseToCompletionDelayHist[respondingMach]->sample(
 384  completion_time - firstResponseTime);
 385  } else {
 386  m_IncompleteTimes[respondingMach]++;
 387  }
 388  }
 389  } else {
 390  m_hitLatencyHist.sample(total_lat);
 391  m_hitTypeLatencyHist[type]->sample(total_lat);
 392 
 393  if (respondingMach != MachineType_NUM) {
 394  m_hitMachLatencyHist[respondingMach]->sample(total_lat);
 395  m_hitTypeMachLatencyHist[type][respondingMach]->sample(total_lat);
 396  }
 397  }
 398 }
399 
 400 void
// Proxy for writeCallback used when a Store-Conditional fails: first clears
// this sequencer's LL/SC monitor for the line, then completes the write.
// NOTE(review): the signature line (source line 401, presumably
// "Sequencer::writeCallbackScFail(Addr address, DataBlock& data)") is
// missing from this extraction.
 402 {
 403  llscClearMonitor(address);
 404  writeCallback(address, data);
 405 }
406 
 407 void
// Completion callback for a write to `address` (a line address): drains the
// per-line request list, performing hitCallback for every CPU request that
// was coalesced behind the single outstanding ruby request. Handles LL/SC
// success/failure and Locked_RMW block/unblock along the way.
// NOTE(review): the signature line (source line 408, presumably
// "Sequencer::writeCallback(Addr address, DataBlock& data, ...)") is
// missing from this extraction.
 409  const bool externalHit, const MachineType mach,
 410  const Cycles initialRequestTime,
 411  const Cycles forwardRequestTime,
 412  const Cycles firstResponseTime,
 413  const bool noCoales)
 414 {
 415  //
 416  // Free the whole list as we assume we have had the exclusive access
 417  // to this cache line when response for the write comes back
 418  //
 419  assert(address == makeLineAddress(address));
 420  assert(m_RequestTable.find(address) != m_RequestTable.end());
 421  auto &seq_req_list = m_RequestTable[address];
 422 
 423  // Perform hitCallback on every cpu request made to this cache block while
 424  // ruby request was outstanding. Since only 1 ruby request was made,
 425  // profile the ruby latency once.
 426  bool ruby_request = true;
 427  int aliased_stores = 0;
 428  int aliased_loads = 0;
 429  while (!seq_req_list.empty()) {
 430  SequencerRequest &seq_req = seq_req_list.front();
 431 
 432  if (noCoales && !ruby_request) {
 433  // Do not process follow-up requests
 434  // (e.g. if full line no present)
 435  // Reissue to the cache hierarchy
 436  issueRequest(seq_req.pkt, seq_req.m_second_type);
 437  break;
 438  }
 439 
// The first (ruby) request on a write callback must itself be a write;
// reads at the head would have been completed by readCallback instead.
 440  if (ruby_request) {
 441  assert(seq_req.m_type != RubyRequestType_LD);
 442  assert(seq_req.m_type != RubyRequestType_Load_Linked);
 443  assert(seq_req.m_type != RubyRequestType_IFETCH);
 444  }
 445 
 446  // handle write request
 447  if ((seq_req.m_type != RubyRequestType_LD) &&
 448  (seq_req.m_type != RubyRequestType_Load_Linked) &&
 449  (seq_req.m_type != RubyRequestType_IFETCH)) {
 450  // LL/SC support (tested with ARMv8)
 451  bool success = true;
 452 
 453  if (seq_req.m_type != RubyRequestType_Store_Conditional) {
 454  // Regular stores to addresses being monitored
 455  // will fail (remove) the monitor entry.
 456  llscClearMonitor(address);
 457  } else {
 458  // Store conditionals must first check the monitor
 459  // if they will succeed or not
 460  success = llscStoreConditional(address);
 461  seq_req.pkt->req->setExtraData(success ? 1 : 0);
 462  }
 463 
 464  // Handle SLICC block_on behavior for Locked_RMW accesses. NOTE: the
 465  // address variable here is assumed to be a line address, so when
 466  // blocking buffers, must check line addresses.
 467  if (seq_req.m_type == RubyRequestType_Locked_RMW_Read) {
 468  // blockOnQueue blocks all first-level cache controller queues
 469  // waiting on memory accesses for the specified address that go
 470  // to the specified queue. In this case, a Locked_RMW_Write must
 471  // go to the mandatory_q before unblocking the first-level
 472  // controller. This will block standard loads, stores, ifetches,
 473  // etc.
// NOTE(review): source line 474 (presumably the
// m_controller->blockOnQueue(address, m_mandatory_q_ptr) call) is
// missing from this extraction.
 475  } else if (seq_req.m_type == RubyRequestType_Locked_RMW_Write) {
 476  m_controller->unblock(address);
 477  }
 478 
 479  if (ruby_request) {
 480  recordMissLatency(&seq_req, success, mach, externalHit,
 481  initialRequestTime, forwardRequestTime,
 482  firstResponseTime);
 483  } else {
 484  aliased_stores++;
 485  }
 486  markRemoved();
 487  hitCallback(&seq_req, data, success, mach, externalHit,
 488  initialRequestTime, forwardRequestTime,
 489  firstResponseTime, !ruby_request);
 490  ruby_request = false;
 491  } else {
 492  // handle read request
 493  assert(!ruby_request);
 494  markRemoved();
 495  aliased_loads++;
 496  hitCallback(&seq_req, data, true, mach, externalHit,
 497  initialRequestTime, forwardRequestTime,
 498  firstResponseTime, !ruby_request);
 499  }
 500  seq_req_list.pop_front();
 501  }
 502 
 503  // free all outstanding requests corresponding to this address
 504  if (seq_req_list.empty()) {
 505  m_RequestTable.erase(address);
 506  }
 507 }
508 
 509 void
// Completion callback for a read: completes the leading run of aliased read
// requests (LD / Load_Linked / IFETCH) on the per-line list; on hitting the
// first write it reissues that write to the cache hierarchy and stops.
// NOTE(review): the signature line (source line 510, presumably
// "Sequencer::readCallback(Addr address, DataBlock& data, ...)") is
// missing from this extraction.
 511  bool externalHit, const MachineType mach,
 512  Cycles initialRequestTime,
 513  Cycles forwardRequestTime,
 514  Cycles firstResponseTime)
 515 {
 516  //
 517  // Free up read requests until we hit the first Write request
 518  // or end of the corresponding list.
 519  //
 520  assert(address == makeLineAddress(address));
 521  assert(m_RequestTable.find(address) != m_RequestTable.end());
 522  auto &seq_req_list = m_RequestTable[address];
 523 
 524  // Perform hitCallback on every cpu request made to this cache block while
 525  // ruby request was outstanding. Since only 1 ruby request was made,
 526  // profile the ruby latency once.
 527  bool ruby_request = true;
 528  int aliased_loads = 0;
 529  while (!seq_req_list.empty()) {
 530  SequencerRequest &seq_req = seq_req_list.front();
 531  if (ruby_request) {
 532  assert((seq_req.m_type == RubyRequestType_LD) ||
 533  (seq_req.m_type == RubyRequestType_Load_Linked) ||
 534  (seq_req.m_type == RubyRequestType_IFETCH));
 535  } else {
 536  aliased_loads++;
 537  }
 538  if ((seq_req.m_type != RubyRequestType_LD) &&
 539  (seq_req.m_type != RubyRequestType_Load_Linked) &&
 540  (seq_req.m_type != RubyRequestType_IFETCH)) {
 541  // Write request: reissue request to the cache hierarchy
 542  issueRequest(seq_req.pkt, seq_req.m_second_type);
 543  break;
 544  }
 545  if (ruby_request) {
 546  recordMissLatency(&seq_req, true, mach, externalHit,
 547  initialRequestTime, forwardRequestTime,
 548  firstResponseTime);
 549  }
 550  markRemoved();
 551  hitCallback(&seq_req, data, true, mach, externalHit,
 552  initialRequestTime, forwardRequestTime,
 553  firstResponseTime, !ruby_request);
 554  ruby_request = false;
 555  seq_req_list.pop_front();
 556  }
 557 
 558  // free all outstanding requests corresponding to this address
 559  if (seq_req_list.empty()) {
 560  m_RequestTable.erase(address);
 561  }
 562 }
563 
 564 void
// Complete one CPU request: registers LL monitors, moves data between the
// DataBlock and the packet according to the request type, feeds the
// RubyTester subblock if active, and finally hands the packet back via
// ruby_hit_callback (or the cache recorder during warmup/cooldown).
// NOTE(review): the signature line (source line 565) is missing from this
// extraction; parameters visible from the continuation lines below.
 566  bool llscSuccess,
 567  const MachineType mach, const bool externalHit,
 568  const Cycles initialRequestTime,
 569  const Cycles forwardRequestTime,
 570  const Cycles firstResponseTime,
 571  const bool was_coalesced)
 572 {
 573  warn_once("Replacement policy updates recently became the responsibility "
 574  "of SLICC state machines. Make sure to setMRU() near callbacks "
 575  "in .sm files!");
 576 
 577  PacketPtr pkt = srequest->pkt;
 578  Addr request_address(pkt->getAddr());
 579  RubyRequestType type = srequest->m_type;
 580 
 581  if (was_coalesced) {
 582  // Notify the controller about a coalesced request so it can properly
 583  // account for it in its hit/miss stats and/or train prefetchers
 584  // (this is protocol-dependent)
 585  m_controller->notifyCoalesced(request_address, type, pkt->req,
 586  data, externalHit)
 587  }
 588 
 589  // Load-linked handling
 590  if (type == RubyRequestType_Load_Linked) {
 591  Addr line_addr = makeLineAddress(request_address);
 592  llscLoadLinked(line_addr);
 593  }
 594 
 595  // update the data unless it is a non-data-carrying flush
// NOTE(review): source line 596 (the condition guarding this branch,
// presumably a RubySystem warmup check) is missing from this extraction.
 597  data.setData(pkt);
 598  } else if (!pkt->isFlush()) {
 599  if ((type == RubyRequestType_LD) ||
 600  (type == RubyRequestType_IFETCH) ||
 601  (type == RubyRequestType_RMW_Read) ||
 602  (type == RubyRequestType_Locked_RMW_Read) ||
 603  (type == RubyRequestType_Load_Linked)) {
 604  pkt->setData(
 605  data.getData(getOffset(request_address), pkt->getSize()));
 606  DPRINTF(RubySequencer, "read data %s\n", data);
 607  } else if (pkt->req->isSwap()) {
// Atomic swap: capture the packet's new value, hand the old memory
// value back to the packet, then write the new value into the block.
 608  assert(!pkt->isMaskedWrite());
 609  std::vector<uint8_t> overwrite_val(pkt->getSize());
 610  pkt->writeData(&overwrite_val[0]);
 611  pkt->setData(
 612  data.getData(getOffset(request_address), pkt->getSize()));
 613  data.setData(&overwrite_val[0],
 614  getOffset(request_address), pkt->getSize());
 615  DPRINTF(RubySequencer, "swap data %s\n", data);
 616  } else if (type != RubyRequestType_Store_Conditional || llscSuccess) {
 617  // Types of stores set the actual data here, apart from
 618  // failed Store Conditional requests
 619  data.setData(pkt);
 620  DPRINTF(RubySequencer, "set data %s\n", data);
 621  }
 622  }
 623 
 624  // If using the RubyTester, update the RubyTester sender state's
 625  // subBlock with the recieved data. The tester will later access
 626  // this state.
 627  if (m_usingRubyTester) {
 628  DPRINTF(RubySequencer, "hitCallback %s 0x%x using RubyTester\n",
 629  pkt->cmdString(), pkt->getAddr());
 630  RubyTester::SenderState* testerSenderState =
// NOTE(review): source line 631 (presumably
// "pkt->findNextSenderState<RubyTester::SenderState>();") is missing.
 632  assert(testerSenderState);
 633  testerSenderState->subBlock.mergeFrom(data);
 634  }
 635 
// NOTE(review): source lines 636-637 (presumably the RubySystem pointer and
// the getWarmupEnabled() condition) are missing from this extraction.
 638  assert(pkt->req);
 639  delete pkt;
 640  rs->m_cache_recorder->enqueueNextFetchRequest();
 641  } else if (RubySystem::getCooldownEnabled()) {
 642  delete pkt;
 643  rs->m_cache_recorder->enqueueNextFlushRequest();
 644  } else {
 645  ruby_hit_callback(pkt);
// NOTE(review): source line 646 (presumably testDrainComplete()) is missing.
 647  }
 648 }
649 
 650 bool
// True when no requests are outstanding (request table empty).
// NOTE(review): the signature line (source line 651, presumably
// "Sequencer::empty() const") is missing from this extraction.
 652 {
 653  return m_RequestTable.empty();
 654 }
655 
 656 RequestStatus
// Entry point from the CPU side: classifies the packet into primary /
// secondary RubyRequestTypes (LL/SC, locked RMW, HTM, ifetch, load, store,
// swap, flush), inserts it into the request table, and issues it to the
// cache hierarchy unless it aliases an already-outstanding line.
// NOTE(review): the signature line (source line 657, presumably
// "Sequencer::makeRequest(PacketPtr pkt)") is missing from this extraction.
 658 {
 659  // HTM abort signals must be allowed to reach the Sequencer
 660  // the same cycle they are issued. They cannot be retried.
// NOTE(review): source line 661 (the first half of this condition,
// presumably the m_outstanding_count >= m_max_outstanding_requests check)
// is missing from this extraction.
 662  !pkt->req->isHTMAbort()) {
 663  return RequestStatus_BufferFull;
 664  }
 665 
 666  RubyRequestType primary_type = RubyRequestType_NULL;
 667  RubyRequestType secondary_type = RubyRequestType_NULL;
 668 
 669  if (pkt->isLLSC()) {
 670  // LL/SC instructions need to be handled carefully by the cache
 671  // coherence protocol to ensure they follow the proper semantics. In
 672  // particular, by identifying the operations as atomic, the protocol
 673  // should understand that migratory sharing optimizations should not
 674  // be performed (i.e. a load between the LL and SC should not steal
 675  // away exclusive permission).
 676  //
 677  // The following logic works correctly with the semantics
 678  // of armV8 LDEX/STEX instructions.
 679 
 680  if (pkt->isWrite()) {
 681  DPRINTF(RubySequencer, "Issuing SC\n");
 682  primary_type = RubyRequestType_Store_Conditional;
 683 #if defined (PROTOCOL_MESI_Three_Level) || defined (PROTOCOL_MESI_Three_Level_HTM)
 684  secondary_type = RubyRequestType_Store_Conditional;
 685 #else
 686  secondary_type = RubyRequestType_ST;
 687 #endif
 688  } else {
 689  DPRINTF(RubySequencer, "Issuing LL\n");
 690  assert(pkt->isRead());
 691  primary_type = RubyRequestType_Load_Linked;
 692  secondary_type = RubyRequestType_LD;
 693  }
 694  } else if (pkt->req->isLockedRMW()) {
 695  //
 696  // x86 locked instructions are translated to store cache coherence
 697  // requests because these requests should always be treated as read
 698  // exclusive operations and should leverage any migratory sharing
 699  // optimization built into the protocol.
 700  //
 701  if (pkt->isWrite()) {
 702  DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
 703  primary_type = RubyRequestType_Locked_RMW_Write;
 704  } else {
 705  DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
 706  assert(pkt->isRead());
 707  primary_type = RubyRequestType_Locked_RMW_Read;
 708  }
 709  secondary_type = RubyRequestType_ST;
 710  } else {
 711  //
 712  // To support SwapReq, we need to check isWrite() first: a SwapReq
 713  // should always be treated like a write, but since a SwapReq implies
 714  // both isWrite() and isRead() are true, check isWrite() first here.
 715  //
 716  if (pkt->isWrite()) {
 717  //
 718  // Note: M5 packets do not differentiate ST from RMW_Write
 719  //
 720  primary_type = secondary_type = RubyRequestType_ST;
 721  } else if (pkt->isRead()) {
 722  // hardware transactional memory commands
 723  if (pkt->req->isHTMCmd()) {
 724  primary_type = secondary_type = htmCmdToRubyRequestType(pkt);
 725  } else if (pkt->req->isInstFetch()) {
 726  primary_type = secondary_type = RubyRequestType_IFETCH;
 727  } else {
 728  bool storeCheck = false;
 729  // only X86 need the store check
 730  if (system->getArch() == Arch::X86ISA) {
 731  uint32_t flags = pkt->req->getFlags();
 732  storeCheck = flags &
// NOTE(review): source line 733 (presumably the X86ISA StoreCheck
// flag constant from arch/x86/ldstflags.hh) is missing.
 734  }
 735  if (storeCheck) {
 736  primary_type = RubyRequestType_RMW_Read;
 737  secondary_type = RubyRequestType_ST;
 738  } else {
 739  primary_type = secondary_type = RubyRequestType_LD;
 740  }
 741  }
 742  } else if (pkt->isFlush()) {
 743  primary_type = secondary_type = RubyRequestType_FLUSH;
 744  } else {
 745  panic("Unsupported ruby packet type\n");
 746  }
 747  }
 748 
 749  // Check if the line is blocked for a Locked_RMW
// NOTE(review): source line 750 (the first half of this condition,
// presumably m_controller->isBlocked(makeLineAddress(...)) ) is missing.
 751  (primary_type != RubyRequestType_Locked_RMW_Write)) {
 752  // Return that this request's cache line address aliases with
 753  // a prior request that locked the cache line. The request cannot
 754  // proceed until the cache line is unlocked by a Locked_RMW_Write
 755  return RequestStatus_Aliased;
 756  }
 757 
 758  RequestStatus status = insertRequest(pkt, primary_type, secondary_type);
 759 
 760  // It is OK to receive RequestStatus_Aliased, it can be considered Issued
 761  if (status != RequestStatus_Ready && status != RequestStatus_Aliased)
 762  return status;
 763  // non-aliased with any existing request in the request table, just issue
 764  // to the cache
 765  if (status != RequestStatus_Aliased)
 766  issueRequest(pkt, secondary_type);
 767 
 768  // TODO: issue hardware prefetches here
 769  return RequestStatus_Issued;
 770 }
771 
 772 void
// Build a RubyRequest message from the packet (context id, core id, PC, HTM
// state) and enqueue it on the mandatory queue with the controller-defined
// mandatory-queue latency.
 773 Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
 774 {
 775  assert(pkt != NULL);
 776  ContextID proc_id = pkt->req->hasContextId() ?
 777  pkt->req->contextId() : InvalidContextID;
 778 
 779  ContextID core_id = coreId();
 780 
 781  // If valid, copy the pc to the ruby request
 782  Addr pc = 0;
 783  if (pkt->req->hasPC()) {
 784  pc = pkt->req->getPC();
 785  }
 786 
 787  // check if the packet has data as for example prefetch and flush
 788  // requests do not
 789  std::shared_ptr<RubyRequest> msg =
 790  std::make_shared<RubyRequest>(clockEdge(), pkt->getAddr(),
 791  pkt->getSize(), pc, secondary_type,
 792  RubyAccessMode_Supervisor, pkt,
 793  PrefetchBit_No, proc_id, core_id);
 794 
 795  DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %s\n",
 796  curTick(), m_version, "Seq", "Begin", "", "",
 797  printAddress(msg->getPhysicalAddress()),
 798  RubyRequestType_to_string(secondary_type));
 799 
 800  // hardware transactional memory
 801  // If the request originates in a transaction,
 802  // then mark the Ruby message as such.
 803  if (pkt->isHtmTransactional()) {
 804  msg->m_htmFromTransaction = true;
 805  msg->m_htmTransactionUid = pkt->getHtmTransactionUid();
 806  }
 807 
 808  Tick latency = cyclesToTicks(
 809  m_controller->mandatoryQueueLatency(secondary_type));
 810  assert(latency > 0);
 811 
 812  assert(m_mandatory_q_ptr != NULL);
 813  m_mandatory_q_ptr->enqueue(msg, clockEdge(), latency);
 814 }
815 
// Debug pretty-printer for the request table: prints each line address
// followed by the secondary types of its queued requests.
// NOTE(review): the closing "]" is emitted once after the loop, so with
// multiple table entries the brackets do not pair per entry — presumably
// acceptable for debug output; confirm against upstream.
 816 template <class KEY, class VALUE>
 817 std::ostream &
 818 operator<<(std::ostream &out, const std::unordered_map<KEY, VALUE> &map)
 819 {
 820  for (const auto &table_entry : map) {
 821  out << "[ " << table_entry.first << " =";
 822  for (const auto &seq_req : table_entry.second) {
 823  out << " " << RubyRequestType_to_string(seq_req.m_second_type);
 824  }
 825  }
 826  out << " ]";
 827 
 828  return out;
 829 }
830 
 831 void
// Human-readable dump of sequencer state (version, outstanding count, and
// the request table via the operator<< above).
 832 Sequencer::print(std::ostream& out) const
 833 {
 834  out << "[Sequencer: " << m_version
 835  << ", outstanding requests: " << m_outstanding_count
 836  << ", request table: " << m_RequestTable
 837  << "]";
 838 }
839 
 840 void
// Record a SequencerRequestType statistic event (RubyStats debug trace only
// in this visible body).
 841 Sequencer::recordRequestType(SequencerRequestType requestType) {
 842  DPRINTF(RubyStats, "Recorded statistic: %s\n",
 843  SequencerRequestType_to_string(requestType));
 844 }
845 
 846 void
// Cache-line eviction notification: drop any LL/SC reservation on the line,
// then propagate the eviction to the CPU side.
// NOTE(review): the signature line (source line 847, presumably
// "Sequencer::evictionCallback(Addr address)") is missing from this
// extraction.
 848 {
 849  llscClearMonitor(address);
 850  ruby_eviction_callback(address);
 851 }
852 
853 } // namespace ruby
854 } // namespace gem5
gem5::ruby::Sequencer::m_FirstResponseToCompletionDelayHist
std::vector< statistics::Histogram * > m_FirstResponseToCompletionDelayHist
Definition: Sequencer.hh:277
gem5::ruby::Sequencer::~Sequencer
~Sequencer()
Definition: Sequencer.cc:144
gem5::ruby::SubBlock::mergeFrom
void mergeFrom(const DataBlock &data)
Definition: SubBlock.hh:66
gem5::curTick
Tick curTick()
The universal simulation clock.
Definition: cur_tick.hh:46
gem5::Packet::cmdString
const std::string & cmdString() const
Return the string name of the cmd field (for debugging and tracing).
Definition: packet.hh:577
gem5::ruby::Sequencer::m_RequestTable
std::unordered_map< Addr, std::list< SequencerRequest > > m_RequestTable
Definition: Sequencer.hh:217
gem5::ruby::Sequencer::m_hitMachLatencyHist
std::vector< statistics::Histogram * > m_hitMachLatencyHist
Histograms for profiling the latencies for requests that did not required external messages.
Definition: Sequencer.hh:259
gem5::ruby::RubyPort::m_version
uint32_t m_version
Definition: RubyPort.hh:196
gem5::ruby::Sequencer::m_deadlock_threshold
Cycles m_deadlock_threshold
Definition: Sequencer.hh:219
gem5::ruby::htmCmdToRubyRequestType
RubyRequestType htmCmdToRubyRequestType(const Packet *pkt)
Definition: RubySlicc_Util.hh:165
system.hh
Profiler.hh
DPRINTFR
#define DPRINTFR(x,...)
Definition: trace.hh:200
data
const char data[]
Definition: circlebuf.test.cc:48
gem5::Clocked::curCycle
Cycles curCycle() const
Determine the current cycle, corresponding to a tick aligned to a clock edge.
Definition: clocked_object.hh:195
gem5::ruby::printAddress
std::string printAddress(Addr addr)
Definition: Address.cc:80
gem5::ruby::Sequencer::makeRequest
RequestStatus makeRequest(PacketPtr pkt) override
Definition: Sequencer.cc:657
gem5::ruby::Sequencer::writeCallbackScFail
void writeCallbackScFail(Addr address, DataBlock &data)
Proxy function to writeCallback that first invalidates the line address in the local monitor.
Definition: Sequencer.cc:401
gem5::ruby::Sequencer::issueRequest
void issueRequest(PacketPtr pkt, RubyRequestType type)
Definition: Sequencer.cc:773
gem5::Drainable::drainState
DrainState drainState() const
Return the current drain state of an object.
Definition: drain.hh:324
gem5::ruby::Sequencer::m_dataCache_ptr
CacheMemory * m_dataCache_ptr
Definition: Sequencer.hh:228
gem5::ruby::Sequencer::m_outstandReqHist
statistics::Histogram m_outstandReqHist
Histogram for number of outstanding requests per cycle.
Definition: Sequencer.hh:246
warn_once
#define warn_once(...)
Definition: logging.hh:249
gem5::Packet::findNextSenderState
T * findNextSenderState() const
Go through the sender state stack and return the first instance that is of type T (as determined by a...
Definition: packet.hh:564
gem5::Packet::setData
void setData(const uint8_t *p)
Copy data into the packet from the provided pointer.
Definition: packet.hh:1252
gem5::ruby::operator<<
std::ostream & operator<<(std::ostream &os, const BoolVec &myvector)
Definition: BoolVec.cc:49
gem5::Packet::req
RequestPtr req
A pointer to the original request.
Definition: packet.hh:366
gem5::ruby::AbstractCacheEntry
Definition: AbstractCacheEntry.hh:62
gem5::ruby::RubyPort::system
System * system
Definition: RubyPort.hh:200
gem5::InvalidContextID
const ContextID InvalidContextID
Definition: types.hh:247
gem5::ruby::Sequencer::hitCallback
void hitCallback(SequencerRequest *srequest, DataBlock &data, bool llscSuccess, const MachineType mach, const bool externalHit, const Cycles initialRequestTime, const Cycles forwardRequestTime, const Cycles firstResponseTime, const bool was_coalesced)
Definition: Sequencer.cc:565
gem5::ruby::Sequencer::insertRequest
virtual RequestStatus insertRequest(PacketPtr pkt, RubyRequestType primary_type, RubyRequestType secondary_type)
Definition: Sequencer.cc:302
gem5::ruby::RubyPort::ruby_hit_callback
void ruby_hit_callback(PacketPtr pkt)
Definition: RubyPort.cc:441
gem5::ruby::MessageBuffer::enqueue
void enqueue(MsgPtr message, Tick curTime, Tick delta)
Definition: MessageBuffer.cc:197
gem5::Packet::isWrite
bool isWrite() const
Definition: packet.hh:583
RubyRequest.hh
ldstflags.hh
gem5::EventManager::schedule
void schedule(Event &event, Tick when)
Definition: eventq.hh:1019
std::vector
STL vector class.
Definition: stl.hh:37
gem5::ruby::AbstractCacheEntry::clearLocked
void clearLocked()
Definition: AbstractCacheEntry.cc:91
gem5::ruby::Sequencer::m_missLatencyHist
statistics::Histogram m_missLatencyHist
Histogram for holding latency profile of all requests that miss in the controller connected to this s...
Definition: Sequencer.hh:264
gem5::ruby::AbstractController::isBlocked
bool isBlocked(Addr) const
Definition: AbstractController.cc:319
gem5::ruby::Sequencer::recordRequestType
void recordRequestType(SequencerRequestType requestType)
Definition: Sequencer.cc:841
gem5::ruby::Sequencer::coreId
int coreId() const
Definition: Sequencer.hh:143
gem5::ArmISA::i
Bitfield< 7 > i
Definition: misc_types.hh:66
gem5::ruby::CacheMemory::lookup
AbstractCacheEntry * lookup(Addr address)
Definition: CacheMemory.cc:342
gem5::ruby::makeLineAddress
Addr makeLineAddress(Addr addr)
Definition: Address.cc:60
gem5::statistics::DistBase::sample
void sample(const U &v, int n=1)
Add a value to the distribtion n times.
Definition: statistics.hh:1325
gem5::ruby::Sequencer::m_outstanding_count
int m_outstanding_count
Definition: Sequencer.hh:238
gem5::ruby::RubyPort::m_mandatory_q_ptr
MessageBuffer * m_mandatory_q_ptr
Definition: RubyPort.hh:198
packet.hh
gem5::RubyTester::SenderState
Definition: RubyTester.hh:89
gem5::Cycles
Cycles is a wrapper class for representing cycle counts, i.e.
Definition: types.hh:78
str.hh
gem5::ruby::Sequencer::m_missMachLatencyHist
std::vector< statistics::Histogram * > m_missMachLatencyHist
Histograms for profiling the latencies for requests that required external messages.
Definition: Sequencer.hh:269
gem5::ruby::Sequencer::m_missTypeMachLatencyHist
std::vector< std::vector< statistics::Histogram * > > m_missTypeMachLatencyHist
Definition: Sequencer.hh:271
gem5::statistics::Histogram
A simple histogram stat.
Definition: statistics.hh:2123
gem5::ruby::Sequencer::evictionCallback
void evictionCallback(Addr address)
Definition: Sequencer.cc:847
gem5::ArmISA::j
Bitfield< 24 > j
Definition: misc_types.hh:57
gem5::ruby::RubyPort::m_usingRubyTester
bool m_usingRubyTester
Definition: RubyPort.hh:199
gem5::Packet::isRead
bool isRead() const
Definition: packet.hh:582
gem5::ruby::RubyPort::ruby_eviction_callback
void ruby_eviction_callback(Addr address)
Definition: RubyPort.cc:640
gem5::ruby::Sequencer::readCallback
void readCallback(Addr address, DataBlock &data, const bool externalHit=false, const MachineType mach=MachineType_NUM, const Cycles initialRequestTime=Cycles(0), const Cycles forwardRequestTime=Cycles(0), const Cycles firstResponseTime=Cycles(0))
Definition: Sequencer.cc:510
gem5::ruby::Sequencer::recordMissLatency
void recordMissLatency(SequencerRequest *srequest, bool llscSuccess, const MachineType respondingMach, bool isExternalHit, Cycles initialRequestTime, Cycles forwardRequestTime, Cycles firstResponseTime)
Definition: Sequencer.cc:335
gem5::ruby::Sequencer::m_ForwardToFirstResponseDelayHist
std::vector< statistics::Histogram * > m_ForwardToFirstResponseDelayHist
Definition: Sequencer.hh:276
gem5::ruby::Sequencer::llscStoreConditional
bool llscStoreConditional(const Addr)
Searches for cache line address in the global monitor tagged with this Sequencer object's version id.
Definition: Sequencer.cc:176
gem5::Named::name
virtual std::string name() const
Definition: named.hh:47
gem5::ruby::Sequencer::m_latencyHist
statistics::Histogram m_latencyHist
Histogram for holding latency profile of all requests.
Definition: Sequencer.hh:249
gem5::Clocked::cyclesToTicks
Tick cyclesToTicks(Cycles c) const
Definition: clocked_object.hh:227
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:186
gem5::ruby::Sequencer::llscCheckMonitor
bool llscCheckMonitor(const Addr)
Searches for cache line address in the global monitor tagged with this Sequencer object's version id.
Definition: Sequencer.cc:199
gem5::X86ISA::FlagShift
const int FlagShift
Definition: ldstflags.hh:54
gem5::Packet
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:283
gem5::ruby::Sequencer::m_IssueToInitialDelayHist
std::vector< statistics::Histogram * > m_IssueToInitialDelayHist
Histograms for recording the breakdown of miss latency.
Definition: Sequencer.hh:274
gem5::ruby::SequencerRequest::m_second_type
RubyRequestType m_second_type
Definition: Sequencer.hh:66
gem5::MipsISA::p
Bitfield< 0 > p
Definition: pra_constants.hh:326
gem5::Tick
uint64_t Tick
Tick count type.
Definition: types.hh:58
gem5::ruby::Sequencer::llscClearLocalMonitor
void llscClearLocalMonitor()
Removes all addresses from the local monitor.
Definition: Sequencer.cc:215
gem5::ruby::SequencerRequest
Definition: Sequencer.hh:62
gem5::ruby::Sequencer::m_missTypeLatencyHist
std::vector< statistics::Histogram * > m_missTypeLatencyHist
Definition: Sequencer.hh:265
gem5::ruby::Sequencer::m_InitialToForwardDelayHist
std::vector< statistics::Histogram * > m_InitialToForwardDelayHist
Definition: Sequencer.hh:275
gem5::X86ISA::type
type
Definition: misc.hh:733
RubyTester.hh
gem5::ruby::Sequencer::functionalWrite
virtual int functionalWrite(Packet *func_pkt) override
Definition: Sequencer.cc:256
gem5::ruby::Sequencer::m_hitTypeMachLatencyHist
std::vector< std::vector< statistics::Histogram * > > m_hitTypeMachLatencyHist
Definition: Sequencer.hh:260
gem5::Packet::getHtmTransactionUid
uint64_t getHtmTransactionUid() const
If a packet/request originates in a CPU executing in transactional mode, i.e.
Definition: packet.cc:534
gem5::Packet::isLLSC
bool isLLSC() const
Definition: packet.hh:609
RubySlicc_Util.hh
gem5::ruby::getOffset
Addr getOffset(Addr addr)
Definition: Address.cc:54
gem5::ruby::Sequencer::resetStats
void resetStats() override
Callback to reset stats.
Definition: Sequencer.cc:270
gem5::ruby::Sequencer::m_hitLatencyHist
statistics::Histogram m_hitLatencyHist
Histogram for holding latency profile of all requests that hit in the controller connected to this se...
Definition: Sequencer.hh:254
gem5::ruby::AbstractCacheEntry::setLocked
void setLocked(int context)
Definition: AbstractCacheEntry.cc:84
gem5::ruby::AbstractController::notifyCoalesced
virtual void notifyCoalesced(const Addr &addr, const RubyRequestType &type, const RequestPtr &req, const DataBlock &data_blk, const bool &was_miss)
Notifies controller of a request coalesced at the sequencer.
Definition: AbstractController.hh:148
gem5::ruby::Sequencer::m_hitTypeLatencyHist
std::vector< statistics::Histogram * > m_hitTypeLatencyHist
Definition: Sequencer.hh:255
gem5::ruby::Sequencer::Sequencer
Sequencer(const Params &)
Definition: Sequencer.cc:68
RubySystem.hh
gem5::ruby::Sequencer::empty
virtual bool empty() const
Definition: Sequencer.cc:651
gem5::ruby::RubyPort::testDrainComplete
void testDrainComplete()
Definition: RubyPort.cc:490
gem5::Packet::writeData
void writeData(uint8_t *p) const
Copy data from the packet to the memory at the provided pointer.
Definition: packet.hh:1281
gem5::Addr
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:147
gem5::ruby::RubySystem::getCooldownEnabled
static bool getCooldownEnabled()
Definition: RubySystem.hh:76
gem5::ruby::RubyPort::m_ruby_system
RubySystem * m_ruby_system
Definition: RubyPort.hh:195
gem5::Clocked::clockEdge
Tick clockEdge(Cycles cycles=Cycles(0)) const
Determine the tick when a cycle begins, by default the current one, but the argument also enables the...
Definition: clocked_object.hh:177
gem5::ruby::Sequencer::m_max_outstanding_requests
int m_max_outstanding_requests
Definition: Sequencer.hh:226
gem5::ruby::AbstractController::unblock
void unblock(Addr)
Definition: AbstractController.cc:325
gem5::ruby::Sequencer::print
virtual void print(std::ostream &out) const
Definition: Sequencer.cc:832
gem5::ruby::Sequencer::wakeup
virtual void wakeup()
Definition: Sequencer.cc:221
gem5::ruby::AbstractController::mandatoryQueueLatency
virtual Cycles mandatoryQueueLatency(const RubyRequestType &param_type)
Definition: AbstractController.hh:119
gem5::ruby::Sequencer::llscLoadLinked
void llscLoadLinked(const Addr)
Places the cache line address into the global monitor tagged with this Sequencer object's version id.
Definition: Sequencer.cc:149
gem5::X86ISA::StoreCheck
@ StoreCheck
Definition: ldstflags.hh:59
gem5::ruby::RubySystem
Definition: RubySystem.hh:63
gem5::ruby::RubyPort::functionalWrite
virtual int functionalWrite(Packet *func_pkt)
Definition: RubyPort.cc:674
gem5::Packet::isFlush
bool isFlush() const
Definition: packet.hh:612
gem5::ruby::Sequencer::markRemoved
void markRemoved()
Definition: Sequencer.cc:329
gem5::ruby::RubyPort::Params
RubyPortParams Params
Definition: RubyPort.hh:153
gem5::ruby::RubySystem::getWarmupEnabled
static bool getWarmupEnabled()
Definition: RubySystem.hh:75
gem5::ruby::Sequencer::llscClearMonitor
void llscClearMonitor(const Addr)
Removes the cache line address from the global monitor.
Definition: Sequencer.cc:162
gem5::ruby::RubyPort::m_controller
AbstractController * m_controller
Definition: RubyPort.hh:197
gem5::ContextID
int ContextID
Globally unique thread context ID.
Definition: types.hh:246
gem5::MipsISA::pc
Bitfield< 4 > pc
Definition: pra_constants.hh:243
gem5::ruby::Sequencer::writeCallback
void writeCallback(Addr address, DataBlock &data, const bool externalHit=false, const MachineType mach=MachineType_NUM, const Cycles initialRequestTime=Cycles(0), const Cycles forwardRequestTime=Cycles(0), const Cycles firstResponseTime=Cycles(0), const bool noCoales=false)
Definition: Sequencer.cc:408
gem5::ruby::SequencerRequest::m_type
RubyRequestType m_type
Definition: Sequencer.hh:65
gem5::ruby::SequencerRequest::issue_time
Cycles issue_time
Definition: Sequencer.hh:67
logging.hh
gem5::System::getArch
Arch getArch() const
Get the architecture.
Definition: system.hh:395
gem5::ruby::AbstractCacheEntry::isLocked
bool isLocked(int context) const
Definition: AbstractCacheEntry.cc:98
gem5::ruby::CacheMemory::clearLockedAll
void clearLockedAll(int context)
Definition: CacheMemory.cc:497
gem5::statistics::DistBase::reset
void reset()
Reset stat value to default.
Definition: statistics.hh:1349
gem5::Packet::isMaskedWrite
bool isMaskedWrite() const
Definition: packet.hh:1400
gem5::Packet::isHtmTransactional
bool isHtmTransactional() const
Returns whether or not this packet/request originates in the CPU executing in transactional mode,...
Definition: packet.cc:528
gem5::ruby::DataBlock
Definition: DataBlock.hh:60
gem5::Packet::getAddr
Addr getAddr() const
Definition: packet.hh:781
gem5::ruby::Sequencer::deadlockCheckEvent
EventFunctionWrapper deadlockCheckEvent
Definition: Sequencer.hh:280
fatal_if
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condi...
Definition: logging.hh:225
gem5
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
Definition: decoder.cc:40
gem5::ruby::RubyPort
Definition: RubyPort.hh:64
gem5::RubyTester::SenderState::subBlock
ruby::SubBlock subBlock
Definition: RubyTester.hh:91
gem5::ruby::Sequencer::m_IncompleteTimes
std::vector< statistics::Counter > m_IncompleteTimes
Definition: Sequencer.hh:278
gem5::ruby::AbstractController::blockOnQueue
void blockOnQueue(Addr, MessageBuffer *)
Definition: AbstractController.cc:312
gem5::ruby::Sequencer::m_typeLatencyHist
std::vector< statistics::Histogram * > m_typeLatencyHist
Definition: Sequencer.hh:250
gem5::Packet::getSize
unsigned getSize() const
Definition: packet.hh:791
gem5::ArmISA::rs
Bitfield< 9, 8 > rs
Definition: misc_types.hh:376
gem5::DrainState::Draining
@ Draining
Draining buffers pending serialization/handover.
gem5::Event::scheduled
bool scheduled() const
Determine if the current event is scheduled.
Definition: eventq.hh:465
panic
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:177
gem5::ArmISA::status
Bitfield< 5, 0 > status
Definition: misc_types.hh:422
gem5::Clocked::clockPeriod
Tick clockPeriod() const
Definition: clocked_object.hh:217
gem5::ruby::SequencerRequest::pkt
PacketPtr pkt
Definition: Sequencer.hh:64
Sequencer.hh

Generated on Tue Sep 21 2021 12:25:42 for gem5 by doxygen 1.8.17