gem5  [DEVELOP-FOR-23.0]
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
Sequencer.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2019-2021 ARM Limited
3  * All rights reserved.
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
15  * Copyright (c) 2013 Advanced Micro Devices, Inc.
16  * All rights reserved.
17  *
18  * Redistribution and use in source and binary forms, with or without
19  * modification, are permitted provided that the following conditions are
20  * met: redistributions of source code must retain the above copyright
21  * notice, this list of conditions and the following disclaimer;
22  * redistributions in binary form must reproduce the above copyright
23  * notice, this list of conditions and the following disclaimer in the
24  * documentation and/or other materials provided with the distribution;
25  * neither the name of the copyright holders nor the names of its
26  * contributors may be used to endorse or promote products derived from
27  * this software without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40  */
41 
43 
44 #include "arch/x86/ldstflags.hh"
45 #include "base/compiler.hh"
46 #include "base/logging.hh"
47 #include "base/str.hh"
49 #include "debug/LLSC.hh"
50 #include "debug/MemoryAccess.hh"
51 #include "debug/ProtocolTrace.hh"
52 #include "debug/RubyHitMiss.hh"
53 #include "debug/RubySequencer.hh"
54 #include "debug/RubyStats.hh"
55 #include "mem/packet.hh"
57 #include "mem/ruby/protocol/PrefetchBit.hh"
58 #include "mem/ruby/protocol/RubyAccessMode.hh"
62 #include "sim/system.hh"
63 
64 namespace gem5
65 {
66 
67 namespace ruby
68 {
69 
71  : RubyPort(p), m_IncompleteTimes(MachineType_NUM),
72  deadlockCheckEvent([this]{ wakeup(); }, "Sequencer deadlock check")
73 {
74  m_outstanding_count = 0;
75 
76  m_dataCache_ptr = p.dcache;
77  m_max_outstanding_requests = p.max_outstanding_requests;
78  m_deadlock_threshold = p.deadlock_threshold;
79 
80  m_coreId = p.coreid; // for tracking the two CorePair sequencers
81  assert(m_max_outstanding_requests > 0);
82  assert(m_deadlock_threshold > 0);
83 
84  m_unaddressedTransactionCnt = 0;
85 
86  m_runningGarnetStandalone = p.garnet_standalone;
87 
88 
89  // These statistical variables are not for display.
90  // The profiler will collate these across different
91  // sequencers and display those collated statistics.
92  m_outstandReqHist.init(10);
93  m_latencyHist.init(10);
94  m_hitLatencyHist.init(10);
95  m_missLatencyHist.init(10);
96 
97  for (int i = 0; i < RubyRequestType_NUM; i++) {
98  m_typeLatencyHist.push_back(new statistics::Histogram());
99  m_typeLatencyHist[i]->init(10);
100 
101  m_hitTypeLatencyHist.push_back(new statistics::Histogram());
102  m_hitTypeLatencyHist[i]->init(10);
103 
104  m_missTypeLatencyHist.push_back(new statistics::Histogram());
105  m_missTypeLatencyHist[i]->init(10);
106  }
107 
108  for (int i = 0; i < MachineType_NUM; i++) {
109  m_hitMachLatencyHist.push_back(new statistics::Histogram());
110  m_hitMachLatencyHist[i]->init(10);
111 
112  m_missMachLatencyHist.push_back(new statistics::Histogram());
113  m_missMachLatencyHist[i]->init(10);
114 
115  m_IssueToInitialDelayHist.push_back(new statistics::Histogram());
116  m_IssueToInitialDelayHist[i]->init(10);
117 
118  m_InitialToForwardDelayHist.push_back(new statistics::Histogram());
119  m_InitialToForwardDelayHist[i]->init(10);
120 
121  m_ForwardToFirstResponseDelayHist.push_back(
122  new statistics::Histogram());
123  m_ForwardToFirstResponseDelayHist[i]->init(10);
124 
125  m_FirstResponseToCompletionDelayHist.push_back(
126  new statistics::Histogram());
127  m_FirstResponseToCompletionDelayHist[i]->init(10);
128  }
129 
130  for (int i = 0; i < RubyRequestType_NUM; i++) {
131  m_hitTypeMachLatencyHist.push_back(
133  m_missTypeMachLatencyHist.push_back(
135 
136  for (int j = 0; j < MachineType_NUM; j++) {
137  m_hitTypeMachLatencyHist[i].push_back(new statistics::Histogram());
138  m_hitTypeMachLatencyHist[i][j]->init(10);
139 
140  m_missTypeMachLatencyHist[i].push_back(
141  new statistics::Histogram());
142  m_missTypeMachLatencyHist[i][j]->init(10);
143  }
144  }
145 
146 }
147 
149 {
150 }
151 
152 void
154 {
155  fatal_if(m_dataCache_ptr == NULL,
156  "%s must have a dcache object to support LLSC requests.", name());
157  AbstractCacheEntry *line = m_dataCache_ptr->lookup(claddr);
158  if (line) {
159  line->setLocked(m_version);
160  DPRINTF(LLSC, "LLSC Monitor - inserting load linked - "
161  "addr=0x%lx - cpu=%u\n", claddr, m_version);
162  }
163 }
164 
165 void
167 {
168  // clear monitor is called for all stores and evictions
169  if (m_dataCache_ptr == NULL)
170  return;
171  AbstractCacheEntry *line = m_dataCache_ptr->lookup(claddr);
172  if (line && line->isLocked(m_version)) {
173  line->clearLocked();
174  DPRINTF(LLSC, "LLSC Monitor - clearing due to store - "
175  "addr=0x%lx - cpu=%u\n", claddr, m_version);
176  }
177 }
178 
179 bool
181 {
182  fatal_if(m_dataCache_ptr == NULL,
183  "%s must have a dcache object to support LLSC requests.", name());
184  AbstractCacheEntry *line = m_dataCache_ptr->lookup(claddr);
185  if (!line)
186  return false;
187 
188  DPRINTF(LLSC, "LLSC Monitor - clearing due to "
189  "store conditional - "
190  "addr=0x%lx - cpu=%u\n",
191  claddr, m_version);
192 
193  if (line->isLocked(m_version)) {
194  line->clearLocked();
195  return true;
196  } else {
197  line->clearLocked();
198  return false;
199  }
200 }
201 
202 bool
204 {
205  assert(m_dataCache_ptr != NULL);
206  const Addr claddr = makeLineAddress(address);
207  AbstractCacheEntry *line = m_dataCache_ptr->lookup(claddr);
208  if (!line)
209  return false;
210 
211  if (line->isLocked(m_version)) {
212  return true;
213  } else {
214  return false;
215  }
216 }
217 
218 void
220 {
222 }
223 
224 void
226 {
227  assert(drainState() != DrainState::Draining);
228 
229  // Check for deadlock of any of the requests
230  Cycles current_time = curCycle();
231 
232  // Check across all outstanding requests
233  [[maybe_unused]] int total_outstanding = 0;
234 
235  for (const auto &table_entry : m_RequestTable) {
236  for (const auto &seq_req : table_entry.second) {
237  if (current_time - seq_req.issue_time < m_deadlock_threshold)
238  continue;
239 
240  panic("Possible Deadlock detected. Aborting!\n version: %d "
241  "request.paddr: 0x%x m_readRequestTable: %d current time: "
242  "%u issue_time: %d difference: %d\n", m_version,
243  seq_req.pkt->getAddr(), table_entry.second.size(),
244  current_time * clockPeriod(), seq_req.issue_time
245  * clockPeriod(), (current_time * clockPeriod())
246  - (seq_req.issue_time * clockPeriod()));
247  }
248  total_outstanding += table_entry.second.size();
249  }
250 
251  assert(m_outstanding_count == total_outstanding);
252 
253  if (m_outstanding_count > 0) {
254  // If there are still outstanding requests, keep checking
256  }
257 }
258 
259 int
261 {
262  int num_written = RubyPort::functionalWrite(func_pkt);
263 
264  for (const auto &table_entry : m_RequestTable) {
265  for (const auto& seq_req : table_entry.second) {
266  if (seq_req.functionalWrite(func_pkt))
267  ++num_written;
268  }
269  }
270 
271  return num_written;
272 }
273 
275 {
280  for (int i = 0; i < RubyRequestType_NUM; i++) {
281  m_typeLatencyHist[i]->reset();
282  m_hitTypeLatencyHist[i]->reset();
283  m_missTypeLatencyHist[i]->reset();
284  for (int j = 0; j < MachineType_NUM; j++) {
285  m_hitTypeMachLatencyHist[i][j]->reset();
286  m_missTypeMachLatencyHist[i][j]->reset();
287  }
288  }
289 
290  for (int i = 0; i < MachineType_NUM; i++) {
291  m_missMachLatencyHist[i]->reset();
292  m_hitMachLatencyHist[i]->reset();
293 
294  m_IssueToInitialDelayHist[i]->reset();
295  m_InitialToForwardDelayHist[i]->reset();
298 
299  m_IncompleteTimes[i] = 0;
300  }
301 }
302 
303 // Insert the request in the request table. Return RequestStatus_Aliased
304 // if the entry was already present.
305 RequestStatus
306 Sequencer::insertRequest(PacketPtr pkt, RubyRequestType primary_type,
307  RubyRequestType secondary_type)
308 {
309  // See if we should schedule a deadlock check
310  if (!deadlockCheckEvent.scheduled() &&
313  }
314 
315  if (isTlbiCmdRequest(primary_type)) {
316  assert(primary_type == secondary_type);
317 
318  switch (primary_type) {
319  case RubyRequestType_TLBI_EXT_SYNC_COMP:
320  // Don't have to store any data on this
321  break;
322  case RubyRequestType_TLBI:
323  case RubyRequestType_TLBI_SYNC:
324  {
326 
327  // returns pair<inserted element, was inserted>
328  [[maybe_unused]] auto insert_data = \
329  m_UnaddressedRequestTable.emplace(
332  pkt, primary_type, secondary_type, curCycle()));
333 
334  // if insert_data.second is false, wasn't inserted
335  assert(insert_data.second &&
336  "Another TLBI request with the same ID exists");
337 
338  DPRINTF(RubySequencer, "Inserting TLBI request %016x\n",
340 
341  break;
342  }
343 
344  default:
345  panic("Unexpected TLBI RubyRequestType");
346  }
347 
348  return RequestStatus_Ready;
349  }
350 
351  Addr line_addr = makeLineAddress(pkt->getAddr());
352  // Check if there is any outstanding request for the same cache line.
353  auto &seq_req_list = m_RequestTable[line_addr];
354  // Create a default entry
355  seq_req_list.emplace_back(pkt, primary_type,
356  secondary_type, curCycle());
358 
359  if (seq_req_list.size() > 1) {
360  return RequestStatus_Aliased;
361  }
362 
364 
365  return RequestStatus_Ready;
366 }
367 
368 void
370 {
372 }
373 
// Profile the end-to-end latency (in cycles) of a completed request and
// sample it into the sequencer's aggregate, per-request-type, and
// per-responding-machine histograms, split by local hit vs. external hit.
// For external hits with consistent protocol timestamps, the total miss
// latency is further decomposed into four per-stage delay histograms;
// otherwise the request is counted in m_IncompleteTimes.
//
// srequest:        the completed request (provides type, issue time, packet)
// llscSuccess:     whether an LL/SC pair succeeded (affects trace label only)
// respondingMach:  machine that serviced the request; MachineType_NUM if
//                  unknown (skips the per-machine histograms)
// isExternalHit:   true if serviced outside the local cache hierarchy level
// initialRequestTime / forwardRequestTime / firstResponseTime:
//                  protocol-supplied intermediate timestamps
374 void
375 Sequencer::recordMissLatency(SequencerRequest* srequest, bool llscSuccess,
376  const MachineType respondingMach,
377  bool isExternalHit, Cycles initialRequestTime,
378  Cycles forwardRequestTime,
379  Cycles firstResponseTime)
380 {
381  RubyRequestType type = srequest->m_type;
382  Cycles issued_time = srequest->issue_time;
383  Cycles completion_time = curCycle();
384 
385  assert(curCycle() >= issued_time);
386  Cycles total_lat = completion_time - issued_time;
387 
388  if ((initialRequestTime != 0) && (initialRequestTime < issued_time)) {
389  // if the request was combined in the protocol with an earlier request
390  // for the same address, it is possible that it will return an
391  // initialRequestTime corresponding the earlier request. Since Cycles
392  // is unsigned, we can't let this request get profiled below.
393 
394  total_lat = Cycles(0);
395  }
396 
// llscSuccess only changes how the trace line is labelled; a failed SC is
// still profiled like any other completed request.
397  DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n",
398  curTick(), m_version, "Seq", llscSuccess ? "Done" : "SC_Failed",
399  "", "", printAddress(srequest->pkt->getAddr()), total_lat);
400 
401  m_latencyHist.sample(total_lat);
402  m_typeLatencyHist[type]->sample(total_lat);
403 
404  if (isExternalHit) {
405  m_missLatencyHist.sample(total_lat);
406  m_missTypeLatencyHist[type]->sample(total_lat);
407 
408  if (respondingMach != MachineType_NUM) {
409  m_missMachLatencyHist[respondingMach]->sample(total_lat);
410  m_missTypeMachLatencyHist[type][respondingMach]->sample(total_lat);
411 
// Only decompose the miss latency when the four timestamps are
// monotonically ordered; otherwise the unsigned Cycles deltas below
// would underflow, so count the request as incomplete instead.
412  if ((issued_time <= initialRequestTime) &&
413  (initialRequestTime <= forwardRequestTime) &&
414  (forwardRequestTime <= firstResponseTime) &&
415  (firstResponseTime <= completion_time)) {
416 
417  m_IssueToInitialDelayHist[respondingMach]->sample(
418  initialRequestTime - issued_time);
419  m_InitialToForwardDelayHist[respondingMach]->sample(
420  forwardRequestTime - initialRequestTime);
421  m_ForwardToFirstResponseDelayHist[respondingMach]->sample(
422  firstResponseTime - forwardRequestTime);
423  m_FirstResponseToCompletionDelayHist[respondingMach]->sample(
424  completion_time - firstResponseTime);
425  } else {
426  m_IncompleteTimes[respondingMach]++;
427  }
428  }
429  } else {
// Local hit: sample the hit-side histograms only.
430  m_hitLatencyHist.sample(total_lat);
431  m_hitTypeLatencyHist[type]->sample(total_lat);
432 
433  if (respondingMach != MachineType_NUM) {
434  m_hitMachLatencyHist[respondingMach]->sample(total_lat);
435  m_hitTypeMachLatencyHist[type][respondingMach]->sample(total_lat);
436  }
437  }
438 }
439 
440 void
442 {
443  llscClearMonitor(address);
444  writeCallback(address, data);
445 }
446 
447 void
449  const bool externalHit, const MachineType mach,
450  const Cycles initialRequestTime,
451  const Cycles forwardRequestTime,
452  const Cycles firstResponseTime,
453  const bool noCoales)
454 {
455  //
456  // Free the whole list as we assume we have had the exclusive access
457  // to this cache line when response for the write comes back
458  //
459  assert(address == makeLineAddress(address));
460  assert(m_RequestTable.find(address) != m_RequestTable.end());
461  auto &seq_req_list = m_RequestTable[address];
462 
463  // Perform hitCallback on every cpu request made to this cache block while
464  // ruby request was outstanding. Since only 1 ruby request was made,
465  // profile the ruby latency once.
466  bool ruby_request = true;
467  while (!seq_req_list.empty()) {
468  SequencerRequest &seq_req = seq_req_list.front();
469 
470  if (noCoales && !ruby_request) {
471  // Do not process follow-up requests
472  // (e.g. if full line not present)
473  // Reissue to the cache hierarchy
474  issueRequest(seq_req.pkt, seq_req.m_second_type);
475  break;
476  }
477 
478  if (ruby_request) {
479  assert(seq_req.m_type != RubyRequestType_LD);
480  assert(seq_req.m_type != RubyRequestType_Load_Linked);
481  assert(seq_req.m_type != RubyRequestType_IFETCH);
482  }
483 
484  // handle write request
485  if ((seq_req.m_type != RubyRequestType_LD) &&
486  (seq_req.m_type != RubyRequestType_Load_Linked) &&
487  (seq_req.m_type != RubyRequestType_IFETCH)) {
488  // LL/SC support (tested with ARMv8)
489  bool success = true;
490 
491  if (seq_req.m_type != RubyRequestType_Store_Conditional) {
492  // Regular stores to addresses being monitored
493  // will fail (remove) the monitor entry.
494  llscClearMonitor(address);
495  } else {
496  // Store conditionals must first check the monitor
497  // if they will succeed or not
498  success = llscStoreConditional(address);
499  seq_req.pkt->req->setExtraData(success ? 1 : 0);
500  }
501 
502  // Handle SLICC block_on behavior for Locked_RMW accesses. NOTE: the
503  // address variable here is assumed to be a line address, so when
504  // blocking buffers, must check line addresses.
505  if (seq_req.m_type == RubyRequestType_Locked_RMW_Read) {
506  // blockOnQueue blocks all first-level cache controller queues
507  // waiting on memory accesses for the specified address that go
508  // to the specified queue. In this case, a Locked_RMW_Write must
509  // go to the mandatory_q before unblocking the first-level
510  // controller. This will block standard loads, stores, ifetches,
511  // etc.
513  } else if (seq_req.m_type == RubyRequestType_Locked_RMW_Write) {
514  m_controller->unblock(address);
515  }
516 
517  if (ruby_request) {
518  recordMissLatency(&seq_req, success, mach, externalHit,
519  initialRequestTime, forwardRequestTime,
520  firstResponseTime);
521  }
522 
523  markRemoved();
524  hitCallback(&seq_req, data, success, mach, externalHit,
525  initialRequestTime, forwardRequestTime,
526  firstResponseTime, !ruby_request);
527  ruby_request = false;
528  } else {
529  // handle read request
530  assert(!ruby_request);
531  markRemoved();
532  hitCallback(&seq_req, data, true, mach, externalHit,
533  initialRequestTime, forwardRequestTime,
534  firstResponseTime, !ruby_request);
535  }
536  seq_req_list.pop_front();
537  }
538 
539  // free all outstanding requests corresponding to this address
540  if (seq_req_list.empty()) {
541  m_RequestTable.erase(address);
542  }
543 }
544 
545 void
547  bool externalHit, const MachineType mach,
548  Cycles initialRequestTime,
549  Cycles forwardRequestTime,
550  Cycles firstResponseTime)
551 {
552  //
553  // Free up read requests until we hit the first Write request
554  // or end of the corresponding list.
555  //
556  assert(address == makeLineAddress(address));
557  assert(m_RequestTable.find(address) != m_RequestTable.end());
558  auto &seq_req_list = m_RequestTable[address];
559 
560  // Perform hitCallback on every cpu request made to this cache block while
561  // ruby request was outstanding. Since only 1 ruby request was made,
562  // profile the ruby latency once.
563  bool ruby_request = true;
564  while (!seq_req_list.empty()) {
565  SequencerRequest &seq_req = seq_req_list.front();
566  if (ruby_request) {
567  assert((seq_req.m_type == RubyRequestType_LD) ||
568  (seq_req.m_type == RubyRequestType_Load_Linked) ||
569  (seq_req.m_type == RubyRequestType_IFETCH));
570  }
571  if ((seq_req.m_type != RubyRequestType_LD) &&
572  (seq_req.m_type != RubyRequestType_Load_Linked) &&
573  (seq_req.m_type != RubyRequestType_IFETCH)) {
574  // Write request: reissue request to the cache hierarchy
575  issueRequest(seq_req.pkt, seq_req.m_second_type);
576  break;
577  }
578  if (ruby_request) {
579  recordMissLatency(&seq_req, true, mach, externalHit,
580  initialRequestTime, forwardRequestTime,
581  firstResponseTime);
582  }
583  markRemoved();
584  hitCallback(&seq_req, data, true, mach, externalHit,
585  initialRequestTime, forwardRequestTime,
586  firstResponseTime, !ruby_request);
587  ruby_request = false;
588  seq_req_list.pop_front();
589  }
590 
591  // free all outstanding requests corresponding to this address
592  if (seq_req_list.empty()) {
593  m_RequestTable.erase(address);
594  }
595 }
596 
597 void
599  bool llscSuccess,
600  const MachineType mach, const bool externalHit,
601  const Cycles initialRequestTime,
602  const Cycles forwardRequestTime,
603  const Cycles firstResponseTime,
604  const bool was_coalesced)
605 {
606  warn_once("Replacement policy updates recently became the responsibility "
607  "of SLICC state machines. Make sure to setMRU() near callbacks "
608  "in .sm files!");
609 
610  PacketPtr pkt = srequest->pkt;
611  Addr request_address(pkt->getAddr());
612  RubyRequestType type = srequest->m_type;
613 
614  if (was_coalesced) {
615  // Notify the controller about a coalesced request so it can properly
616  // account for it in its hit/miss stats and/or train prefetchers
617  // (this is protocol-dependent)
618  m_controller->notifyCoalesced(request_address, type, pkt->req,
619  data, externalHit);
620  }
621 
622  // Load-linked handling
623  if (type == RubyRequestType_Load_Linked) {
624  Addr line_addr = makeLineAddress(request_address);
625  llscLoadLinked(line_addr);
626  }
627 
628  DPRINTF(RubyHitMiss, "Cache %s at %#x\n",
629  externalHit ? "miss" : "hit",
630  printAddress(request_address));
631 
632  // update the data unless it is a non-data-carrying flush
634  data.setData(pkt);
635  } else if (!pkt->isFlush()) {
636  if ((type == RubyRequestType_LD) ||
637  (type == RubyRequestType_IFETCH) ||
638  (type == RubyRequestType_RMW_Read) ||
639  (type == RubyRequestType_Locked_RMW_Read) ||
640  (type == RubyRequestType_Load_Linked)) {
641  pkt->setData(
642  data.getData(getOffset(request_address), pkt->getSize()));
643  DPRINTF(RubySequencer, "read data %s\n", data);
644  } else if (pkt->req->isSwap()) {
645  assert(!pkt->isMaskedWrite());
646  std::vector<uint8_t> overwrite_val(pkt->getSize());
647  pkt->writeData(&overwrite_val[0]);
648  pkt->setData(
649  data.getData(getOffset(request_address), pkt->getSize()));
650  data.setData(&overwrite_val[0],
651  getOffset(request_address), pkt->getSize());
652  DPRINTF(RubySequencer, "swap data %s\n", data);
653  } else if (pkt->isAtomicOp()) {
654  // Set the data in the packet to the old value in the cache
655  pkt->setData(
656  data.getData(getOffset(request_address), pkt->getSize()));
657  DPRINTF(RubySequencer, "AMO original data %s\n", data);
658  // execute AMO operation
659  (*(pkt->getAtomicOp()))(
660  data.getDataMod(getOffset(request_address)));
661  DPRINTF(RubySequencer, "AMO new data %s\n", data);
662  } else if (type != RubyRequestType_Store_Conditional || llscSuccess) {
663  // Types of stores set the actual data here, apart from
664  // failed Store Conditional requests
665  data.setData(pkt);
666  DPRINTF(RubySequencer, "set data %s\n", data);
667  }
668  }
669 
670  // If using the RubyTester, update the RubyTester sender state's
671  // subBlock with the received data. The tester will later access
672  // this state.
673  if (m_usingRubyTester) {
674  DPRINTF(RubySequencer, "hitCallback %s 0x%x using RubyTester\n",
675  pkt->cmdString(), pkt->getAddr());
676  RubyTester::SenderState* testerSenderState =
678  assert(testerSenderState);
679  testerSenderState->subBlock.mergeFrom(data);
680  }
681 
684  assert(pkt->req);
685  delete pkt;
686  rs->m_cache_recorder->enqueueNextFetchRequest();
687  } else if (RubySystem::getCooldownEnabled()) {
688  delete pkt;
689  rs->m_cache_recorder->enqueueNextFlushRequest();
690  } else {
691  ruby_hit_callback(pkt);
693  }
694 }
695 
696 void
698  RubyRequestType reqType,
699  const MachineType mach,
700  const Cycles initialRequestTime,
701  const Cycles forwardRequestTime,
702  const Cycles firstResponseTime)
703 {
704  DPRINTF(RubySequencer, "unaddressedCallback ID:%08x type:%d\n",
705  unaddressedReqId, reqType);
706 
707  switch (reqType) {
708  case RubyRequestType_TLBI_EXT_SYNC:
709  {
710  // This should trigger the CPU to wait for stale translations
711  // and send an EXT_SYNC_COMP once complete.
712 
713  // Don't look for the ID in our requestTable.
714  // It won't be there because we didn't request this Sync
715  ruby_stale_translation_callback(unaddressedReqId);
716  break;
717  }
718  case RubyRequestType_TLBI:
719  case RubyRequestType_TLBI_SYNC:
720  {
721  // These signal that a TLBI operation that this core initiated
722  // of the respective type (TLBI or Sync) has finished.
723 
724  assert(m_UnaddressedRequestTable.find(unaddressedReqId)
725  != m_UnaddressedRequestTable.end());
726 
727  {
728  SequencerRequest &seq_req =
729  m_UnaddressedRequestTable.at(unaddressedReqId);
730  assert(seq_req.m_type == reqType);
731 
732  PacketPtr pkt = seq_req.pkt;
733 
736  }
737 
738  m_UnaddressedRequestTable.erase(unaddressedReqId);
739  break;
740  }
741  default:
742  panic("Unexpected TLBI RubyRequestType");
743  }
744 }
745 
746 bool
748 {
749  return m_RequestTable.empty() &&
751 }
752 
753 RequestStatus
755 {
756  // HTM abort signals must be allowed to reach the Sequencer
757  // the same cycle they are issued. They cannot be retried.
759  !pkt->req->isHTMAbort()) {
760  return RequestStatus_BufferFull;
761  }
762 
763  RubyRequestType primary_type = RubyRequestType_NULL;
764  RubyRequestType secondary_type = RubyRequestType_NULL;
765 
766  if (pkt->isLLSC()) {
767  // LL/SC instructions need to be handled carefully by the cache
768  // coherence protocol to ensure they follow the proper semantics. In
769  // particular, by identifying the operations as atomic, the protocol
770  // should understand that migratory sharing optimizations should not
771  // be performed (i.e. a load between the LL and SC should not steal
772  // away exclusive permission).
773  //
774  // The following logic works correctly with the semantics
775  // of armV8 LDEX/STEX instructions.
776 
777  if (pkt->isWrite()) {
778  DPRINTF(RubySequencer, "Issuing SC\n");
779  primary_type = RubyRequestType_Store_Conditional;
780 #if defined (PROTOCOL_MESI_Three_Level) || defined (PROTOCOL_MESI_Three_Level_HTM)
781  secondary_type = RubyRequestType_Store_Conditional;
782 #else
783  secondary_type = RubyRequestType_ST;
784 #endif
785  } else {
786  DPRINTF(RubySequencer, "Issuing LL\n");
787  assert(pkt->isRead());
788  primary_type = RubyRequestType_Load_Linked;
789  secondary_type = RubyRequestType_LD;
790  }
791  } else if (pkt->req->isLockedRMW()) {
792  //
793  // x86 locked instructions are translated to store cache coherence
794  // requests because these requests should always be treated as read
795  // exclusive operations and should leverage any migratory sharing
796  // optimization built into the protocol.
797  //
798  if (pkt->isWrite()) {
799  DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
800  primary_type = RubyRequestType_Locked_RMW_Write;
801  } else {
802  DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
803  assert(pkt->isRead());
804  primary_type = RubyRequestType_Locked_RMW_Read;
805  }
806  secondary_type = RubyRequestType_ST;
807  } else if (pkt->req->isTlbiCmd()) {
808  primary_type = secondary_type = tlbiCmdToRubyRequestType(pkt);
809  DPRINTF(RubySequencer, "Issuing TLBI\n");
810  } else {
811  //
812  // To support SwapReq, we need to check isWrite() first: a SwapReq
813  // should always be treated like a write, but since a SwapReq implies
814  // both isWrite() and isRead() are true, check isWrite() first here.
815  //
816  if (pkt->isWrite()) {
817  //
818  // Note: M5 packets do not differentiate ST from RMW_Write
819  //
820  primary_type = secondary_type = RubyRequestType_ST;
821  } else if (pkt->isRead()) {
822  // hardware transactional memory commands
823  if (pkt->req->isHTMCmd()) {
824  primary_type = secondary_type = htmCmdToRubyRequestType(pkt);
825  } else if (pkt->req->isInstFetch()) {
826  primary_type = secondary_type = RubyRequestType_IFETCH;
827  } else {
828  if (pkt->req->isReadModifyWrite()) {
829  primary_type = RubyRequestType_RMW_Read;
830  secondary_type = RubyRequestType_ST;
831  } else {
832  primary_type = secondary_type = RubyRequestType_LD;
833  }
834  }
835  } else if (pkt->isFlush()) {
836  primary_type = secondary_type = RubyRequestType_FLUSH;
837  } else {
838  panic("Unsupported ruby packet type\n");
839  }
840  }
841 
842  // Check if the line is blocked for a Locked_RMW
843  if (!pkt->req->isMemMgmt() &&
845  (primary_type != RubyRequestType_Locked_RMW_Write)) {
846  // Return that this request's cache line address aliases with
847  // a prior request that locked the cache line. The request cannot
848  // proceed until the cache line is unlocked by a Locked_RMW_Write
849  return RequestStatus_Aliased;
850  }
851 
852  RequestStatus status = insertRequest(pkt, primary_type, secondary_type);
853 
854  // It is OK to receive RequestStatus_Aliased, it can be considered Issued
855  if (status != RequestStatus_Ready && status != RequestStatus_Aliased)
856  return status;
857  // non-aliased with any existing request in the request table, just issue
858  // to the cache
859  if (status != RequestStatus_Aliased)
860  issueRequest(pkt, secondary_type);
861 
862  // TODO: issue hardware prefetches here
863  return RequestStatus_Issued;
864 }
865 
// Build a RubyRequest message for the packet and enqueue it on the
// controller's mandatory queue. Memory-management commands (e.g. TLBI)
// get the address-less RubyRequest constructor; all other requests carry
// the packet's physical address and size. secondary_type is the request
// type the protocol will actually see.
866 void
867 Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
868 {
869  assert(pkt != NULL);
// Context id is optional on the request; fall back to InvalidContextID.
870  ContextID proc_id = pkt->req->hasContextId() ?
871  pkt->req->contextId() : InvalidContextID;
872 
873  ContextID core_id = coreId();
874 
875  // If valid, copy the pc to the ruby request
876  Addr pc = 0;
877  if (pkt->req->hasPC()) {
878  pc = pkt->req->getPC();
879  }
880 
881  // check if the packet has data as for example prefetch and flush
882  // requests do not
883  std::shared_ptr<RubyRequest> msg;
884  if (pkt->req->isMemMgmt()) {
// Unaddressed (memory-management) request: no address/size in the msg.
885  msg = std::make_shared<RubyRequest>(clockEdge(),
886  pc, secondary_type,
887  RubyAccessMode_Supervisor, pkt,
888  proc_id, core_id);
889 
890  DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s\n",
891  curTick(), m_version, "Seq", "Begin", "", "",
892  RubyRequestType_to_string(secondary_type));
893 
894  if (pkt->req->isTlbiCmd()) {
895  msg->m_isTlbi = true;
// EXT_SYNC_COMP replies carry the transaction uid in the packet's
// extra data; locally-initiated TLBI/TLBI_SYNC use this sequencer's
// current unaddressed transaction id.
896  switch (secondary_type) {
897  case RubyRequestType_TLBI_EXT_SYNC_COMP:
898  msg->m_tlbiTransactionUid = pkt->req->getExtraData();
899  break;
900  case RubyRequestType_TLBI:
901  case RubyRequestType_TLBI_SYNC:
902  msg->m_tlbiTransactionUid = \
903  getCurrentUnaddressedTransactionID();
904  break;
905  default:
906  panic("Unexpected TLBI RubyRequestType");
907  }
908  DPRINTF(RubySequencer, "Issuing TLBI %016x\n",
909  msg->m_tlbiTransactionUid);
910  }
911  } else {
// Ordinary addressed request: include address, size, and prefetch bit.
912  msg = std::make_shared<RubyRequest>(clockEdge(), pkt->getAddr(),
913  pkt->getSize(), pc, secondary_type,
914  RubyAccessMode_Supervisor, pkt,
915  PrefetchBit_No, proc_id, core_id);
916 
917  DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %s\n",
918  curTick(), m_version, "Seq", "Begin", "", "",
919  printAddress(msg->getPhysicalAddress()),
920  RubyRequestType_to_string(secondary_type));
921  }
922 
923  // hardware transactional memory
924  // If the request originates in a transaction,
925  // then mark the Ruby message as such.
926  if (pkt->isHtmTransactional()) {
927  msg->m_htmFromTransaction = true;
928  msg->m_htmTransactionUid = pkt->getHtmTransactionUid();
929  }
930 
// The controller dictates the mandatory-queue latency for this type;
// it must be non-zero for the enqueue below.
931  Tick latency = cyclesToTicks(
932  m_controller->mandatoryQueueLatency(secondary_type));
933  assert(latency > 0);
934 
935  assert(m_mandatory_q_ptr != NULL);
936  m_mandatory_q_ptr->enqueue(msg, clockEdge(), latency);
937 }
938 
// Debug printer for the sequencer's request table: streams each entry as
// "[ <key> = <secondary request types...> ]".
//
// Fix: the closing " ]" was previously emitted exactly once, after the
// outer loop, so a map with more than one entry printed unbalanced
// brackets ("[ a = ...[ b = ... ]") and an empty map printed a stray " ]".
// The bracket is now closed per entry.
template <class KEY, class VALUE>
std::ostream &
operator<<(std::ostream &out, const std::unordered_map<KEY, VALUE> &map)
{
    for (const auto &table_entry : map) {
        out << "[ " << table_entry.first << " =";
        for (const auto &seq_req : table_entry.second) {
            out << " " << RubyRequestType_to_string(seq_req.m_second_type);
        }
        out << " ]";
    }
    return out;
}
953 
954 void
955 Sequencer::print(std::ostream& out) const
956 {
957  out << "[Sequencer: " << m_version
958  << ", outstanding requests: " << m_outstanding_count
959  << ", request table: " << m_RequestTable
960  << "]";
961 }
962 
963 void
964 Sequencer::recordRequestType(SequencerRequestType requestType) {
965  DPRINTF(RubyStats, "Recorded statistic: %s\n",
966  SequencerRequestType_to_string(requestType));
967 }
968 
969 void
971 {
972  llscClearMonitor(address);
973  ruby_eviction_callback(address);
974 }
975 
976 void
978 {
980  // Limit m_unaddressedTransactionCnt to 32 bits,
981  // top 32 bits should always be zeroed out
982  uint64_t aligned_txid = \
983  m_unaddressedTransactionCnt << RubySystem::getBlockSizeBits();
984 
985  if (aligned_txid > 0xFFFFFFFFull) {
987  }
988 }
989 
990 uint64_t
992 {
993  return (
994  uint64_t(m_version & 0xFFFFFFFF) << 32) |
996  );
997 }
998 
999 } // namespace ruby
1000 } // namespace gem5
gem5::ruby::Sequencer::m_FirstResponseToCompletionDelayHist
std::vector< statistics::Histogram * > m_FirstResponseToCompletionDelayHist
Definition: Sequencer.hh:289
gem5::ruby::Sequencer::~Sequencer
~Sequencer()
Definition: Sequencer.cc:148
gem5::ruby::SubBlock::mergeFrom
void mergeFrom(const DataBlock &data)
Definition: SubBlock.hh:66
gem5::curTick
Tick curTick()
The universal simulation clock.
Definition: cur_tick.hh:46
gem5::Packet::cmdString
const std::string & cmdString() const
Return the string name of the cmd field (for debugging and tracing).
Definition: packet.hh:588
gem5::ruby::Sequencer::m_RequestTable
std::unordered_map< Addr, std::list< SequencerRequest > > m_RequestTable
Definition: Sequencer.hh:224
gem5::ruby::Sequencer::m_hitMachLatencyHist
std::vector< statistics::Histogram * > m_hitMachLatencyHist
Histograms for profiling the latencies for requests that did not required external messages.
Definition: Sequencer.hh:271
gem5::Packet::isAtomicOp
bool isAtomicOp() const
Definition: packet.hh:846
gem5::ruby::RubyPort::m_version
uint32_t m_version
Definition: RubyPort.hh:203
gem5::ruby::Sequencer::m_unaddressedTransactionCnt
uint64_t m_unaddressedTransactionCnt
Definition: Sequencer.hh:253
gem5::ruby::Sequencer::m_deadlock_threshold
Cycles m_deadlock_threshold
Definition: Sequencer.hh:229
gem5::ruby::htmCmdToRubyRequestType
RubyRequestType htmCmdToRubyRequestType(const Packet *pkt)
Definition: RubySlicc_Util.hh:179
system.hh
Profiler.hh
DPRINTFR
#define DPRINTFR(x,...)
Definition: trace.hh:224
data
const char data[]
Definition: circlebuf.test.cc:48
gem5::Clocked::curCycle
Cycles curCycle() const
Determine the current cycle, corresponding to a tick aligned to a clock edge.
Definition: clocked_object.hh:195
gem5::ruby::printAddress
std::string printAddress(Addr addr)
Definition: Address.cc:80
gem5::ruby::Sequencer::makeRequest
RequestStatus makeRequest(PacketPtr pkt) override
Definition: Sequencer.cc:754
gem5::ruby::Sequencer::writeCallbackScFail
void writeCallbackScFail(Addr address, DataBlock &data)
Proxy function to writeCallback that first invalidates the line address in the local monitor.
Definition: Sequencer.cc:441
gem5::ruby::Sequencer::issueRequest
void issueRequest(PacketPtr pkt, RubyRequestType type)
Definition: Sequencer.cc:867
gem5::Drainable::drainState
DrainState drainState() const
Return the current drain state of an object.
Definition: drain.hh:324
gem5::ruby::Sequencer::m_UnaddressedRequestTable
std::unordered_map< uint64_t, SequencerRequest > m_UnaddressedRequestTable
Definition: Sequencer.hh:227
gem5::ruby::Sequencer::getCurrentUnaddressedTransactionID
uint64_t getCurrentUnaddressedTransactionID() const
Generate the current unaddressed transaction ID based on the counter and the Sequencer object's versi...
Definition: Sequencer.cc:991
gem5::ruby::Sequencer::m_dataCache_ptr
CacheMemory * m_dataCache_ptr
Definition: Sequencer.hh:238
gem5::ruby::Sequencer::m_outstandReqHist
statistics::Histogram m_outstandReqHist
Histogram for number of outstanding requests per cycle.
Definition: Sequencer.hh:258
warn_once
#define warn_once(...)
Definition: logging.hh:260
gem5::Packet::findNextSenderState
T * findNextSenderState() const
Go through the sender state stack and return the first instance that is of type T (as determined by a...
Definition: packet.hh:575
gem5::Packet::setData
void setData(const uint8_t *p)
Copy data into the packet from the provided pointer.
Definition: packet.hh:1293
gem5::ruby::operator<<
std::ostream & operator<<(std::ostream &os, const BoolVec &myvector)
Definition: BoolVec.cc:49
gem5::Packet::req
RequestPtr req
A pointer to the original request.
Definition: packet.hh:377
gem5::ruby::AbstractCacheEntry
Definition: AbstractCacheEntry.hh:62
gem5::InvalidContextID
const ContextID InvalidContextID
Definition: types.hh:240
gem5::ruby::Sequencer::hitCallback
void hitCallback(SequencerRequest *srequest, DataBlock &data, bool llscSuccess, const MachineType mach, const bool externalHit, const Cycles initialRequestTime, const Cycles forwardRequestTime, const Cycles firstResponseTime, const bool was_coalesced)
Definition: Sequencer.cc:598
gem5::ruby::Sequencer::insertRequest
virtual RequestStatus insertRequest(PacketPtr pkt, RubyRequestType primary_type, RubyRequestType secondary_type)
Definition: Sequencer.cc:306
gem5::ruby::Sequencer::incrementUnaddressedTransactionCnt
void incrementUnaddressedTransactionCnt()
Increment the unaddressed transaction counter.
Definition: Sequencer.cc:977
gem5::ruby::RubyPort::ruby_hit_callback
void ruby_hit_callback(PacketPtr pkt)
Definition: RubyPort.cc:452
gem5::ruby::MessageBuffer::enqueue
void enqueue(MsgPtr message, Tick curTime, Tick delta)
Definition: MessageBuffer.cc:217
gem5::Packet::isWrite
bool isWrite() const
Definition: packet.hh:594
RubyRequest.hh
ldstflags.hh
gem5::EventManager::schedule
void schedule(Event &event, Tick when)
Definition: eventq.hh:1012
std::vector
STL vector class.
Definition: stl.hh:37
gem5::ruby::AbstractCacheEntry::clearLocked
void clearLocked()
Definition: AbstractCacheEntry.cc:91
gem5::ruby::Sequencer::m_missLatencyHist
statistics::Histogram m_missLatencyHist
Histogram for holding latency profile of all requests that miss in the controller connected to this s...
Definition: Sequencer.hh:276
gem5::ruby::AbstractController::isBlocked
bool isBlocked(Addr) const
Definition: AbstractController.cc:327
gem5::ruby::Sequencer::recordRequestType
void recordRequestType(SequencerRequestType requestType)
Definition: Sequencer.cc:964
gem5::ruby::Sequencer::coreId
int coreId() const
Definition: Sequencer.hh:150
gem5::ArmISA::i
Bitfield< 7 > i
Definition: misc_types.hh:67
gem5::ruby::CacheMemory::lookup
AbstractCacheEntry * lookup(Addr address)
Definition: CacheMemory.cc:342
gem5::ruby::makeLineAddress
Addr makeLineAddress(Addr addr)
Definition: Address.cc:60
gem5::ruby::tlbiCmdToRubyRequestType
RubyRequestType tlbiCmdToRubyRequestType(const Packet *pkt)
Definition: RubySlicc_Util.hh:196
gem5::statistics::DistBase::sample
void sample(const U &v, int n=1)
Add a value to the distribtion n times.
Definition: statistics.hh:1327
gem5::ruby::Sequencer::m_outstanding_count
int m_outstanding_count
Definition: Sequencer.hh:248
gem5::ruby::RubyPort::ruby_stale_translation_callback
void ruby_stale_translation_callback(Addr txnId)
Definition: RubyPort.cc:494
gem5::ruby::RubyPort::m_mandatory_q_ptr
MessageBuffer * m_mandatory_q_ptr
Definition: RubyPort.hh:205
packet.hh
gem5::RubyTester::SenderState
Definition: RubyTester.hh:89
gem5::Cycles
Cycles is a wrapper class for representing cycle counts, i.e.
Definition: types.hh:78
str.hh
gem5::ruby::Sequencer::m_missMachLatencyHist
std::vector< statistics::Histogram * > m_missMachLatencyHist
Histograms for profiling the latencies for requests that required external messages.
Definition: Sequencer.hh:281
gem5::Packet::getAtomicOp
AtomicOpFunctor * getAtomicOp() const
Accessor function to atomic op.
Definition: packet.hh:845
gem5::ruby::Sequencer::m_missTypeMachLatencyHist
std::vector< std::vector< statistics::Histogram * > > m_missTypeMachLatencyHist
Definition: Sequencer.hh:283
gem5::statistics::Histogram
A simple histogram stat.
Definition: statistics.hh:2125
gem5::ruby::Sequencer::evictionCallback
void evictionCallback(Addr address)
Definition: Sequencer.cc:970
gem5::ArmISA::j
Bitfield< 24 > j
Definition: misc_types.hh:57
gem5::ruby::RubyPort::m_usingRubyTester
bool m_usingRubyTester
Definition: RubyPort.hh:206
gem5::Packet::isRead
bool isRead() const
Definition: packet.hh:593
gem5::ruby::RubyPort::ruby_eviction_callback
void ruby_eviction_callback(Addr address)
Definition: RubyPort.cc:695
gem5::ruby::Sequencer::readCallback
void readCallback(Addr address, DataBlock &data, const bool externalHit=false, const MachineType mach=MachineType_NUM, const Cycles initialRequestTime=Cycles(0), const Cycles forwardRequestTime=Cycles(0), const Cycles firstResponseTime=Cycles(0))
Definition: Sequencer.cc:546
gem5::ruby::Sequencer::recordMissLatency
void recordMissLatency(SequencerRequest *srequest, bool llscSuccess, const MachineType respondingMach, bool isExternalHit, Cycles initialRequestTime, Cycles forwardRequestTime, Cycles firstResponseTime)
Definition: Sequencer.cc:375
gem5::ruby::Sequencer::m_ForwardToFirstResponseDelayHist
std::vector< statistics::Histogram * > m_ForwardToFirstResponseDelayHist
Definition: Sequencer.hh:288
gem5::ruby::Sequencer::llscStoreConditional
bool llscStoreConditional(const Addr)
Searches for cache line address in the global monitor tagged with this Sequencer object's version id.
Definition: Sequencer.cc:180
gem5::Named::name
virtual std::string name() const
Definition: named.hh:47
gem5::VegaISA::p
Bitfield< 54 > p
Definition: pagetable.hh:70
gem5::ruby::Sequencer::m_latencyHist
statistics::Histogram m_latencyHist
Histogram for holding latency profile of all requests.
Definition: Sequencer.hh:261
gem5::Clocked::cyclesToTicks
Tick cyclesToTicks(Cycles c) const
Definition: clocked_object.hh:227
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:210
gem5::ruby::Sequencer::llscCheckMonitor
bool llscCheckMonitor(const Addr)
Searches for cache line address in the global monitor tagged with this Sequencer object's version id.
Definition: Sequencer.cc:203
gem5::Packet
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:294
gem5::ruby::Sequencer::m_IssueToInitialDelayHist
std::vector< statistics::Histogram * > m_IssueToInitialDelayHist
Histograms for recording the breakdown of miss latency.
Definition: Sequencer.hh:286
gem5::ruby::SequencerRequest::m_second_type
RubyRequestType m_second_type
Definition: Sequencer.hh:66
gem5::Tick
uint64_t Tick
Tick count type.
Definition: types.hh:58
gem5::ruby::Sequencer::llscClearLocalMonitor
void llscClearLocalMonitor()
Removes all addresses from the local monitor.
Definition: Sequencer.cc:219
gem5::ruby::SequencerRequest
Definition: Sequencer.hh:62
gem5::ruby::Sequencer::m_missTypeLatencyHist
std::vector< statistics::Histogram * > m_missTypeLatencyHist
Definition: Sequencer.hh:277
gem5::ruby::Sequencer::m_InitialToForwardDelayHist
std::vector< statistics::Histogram * > m_InitialToForwardDelayHist
Definition: Sequencer.hh:287
gem5::X86ISA::type
type
Definition: misc.hh:734
RubyTester.hh
gem5::ruby::Sequencer::functionalWrite
virtual int functionalWrite(Packet *func_pkt) override
Definition: Sequencer.cc:260
gem5::ruby::Sequencer::m_hitTypeMachLatencyHist
std::vector< std::vector< statistics::Histogram * > > m_hitTypeMachLatencyHist
Definition: Sequencer.hh:272
gem5::ruby::RubySystem::getBlockSizeBits
static uint32_t getBlockSizeBits()
Definition: RubySystem.hh:73
gem5::Packet::getHtmTransactionUid
uint64_t getHtmTransactionUid() const
If a packet/request originates in a CPU executing in transactional mode, i.e.
Definition: packet.cc:529
gem5::Packet::isLLSC
bool isLLSC() const
Definition: packet.hh:620
RubySlicc_Util.hh
gem5::ruby::getOffset
Addr getOffset(Addr addr)
Definition: Address.cc:54
gem5::ruby::Sequencer::resetStats
void resetStats() override
Callback to reset stats.
Definition: Sequencer.cc:274
compiler.hh
gem5::ruby::Sequencer::m_hitLatencyHist
statistics::Histogram m_hitLatencyHist
Histogram for holding latency profile of all requests that hit in the controller connected to this se...
Definition: Sequencer.hh:266
gem5::ruby::AbstractCacheEntry::setLocked
void setLocked(int context)
Definition: AbstractCacheEntry.cc:84
gem5::ruby::AbstractController::notifyCoalesced
virtual void notifyCoalesced(const Addr &addr, const RubyRequestType &type, const RequestPtr &req, const DataBlock &data_blk, const bool &was_miss)
Notifies controller of a request coalesced at the sequencer.
Definition: AbstractController.hh:157
gem5::ruby::Sequencer::m_hitTypeLatencyHist
std::vector< statistics::Histogram * > m_hitTypeLatencyHist
Definition: Sequencer.hh:267
gem5::ruby::Sequencer::Sequencer
Sequencer(const Params &)
Definition: Sequencer.cc:70
RubySystem.hh
gem5::ruby::Sequencer::empty
virtual bool empty() const
Definition: Sequencer.cc:747
gem5::ruby::RubyPort::testDrainComplete
void testDrainComplete()
Definition: RubyPort.cc:549
gem5::Packet::writeData
void writeData(uint8_t *p) const
Copy data from the packet to the memory at the provided pointer.
Definition: packet.hh:1322
gem5::Addr
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:147
gem5::ruby::RubyPort::ruby_unaddressed_callback
void ruby_unaddressed_callback(PacketPtr pkt)
Definition: RubyPort.cc:475
gem5::ruby::RubySystem::getCooldownEnabled
static bool getCooldownEnabled()
Definition: RubySystem.hh:76
gem5::ruby::RubyPort::m_ruby_system
RubySystem * m_ruby_system
Definition: RubyPort.hh:202
gem5::Clocked::clockEdge
Tick clockEdge(Cycles cycles=Cycles(0)) const
Determine the tick when a cycle begins, by default the current one, but the argument also enables the...
Definition: clocked_object.hh:177
gem5::ruby::Sequencer::m_max_outstanding_requests
int m_max_outstanding_requests
Definition: Sequencer.hh:236
gem5::ruby::AbstractController::unblock
void unblock(Addr)
Definition: AbstractController.cc:333
gem5::ruby::Sequencer::print
virtual void print(std::ostream &out) const
Definition: Sequencer.cc:955
gem5::ruby::Sequencer::wakeup
virtual void wakeup()
Definition: Sequencer.cc:225
gem5::ruby::AbstractController::mandatoryQueueLatency
virtual Cycles mandatoryQueueLatency(const RubyRequestType &param_type)
Definition: AbstractController.hh:128
gem5::ruby::Sequencer::llscLoadLinked
void llscLoadLinked(const Addr)
Places the cache line address into the global monitor tagged with this Sequencer object's version id.
Definition: Sequencer.cc:153
gem5::ruby::RubySystem
Definition: RubySystem.hh:63
gem5::ruby::RubyPort::functionalWrite
virtual int functionalWrite(Packet *func_pkt)
Definition: RubyPort.cc:729
gem5::Packet::isFlush
bool isFlush() const
Definition: packet.hh:624
gem5::ruby::Sequencer::markRemoved
void markRemoved()
Definition: Sequencer.cc:369
gem5::ruby::RubyPort::Params
RubyPortParams Params
Definition: RubyPort.hh:158
gem5::ruby::RubySystem::getWarmupEnabled
static bool getWarmupEnabled()
Definition: RubySystem.hh:75
gem5::ruby::Sequencer::llscClearMonitor
void llscClearMonitor(const Addr)
Removes the cache line address from the global monitor.
Definition: Sequencer.cc:166
gem5::ruby::RubyPort::m_controller
AbstractController * m_controller
Definition: RubyPort.hh:204
gem5::ContextID
int ContextID
Globally unique thread context ID.
Definition: types.hh:239
gem5::MipsISA::pc
Bitfield< 4 > pc
Definition: pra_constants.hh:243
gem5::ruby::Sequencer::writeCallback
void writeCallback(Addr address, DataBlock &data, const bool externalHit=false, const MachineType mach=MachineType_NUM, const Cycles initialRequestTime=Cycles(0), const Cycles forwardRequestTime=Cycles(0), const Cycles firstResponseTime=Cycles(0), const bool noCoales=false)
Definition: Sequencer.cc:448
gem5::ruby::SequencerRequest::m_type
RubyRequestType m_type
Definition: Sequencer.hh:65
gem5::ruby::SequencerRequest::issue_time
Cycles issue_time
Definition: Sequencer.hh:67
gem5::ruby::isTlbiCmdRequest
bool isTlbiCmdRequest(RubyRequestType type)
Definition: RubySlicc_Util.hh:166
logging.hh
gem5::ruby::Sequencer::unaddressedCallback
void unaddressedCallback(Addr unaddressedReqId, RubyRequestType requestType, const MachineType mach=MachineType_NUM, const Cycles initialRequestTime=Cycles(0), const Cycles forwardRequestTime=Cycles(0), const Cycles firstResponseTime=Cycles(0))
Definition: Sequencer.cc:697
gem5::ruby::AbstractCacheEntry::isLocked
bool isLocked(int context) const
Definition: AbstractCacheEntry.cc:98
gem5::ruby::CacheMemory::clearLockedAll
void clearLockedAll(int context)
Definition: CacheMemory.cc:497
gem5::statistics::DistBase::reset
void reset()
Reset stat value to default.
Definition: statistics.hh:1351
gem5::Packet::isMaskedWrite
bool isMaskedWrite() const
Definition: packet.hh:1441
gem5::Packet::isHtmTransactional
bool isHtmTransactional() const
Returns whether or not this packet/request originates in the CPU executing in transactional mode,...
Definition: packet.cc:523
gem5::ruby::DataBlock
Definition: DataBlock.hh:60
gem5::Packet::getAddr
Addr getAddr() const
Definition: packet.hh:807
gem5::ruby::Sequencer::deadlockCheckEvent
EventFunctionWrapper deadlockCheckEvent
Definition: Sequencer.hh:292
fatal_if
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condi...
Definition: logging.hh:236
gem5
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
Definition: gpu_translation_state.hh:37
gem5::ruby::RubyPort
Definition: RubyPort.hh:64
gem5::RubyTester::SenderState::subBlock
ruby::SubBlock subBlock
Definition: RubyTester.hh:91
gem5::ruby::Sequencer::m_IncompleteTimes
std::vector< statistics::Counter > m_IncompleteTimes
Definition: Sequencer.hh:290
gem5::ruby::AbstractController::blockOnQueue
void blockOnQueue(Addr, MessageBuffer *)
Definition: AbstractController.cc:320
gem5::ruby::Sequencer::m_typeLatencyHist
std::vector< statistics::Histogram * > m_typeLatencyHist
Definition: Sequencer.hh:262
gem5::Packet::getSize
unsigned getSize() const
Definition: packet.hh:817
gem5::ArmISA::rs
Bitfield< 9, 8 > rs
Definition: misc_types.hh:433
gem5::DrainState::Draining
@ Draining
Draining buffers pending serialization/handover.
gem5::Event::scheduled
bool scheduled() const
Determine if the current event is scheduled.
Definition: eventq.hh:458
panic
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:188
gem5::ArmISA::status
Bitfield< 5, 0 > status
Definition: misc_types.hh:480
gem5::Clocked::clockPeriod
Tick clockPeriod() const
Definition: clocked_object.hh:217
gem5::ruby::SequencerRequest::pkt
PacketPtr pkt
Definition: Sequencer.hh:64
Sequencer.hh

Generated on Sun Jul 30 2023 01:56:59 for gem5 by doxygen 1.8.17