gem5  v22.0.0.1
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
Sequencer.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2019-2021 ARM Limited
3  * All rights reserved.
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
15  * Copyright (c) 2013 Advanced Micro Devices, Inc.
16  * All rights reserved.
17  *
18  * Redistribution and use in source and binary forms, with or without
19  * modification, are permitted provided that the following conditions are
20  * met: redistributions of source code must retain the above copyright
21  * notice, this list of conditions and the following disclaimer;
22  * redistributions in binary form must reproduce the above copyright
23  * notice, this list of conditions and the following disclaimer in the
24  * documentation and/or other materials provided with the distribution;
25  * neither the name of the copyright holders nor the names of its
26  * contributors may be used to endorse or promote products derived from
27  * this software without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40  */
41 
43 
44 #include "arch/x86/ldstflags.hh"
45 #include "base/logging.hh"
46 #include "base/str.hh"
48 #include "debug/LLSC.hh"
49 #include "debug/MemoryAccess.hh"
50 #include "debug/ProtocolTrace.hh"
51 #include "debug/RubySequencer.hh"
52 #include "debug/RubyStats.hh"
53 #include "mem/packet.hh"
55 #include "mem/ruby/protocol/PrefetchBit.hh"
56 #include "mem/ruby/protocol/RubyAccessMode.hh"
60 #include "sim/system.hh"
61 
62 namespace gem5
63 {
64 
65 namespace ruby
66 {
67 
69  : RubyPort(p), m_IncompleteTimes(MachineType_NUM),
70  deadlockCheckEvent([this]{ wakeup(); }, "Sequencer deadlock check")
71 {
72  m_outstanding_count = 0;
73 
74  m_dataCache_ptr = p.dcache;
75  m_max_outstanding_requests = p.max_outstanding_requests;
76  m_deadlock_threshold = p.deadlock_threshold;
77 
78  m_coreId = p.coreid; // for tracking the two CorePair sequencers
79  assert(m_max_outstanding_requests > 0);
80  assert(m_deadlock_threshold > 0);
81 
82  m_unaddressedTransactionCnt = 0;
83 
84  m_runningGarnetStandalone = p.garnet_standalone;
85 
86 
87  // These statistical variables are not for display.
88  // The profiler will collate these across different
89  // sequencers and display those collated statistics.
90  m_outstandReqHist.init(10);
91  m_latencyHist.init(10);
92  m_hitLatencyHist.init(10);
93  m_missLatencyHist.init(10);
94 
95  for (int i = 0; i < RubyRequestType_NUM; i++) {
96  m_typeLatencyHist.push_back(new statistics::Histogram());
97  m_typeLatencyHist[i]->init(10);
98 
99  m_hitTypeLatencyHist.push_back(new statistics::Histogram());
100  m_hitTypeLatencyHist[i]->init(10);
101 
102  m_missTypeLatencyHist.push_back(new statistics::Histogram());
103  m_missTypeLatencyHist[i]->init(10);
104  }
105 
106  for (int i = 0; i < MachineType_NUM; i++) {
107  m_hitMachLatencyHist.push_back(new statistics::Histogram());
108  m_hitMachLatencyHist[i]->init(10);
109 
110  m_missMachLatencyHist.push_back(new statistics::Histogram());
111  m_missMachLatencyHist[i]->init(10);
112 
113  m_IssueToInitialDelayHist.push_back(new statistics::Histogram());
114  m_IssueToInitialDelayHist[i]->init(10);
115 
116  m_InitialToForwardDelayHist.push_back(new statistics::Histogram());
117  m_InitialToForwardDelayHist[i]->init(10);
118 
119  m_ForwardToFirstResponseDelayHist.push_back(
120  new statistics::Histogram());
121  m_ForwardToFirstResponseDelayHist[i]->init(10);
122 
123  m_FirstResponseToCompletionDelayHist.push_back(
124  new statistics::Histogram());
125  m_FirstResponseToCompletionDelayHist[i]->init(10);
126  }
127 
128  for (int i = 0; i < RubyRequestType_NUM; i++) {
129  m_hitTypeMachLatencyHist.push_back(
131  m_missTypeMachLatencyHist.push_back(
133 
134  for (int j = 0; j < MachineType_NUM; j++) {
135  m_hitTypeMachLatencyHist[i].push_back(new statistics::Histogram());
136  m_hitTypeMachLatencyHist[i][j]->init(10);
137 
138  m_missTypeMachLatencyHist[i].push_back(
139  new statistics::Histogram());
140  m_missTypeMachLatencyHist[i][j]->init(10);
141  }
142  }
143 
144 }
145 
147 {
148 }
149 
150 void
152 {
153  fatal_if(m_dataCache_ptr == NULL,
154  "%s must have a dcache object to support LLSC requests.", name());
155  AbstractCacheEntry *line = m_dataCache_ptr->lookup(claddr);
156  if (line) {
157  line->setLocked(m_version);
158  DPRINTF(LLSC, "LLSC Monitor - inserting load linked - "
159  "addr=0x%lx - cpu=%u\n", claddr, m_version);
160  }
161 }
162 
163 void
165 {
166  // clear monitor is called for all stores and evictions
167  if (m_dataCache_ptr == NULL)
168  return;
169  AbstractCacheEntry *line = m_dataCache_ptr->lookup(claddr);
170  if (line && line->isLocked(m_version)) {
171  line->clearLocked();
172  DPRINTF(LLSC, "LLSC Monitor - clearing due to store - "
173  "addr=0x%lx - cpu=%u\n", claddr, m_version);
174  }
175 }
176 
177 bool
179 {
180  fatal_if(m_dataCache_ptr == NULL,
181  "%s must have a dcache object to support LLSC requests.", name());
182  AbstractCacheEntry *line = m_dataCache_ptr->lookup(claddr);
183  if (!line)
184  return false;
185 
186  DPRINTF(LLSC, "LLSC Monitor - clearing due to "
187  "store conditional - "
188  "addr=0x%lx - cpu=%u\n",
189  claddr, m_version);
190 
191  if (line->isLocked(m_version)) {
192  line->clearLocked();
193  return true;
194  } else {
195  line->clearLocked();
196  return false;
197  }
198 }
199 
200 bool
202 {
203  assert(m_dataCache_ptr != NULL);
204  const Addr claddr = makeLineAddress(address);
205  AbstractCacheEntry *line = m_dataCache_ptr->lookup(claddr);
206  if (!line)
207  return false;
208 
209  if (line->isLocked(m_version)) {
210  return true;
211  } else {
212  return false;
213  }
214 }
215 
216 void
218 {
220 }
221 
222 void
224 {
225  assert(drainState() != DrainState::Draining);
226 
227  // Check for deadlock of any of the requests
228  Cycles current_time = curCycle();
229 
230  // Check across all outstanding requests
231  int total_outstanding = 0;
232 
233  for (const auto &table_entry : m_RequestTable) {
234  for (const auto &seq_req : table_entry.second) {
235  if (current_time - seq_req.issue_time < m_deadlock_threshold)
236  continue;
237 
238  panic("Possible Deadlock detected. Aborting!\n version: %d "
239  "request.paddr: 0x%x m_readRequestTable: %d current time: "
240  "%u issue_time: %d difference: %d\n", m_version,
241  seq_req.pkt->getAddr(), table_entry.second.size(),
242  current_time * clockPeriod(), seq_req.issue_time
243  * clockPeriod(), (current_time * clockPeriod())
244  - (seq_req.issue_time * clockPeriod()));
245  }
246  total_outstanding += table_entry.second.size();
247  }
248 
249  assert(m_outstanding_count == total_outstanding);
250 
251  if (m_outstanding_count > 0) {
252  // If there are still outstanding requests, keep checking
254  }
255 }
256 
257 int
259 {
260  int num_written = RubyPort::functionalWrite(func_pkt);
261 
262  for (const auto &table_entry : m_RequestTable) {
263  for (const auto& seq_req : table_entry.second) {
264  if (seq_req.functionalWrite(func_pkt))
265  ++num_written;
266  }
267  }
268 
269  return num_written;
270 }
271 
273 {
278  for (int i = 0; i < RubyRequestType_NUM; i++) {
279  m_typeLatencyHist[i]->reset();
280  m_hitTypeLatencyHist[i]->reset();
281  m_missTypeLatencyHist[i]->reset();
282  for (int j = 0; j < MachineType_NUM; j++) {
283  m_hitTypeMachLatencyHist[i][j]->reset();
284  m_missTypeMachLatencyHist[i][j]->reset();
285  }
286  }
287 
288  for (int i = 0; i < MachineType_NUM; i++) {
289  m_missMachLatencyHist[i]->reset();
290  m_hitMachLatencyHist[i]->reset();
291 
292  m_IssueToInitialDelayHist[i]->reset();
293  m_InitialToForwardDelayHist[i]->reset();
296 
297  m_IncompleteTimes[i] = 0;
298  }
299 }
300 
301 // Insert the request in the request table. Return RequestStatus_Aliased
302 // if the entry was already present.
303 RequestStatus
304 Sequencer::insertRequest(PacketPtr pkt, RubyRequestType primary_type,
305  RubyRequestType secondary_type)
306 {
307  // See if we should schedule a deadlock check
308  if (!deadlockCheckEvent.scheduled() &&
311  }
312 
313  if (isTlbiCmdRequest(primary_type)) {
314  assert(primary_type == secondary_type);
315 
316  switch (primary_type) {
317  case RubyRequestType_TLBI_EXT_SYNC_COMP:
318  // Don't have to store any data on this
319  break;
320  case RubyRequestType_TLBI:
321  case RubyRequestType_TLBI_SYNC:
322  {
324 
325  // returns pair<inserted element, was inserted>
326  [[maybe_unused]] auto insert_data = \
327  m_UnaddressedRequestTable.emplace(
330  pkt, primary_type, secondary_type, curCycle()));
331 
332  // if insert_data.second is false, wasn't inserted
333  assert(insert_data.second &&
334  "Another TLBI request with the same ID exists");
335 
336  DPRINTF(RubySequencer, "Inserting TLBI request %016x\n",
338 
339  break;
340  }
341 
342  default:
343  panic("Unexpected TLBI RubyRequestType");
344  }
345 
346  return RequestStatus_Ready;
347  }
348 
349  Addr line_addr = makeLineAddress(pkt->getAddr());
350  // Check if there is any outstanding request for the same cache line.
351  auto &seq_req_list = m_RequestTable[line_addr];
352  // Create a default entry
353  seq_req_list.emplace_back(pkt, primary_type,
354  secondary_type, curCycle());
356 
357  if (seq_req_list.size() > 1) {
358  return RequestStatus_Aliased;
359  }
360 
362 
363  return RequestStatus_Ready;
364 }
365 
366 void
368 {
370 }
371 
372 void
373 Sequencer::recordMissLatency(SequencerRequest* srequest, bool llscSuccess,
374  const MachineType respondingMach,
375  bool isExternalHit, Cycles initialRequestTime,
376  Cycles forwardRequestTime,
377  Cycles firstResponseTime)
378 {
379  RubyRequestType type = srequest->m_type;
380  Cycles issued_time = srequest->issue_time;
381  Cycles completion_time = curCycle();
382 
383  assert(curCycle() >= issued_time);
384  Cycles total_lat = completion_time - issued_time;
385 
386  if ((initialRequestTime != 0) && (initialRequestTime < issued_time)) {
387  // if the request was combined in the protocol with an earlier request
388  // for the same address, it is possible that it will return an
389  // initialRequestTime corresponding the earlier request. Since Cycles
390  // is unsigned, we can't let this request get profiled below.
391 
392  total_lat = Cycles(0);
393  }
394 
395  DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n",
396  curTick(), m_version, "Seq", llscSuccess ? "Done" : "SC_Failed",
397  "", "", printAddress(srequest->pkt->getAddr()), total_lat);
398 
399  m_latencyHist.sample(total_lat);
400  m_typeLatencyHist[type]->sample(total_lat);
401 
402  if (isExternalHit) {
403  m_missLatencyHist.sample(total_lat);
404  m_missTypeLatencyHist[type]->sample(total_lat);
405 
406  if (respondingMach != MachineType_NUM) {
407  m_missMachLatencyHist[respondingMach]->sample(total_lat);
408  m_missTypeMachLatencyHist[type][respondingMach]->sample(total_lat);
409 
410  if ((issued_time <= initialRequestTime) &&
411  (initialRequestTime <= forwardRequestTime) &&
412  (forwardRequestTime <= firstResponseTime) &&
413  (firstResponseTime <= completion_time)) {
414 
415  m_IssueToInitialDelayHist[respondingMach]->sample(
416  initialRequestTime - issued_time);
417  m_InitialToForwardDelayHist[respondingMach]->sample(
418  forwardRequestTime - initialRequestTime);
419  m_ForwardToFirstResponseDelayHist[respondingMach]->sample(
420  firstResponseTime - forwardRequestTime);
421  m_FirstResponseToCompletionDelayHist[respondingMach]->sample(
422  completion_time - firstResponseTime);
423  } else {
424  m_IncompleteTimes[respondingMach]++;
425  }
426  }
427  } else {
428  m_hitLatencyHist.sample(total_lat);
429  m_hitTypeLatencyHist[type]->sample(total_lat);
430 
431  if (respondingMach != MachineType_NUM) {
432  m_hitMachLatencyHist[respondingMach]->sample(total_lat);
433  m_hitTypeMachLatencyHist[type][respondingMach]->sample(total_lat);
434  }
435  }
436 }
437 
438 void
440 {
441  llscClearMonitor(address);
442  writeCallback(address, data);
443 }
444 
445 void
447  const bool externalHit, const MachineType mach,
448  const Cycles initialRequestTime,
449  const Cycles forwardRequestTime,
450  const Cycles firstResponseTime,
451  const bool noCoales)
452 {
453  //
454  // Free the whole list as we assume we have had the exclusive access
455  // to this cache line when response for the write comes back
456  //
457  assert(address == makeLineAddress(address));
458  assert(m_RequestTable.find(address) != m_RequestTable.end());
459  auto &seq_req_list = m_RequestTable[address];
460 
461  // Perform hitCallback on every cpu request made to this cache block while
462  // ruby request was outstanding. Since only 1 ruby request was made,
463  // profile the ruby latency once.
464  bool ruby_request = true;
465  int aliased_stores = 0;
466  int aliased_loads = 0;
467  while (!seq_req_list.empty()) {
468  SequencerRequest &seq_req = seq_req_list.front();
469 
470  if (noCoales && !ruby_request) {
471  // Do not process follow-up requests
472  // (e.g. if full line no present)
473  // Reissue to the cache hierarchy
474  issueRequest(seq_req.pkt, seq_req.m_second_type);
475  break;
476  }
477 
478  if (ruby_request) {
479  assert(seq_req.m_type != RubyRequestType_LD);
480  assert(seq_req.m_type != RubyRequestType_Load_Linked);
481  assert(seq_req.m_type != RubyRequestType_IFETCH);
482  }
483 
484  // handle write request
485  if ((seq_req.m_type != RubyRequestType_LD) &&
486  (seq_req.m_type != RubyRequestType_Load_Linked) &&
487  (seq_req.m_type != RubyRequestType_IFETCH)) {
488  // LL/SC support (tested with ARMv8)
489  bool success = true;
490 
491  if (seq_req.m_type != RubyRequestType_Store_Conditional) {
492  // Regular stores to addresses being monitored
493  // will fail (remove) the monitor entry.
494  llscClearMonitor(address);
495  } else {
496  // Store conditionals must first check the monitor
497  // if they will succeed or not
498  success = llscStoreConditional(address);
499  seq_req.pkt->req->setExtraData(success ? 1 : 0);
500  }
501 
502  // Handle SLICC block_on behavior for Locked_RMW accesses. NOTE: the
503  // address variable here is assumed to be a line address, so when
504  // blocking buffers, must check line addresses.
505  if (seq_req.m_type == RubyRequestType_Locked_RMW_Read) {
506  // blockOnQueue blocks all first-level cache controller queues
507  // waiting on memory accesses for the specified address that go
508  // to the specified queue. In this case, a Locked_RMW_Write must
509  // go to the mandatory_q before unblocking the first-level
510  // controller. This will block standard loads, stores, ifetches,
511  // etc.
513  } else if (seq_req.m_type == RubyRequestType_Locked_RMW_Write) {
514  m_controller->unblock(address);
515  }
516 
517  if (ruby_request) {
518  recordMissLatency(&seq_req, success, mach, externalHit,
519  initialRequestTime, forwardRequestTime,
520  firstResponseTime);
521  } else {
522  aliased_stores++;
523  }
524  markRemoved();
525  hitCallback(&seq_req, data, success, mach, externalHit,
526  initialRequestTime, forwardRequestTime,
527  firstResponseTime, !ruby_request);
528  ruby_request = false;
529  } else {
530  // handle read request
531  assert(!ruby_request);
532  markRemoved();
533  aliased_loads++;
534  hitCallback(&seq_req, data, true, mach, externalHit,
535  initialRequestTime, forwardRequestTime,
536  firstResponseTime, !ruby_request);
537  }
538  seq_req_list.pop_front();
539  }
540 
541  // free all outstanding requests corresponding to this address
542  if (seq_req_list.empty()) {
543  m_RequestTable.erase(address);
544  }
545 }
546 
547 void
549  bool externalHit, const MachineType mach,
550  Cycles initialRequestTime,
551  Cycles forwardRequestTime,
552  Cycles firstResponseTime)
553 {
554  //
555  // Free up read requests until we hit the first Write request
556  // or end of the corresponding list.
557  //
558  assert(address == makeLineAddress(address));
559  assert(m_RequestTable.find(address) != m_RequestTable.end());
560  auto &seq_req_list = m_RequestTable[address];
561 
562  // Perform hitCallback on every cpu request made to this cache block while
563  // ruby request was outstanding. Since only 1 ruby request was made,
564  // profile the ruby latency once.
565  bool ruby_request = true;
566  int aliased_loads = 0;
567  while (!seq_req_list.empty()) {
568  SequencerRequest &seq_req = seq_req_list.front();
569  if (ruby_request) {
570  assert((seq_req.m_type == RubyRequestType_LD) ||
571  (seq_req.m_type == RubyRequestType_Load_Linked) ||
572  (seq_req.m_type == RubyRequestType_IFETCH));
573  } else {
574  aliased_loads++;
575  }
576  if ((seq_req.m_type != RubyRequestType_LD) &&
577  (seq_req.m_type != RubyRequestType_Load_Linked) &&
578  (seq_req.m_type != RubyRequestType_IFETCH)) {
579  // Write request: reissue request to the cache hierarchy
580  issueRequest(seq_req.pkt, seq_req.m_second_type);
581  break;
582  }
583  if (ruby_request) {
584  recordMissLatency(&seq_req, true, mach, externalHit,
585  initialRequestTime, forwardRequestTime,
586  firstResponseTime);
587  }
588  markRemoved();
589  hitCallback(&seq_req, data, true, mach, externalHit,
590  initialRequestTime, forwardRequestTime,
591  firstResponseTime, !ruby_request);
592  ruby_request = false;
593  seq_req_list.pop_front();
594  }
595 
596  // free all outstanding requests corresponding to this address
597  if (seq_req_list.empty()) {
598  m_RequestTable.erase(address);
599  }
600 }
601 
602 void
604  bool llscSuccess,
605  const MachineType mach, const bool externalHit,
606  const Cycles initialRequestTime,
607  const Cycles forwardRequestTime,
608  const Cycles firstResponseTime,
609  const bool was_coalesced)
610 {
611  warn_once("Replacement policy updates recently became the responsibility "
612  "of SLICC state machines. Make sure to setMRU() near callbacks "
613  "in .sm files!");
614 
615  PacketPtr pkt = srequest->pkt;
616  Addr request_address(pkt->getAddr());
617  RubyRequestType type = srequest->m_type;
618 
619  if (was_coalesced) {
620  // Notify the controller about a coalesced request so it can properly
621  // account for it in its hit/miss stats and/or train prefetchers
622  // (this is protocol-dependent)
623  m_controller->notifyCoalesced(request_address, type, pkt->req,
624  data, externalHit);
625  }
626 
627  // Load-linked handling
628  if (type == RubyRequestType_Load_Linked) {
629  Addr line_addr = makeLineAddress(request_address);
630  llscLoadLinked(line_addr);
631  }
632 
633  // update the data unless it is a non-data-carrying flush
635  data.setData(pkt);
636  } else if (!pkt->isFlush()) {
637  if ((type == RubyRequestType_LD) ||
638  (type == RubyRequestType_IFETCH) ||
639  (type == RubyRequestType_RMW_Read) ||
640  (type == RubyRequestType_Locked_RMW_Read) ||
641  (type == RubyRequestType_Load_Linked)) {
642  pkt->setData(
643  data.getData(getOffset(request_address), pkt->getSize()));
644  DPRINTF(RubySequencer, "read data %s\n", data);
645  } else if (pkt->req->isSwap()) {
646  assert(!pkt->isMaskedWrite());
647  std::vector<uint8_t> overwrite_val(pkt->getSize());
648  pkt->writeData(&overwrite_val[0]);
649  pkt->setData(
650  data.getData(getOffset(request_address), pkt->getSize()));
651  data.setData(&overwrite_val[0],
652  getOffset(request_address), pkt->getSize());
653  DPRINTF(RubySequencer, "swap data %s\n", data);
654  } else if (pkt->isAtomicOp()) {
655  // Set the data in the packet to the old value in the cache
656  pkt->setData(
657  data.getData(getOffset(request_address), pkt->getSize()));
658  DPRINTF(RubySequencer, "AMO original data %s\n", data);
659  // execute AMO operation
660  (*(pkt->getAtomicOp()))(
661  data.getDataMod(getOffset(request_address)));
662  DPRINTF(RubySequencer, "AMO new data %s\n", data);
663  } else if (type != RubyRequestType_Store_Conditional || llscSuccess) {
664  // Types of stores set the actual data here, apart from
665  // failed Store Conditional requests
666  data.setData(pkt);
667  DPRINTF(RubySequencer, "set data %s\n", data);
668  }
669  }
670 
671  // If using the RubyTester, update the RubyTester sender state's
672  // subBlock with the recieved data. The tester will later access
673  // this state.
674  if (m_usingRubyTester) {
675  DPRINTF(RubySequencer, "hitCallback %s 0x%x using RubyTester\n",
676  pkt->cmdString(), pkt->getAddr());
677  RubyTester::SenderState* testerSenderState =
679  assert(testerSenderState);
680  testerSenderState->subBlock.mergeFrom(data);
681  }
682 
685  assert(pkt->req);
686  delete pkt;
687  rs->m_cache_recorder->enqueueNextFetchRequest();
688  } else if (RubySystem::getCooldownEnabled()) {
689  delete pkt;
690  rs->m_cache_recorder->enqueueNextFlushRequest();
691  } else {
692  ruby_hit_callback(pkt);
694  }
695 }
696 
697 void
699  RubyRequestType reqType,
700  const MachineType mach,
701  const Cycles initialRequestTime,
702  const Cycles forwardRequestTime,
703  const Cycles firstResponseTime)
704 {
705  DPRINTF(RubySequencer, "unaddressedCallback ID:%08x type:%d\n",
706  unaddressedReqId, reqType);
707 
708  switch (reqType) {
709  case RubyRequestType_TLBI_EXT_SYNC:
710  {
711  // This should trigger the CPU to wait for stale translations
712  // and send an EXT_SYNC_COMP once complete.
713 
714  // Don't look for the ID in our requestTable.
715  // It won't be there because we didn't request this Sync
716  ruby_stale_translation_callback(unaddressedReqId);
717  break;
718  }
719  case RubyRequestType_TLBI:
720  case RubyRequestType_TLBI_SYNC:
721  {
722  // These signal that a TLBI operation that this core initiated
723  // of the respective type (TLBI or Sync) has finished.
724 
725  assert(m_UnaddressedRequestTable.find(unaddressedReqId)
726  != m_UnaddressedRequestTable.end());
727 
728  {
729  SequencerRequest &seq_req =
730  m_UnaddressedRequestTable.at(unaddressedReqId);
731  assert(seq_req.m_type == reqType);
732 
733  PacketPtr pkt = seq_req.pkt;
734 
737  }
738 
739  m_UnaddressedRequestTable.erase(unaddressedReqId);
740  break;
741  }
742  default:
743  panic("Unexpected TLBI RubyRequestType");
744  }
745 }
746 
747 bool
749 {
750  return m_RequestTable.empty() &&
752 }
753 
754 RequestStatus
756 {
757  // HTM abort signals must be allowed to reach the Sequencer
758  // the same cycle they are issued. They cannot be retried.
760  !pkt->req->isHTMAbort()) {
761  return RequestStatus_BufferFull;
762  }
763 
764  RubyRequestType primary_type = RubyRequestType_NULL;
765  RubyRequestType secondary_type = RubyRequestType_NULL;
766 
767  if (pkt->isLLSC()) {
768  // LL/SC instructions need to be handled carefully by the cache
769  // coherence protocol to ensure they follow the proper semantics. In
770  // particular, by identifying the operations as atomic, the protocol
771  // should understand that migratory sharing optimizations should not
772  // be performed (i.e. a load between the LL and SC should not steal
773  // away exclusive permission).
774  //
775  // The following logic works correctly with the semantics
776  // of armV8 LDEX/STEX instructions.
777 
778  if (pkt->isWrite()) {
779  DPRINTF(RubySequencer, "Issuing SC\n");
780  primary_type = RubyRequestType_Store_Conditional;
781 #if defined (PROTOCOL_MESI_Three_Level) || defined (PROTOCOL_MESI_Three_Level_HTM)
782  secondary_type = RubyRequestType_Store_Conditional;
783 #else
784  secondary_type = RubyRequestType_ST;
785 #endif
786  } else {
787  DPRINTF(RubySequencer, "Issuing LL\n");
788  assert(pkt->isRead());
789  primary_type = RubyRequestType_Load_Linked;
790  secondary_type = RubyRequestType_LD;
791  }
792  } else if (pkt->req->isLockedRMW()) {
793  //
794  // x86 locked instructions are translated to store cache coherence
795  // requests because these requests should always be treated as read
796  // exclusive operations and should leverage any migratory sharing
797  // optimization built into the protocol.
798  //
799  if (pkt->isWrite()) {
800  DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
801  primary_type = RubyRequestType_Locked_RMW_Write;
802  } else {
803  DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
804  assert(pkt->isRead());
805  primary_type = RubyRequestType_Locked_RMW_Read;
806  }
807  secondary_type = RubyRequestType_ST;
808  } else if (pkt->req->isTlbiCmd()) {
809  primary_type = secondary_type = tlbiCmdToRubyRequestType(pkt);
810  DPRINTF(RubySequencer, "Issuing TLBI\n");
811  } else {
812  //
813  // To support SwapReq, we need to check isWrite() first: a SwapReq
814  // should always be treated like a write, but since a SwapReq implies
815  // both isWrite() and isRead() are true, check isWrite() first here.
816  //
817  if (pkt->isWrite()) {
818  //
819  // Note: M5 packets do not differentiate ST from RMW_Write
820  //
821  primary_type = secondary_type = RubyRequestType_ST;
822  } else if (pkt->isRead()) {
823  // hardware transactional memory commands
824  if (pkt->req->isHTMCmd()) {
825  primary_type = secondary_type = htmCmdToRubyRequestType(pkt);
826  } else if (pkt->req->isInstFetch()) {
827  primary_type = secondary_type = RubyRequestType_IFETCH;
828  } else {
829  if (pkt->req->isReadModifyWrite()) {
830  primary_type = RubyRequestType_RMW_Read;
831  secondary_type = RubyRequestType_ST;
832  } else {
833  primary_type = secondary_type = RubyRequestType_LD;
834  }
835  }
836  } else if (pkt->isFlush()) {
837  primary_type = secondary_type = RubyRequestType_FLUSH;
838  } else {
839  panic("Unsupported ruby packet type\n");
840  }
841  }
842 
843  // Check if the line is blocked for a Locked_RMW
844  if (!pkt->req->isMemMgmt() &&
846  (primary_type != RubyRequestType_Locked_RMW_Write)) {
847  // Return that this request's cache line address aliases with
848  // a prior request that locked the cache line. The request cannot
849  // proceed until the cache line is unlocked by a Locked_RMW_Write
850  return RequestStatus_Aliased;
851  }
852 
853  RequestStatus status = insertRequest(pkt, primary_type, secondary_type);
854 
855  // It is OK to receive RequestStatus_Aliased, it can be considered Issued
856  if (status != RequestStatus_Ready && status != RequestStatus_Aliased)
857  return status;
858  // non-aliased with any existing request in the request table, just issue
859  // to the cache
860  if (status != RequestStatus_Aliased)
861  issueRequest(pkt, secondary_type);
862 
863  // TODO: issue hardware prefetches here
864  return RequestStatus_Issued;
865 }
866 
867 void
868 Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
869 {
870  assert(pkt != NULL);
871  ContextID proc_id = pkt->req->hasContextId() ?
872  pkt->req->contextId() : InvalidContextID;
873 
874  ContextID core_id = coreId();
875 
876  // If valid, copy the pc to the ruby request
877  Addr pc = 0;
878  if (pkt->req->hasPC()) {
879  pc = pkt->req->getPC();
880  }
881 
882  // check if the packet has data as for example prefetch and flush
883  // requests do not
884  std::shared_ptr<RubyRequest> msg;
885  if (pkt->req->isMemMgmt()) {
886  msg = std::make_shared<RubyRequest>(clockEdge(),
887  pc, secondary_type,
888  RubyAccessMode_Supervisor, pkt,
889  proc_id, core_id);
890 
891  DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s\n",
892  curTick(), m_version, "Seq", "Begin", "", "",
893  RubyRequestType_to_string(secondary_type));
894 
895  if (pkt->req->isTlbiCmd()) {
896  msg->m_isTlbi = true;
897  switch (secondary_type) {
898  case RubyRequestType_TLBI_EXT_SYNC_COMP:
899  msg->m_tlbiTransactionUid = pkt->req->getExtraData();
900  break;
901  case RubyRequestType_TLBI:
902  case RubyRequestType_TLBI_SYNC:
903  msg->m_tlbiTransactionUid = \
904  getCurrentUnaddressedTransactionID();
905  break;
906  default:
907  panic("Unexpected TLBI RubyRequestType");
908  }
909  DPRINTF(RubySequencer, "Issuing TLBI %016x\n",
910  msg->m_tlbiTransactionUid);
911  }
912  } else {
913  msg = std::make_shared<RubyRequest>(clockEdge(), pkt->getAddr(),
914  pkt->getSize(), pc, secondary_type,
915  RubyAccessMode_Supervisor, pkt,
916  PrefetchBit_No, proc_id, core_id);
917 
918  DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %s\n",
919  curTick(), m_version, "Seq", "Begin", "", "",
920  printAddress(msg->getPhysicalAddress()),
921  RubyRequestType_to_string(secondary_type));
922  }
923 
924  // hardware transactional memory
925  // If the request originates in a transaction,
926  // then mark the Ruby message as such.
927  if (pkt->isHtmTransactional()) {
928  msg->m_htmFromTransaction = true;
929  msg->m_htmTransactionUid = pkt->getHtmTransactionUid();
930  }
931 
932  Tick latency = cyclesToTicks(
933  m_controller->mandatoryQueueLatency(secondary_type));
934  assert(latency > 0);
935 
936  assert(m_mandatory_q_ptr != NULL);
937  m_mandatory_q_ptr->enqueue(msg, clockEdge(), latency);
938 }
939 
/**
 * Debug helper: print a request table as one "[ <key> = <types...> ]"
 * group per entry.
 *
 * Fix: the closing " ]" was previously emitted once after the outer loop,
 * producing unbalanced output for empty or multi-entry maps
 * (e.g. "[ a = LD[ b = ST ]"). It now closes each entry's bracket.
 */
template <class KEY, class VALUE>
std::ostream &
operator<<(std::ostream &out, const std::unordered_map<KEY, VALUE> &map)
{
    for (const auto &table_entry : map) {
        out << "[ " << table_entry.first << " =";
        for (const auto &seq_req : table_entry.second) {
            out << " " << RubyRequestType_to_string(seq_req.m_second_type);
        }
        out << " ]";
    }

    return out;
}
954 
955 void
956 Sequencer::print(std::ostream& out) const
957 {
958  out << "[Sequencer: " << m_version
959  << ", outstanding requests: " << m_outstanding_count
960  << ", request table: " << m_RequestTable
961  << "]";
962 }
963 
964 void
965 Sequencer::recordRequestType(SequencerRequestType requestType) {
966  DPRINTF(RubyStats, "Recorded statistic: %s\n",
967  SequencerRequestType_to_string(requestType));
968 }
969 
970 void
972 {
973  llscClearMonitor(address);
974  ruby_eviction_callback(address);
975 }
976 
977 void
979 {
981  // Limit m_unaddressedTransactionCnt to 32 bits,
982  // top 32 bits should always be zeroed out
983  uint64_t aligned_txid = \
984  m_unaddressedTransactionCnt << RubySystem::getBlockSizeBits();
985 
986  if (aligned_txid > 0xFFFFFFFFull) {
988  }
989 }
990 
991 uint64_t
993 {
994  return (
995  uint64_t(m_version & 0xFFFFFFFF) << 32) |
997  );
998 }
999 
1000 } // namespace ruby
1001 } // namespace gem5
gem5::ruby::Sequencer::m_FirstResponseToCompletionDelayHist
std::vector< statistics::Histogram * > m_FirstResponseToCompletionDelayHist
Definition: Sequencer.hh:289
gem5::ruby::Sequencer::~Sequencer
~Sequencer()
Definition: Sequencer.cc:146
gem5::ruby::SubBlock::mergeFrom
void mergeFrom(const DataBlock &data)
Definition: SubBlock.hh:66
gem5::curTick
Tick curTick()
The universal simulation clock.
Definition: cur_tick.hh:46
gem5::Packet::cmdString
const std::string & cmdString() const
Return the string name of the cmd field (for debugging and tracing).
Definition: packet.hh:585
gem5::ruby::Sequencer::m_RequestTable
std::unordered_map< Addr, std::list< SequencerRequest > > m_RequestTable
Definition: Sequencer.hh:224
gem5::ruby::Sequencer::m_hitMachLatencyHist
std::vector< statistics::Histogram * > m_hitMachLatencyHist
Histograms for profiling the latencies for requests that did not required external messages.
Definition: Sequencer.hh:271
gem5::Packet::isAtomicOp
bool isAtomicOp() const
Definition: packet.hh:829
gem5::ruby::RubyPort::m_version
uint32_t m_version
Definition: RubyPort.hh:198
gem5::ruby::Sequencer::m_unaddressedTransactionCnt
uint64_t m_unaddressedTransactionCnt
Definition: Sequencer.hh:253
gem5::ruby::Sequencer::m_deadlock_threshold
Cycles m_deadlock_threshold
Definition: Sequencer.hh:229
gem5::ruby::htmCmdToRubyRequestType
RubyRequestType htmCmdToRubyRequestType(const Packet *pkt)
Definition: RubySlicc_Util.hh:179
system.hh
Profiler.hh
DPRINTFR
#define DPRINTFR(x,...)
Definition: trace.hh:200
data
const char data[]
Definition: circlebuf.test.cc:48
gem5::Clocked::curCycle
Cycles curCycle() const
Determine the current cycle, corresponding to a tick aligned to a clock edge.
Definition: clocked_object.hh:195
gem5::ruby::printAddress
std::string printAddress(Addr addr)
Definition: Address.cc:80
gem5::ruby::Sequencer::makeRequest
RequestStatus makeRequest(PacketPtr pkt) override
Definition: Sequencer.cc:755
gem5::ruby::Sequencer::writeCallbackScFail
void writeCallbackScFail(Addr address, DataBlock &data)
Proxy function to writeCallback that first invalidates the line address in the local monitor.
Definition: Sequencer.cc:439
gem5::ruby::Sequencer::issueRequest
void issueRequest(PacketPtr pkt, RubyRequestType type)
Definition: Sequencer.cc:868
gem5::Drainable::drainState
DrainState drainState() const
Return the current drain state of an object.
Definition: drain.hh:324
gem5::ruby::Sequencer::m_UnaddressedRequestTable
std::unordered_map< uint64_t, SequencerRequest > m_UnaddressedRequestTable
Definition: Sequencer.hh:227
gem5::ruby::Sequencer::getCurrentUnaddressedTransactionID
uint64_t getCurrentUnaddressedTransactionID() const
Generate the current unaddressed transaction ID based on the counter and the Sequencer object's versi...
Definition: Sequencer.cc:992
gem5::ruby::Sequencer::m_dataCache_ptr
CacheMemory * m_dataCache_ptr
Definition: Sequencer.hh:238
gem5::ruby::Sequencer::m_outstandReqHist
statistics::Histogram m_outstandReqHist
Histogram for number of outstanding requests per cycle.
Definition: Sequencer.hh:258
warn_once
#define warn_once(...)
Definition: logging.hh:250
gem5::Packet::findNextSenderState
T * findNextSenderState() const
Go through the sender state stack and return the first instance that is of type T (as determined by a...
Definition: packet.hh:572
gem5::Packet::setData
void setData(const uint8_t *p)
Copy data into the packet from the provided pointer.
Definition: packet.hh:1265
gem5::ruby::operator<<
std::ostream & operator<<(std::ostream &os, const BoolVec &myvector)
Definition: BoolVec.cc:49
gem5::Packet::req
RequestPtr req
A pointer to the original request.
Definition: packet.hh:374
gem5::ruby::AbstractCacheEntry
Definition: AbstractCacheEntry.hh:62
gem5::InvalidContextID
const ContextID InvalidContextID
Definition: types.hh:240
gem5::ruby::Sequencer::hitCallback
void hitCallback(SequencerRequest *srequest, DataBlock &data, bool llscSuccess, const MachineType mach, const bool externalHit, const Cycles initialRequestTime, const Cycles forwardRequestTime, const Cycles firstResponseTime, const bool was_coalesced)
Definition: Sequencer.cc:603
gem5::ruby::Sequencer::insertRequest
virtual RequestStatus insertRequest(PacketPtr pkt, RubyRequestType primary_type, RubyRequestType secondary_type)
Definition: Sequencer.cc:304
gem5::ruby::Sequencer::incrementUnaddressedTransactionCnt
void incrementUnaddressedTransactionCnt()
Increment the unaddressed transaction counter.
Definition: Sequencer.cc:978
gem5::ruby::RubyPort::ruby_hit_callback
void ruby_hit_callback(PacketPtr pkt)
Definition: RubyPort.cc:454
gem5::ruby::MessageBuffer::enqueue
void enqueue(MsgPtr message, Tick curTime, Tick delta)
Definition: MessageBuffer.cc:217
gem5::Packet::isWrite
bool isWrite() const
Definition: packet.hh:591
RubyRequest.hh
ldstflags.hh
gem5::EventManager::schedule
void schedule(Event &event, Tick when)
Definition: eventq.hh:1019
std::vector
STL vector class.
Definition: stl.hh:37
gem5::ruby::AbstractCacheEntry::clearLocked
void clearLocked()
Definition: AbstractCacheEntry.cc:91
gem5::ruby::Sequencer::m_missLatencyHist
statistics::Histogram m_missLatencyHist
Histogram for holding latency profile of all requests that miss in the controller connected to this s...
Definition: Sequencer.hh:276
gem5::ruby::AbstractController::isBlocked
bool isBlocked(Addr) const
Definition: AbstractController.cc:327
gem5::ruby::Sequencer::recordRequestType
void recordRequestType(SequencerRequestType requestType)
Definition: Sequencer.cc:965
gem5::ruby::Sequencer::coreId
int coreId() const
Definition: Sequencer.hh:150
gem5::ArmISA::i
Bitfield< 7 > i
Definition: misc_types.hh:67
gem5::ruby::CacheMemory::lookup
AbstractCacheEntry * lookup(Addr address)
Definition: CacheMemory.cc:342
gem5::ruby::makeLineAddress
Addr makeLineAddress(Addr addr)
Definition: Address.cc:60
gem5::ruby::tlbiCmdToRubyRequestType
RubyRequestType tlbiCmdToRubyRequestType(const Packet *pkt)
Definition: RubySlicc_Util.hh:196
gem5::statistics::DistBase::sample
void sample(const U &v, int n=1)
Add a value to the distribtion n times.
Definition: statistics.hh:1328
gem5::ruby::Sequencer::m_outstanding_count
int m_outstanding_count
Definition: Sequencer.hh:248
gem5::ruby::RubyPort::ruby_stale_translation_callback
void ruby_stale_translation_callback(Addr txnId)
Definition: RubyPort.cc:496
gem5::ruby::RubyPort::m_mandatory_q_ptr
MessageBuffer * m_mandatory_q_ptr
Definition: RubyPort.hh:200
packet.hh
gem5::RubyTester::SenderState
Definition: RubyTester.hh:89
gem5::Cycles
Cycles is a wrapper class for representing cycle counts, i.e.
Definition: types.hh:78
str.hh
gem5::ruby::Sequencer::m_missMachLatencyHist
std::vector< statistics::Histogram * > m_missMachLatencyHist
Histograms for profiling the latencies for requests that required external messages.
Definition: Sequencer.hh:281
gem5::Packet::getAtomicOp
AtomicOpFunctor * getAtomicOp() const
Accessor function to atomic op.
Definition: packet.hh:828
gem5::ruby::Sequencer::m_missTypeMachLatencyHist
std::vector< std::vector< statistics::Histogram * > > m_missTypeMachLatencyHist
Definition: Sequencer.hh:283
gem5::statistics::Histogram
A simple histogram stat.
Definition: statistics.hh:2126
gem5::ruby::Sequencer::evictionCallback
void evictionCallback(Addr address)
Definition: Sequencer.cc:971
gem5::ArmISA::j
Bitfield< 24 > j
Definition: misc_types.hh:57
gem5::ruby::RubyPort::m_usingRubyTester
bool m_usingRubyTester
Definition: RubyPort.hh:201
gem5::Packet::isRead
bool isRead() const
Definition: packet.hh:590
gem5::ruby::RubyPort::ruby_eviction_callback
void ruby_eviction_callback(Addr address)
Definition: RubyPort.cc:701
gem5::ruby::Sequencer::readCallback
void readCallback(Addr address, DataBlock &data, const bool externalHit=false, const MachineType mach=MachineType_NUM, const Cycles initialRequestTime=Cycles(0), const Cycles forwardRequestTime=Cycles(0), const Cycles firstResponseTime=Cycles(0))
Definition: Sequencer.cc:548
gem5::ruby::Sequencer::recordMissLatency
void recordMissLatency(SequencerRequest *srequest, bool llscSuccess, const MachineType respondingMach, bool isExternalHit, Cycles initialRequestTime, Cycles forwardRequestTime, Cycles firstResponseTime)
Definition: Sequencer.cc:373
gem5::ruby::Sequencer::m_ForwardToFirstResponseDelayHist
std::vector< statistics::Histogram * > m_ForwardToFirstResponseDelayHist
Definition: Sequencer.hh:288
gem5::ruby::Sequencer::llscStoreConditional
bool llscStoreConditional(const Addr)
Searches for cache line address in the global monitor tagged with this Sequencer object's version id.
Definition: Sequencer.cc:178
gem5::Named::name
virtual std::string name() const
Definition: named.hh:47
gem5::VegaISA::p
Bitfield< 54 > p
Definition: pagetable.hh:70
gem5::ruby::Sequencer::m_latencyHist
statistics::Histogram m_latencyHist
Histogram for holding latency profile of all requests.
Definition: Sequencer.hh:261
gem5::Clocked::cyclesToTicks
Tick cyclesToTicks(Cycles c) const
Definition: clocked_object.hh:227
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:186
gem5::ruby::Sequencer::llscCheckMonitor
bool llscCheckMonitor(const Addr)
Searches for cache line address in the global monitor tagged with this Sequencer object's version id.
Definition: Sequencer.cc:201
gem5::Packet
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:291
gem5::ruby::Sequencer::m_IssueToInitialDelayHist
std::vector< statistics::Histogram * > m_IssueToInitialDelayHist
Histograms for recording the breakdown of miss latency.
Definition: Sequencer.hh:286
gem5::ruby::SequencerRequest::m_second_type
RubyRequestType m_second_type
Definition: Sequencer.hh:66
gem5::Tick
uint64_t Tick
Tick count type.
Definition: types.hh:58
gem5::ruby::Sequencer::llscClearLocalMonitor
void llscClearLocalMonitor()
Removes all addresses from the local monitor.
Definition: Sequencer.cc:217
gem5::ruby::SequencerRequest
Definition: Sequencer.hh:62
gem5::ruby::Sequencer::m_missTypeLatencyHist
std::vector< statistics::Histogram * > m_missTypeLatencyHist
Definition: Sequencer.hh:277
gem5::ruby::Sequencer::m_InitialToForwardDelayHist
std::vector< statistics::Histogram * > m_InitialToForwardDelayHist
Definition: Sequencer.hh:287
gem5::X86ISA::type
type
Definition: misc.hh:727
RubyTester.hh
gem5::ruby::Sequencer::functionalWrite
virtual int functionalWrite(Packet *func_pkt) override
Definition: Sequencer.cc:258
gem5::ruby::Sequencer::m_hitTypeMachLatencyHist
std::vector< std::vector< statistics::Histogram * > > m_hitTypeMachLatencyHist
Definition: Sequencer.hh:272
gem5::ruby::RubySystem::getBlockSizeBits
static uint32_t getBlockSizeBits()
Definition: RubySystem.hh:73
gem5::Packet::getHtmTransactionUid
uint64_t getHtmTransactionUid() const
If a packet/request originates in a CPU executing in transactional mode, i.e.
Definition: packet.cc:525
gem5::Packet::isLLSC
bool isLLSC() const
Definition: packet.hh:617
RubySlicc_Util.hh
gem5::ruby::getOffset
Addr getOffset(Addr addr)
Definition: Address.cc:54
gem5::ruby::Sequencer::resetStats
void resetStats() override
Callback to reset stats.
Definition: Sequencer.cc:272
gem5::ruby::Sequencer::m_hitLatencyHist
statistics::Histogram m_hitLatencyHist
Histogram for holding latency profile of all requests that hit in the controller connected to this se...
Definition: Sequencer.hh:266
gem5::ruby::AbstractCacheEntry::setLocked
void setLocked(int context)
Definition: AbstractCacheEntry.cc:84
gem5::ruby::AbstractController::notifyCoalesced
virtual void notifyCoalesced(const Addr &addr, const RubyRequestType &type, const RequestPtr &req, const DataBlock &data_blk, const bool &was_miss)
Notifies controller of a request coalesced at the sequencer.
Definition: AbstractController.hh:148
gem5::ruby::Sequencer::m_hitTypeLatencyHist
std::vector< statistics::Histogram * > m_hitTypeLatencyHist
Definition: Sequencer.hh:267
gem5::ruby::Sequencer::Sequencer
Sequencer(const Params &)
Definition: Sequencer.cc:68
RubySystem.hh
gem5::ruby::Sequencer::empty
virtual bool empty() const
Definition: Sequencer.cc:748
gem5::ruby::RubyPort::testDrainComplete
void testDrainComplete()
Definition: RubyPort.cc:551
gem5::Packet::writeData
void writeData(uint8_t *p) const
Copy data from the packet to the memory at the provided pointer.
Definition: packet.hh:1294
gem5::Addr
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:147
gem5::ruby::RubyPort::ruby_unaddressed_callback
void ruby_unaddressed_callback(PacketPtr pkt)
Definition: RubyPort.cc:477
gem5::ruby::RubySystem::getCooldownEnabled
static bool getCooldownEnabled()
Definition: RubySystem.hh:76
gem5::ruby::RubyPort::m_ruby_system
RubySystem * m_ruby_system
Definition: RubyPort.hh:197
gem5::Clocked::clockEdge
Tick clockEdge(Cycles cycles=Cycles(0)) const
Determine the tick when a cycle begins, by default the current one, but the argument also enables the...
Definition: clocked_object.hh:177
gem5::ruby::Sequencer::m_max_outstanding_requests
int m_max_outstanding_requests
Definition: Sequencer.hh:236
gem5::ruby::AbstractController::unblock
void unblock(Addr)
Definition: AbstractController.cc:333
gem5::ruby::Sequencer::print
virtual void print(std::ostream &out) const
Definition: Sequencer.cc:956
gem5::ruby::Sequencer::wakeup
virtual void wakeup()
Definition: Sequencer.cc:223
gem5::ruby::AbstractController::mandatoryQueueLatency
virtual Cycles mandatoryQueueLatency(const RubyRequestType &param_type)
Definition: AbstractController.hh:119
gem5::ruby::Sequencer::llscLoadLinked
void llscLoadLinked(const Addr)
Places the cache line address into the global monitor tagged with this Sequencer object's version id.
Definition: Sequencer.cc:151
gem5::ruby::RubySystem
Definition: RubySystem.hh:63
gem5::ruby::RubyPort::functionalWrite
virtual int functionalWrite(Packet *func_pkt)
Definition: RubyPort.cc:735
gem5::Packet::isFlush
bool isFlush() const
Definition: packet.hh:621
gem5::ruby::Sequencer::markRemoved
void markRemoved()
Definition: Sequencer.cc:367
gem5::ruby::RubyPort::Params
RubyPortParams Params
Definition: RubyPort.hh:153
gem5::ruby::RubySystem::getWarmupEnabled
static bool getWarmupEnabled()
Definition: RubySystem.hh:75
gem5::ruby::Sequencer::llscClearMonitor
void llscClearMonitor(const Addr)
Removes the cache line address from the global monitor.
Definition: Sequencer.cc:164
gem5::ruby::RubyPort::m_controller
AbstractController * m_controller
Definition: RubyPort.hh:199
gem5::ContextID
int ContextID
Globally unique thread context ID.
Definition: types.hh:239
gem5::MipsISA::pc
Bitfield< 4 > pc
Definition: pra_constants.hh:243
gem5::ruby::Sequencer::writeCallback
void writeCallback(Addr address, DataBlock &data, const bool externalHit=false, const MachineType mach=MachineType_NUM, const Cycles initialRequestTime=Cycles(0), const Cycles forwardRequestTime=Cycles(0), const Cycles firstResponseTime=Cycles(0), const bool noCoales=false)
Definition: Sequencer.cc:446
gem5::ruby::SequencerRequest::m_type
RubyRequestType m_type
Definition: Sequencer.hh:65
gem5::ruby::SequencerRequest::issue_time
Cycles issue_time
Definition: Sequencer.hh:67
gem5::ruby::isTlbiCmdRequest
bool isTlbiCmdRequest(RubyRequestType type)
Definition: RubySlicc_Util.hh:166
logging.hh
gem5::ruby::Sequencer::unaddressedCallback
void unaddressedCallback(Addr unaddressedReqId, RubyRequestType requestType, const MachineType mach=MachineType_NUM, const Cycles initialRequestTime=Cycles(0), const Cycles forwardRequestTime=Cycles(0), const Cycles firstResponseTime=Cycles(0))
Definition: Sequencer.cc:698
gem5::ruby::AbstractCacheEntry::isLocked
bool isLocked(int context) const
Definition: AbstractCacheEntry.cc:98
gem5::ruby::CacheMemory::clearLockedAll
void clearLockedAll(int context)
Definition: CacheMemory.cc:497
gem5::statistics::DistBase::reset
void reset()
Reset stat value to default.
Definition: statistics.hh:1352
gem5::Packet::isMaskedWrite
bool isMaskedWrite() const
Definition: packet.hh:1413
gem5::Packet::isHtmTransactional
bool isHtmTransactional() const
Returns whether or not this packet/request originates in the CPU executing in transactional mode,...
Definition: packet.cc:519
gem5::ruby::DataBlock
Definition: DataBlock.hh:60
gem5::Packet::getAddr
Addr getAddr() const
Definition: packet.hh:790
gem5::ruby::Sequencer::deadlockCheckEvent
EventFunctionWrapper deadlockCheckEvent
Definition: Sequencer.hh:292
fatal_if
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condi...
Definition: logging.hh:226
gem5
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
Definition: gpu_translation_state.hh:37
gem5::ruby::RubyPort
Definition: RubyPort.hh:64
gem5::RubyTester::SenderState::subBlock
ruby::SubBlock subBlock
Definition: RubyTester.hh:91
gem5::ruby::Sequencer::m_IncompleteTimes
std::vector< statistics::Counter > m_IncompleteTimes
Definition: Sequencer.hh:290
gem5::ruby::AbstractController::blockOnQueue
void blockOnQueue(Addr, MessageBuffer *)
Definition: AbstractController.cc:320
gem5::ruby::Sequencer::m_typeLatencyHist
std::vector< statistics::Histogram * > m_typeLatencyHist
Definition: Sequencer.hh:262
gem5::Packet::getSize
unsigned getSize() const
Definition: packet.hh:800
gem5::ArmISA::rs
Bitfield< 9, 8 > rs
Definition: misc_types.hh:377
gem5::DrainState::Draining
@ Draining
Draining buffers pending serialization/handover.
gem5::Event::scheduled
bool scheduled() const
Determine if the current event is scheduled.
Definition: eventq.hh:465
panic
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:178
gem5::ArmISA::status
Bitfield< 5, 0 > status
Definition: misc_types.hh:423
gem5::Clocked::clockPeriod
Tick clockPeriod() const
Definition: clocked_object.hh:217
gem5::ruby::SequencerRequest::pkt
PacketPtr pkt
Definition: Sequencer.hh:64
Sequencer.hh

Generated on Wed Jul 13 2022 10:39:26 for gem5 by doxygen 1.8.17