Sequencer.cc

/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/system/Sequencer.hh"

#include "arch/x86/ldstflags.hh"
#include "base/logging.hh"
#include "base/str.hh"
#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/MemoryAccess.hh"
#include "debug/ProtocolTrace.hh"
#include "debug/RubySequencer.hh"
#include "debug/RubyStats.hh"
#include "mem/packet.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/protocol/PrefetchBit.hh"
#include "mem/ruby/protocol/RubyAccessMode.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/system/RubySystem.hh"
#include "sim/system.hh"

using namespace std;

Sequencer *
RubySequencerParams::create()
{
    return new Sequencer(this);
}
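
// The factory above is the bridge from the Python SimObject description to
// this C++ object: gem5 builds a RubySequencerParams instance from the
// configuration script and calls create() on it. As an illustrative sketch
// only (the cache names here are assumptions, not part of this file), a
// config might contain:
//
//     seq = RubySequencer(version = i, icache = l1i_cache,
//                         dcache = l1d_cache)
//
// and each of those parameters is read back out of `p` in the constructor
// below.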

Sequencer::Sequencer(const Params *p)
    : RubyPort(p), m_IncompleteTimes(MachineType_NUM),
      deadlockCheckEvent([this]{ wakeup(); }, "Sequencer deadlock check")
{
    m_outstanding_count = 0;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;

    m_coreId = p->coreid; // for tracking the two CorePair sequencers
    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);
    assert(m_instCache_ptr != NULL);
    assert(m_dataCache_ptr != NULL);

    m_runningGarnetStandalone = p->garnet_standalone;
}

Sequencer::~Sequencer()
{
}

void
Sequencer::wakeup()
{
    assert(drainState() != DrainState::Draining);

    // Check for deadlock of any of the requests
    Cycles current_time = curCycle();

    // Check across all outstanding requests
    int total_outstanding = 0;

    for (const auto &table_entry : m_RequestTable) {
        for (const auto &seq_req : table_entry.second) {
            if (current_time - seq_req.issue_time < m_deadlock_threshold)
                continue;

            panic("Possible Deadlock detected. Aborting!\n version: %d "
                  "request.paddr: 0x%x m_readRequestTable: %d current time: "
                  "%u issue_time: %d difference: %d\n", m_version,
                  seq_req.pkt->getAddr(), table_entry.second.size(),
                  current_time * clockPeriod(), seq_req.issue_time
                  * clockPeriod(), (current_time * clockPeriod())
                  - (seq_req.issue_time * clockPeriod()));
        }
        total_outstanding += table_entry.second.size();
    }

    assert(m_outstanding_count == total_outstanding);

    if (m_outstanding_count > 0) {
        // If there are still outstanding requests, keep checking
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }
}
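
// A worked example of the check above (numbers assumed for illustration):
// with m_deadlock_threshold = 500000 cycles, a request issued at cycle 100
// does not trigger the panic until curCycle() - issue_time reaches 500000.
// The clockPeriod() multiplications only convert cycle counts into ticks so
// that the panic message reports absolute simulated times. While requests
// remain outstanding, the event re-schedules itself one threshold ahead via
// clockEdge(m_deadlock_threshold).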

void Sequencer::resetStats()
{
    m_outstandReqHist.reset();
    m_latencyHist.reset();
    m_hitLatencyHist.reset();
    m_missLatencyHist.reset();
    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist[i]->reset();
        m_hitTypeLatencyHist[i]->reset();
        m_missTypeLatencyHist[i]->reset();
        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i][j]->reset();
            m_missTypeMachLatencyHist[i][j]->reset();
        }
    }

    for (int i = 0; i < MachineType_NUM; i++) {
        m_missMachLatencyHist[i]->reset();
        m_hitMachLatencyHist[i]->reset();

        m_IssueToInitialDelayHist[i]->reset();
        m_InitialToForwardDelayHist[i]->reset();
        m_ForwardToFirstResponseDelayHist[i]->reset();
        m_FirstResponseToCompletionDelayHist[i]->reset();

        m_IncompleteTimes[i] = 0;
    }
}

// Insert the request in the request table. Return RequestStatus_Aliased
// if the entry was already present.
RequestStatus
Sequencer::insertRequest(PacketPtr pkt, RubyRequestType primary_type,
                         RubyRequestType secondary_type)
{
    // See if we should schedule a deadlock check
    if (!deadlockCheckEvent.scheduled() &&
        drainState() != DrainState::Draining) {
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }

    Addr line_addr = makeLineAddress(pkt->getAddr());
    // Check if there is any outstanding request for the same cache line.
    auto &seq_req_list = m_RequestTable[line_addr];
    // Create a default entry
    seq_req_list.emplace_back(pkt, primary_type, secondary_type, curCycle());
    m_outstanding_count++;

    if (seq_req_list.size() > 1) {
        return RequestStatus_Aliased;
    }

    m_outstandReqHist.sample(m_outstanding_count);

    return RequestStatus_Ready;
}
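
// Aliasing example for insertRequest() (assuming the usual 64-byte Ruby
// block size): packets to 0x1000 and 0x1038 both map to line address
// 0x1000, so the second packet is appended to the same per-line list and
// RequestStatus_Aliased is returned. Only the head of the list is actually
// issued to the cache; the rest are completed later from writeCallback()
// or readCallback() when the response for the head arrives.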

void
Sequencer::markRemoved()
{
    m_outstanding_count--;
}

void
Sequencer::invalidateSC(Addr address)
{
    AbstractCacheEntry *e = m_dataCache_ptr->lookup(address);
    // The controller has lost the coherence permissions, hence the lock
    // on the cache line maintained by the cache should be cleared.
    if (e && e->isLocked(m_version)) {
        e->clearLocked();
    }
}
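
// invalidateSC() is invoked when the controller loses permission on the
// line, e.g. when a remote writer invalidates it between an LL and its SC;
// clearing the lock here is what later makes the SC fail in handleLlsc().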

bool
Sequencer::handleLlsc(Addr address, SequencerRequest* request)
{
    AbstractCacheEntry *e = m_dataCache_ptr->lookup(address);
    if (!e)
        return true;

    // The success flag indicates whether the LLSC operation was successful.
    // LL ops will always succeed, but SC may fail if the cache line is no
    // longer locked.
    bool success = true;
    if (request->m_type == RubyRequestType_Store_Conditional) {
        if (!e->isLocked(m_version)) {
            //
            // For failed SC requests, indicate the failure to the cpu by
            // setting the extra data to zero.
            //
            request->pkt->req->setExtraData(0);
            success = false;
        } else {
            //
            // For successful SC requests, indicate the success to the cpu by
            // setting the extra data to one.
            //
            request->pkt->req->setExtraData(1);
        }
        //
        // Independent of success, all SC operations must clear the lock
        //
        e->clearLocked();
    } else if (request->m_type == RubyRequestType_Load_Linked) {
        //
        // Note: To fully follow Alpha LLSC semantics, should the LL clear any
        // previously locked cache lines?
        //
        e->setLocked(m_version);
    } else if (e->isLocked(m_version)) {
        //
        // Normal writes should clear the locked address
        //
        e->clearLocked();
    }
    return success;
}
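
// End-to-end LL/SC walk-through of handleLlsc(): a Load_Linked to line A
// locks A for this sequencer's m_version; the paired Store_Conditional
// succeeds only if that lock is still present, reporting the outcome to
// the CPU through req->setExtraData(1 or 0). Any intervening ordinary
// store to A, or an invalidateSC() triggered by a remote writer, clears
// the lock and thereby forces the SC to fail.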

void
Sequencer::recordMissLatency(SequencerRequest* srequest, bool llscSuccess,
                             const MachineType respondingMach,
                             bool isExternalHit, Cycles initialRequestTime,
                             Cycles forwardRequestTime,
                             Cycles firstResponseTime)
{
    RubyRequestType type = srequest->m_type;
    Cycles issued_time = srequest->issue_time;
    Cycles completion_time = curCycle();

    assert(curCycle() >= issued_time);
    Cycles total_lat = completion_time - issued_time;

    if (initialRequestTime < issued_time) {
        // if the request was combined in the protocol with an earlier request
        // for the same address, it is possible that it will return an
        // initialRequestTime corresponding to the earlier request. Since
        // Cycles is unsigned, we can't let this request get profiled below.

        total_lat = Cycles(0);
    }

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n",
             curTick(), m_version, "Seq", llscSuccess ? "Done" : "SC_Failed",
             "", "", printAddress(srequest->pkt->getAddr()), total_lat);

    m_latencyHist.sample(total_lat);
    m_typeLatencyHist[type]->sample(total_lat);

    if (isExternalHit) {
        m_missLatencyHist.sample(total_lat);
        m_missTypeLatencyHist[type]->sample(total_lat);

        if (respondingMach != MachineType_NUM) {
            m_missMachLatencyHist[respondingMach]->sample(total_lat);
            m_missTypeMachLatencyHist[type][respondingMach]->sample(total_lat);

            if ((issued_time <= initialRequestTime) &&
                (initialRequestTime <= forwardRequestTime) &&
                (forwardRequestTime <= firstResponseTime) &&
                (firstResponseTime <= completion_time)) {

                m_IssueToInitialDelayHist[respondingMach]->sample(
                    initialRequestTime - issued_time);
                m_InitialToForwardDelayHist[respondingMach]->sample(
                    forwardRequestTime - initialRequestTime);
                m_ForwardToFirstResponseDelayHist[respondingMach]->sample(
                    firstResponseTime - forwardRequestTime);
                m_FirstResponseToCompletionDelayHist[respondingMach]->sample(
                    completion_time - firstResponseTime);
            } else {
                m_IncompleteTimes[respondingMach]++;
            }
        }
    } else {
        m_hitLatencyHist.sample(total_lat);
        m_hitTypeLatencyHist[type]->sample(total_lat);

        if (respondingMach != MachineType_NUM) {
            m_hitMachLatencyHist[respondingMach]->sample(total_lat);
            m_hitTypeMachLatencyHist[type][respondingMach]->sample(total_lat);
        }
    }
}
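
// For a well-formed external miss, the four delay samples recorded above
// partition the total latency exactly (the sum telescopes):
//
//   (initialRequestTime - issued_time)
//     + (forwardRequestTime - initialRequestTime)
//     + (firstResponseTime - forwardRequestTime)
//     + (completion_time - firstResponseTime) == total_lat
//
// Requests whose timestamps are not monotonically ordered (e.g. because the
// protocol did not fill them all in) are counted in m_IncompleteTimes
// instead of being profiled in the breakdown histograms.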

void
Sequencer::writeCallback(Addr address, DataBlock& data,
                         const bool externalHit, const MachineType mach,
                         const Cycles initialRequestTime,
                         const Cycles forwardRequestTime,
                         const Cycles firstResponseTime)
{
    //
    // Free the whole list as we assume we have had the exclusive access
    // to this cache line when response for the write comes back
    //
    assert(address == makeLineAddress(address));
    assert(m_RequestTable.find(address) != m_RequestTable.end());
    auto &seq_req_list = m_RequestTable[address];

    // Perform hitCallback on every cpu request made to this cache block while
    // ruby request was outstanding. Since only 1 ruby request was made,
    // profile the ruby latency once.
    bool ruby_request = true;
    int aliased_stores = 0;
    int aliased_loads = 0;
    while (!seq_req_list.empty()) {
        SequencerRequest &seq_req = seq_req_list.front();
        if (ruby_request) {
            assert(seq_req.m_type != RubyRequestType_LD);
            assert(seq_req.m_type != RubyRequestType_IFETCH);
        }

        // handle write request
        if ((seq_req.m_type != RubyRequestType_LD) &&
            (seq_req.m_type != RubyRequestType_IFETCH)) {
            //
            // For Alpha, properly handle LL, SC, and write requests with
            // respect to locked cache blocks.
            //
            // Not valid for Garnet_standalone protocol
            //
            bool success = true;
            if (!m_runningGarnetStandalone)
                success = handleLlsc(address, &seq_req);

            // Handle SLICC block_on behavior for Locked_RMW accesses. NOTE: the
            // address variable here is assumed to be a line address, so when
            // blocking buffers, must check line addresses.
            if (seq_req.m_type == RubyRequestType_Locked_RMW_Read) {
                // blockOnQueue blocks all first-level cache controller queues
                // waiting on memory accesses for the specified address that go
                // to the specified queue. In this case, a Locked_RMW_Write must
                // go to the mandatory_q before unblocking the first-level
                // controller. This will block standard loads, stores, ifetches,
                // etc.
                m_controller->blockOnQueue(address, m_mandatory_q_ptr);
            } else if (seq_req.m_type == RubyRequestType_Locked_RMW_Write) {
                m_controller->unblock(address);
            }

            if (ruby_request) {
                recordMissLatency(&seq_req, success, mach, externalHit,
                                  initialRequestTime, forwardRequestTime,
                                  firstResponseTime);
            } else {
                aliased_stores++;
            }
            hitCallback(&seq_req, data, success, mach, externalHit,
                        initialRequestTime, forwardRequestTime,
                        firstResponseTime);
        } else {
            // handle read request
            assert(!ruby_request);
            aliased_loads++;
            hitCallback(&seq_req, data, true, mach, externalHit,
                        initialRequestTime, forwardRequestTime,
                        firstResponseTime);
        }
        seq_req_list.pop_front();
        markRemoved();
        ruby_request = false;
    }

    // free all outstanding requests corresponding to this address
    if (seq_req_list.empty()) {
        m_RequestTable.erase(address);
    }
}
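
// Example of the aliasing drain above: if a store to line A is outstanding
// and the core issues two more stores and a load to A, all four sit in
// seq_req_list. When the write response arrives, writeCallback() completes
// every one of them with hitCallback() (the line is assumed to be held with
// exclusive permission, so the queued accesses need no further coherence
// traffic), but only the first, real Ruby request is profiled by
// recordMissLatency().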

void
Sequencer::readCallback(Addr address, DataBlock& data,
                        bool externalHit, const MachineType mach,
                        Cycles initialRequestTime,
                        Cycles forwardRequestTime,
                        Cycles firstResponseTime)
{
    //
    // Free up read requests until we hit the first Write request
    // or end of the corresponding list.
    //
    assert(address == makeLineAddress(address));
    assert(m_RequestTable.find(address) != m_RequestTable.end());
    auto &seq_req_list = m_RequestTable[address];

    // Perform hitCallback on every cpu request made to this cache block while
    // ruby request was outstanding. Since only 1 ruby request was made,
    // profile the ruby latency once.
    bool ruby_request = true;
    int aliased_loads = 0;
    while (!seq_req_list.empty()) {
        SequencerRequest &seq_req = seq_req_list.front();
        if (ruby_request) {
            assert((seq_req.m_type == RubyRequestType_LD) ||
                   (seq_req.m_type == RubyRequestType_IFETCH));
        } else {
            aliased_loads++;
        }
        if ((seq_req.m_type != RubyRequestType_LD) &&
            (seq_req.m_type != RubyRequestType_IFETCH)) {
            // Write request: reissue request to the cache hierarchy
            issueRequest(seq_req.pkt, seq_req.m_second_type);
            break;
        }
        if (ruby_request) {
            recordMissLatency(&seq_req, true, mach, externalHit,
                              initialRequestTime, forwardRequestTime,
                              firstResponseTime);
        }
        hitCallback(&seq_req, data, true, mach, externalHit,
                    initialRequestTime, forwardRequestTime,
                    firstResponseTime);
        seq_req_list.pop_front();
        markRemoved();
        ruby_request = false;
    }

    // free all outstanding requests corresponding to this address
    if (seq_req_list.empty()) {
        m_RequestTable.erase(address);
    }
}
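
// readCallback() differs from writeCallback() in one key way: a read
// response only proves the line is readable. Queued loads at the head of
// the list can therefore be completed, but the first queued store must be
// reissued to the cache hierarchy (the issueRequest() call above) and the
// drain stops there; the remaining entries wait for the write response.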

void
Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data,
                       bool llscSuccess,
                       const MachineType mach, const bool externalHit,
                       const Cycles initialRequestTime,
                       const Cycles forwardRequestTime,
                       const Cycles firstResponseTime)
{
    warn_once("Replacement policy updates recently became the responsibility "
              "of SLICC state machines. Make sure to setMRU() near callbacks "
              "in .sm files!");

    PacketPtr pkt = srequest->pkt;
    Addr request_address(pkt->getAddr());
    RubyRequestType type = srequest->m_type;

    // update the data unless it is a non-data-carrying flush
    if (RubySystem::getWarmupEnabled()) {
        data.setData(pkt->getConstPtr<uint8_t>(),
                     getOffset(request_address), pkt->getSize());
    } else if (!pkt->isFlush()) {
        if ((type == RubyRequestType_LD) ||
            (type == RubyRequestType_IFETCH) ||
            (type == RubyRequestType_RMW_Read) ||
            (type == RubyRequestType_Locked_RMW_Read) ||
            (type == RubyRequestType_Load_Linked)) {
            pkt->setData(
                data.getData(getOffset(request_address), pkt->getSize()));
            DPRINTF(RubySequencer, "read data %s\n", data);
        } else if (pkt->req->isSwap()) {
            std::vector<uint8_t> overwrite_val(pkt->getSize());
            pkt->writeData(&overwrite_val[0]);
            pkt->setData(
                data.getData(getOffset(request_address), pkt->getSize()));
            data.setData(&overwrite_val[0],
                         getOffset(request_address), pkt->getSize());
            DPRINTF(RubySequencer, "swap data %s\n", data);
        } else if (type != RubyRequestType_Store_Conditional || llscSuccess) {
            // Types of stores set the actual data here, apart from
            // failed Store Conditional requests
            data.setData(pkt->getConstPtr<uint8_t>(),
                         getOffset(request_address), pkt->getSize());
            DPRINTF(RubySequencer, "set data %s\n", data);
        }
    }

    // If using the RubyTester, update the RubyTester sender state's
    // subBlock with the received data. The tester will later access
    // this state.
    if (m_usingRubyTester) {
        DPRINTF(RubySequencer, "hitCallback %s 0x%x using RubyTester\n",
                pkt->cmdString(), pkt->getAddr());
        RubyTester::SenderState* testerSenderState =
            pkt->findNextSenderState<RubyTester::SenderState>();
        assert(testerSenderState);
        testerSenderState->subBlock.mergeFrom(data);
    }

    RubySystem *rs = m_ruby_system;
    if (RubySystem::getWarmupEnabled()) {
        assert(pkt->req);
        delete pkt;
        rs->m_cache_recorder->enqueueNextFetchRequest();
    } else if (RubySystem::getCooldownEnabled()) {
        delete pkt;
        rs->m_cache_recorder->enqueueNextFlushRequest();
    } else {
        ruby_hit_callback(pkt);
        testDrainComplete();
    }
}
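
// The swap branch above implements an atomic exchange: the packet's write
// data is staged in overwrite_val, the packet is then completed with the
// old block contents, and finally the staged value is written back into
// the DataBlock, so the CPU observes the pre-swap memory value.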

bool
Sequencer::empty() const
{
    return m_RequestTable.empty();
}

RequestStatus
Sequencer::makeRequest(PacketPtr pkt)
{
    if (m_outstanding_count >= m_max_outstanding_requests) {
        return RequestStatus_BufferFull;
    }

    RubyRequestType primary_type = RubyRequestType_NULL;
    RubyRequestType secondary_type = RubyRequestType_NULL;

    if (pkt->isLLSC()) {
        //
        // Alpha LL/SC instructions need to be handled carefully by the cache
        // coherence protocol to ensure they follow the proper semantics. In
        // particular, by identifying the operations as atomic, the protocol
        // should understand that migratory sharing optimizations should not
        // be performed (i.e. a load between the LL and SC should not steal
        // away exclusive permission).
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing SC\n");
            primary_type = RubyRequestType_Store_Conditional;
        } else {
            DPRINTF(RubySequencer, "Issuing LL\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Load_Linked;
        }
        secondary_type = RubyRequestType_ATOMIC;
    } else if (pkt->req->isLockedRMW()) {
        //
        // x86 locked instructions are translated to store cache coherence
        // requests because these requests should always be treated as read
        // exclusive operations and should leverage any migratory sharing
        // optimization built into the protocol.
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
            primary_type = RubyRequestType_Locked_RMW_Write;
        } else {
            DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Locked_RMW_Read;
        }
        secondary_type = RubyRequestType_ST;
    } else {
        //
        // To support SwapReq, we need to check isWrite() first: a SwapReq
        // should always be treated like a write, but since a SwapReq implies
        // both isWrite() and isRead() are true, check isWrite() first here.
        //
        if (pkt->isWrite()) {
            //
            // Note: M5 packets do not differentiate ST from RMW_Write
            //
            primary_type = secondary_type = RubyRequestType_ST;
        } else if (pkt->isRead()) {
            if (pkt->req->isInstFetch()) {
                primary_type = secondary_type = RubyRequestType_IFETCH;
            } else {
                bool storeCheck = false;
                // only X86 needs the store check
                if (system->getArch() == Arch::X86ISA) {
                    uint32_t flags = pkt->req->getFlags();
                    storeCheck = flags &
                        (X86ISA::StoreCheck << X86ISA::FlagShift);
                }
                if (storeCheck) {
                    primary_type = RubyRequestType_RMW_Read;
                    secondary_type = RubyRequestType_ST;
                } else {
                    primary_type = secondary_type = RubyRequestType_LD;
                }
            }
        } else if (pkt->isFlush()) {
            primary_type = secondary_type = RubyRequestType_FLUSH;
        } else {
            panic("Unsupported ruby packet type\n");
        }
    }

    // Check if the line is blocked for a Locked_RMW
    if (m_controller->isBlocked(makeLineAddress(pkt->getAddr())) &&
        (primary_type != RubyRequestType_Locked_RMW_Write)) {
        // Return that this request's cache line address aliases with
        // a prior request that locked the cache line. The request cannot
        // proceed until the cache line is unlocked by a Locked_RMW_Write
        return RequestStatus_Aliased;
    }

    RequestStatus status = insertRequest(pkt, primary_type, secondary_type);

    // It is OK to receive RequestStatus_Aliased, it can be considered Issued
    if (status != RequestStatus_Ready && status != RequestStatus_Aliased)
        return status;
    // non-aliased with any existing request in the request table, just issue
    // to the cache
    if (status != RequestStatus_Aliased)
        issueRequest(pkt, secondary_type);

    // TODO: issue hardware prefetches here
    return RequestStatus_Issued;
}
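
// Summary of the packet-to-Ruby type mapping implemented above:
//
//   LL / SC             -> Load_Linked / Store_Conditional (secondary ATOMIC)
//   x86 locked RMW      -> Locked_RMW_Read / Locked_RMW_Write (secondary ST)
//   write (incl. swap)  -> ST
//   instruction fetch   -> IFETCH
//   x86 StoreCheck load -> RMW_Read (secondary ST)
//   other load          -> LD
//   flush               -> FLUSH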

void
Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
{
    assert(pkt != NULL);
    ContextID proc_id = pkt->req->hasContextId() ?
        pkt->req->contextId() : InvalidContextID;

    ContextID core_id = coreId();

    // If valid, copy the pc to the ruby request
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    // check if the packet has data as for example prefetch and flush
    // requests do not
    std::shared_ptr<RubyRequest> msg =
        std::make_shared<RubyRequest>(clockEdge(), pkt->getAddr(),
                                      pkt->isFlush() ?
                                      nullptr : pkt->getPtr<uint8_t>(),
                                      pkt->getSize(), pc, secondary_type,
                                      RubyAccessMode_Supervisor, pkt,
                                      PrefetchBit_No, proc_id, core_id);

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %s\n",
            curTick(), m_version, "Seq", "Begin", "", "",
            printAddress(msg->getPhysicalAddress()),
            RubyRequestType_to_string(secondary_type));

    Tick latency = cyclesToTicks(
                        m_controller->mandatoryQueueLatency(secondary_type));
    assert(latency > 0);

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, clockEdge(), latency);
}
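
// Timing note for the enqueue above: the message becomes visible to the
// controller `latency` ticks after the current clock edge. For example
// (numbers assumed), with a 1 GHz Ruby clock (1000-tick period) and a
// one-cycle mandatoryQueueLatency, a request issued at the edge at tick
// 5000 is dequeued by the controller no earlier than tick 6000.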

template <class KEY, class VALUE>
std::ostream &
operator<<(ostream &out, const std::unordered_map<KEY, VALUE> &map)
{
    for (const auto &table_entry : map) {
        out << "[ " << table_entry.first << " =";
        for (const auto &seq_req : table_entry.second) {
            out << " " << RubyRequestType_to_string(seq_req.m_second_type);
        }
        out << " ]";
    }

    return out;
}

void
Sequencer::print(ostream& out) const
{
    out << "[Sequencer: " << m_version
        << ", outstanding requests: " << m_outstanding_count
        << ", request table: " << m_RequestTable
        << "]";
}

// This can be called from setState whenever coherence permissions are
// upgraded. When invoked, coherence violations will be checked for the
// given block.
void
Sequencer::checkCoherence(Addr addr)
{
}

void
Sequencer::recordRequestType(SequencerRequestType requestType) {
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            SequencerRequestType_to_string(requestType));
}

void
Sequencer::evictionCallback(Addr address)
{
    ruby_eviction_callback(address);
}

void
Sequencer::regStats()
{
    RubyPort::regStats();

    // These statistical variables are not for display.
    // The profiler will collate these across different
    // sequencers and display those collated statistics.
    m_outstandReqHist.init(10);
    m_latencyHist.init(10);
    m_hitLatencyHist.init(10);
    m_missLatencyHist.init(10);

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist.push_back(new Stats::Histogram());
        m_typeLatencyHist[i]->init(10);

        m_hitTypeLatencyHist.push_back(new Stats::Histogram());
        m_hitTypeLatencyHist[i]->init(10);

        m_missTypeLatencyHist.push_back(new Stats::Histogram());
        m_missTypeLatencyHist[i]->init(10);
    }

    for (int i = 0; i < MachineType_NUM; i++) {
        m_hitMachLatencyHist.push_back(new Stats::Histogram());
        m_hitMachLatencyHist[i]->init(10);

        m_missMachLatencyHist.push_back(new Stats::Histogram());
        m_missMachLatencyHist[i]->init(10);

        m_IssueToInitialDelayHist.push_back(new Stats::Histogram());
        m_IssueToInitialDelayHist[i]->init(10);

        m_InitialToForwardDelayHist.push_back(new Stats::Histogram());
        m_InitialToForwardDelayHist[i]->init(10);

        m_ForwardToFirstResponseDelayHist.push_back(new Stats::Histogram());
        m_ForwardToFirstResponseDelayHist[i]->init(10);

        m_FirstResponseToCompletionDelayHist.push_back(new Stats::Histogram());
        m_FirstResponseToCompletionDelayHist[i]->init(10);
    }

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_hitTypeMachLatencyHist.push_back(std::vector<Stats::Histogram *>());
        m_missTypeMachLatencyHist.push_back(std::vector<Stats::Histogram *>());

        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i].push_back(new Stats::Histogram());
            m_hitTypeMachLatencyHist[i][j]->init(10);

            m_missTypeMachLatencyHist[i].push_back(new Stats::Histogram());
            m_missTypeMachLatencyHist[i][j]->init(10);
        }
    }
}