gem5 v24.1.0.1
Sequencer.cc
1/*
2 * Copyright (c) 2019-2021,2023 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
15 * Copyright (c) 2013 Advanced Micro Devices, Inc.
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#include "mem/ruby/system/Sequencer.hh"
43
44#include "arch/x86/ldstflags.hh"
45#include "base/compiler.hh"
46#include "base/logging.hh"
47#include "base/str.hh"
48#include "cpu/testers/rubytest/RubyTester.hh"
49#include "debug/LLSC.hh"
50#include "debug/MemoryAccess.hh"
51#include "debug/ProtocolTrace.hh"
52#include "debug/RubyHitMiss.hh"
53#include "debug/RubySequencer.hh"
54#include "debug/RubyStats.hh"
55#include "mem/packet.hh"
56#include "mem/ruby/profiler/Profiler.hh"
57#include "mem/ruby/protocol/PrefetchBit.hh"
58#include "mem/ruby/protocol/RubyAccessMode.hh"
59#include "mem/ruby/slicc_interface/RubyRequest.hh"
60#include "mem/ruby/slicc_interface/RubySlicc_Util.hh"
61#include "mem/ruby/system/RubySystem.hh"
62#include "sim/system.hh"
63
64namespace gem5
65{
66
67namespace ruby
68{
69
70Sequencer::Sequencer(const RubySequencerParams &p)
71 : RubyPort(p), m_IncompleteTimes(MachineType_NUM),
72 deadlockCheckEvent([this]{ wakeup(); }, "Sequencer deadlock check")
73{
74 m_outstanding_count = 0;
75
76 m_ruby_system = p.ruby_system;
77
78 m_dataCache_ptr = p.dcache;
79 m_max_outstanding_requests = p.max_outstanding_requests;
80 m_deadlock_threshold = p.deadlock_threshold;
81
82 m_coreId = p.coreid; // for tracking the two CorePair sequencers
83 assert(m_max_outstanding_requests > 0);
84 assert(m_deadlock_threshold > 0);
85
86 m_unaddressedTransactionCnt = 0;
87
88 m_runningGarnetStandalone = p.garnet_standalone;
89
90 m_num_pending_invs = 0;
91 m_cache_inv_pkt = nullptr;
92
93 // These statistical variables are not for display.
94 // The profiler will collate these across different
95 // sequencers and display those collated statistics.
96 m_outstandReqHist.init(10);
97 m_latencyHist.init(10);
98 m_hitLatencyHist.init(10);
99 m_missLatencyHist.init(10);
100
101 for (int i = 0; i < RubyRequestType_NUM; i++) {
102 m_typeLatencyHist.push_back(new statistics::Histogram());
103 m_typeLatencyHist[i]->init(10);
104
105 m_hitTypeLatencyHist.push_back(new statistics::Histogram());
106 m_hitTypeLatencyHist[i]->init(10);
107
108 m_missTypeLatencyHist.push_back(new statistics::Histogram());
109 m_missTypeLatencyHist[i]->init(10);
110 }
111
112 for (int i = 0; i < MachineType_NUM; i++) {
113 m_hitMachLatencyHist.push_back(new statistics::Histogram());
114 m_hitMachLatencyHist[i]->init(10);
115
116 m_missMachLatencyHist.push_back(new statistics::Histogram());
117 m_missMachLatencyHist[i]->init(10);
118
119 m_IssueToInitialDelayHist.push_back(new statistics::Histogram());
120 m_IssueToInitialDelayHist[i]->init(10);
121
122 m_InitialToForwardDelayHist.push_back(new statistics::Histogram());
123 m_InitialToForwardDelayHist[i]->init(10);
124
125 m_ForwardToFirstResponseDelayHist.push_back(
126 new statistics::Histogram());
127 m_ForwardToFirstResponseDelayHist[i]->init(10);
128
129 m_FirstResponseToCompletionDelayHist.push_back(
130 new statistics::Histogram());
131 m_FirstResponseToCompletionDelayHist[i]->init(10);
132 }
133
134 for (int i = 0; i < RubyRequestType_NUM; i++) {
135 m_hitTypeMachLatencyHist.push_back(
136 std::vector<statistics::Histogram *>());
137 m_missTypeMachLatencyHist.push_back(
138 std::vector<statistics::Histogram *>());
139
140 for (int j = 0; j < MachineType_NUM; j++) {
141 m_hitTypeMachLatencyHist[i].push_back(new statistics::Histogram());
142 m_hitTypeMachLatencyHist[i][j]->init(10);
143
144 m_missTypeMachLatencyHist[i].push_back(
145 new statistics::Histogram());
146 m_missTypeMachLatencyHist[i][j]->init(10);
147 }
148 }
149
150}
151
152Sequencer::~Sequencer()
153{
154}
155
156void
157Sequencer::llscLoadLinked(const Addr claddr)
158{
159 fatal_if(m_dataCache_ptr == NULL,
160 "%s must have a dcache object to support LLSC requests.", name());
161 AbstractCacheEntry *line = m_dataCache_ptr->lookup(claddr);
162 if (line) {
163 line->setLocked(m_version);
164 DPRINTF(LLSC, "LLSC Monitor - inserting load linked - "
165 "addr=0x%lx - cpu=%u\n", claddr, m_version);
166 }
167}
168
169void
170Sequencer::llscClearMonitor(const Addr claddr)
171{
172 // clear monitor is called for all stores and evictions
173 if (m_dataCache_ptr == NULL)
174 return;
175 AbstractCacheEntry *line = m_dataCache_ptr->lookup(claddr);
176 if (line && line->isLocked(m_version)) {
177 line->clearLocked();
178 DPRINTF(LLSC, "LLSC Monitor - clearing due to store - "
179 "addr=0x%lx - cpu=%u\n", claddr, m_version);
180 }
181}
182
183bool
184Sequencer::llscStoreConditional(const Addr claddr)
185{
186 fatal_if(m_dataCache_ptr == NULL,
187 "%s must have a dcache object to support LLSC requests.", name());
188 AbstractCacheEntry *line = m_dataCache_ptr->lookup(claddr);
189 if (!line)
190 return false;
191
192 DPRINTF(LLSC, "LLSC Monitor - clearing due to "
193 "store conditional - "
194 "addr=0x%lx - cpu=%u\n",
195 claddr, m_version);
196
197 if (line->isLocked(m_version)) {
198 line->clearLocked();
199 return true;
200 } else {
201 line->clearLocked();
202 return false;
203 }
204}
205
206bool
207Sequencer::llscCheckMonitor(const Addr address)
208{
209 assert(m_dataCache_ptr != NULL);
210 const Addr claddr = makeLineAddress(address);
211 AbstractCacheEntry *line = m_dataCache_ptr->lookup(claddr);
212 if (!line)
213 return false;
214
215 if (line->isLocked(m_version)) {
216 return true;
217 } else {
218 return false;
219 }
220}
221
222void
223Sequencer::llscClearLocalMonitor()
224{
225 m_dataCache_ptr->clearLockedAll(m_version);
226}
227
228void
229Sequencer::wakeup()
230{
231 assert(drainState() != DrainState::Draining);
232
233 // Check for deadlock of any of the requests
234 Cycles current_time = curCycle();
235
236 // Check across all outstanding requests
237 [[maybe_unused]] int total_outstanding = 0;
238
239 for (const auto &table_entry : m_RequestTable) {
240 for (const auto &seq_req : table_entry.second) {
241 if (current_time - seq_req.issue_time < m_deadlock_threshold)
242 continue;
243
244 panic("Possible Deadlock detected. Aborting!\n version: %d "
245 "request.paddr: 0x%x m_readRequestTable: %d current time: "
246 "%u issue_time: %d difference: %d\n", m_version,
247 seq_req.pkt->getAddr(), table_entry.second.size(),
248 current_time * clockPeriod(), seq_req.issue_time
249 * clockPeriod(), (current_time * clockPeriod())
250 - (seq_req.issue_time * clockPeriod()));
251 }
252 total_outstanding += table_entry.second.size();
253 }
254
255 assert(m_outstanding_count == total_outstanding);
256
257 if (m_outstanding_count > 0) {
258 // If there are still outstanding requests, keep checking
259 schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
260 }
261}
262
263int
264Sequencer::functionalWrite(Packet *func_pkt)
265{
266 int num_written = RubyPort::functionalWrite(func_pkt);
267
268 for (const auto &table_entry : m_RequestTable) {
269 for (const auto& seq_req : table_entry.second) {
270 if (seq_req.functionalWrite(func_pkt))
271 ++num_written;
272 }
273 }
274 // Functional writes to addresses being monitored
275 // will fail (remove) the monitor entry.
276 llscClearMonitor(func_pkt->getAddr());
277
278 return num_written;
279}
280
281Sequencer::resetStats()
282{
283 m_outstandReqHist.reset();
284 m_latencyHist.reset();
285 m_hitLatencyHist.reset();
286 m_missLatencyHist.reset();
287 for (int i = 0; i < RubyRequestType_NUM; i++) {
288 m_typeLatencyHist[i]->reset();
289 m_hitTypeLatencyHist[i]->reset();
290 m_missTypeLatencyHist[i]->reset();
291 for (int j = 0; j < MachineType_NUM; j++) {
292 m_hitTypeMachLatencyHist[i][j]->reset();
293 m_missTypeMachLatencyHist[i][j]->reset();
294 }
295 }
296
297 for (int i = 0; i < MachineType_NUM; i++) {
298 m_missMachLatencyHist[i]->reset();
299 m_hitMachLatencyHist[i]->reset();
300
301 m_IssueToInitialDelayHist[i]->reset();
302 m_InitialToForwardDelayHist[i]->reset();
303 m_ForwardToFirstResponseDelayHist[i]->reset();
304 m_FirstResponseToCompletionDelayHist[i]->reset();
305
306 m_IncompleteTimes[i] = 0;
307 }
308}
309
310// Insert the request in the request table. Return RequestStatus_Aliased
311// if the entry was already present.
312RequestStatus
313Sequencer::insertRequest(PacketPtr pkt, RubyRequestType primary_type,
314 RubyRequestType secondary_type)
315{
316 // See if we should schedule a deadlock check
317 if (!deadlockCheckEvent.scheduled() &&
318 drainState() != DrainState::Draining) {
319 schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
320 }
321
322 if (isTlbiCmdRequest(primary_type)) {
323 assert(primary_type == secondary_type);
324
325 switch (primary_type) {
326 case RubyRequestType_TLBI_EXT_SYNC_COMP:
327 // Don't have to store any data on this
328 break;
329 case RubyRequestType_TLBI:
330 case RubyRequestType_TLBI_SYNC:
331 {
332 incrementUnaddressedTransactionCnt();
333
334 // returns pair<inserted element, was inserted>
335 [[maybe_unused]] auto insert_data = \
336 m_UnaddressedRequestTable.emplace(
337 getCurrentUnaddressedTransactionID(),
338 SequencerRequest(
339 pkt, primary_type, secondary_type, curCycle()));
340
341 // if insert_data.second is false, wasn't inserted
342 assert(insert_data.second &&
343 "Another TLBI request with the same ID exists");
344
345 DPRINTF(RubySequencer, "Inserting TLBI request %016x\n",
346 getCurrentUnaddressedTransactionID());
347
348 break;
349 }
350
351 default:
352 panic("Unexpected TLBI RubyRequestType");
353 }
354
355 return RequestStatus_Ready;
356 }
357
358 // If command is MemSyncReq, it is used to invalidate the cache.
359 // As the cache invalidation requests are already issued in invL1(),
360 // there is no need to create a new request for the same here.
361 // Instead, return RequestStatus_Aliased, and make the sequencer skip
362 // an extra issueRequest
363 if (pkt->cmd == MemCmd::MemSyncReq) {
364 return RequestStatus_Aliased;
365 }
366
367 Addr line_addr = makeLineAddress(pkt->getAddr());
368 // Check if there is any outstanding request for the same cache line.
369 auto &seq_req_list = m_RequestTable[line_addr];
370 // Create a default entry
371 seq_req_list.emplace_back(pkt, primary_type,
372 secondary_type, curCycle());
373 m_outstanding_count++;
374
375 if (seq_req_list.size() > 1) {
376 return RequestStatus_Aliased;
377 }
378
379 m_outstandReqHist.sample(m_outstanding_count);
380
381 return RequestStatus_Ready;
382}
383
384void
385Sequencer::markRemoved()
386{
387 m_outstanding_count--;
388}
389
390void
391Sequencer::recordMissLatency(SequencerRequest* srequest, bool llscSuccess,
392 const MachineType respondingMach,
393 bool isExternalHit, Cycles initialRequestTime,
394 Cycles forwardRequestTime,
395 Cycles firstResponseTime)
396{
397 RubyRequestType type = srequest->m_type;
398 Cycles issued_time = srequest->issue_time;
399 Cycles completion_time = curCycle();
400
401 assert(curCycle() >= issued_time);
402 Cycles total_lat = completion_time - issued_time;
403
404 if ((initialRequestTime != 0) && (initialRequestTime < issued_time)) {
405 // if the request was combined in the protocol with an earlier request
406 // for the same address, it is possible that it will return an
407 // initialRequestTime corresponding to the earlier request. Since Cycles
408 // is unsigned, we can't let this request get profiled below.
409
410 total_lat = Cycles(0);
411 }
412
413 DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n",
414 curTick(), m_version, "Seq", llscSuccess ? "Done" : "SC_Failed",
415 "", "", printAddress(srequest->pkt->getAddr()), total_lat);
416
417 m_latencyHist.sample(total_lat);
418 m_typeLatencyHist[type]->sample(total_lat);
419
420 if (isExternalHit) {
421 m_missLatencyHist.sample(total_lat);
422 m_missTypeLatencyHist[type]->sample(total_lat);
423
424 if (respondingMach != MachineType_NUM) {
425 m_missMachLatencyHist[respondingMach]->sample(total_lat);
426 m_missTypeMachLatencyHist[type][respondingMach]->sample(total_lat);
427
428 if ((issued_time <= initialRequestTime) &&
429 (initialRequestTime <= forwardRequestTime) &&
430 (forwardRequestTime <= firstResponseTime) &&
431 (firstResponseTime <= completion_time)) {
432
433 m_IssueToInitialDelayHist[respondingMach]->sample(
434 initialRequestTime - issued_time);
435 m_InitialToForwardDelayHist[respondingMach]->sample(
436 forwardRequestTime - initialRequestTime);
437 m_ForwardToFirstResponseDelayHist[respondingMach]->sample(
438 firstResponseTime - forwardRequestTime);
439 m_FirstResponseToCompletionDelayHist[respondingMach]->sample(
440 completion_time - firstResponseTime);
441 } else {
442 m_IncompleteTimes[respondingMach]++;
443 }
444 }
445 } else {
446 m_hitLatencyHist.sample(total_lat);
447 m_hitTypeLatencyHist[type]->sample(total_lat);
448
449 if (respondingMach != MachineType_NUM) {
450 m_hitMachLatencyHist[respondingMach]->sample(total_lat);
451 m_hitTypeMachLatencyHist[type][respondingMach]->sample(total_lat);
452 }
453 }
454}
455
456void
457Sequencer::writeCallbackScFail(Addr address, DataBlock &data)
458{
459 llscClearMonitor(address);
460 writeCallback(address, data);
461}
462
463void
464Sequencer::writeCallback(Addr address, DataBlock &data,
465 const bool externalHit, const MachineType mach,
466 const Cycles initialRequestTime,
467 const Cycles forwardRequestTime,
468 const Cycles firstResponseTime,
469 const bool noCoales)
470{
471 //
472 // Free the whole list as we assume we have had the exclusive access
473 // to this cache line when response for the write comes back
474 //
475 assert(address == makeLineAddress(address));
476 assert(m_RequestTable.find(address) != m_RequestTable.end());
477 auto &seq_req_list = m_RequestTable[address];
478
479 // Perform hitCallback on every cpu request made to this cache block while
480 // ruby request was outstanding. Since only 1 ruby request was made,
481 // profile the ruby latency once.
482 bool ruby_request = true;
483 while (!seq_req_list.empty()) {
484 SequencerRequest &seq_req = seq_req_list.front();
485 // Atomic Request may be executed remotely in the cache hierarchy
486 bool atomic_req =
487 ((seq_req.m_type == RubyRequestType_ATOMIC_RETURN) ||
488 (seq_req.m_type == RubyRequestType_ATOMIC_NO_RETURN));
489
490 if ((noCoales || atomic_req) && !ruby_request) {
491 // Do not process follow-up requests
492 // (e.g. if full line not present)
493 // Reissue to the cache hierarchy
494 issueRequest(seq_req.pkt, seq_req.m_second_type);
495 break;
496 }
497
498 if (ruby_request) {
499 assert(seq_req.m_type != RubyRequestType_LD);
500 assert(seq_req.m_type != RubyRequestType_Load_Linked);
501 assert(seq_req.m_type != RubyRequestType_IFETCH);
502 assert(seq_req.m_type != RubyRequestType_ATOMIC_RETURN);
503 assert(seq_req.m_type != RubyRequestType_ATOMIC_NO_RETURN);
504 }
505
506 // handle write request
507 if ((seq_req.m_type != RubyRequestType_LD) &&
508 (seq_req.m_type != RubyRequestType_Load_Linked) &&
509 (seq_req.m_type != RubyRequestType_IFETCH)) {
510 // LL/SC support (tested with ARMv8)
511 bool success = true;
512
513 if (seq_req.m_type != RubyRequestType_Store_Conditional) {
514 // Regular stores to addresses being monitored
515 // will fail (remove) the monitor entry.
516 llscClearMonitor(address);
517 } else {
518 // Store conditionals must first check the monitor
519 // if they will succeed or not
520 success = llscStoreConditional(address);
521 seq_req.pkt->req->setExtraData(success ? 1 : 0);
522 }
523
524 // Handle SLICC block_on behavior for Locked_RMW accesses. NOTE: the
525 // address variable here is assumed to be a line address, so when
526 // blocking buffers, must check line addresses.
527 if (seq_req.m_type == RubyRequestType_Locked_RMW_Read) {
528 // blockOnQueue blocks all first-level cache controller queues
529 // waiting on memory accesses for the specified address that go
530 // to the specified queue. In this case, a Locked_RMW_Write must
531 // go to the mandatory_q before unblocking the first-level
532 // controller. This will block standard loads, stores, ifetches,
533 // etc.
534 m_controller->blockOnQueue(address, m_mandatory_q_ptr);
535 } else if (seq_req.m_type == RubyRequestType_Locked_RMW_Write) {
536 m_controller->unblock(address);
537 }
538
539 if (ruby_request) {
540 recordMissLatency(&seq_req, success, mach, externalHit,
541 initialRequestTime, forwardRequestTime,
542 firstResponseTime);
543 }
544
545 markRemoved();
546 hitCallback(&seq_req, data, success, mach, externalHit,
547 initialRequestTime, forwardRequestTime,
548 firstResponseTime, !ruby_request);
549 ruby_request = false;
550 } else {
551 // handle read request
552 assert(!ruby_request);
553 markRemoved();
554 hitCallback(&seq_req, data, true, mach, externalHit,
555 initialRequestTime, forwardRequestTime,
556 firstResponseTime, !ruby_request);
557 }
558 seq_req_list.pop_front();
559 }
560
561 // free all outstanding requests corresponding to this address
562 if (seq_req_list.empty()) {
563 m_RequestTable.erase(address);
564 }
565}
566
567bool
568Sequencer::processReadCallback(SequencerRequest &seq_req,
569 DataBlock& data,
570 const bool ruby_request,
571 bool externalHit,
572 const MachineType mach,
573 Cycles initialRequestTime,
574 Cycles forwardRequestTime,
575 Cycles firstResponseTime)
576{
577 if (ruby_request) {
578 assert((seq_req.m_type == RubyRequestType_LD) ||
579 (seq_req.m_type == RubyRequestType_Load_Linked) ||
580 (seq_req.m_type == RubyRequestType_IFETCH));
581 }
582 if ((seq_req.m_type != RubyRequestType_LD) &&
583 (seq_req.m_type != RubyRequestType_Load_Linked) &&
584 (seq_req.m_type != RubyRequestType_IFETCH) &&
585 (seq_req.m_type != RubyRequestType_REPLACEMENT)) {
586 // Write request: reissue request to the cache hierarchy
587 issueRequest(seq_req.pkt, seq_req.m_second_type);
588 return true;
589 }
590 return false;
591
592}
593
594void
595Sequencer::readCallback(Addr address, DataBlock &data,
596 bool externalHit, const MachineType mach,
597 Cycles initialRequestTime,
598 Cycles forwardRequestTime,
599 Cycles firstResponseTime)
600{
601 //
602 // Free up read requests until we hit the first Write request
603 // or end of the corresponding list.
604 //
605 assert(address == makeLineAddress(address));
606 assert(m_RequestTable.find(address) != m_RequestTable.end());
607 auto &seq_req_list = m_RequestTable[address];
608
609 // Perform hitCallback on every cpu request made to this cache block while
610 // ruby request was outstanding. Since only 1 ruby request was made,
611 // profile the ruby latency once.
612 bool ruby_request = true;
613 while (!seq_req_list.empty()) {
614 SequencerRequest &seq_req = seq_req_list.front();
615 if (processReadCallback(seq_req, data, ruby_request, externalHit, mach,
616 initialRequestTime, forwardRequestTime,
617 firstResponseTime)) {
618 break;
619 }
620 if (ruby_request) {
621 recordMissLatency(&seq_req, true, mach, externalHit,
622 initialRequestTime, forwardRequestTime,
623 firstResponseTime);
624 }
625 markRemoved();
626 hitCallback(&seq_req, data, true, mach, externalHit,
627 initialRequestTime, forwardRequestTime,
628 firstResponseTime, !ruby_request);
629 ruby_request = false;
630 seq_req_list.pop_front();
631 }
632
633 // free all outstanding requests corresponding to this address
634 if (seq_req_list.empty()) {
635 m_RequestTable.erase(address);
636 }
637}
638
639void
640Sequencer::atomicCallback(Addr address, DataBlock &data,
641 const bool externalHit, const MachineType mach,
642 const Cycles initialRequestTime,
643 const Cycles forwardRequestTime,
644 const Cycles firstResponseTime)
645{
646 //
647 // Free the first request (an atomic operation) from the list.
648 // Then issue the next request to ruby system as we cannot
649 // assume the cache line is present in the cache
650 // (the operation could be performed remotely)
651 //
652 assert(address == makeLineAddress(address));
653 assert(m_RequestTable.find(address) != m_RequestTable.end());
654 auto &seq_req_list = m_RequestTable[address];
655
656 // Perform hitCallback only on the first cpu request that
657 // issued the ruby request
658 bool ruby_request = true;
659 while (!seq_req_list.empty()) {
660 SequencerRequest &seq_req = seq_req_list.front();
661
662 if (ruby_request) {
663 // Check that the request was an atomic memory operation
664 // and record the latency
665 assert((seq_req.m_type == RubyRequestType_ATOMIC_RETURN) ||
666 (seq_req.m_type == RubyRequestType_ATOMIC_NO_RETURN));
667 recordMissLatency(&seq_req, true, mach, externalHit,
668 initialRequestTime, forwardRequestTime,
669 firstResponseTime);
670 } else {
671 // Read, Write or Atomic request:
672 // reissue request to the cache hierarchy
673 // (we don't know if op was performed remotely)
674 issueRequest(seq_req.pkt, seq_req.m_second_type);
675 break;
676 }
677
678 // Atomics clean the monitor entry
679 llscClearMonitor(address);
680
681 markRemoved();
682 ruby_request = false;
683 hitCallback(&seq_req, data, true, mach, externalHit,
684 initialRequestTime, forwardRequestTime,
685 firstResponseTime, false);
686 seq_req_list.pop_front();
687 }
688
689 // free all outstanding requests corresponding to this address
690 if (seq_req_list.empty()) {
691 m_RequestTable.erase(address);
692 }
693}
694
695void
696Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data,
697 bool llscSuccess,
698 const MachineType mach, const bool externalHit,
699 const Cycles initialRequestTime,
700 const Cycles forwardRequestTime,
701 const Cycles firstResponseTime,
702 const bool was_coalesced)
703{
704 warn_once("Replacement policy updates recently became the responsibility "
705 "of SLICC state machines. Make sure to setMRU() near callbacks "
706 "in .sm files!");
707
708 PacketPtr pkt = srequest->pkt;
709 Addr request_address(pkt->getAddr());
710 RubyRequestType type = srequest->m_type;
711
712 if (was_coalesced) {
713 // Notify the controller about a coalesced request so it can properly
714 // account for it in its hit/miss stats and/or train prefetchers
715 // (this is protocol-dependent)
716 m_controller->notifyCoalesced(request_address, type, pkt->req,
717 data, externalHit);
718 }
719
720 // Load-linked handling
721 if (type == RubyRequestType_Load_Linked) {
722 Addr line_addr = makeLineAddress(request_address);
723 llscLoadLinked(line_addr);
724 }
725
726 DPRINTF(RubyHitMiss, "Cache %s at %#x\n",
727 externalHit ? "miss" : "hit",
728 printAddress(request_address));
729
730 // update the data unless it is a non-data-carrying flush
731 if (m_ruby_system->getWarmupEnabled()) {
732 data.setData(pkt);
733 } else if (!pkt->isFlush()) {
734 if ((type == RubyRequestType_LD) ||
735 (type == RubyRequestType_IFETCH) ||
736 (type == RubyRequestType_RMW_Read) ||
737 (type == RubyRequestType_Locked_RMW_Read) ||
738 (type == RubyRequestType_Load_Linked) ||
739 (type == RubyRequestType_ATOMIC_RETURN)) {
740 pkt->setData(
741 data.getData(getOffset(request_address), pkt->getSize()));
742
743 if (type == RubyRequestType_ATOMIC_RETURN) {
744 DPRINTF(RubySequencer, "ATOMIC RETURN data %s\n", data);
745 } else {
746 DPRINTF(RubySequencer, "read data %s\n", data);
747 }
748 } else if (pkt->req->isSwap()) {
749 assert(!pkt->isMaskedWrite());
750 std::vector<uint8_t> overwrite_val(pkt->getSize());
751 pkt->writeData(&overwrite_val[0]);
752 pkt->setData(
753 data.getData(getOffset(request_address), pkt->getSize()));
754 data.setData(&overwrite_val[0],
755 getOffset(request_address), pkt->getSize());
756 DPRINTF(RubySequencer, "swap data %s\n", data);
757 } else if (pkt->isAtomicOp()) {
758 // Set the data in the packet to the old value in the cache
759 pkt->setData(
760 data.getData(getOffset(request_address), pkt->getSize()));
761 DPRINTF(RubySequencer, "AMO original data %s\n", data);
762 // execute AMO operation
763 (*(pkt->getAtomicOp()))(
764 data.getDataMod(getOffset(request_address)));
765 DPRINTF(RubySequencer, "AMO new data %s\n", data);
766 } else if (type != RubyRequestType_Store_Conditional || llscSuccess) {
767 // Types of stores set the actual data here, apart from
768 // failed Store Conditional requests
769 data.setData(pkt);
770 DPRINTF(RubySequencer, "set data %s\n", data);
771 }
772 }
773
774 // If using the RubyTester, update the RubyTester sender state's
775 // subBlock with the received data. The tester will later access
776 // this state.
777 if (m_usingRubyTester) {
778 DPRINTF(RubySequencer, "hitCallback %s 0x%x using RubyTester\n",
779 pkt->cmdString(), pkt->getAddr());
780 RubyTester::SenderState* testerSenderState =
781 pkt->findNextSenderState<RubyTester::SenderState>();
782 assert(testerSenderState);
783 testerSenderState->subBlock.mergeFrom(data);
784 }
785
786 RubySystem *rs = m_ruby_system;
787 if (m_ruby_system->getWarmupEnabled()) {
788 assert(pkt->req);
789 delete pkt;
790 rs->m_cache_recorder->enqueueNextFetchRequest();
791 } else if (m_ruby_system->getCooldownEnabled()) {
792 delete pkt;
793 rs->m_cache_recorder->enqueueNextFlushRequest();
794 } else {
795 ruby_hit_callback(pkt);
796 testDrainComplete();
797 }
798}
799
800void
801Sequencer::unaddressedCallback(Addr unaddressedReqId,
802 RubyRequestType reqType,
803 const MachineType mach,
804 const Cycles initialRequestTime,
805 const Cycles forwardRequestTime,
806 const Cycles firstResponseTime)
807{
808 DPRINTF(RubySequencer, "unaddressedCallback ID:%08x type:%d\n",
809 unaddressedReqId, reqType);
810
811 switch (reqType) {
812 case RubyRequestType_TLBI_EXT_SYNC:
813 {
814 // This should trigger the CPU to wait for stale translations
815 // and send an EXT_SYNC_COMP once complete.
816
817 // Don't look for the ID in our requestTable.
818 // It won't be there because we didn't request this Sync
819 ruby_stale_translation_callback(unaddressedReqId);
820 break;
821 }
822 case RubyRequestType_TLBI:
823 case RubyRequestType_TLBI_SYNC:
824 {
825 // These signal that a TLBI operation that this core initiated
826 // of the respective type (TLBI or Sync) has finished.
827
828 assert(m_UnaddressedRequestTable.find(unaddressedReqId)
829 != m_UnaddressedRequestTable.end());
830
831 {
832 SequencerRequest &seq_req =
833 m_UnaddressedRequestTable.at(unaddressedReqId);
834 assert(seq_req.m_type == reqType);
835
836 PacketPtr pkt = seq_req.pkt;
837
838 ruby_unaddressed_callback(pkt);
839 testDrainComplete();
840 }
841
842 m_UnaddressedRequestTable.erase(unaddressedReqId);
843 break;
844 }
845 default:
846 panic("Unexpected TLBI RubyRequestType");
847 }
848}
849
850void
851Sequencer::completeHitCallback(std::vector<PacketPtr> &mylist)
852{
853 for (auto& pkt : mylist) {
854 // When Ruby is in warmup or cooldown phase, the requests come
855 // from the cache recorder. They do not track which port to use
856 // and do not need to send the response back
857 if (!m_ruby_system->getWarmupEnabled() &&
858 !m_ruby_system->getCooldownEnabled()) {
859 RubyPort::SenderState *ss =
860 safe_cast<RubyPort::SenderState *>(pkt->senderState);
861 MemResponsePort *port = ss->port;
862 assert(port != NULL);
863
864 pkt->senderState = ss->predecessor;
865
866 if (pkt->cmd != MemCmd::WriteReq) {
867 // for WriteReq, we keep the original senderState until
868 // writeCompleteCallback
869 delete ss;
870 }
871
872 port->hitCallback(pkt);
874 }
875 }
876
877 RubySystem *rs = m_ruby_system;
878 if (m_ruby_system->getWarmupEnabled()) {
879 rs->m_cache_recorder->enqueueNextFetchRequest();
880 } else if (m_ruby_system->getCooldownEnabled()) {
881 rs->m_cache_recorder->enqueueNextFlushRequest();
882 } else {
883 testDrainComplete();
884 }
885}
886
887void
888Sequencer::invL1Callback()
889{
890 // Since L1 invalidate is currently done with paddr = 0
891 assert(m_cache_inv_pkt && m_num_pending_invs > 0);
892
893 m_num_pending_invs--;
894
895 if (m_num_pending_invs == 0) {
896 std::vector<PacketPtr> pkt_list { m_cache_inv_pkt };
897 m_cache_inv_pkt = nullptr;
898 completeHitCallback(pkt_list);
899 }
900}
901
902void
903Sequencer::invL1()
904{
905 int size = m_dataCache_ptr->getNumBlocks();
906 DPRINTF(RubySequencer,
907 "There are %d Invalidations outstanding before Cache Walk\n",
908 m_num_pending_invs);
909 // Walk the cache
910 for (int i = 0; i < size; i++) {
911 Addr addr = m_dataCache_ptr->getAddressAtIdx(i);
912 // Evict Read-only data
913 RubyRequestType request_type = RubyRequestType_REPLACEMENT;
914 std::shared_ptr<RubyRequest> msg = std::make_shared<RubyRequest>(
916 addr, 0, 0, request_type, RubyAccessMode_Supervisor,
917 nullptr);
918 DPRINTF(RubySequencer, "Evicting addr 0x%x\n", addr);
919 assert(m_mandatory_q_ptr != NULL);
920 Tick latency = cyclesToTicks(
921 m_controller->mandatoryQueueLatency(request_type));
922 m_mandatory_q_ptr->enqueue(msg, clockEdge(), latency,
926 }
927 DPRINTF(RubySequencer,
928 "There are %d Invalidations outstanding after Cache Walk\n",
929 m_num_pending_invs);
930}
931
932bool
933Sequencer::empty() const
934{
935 return m_RequestTable.empty() &&
936 m_UnaddressedRequestTable.empty();
937}
938
939RequestStatus
940Sequencer::makeRequest(PacketPtr pkt)
941{
942 // HTM abort signals must be allowed to reach the Sequencer
943 // the same cycle they are issued. They cannot be retried.
944 if ((m_outstanding_count >= m_max_outstanding_requests) &&
945 !pkt->req->isHTMAbort()) {
946 return RequestStatus_BufferFull;
947 }
948
949 RubyRequestType primary_type = RubyRequestType_NULL;
950 RubyRequestType secondary_type = RubyRequestType_NULL;
951
952 if (pkt->isLLSC()) {
953 // LL/SC instructions need to be handled carefully by the cache
954 // coherence protocol to ensure they follow the proper semantics. In
955 // particular, by identifying the operations as atomic, the protocol
956 // should understand that migratory sharing optimizations should not
957 // be performed (i.e. a load between the LL and SC should not steal
958 // away exclusive permission).
959 //
960 // The following logic works correctly with the semantics
961 // of armV8 LDEX/STEX instructions.
962 const ProtocolInfo &protocol_info = m_ruby_system->getProtocolInfo();
963
964 if (pkt->isWrite()) {
965 DPRINTF(RubySequencer, "Issuing SC\n");
966 primary_type = RubyRequestType_Store_Conditional;
967 if (protocol_info.getUseSecondaryStoreConditional()) {
968 secondary_type = RubyRequestType_Store_Conditional;
969 } else {
970 secondary_type = RubyRequestType_ST;
971 }
972 } else {
973 DPRINTF(RubySequencer, "Issuing LL\n");
974 assert(pkt->isRead());
975 primary_type = RubyRequestType_Load_Linked;
976 if (protocol_info.getUseSecondaryLoadLinked()) {
977 secondary_type = RubyRequestType_Load_Linked;
978 } else {
979 secondary_type = RubyRequestType_LD;
980 }
981 }
982 } else if (pkt->req->isLockedRMW()) {
983 //
984 // x86 locked instructions are translated to store cache coherence
985 // requests because these requests should always be treated as read
986 // exclusive operations and should leverage any migratory sharing
987 // optimization built into the protocol.
988 //
989 if (pkt->isWrite()) {
990 DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
991 primary_type = RubyRequestType_Locked_RMW_Write;
992 } else {
993 DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
994 assert(pkt->isRead());
995 primary_type = RubyRequestType_Locked_RMW_Read;
996 }
997 secondary_type = RubyRequestType_ST;
998 } else if (pkt->req->isTlbiCmd()) {
999 primary_type = secondary_type = tlbiCmdToRubyRequestType(pkt);
1000 DPRINTF(RubySequencer, "Issuing TLBI\n");
1001#if defined (PROTOCOL_CHI)
1002 } else if (pkt->isAtomicOp()) {
1003 if (pkt->req->isAtomicReturn()){
1004 DPRINTF(RubySequencer, "Issuing ATOMIC RETURN \n");
1005 primary_type = secondary_type =
1006 RubyRequestType_ATOMIC_RETURN;
1007 } else {
1008 DPRINTF(RubySequencer, "Issuing ATOMIC NO RETURN\n");
1009 primary_type = secondary_type =
1010 RubyRequestType_ATOMIC_NO_RETURN;
1011
1012 }
1013#endif
1014 } else if (pkt->req->hasNoAddr()) {
1015 primary_type = secondary_type = RubyRequestType_hasNoAddr;
1016 } else {
1017 //
1018 // To support SwapReq, we need to check isWrite() first: a SwapReq
1019 // should always be treated like a write, but since a SwapReq implies
1020 // both isWrite() and isRead() are true, check isWrite() first here.
1021 //
1022 if (pkt->isWrite()) {
1023 //
1024 // Note: M5 packets do not differentiate ST from RMW_Write
1025 //
1026 primary_type = secondary_type = RubyRequestType_ST;
1027 } else if (pkt->isRead()) {
1028 // hardware transactional memory commands
1029 if (pkt->req->isHTMCmd()) {
1030 primary_type = secondary_type = htmCmdToRubyRequestType(pkt);
1031 } else if (pkt->req->isInstFetch()) {
1032 primary_type = secondary_type = RubyRequestType_IFETCH;
1033 } else {
1034 if (pkt->req->isReadModifyWrite()) {
1035 primary_type = RubyRequestType_RMW_Read;
1036 secondary_type = RubyRequestType_ST;
1037 } else {
1038 primary_type = secondary_type = RubyRequestType_LD;
1039 }
1040 }
1041 } else if (pkt->isFlush()) {
1042 primary_type = secondary_type = RubyRequestType_FLUSH;
1043 } else if (pkt->cmd == MemCmd::MemSyncReq) {
1044 primary_type = secondary_type = RubyRequestType_REPLACEMENT;
1045 assert(!m_cache_inv_pkt);
1046 m_cache_inv_pkt = pkt;
1047 invL1();
1048 } else {
1049 panic("Cannot convert packet [%s] to ruby request\n",
1050 pkt->print());
1051 }
1052 }
1053
1054 // Check if the line is blocked for a Locked_RMW
1055 if (!pkt->req->isMemMgmt() &&
1056 m_controller->isBlocked(makeLineAddress(pkt->getAddr())) &&
1057 (primary_type != RubyRequestType_Locked_RMW_Write)) {
1058 // Return that this request's cache line address aliases with
1059 // a prior request that locked the cache line. The request cannot
1060 // proceed until the cache line is unlocked by a Locked_RMW_Write
1061 return RequestStatus_Aliased;
1062 }
1063
1064 RequestStatus status = insertRequest(pkt, primary_type, secondary_type);
1065
1066 // It is OK to receive RequestStatus_Aliased, it can be considered Issued
1067 if (status != RequestStatus_Ready && status != RequestStatus_Aliased)
1068 return status;
1069 // non-aliased with any existing request in the request table, just issue
1070 // to the cache
1071 if (status != RequestStatus_Aliased)
1072 issueRequest(pkt, secondary_type);
1073
1074 // TODO: issue hardware prefetches here
1075 return RequestStatus_Issued;
1076}
1077
1078void
1079Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
1080{
1081 assert(pkt != NULL);
1082 ContextID proc_id = pkt->req->hasContextId() ?
1083 pkt->req->contextId() : InvalidContextID;
1084
1085 ContextID core_id = coreId();
1086
1087 // If valid, copy the pc to the ruby request
1088 Addr pc = 0;
1089 if (pkt->req->hasPC()) {
1090 pc = pkt->req->getPC();
1091 }
1092
1093 int blk_size = m_ruby_system->getBlockSizeBytes();
1094
1095 // check if the packet has data as for example prefetch and flush
1096 // requests do not
1097 std::shared_ptr<RubyRequest> msg;
1098 if (pkt->req->isMemMgmt()) {
1099 msg = std::make_shared<RubyRequest>(clockEdge(), blk_size,
1101 pc, secondary_type,
1102 RubyAccessMode_Supervisor, pkt,
1103 proc_id, core_id);
1104
1105 DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s\n",
1106 curTick(), m_version, "Seq", "Begin", "", "",
1107 RubyRequestType_to_string(secondary_type));
1108
1109 if (pkt->req->isTlbiCmd()) {
1110 msg->m_isTlbi = true;
1111 switch (secondary_type) {
1112 case RubyRequestType_TLBI_EXT_SYNC_COMP:
1113 msg->m_tlbiTransactionUid = pkt->req->getExtraData();
1114 break;
1115 case RubyRequestType_TLBI:
1116 case RubyRequestType_TLBI_SYNC:
1117 msg->m_tlbiTransactionUid = \
1118 getCurrentUnaddressedTransactionID();
1119 break;
1120 default:
1121 panic("Unexpected TLBI RubyRequestType");
1122 }
1123 DPRINTF(RubySequencer, "Issuing TLBI %016x\n",
1124 msg->m_tlbiTransactionUid);
1125 }
1126 } else {
1127 msg = std::make_shared<RubyRequest>(clockEdge(), blk_size,
1129 pkt->getAddr(), pkt->getSize(),
1130 pc, secondary_type,
1131 RubyAccessMode_Supervisor, pkt,
1132 PrefetchBit_No, proc_id, core_id);
1133
1134 if (pkt->isAtomicOp() &&
1135 ((secondary_type == RubyRequestType_ATOMIC_RETURN) ||
1136 (secondary_type == RubyRequestType_ATOMIC_NO_RETURN))){
1137 // Create the blocksize, access mask and atomicops
1138 uint32_t offset = getOffset(pkt->getAddr());
1139 std::vector<std::pair<int,AtomicOpFunctor*>> atomicOps;
1140 atomicOps.push_back(std::make_pair<int,AtomicOpFunctor*>
1141 (offset, pkt->getAtomicOp()));
1142
1143 msg->setWriteMask(offset, pkt->getSize(), atomicOps);
1144 }
1145
1146 DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %s\n",
1147 curTick(), m_version, "Seq", "Begin", "", "",
1148 printAddress(msg->getPhysicalAddress()),
1149 RubyRequestType_to_string(secondary_type));
1150 }
1151
1152 // hardware transactional memory
1153 // If the request originates in a transaction,
1154 // then mark the Ruby message as such.
1155 if (pkt->isHtmTransactional()) {
1156 msg->m_htmFromTransaction = true;
1157 msg->m_htmTransactionUid = pkt->getHtmTransactionUid();
1158 }
1159
1160 Tick latency = cyclesToTicks(
1161 m_controller->mandatoryQueueLatency(secondary_type));
1162 assert(latency > 0);
1163
1164 assert(m_mandatory_q_ptr != NULL);
1165 m_mandatory_q_ptr->enqueue(msg, clockEdge(), latency,
1168}
1169
1170template <class KEY, class VALUE>
1171std::ostream &
1172operator<<(std::ostream &out, const std::unordered_map<KEY, VALUE> &map)
1173{
1174 for (const auto &table_entry : map) {
1175 out << "[ " << table_entry.first << " =";
1176 for (const auto &seq_req : table_entry.second) {
1177 out << " " << RubyRequestType_to_string(seq_req.m_second_type);
1178 }
1179 }
1180 out << " ]";
1181
1182 return out;
1183}
1184
1185void
1186Sequencer::print(std::ostream& out) const
1187{
1188 out << "[Sequencer: " << m_version
1189 << ", outstanding requests: " << m_outstanding_count
1190 << ", request table: " << m_RequestTable
1191 << "]";
1192}
1193
1194void
1195Sequencer::recordRequestType(SequencerRequestType requestType) {
1196 DPRINTF(RubyStats, "Recorded statistic: %s\n",
1197 SequencerRequestType_to_string(requestType));
1198}
1199
1200void
1201Sequencer::evictionCallback(Addr address)
1202{
1203 llscClearMonitor(address);
1204 ruby_eviction_callback(address);
1205}
1206
1207void
1208Sequencer::incrementUnaddressedTransactionCnt()
1209{
1210 m_unaddressedTransactionCnt++;
1211 // Limit m_unaddressedTransactionCnt to 32 bits,
1212 // top 32 bits should always be zeroed out
1213 uint64_t aligned_txid = \
1215
1216 if (aligned_txid > 0xFFFFFFFFull) {
1217 m_unaddressedTransactionCnt = 0;
1218 }
1219}
1220
1221uint64_t
1222Sequencer::getCurrentUnaddressedTransactionID() const
1223{
1224 return (
1225 uint64_t(m_version & 0xFFFFFFFF) << 32) |
1226 (m_unaddressedTransactionCnt & 0xFFFFFFFFull)
1227 );
1228}
1229
1230} // namespace ruby
1231} // namespace gem5
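For readers tracing the unaddressed (TLBI) path, getCurrentUnaddressedTransactionID() above packs the sequencer's version id into the upper 32 bits of the transaction ID and the 32-bit per-sequencer counter into the lower half, so concurrent TLBI requests from different sequencers cannot collide. A minimal standalone sketch of that packing, assuming nothing from gem5 (packUnaddressedTxnId and main are illustrative names, not gem5 code):

#include <cassert>
#include <cstdint>

// Upper 32 bits: sequencer version; lower 32 bits: transaction counter,
// mirroring the expression in getCurrentUnaddressedTransactionID() above.
static uint64_t packUnaddressedTxnId(uint32_t version, uint64_t counter)
{
    return (uint64_t(version & 0xFFFFFFFF) << 32) |
           (counter & 0xFFFFFFFFull);
}

int main()
{
    // Version 3, counter 7 -> 0x0000000300000007.
    assert(packUnaddressedTxnId(3, 7) == ((uint64_t{3} << 32) | 7));
    return 0;
}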
#define DPRINTFR(x,...)
Definition trace.hh:223
#define DPRINTF(x,...)
Definition trace.hh:209
const char data[]
Cycles curCycle() const
Determine the current cycle, corresponding to a tick aligned to a clock edge.
Tick clockEdge(Cycles cycles=Cycles(0)) const
Determine the tick when a cycle begins, by default the current one, but the argument also enables the...
Tick cyclesToTicks(Cycles c) const
Tick clockPeriod() const
Cycles is a wrapper class for representing cycle counts, i.e.
Definition types.hh:79
virtual std::string name() const
Definition named.hh:47
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition packet.hh:295
bool isRead() const
Definition packet.hh:593
Addr getAddr() const
Definition packet.hh:807
bool isAtomicOp() const
Definition packet.hh:846
const std::string & cmdString() const
Return the string name of the cmd field (for debugging and tracing).
Definition packet.hh:588
void print(std::ostream &o, int verbosity=0, const std::string &prefix="") const
Definition packet.cc:368
T * findNextSenderState() const
Go through the sender state stack and return the first instance that is of type T (as determined by a...
Definition packet.hh:575
bool isHtmTransactional() const
Returns whether or not this packet/request originates in the CPU executing in transactional mode,...
Definition packet.cc:523
void setData(const uint8_t *p)
Copy data into the packet from the provided pointer.
Definition packet.hh:1293
bool isWrite() const
Definition packet.hh:594
uint64_t getHtmTransactionUid() const
If a packet/request originates in a CPU executing in transactional mode, i.e.
Definition packet.cc:529
RequestPtr req
A pointer to the original request.
Definition packet.hh:377
unsigned getSize() const
Definition packet.hh:817
AtomicOpFunctor * getAtomicOp() const
Accessor function to atomic op.
Definition packet.hh:845
bool isLLSC() const
Definition packet.hh:620
MemCmd cmd
The command field of the packet.
Definition packet.hh:372
bool isMaskedWrite() const
Definition packet.hh:1450
bool isFlush() const
Definition packet.hh:624
void writeData(uint8_t *p) const
Copy data from the packet to the memory at the provided pointer.
Definition packet.hh:1322
bool isLocked(int context) const
virtual void notifyCoalesced(const Addr &addr, const RubyRequestType &type, const RequestPtr &req, const DataBlock &data_blk, const bool &was_miss)
Notifies controller of a request coalesced at the sequencer.
virtual Cycles mandatoryQueueLatency(const RubyRequestType &param_type)
void blockOnQueue(Addr, MessageBuffer *)
Addr getAddressAtIdx(int idx) const
void clearLockedAll(int context)
AbstractCacheEntry * lookup(Addr address)
void enqueueNextFetchRequest()
Function for fetching warming up the memory and the caches.
void enqueue(MsgPtr message, Tick curTime, Tick delta, bool ruby_is_random, bool ruby_warmup, bool bypassStrictFIFO=false)
bool getUseSecondaryStoreConditional() const
bool getUseSecondaryLoadLinked() const
void ruby_hit_callback(PacketPtr pkt)
Definition RubyPort.cc:462
Addr makeLineAddress(Addr addr) const
Definition RubyPort.cc:759
void ruby_unaddressed_callback(PacketPtr pkt)
Definition RubyPort.cc:487
void ruby_stale_translation_callback(Addr txnId)
Definition RubyPort.cc:506
std::string printAddress(Addr addr) const
Definition RubyPort.cc:765
virtual int functionalWrite(Packet *func_pkt)
Definition RubyPort.cc:741
AbstractController * m_controller
Definition RubyPort.hh:209
void ruby_eviction_callback(Addr address)
Definition RubyPort.cc:707
MessageBuffer * m_mandatory_q_ptr
Definition RubyPort.hh:210
Addr getOffset(Addr addr) const
Definition RubyPort.cc:753
uint32_t getBlockSizeBits()
Definition RubySystem.hh:74
uint32_t getBlockSizeBytes()
Definition RubySystem.hh:73
CacheRecorder * m_cache_recorder
const ProtocolInfo & getProtocolInfo()
statistics::Histogram m_outstandReqHist
Histogram for number of outstanding requests per cycle.
Definition Sequencer.hh:286
void resetStats() override
Callback to reset stats.
Definition Sequencer.cc:281
std::unordered_map< Addr, std::list< SequencerRequest > > m_RequestTable
Definition Sequencer.hh:246
virtual bool empty() const
Definition Sequencer.cc:933
void writeCallback(Addr address, DataBlock &data, const bool externalHit=false, const MachineType mach=MachineType_NUM, const Cycles initialRequestTime=Cycles(0), const Cycles forwardRequestTime=Cycles(0), const Cycles firstResponseTime=Cycles(0), const bool noCoales=false)
Definition Sequencer.cc:464
std::vector< statistics::Counter > m_IncompleteTimes
Definition Sequencer.hh:318
bool llscCheckMonitor(const Addr)
Searches for cache line address in the global monitor tagged with this Sequencer object's version id.
Definition Sequencer.cc:207
virtual int functionalWrite(Packet *func_pkt) override
Definition Sequencer.cc:264
std::vector< statistics::Histogram * > m_InitialToForwardDelayHist
Definition Sequencer.hh:315
RubySystem * m_ruby_system
Definition Sequencer.hh:257
std::vector< statistics::Histogram * > m_hitMachLatencyHist
Histograms for profiling the latencies for requests that did not required external messages.
Definition Sequencer.hh:299
void recordMissLatency(SequencerRequest *srequest, bool llscSuccess, const MachineType respondingMach, bool isExternalHit, Cycles initialRequestTime, Cycles forwardRequestTime, Cycles firstResponseTime)
Definition Sequencer.cc:391
void completeHitCallback(std::vector< PacketPtr > &list)
Definition Sequencer.cc:851
std::vector< statistics::Histogram * > m_typeLatencyHist
Definition Sequencer.hh:290
PacketPtr m_cache_inv_pkt
Definition Sequencer.hh:264
void atomicCallback(Addr address, DataBlock &data, const bool externalHit=false, const MachineType mach=MachineType_NUM, const Cycles initialRequestTime=Cycles(0), const Cycles forwardRequestTime=Cycles(0), const Cycles firstResponseTime=Cycles(0))
Definition Sequencer.cc:640
void writeCallbackScFail(Addr address, DataBlock &data)
Proxy function to writeCallback that first invalidates the line address in the local monitor.
Definition Sequencer.cc:457
CacheMemory * m_dataCache_ptr
Definition Sequencer.hh:266
std::vector< statistics::Histogram * > m_FirstResponseToCompletionDelayHist
Definition Sequencer.hh:317
void incrementUnaddressedTransactionCnt()
Increment the unaddressed transaction counter.
virtual void hitCallback(SequencerRequest *srequest, DataBlock &data, bool llscSuccess, const MachineType mach, const bool externalHit, const Cycles initialRequestTime, const Cycles forwardRequestTime, const Cycles firstResponseTime, const bool was_coalesced)
Definition Sequencer.cc:696
void llscLoadLinked(const Addr)
Places the cache line address into the global monitor tagged with this Sequencer object's version id.
Definition Sequencer.cc:157
uint64_t getCurrentUnaddressedTransactionID() const
Generate the current unaddressed transaction ID based on the counter and the Sequencer object's versi...
Sequencer(const Params &)
Definition Sequencer.cc:70
statistics::Histogram m_latencyHist
Histogram for holding latency profile of all requests.
Definition Sequencer.hh:289
void issueRequest(PacketPtr pkt, RubyRequestType type)
bool llscStoreConditional(const Addr)
Searches for cache line address in the global monitor tagged with this Sequencer object's version id.
Definition Sequencer.cc:184
void unaddressedCallback(Addr unaddressedReqId, RubyRequestType requestType, const MachineType mach=MachineType_NUM, const Cycles initialRequestTime=Cycles(0), const Cycles forwardRequestTime=Cycles(0), const Cycles firstResponseTime=Cycles(0))
Definition Sequencer.cc:801
virtual RequestStatus insertRequest(PacketPtr pkt, RubyRequestType primary_type, RubyRequestType secondary_type)
Definition Sequencer.cc:313
EventFunctionWrapper deadlockCheckEvent
Definition Sequencer.hh:320
std::unordered_map< uint64_t, SequencerRequest > m_UnaddressedRequestTable
Definition Sequencer.hh:249
uint64_t m_unaddressedTransactionCnt
Definition Sequencer.hh:281
std::vector< statistics::Histogram * > m_hitTypeLatencyHist
Definition Sequencer.hh:295
statistics::Histogram m_hitLatencyHist
Histogram for holding latency profile of all requests that hit in the controller connected to this se...
Definition Sequencer.hh:294
RequestStatus makeRequest(PacketPtr pkt) override
Definition Sequencer.cc:940
void llscClearLocalMonitor()
Removes all addresses from the local monitor.
Definition Sequencer.cc:223
void recordRequestType(SequencerRequestType requestType)
std::vector< statistics::Histogram * > m_IssueToInitialDelayHist
Histograms for recording the breakdown of miss latency.
Definition Sequencer.hh:314
virtual void print(std::ostream &out) const
virtual bool processReadCallback(SequencerRequest &seq_req, DataBlock &data, const bool rubyRequest, bool externalHit, const MachineType mach, Cycles initialRequestTime, Cycles forwardRequestTime, Cycles firstResponseTime)
Definition Sequencer.cc:568
RubySequencerParams Params
Definition Sequencer.hh:90
std::vector< std::vector< statistics::Histogram * > > m_hitTypeMachLatencyHist
Definition Sequencer.hh:300
void llscClearMonitor(const Addr)
Removes the cache line address from the global monitor.
Definition Sequencer.cc:170
virtual void wakeup()
Definition Sequencer.cc:229
void evictionCallback(Addr address)
std::vector< statistics::Histogram * > m_ForwardToFirstResponseDelayHist
Definition Sequencer.hh:316
void readCallback(Addr address, DataBlock &data, const bool externalHit=false, const MachineType mach=MachineType_NUM, const Cycles initialRequestTime=Cycles(0), const Cycles forwardRequestTime=Cycles(0), const Cycles firstResponseTime=Cycles(0))
Definition Sequencer.cc:595
std::vector< statistics::Histogram * > m_missMachLatencyHist
Histograms for profiling the latencies for requests that required external messages.
Definition Sequencer.hh:309
std::vector< statistics::Histogram * > m_missTypeLatencyHist
Definition Sequencer.hh:305
statistics::Histogram m_missLatencyHist
Histogram for holding latency profile of all requests that miss in the controller connected to this s...
Definition Sequencer.hh:304
std::vector< std::vector< statistics::Histogram * > > m_missTypeMachLatencyHist
Definition Sequencer.hh:311
void mergeFrom(const DataBlock &data)
Definition SubBlock.hh:66
void sample(const U &v, int n=1)
Add a value to the distribtion n times.
void reset()
Reset stat value to default.
A simple histogram stat.
STL vector class.
Definition stl.hh:37
DrainState drainState() const
Return the current drain state of an object.
Definition drain.hh:324
@ Draining
Draining buffers pending serialization/handover.
bool scheduled() const
Determine if the current event is scheduled.
Definition eventq.hh:458
void schedule(Event &event, Tick when)
Definition eventq.hh:1012
#define panic(...)
This implements a cprintf based panic() function.
Definition logging.hh:188
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condi...
Definition logging.hh:236
#define warn_once(...)
Definition logging.hh:260
Bitfield< 7 > i
Definition misc_types.hh:67
Bitfield< 23, 0 > offset
Definition types.hh:144
Bitfield< 9, 8 > rs
Bitfield< 5, 0 > status
Bitfield< 21 > ss
Definition misc_types.hh:60
Bitfield< 4 > pc
Bitfield< 0 > p
Bitfield< 3 > addr
Definition types.hh:84
RubyRequestType tlbiCmdToRubyRequestType(const Packet *pkt)
bool isTlbiCmdRequest(RubyRequestType type)
RubyRequestType htmCmdToRubyRequestType(const Packet *pkt)
Copyright (c) 2024 Arm Limited All rights reserved.
Definition binary32.hh:36
Tick curTick()
The universal simulation clock.
Definition cur_tick.hh:46
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition types.hh:147
uint64_t Tick
Tick count type.
Definition types.hh:58
std::ostream & operator<<(std::ostream &os, const BaseSemihosting::InPlaceArg &ipa)
int ContextID
Globally unique thread context ID.
Definition types.hh:239
const ContextID InvalidContextID
Definition types.hh:240
Declaration of the Packet class.
RubyRequestType m_second_type
Definition Sequencer.hh:67

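The LL/SC helpers documented above (llscLoadLinked, llscClearMonitor, llscStoreConditional, llscCheckMonitor) implement a simple per-line ownership check: a load-linked tags the line with the requesting sequencer's version, any ordinary store or eviction clears the tag, and a store-conditional succeeds only if the tag is still held by the same version, clearing it either way. A self-contained toy model of those semantics, assuming nothing from gem5 (ToyLlscMonitor and main are illustrative, not the gem5 classes):

#include <cassert>
#include <cstdint>
#include <unordered_map>

using Addr = uint64_t;

// Toy stand-in for the monitor state kept in AbstractCacheEntry/CacheMemory:
// maps a line address to the version (sequencer id) that holds the monitor.
struct ToyLlscMonitor
{
    std::unordered_map<Addr, int> locked;

    // Load-linked: tag the line with the requester's version.
    void loadLinked(Addr line, int version) { locked[line] = version; }

    // Ordinary store or eviction: drop any monitor on the line.
    void clearMonitor(Addr line) { locked.erase(line); }

    // Store-conditional: succeed only if still tagged by the same version;
    // the monitor entry is cleared whether or not the SC succeeds.
    bool storeConditional(Addr line, int version)
    {
        auto it = locked.find(line);
        bool success = (it != locked.end() && it->second == version);
        locked.erase(line);
        return success;
    }
};

int main()
{
    ToyLlscMonitor m;
    m.loadLinked(0x1000, 0);
    assert(m.storeConditional(0x1000, 0));   // monitor still held: SC succeeds
    m.loadLinked(0x1000, 0);
    m.clearMonitor(0x1000);                  // e.g. an intervening store
    assert(!m.storeConditional(0x1000, 0));  // monitor cleared: SC fails
    return 0;
}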