Sequencer.cc
/*
 * Copyright (c) 2019-2021,2023 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/system/Sequencer.hh"

#include "arch/x86/ldstflags.hh"
#include "base/compiler.hh"
#include "base/logging.hh"
#include "base/str.hh"
#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/LLSC.hh"
#include "debug/MemoryAccess.hh"
#include "debug/ProtocolTrace.hh"
#include "debug/RubyHitMiss.hh"
#include "debug/RubySequencer.hh"
#include "debug/RubyStats.hh"
#include "mem/packet.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/protocol/PrefetchBit.hh"
#include "mem/ruby/protocol/RubyAccessMode.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/slicc_interface/RubySlicc_Util.hh"
#include "mem/ruby/system/RubySystem.hh"
#include "sim/system.hh"

namespace gem5
{

namespace ruby
{

Sequencer::Sequencer(const Params &p)
    : RubyPort(p), m_IncompleteTimes(MachineType_NUM),
      deadlockCheckEvent([this]{ wakeup(); }, "Sequencer deadlock check")
{
    m_outstanding_count = 0;

    m_ruby_system = p.ruby_system;

    m_dataCache_ptr = p.dcache;
    m_max_outstanding_requests = p.max_outstanding_requests;
    m_deadlock_threshold = p.deadlock_threshold;

    m_coreId = p.coreid; // for tracking the two CorePair sequencers
    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);

    m_unaddressedTransactionCnt = 0;

    m_runningGarnetStandalone = p.garnet_standalone;

    m_num_pending_invs = 0;
    m_cache_inv_pkt = nullptr;

    // These statistical variables are not for display.
    // The profiler will collate these across different
    // sequencers and display those collated statistics.
    m_outstandReqHist.init(10);
    m_latencyHist.init(10);
    m_hitLatencyHist.init(10);
    m_missLatencyHist.init(10);

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist.push_back(new statistics::Histogram());
        m_typeLatencyHist[i]->init(10);

        m_hitTypeLatencyHist.push_back(new statistics::Histogram());
        m_hitTypeLatencyHist[i]->init(10);

        m_missTypeLatencyHist.push_back(new statistics::Histogram());
        m_missTypeLatencyHist[i]->init(10);
    }

    for (int i = 0; i < MachineType_NUM; i++) {
        m_hitMachLatencyHist.push_back(new statistics::Histogram());
        m_hitMachLatencyHist[i]->init(10);

        m_missMachLatencyHist.push_back(new statistics::Histogram());
        m_missMachLatencyHist[i]->init(10);

        m_IssueToInitialDelayHist.push_back(new statistics::Histogram());
        m_IssueToInitialDelayHist[i]->init(10);

        m_InitialToForwardDelayHist.push_back(new statistics::Histogram());
        m_InitialToForwardDelayHist[i]->init(10);

        m_ForwardToFirstResponseDelayHist.push_back(
            new statistics::Histogram());
        m_ForwardToFirstResponseDelayHist[i]->init(10);

        m_FirstResponseToCompletionDelayHist.push_back(
            new statistics::Histogram());
        m_FirstResponseToCompletionDelayHist[i]->init(10);
    }

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_hitTypeMachLatencyHist.push_back(
            std::vector<statistics::Histogram *>());
        m_missTypeMachLatencyHist.push_back(
            std::vector<statistics::Histogram *>());

        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i].push_back(new statistics::Histogram());
            m_hitTypeMachLatencyHist[i][j]->init(10);

            m_missTypeMachLatencyHist[i].push_back(
                new statistics::Histogram());
            m_missTypeMachLatencyHist[i][j]->init(10);
        }
    }
}

Sequencer::~Sequencer()
{
}

void
Sequencer::llscLoadLinked(const Addr claddr)
{
    fatal_if(m_dataCache_ptr == NULL,
        "%s must have a dcache object to support LLSC requests.", name());
    AbstractCacheEntry *line = m_dataCache_ptr->lookup(claddr);
    if (line) {
        line->setLocked(m_version);
        DPRINTF(LLSC, "LLSC Monitor - inserting load linked - "
                "addr=0x%lx - cpu=%u\n", claddr, m_version);
    }
}

void
Sequencer::llscClearMonitor(const Addr claddr)
{
    // clear monitor is called for all stores and evictions
    if (m_dataCache_ptr == NULL)
        return;
    AbstractCacheEntry *line = m_dataCache_ptr->lookup(claddr);
    if (line && line->isLocked(m_version)) {
        line->clearLocked();
        DPRINTF(LLSC, "LLSC Monitor - clearing due to store - "
                "addr=0x%lx - cpu=%u\n", claddr, m_version);
    }
}

bool
Sequencer::llscStoreConditional(const Addr claddr)
{
    fatal_if(m_dataCache_ptr == NULL,
        "%s must have a dcache object to support LLSC requests.", name());
    AbstractCacheEntry *line = m_dataCache_ptr->lookup(claddr);
    if (!line)
        return false;

    DPRINTF(LLSC, "LLSC Monitor - clearing due to "
            "store conditional - "
            "addr=0x%lx - cpu=%u\n",
            claddr, m_version);

    if (line->isLocked(m_version)) {
        line->clearLocked();
        return true;
    } else {
        line->clearLocked();
        return false;
    }
}

bool
Sequencer::llscCheckMonitor(const Addr address)
{
    assert(m_dataCache_ptr != NULL);
    const Addr claddr = makeLineAddress(address);
    AbstractCacheEntry *line = m_dataCache_ptr->lookup(claddr);
    if (!line)
        return false;

    if (line->isLocked(m_version)) {
        return true;
    } else {
        return false;
    }
}
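
// Illustrative only (not part of the upstream file): a minimal sketch of
// how a caller is expected to drive the monitor helpers above, assuming
// `seq` points at the core's Sequencer. The LL arms the monitor for this
// version id, intervening stores or evictions clear it, and the SC both
// tests and clears it:
//
//     seq->llscLoadLinked(line_addr);              // LL: lock the line
//     bool armed = seq->llscCheckMonitor(line_addr);
//     if (seq->llscStoreConditional(line_addr)) {
//         // SC succeeded; the entry is cleared on either outcome
//     }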

void
Sequencer::llscClearLocalMonitor()
{
    m_dataCache_ptr->clearLockedAll(m_version);
}

void
Sequencer::wakeup()
{
    assert(drainState() != DrainState::Draining);

    // Check for deadlock of any of the requests
    Cycles current_time = curCycle();

    // Check across all outstanding requests
    [[maybe_unused]] int total_outstanding = 0;

    for (const auto &table_entry : m_RequestTable) {
        for (const auto &seq_req : table_entry.second) {
            if (current_time - seq_req.issue_time < m_deadlock_threshold)
                continue;

            panic("Possible Deadlock detected. Aborting!\n version: %d "
                  "request.paddr: 0x%x m_readRequestTable: %d current time: "
                  "%u issue_time: %d difference: %d\n", m_version,
                  seq_req.pkt->getAddr(), table_entry.second.size(),
                  current_time * clockPeriod(), seq_req.issue_time
                  * clockPeriod(), (current_time * clockPeriod())
                  - (seq_req.issue_time * clockPeriod()));
        }
        total_outstanding += table_entry.second.size();
    }

    assert(m_outstanding_count == total_outstanding);

    if (m_outstanding_count > 0) {
        // If there are still outstanding requests, keep checking
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }
}
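
// For a concrete sense of the check above (threshold values come from the
// RubySequencer config, not from this file): with a deadlock threshold of,
// say, 500000 cycles, wakeup() fires once per threshold interval while
// requests are outstanding and panics only if some table entry has waited
// at least m_deadlock_threshold cycles since its issue_time; otherwise it
// simply reschedules itself.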

int
Sequencer::functionalWrite(Packet *func_pkt)
{
    int num_written = RubyPort::functionalWrite(func_pkt);

    for (const auto &table_entry : m_RequestTable) {
        for (const auto &seq_req : table_entry.second) {
            if (seq_req.functionalWrite(func_pkt))
                ++num_written;
        }
    }
    // Functional writes to addresses being monitored
    // will fail (remove) the monitor entry.
    llscClearMonitor(makeLineAddress(func_pkt->getAddr()));

    return num_written;
}

void
Sequencer::resetStats()
{
    m_outstandReqHist.reset();
    m_latencyHist.reset();
    m_hitLatencyHist.reset();
    m_missLatencyHist.reset();
    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist[i]->reset();
        m_hitTypeLatencyHist[i]->reset();
        m_missTypeLatencyHist[i]->reset();
        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i][j]->reset();
            m_missTypeMachLatencyHist[i][j]->reset();
        }
    }

    for (int i = 0; i < MachineType_NUM; i++) {
        m_missMachLatencyHist[i]->reset();
        m_hitMachLatencyHist[i]->reset();

        m_IssueToInitialDelayHist[i]->reset();
        m_InitialToForwardDelayHist[i]->reset();
        m_ForwardToFirstResponseDelayHist[i]->reset();
        m_FirstResponseToCompletionDelayHist[i]->reset();

        m_IncompleteTimes[i] = 0;
    }
}

// Insert the request in the request table. Return RequestStatus_Aliased
// if the entry was already present.
RequestStatus
Sequencer::insertRequest(PacketPtr pkt, RubyRequestType primary_type,
                         RubyRequestType secondary_type)
{
    // See if we should schedule a deadlock check
    if (!deadlockCheckEvent.scheduled() &&
        drainState() != DrainState::Draining) {
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }

    if (isTlbiCmdRequest(primary_type)) {
        assert(primary_type == secondary_type);

        switch (primary_type) {
        case RubyRequestType_TLBI_EXT_SYNC_COMP:
            // Don't have to store any data on this
            break;
        case RubyRequestType_TLBI:
        case RubyRequestType_TLBI_SYNC:
          {
            incrementUnaddressedTransactionCnt();

            // returns pair<inserted element, was inserted>
            [[maybe_unused]] auto insert_data = \
                m_UnaddressedRequestTable.emplace(
                    getCurrentUnaddressedTransactionID(),
                    SequencerRequest(
                        pkt, primary_type, secondary_type, curCycle()));

            // if insert_data.second is false, wasn't inserted
            assert(insert_data.second &&
                   "Another TLBI request with the same ID exists");

            DPRINTF(RubySequencer, "Inserting TLBI request %016x\n",
                    getCurrentUnaddressedTransactionID());

            break;
          }

        default:
            panic("Unexpected TLBI RubyRequestType");
        }

        return RequestStatus_Ready;
    }

    // If the command is MemSyncReq, it is used to invalidate the cache.
    // As the cache invalidation requests are already issued in invL1(),
    // there is no need to create a new request for the same here.
    // Instead, return RequestStatus_Aliased, and make the sequencer skip
    // an extra issueRequest
    if (pkt->cmd == MemCmd::MemSyncReq) {
        return RequestStatus_Aliased;
    }

    Addr line_addr = makeLineAddress(pkt->getAddr());
    // Check if there is any outstanding request for the same cache line.
    auto &seq_req_list = m_RequestTable[line_addr];

    // For software prefetches, if the same cache line is already requested,
    // we can return Aliased and skip emplacing it on the request table;
    // this way we won't need to handle any response at all
    if (pkt->cmd.isSWPrefetch() && seq_req_list.size() > 1) {
        return RequestStatus_Aliased;
    }

    // Create a default entry
    seq_req_list.emplace_back(pkt, primary_type,
                              secondary_type, curCycle());
    m_outstanding_count++;

    if (seq_req_list.size() > 1) {
        return RequestStatus_Aliased;
    }

    m_outstandReqHist.sample(m_outstanding_count);

    return RequestStatus_Ready;
}
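
// Aliasing example (hypothetical addresses): with 64-byte lines, loads to
// 0x1000 and 0x1008 map to the same line address, so the later call to
// insertRequest() finds a non-empty seq_req_list and returns
// RequestStatus_Aliased; the caller then skips issueRequest() and the
// queued entry is drained later by the matching readCallback().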

void
Sequencer::markRemoved()
{
    m_outstanding_count--;
}

void
Sequencer::recordMissLatency(SequencerRequest* srequest, bool llscSuccess,
                             const MachineType respondingMach,
                             bool isExternalHit, Cycles initialRequestTime,
                             Cycles forwardRequestTime,
                             Cycles firstResponseTime)
{
    RubyRequestType type = srequest->m_type;
    Cycles issued_time = srequest->issue_time;
    Cycles completion_time = curCycle();

    assert(curCycle() >= issued_time);
    Cycles total_lat = completion_time - issued_time;

    if ((initialRequestTime != 0) && (initialRequestTime < issued_time)) {
        // if the request was combined in the protocol with an earlier request
        // for the same address, it is possible that it will return an
        // initialRequestTime corresponding to the earlier request. Since
        // Cycles is unsigned, we can't let this request get profiled below.

        total_lat = Cycles(0);
    }

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n",
             curTick(), m_version, "Seq", llscSuccess ? "Done" : "SC_Failed",
             "", "", printAddress(srequest->pkt->getAddr()), total_lat);

    m_latencyHist.sample(total_lat);
    m_typeLatencyHist[type]->sample(total_lat);

    if (isExternalHit) {
        m_missLatencyHist.sample(total_lat);
        m_missTypeLatencyHist[type]->sample(total_lat);

        if (respondingMach != MachineType_NUM) {
            m_missMachLatencyHist[respondingMach]->sample(total_lat);
            m_missTypeMachLatencyHist[type][respondingMach]->sample(total_lat);

            if ((issued_time <= initialRequestTime) &&
                (initialRequestTime <= forwardRequestTime) &&
                (forwardRequestTime <= firstResponseTime) &&
                (firstResponseTime <= completion_time)) {

                m_IssueToInitialDelayHist[respondingMach]->sample(
                    initialRequestTime - issued_time);
                m_InitialToForwardDelayHist[respondingMach]->sample(
                    forwardRequestTime - initialRequestTime);
                m_ForwardToFirstResponseDelayHist[respondingMach]->sample(
                    firstResponseTime - forwardRequestTime);
                m_FirstResponseToCompletionDelayHist[respondingMach]->sample(
                    completion_time - firstResponseTime);
            } else {
                m_IncompleteTimes[respondingMach]++;
            }
        }
    } else {
        m_hitLatencyHist.sample(total_lat);
        m_hitTypeLatencyHist[type]->sample(total_lat);

        if (respondingMach != MachineType_NUM) {
            m_hitMachLatencyHist[respondingMach]->sample(total_lat);
            m_hitTypeMachLatencyHist[type][respondingMach]->sample(total_lat);
        }
    }
}

void
Sequencer::writeCallbackScFail(Addr address, DataBlock& data)
{
    llscClearMonitor(address);
    writeCallback(address, data);
}

void
Sequencer::writeCallback(Addr address, DataBlock& data,
                         const bool externalHit, const MachineType mach,
                         const Cycles initialRequestTime,
                         const Cycles forwardRequestTime,
                         const Cycles firstResponseTime,
                         const bool noCoales)
{
    //
    // Free the whole list as we assume we have had exclusive access
    // to this cache line when the response for the write comes back
    //
    assert(address == makeLineAddress(address));
    assert(m_RequestTable.find(address) != m_RequestTable.end());
    auto &seq_req_list = m_RequestTable[address];

    // Perform hitCallback on every cpu request made to this cache block
    // while the ruby request was outstanding. Since only 1 ruby request
    // was made, profile the ruby latency once.
    bool ruby_request = true;
    while (!seq_req_list.empty()) {
        SequencerRequest &seq_req = seq_req_list.front();
        // Atomic requests may be executed remotely in the cache hierarchy
        bool atomic_req =
            ((seq_req.m_type == RubyRequestType_ATOMIC_RETURN) ||
             (seq_req.m_type == RubyRequestType_ATOMIC_NO_RETURN));

        if ((noCoales || atomic_req) && !ruby_request) {
            // Do not process follow-up requests
            // (e.g. if the full line is not present)
            // Reissue to the cache hierarchy
            issueRequest(seq_req.pkt, seq_req.m_second_type);
            break;
        }

        if (ruby_request) {
            assert(seq_req.m_type != RubyRequestType_LD);
            assert(seq_req.m_type != RubyRequestType_Load_Linked);
            assert(seq_req.m_type != RubyRequestType_IFETCH);
            assert(seq_req.m_type != RubyRequestType_ATOMIC_RETURN);
            assert(seq_req.m_type != RubyRequestType_ATOMIC_NO_RETURN);
        }

        // handle write request
        if ((seq_req.m_type != RubyRequestType_LD) &&
            (seq_req.m_type != RubyRequestType_Load_Linked) &&
            (seq_req.m_type != RubyRequestType_IFETCH)) {
            // LL/SC support (tested with ARMv8)
            bool success = true;

            if (seq_req.m_type != RubyRequestType_Store_Conditional) {
                // Regular stores to addresses being monitored
                // will fail (remove) the monitor entry.
                llscClearMonitor(address);
            } else {
                // Store conditionals must first check the monitor
                // to see whether they will succeed or not
                success = llscStoreConditional(address);
                seq_req.pkt->req->setExtraData(success ? 1 : 0);
            }

            // Handle SLICC block_on behavior for Locked_RMW accesses. NOTE:
            // the address variable here is assumed to be a line address, so
            // when blocking buffers, must check line addresses.
            if (seq_req.m_type == RubyRequestType_Locked_RMW_Read) {
                // blockOnQueue blocks all first-level cache controller queues
                // waiting on memory accesses for the specified address that go
                // to the specified queue. In this case, a Locked_RMW_Write must
                // go to the mandatory_q before unblocking the first-level
                // controller. This will block standard loads, stores, ifetches,
                // etc.
                m_controller->blockOnQueue(address, m_mandatory_q_ptr);
            } else if (seq_req.m_type == RubyRequestType_Locked_RMW_Write) {
                m_controller->unblock(address);
            }

            if (ruby_request) {
                recordMissLatency(&seq_req, success, mach, externalHit,
                                  initialRequestTime, forwardRequestTime,
                                  firstResponseTime);
            }

            markRemoved();
            hitCallback(&seq_req, data, success, mach, externalHit,
                        initialRequestTime, forwardRequestTime,
                        firstResponseTime, !ruby_request);
            ruby_request = false;
        } else {
            // handle read request
            assert(!ruby_request);
            markRemoved();
            hitCallback(&seq_req, data, true, mach, externalHit,
                        initialRequestTime, forwardRequestTime,
                        firstResponseTime, !ruby_request);
        }
        seq_req_list.pop_front();
    }

    // free all outstanding requests corresponding to this address
    if (seq_req_list.empty()) {
        m_RequestTable.erase(address);
    }
}

bool
Sequencer::processReadCallback(SequencerRequest &seq_req,
                               DataBlock& data,
                               const bool ruby_request,
                               bool externalHit,
                               const MachineType mach,
                               Cycles initialRequestTime,
                               Cycles forwardRequestTime,
                               Cycles firstResponseTime)
{
    if (ruby_request) {
        assert((seq_req.m_type == RubyRequestType_LD) ||
               (seq_req.m_type == RubyRequestType_Load_Linked) ||
               (seq_req.m_type == RubyRequestType_IFETCH));
    }
    if ((seq_req.m_type != RubyRequestType_LD) &&
        (seq_req.m_type != RubyRequestType_Load_Linked) &&
        (seq_req.m_type != RubyRequestType_IFETCH) &&
        (seq_req.m_type != RubyRequestType_REPLACEMENT)) {
        // Write request: reissue request to the cache hierarchy
        issueRequest(seq_req.pkt, seq_req.m_second_type);
        return true;
    }
    return false;
}

void
Sequencer::readCallback(Addr address, DataBlock& data,
                        bool externalHit, const MachineType mach,
                        Cycles initialRequestTime,
                        Cycles forwardRequestTime,
                        Cycles firstResponseTime)
{
    //
    // Free up read requests until we hit the first write request
    // or the end of the corresponding list.
    //
    assert(address == makeLineAddress(address));
    assert(m_RequestTable.find(address) != m_RequestTable.end());
    auto &seq_req_list = m_RequestTable[address];

    // Perform hitCallback on every cpu request made to this cache block
    // while the ruby request was outstanding. Since only 1 ruby request
    // was made, profile the ruby latency once.
    bool ruby_request = true;
    while (!seq_req_list.empty()) {
        SequencerRequest &seq_req = seq_req_list.front();
        if (processReadCallback(seq_req, data, ruby_request, externalHit,
                                mach, initialRequestTime, forwardRequestTime,
                                firstResponseTime)) {
            break;
        }
        if (ruby_request) {
            recordMissLatency(&seq_req, true, mach, externalHit,
                              initialRequestTime, forwardRequestTime,
                              firstResponseTime);
        }
        markRemoved();
        hitCallback(&seq_req, data, true, mach, externalHit,
                    initialRequestTime, forwardRequestTime,
                    firstResponseTime, !ruby_request);
        ruby_request = false;
        seq_req_list.pop_front();
    }

    // free all outstanding requests corresponding to this address
    if (seq_req_list.empty()) {
        m_RequestTable.erase(address);
    }
}

void
Sequencer::atomicCallback(Addr address, DataBlock& data,
                          const bool externalHit, const MachineType mach,
                          const Cycles initialRequestTime,
                          const Cycles forwardRequestTime,
                          const Cycles firstResponseTime)
{
    //
    // Free the first request (an atomic operation) from the list.
    // Then issue the next request to the ruby system, as we cannot
    // assume the cache line is present in the cache
    // (the operation could have been performed remotely)
    //
    assert(address == makeLineAddress(address));
    assert(m_RequestTable.find(address) != m_RequestTable.end());
    auto &seq_req_list = m_RequestTable[address];

    // Perform hitCallback only on the first cpu request that
    // issued the ruby request
    bool ruby_request = true;
    while (!seq_req_list.empty()) {
        SequencerRequest &seq_req = seq_req_list.front();

        if (ruby_request) {
            // Check that the request was an atomic memory operation
            // and record the latency
            assert((seq_req.m_type == RubyRequestType_ATOMIC_RETURN) ||
                   (seq_req.m_type == RubyRequestType_ATOMIC_NO_RETURN));
            recordMissLatency(&seq_req, true, mach, externalHit,
                              initialRequestTime, forwardRequestTime,
                              firstResponseTime);
        } else {
            // Read, Write or Atomic request:
            // reissue request to the cache hierarchy
            // (we don't know if the op was performed remotely)
            issueRequest(seq_req.pkt, seq_req.m_second_type);
            break;
        }

        // Atomics clean the monitor entry
        llscClearMonitor(address);

        markRemoved();
        ruby_request = false;
        hitCallback(&seq_req, data, true, mach, externalHit,
                    initialRequestTime, forwardRequestTime,
                    firstResponseTime, false);
        seq_req_list.pop_front();
    }

    // free all outstanding requests corresponding to this address
    if (seq_req_list.empty()) {
        m_RequestTable.erase(address);
    }
}

void
Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data,
                       bool llscSuccess,
                       const MachineType mach, const bool externalHit,
                       const Cycles initialRequestTime,
                       const Cycles forwardRequestTime,
                       const Cycles firstResponseTime,
                       const bool was_coalesced)
{
    warn_once("Replacement policy updates recently became the responsibility "
              "of SLICC state machines. Make sure to setMRU() near callbacks "
              "in .sm files!");

    PacketPtr pkt = srequest->pkt;
    Addr request_address(pkt->getAddr());
    RubyRequestType type = srequest->m_type;

    if (was_coalesced) {
        // Notify the controller about a coalesced request so it can properly
        // account for it in its hit/miss stats and/or train prefetchers
        // (this is protocol-dependent)
        m_controller->notifyCoalesced(request_address, type, pkt->req,
                                      data, externalHit);
    }

    // Load-linked handling
    if (type == RubyRequestType_Load_Linked) {
        Addr line_addr = makeLineAddress(request_address);
        llscLoadLinked(line_addr);
    }

    DPRINTF(RubyHitMiss, "Cache %s at %#x\n",
            externalHit ? "miss" : "hit",
            printAddress(request_address));

    // update the data unless it is a non-data-carrying flush
    if (m_ruby_system->getWarmupEnabled()) {
        data.setData(pkt);
    } else if (!pkt->isFlush()) {
        if ((type == RubyRequestType_LD) ||
            (type == RubyRequestType_IFETCH) ||
            (type == RubyRequestType_RMW_Read) ||
            (type == RubyRequestType_Locked_RMW_Read) ||
            (type == RubyRequestType_Load_Linked) ||
            (type == RubyRequestType_ATOMIC_RETURN)) {

            // For software prefetches, since we've already sent an early
            // response back to the core, we can just ignore this
            if (pkt->cmd.isSWPrefetch()) {
                delete pkt;
                return;
            }

            pkt->setData(
                data.getData(getOffset(request_address), pkt->getSize()));

            if (type == RubyRequestType_ATOMIC_RETURN) {
                DPRINTF(RubySequencer, "ATOMIC RETURN data %s\n", data);
            } else {
                DPRINTF(RubySequencer, "read data %s\n", data);
            }
        } else if (pkt->req->isSwap()) {
            assert(!pkt->isMaskedWrite());
            std::vector<uint8_t> overwrite_val(pkt->getSize());
            pkt->writeData(&overwrite_val[0]);
            pkt->setData(
                data.getData(getOffset(request_address), pkt->getSize()));
            data.setData(&overwrite_val[0],
                         getOffset(request_address), pkt->getSize());
            DPRINTF(RubySequencer, "swap data %s\n", data);
        } else if (pkt->isAtomicOp()) {
            // Set the data in the packet to the old value in the cache
            pkt->setData(
                data.getData(getOffset(request_address), pkt->getSize()));
            DPRINTF(RubySequencer, "AMO original data %s\n", data);
            // execute AMO operation
            (*(pkt->getAtomicOp()))(
                data.getDataMod(getOffset(request_address)));
            DPRINTF(RubySequencer, "AMO new data %s\n", data);
        } else if (type != RubyRequestType_Store_Conditional || llscSuccess) {
            // Types of stores set the actual data here, apart from
            // failed Store Conditional requests
            data.setData(pkt);
            DPRINTF(RubySequencer, "set data %s\n", data);
        }
    }

    // If using the RubyTester, update the RubyTester sender state's
    // subBlock with the received data. The tester will later access
    // this state.
    if (m_usingRubyTester) {
        DPRINTF(RubySequencer, "hitCallback %s 0x%x using RubyTester\n",
                pkt->cmdString(), pkt->getAddr());
        RubyTester::SenderState* testerSenderState =
            pkt->findNextSenderState<RubyTester::SenderState>();
        assert(testerSenderState);
        testerSenderState->subBlock.mergeFrom(data);
    }

    RubySystem *rs = m_ruby_system;
    if (m_ruby_system->getWarmupEnabled()) {
        assert(pkt->req);
        delete pkt;
        rs->m_cache_recorder->enqueueNextFetchRequest();
    } else if (m_ruby_system->getCooldownEnabled()) {
        delete pkt;
        rs->m_cache_recorder->enqueueNextFlushRequest();
    } else {
        ruby_hit_callback(pkt);
        testDrainComplete();
    }
}

void
Sequencer::unaddressedCallback(Addr unaddressedReqId,
                               RubyRequestType reqType,
                               const MachineType mach,
                               const Cycles initialRequestTime,
                               const Cycles forwardRequestTime,
                               const Cycles firstResponseTime)
{
    DPRINTF(RubySequencer, "unaddressedCallback ID:%08x type:%d\n",
            unaddressedReqId, reqType);

    switch (reqType) {
    case RubyRequestType_TLBI_EXT_SYNC:
      {
        // This should trigger the CPU to wait for stale translations
        // and send an EXT_SYNC_COMP once complete.

        // Don't look for the ID in our requestTable.
        // It won't be there because we didn't request this Sync
        ruby_stale_translation_callback(unaddressedReqId);
        break;
      }
    case RubyRequestType_TLBI:
    case RubyRequestType_TLBI_SYNC:
      {
        // These signal that a TLBI operation that this core initiated
        // of the respective type (TLBI or Sync) has finished.

        assert(m_UnaddressedRequestTable.find(unaddressedReqId)
               != m_UnaddressedRequestTable.end());

        {
            SequencerRequest &seq_req =
                m_UnaddressedRequestTable.at(unaddressedReqId);
            assert(seq_req.m_type == reqType);

            PacketPtr pkt = seq_req.pkt;

            markRemoved();
            ruby_unaddressed_callback(pkt);
        }

        m_UnaddressedRequestTable.erase(unaddressedReqId);
        break;
      }
    default:
        panic("Unexpected TLBI RubyRequestType");
    }
}

void
Sequencer::completeHitCallback(std::vector<PacketPtr> &mylist)
{
    for (auto& pkt : mylist) {
        // When Ruby is in warmup or cooldown phase, the requests come
        // from the cache recorder. They do not track which port to use
        // and do not need to send the response back
        if (!m_ruby_system->getWarmupEnabled()
            && !m_ruby_system->getCooldownEnabled()) {
            RubyPort::SenderState *ss =
                safe_cast<RubyPort::SenderState *>(pkt->senderState);
            MemResponsePort *port = ss->port;
            assert(port != NULL);

            pkt->senderState = ss->predecessor;

            if (pkt->cmd != MemCmd::WriteReq) {
                // for WriteReq, we keep the original senderState until
                // writeCompleteCallback
                delete ss;
            }

            port->hitCallback(pkt);
        }
    }

    RubySystem *rs = m_ruby_system;
    if (m_ruby_system->getWarmupEnabled()) {
        rs->m_cache_recorder->enqueueNextFetchRequest();
    } else if (m_ruby_system->getCooldownEnabled()) {
        rs->m_cache_recorder->enqueueNextFlushRequest();
    } else {
        testDrainComplete();
    }
}

void
Sequencer::invL1Callback()
{
    // Since L1 invalidate is currently done with paddr = 0
    assert(m_cache_inv_pkt && m_num_pending_invs > 0);

    m_num_pending_invs--;

    if (m_num_pending_invs == 0) {
        std::vector<PacketPtr> pkt_list { m_cache_inv_pkt };
        m_cache_inv_pkt = nullptr;
        completeHitCallback(pkt_list);
    }
}

void
Sequencer::invL1()
{
    int size = m_dataCache_ptr->getNumBlocks();
    DPRINTF(RubySequencer,
            "There are %d Invalidations outstanding before Cache Walk\n",
            m_num_pending_invs);
    // Walk the cache
    for (int i = 0; i < size; i++) {
        Addr addr = m_dataCache_ptr->getAddressAtIdx(i);
        // Evict Read-only data
        RubyRequestType request_type = RubyRequestType_REPLACEMENT;
        std::shared_ptr<RubyRequest> msg = std::make_shared<RubyRequest>(
            clockEdge(), m_ruby_system->getBlockSizeBytes(), m_ruby_system,
            addr, 0, 0, request_type, RubyAccessMode_Supervisor,
            nullptr);
        DPRINTF(RubySequencer, "Evicting addr 0x%x\n", addr);
        assert(m_mandatory_q_ptr != NULL);
        Tick latency = cyclesToTicks(
            m_controller->mandatoryQueueLatency(request_type));
        m_mandatory_q_ptr->enqueue(msg, clockEdge(), latency,
                                   m_ruby_system->getRandomization(),
                                   m_ruby_system->getWarmupEnabled());
        m_num_pending_invs++;
    }
    DPRINTF(RubySequencer,
            "There are %d Invalidations outstanding after Cache Walk\n",
            m_num_pending_invs);
}

bool
Sequencer::empty() const
{
    return m_RequestTable.empty() &&
           m_UnaddressedRequestTable.empty();
}

RequestStatus
Sequencer::makeRequest(PacketPtr pkt)
{
    // HTM abort signals must be allowed to reach the Sequencer
    // the same cycle they are issued. They cannot be retried.
    if ((m_outstanding_count >= m_max_outstanding_requests) &&
        !pkt->req->isHTMAbort()) {
        return RequestStatus_BufferFull;
    }

    RubyRequestType primary_type = RubyRequestType_NULL;
    RubyRequestType secondary_type = RubyRequestType_NULL;

    if (pkt->isLLSC()) {
        // LL/SC instructions need to be handled carefully by the cache
        // coherence protocol to ensure they follow the proper semantics. In
        // particular, by identifying the operations as atomic, the protocol
        // should understand that migratory sharing optimizations should not
        // be performed (i.e. a load between the LL and SC should not steal
        // away exclusive permission).
        //
        // The following logic works correctly with the semantics
        // of ARMv8 LDEX/STEX instructions.
        const ProtocolInfo &protocol_info = m_ruby_system->getProtocolInfo();

        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing SC\n");
            primary_type = RubyRequestType_Store_Conditional;
            if (protocol_info.getUseSecondaryStoreConditional()) {
                secondary_type = RubyRequestType_Store_Conditional;
            } else {
                secondary_type = RubyRequestType_ST;
            }
        } else {
            DPRINTF(RubySequencer, "Issuing LL\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Load_Linked;
            if (protocol_info.getUseSecondaryLoadLinked()) {
                secondary_type = RubyRequestType_Load_Linked;
            } else {
                secondary_type = RubyRequestType_LD;
            }
        }
    } else if (pkt->req->isLockedRMW()) {
        //
        // x86 locked instructions are translated to store cache coherence
        // requests because these requests should always be treated as read
        // exclusive operations and should leverage any migratory sharing
        // optimization built into the protocol.
        //
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
            primary_type = RubyRequestType_Locked_RMW_Write;
        } else {
            DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Locked_RMW_Read;
        }
        secondary_type = RubyRequestType_ST;
    } else if (pkt->req->isTlbiCmd()) {
        primary_type = secondary_type = tlbiCmdToRubyRequestType(pkt);
        DPRINTF(RubySequencer, "Issuing TLBI\n");
#if defined (PROTOCOL_CHI)
    } else if (pkt->isAtomicOp()) {
        if (pkt->req->isAtomicReturn()) {
            DPRINTF(RubySequencer, "Issuing ATOMIC RETURN\n");
            primary_type = secondary_type =
                RubyRequestType_ATOMIC_RETURN;
        } else {
            DPRINTF(RubySequencer, "Issuing ATOMIC NO RETURN\n");
            primary_type = secondary_type =
                RubyRequestType_ATOMIC_NO_RETURN;
        }
#endif
    } else if (pkt->req->hasNoAddr()) {
        primary_type = secondary_type = RubyRequestType_hasNoAddr;
    } else {
        //
        // To support SwapReq, we need to check isWrite() first: a SwapReq
        // should always be treated like a write, but since a SwapReq implies
        // both isWrite() and isRead() are true, check isWrite() first here.
        //
        if (pkt->isWrite()) {
            //
            // Note: M5 packets do not differentiate ST from RMW_Write
            //
            primary_type = secondary_type = RubyRequestType_ST;
        } else if (pkt->isRead()) {
            // hardware transactional memory commands
            if (pkt->req->isHTMCmd()) {
                primary_type = secondary_type = htmCmdToRubyRequestType(pkt);
            } else if (pkt->req->isInstFetch()) {
                primary_type = secondary_type = RubyRequestType_IFETCH;
            } else {
                if (pkt->req->isReadModifyWrite()) {
                    primary_type = RubyRequestType_RMW_Read;
                    secondary_type = RubyRequestType_ST;
                } else {
                    primary_type = secondary_type = RubyRequestType_LD;
                }
            }
        } else if (pkt->isFlush()) {
            primary_type = secondary_type = RubyRequestType_FLUSH;
        } else if (pkt->cmd == MemCmd::MemSyncReq) {
            primary_type = secondary_type = RubyRequestType_REPLACEMENT;
            assert(!m_cache_inv_pkt);
            m_cache_inv_pkt = pkt;
            invL1();
        } else {
            panic("Cannot convert packet [%s] to ruby request\n",
                  pkt->print());
        }
    }

    // Check if the line is blocked for a Locked_RMW
    if (!pkt->req->isMemMgmt() &&
        m_controller->isBlocked(makeLineAddress(pkt->getAddr())) &&
        (primary_type != RubyRequestType_Locked_RMW_Write)) {
        // Return that this request's cache line address aliases with
        // a prior request that locked the cache line. The request cannot
        // proceed until the cache line is unlocked by a Locked_RMW_Write
        return RequestStatus_Aliased;
    }

    RequestStatus status = insertRequest(pkt, primary_type, secondary_type);

    // It is OK to receive RequestStatus_Aliased, it can be considered Issued
    if (status != RequestStatus_Ready && status != RequestStatus_Aliased)
        return status;
    // non-aliased with any existing request in the request table, just issue
    // to the cache
    if (status != RequestStatus_Aliased)
        issueRequest(pkt, secondary_type);

    // TODO: issue hardware prefetches here
    return RequestStatus_Issued;
}
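
// Example of the classification above (hypothetical packet): an ARMv8
// load-exclusive arrives as a read packet with isLLSC() set, so it maps to
// primary_type Load_Linked, with secondary_type falling back to LD unless
// the protocol opts in via getUseSecondaryLoadLinked(); the matching
// store-exclusive maps to Store_Conditional/ST the same way, and only
// non-aliased requests reach issueRequest().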

void
Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
{
    assert(pkt != NULL);
    ContextID proc_id = pkt->req->hasContextId() ?
        pkt->req->contextId() : InvalidContextID;

    ContextID core_id = coreId();

    // If valid, copy the pc to the ruby request
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    int blk_size = m_ruby_system->getBlockSizeBytes();

    // check if the packet has data, as for example prefetch and flush
    // requests do not
    std::shared_ptr<RubyRequest> msg;
    if (pkt->req->isMemMgmt()) {
        msg = std::make_shared<RubyRequest>(clockEdge(), blk_size,
                                            m_ruby_system,
                                            pc, secondary_type,
                                            RubyAccessMode_Supervisor, pkt,
                                            proc_id, core_id);

        DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s\n",
                 curTick(), m_version, "Seq", "Begin", "", "",
                 RubyRequestType_to_string(secondary_type));

        if (pkt->req->isTlbiCmd()) {
            msg->m_isTlbi = true;
            switch (secondary_type) {
            case RubyRequestType_TLBI_EXT_SYNC_COMP:
                msg->m_tlbiTransactionUid = pkt->req->getExtraData();
                break;
            case RubyRequestType_TLBI:
            case RubyRequestType_TLBI_SYNC:
                msg->m_tlbiTransactionUid = \
                    getCurrentUnaddressedTransactionID();
                break;
            default:
                panic("Unexpected TLBI RubyRequestType");
            }
            DPRINTF(RubySequencer, "Issuing TLBI %016x\n",
                    msg->m_tlbiTransactionUid);
        }
    } else {
        msg = std::make_shared<RubyRequest>(clockEdge(), blk_size,
                                            m_ruby_system,
                                            pkt->getAddr(), pkt->getSize(),
                                            pc, secondary_type,
                                            RubyAccessMode_Supervisor, pkt,
                                            PrefetchBit_No, proc_id, core_id);

        if (pkt->isAtomicOp() &&
            ((secondary_type == RubyRequestType_ATOMIC_RETURN) ||
             (secondary_type == RubyRequestType_ATOMIC_NO_RETURN))) {
            // Create the blocksize, access mask and atomicops
            uint32_t offset = getOffset(pkt->getAddr());
            std::vector<std::pair<int,AtomicOpFunctor*>> atomicOps;
            atomicOps.push_back(std::make_pair<int,AtomicOpFunctor*>
                (offset, pkt->getAtomicOp()));

            msg->setWriteMask(offset, pkt->getSize(), atomicOps);
        }

        DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %s\n",
                 curTick(), m_version, "Seq", "Begin", "", "",
                 printAddress(msg->getPhysicalAddress()),
                 RubyRequestType_to_string(secondary_type));
    }

    // hardware transactional memory
    // If the request originates in a transaction,
    // then mark the Ruby message as such.
    if (pkt->isHtmTransactional()) {
        msg->m_htmFromTransaction = true;
        msg->m_htmTransactionUid = pkt->getHtmTransactionUid();
    }

    Tick latency = cyclesToTicks(
        m_controller->mandatoryQueueLatency(secondary_type));
    assert(latency > 0);

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, clockEdge(), latency,
                               m_ruby_system->getRandomization(),
                               m_ruby_system->getWarmupEnabled());
}

template <class KEY, class VALUE>
std::ostream &
operator<<(std::ostream &out, const std::unordered_map<KEY, VALUE> &map)
{
    for (const auto &[key, values] : map) {
        out << "[ " << key << " =";
        for (const auto &seq_req : values) {
            out << " " << RubyRequestType_to_string(seq_req.m_second_type);
        }
        out << " ]";
    }
    return out;
}

void
Sequencer::print(std::ostream& out) const
{
    out << "[Sequencer: " << m_version
        << ", outstanding requests: " << m_outstanding_count
        << ", request table: " << m_RequestTable
        << "]";
}

void
Sequencer::recordRequestType(SequencerRequestType requestType) {
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            SequencerRequestType_to_string(requestType));
}

void
Sequencer::evictionCallback(Addr address)
{
    llscClearMonitor(address);
    ruby_eviction_callback(address);
}

void
Sequencer::incrementUnaddressedTransactionCnt()
{
    m_unaddressedTransactionCnt++;
    // Limit m_unaddressedTransactionCnt to 32 bits,
    // top 32 bits should always be zeroed out
    uint64_t aligned_txid = \
        m_unaddressedTransactionCnt << m_ruby_system->getBlockSizeBits();

    if (aligned_txid > 0xFFFFFFFFull) {
        m_unaddressedTransactionCnt = 0;
    }
}

uint64_t
Sequencer::getCurrentUnaddressedTransactionID() const
{
    return (
        uint64_t(m_version & 0xFFFFFFFF) << 32) |
        (m_unaddressedTransactionCnt << m_ruby_system->getBlockSizeBits()
    );
}
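
// Worked example (hypothetical values): with m_version = 3, 64-byte blocks
// (getBlockSizeBits() == 6) and m_unaddressedTransactionCnt = 5, the ID is
// (3 << 32) | (5 << 6) = 0x300000140. The version occupies the upper 32
// bits, and incrementUnaddressedTransactionCnt() wraps the counter before
// the shifted value can spill past 32 bits, so IDs from different
// sequencers never collide.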

} // namespace ruby
} // namespace gem5