gem5 v24.0.0.0
Sequencer.cc
1/*
2 * Copyright (c) 2019-2021 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
15 * Copyright (c) 2013 Advanced Micro Devices, Inc.
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#include "mem/ruby/system/Sequencer.hh"
43
44#include "arch/x86/ldstflags.hh"
45#include "base/compiler.hh"
46#include "base/logging.hh"
47#include "base/str.hh"
48#include "cpu/testers/rubytest/RubyTester.hh"
49#include "debug/LLSC.hh"
50#include "debug/MemoryAccess.hh"
51#include "debug/ProtocolTrace.hh"
52#include "debug/RubyHitMiss.hh"
53#include "debug/RubySequencer.hh"
54#include "debug/RubyStats.hh"
55#include "mem/packet.hh"
56#include "mem/ruby/profiler/Profiler.hh"
57#include "mem/ruby/protocol/PrefetchBit.hh"
58#include "mem/ruby/protocol/RubyAccessMode.hh"
59#include "mem/ruby/slicc_interface/RubyRequest.hh"
60#include "mem/ruby/slicc_interface/RubySlicc_Util.hh"
61#include "mem/ruby/system/RubySystem.hh"
62#include "sim/system.hh"
63
64namespace gem5
65{
66
67namespace ruby
68{
69
70Sequencer::Sequencer(const Params &p)
71 : RubyPort(p), m_IncompleteTimes(MachineType_NUM),
72 deadlockCheckEvent([this]{ wakeup(); }, "Sequencer deadlock check")
73{
74 m_outstanding_count = 0;
75
76 m_dataCache_ptr = p.dcache;
77 m_max_outstanding_requests = p.max_outstanding_requests;
78 m_deadlock_threshold = p.deadlock_threshold;
79
80 m_coreId = p.coreid; // for tracking the two CorePair sequencers
81 assert(m_max_outstanding_requests > 0);
82 assert(m_deadlock_threshold > 0);
83
84 m_unaddressedTransactionCnt = 0;
85
86 m_runningGarnetStandalone = p.garnet_standalone;
87
88 m_num_pending_invs = 0;
89 m_cache_inv_pkt = nullptr;
90
91 // These statistical variables are not for display.
92 // The profiler will collate these across different
93 // sequencers and display those collated statistics.
94 m_outstandReqHist.init(10);
95 m_latencyHist.init(10);
96 m_hitLatencyHist.init(10);
97 m_missLatencyHist.init(10);
98
99 for (int i = 0; i < RubyRequestType_NUM; i++) {
100 m_typeLatencyHist.push_back(new statistics::Histogram());
101 m_typeLatencyHist[i]->init(10);
102
103 m_hitTypeLatencyHist.push_back(new statistics::Histogram());
104 m_hitTypeLatencyHist[i]->init(10);
105
106 m_missTypeLatencyHist.push_back(new statistics::Histogram());
107 m_missTypeLatencyHist[i]->init(10);
108 }
109
110 for (int i = 0; i < MachineType_NUM; i++) {
111 m_hitMachLatencyHist.push_back(new statistics::Histogram());
112 m_hitMachLatencyHist[i]->init(10);
113
114 m_missMachLatencyHist.push_back(new statistics::Histogram());
115 m_missMachLatencyHist[i]->init(10);
116
117 m_IssueToInitialDelayHist.push_back(new statistics::Histogram());
118 m_IssueToInitialDelayHist[i]->init(10);
119
120 m_InitialToForwardDelayHist.push_back(new statistics::Histogram());
121 m_InitialToForwardDelayHist[i]->init(10);
122
123 m_ForwardToFirstResponseDelayHist.push_back(
124 new statistics::Histogram());
125 m_ForwardToFirstResponseDelayHist[i]->init(10);
126
127 m_FirstResponseToCompletionDelayHist.push_back(
128 new statistics::Histogram());
129 m_FirstResponseToCompletionDelayHist[i]->init(10);
130 }
131
132 for (int i = 0; i < RubyRequestType_NUM; i++) {
133 m_hitTypeMachLatencyHist.push_back(
134 std::vector<statistics::Histogram *>());
135 m_missTypeMachLatencyHist.push_back(
136 std::vector<statistics::Histogram *>());
137
138 for (int j = 0; j < MachineType_NUM; j++) {
139 m_hitTypeMachLatencyHist[i].push_back(new statistics::Histogram());
140 m_hitTypeMachLatencyHist[i][j]->init(10);
141
142 m_missTypeMachLatencyHist[i].push_back(
143 new statistics::Histogram());
144 m_missTypeMachLatencyHist[i][j]->init(10);
145 }
146 }
147
148}
149
150Sequencer::~Sequencer()
151{
152}
153
154void
155Sequencer::llscLoadLinked(const Addr claddr)
156{
158 "%s must have a dcache object to support LLSC requests.", name());
159 AbstractCacheEntry *line = m_dataCache_ptr->lookup(claddr);
160 if (line) {
161 line->setLocked(m_version);
162 DPRINTF(LLSC, "LLSC Monitor - inserting load linked - "
163 "addr=0x%lx - cpu=%u\n", claddr, m_version);
164 }
165}
166
167void
168Sequencer::llscClearMonitor(const Addr claddr)
169{
170 // clear monitor is called for all stores and evictions
171 if (m_dataCache_ptr == NULL)
172 return;
173 AbstractCacheEntry *line = m_dataCache_ptr->lookup(claddr);
174 if (line && line->isLocked(m_version)) {
175 line->clearLocked();
176 DPRINTF(LLSC, "LLSC Monitor - clearing due to store - "
177 "addr=0x%lx - cpu=%u\n", claddr, m_version);
178 }
179}
180
181bool
182Sequencer::llscStoreConditional(const Addr claddr)
183{
185 "%s must have a dcache object to support LLSC requests.", name());
186 AbstractCacheEntry *line = m_dataCache_ptr->lookup(claddr);
187 if (!line)
188 return false;
189
190 DPRINTF(LLSC, "LLSC Monitor - clearing due to "
191 "store conditional - "
192 "addr=0x%lx - cpu=%u\n",
193 claddr, m_version);
194
195 if (line->isLocked(m_version)) {
196 line->clearLocked();
197 return true;
198 } else {
199 line->clearLocked();
200 return false;
201 }
202}
203
204bool
205Sequencer::llscCheckMonitor(const Addr address)
206{
207 assert(m_dataCache_ptr != NULL);
208 const Addr claddr = makeLineAddress(address);
209 AbstractCacheEntry *line = m_dataCache_ptr->lookup(claddr);
210 if (!line)
211 return false;
212
213 if (line->isLocked(m_version)) {
214 return true;
215 } else {
216 return false;
217 }
218}
219
220void
221Sequencer::llscClearLocalMonitor()
222{
223 m_dataCache_ptr->clearLockedAll(m_version);
224}
225
226void
227Sequencer::wakeup()
228{
229 assert(drainState() != DrainState::Draining);
230
231 // Check for deadlock of any of the requests
232 Cycles current_time = curCycle();
233
234 // Check across all outstanding requests
235 [[maybe_unused]] int total_outstanding = 0;
236
237 for (const auto &table_entry : m_RequestTable) {
238 for (const auto &seq_req : table_entry.second) {
239 if (current_time - seq_req.issue_time < m_deadlock_threshold)
240 continue;
241
242 panic("Possible Deadlock detected. Aborting!\n version: %d "
243 "request.paddr: 0x%x m_readRequestTable: %d current time: "
244 "%u issue_time: %d difference: %d\n", m_version,
245 seq_req.pkt->getAddr(), table_entry.second.size(),
246 current_time * clockPeriod(), seq_req.issue_time
247 * clockPeriod(), (current_time * clockPeriod())
248 - (seq_req.issue_time * clockPeriod()));
249 }
250 total_outstanding += table_entry.second.size();
251 }
252
253 assert(m_outstanding_count == total_outstanding);
254
255 if (m_outstanding_count > 0) {
256 // If there are still outstanding requests, keep checking
257 schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
258 }
259}
260
261int
262Sequencer::functionalWrite(Packet *func_pkt)
263{
264 int num_written = RubyPort::functionalWrite(func_pkt);
265
266 for (const auto &table_entry : m_RequestTable) {
267 for (const auto& seq_req : table_entry.second) {
268 if (seq_req.functionalWrite(func_pkt))
269 ++num_written;
270 }
271 }
272
273 return num_written;
274}
275void
276Sequencer::resetStats()
277{
278 m_outstandReqHist.reset();
279 m_latencyHist.reset();
280 m_hitLatencyHist.reset();
281 m_missLatencyHist.reset();
282 for (int i = 0; i < RubyRequestType_NUM; i++) {
283 m_typeLatencyHist[i]->reset();
284 m_hitTypeLatencyHist[i]->reset();
285 m_missTypeLatencyHist[i]->reset();
286 for (int j = 0; j < MachineType_NUM; j++) {
287 m_hitTypeMachLatencyHist[i][j]->reset();
288 m_missTypeMachLatencyHist[i][j]->reset();
289 }
290 }
291
292 for (int i = 0; i < MachineType_NUM; i++) {
293 m_missMachLatencyHist[i]->reset();
294 m_hitMachLatencyHist[i]->reset();
295
296 m_IssueToInitialDelayHist[i]->reset();
297 m_InitialToForwardDelayHist[i]->reset();
298 m_ForwardToFirstResponseDelayHist[i]->reset();
299 m_FirstResponseToCompletionDelayHist[i]->reset();
300
301 m_IncompleteTimes[i] = 0;
302 }
303}
304
305// Insert the request in the request table. Return RequestStatus_Aliased
306// if the entry was already present.
307RequestStatus
308Sequencer::insertRequest(PacketPtr pkt, RubyRequestType primary_type,
309 RubyRequestType secondary_type)
310{
311 // See if we should schedule a deadlock check
312 if (!deadlockCheckEvent.scheduled() &&
313 drainState() != DrainState::Draining) {
314 schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
315 }
316
317 if (isTlbiCmdRequest(primary_type)) {
318 assert(primary_type == secondary_type);
319
320 switch (primary_type) {
321 case RubyRequestType_TLBI_EXT_SYNC_COMP:
322 // Don't have to store any data on this
323 break;
324 case RubyRequestType_TLBI:
325 case RubyRequestType_TLBI_SYNC:
326 {
327 incrementUnaddressedTransactionCnt();
328
329 // returns pair<inserted element, was inserted>
330 [[maybe_unused]] auto insert_data = \
331 m_UnaddressedRequestTable.emplace(
332 getCurrentUnaddressedTransactionID(),
333 SequencerRequest(
334 pkt, primary_type, secondary_type, curCycle()));
335
336 // if insert_data.second is false, wasn't inserted
337 assert(insert_data.second &&
338 "Another TLBI request with the same ID exists");
339
340 DPRINTF(RubySequencer, "Inserting TLBI request %016x\n",
341 getCurrentUnaddressedTransactionID());
342
343 break;
344 }
345
346 default:
347 panic("Unexpected TLBI RubyRequestType");
348 }
349
350 return RequestStatus_Ready;
351 }
352
353 // If command is MemSyncReq, it is used to invalidate the cache.
354 // As the cache invalidation requests are already issued in invL1(),
355 // there is no need to create a new request for the same here.
356 // Instead, return RequestStatus_Aliased, and make the sequencer skip
357 // an extra issueRequest
358 if (pkt->cmd == MemCmd::MemSyncReq) {
359 return RequestStatus_Aliased;
360 }
361
362 Addr line_addr = makeLineAddress(pkt->getAddr());
363 // Check if there is any outstanding request for the same cache line.
364 auto &seq_req_list = m_RequestTable[line_addr];
365 // Create a default entry
366 seq_req_list.emplace_back(pkt, primary_type,
367 secondary_type, curCycle());
368 m_outstanding_count++;
369
370 if (seq_req_list.size() > 1) {
371 return RequestStatus_Aliased;
372 }
373
374 m_outstandReqHist.sample(m_outstanding_count);
375
376 return RequestStatus_Ready;
377}
378
379void
380Sequencer::markRemoved()
381{
382 m_outstanding_count--;
383}
384
385void
386Sequencer::recordMissLatency(SequencerRequest* srequest, bool llscSuccess,
387 const MachineType respondingMach,
388 bool isExternalHit, Cycles initialRequestTime,
389 Cycles forwardRequestTime,
390 Cycles firstResponseTime)
391{
392 RubyRequestType type = srequest->m_type;
393 Cycles issued_time = srequest->issue_time;
394 Cycles completion_time = curCycle();
395
396 assert(curCycle() >= issued_time);
397 Cycles total_lat = completion_time - issued_time;
398
399 if ((initialRequestTime != 0) && (initialRequestTime < issued_time)) {
400 // if the request was combined in the protocol with an earlier request
401 // for the same address, it is possible that it will return an
402 // initialRequestTime corresponding to the earlier request. Since Cycles
403 // is unsigned, we can't let this request get profiled below.
404
405 total_lat = Cycles(0);
406 }
407
408 DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n",
409 curTick(), m_version, "Seq", llscSuccess ? "Done" : "SC_Failed",
410 "", "", printAddress(srequest->pkt->getAddr()), total_lat);
411
412 m_latencyHist.sample(total_lat);
413 m_typeLatencyHist[type]->sample(total_lat);
414
415 if (isExternalHit) {
416 m_missLatencyHist.sample(total_lat);
417 m_missTypeLatencyHist[type]->sample(total_lat);
418
419 if (respondingMach != MachineType_NUM) {
420 m_missMachLatencyHist[respondingMach]->sample(total_lat);
421 m_missTypeMachLatencyHist[type][respondingMach]->sample(total_lat);
422
423 if ((issued_time <= initialRequestTime) &&
424 (initialRequestTime <= forwardRequestTime) &&
425 (forwardRequestTime <= firstResponseTime) &&
426 (firstResponseTime <= completion_time)) {
427
428 m_IssueToInitialDelayHist[respondingMach]->sample(
429 initialRequestTime - issued_time);
430 m_InitialToForwardDelayHist[respondingMach]->sample(
431 forwardRequestTime - initialRequestTime);
432 m_ForwardToFirstResponseDelayHist[respondingMach]->sample(
433 firstResponseTime - forwardRequestTime);
434 m_FirstResponseToCompletionDelayHist[respondingMach]->sample(
435 completion_time - firstResponseTime);
436 } else {
437 m_IncompleteTimes[respondingMach]++;
438 }
439 }
440 } else {
441 m_hitLatencyHist.sample(total_lat);
442 m_hitTypeLatencyHist[type]->sample(total_lat);
443
444 if (respondingMach != MachineType_NUM) {
445 m_hitMachLatencyHist[respondingMach]->sample(total_lat);
446 m_hitTypeMachLatencyHist[type][respondingMach]->sample(total_lat);
447 }
448 }
449}
450
451void
452Sequencer::writeCallbackScFail(Addr address, DataBlock& data)
453{
454 llscClearMonitor(address);
455 writeCallback(address, data);
456}
457
458void
459Sequencer::writeCallback(Addr address, DataBlock& data,
460 const bool externalHit, const MachineType mach,
461 const Cycles initialRequestTime,
462 const Cycles forwardRequestTime,
463 const Cycles firstResponseTime,
464 const bool noCoales)
465{
466 //
467 // Free the whole list as we assume we have had the exclusive access
468 // to this cache line when response for the write comes back
469 //
470 assert(address == makeLineAddress(address));
471 assert(m_RequestTable.find(address) != m_RequestTable.end());
472 auto &seq_req_list = m_RequestTable[address];
473
474 // Perform hitCallback on every cpu request made to this cache block while
475 // ruby request was outstanding. Since only 1 ruby request was made,
476 // profile the ruby latency once.
477 bool ruby_request = true;
478 while (!seq_req_list.empty()) {
479 SequencerRequest &seq_req = seq_req_list.front();
480 // Atomic requests may be executed remotely in the cache hierarchy
481 bool atomic_req =
482 ((seq_req.m_type == RubyRequestType_ATOMIC_RETURN) ||
483 (seq_req.m_type == RubyRequestType_ATOMIC_NO_RETURN));
484
485 if ((noCoales || atomic_req) && !ruby_request) {
486 // Do not process follow-up requests
487 // (e.g. if the full line is not present)
488 // Reissue to the cache hierarchy
489 issueRequest(seq_req.pkt, seq_req.m_second_type);
490 break;
491 }
492
493 if (ruby_request) {
494 assert(seq_req.m_type != RubyRequestType_LD);
495 assert(seq_req.m_type != RubyRequestType_Load_Linked);
496 assert(seq_req.m_type != RubyRequestType_IFETCH);
497 assert(seq_req.m_type != RubyRequestType_ATOMIC_RETURN);
498 assert(seq_req.m_type != RubyRequestType_ATOMIC_NO_RETURN);
499 }
500
501 // handle write request
502 if ((seq_req.m_type != RubyRequestType_LD) &&
503 (seq_req.m_type != RubyRequestType_Load_Linked) &&
504 (seq_req.m_type != RubyRequestType_IFETCH)) {
505 // LL/SC support (tested with ARMv8)
506 bool success = true;
507
508 if (seq_req.m_type != RubyRequestType_Store_Conditional) {
509 // Regular stores to addresses being monitored
510 // will fail (remove) the monitor entry.
511 llscClearMonitor(address);
512 } else {
513 // Store conditionals must first check the monitor
514 // to see whether they will succeed or not
515 success = llscStoreConditional(address);
516 seq_req.pkt->req->setExtraData(success ? 1 : 0);
517 }
518
519 // Handle SLICC block_on behavior for Locked_RMW accesses. NOTE: the
520 // address variable here is assumed to be a line address, so when
521 // blocking buffers, must check line addresses.
522 if (seq_req.m_type == RubyRequestType_Locked_RMW_Read) {
523 // blockOnQueue blocks all first-level cache controller queues
524 // waiting on memory accesses for the specified address that go
525 // to the specified queue. In this case, a Locked_RMW_Write must
526 // go to the mandatory_q before unblocking the first-level
527 // controller. This will block standard loads, stores, ifetches,
528 // etc.
529 m_controller->blockOnQueue(address, m_mandatory_q_ptr);
530 } else if (seq_req.m_type == RubyRequestType_Locked_RMW_Write) {
531 m_controller->unblock(address);
532 }
533
534 if (ruby_request) {
535 recordMissLatency(&seq_req, success, mach, externalHit,
536 initialRequestTime, forwardRequestTime,
537 firstResponseTime);
538 }
539
540 markRemoved();
541 hitCallback(&seq_req, data, success, mach, externalHit,
542 initialRequestTime, forwardRequestTime,
543 firstResponseTime, !ruby_request);
544 ruby_request = false;
545 } else {
546 // handle read request
547 assert(!ruby_request);
548 markRemoved();
549 hitCallback(&seq_req, data, true, mach, externalHit,
550 initialRequestTime, forwardRequestTime,
551 firstResponseTime, !ruby_request);
552 }
553 seq_req_list.pop_front();
554 }
555
556 // free all outstanding requests corresponding to this address
557 if (seq_req_list.empty()) {
558 m_RequestTable.erase(address);
559 }
560}
561
562void
563Sequencer::readCallback(Addr address, DataBlock& data,
564 bool externalHit, const MachineType mach,
565 Cycles initialRequestTime,
566 Cycles forwardRequestTime,
567 Cycles firstResponseTime)
568{
569 //
570 // Free up read requests until we hit the first Write request
571 // or end of the corresponding list.
572 //
573 assert(address == makeLineAddress(address));
574 assert(m_RequestTable.find(address) != m_RequestTable.end());
575 auto &seq_req_list = m_RequestTable[address];
576
577 // Perform hitCallback on every cpu request made to this cache block while
578 // ruby request was outstanding. Since only 1 ruby request was made,
579 // profile the ruby latency once.
580 bool ruby_request = true;
581 while (!seq_req_list.empty()) {
582 SequencerRequest &seq_req = seq_req_list.front();
583 if (ruby_request) {
584 assert((seq_req.m_type == RubyRequestType_LD) ||
585 (seq_req.m_type == RubyRequestType_Load_Linked) ||
586 (seq_req.m_type == RubyRequestType_IFETCH));
587 }
588 if ((seq_req.m_type != RubyRequestType_LD) &&
589 (seq_req.m_type != RubyRequestType_Load_Linked) &&
590 (seq_req.m_type != RubyRequestType_IFETCH) &&
591 (seq_req.m_type != RubyRequestType_REPLACEMENT)) {
592 // Write request: reissue request to the cache hierarchy
593 issueRequest(seq_req.pkt, seq_req.m_second_type);
594 break;
595 }
596 if (ruby_request) {
597 recordMissLatency(&seq_req, true, mach, externalHit,
598 initialRequestTime, forwardRequestTime,
599 firstResponseTime);
600 }
601 markRemoved();
602 hitCallback(&seq_req, data, true, mach, externalHit,
603 initialRequestTime, forwardRequestTime,
604 firstResponseTime, !ruby_request);
605 ruby_request = false;
606 seq_req_list.pop_front();
607 }
608
609 // free all outstanding requests corresponding to this address
610 if (seq_req_list.empty()) {
611 m_RequestTable.erase(address);
612 }
613}
614
615void
616Sequencer::atomicCallback(Addr address, DataBlock& data,
617 const bool externalHit, const MachineType mach,
618 const Cycles initialRequestTime,
619 const Cycles forwardRequestTime,
620 const Cycles firstResponseTime)
621{
622 //
623 // Free the first request (an atomic operation) from the list.
624 // Then issue the next request to ruby system as we cannot
625 // assume the cache line is present in the cache
626 // (the operation could be performed remotely)
627 //
628 assert(address == makeLineAddress(address));
629 assert(m_RequestTable.find(address) != m_RequestTable.end());
630 auto &seq_req_list = m_RequestTable[address];
631
632 // Perform hitCallback only on the first cpu request that
633 // issued the ruby request
634 bool ruby_request = true;
635 while (!seq_req_list.empty()) {
636 SequencerRequest &seq_req = seq_req_list.front();
637
638 if (ruby_request) {
639 // Check that the request was an atomic memory operation
640 // and record the latency
641 assert((seq_req.m_type == RubyRequestType_ATOMIC_RETURN) ||
642 (seq_req.m_type == RubyRequestType_ATOMIC_NO_RETURN));
643 recordMissLatency(&seq_req, true, mach, externalHit,
644 initialRequestTime, forwardRequestTime,
645 firstResponseTime);
646 } else {
647 // Read, Write or Atomic request:
648 // reissue request to the cache hierarchy
649 // (we don't know if the op was performed remotely)
650 issueRequest(seq_req.pkt, seq_req.m_second_type);
651 break;
652 }
653
654 // Atomics clean the monitor entry
655 llscClearMonitor(address);
656
657 markRemoved();
658 ruby_request = false;
659 hitCallback(&seq_req, data, true, mach, externalHit,
660 initialRequestTime, forwardRequestTime,
661 firstResponseTime, false);
662 seq_req_list.pop_front();
663 }
664
665 // free all outstanding requests corresponding to this address
666 if (seq_req_list.empty()) {
667 m_RequestTable.erase(address);
668 }
669}
670
671void
672Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data,
673 bool llscSuccess,
674 const MachineType mach, const bool externalHit,
675 const Cycles initialRequestTime,
676 const Cycles forwardRequestTime,
677 const Cycles firstResponseTime,
678 const bool was_coalesced)
679{
680 warn_once("Replacement policy updates recently became the responsibility "
681 "of SLICC state machines. Make sure to setMRU() near callbacks "
682 "in .sm files!");
683
684 PacketPtr pkt = srequest->pkt;
685 Addr request_address(pkt->getAddr());
686 RubyRequestType type = srequest->m_type;
687
688 if (was_coalesced) {
689 // Notify the controller about a coalesced request so it can properly
690 // account for it in its hit/miss stats and/or train prefetchers
691 // (this is protocol-dependent)
692 m_controller->notifyCoalesced(request_address, type, pkt->req,
693 data, externalHit);
694 }
695
696 // Load-linked handling
697 if (type == RubyRequestType_Load_Linked) {
698 Addr line_addr = makeLineAddress(request_address);
699 llscLoadLinked(line_addr);
700 }
701
702 DPRINTF(RubyHitMiss, "Cache %s at %#x\n",
703 externalHit ? "miss" : "hit",
704 printAddress(request_address));
705
706 // update the data unless it is a non-data-carrying flush
707 if (RubySystem::getWarmupEnabled()) {
708 data.setData(pkt);
709 } else if (!pkt->isFlush()) {
710 if ((type == RubyRequestType_LD) ||
711 (type == RubyRequestType_IFETCH) ||
712 (type == RubyRequestType_RMW_Read) ||
713 (type == RubyRequestType_Locked_RMW_Read) ||
714 (type == RubyRequestType_Load_Linked) ||
715 (type == RubyRequestType_ATOMIC_RETURN)) {
716 pkt->setData(
717 data.getData(getOffset(request_address), pkt->getSize()));
718
719 if (type == RubyRequestType_ATOMIC_RETURN) {
720 DPRINTF(RubySequencer, "ATOMIC RETURN data %s\n", data);
721 } else {
722 DPRINTF(RubySequencer, "read data %s\n", data);
723 }
724 } else if (pkt->req->isSwap()) {
725 assert(!pkt->isMaskedWrite());
726 std::vector<uint8_t> overwrite_val(pkt->getSize());
727 pkt->writeData(&overwrite_val[0]);
728 pkt->setData(
729 data.getData(getOffset(request_address), pkt->getSize()));
730 data.setData(&overwrite_val[0],
731 getOffset(request_address), pkt->getSize());
732 DPRINTF(RubySequencer, "swap data %s\n", data);
733 } else if (pkt->isAtomicOp()) {
734 // Set the data in the packet to the old value in the cache
735 pkt->setData(
736 data.getData(getOffset(request_address), pkt->getSize()));
737 DPRINTF(RubySequencer, "AMO original data %s\n", data);
738 // execute AMO operation
739 (*(pkt->getAtomicOp()))(
740 data.getDataMod(getOffset(request_address)));
741 DPRINTF(RubySequencer, "AMO new data %s\n", data);
742 } else if (type != RubyRequestType_Store_Conditional || llscSuccess) {
743 // Types of stores set the actual data here, apart from
744 // failed Store Conditional requests
745 data.setData(pkt);
746 DPRINTF(RubySequencer, "set data %s\n", data);
747 }
748 }
749
750 // If using the RubyTester, update the RubyTester sender state's
751 // subBlock with the received data. The tester will later access
752 // this state.
753 if (m_usingRubyTester) {
754 DPRINTF(RubySequencer, "hitCallback %s 0x%x using RubyTester\n",
755 pkt->cmdString(), pkt->getAddr());
756 RubyTester::SenderState* testerSenderState =
757 pkt->findNextSenderState<RubyTester::SenderState>();
758 assert(testerSenderState);
759 testerSenderState->subBlock.mergeFrom(data);
760 }
761
762 RubySystem *rs = m_ruby_system;
763 if (RubySystem::getWarmupEnabled()) {
764 assert(pkt->req);
765 delete pkt;
766 rs->m_cache_recorder->enqueueNextFetchRequest();
767 } else if (RubySystem::getCooldownEnabled()) {
768 delete pkt;
769 rs->m_cache_recorder->enqueueNextFlushRequest();
770 } else {
771 ruby_hit_callback(pkt);
772 testDrainComplete();
773 }
774}
775
776void
777Sequencer::unaddressedCallback(Addr unaddressedReqId,
778 RubyRequestType reqType,
779 const MachineType mach,
780 const Cycles initialRequestTime,
781 const Cycles forwardRequestTime,
782 const Cycles firstResponseTime)
783{
784 DPRINTF(RubySequencer, "unaddressedCallback ID:%08x type:%d\n",
785 unaddressedReqId, reqType);
786
787 switch (reqType) {
788 case RubyRequestType_TLBI_EXT_SYNC:
789 {
790 // This should trigger the CPU to wait for stale translations
791 // and send an EXT_SYNC_COMP once complete.
792
793 // Don't look for the ID in our requestTable.
794 // It won't be there because we didn't request this Sync
795 ruby_stale_translation_callback(unaddressedReqId);
796 break;
797 }
798 case RubyRequestType_TLBI:
799 case RubyRequestType_TLBI_SYNC:
800 {
801 // These signal that a TLBI operation that this core initiated
802 // of the respective type (TLBI or Sync) has finished.
803
804 assert(m_UnaddressedRequestTable.find(unaddressedReqId)
805 != m_UnaddressedRequestTable.end());
806
807 {
808 SequencerRequest &seq_req =
809 m_UnaddressedRequestTable.at(unaddressedReqId);
810 assert(seq_req.m_type == reqType);
811
812 PacketPtr pkt = seq_req.pkt;
813
814 ruby_unaddressed_callback(pkt);
815 testDrainComplete();
816 }
817
818 m_UnaddressedRequestTable.erase(unaddressedReqId);
819 break;
820 }
821 default:
822 panic("Unexpected TLBI RubyRequestType");
823 }
824}
825
826void
827Sequencer::completeHitCallback(std::vector<PacketPtr> & mylist)
828{
829 for (auto& pkt : mylist) {
830 // When Ruby is in warmup or cooldown phase, the requests come
831 // from the cache recorder. They do not track which port to use
832 // and do not need to send the response back
833 if (!RubySystem::getWarmupEnabled()
834 && !RubySystem::getCooldownEnabled()) {
835 RubyPort::SenderState *ss =
836 safe_cast<RubyPort::SenderState *>(pkt->senderState);
837 MemResponsePort *port = ss->port;
838 assert(port != NULL);
839
840 pkt->senderState = ss->predecessor;
841
842 if (pkt->cmd != MemCmd::WriteReq) {
843 // for WriteReq, we keep the original senderState until
844 // writeCompleteCallback
845 delete ss;
846 }
847
848 port->hitCallback(pkt);
850 }
851 }
852
853 RubySystem *rs = m_ruby_system;
854 if (RubySystem::getWarmupEnabled()) {
855 rs->m_cache_recorder->enqueueNextFetchRequest();
856 } else if (RubySystem::getCooldownEnabled()) {
857 rs->m_cache_recorder->enqueueNextFlushRequest();
858 } else {
859 testDrainComplete();
860 }
861}
862
863void
864Sequencer::invL1Callback()
865{
866 // Since L1 invalidate is currently done with paddr = 0
867 assert(m_cache_inv_pkt && m_num_pending_invs > 0);
868
869 m_num_pending_invs--;
870
871 if (m_num_pending_invs == 0) {
872 std::vector<PacketPtr> pkt_list { m_cache_inv_pkt };
873 m_cache_inv_pkt = nullptr;
874 completeHitCallback(pkt_list);
875 }
876}
877
878void
879Sequencer::invL1()
880{
881 int size = m_dataCache_ptr->getNumBlocks();
882 DPRINTF(RubySequencer,
883 "There are %d Invalidations outstanding before Cache Walk\n",
884 m_num_pending_invs);
885 // Walk the cache
886 for (int i = 0; i < size; i++) {
887 Addr addr = m_dataCache_ptr->getAddressAtIdx(i);
888 // Evict Read-only data
889 RubyRequestType request_type = RubyRequestType_REPLACEMENT;
890 std::shared_ptr<RubyRequest> msg = std::make_shared<RubyRequest>(
891 clockEdge(), addr, 0, 0,
892 request_type, RubyAccessMode_Supervisor,
893 nullptr);
894 DPRINTF(RubySequencer, "Evicting addr 0x%x\n", addr);
895 assert(m_mandatory_q_ptr != NULL);
896 Tick latency = cyclesToTicks(
897 m_controller->mandatoryQueueLatency(request_type));
898 m_mandatory_q_ptr->enqueue(msg, clockEdge(), latency);
899 m_num_pending_invs++;
900 }
901 DPRINTF(RubySequencer,
902 "There are %d Invalidations outstanding after Cache Walk\n",
903 m_num_pending_invs);
904}
905
906bool
907Sequencer::empty() const
908{
909 return m_RequestTable.empty() &&
910 m_UnaddressedRequestTable.empty();
911}
912
913RequestStatus
914Sequencer::makeRequest(PacketPtr pkt)
915{
916 // HTM abort signals must be allowed to reach the Sequencer
917 // the same cycle they are issued. They cannot be retried.
918 if ((m_outstanding_count >= m_max_outstanding_requests) &&
919 !pkt->req->isHTMAbort()) {
920 return RequestStatus_BufferFull;
921 }
922
923 RubyRequestType primary_type = RubyRequestType_NULL;
924 RubyRequestType secondary_type = RubyRequestType_NULL;
925
926 if (pkt->isLLSC()) {
927 // LL/SC instructions need to be handled carefully by the cache
928 // coherence protocol to ensure they follow the proper semantics. In
929 // particular, by identifying the operations as atomic, the protocol
930 // should understand that migratory sharing optimizations should not
931 // be performed (i.e. a load between the LL and SC should not steal
932 // away exclusive permission).
933 //
934 // The following logic works correctly with the semantics
935 // of ARMv8 LDEX/STEX instructions.
936
937 if (pkt->isWrite()) {
938 DPRINTF(RubySequencer, "Issuing SC\n");
939 primary_type = RubyRequestType_Store_Conditional;
940#if defined (PROTOCOL_MESI_Three_Level) || defined (PROTOCOL_MESI_Three_Level_HTM)
941 secondary_type = RubyRequestType_Store_Conditional;
942#else
943 secondary_type = RubyRequestType_ST;
944#endif
945 } else {
946 DPRINTF(RubySequencer, "Issuing LL\n");
947 assert(pkt->isRead());
948 primary_type = RubyRequestType_Load_Linked;
949 secondary_type = RubyRequestType_LD;
950 }
951 } else if (pkt->req->isLockedRMW()) {
952 //
953 // x86 locked instructions are translated to store cache coherence
954 // requests because these requests should always be treated as read
955 // exclusive operations and should leverage any migratory sharing
956 // optimization built into the protocol.
957 //
958 if (pkt->isWrite()) {
959 DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
960 primary_type = RubyRequestType_Locked_RMW_Write;
961 } else {
962 DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
963 assert(pkt->isRead());
964 primary_type = RubyRequestType_Locked_RMW_Read;
965 }
966 secondary_type = RubyRequestType_ST;
967 } else if (pkt->req->isTlbiCmd()) {
968 primary_type = secondary_type = tlbiCmdToRubyRequestType(pkt);
969 DPRINTF(RubySequencer, "Issuing TLBI\n");
970#if defined (PROTOCOL_CHI)
971 } else if (pkt->isAtomicOp()) {
972 if (pkt->req->isAtomicReturn()){
973 DPRINTF(RubySequencer, "Issuing ATOMIC RETURN \n");
974 primary_type = secondary_type =
975 RubyRequestType_ATOMIC_RETURN;
976 } else {
977 DPRINTF(RubySequencer, "Issuing ATOMIC NO RETURN\n");
978 primary_type = secondary_type =
979 RubyRequestType_ATOMIC_NO_RETURN;
980
981 }
982#endif
983 } else {
984 //
985 // To support SwapReq, we need to check isWrite() first: a SwapReq
986 // should always be treated like a write, but since a SwapReq implies
987 // both isWrite() and isRead() are true, check isWrite() first here.
988 //
989 if (pkt->isWrite()) {
990 //
991 // Note: M5 packets do not differentiate ST from RMW_Write
992 //
993 primary_type = secondary_type = RubyRequestType_ST;
994 } else if (pkt->isRead()) {
995 // hardware transactional memory commands
996 if (pkt->req->isHTMCmd()) {
997 primary_type = secondary_type = htmCmdToRubyRequestType(pkt);
998 } else if (pkt->req->isInstFetch()) {
999 primary_type = secondary_type = RubyRequestType_IFETCH;
1000 } else {
1001 if (pkt->req->isReadModifyWrite()) {
1002 primary_type = RubyRequestType_RMW_Read;
1003 secondary_type = RubyRequestType_ST;
1004 } else {
1005 primary_type = secondary_type = RubyRequestType_LD;
1006 }
1007 }
1008 } else if (pkt->isFlush()) {
1009 primary_type = secondary_type = RubyRequestType_FLUSH;
1010 } else if (pkt->cmd == MemCmd::MemSyncReq) {
1011 primary_type = secondary_type = RubyRequestType_REPLACEMENT;
1012 assert(!m_cache_inv_pkt);
1013 m_cache_inv_pkt = pkt;
1014 invL1();
1015 } else {
1016 panic("Unsupported ruby packet type\n");
1017 }
1018 }
1019
1020 // Check if the line is blocked for a Locked_RMW
1021 if (!pkt->req->isMemMgmt() &&
1022 m_controller->isBlocked(makeLineAddress(pkt->getAddr())) &&
1023 (primary_type != RubyRequestType_Locked_RMW_Write)) {
1024 // Return that this request's cache line address aliases with
1025 // a prior request that locked the cache line. The request cannot
1026 // proceed until the cache line is unlocked by a Locked_RMW_Write
1027 return RequestStatus_Aliased;
1028 }
1029
1030 RequestStatus status = insertRequest(pkt, primary_type, secondary_type);
1031
1032 // It is OK to receive RequestStatus_Aliased, it can be considered Issued
1033 if (status != RequestStatus_Ready && status != RequestStatus_Aliased)
1034 return status;
1035 // non-aliased with any existing request in the request table, just issue
1036 // to the cache
1037 if (status != RequestStatus_Aliased)
1038 issueRequest(pkt, secondary_type);
1039
1040 // TODO: issue hardware prefetches here
1041 return RequestStatus_Issued;
1042}
1043
1044void
1045Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
1046{
1047 assert(pkt != NULL);
1048 ContextID proc_id = pkt->req->hasContextId() ?
1049 pkt->req->contextId() : InvalidContextID;
1050
1051 ContextID core_id = coreId();
1052
1053 // If valid, copy the pc to the ruby request
1054 Addr pc = 0;
1055 if (pkt->req->hasPC()) {
1056 pc = pkt->req->getPC();
1057 }
1058
1059 // check if the packet has data as for example prefetch and flush
1060 // requests do not
1061 std::shared_ptr<RubyRequest> msg;
1062 if (pkt->req->isMemMgmt()) {
1063 msg = std::make_shared<RubyRequest>(clockEdge(),
1064 pc, secondary_type,
1065 RubyAccessMode_Supervisor, pkt,
1066 proc_id, core_id);
1067
1068 DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s\n",
1069 curTick(), m_version, "Seq", "Begin", "", "",
1070 RubyRequestType_to_string(secondary_type));
1071
1072 if (pkt->req->isTlbiCmd()) {
1073 msg->m_isTlbi = true;
1074 switch (secondary_type) {
1075 case RubyRequestType_TLBI_EXT_SYNC_COMP:
1076 msg->m_tlbiTransactionUid = pkt->req->getExtraData();
1077 break;
1078 case RubyRequestType_TLBI:
1079 case RubyRequestType_TLBI_SYNC:
1080 msg->m_tlbiTransactionUid = \
1081 getCurrentUnaddressedTransactionID();
1082 break;
1083 default:
1084 panic("Unexpected TLBI RubyRequestType");
1085 }
1086 DPRINTF(RubySequencer, "Issuing TLBI %016x\n",
1087 msg->m_tlbiTransactionUid);
1088 }
1089 } else {
1090 msg = std::make_shared<RubyRequest>(clockEdge(), pkt->getAddr(),
1091 pkt->getSize(), pc, secondary_type,
1092 RubyAccessMode_Supervisor, pkt,
1093 PrefetchBit_No, proc_id, core_id);
1094
1095 if (pkt->isAtomicOp() &&
1096 ((secondary_type == RubyRequestType_ATOMIC_RETURN) ||
1097 (secondary_type == RubyRequestType_ATOMIC_NO_RETURN))){
1098 // Create the blocksize, access mask and atomicops
1099 uint32_t offset = getOffset(pkt->getAddr());
1100 std::vector<std::pair<int,AtomicOpFunctor*>> atomicOps;
1101 atomicOps.push_back(std::make_pair<int,AtomicOpFunctor*>
1102 (offset, pkt->getAtomicOp()));
1103
1104 msg->setWriteMask(offset, pkt->getSize(), atomicOps);
1105 }
1106
1107 DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %s\n",
1108 curTick(), m_version, "Seq", "Begin", "", "",
1109 printAddress(msg->getPhysicalAddress()),
1110 RubyRequestType_to_string(secondary_type));
1111 }
1112
1113 // hardware transactional memory
1114 // If the request originates in a transaction,
1115 // then mark the Ruby message as such.
1116 if (pkt->isHtmTransactional()) {
1117 msg->m_htmFromTransaction = true;
1118 msg->m_htmTransactionUid = pkt->getHtmTransactionUid();
1119 }
1120
1121 Tick latency = cyclesToTicks(
1122 m_controller->mandatoryQueueLatency(secondary_type));
1123 assert(latency > 0);
1124
1125 assert(m_mandatory_q_ptr != NULL);
1126 m_mandatory_q_ptr->enqueue(msg, clockEdge(), latency);
1127}
1128
1129template <class KEY, class VALUE>
1130std::ostream &
1131operator<<(std::ostream &out, const std::unordered_map<KEY, VALUE> &map)
1132{
1133 for (const auto &table_entry : map) {
1134 out << "[ " << table_entry.first << " =";
1135 for (const auto &seq_req : table_entry.second) {
1136 out << " " << RubyRequestType_to_string(seq_req.m_second_type);
1137 }
1138 }
1139 out << " ]";
1140
1141 return out;
1142}
1143
1144void
1145Sequencer::print(std::ostream& out) const
1146{
1147 out << "[Sequencer: " << m_version
1148 << ", outstanding requests: " << m_outstanding_count
1149 << ", request table: " << m_RequestTable
1150 << "]";
1151}
1152
1153void
1154Sequencer::recordRequestType(SequencerRequestType requestType) {
1155 DPRINTF(RubyStats, "Recorded statistic: %s\n",
1156 SequencerRequestType_to_string(requestType));
1157}
1158
1159void
1160Sequencer::evictionCallback(Addr address)
1161{
1162 llscClearMonitor(address);
1163 ruby_eviction_callback(address);
1164}
1165
1166void
1167Sequencer::incrementUnaddressedTransactionCnt()
1168{
1169 m_unaddressedTransactionCnt++;
1170 // Limit m_unaddressedTransactionCnt to 32 bits,
1171 // top 32 bits should always be zeroed out
1172 uint64_t aligned_txid = \
1173 m_unaddressedTransactionCnt << RubySystem::getBlockSizeBits();
1174
1175 if (aligned_txid > 0xFFFFFFFFull) {
1176 m_unaddressedTransactionCnt = 0;
1177 }
1178}
1179
1180uint64_t
1181Sequencer::getCurrentUnaddressedTransactionID() const
1182{
1183 return (
1184 uint64_t(m_version & 0xFFFFFFFF) << 32) |
1185 (m_unaddressedTransactionCnt << RubySystem::getBlockSizeBits()
1186 );
1187}
1188
1189} // namespace ruby
1190} // namespace gem5
#define DPRINTFR(x,...)
Definition trace.hh:224
#define DPRINTF(x,...)
Definition trace.hh:210
const char data[]
Cycles curCycle() const
Determine the current cycle, corresponding to a tick aligned to a clock edge.
Tick clockEdge(Cycles cycles=Cycles(0)) const
Determine the tick when a cycle begins, by default the current one, but the argument also enables the...
Tick cyclesToTicks(Cycles c) const
Tick clockPeriod() const
Cycles is a wrapper class for representing cycle counts, i.e.
Definition types.hh:79
virtual std::string name() const
Definition named.hh:47
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition packet.hh:295
bool isRead() const
Definition packet.hh:593
Addr getAddr() const
Definition packet.hh:807
bool isAtomicOp() const
Definition packet.hh:846
const std::string & cmdString() const
Return the string name of the cmd field (for debugging and tracing).
Definition packet.hh:588
T * findNextSenderState() const
Go through the sender state stack and return the first instance that is of type T (as determined by a...
Definition packet.hh:575
bool isHtmTransactional() const
Returns whether or not this packet/request originates in the CPU executing in transactional mode,...
Definition packet.cc:523
void setData(const uint8_t *p)
Copy data into the packet from the provided pointer.
Definition packet.hh:1293
bool isWrite() const
Definition packet.hh:594
uint64_t getHtmTransactionUid() const
If a packet/request originates in a CPU executing in transactional mode, i.e.
Definition packet.cc:529
RequestPtr req
A pointer to the original request.
Definition packet.hh:377
unsigned getSize() const
Definition packet.hh:817
AtomicOpFunctor * getAtomicOp() const
Accessor function to atomic op.
Definition packet.hh:845
bool isLLSC() const
Definition packet.hh:620
MemCmd cmd
The command field of the packet.
Definition packet.hh:372
bool isMaskedWrite() const
Definition packet.hh:1450
bool isFlush() const
Definition packet.hh:624
void writeData(uint8_t *p) const
Copy data from the packet to the memory at the provided pointer.
Definition packet.hh:1322
bool isLocked(int context) const
virtual void notifyCoalesced(const Addr &addr, const RubyRequestType &type, const RequestPtr &req, const DataBlock &data_blk, const bool &was_miss)
Notifies controller of a request coalesced at the sequencer.
virtual Cycles mandatoryQueueLatency(const RubyRequestType &param_type)
void blockOnQueue(Addr, MessageBuffer *)
Addr getAddressAtIdx(int idx) const
void clearLockedAll(int context)
AbstractCacheEntry * lookup(Addr address)
void enqueueNextFetchRequest()
Function for fetching warming up the memory and the caches.
void enqueue(MsgPtr message, Tick curTime, Tick delta, bool bypassStrictFIFO=false)
void ruby_hit_callback(PacketPtr pkt)
Definition RubyPort.cc:452
void ruby_unaddressed_callback(PacketPtr pkt)
Definition RubyPort.cc:475
void ruby_stale_translation_callback(Addr txnId)
Definition RubyPort.cc:494
RubySystem * m_ruby_system
Definition RubyPort.hh:202
virtual int functionalWrite(Packet *func_pkt)
Definition RubyPort.cc:729
AbstractController * m_controller
Definition RubyPort.hh:204
void ruby_eviction_callback(Addr address)
Definition RubyPort.cc:695
MessageBuffer * m_mandatory_q_ptr
Definition RubyPort.hh:205
static bool getWarmupEnabled()
Definition RubySystem.hh:75
static uint32_t getBlockSizeBits()
Definition RubySystem.hh:73
CacheRecorder * m_cache_recorder
static bool getCooldownEnabled()
Definition RubySystem.hh:76
statistics::Histogram m_outstandReqHist
Histogram for number of outstanding requests per cycle.
Definition Sequencer.hh:274
void resetStats() override
Callback to reset stats.
Definition Sequencer.cc:276
std::unordered_map< Addr, std::list< SequencerRequest > > m_RequestTable
Definition Sequencer.hh:236
virtual bool empty() const
Definition Sequencer.cc:907
void writeCallback(Addr address, DataBlock &data, const bool externalHit=false, const MachineType mach=MachineType_NUM, const Cycles initialRequestTime=Cycles(0), const Cycles forwardRequestTime=Cycles(0), const Cycles firstResponseTime=Cycles(0), const bool noCoales=false)
Definition Sequencer.cc:459
std::vector< statistics::Counter > m_IncompleteTimes
Definition Sequencer.hh:306
bool llscCheckMonitor(const Addr)
Searches for cache line address in the global monitor tagged with this Sequencer object's version id.
Definition Sequencer.cc:205
virtual int functionalWrite(Packet *func_pkt) override
Definition Sequencer.cc:262
std::vector< statistics::Histogram * > m_InitialToForwardDelayHist
Definition Sequencer.hh:303
std::vector< statistics::Histogram * > m_hitMachLatencyHist
Histograms for profiling the latencies for requests that did not required external messages.
Definition Sequencer.hh:287
void recordMissLatency(SequencerRequest *srequest, bool llscSuccess, const MachineType respondingMach, bool isExternalHit, Cycles initialRequestTime, Cycles forwardRequestTime, Cycles firstResponseTime)
Definition Sequencer.cc:386
void completeHitCallback(std::vector< PacketPtr > &list)
Definition Sequencer.cc:827
std::vector< statistics::Histogram * > m_typeLatencyHist
Definition Sequencer.hh:278
PacketPtr m_cache_inv_pkt
Definition Sequencer.hh:252
void atomicCallback(Addr address, DataBlock &data, const bool externalHit=false, const MachineType mach=MachineType_NUM, const Cycles initialRequestTime=Cycles(0), const Cycles forwardRequestTime=Cycles(0), const Cycles firstResponseTime=Cycles(0))
Definition Sequencer.cc:616
void writeCallbackScFail(Addr address, DataBlock &data)
Proxy function to writeCallback that first invalidates the line address in the local monitor.
Definition Sequencer.cc:452
CacheMemory * m_dataCache_ptr
Definition Sequencer.hh:254
std::vector< statistics::Histogram * > m_FirstResponseToCompletionDelayHist
Definition Sequencer.hh:305
void incrementUnaddressedTransactionCnt()
Increment the unaddressed transaction counter.
void hitCallback(SequencerRequest *srequest, DataBlock &data, bool llscSuccess, const MachineType mach, const bool externalHit, const Cycles initialRequestTime, const Cycles forwardRequestTime, const Cycles firstResponseTime, const bool was_coalesced)
Definition Sequencer.cc:672
void llscLoadLinked(const Addr)
Places the cache line address into the global monitor tagged with this Sequencer object's version id.
Definition Sequencer.cc:155
uint64_t getCurrentUnaddressedTransactionID() const
Generate the current unaddressed transaction ID based on the counter and the Sequencer object's versi...
Sequencer(const Params &)
Definition Sequencer.cc:70
statistics::Histogram m_latencyHist
Histogram for holding latency profile of all requests.
Definition Sequencer.hh:277
void issueRequest(PacketPtr pkt, RubyRequestType type)
bool llscStoreConditional(const Addr)
Searches for cache line address in the global monitor tagged with this Sequencer object's version id.
Definition Sequencer.cc:182
void unaddressedCallback(Addr unaddressedReqId, RubyRequestType requestType, const MachineType mach=MachineType_NUM, const Cycles initialRequestTime=Cycles(0), const Cycles forwardRequestTime=Cycles(0), const Cycles firstResponseTime=Cycles(0))
Definition Sequencer.cc:777
virtual RequestStatus insertRequest(PacketPtr pkt, RubyRequestType primary_type, RubyRequestType secondary_type)
Definition Sequencer.cc:308
EventFunctionWrapper deadlockCheckEvent
Definition Sequencer.hh:308
std::unordered_map< uint64_t, SequencerRequest > m_UnaddressedRequestTable
Definition Sequencer.hh:239
uint64_t m_unaddressedTransactionCnt
Definition Sequencer.hh:269
std::vector< statistics::Histogram * > m_hitTypeLatencyHist
Definition Sequencer.hh:283
statistics::Histogram m_hitLatencyHist
Histogram for holding latency profile of all requests that hit in the controller connected to this se...
Definition Sequencer.hh:282
RequestStatus makeRequest(PacketPtr pkt) override
Definition Sequencer.cc:914
void llscClearLocalMonitor()
Removes all addresses from the local monitor.
Definition Sequencer.cc:221
void recordRequestType(SequencerRequestType requestType)
std::vector< statistics::Histogram * > m_IssueToInitialDelayHist
Histograms for recording the breakdown of miss latency.
Definition Sequencer.hh:302
virtual void print(std::ostream &out) const
RubySequencerParams Params
Definition Sequencer.hh:89
std::vector< std::vector< statistics::Histogram * > > m_hitTypeMachLatencyHist
Definition Sequencer.hh:288
void llscClearMonitor(const Addr)
Removes the cache line address from the global monitor.
Definition Sequencer.cc:168
virtual void wakeup()
Definition Sequencer.cc:227
void evictionCallback(Addr address)
std::vector< statistics::Histogram * > m_ForwardToFirstResponseDelayHist
Definition Sequencer.hh:304
void readCallback(Addr address, DataBlock &data, const bool externalHit=false, const MachineType mach=MachineType_NUM, const Cycles initialRequestTime=Cycles(0), const Cycles forwardRequestTime=Cycles(0), const Cycles firstResponseTime=Cycles(0))
Definition Sequencer.cc:563
std::vector< statistics::Histogram * > m_missMachLatencyHist
Histograms for profiling the latencies for requests that required external messages.
Definition Sequencer.hh:297
std::vector< statistics::Histogram * > m_missTypeLatencyHist
Definition Sequencer.hh:293
statistics::Histogram m_missLatencyHist
Histogram for holding latency profile of all requests that miss in the controller connected to this s...
Definition Sequencer.hh:292
std::vector< std::vector< statistics::Histogram * > > m_missTypeMachLatencyHist
Definition Sequencer.hh:299
void mergeFrom(const DataBlock &data)
Definition SubBlock.hh:66
void sample(const U &v, int n=1)
Add a value to the distribtion n times.
void reset()
Reset stat value to default.
A simple histogram stat.
STL vector class.
Definition stl.hh:37
DrainState drainState() const
Return the current drain state of an object.
Definition drain.hh:324
@ Draining
Draining buffers pending serialization/handover.
bool scheduled() const
Determine if the current event is scheduled.
Definition eventq.hh:458
void schedule(Event &event, Tick when)
Definition eventq.hh:1012
#define panic(...)
This implements a cprintf based panic() function.
Definition logging.hh:188
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condi...
Definition logging.hh:236
#define warn_once(...)
Definition logging.hh:260
Bitfield< 7 > i
Definition misc_types.hh:67
Bitfield< 23, 0 > offset
Definition types.hh:144
Bitfield< 9, 8 > rs
Bitfield< 5, 0 > status
Bitfield< 21 > ss
Definition misc_types.hh:60
Bitfield< 4 > pc
Bitfield< 0 > p
Bitfield< 3 > addr
Definition types.hh:84
RubyRequestType tlbiCmdToRubyRequestType(const Packet *pkt)
bool isTlbiCmdRequest(RubyRequestType type)
Addr makeLineAddress(Addr addr)
Definition Address.cc:60
Addr getOffset(Addr addr)
Definition Address.cc:54
RubyRequestType htmCmdToRubyRequestType(const Packet *pkt)
std::ostream & operator<<(std::ostream &os, const BoolVec &myvector)
Definition BoolVec.cc:49
std::string printAddress(Addr addr)
Definition Address.cc:80
Copyright (c) 2024 - Pranith Kumar Copyright (c) 2020 Inria All rights reserved.
Definition binary32.hh:36
T safe_cast(U &&ref_or_ptr)
Definition cast.hh:74
Tick curTick()
The universal simulation clock.
Definition cur_tick.hh:46
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition types.hh:147
uint64_t Tick
Tick count type.
Definition types.hh:58
int ContextID
Globally unique thread context ID.
Definition types.hh:239
const ContextID InvalidContextID
Definition types.hh:240
Declaration of the Packet class.
RubyRequestType m_second_type
Definition Sequencer.hh:66

Generated on Tue Jun 18 2024 16:24:05 for gem5 by doxygen 1.11.0