/*
 * Copyright (c) 2010-2020 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2013 Amin Farmahini-Farahani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/mem_ctrl.hh"

#include "base/trace.hh"
#include "debug/DRAM.hh"
#include "debug/Drain.hh"
#include "debug/MemCtrl.hh"
#include "debug/NVM.hh"
#include "debug/QOS.hh"
#include "mem/dram_interface.hh"
#include "mem/mem_interface.hh"
#include "mem/nvm_interface.hh"
#include "sim/system.hh"

namespace gem5
{

namespace memory
{

MemCtrl::MemCtrl(const MemCtrlParams &p) :
    qos::MemCtrl(p),
    port(name() + ".port", *this), isTimingMode(false),
    retryRdReq(false), retryWrReq(false),
    nextReqEvent([this] {processNextReqEvent(dram, respQueue,
                          respondEvent, nextReqEvent, retryWrReq); }, name()),
    respondEvent([this] {processRespondEvent(dram, respQueue,
                          respondEvent, retryRdReq); }, name()),
    dram(p.dram),
    readBufferSize(dram->readBufferSize),
    writeBufferSize(dram->writeBufferSize),
    writeHighThreshold(writeBufferSize * p.write_high_thresh_perc / 100.0),
    writeLowThreshold(writeBufferSize * p.write_low_thresh_perc / 100.0),
    minWritesPerSwitch(p.min_writes_per_switch),
    minReadsPerSwitch(p.min_reads_per_switch),
    memSchedPolicy(p.mem_sched_policy),
    frontendLatency(p.static_frontend_latency),
    backendLatency(p.static_backend_latency),
    commandWindow(p.command_window),
    prevArrival(0),
    stats(*this)
{
    DPRINTF(MemCtrl, "Setting up controller\n");

    readQueue.resize(p.qos_priorities);
    writeQueue.resize(p.qos_priorities);

    dram->setCtrl(this, commandWindow);

    // perform a basic check of the write thresholds
    if (p.write_low_thresh_perc >= p.write_high_thresh_perc)
        fatal("Write buffer low threshold %d must be smaller than the "
              "high threshold %d\n", p.write_low_thresh_perc,
              p.write_high_thresh_perc);
    if (p.disable_sanity_check) {
        port.disableSanityCheck();
    }
}
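
// Note on the write thresholds (numbers for illustration only): with a
// 64-entry write buffer, write_high_thresh_perc = 85 and
// write_low_thresh_perc = 50 give writeHighThreshold = 54 and
// writeLowThreshold = 32 entries. Writes are drained in batches: the
// controller switches the bus to writes once the queue passes the high
// watermark and switches back after dropping under the low one (see
// processNextReqEvent below).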

void
MemCtrl::init()
{
    if (!port.isConnected()) {
        fatal("MemCtrl %s is unconnected!\n", name());
    } else {
        port.sendRangeChange();
    }
}

void
MemCtrl::startup()
{
    // remember the memory system mode of operation
    isTimingMode = system()->isTimingMode();

    if (isTimingMode) {
        // shift the bus busy time sufficiently far ahead that we never
        // have to worry about negative values when computing the time for
        // the next request, this will add an insignificant bubble at the
        // start of simulation
        dram->nextBurstAt = curTick() + dram->commandOffset();
    }
}

Tick
MemCtrl::recvAtomic(PacketPtr pkt)
{
    if (!dram->getAddrRange().contains(pkt->getAddr())) {
        panic("Can't handle address range for packet %s\n", pkt->print());
    }

    return recvAtomicLogic(pkt, dram);
}


Tick
MemCtrl::recvAtomicLogic(PacketPtr pkt, MemInterface* mem_intr)
{
    DPRINTF(MemCtrl, "recvAtomic: %s 0x%x\n",
                     pkt->cmdString(), pkt->getAddr());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    // do the actual memory access and turn the packet into a response
    mem_intr->access(pkt);

    if (pkt->hasData()) {
        // this value is not supposed to be accurate, just enough to
        // keep things going, mimic a closed page
        // also this latency can't be 0
        return mem_intr->accessLatency();
    }

    return 0;
}

Tick
MemCtrl::recvAtomicBackdoor(PacketPtr pkt, MemBackdoorPtr &backdoor)
{
    Tick latency = recvAtomic(pkt);
    dram->getBackdoor(backdoor);
    return latency;
}

bool
MemCtrl::readQueueFull(unsigned int neededEntries) const
{
    DPRINTF(MemCtrl,
            "Read queue limit %d, current size %d, entries needed %d\n",
            readBufferSize, totalReadQueueSize + respQueue.size(),
            neededEntries);

    auto rdsize_new = totalReadQueueSize + respQueue.size() + neededEntries;
    return rdsize_new > readBufferSize;
}
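
// Illustrative use of the check above: with readBufferSize = 32, ten
// packets in the read queue and four responses still queued, a request
// needing three more bursts sees 10 + 4 + 3 = 17 <= 32 and is accepted;
// with 30 entries outstanding the same request (33 > 32) is rejected
// and retried later.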

bool
MemCtrl::writeQueueFull(unsigned int neededEntries) const
{
    DPRINTF(MemCtrl,
            "Write queue limit %d, current size %d, entries needed %d\n",
            writeBufferSize, totalWriteQueueSize, neededEntries);

    auto wrsize_new = (totalWriteQueueSize + neededEntries);
    return wrsize_new > writeBufferSize;
}

bool
MemCtrl::addToReadQueue(PacketPtr pkt,
                unsigned int pkt_count, MemInterface* mem_intr)
{
    // only add to the read queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(!pkt->isWrite());

    assert(pkt_count != 0);

    // if the request size is larger than burst size, the pkt is split into
    // multiple packets
    // Note if the pkt starting address is not aligned to burst size, the
    // address of the first packet is kept unaligned. Subsequent packets
    // are aligned to burst size boundaries. This is to ensure we accurately
    // check read packets against packets in the write queue.
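    // Worked example (numbers for illustration only): with a 64-byte
    // burst, a 128-byte read starting at 0x7C becomes three memory
    // packets: 0x7C-0x7F (the unaligned head, 4 bytes), 0x80-0xBF
    // (64 bytes), and 0xC0-0xFB (60 bytes).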
    const Addr base_addr = pkt->getAddr();
    Addr addr = base_addr;
    unsigned pktsServicedByWrQ = 0;
    BurstHelper* burst_helper = NULL;

    uint32_t burst_size = mem_intr->bytesPerBurst();

    for (int cnt = 0; cnt < pkt_count; ++cnt) {
        unsigned size = std::min((addr | (burst_size - 1)) + 1,
                        base_addr + pkt->getSize()) - addr;
        stats.readPktSize[ceilLog2(size)]++;
        stats.readBursts++;
        stats.requestorReadAccesses[pkt->requestorId()]++;

        // First check write buffer to see if the data is already at
        // the controller
        bool foundInWrQ = false;
        Addr burst_addr = burstAlign(addr, mem_intr);
        // if the burst address is not present then there is no need
        // looking any further
        if (isInWriteQueue.find(burst_addr) != isInWriteQueue.end()) {
            for (const auto& vec : writeQueue) {
                for (const auto& p : vec) {
                    // check if the read is subsumed in the write queue
                    // packet we are looking at
                    if (p->addr <= addr &&
                       ((addr + size) <= (p->addr + p->size))) {

                        foundInWrQ = true;
                        stats.servicedByWrQ++;
                        pktsServicedByWrQ++;
                        DPRINTF(MemCtrl,
                                "Read to addr %#x with size %d serviced by "
                                "write queue\n",
                                addr, size);
                        stats.bytesReadWrQ += burst_size;
                        break;
                    }
                }
            }
        }

        // If not found in the write q, make a memory packet and
        // push it onto the read queue
        if (!foundInWrQ) {

            // Make the burst helper for split packets
            if (pkt_count > 1 && burst_helper == NULL) {
                DPRINTF(MemCtrl, "Read to addr %#x translates to %d "
                        "memory requests\n", pkt->getAddr(), pkt_count);
                burst_helper = new BurstHelper(pkt_count);
            }

            MemPacket* mem_pkt;
            mem_pkt = mem_intr->decodePacket(pkt, addr, size, true,
                                             mem_intr->pseudoChannel);

            // Increment read entries of the rank (dram)
            // Increment count to trigger issue of non-deterministic read (nvm)
            mem_intr->setupRank(mem_pkt->rank, true);
            // Default readyTime to Max; will be reset once read is issued
            mem_pkt->readyTime = MaxTick;
            mem_pkt->burstHelper = burst_helper;

            assert(!readQueueFull(1));
            stats.rdQLenPdf[totalReadQueueSize + respQueue.size()]++;

            DPRINTF(MemCtrl, "Adding to read queue\n");

            readQueue[mem_pkt->qosValue()].push_back(mem_pkt);

            // log packet
            logRequest(MemCtrl::READ, pkt->requestorId(),
                       pkt->qosValue(), mem_pkt->addr, 1);

            mem_intr->readQueueSize++;

            // Update stats
            stats.avgRdQLen = totalReadQueueSize + respQueue.size();
        }

        // Starting address of next memory pkt (aligned to burst boundary)
        addr = (addr | (burst_size - 1)) + 1;
    }

    // If all packets are serviced by write queue, we send the response back
    if (pktsServicedByWrQ == pkt_count) {
        accessAndRespond(pkt, frontendLatency, mem_intr);
        return true;
    }

    // Update how many split packets are serviced by write queue
    if (burst_helper != NULL)
        burst_helper->burstsServiced = pktsServicedByWrQ;

    // not all/any packets serviced by the write queue
    return false;
}

void
MemCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pkt_count,
                         MemInterface* mem_intr)
{
    // only add to the write queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(pkt->isWrite());

    // if the request size is larger than burst size, the pkt is split into
    // multiple packets
    const Addr base_addr = pkt->getAddr();
    Addr addr = base_addr;
    uint32_t burst_size = mem_intr->bytesPerBurst();

    for (int cnt = 0; cnt < pkt_count; ++cnt) {
        unsigned size = std::min((addr | (burst_size - 1)) + 1,
                        base_addr + pkt->getSize()) - addr;
        stats.writePktSize[ceilLog2(size)]++;
        stats.writeBursts++;
        stats.requestorWriteAccesses[pkt->requestorId()]++;

        // see if we can merge with an existing item in the write
        // queue and keep track of whether we have merged or not
        bool merged = isInWriteQueue.find(burstAlign(addr, mem_intr)) !=
            isInWriteQueue.end();
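
        // For example (illustration only): two 32-byte writes that fall
        // in the same 64-byte burst share a burst-aligned address, so the
        // second is merged below rather than taking another queue entry.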

        // if the item was not merged we need to create a new write
        // and enqueue it
        if (!merged) {
            MemPacket* mem_pkt;
            mem_pkt = mem_intr->decodePacket(pkt, addr, size, false,
                                             mem_intr->pseudoChannel);
            // Default readyTime to Max if nvm interface;
            // will be reset once read is issued
            mem_pkt->readyTime = MaxTick;

            mem_intr->setupRank(mem_pkt->rank, false);

            assert(totalWriteQueueSize < writeBufferSize);
            stats.wrQLenPdf[totalWriteQueueSize]++;

            DPRINTF(MemCtrl, "Adding to write queue\n");

            writeQueue[mem_pkt->qosValue()].push_back(mem_pkt);
            isInWriteQueue.insert(burstAlign(addr, mem_intr));

            // log packet
            logRequest(MemCtrl::WRITE, pkt->requestorId(),
                       pkt->qosValue(), mem_pkt->addr, 1);

            mem_intr->writeQueueSize++;

            assert(totalWriteQueueSize == isInWriteQueue.size());

            // Update stats
            stats.avgWrQLen = totalWriteQueueSize;

        } else {
            DPRINTF(MemCtrl,
                    "Merging write burst with existing queue entry\n");

            // keep track of the fact that this burst effectively
            // disappeared as it was merged with an existing one
            stats.mergedWrBursts++;
        }

        // Starting address of next memory pkt (aligned to burst_size boundary)
        addr = (addr | (burst_size - 1)) + 1;
    }

    // we do not wait for the writes to be sent to the actual memory,
    // but instead take responsibility for the consistency here and
    // snoop the write queue for any upcoming reads
    // @todo, if a pkt size is larger than burst size, we might need a
    // different front end latency
    accessAndRespond(pkt, frontendLatency, mem_intr);
}

void
MemCtrl::printQs() const
{
#if TRACING_ON
    DPRINTF(MemCtrl, "===READ QUEUE===\n\n");
    for (const auto& queue : readQueue) {
        for (const auto& packet : queue) {
            DPRINTF(MemCtrl, "Read %#x\n", packet->addr);
        }
    }

    DPRINTF(MemCtrl, "\n===RESP QUEUE===\n\n");
    for (const auto& packet : respQueue) {
        DPRINTF(MemCtrl, "Response %#x\n", packet->addr);
    }

    DPRINTF(MemCtrl, "\n===WRITE QUEUE===\n\n");
    for (const auto& queue : writeQueue) {
        for (const auto& packet : queue) {
            DPRINTF(MemCtrl, "Write %#x\n", packet->addr);
        }
    }
#endif // TRACING_ON
}

bool
MemCtrl::recvTimingReq(PacketPtr pkt)
{
    // This is where we enter from the outside world
    DPRINTF(MemCtrl, "recvTimingReq: request %s addr %#x size %d\n",
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    panic_if(!(pkt->isRead() || pkt->isWrite()),
             "Should only see read and writes at memory controller\n");

    // Calc avg gap between requests
    if (prevArrival != 0) {
        stats.totGap += curTick() - prevArrival;
    }
    prevArrival = curTick();

    panic_if(!(dram->getAddrRange().contains(pkt->getAddr())),
             "Can't handle address range for packet %s\n", pkt->print());

    // Find out how many memory packets a pkt translates to
    // If the burst size is equal or larger than the pkt size, then a pkt
    // translates to only one memory packet. Otherwise, a pkt translates to
    // multiple memory packets
    unsigned size = pkt->getSize();
    uint32_t burst_size = dram->bytesPerBurst();

    unsigned offset = pkt->getAddr() & (burst_size - 1);
    unsigned int pkt_count = divCeil(offset + size, burst_size);
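
    // For example (illustration only): with a 64-byte burst, a 64-byte
    // request at address 0x40 has offset 0 and becomes one memory packet,
    // while the same request at 0x60 has offset 0x20 and is split in two:
    // divCeil(0x20 + 64, 64) = 2.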

    // run the QoS scheduler and assign a QoS priority value to the packet
    qosSchedule( { &readQueue, &writeQueue }, burst_size, pkt);

    // check local buffers and do not accept if full
    if (pkt->isWrite()) {
        assert(size != 0);
        if (writeQueueFull(pkt_count)) {
            DPRINTF(MemCtrl, "Write queue full, not accepting\n");
            // remember that we have to retry this port
            retryWrReq = true;
            stats.numWrRetry++;
            return false;
        } else {
            addToWriteQueue(pkt, pkt_count, dram);
            // If we are not already scheduled to get a request out of the
            // queue, do so now
            if (!nextReqEvent.scheduled()) {
                DPRINTF(MemCtrl, "Request scheduled immediately\n");
                schedule(nextReqEvent, curTick());
            }
            stats.writeReqs++;
            stats.bytesWrittenSys += size;
        }
    } else {
        assert(pkt->isRead());
        assert(size != 0);
        if (readQueueFull(pkt_count)) {
            DPRINTF(MemCtrl, "Read queue full, not accepting\n");
            // remember that we have to retry this port
            retryRdReq = true;
            stats.numRdRetry++;
            return false;
        } else {
            if (!addToReadQueue(pkt, pkt_count, dram)) {
                // If we are not already scheduled to get a request out of the
                // queue, do so now
                if (!nextReqEvent.scheduled()) {
                    DPRINTF(MemCtrl, "Request scheduled immediately\n");
                    schedule(nextReqEvent, curTick());
                }
            }
            stats.readReqs++;
            stats.bytesReadSys += size;
        }
    }

    return true;
}

void
MemCtrl::processRespondEvent(MemInterface* mem_intr,
                        MemPacketQueue& queue,
                        EventFunctionWrapper& resp_event,
                        bool& retry_rd_req)
{

    DPRINTF(MemCtrl,
            "processRespondEvent(): Some req has reached its readyTime\n");

    MemPacket* mem_pkt = queue.front();

    // media specific checks and functions when read response is complete
    // DRAM only
    mem_intr->respondEvent(mem_pkt->rank);

    if (mem_pkt->burstHelper) {
        // it is a split packet
        mem_pkt->burstHelper->burstsServiced++;
        if (mem_pkt->burstHelper->burstsServiced ==
            mem_pkt->burstHelper->burstCount) {
            // we have now serviced all children packets of a system packet
            // so we can now respond to the requestor
            // @todo we probably want to have a different front end and back
            // end latency for split packets
            accessAndRespond(mem_pkt->pkt, frontendLatency + backendLatency,
                             mem_intr);
            delete mem_pkt->burstHelper;
            mem_pkt->burstHelper = NULL;
        }
    } else {
        // it is not a split packet
        accessAndRespond(mem_pkt->pkt, frontendLatency + backendLatency,
                         mem_intr);
    }

    queue.pop_front();

    if (!queue.empty()) {
        assert(queue.front()->readyTime >= curTick());
        assert(!resp_event.scheduled());
        schedule(resp_event, queue.front()->readyTime);
    } else {
        // if there is nothing left in any queue, signal a drain
        if (drainState() == DrainState::Draining &&
            !totalWriteQueueSize && !totalReadQueueSize &&
            allIntfDrained()) {

            DPRINTF(Drain, "Controller done draining\n");
            signalDrainDone();
        } else {
            // check the refresh state and kick the refresh event loop
            // into action again if banks already closed and just waiting
            // for read to complete
            // DRAM only
            mem_intr->checkRefreshState(mem_pkt->rank);
        }
    }

    delete mem_pkt;

    // We have made a location in the queue available at this point,
    // so if there is a read that was forced to wait, retry now
    if (retry_rd_req) {
        retry_rd_req = false;
        port.sendRetryReq();
    }
}

MemPacketQueue::iterator
MemCtrl::chooseNext(MemPacketQueue& queue, Tick extra_col_delay,
                    MemInterface* mem_intr)
{
    // This method does the arbitration between requests.

    MemPacketQueue::iterator ret = queue.end();

    if (!queue.empty()) {
        if (queue.size() == 1) {
            // available rank corresponds to state refresh idle
            MemPacket* mem_pkt = *(queue.begin());
            if (mem_pkt->pseudoChannel != mem_intr->pseudoChannel) {
                return ret;
            }
            if (packetReady(mem_pkt, mem_intr)) {
                ret = queue.begin();
                DPRINTF(MemCtrl, "Single request, going to a free rank\n");
            } else {
                DPRINTF(MemCtrl, "Single request, going to a busy rank\n");
            }
        } else if (memSchedPolicy == enums::fcfs) {
            // check if there is a packet going to a free rank
            for (auto i = queue.begin(); i != queue.end(); ++i) {
                MemPacket* mem_pkt = *i;
                if (mem_pkt->pseudoChannel != mem_intr->pseudoChannel) {
                    continue;
                }
                if (packetReady(mem_pkt, mem_intr)) {
                    ret = i;
                    break;
                }
            }
        } else if (memSchedPolicy == enums::frfcfs) {
            Tick col_allowed_at;
            std::tie(ret, col_allowed_at)
                    = chooseNextFRFCFS(queue, extra_col_delay, mem_intr);
        } else {
            panic("No scheduling policy chosen\n");
        }
    }
    return ret;
}

std::pair<MemPacketQueue::iterator, Tick>
MemCtrl::chooseNextFRFCFS(MemPacketQueue& queue, Tick extra_col_delay,
                          MemInterface* mem_intr)
{
    auto selected_pkt_it = queue.end();
    Tick col_allowed_at = MaxTick;

    // time we need to issue a column command to be seamless
    const Tick min_col_at = std::max(mem_intr->nextBurstAt + extra_col_delay,
                                     curTick());
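
    // FR-FCFS (first-ready, first-come-first-served) prefers packets that
    // can issue a seamless column access by min_col_at (i.e. row buffer
    // hits), falling back to the oldest packet that can issue; the
    // media-specific selection is implemented by the interface.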

    std::tie(selected_pkt_it, col_allowed_at) =
        mem_intr->chooseNextFRFCFS(queue, min_col_at);

    if (selected_pkt_it == queue.end()) {
        DPRINTF(MemCtrl, "%s no available packets found\n", __func__);
    }

    return std::make_pair(selected_pkt_it, col_allowed_at);
}

void
MemCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency,
                          MemInterface* mem_intr)
{
    DPRINTF(MemCtrl, "Responding to Address %#x.. \n", pkt->getAddr());

    bool needsResponse = pkt->needsResponse();
    // do the actual memory access which also turns the packet into a
    // response
    panic_if(!mem_intr->getAddrRange().contains(pkt->getAddr()),
             "Can't handle address range for packet %s\n", pkt->print());
    mem_intr->access(pkt);

    // turn packet around to go back to requestor if response expected
    if (needsResponse) {
        // access already turned the packet into a response
        assert(pkt->isResponse());
        // response_time consumes the static latency and is charged also
        // with headerDelay that takes into account the delay provided by
        // the xbar and also the payloadDelay that takes into account the
        // number of data beats.
        Tick response_time = curTick() + static_latency + pkt->headerDelay +
                             pkt->payloadDelay;
        // Here we reset the timing of the packet before sending it out.
        pkt->headerDelay = pkt->payloadDelay = 0;

        // queue the packet in the response queue to be sent out after
        // the static latency has passed
        port.schedTimingResp(pkt, response_time);
    } else {
        // @todo the packet is going to be deleted, and the MemPacket
        // is still having a pointer to it
        pendingDelete.reset(pkt);
    }

    DPRINTF(MemCtrl, "Done\n");

    return;
}

void
MemCtrl::pruneBurstTick()
{
    auto it = burstTicks.begin();
    while (it != burstTicks.end()) {
        auto current_it = it++;
        if (curTick() > *current_it) {
            DPRINTF(MemCtrl, "Removing burstTick for %d\n", *current_it);
            burstTicks.erase(current_it);
        }
    }
}

Tick
MemCtrl::getBurstWindow(Tick cmd_tick)
{
    // get tick aligned to burst window
    Tick burst_offset = cmd_tick % commandWindow;
    return (cmd_tick - burst_offset);
}
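
// Illustrative arithmetic for getBurstWindow(): with a command window of
// 5000 ticks, a command at tick 12345 falls in the window starting at
// 12345 - (12345 % 5000) = 10000.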

Tick
MemCtrl::verifySingleCmd(Tick cmd_tick, Tick max_cmds_per_burst, bool row_cmd)
{
    // start with assumption that there is no contention on command bus
    Tick cmd_at = cmd_tick;

    // get tick aligned to burst window
    Tick burst_tick = getBurstWindow(cmd_tick);

    // verify that we have command bandwidth to issue the command
    // if not, iterate over next window(s) until slot found
    while (burstTicks.count(burst_tick) >= max_cmds_per_burst) {
        DPRINTF(MemCtrl, "Contention found on command bus at %d\n",
                burst_tick);
        burst_tick += commandWindow;
        cmd_at = burst_tick;
    }

    // add command into burst window and return corresponding Tick
    burstTicks.insert(burst_tick);
    return cmd_at;
}

Tick
MemCtrl::verifyMultiCmd(Tick cmd_tick, Tick max_cmds_per_burst,
                        Tick max_multi_cmd_split)
{
    // start with assumption that there is no contention on command bus
    Tick cmd_at = cmd_tick;

    // get tick aligned to burst window
    Tick burst_tick = getBurstWindow(cmd_tick);

    // Command timing requirements are from 2nd command
    // Start with assumption that 2nd command will issue at cmd_at and
    // find prior slot for 1st command to issue
    // Given a maximum latency of max_multi_cmd_split between the commands,
    // find the burst at the maximum latency prior to cmd_at
    Tick burst_offset = 0;
    Tick first_cmd_offset = cmd_tick % commandWindow;
    while (max_multi_cmd_split > (first_cmd_offset + burst_offset)) {
        burst_offset += commandWindow;
    }
    // get the earliest burst aligned address for first command
    // ensure that the time does not go negative
    Tick first_cmd_tick = burst_tick - std::min(burst_offset, burst_tick);

    // Can required commands issue?
    bool first_can_issue = false;
    bool second_can_issue = false;
    // verify that we have command bandwidth to issue the command(s)
    while (!first_can_issue || !second_can_issue) {
        bool same_burst = (burst_tick == first_cmd_tick);
        auto first_cmd_count = burstTicks.count(first_cmd_tick);
        auto second_cmd_count = same_burst ? first_cmd_count + 1 :
                                burstTicks.count(burst_tick);

        first_can_issue = first_cmd_count < max_cmds_per_burst;
        second_can_issue = second_cmd_count < max_cmds_per_burst;

        if (!second_can_issue) {
            DPRINTF(MemCtrl, "Contention (cmd2) found on command bus at %d\n",
                    burst_tick);
            burst_tick += commandWindow;
            cmd_at = burst_tick;
        }

        // Verify max_multi_cmd_split isn't violated when command 2 is shifted
        // If commands initially were issued in same burst, they are
        // now in consecutive bursts and can still issue B2B
        bool gap_violated = !same_burst &&
            ((burst_tick - first_cmd_tick) > max_multi_cmd_split);

        if (!first_can_issue || (!second_can_issue && gap_violated)) {
            DPRINTF(MemCtrl, "Contention (cmd1) found on command bus at %d\n",
                    first_cmd_tick);
            first_cmd_tick += commandWindow;
        }
    }

    // Add command to burstTicks
    burstTicks.insert(burst_tick);
    burstTicks.insert(first_cmd_tick);

    return cmd_at;
}

bool
MemCtrl::inReadBusState(bool next_state, const MemInterface* mem_intr) const
{
    // check the bus state
    if (next_state) {
        // use busStateNext to get the state that will be used
        // for the next burst
        return (mem_intr->busStateNext == MemCtrl::READ);
    } else {
        return (mem_intr->busState == MemCtrl::READ);
    }
}

bool
MemCtrl::inWriteBusState(bool next_state, const MemInterface* mem_intr) const
{
    // check the bus state
    if (next_state) {
        // use busStateNext to get the state that will be used
        // for the next burst
        return (mem_intr->busStateNext == MemCtrl::WRITE);
    } else {
        return (mem_intr->busState == MemCtrl::WRITE);
    }
}

Tick
MemCtrl::doBurstAccess(MemPacket* mem_pkt, MemInterface* mem_intr)
{
    // first clean up the burstTick set, removing old entries
    // before adding new entries for next burst
    pruneBurstTick();

    // When was command issued?
    Tick cmd_at;

    // Issue the next burst and update bus state to reflect
    // when previous command was issued
    std::vector<MemPacketQueue>& queue = selQueue(mem_pkt->isRead());
    std::tie(cmd_at, mem_intr->nextBurstAt) =
        mem_intr->doBurstAccess(mem_pkt, mem_intr->nextBurstAt, queue);

    DPRINTF(MemCtrl, "Access to %#x, ready at %lld next burst at %lld.\n",
            mem_pkt->addr, mem_pkt->readyTime, mem_intr->nextBurstAt);

    // Update the minimum timing between the requests, this is a
    // conservative estimate of when we have to schedule the next
    // request to not introduce any unnecessary bubbles. In most cases
    // we will wake up sooner than we have to.
    mem_intr->nextReqTime = mem_intr->nextBurstAt - mem_intr->commandOffset();

    // Update the common bus stats
    if (mem_pkt->isRead()) {
        ++(mem_intr->readsThisTime);
        // Update latency stats
        stats.requestorReadTotalLat[mem_pkt->requestorId()] +=
            mem_pkt->readyTime - mem_pkt->entryTime;
        stats.requestorReadBytes[mem_pkt->requestorId()] += mem_pkt->size;
    } else {
        ++(mem_intr->writesThisTime);
        stats.requestorWriteBytes[mem_pkt->requestorId()] += mem_pkt->size;
        stats.requestorWriteTotalLat[mem_pkt->requestorId()] +=
            mem_pkt->readyTime - mem_pkt->entryTime;
    }

    return cmd_at;
}

bool
MemCtrl::memBusy(MemInterface* mem_intr) {

    // check ranks for refresh/wakeup - uses busStateNext, so done after
    // turnaround decisions
    // Default to busy status and update based on interface specifics
    // Default state of unused interface is 'true'
    bool mem_busy = true;
    bool all_writes_nvm = mem_intr->numWritesQueued == mem_intr->writeQueueSize;
    bool read_queue_empty = mem_intr->readQueueSize == 0;
    mem_busy = mem_intr->isBusy(read_queue_empty, all_writes_nvm);
    if (mem_busy) {
        // if all ranks are refreshing wait for them to finish
        // and stall this state machine without taking any further
        // action, and do not schedule a new nextReqEvent
        return true;
    } else {
        return false;
    }
}

bool
MemCtrl::nvmWriteBlock(MemInterface* mem_intr) {

    bool all_writes_nvm = mem_intr->numWritesQueued == totalWriteQueueSize;
    return (mem_intr->writeRespQueueFull() && all_writes_nvm);
}

void
MemCtrl::nonDetermReads(MemInterface* mem_intr) {

    for (auto queue = readQueue.rbegin();
            queue != readQueue.rend(); ++queue) {
        // select non-deterministic NVM read to issue
        // assume that we have the command bandwidth to issue this along
        // with additional RD/WR burst with needed bank operations
        if (mem_intr->readsWaitingToIssue()) {
            // select non-deterministic NVM read to issue
            mem_intr->chooseRead(*queue);
        }
    }
}

void
MemCtrl::processNextReqEvent(MemInterface* mem_intr,
                        MemPacketQueue& resp_queue,
                        EventFunctionWrapper& resp_event,
                        EventFunctionWrapper& next_req_event,
                        bool& retry_wr_req) {
    // transition is handled by QoS algorithm if enabled
    if (turnPolicy) {
        // select bus state - only done if QoS algorithms are in use
        busStateNext = selectNextBusState();
    }

    // detect bus state change
    bool switched_cmd_type = (mem_intr->busState != mem_intr->busStateNext);
    // record stats
    recordTurnaroundStats(mem_intr->busState, mem_intr->busStateNext);

    DPRINTF(MemCtrl, "QoS Turnarounds selected state %s %s\n",
            (mem_intr->busState==MemCtrl::READ)?"READ":"WRITE",
            switched_cmd_type?"[turnaround triggered]":"");

    if (switched_cmd_type) {
        if (mem_intr->busState == MemCtrl::READ) {
            DPRINTF(MemCtrl,
                    "Switching to writes after %d reads with %d reads "
                    "waiting\n", mem_intr->readsThisTime, mem_intr->readQueueSize);
            stats.rdPerTurnAround.sample(mem_intr->readsThisTime);
            mem_intr->readsThisTime = 0;
        } else {
            DPRINTF(MemCtrl,
                    "Switching to reads after %d writes with %d writes "
                    "waiting\n", mem_intr->writesThisTime, mem_intr->writeQueueSize);
            stats.wrPerTurnAround.sample(mem_intr->writesThisTime);
            mem_intr->writesThisTime = 0;
        }
    }

    if (drainState() == DrainState::Draining && !totalWriteQueueSize &&
        !totalReadQueueSize && respQEmpty() && allIntfDrained()) {

        DPRINTF(Drain, "MemCtrl controller done draining\n");
        signalDrainDone();
    }

    // updates current state
    mem_intr->busState = mem_intr->busStateNext;

    nonDetermReads(mem_intr);

    if (memBusy(mem_intr)) {
        return;
    }

    // when we get here it is either a read or a write
    if (mem_intr->busState == READ) {

        // track if we should switch or not
        bool switch_to_writes = false;

        if (mem_intr->readQueueSize == 0) {
            // In the case there is no read request to go next,
            // trigger writes if we have passed the low threshold (or
            // if we are draining)
            if (!(mem_intr->writeQueueSize == 0) &&
                (drainState() == DrainState::Draining ||
                 mem_intr->writeQueueSize > writeLowThreshold)) {

                DPRINTF(MemCtrl,
                        "Switching to writes due to read queue empty\n");
                switch_to_writes = true;
            } else {
                // check if we are drained
                // not done draining until in PWR_IDLE state
                // ensuring all banks are closed and
                // have exited low power states
                if (drainState() == DrainState::Draining &&
                    respQEmpty() && allIntfDrained()) {

                    DPRINTF(Drain, "MemCtrl controller done draining\n");
                    signalDrainDone();
                }

                // nothing to do, not even any point in scheduling an
                // event for the next request
                return;
            }
        } else {

            bool read_found = false;
            MemPacketQueue::iterator to_read;
            uint8_t prio = numPriorities();

            for (auto queue = readQueue.rbegin();
                 queue != readQueue.rend(); ++queue) {

                prio--;

                DPRINTF(QOS,
                        "Checking READ queue [%d] priority [%d elements]\n",
                        prio, queue->size());

                // Figure out which read request goes next
                // If we are changing command type, incorporate the minimum
                // bus turnaround delay which will be rank to rank delay
                to_read = chooseNext((*queue), switched_cmd_type ?
                                     minWriteToReadDataGap() : 0, mem_intr);

                if (to_read != queue->end()) {
                    // candidate read found
                    read_found = true;
                    break;
                }
            }

            // if no read to an available rank is found then return
            // at this point. There could be writes to the available ranks
            // which are above the required threshold. However, to
            // avoid adding more complexity to the code, return and wait
            // for a refresh event to kick things into action again.
            if (!read_found) {
                DPRINTF(MemCtrl, "No Reads Found - exiting\n");
                return;
            }

            auto mem_pkt = *to_read;

            Tick cmd_at = doBurstAccess(mem_pkt, mem_intr);

            DPRINTF(MemCtrl,
            "Command for %#x, issued at %lld.\n", mem_pkt->addr, cmd_at);

            // sanity check
            assert(pktSizeCheck(mem_pkt, mem_intr));
            assert(mem_pkt->readyTime >= curTick());

            // log the response
            logResponse(MemCtrl::READ, (*to_read)->requestorId(),
                        mem_pkt->qosValue(), mem_pkt->getAddr(), 1,
                        mem_pkt->readyTime - mem_pkt->entryTime);

            mem_intr->readQueueSize--;

            // Insert into response queue. It will be sent back to the
            // requestor at its readyTime
            if (resp_queue.empty()) {
                assert(!resp_event.scheduled());
                schedule(resp_event, mem_pkt->readyTime);
            } else {
                assert(resp_queue.back()->readyTime <= mem_pkt->readyTime);
                assert(resp_event.scheduled());
            }

            resp_queue.push_back(mem_pkt);

            // we have so many writes that we have to transition
            // don't transition if the writeRespQueue is full and
            // there are no other writes that can issue
            // Also ensure that we've issued a minimum defined number
            // of reads before switching, or have emptied the readQ
            if ((mem_intr->writeQueueSize > writeHighThreshold) &&
                (mem_intr->readsThisTime >= minReadsPerSwitch ||
                 mem_intr->readQueueSize == 0)
                && !(nvmWriteBlock(mem_intr))) {
                switch_to_writes = true;
            }
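
            // Illustrative numbers only: with writeHighThreshold at 54
            // entries and minReadsPerSwitch at 16, the bus only turns
            // around once at least 16 reads have issued (or the read
            // queue empties), so a filling write queue cannot starve
            // short read bursts.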

            // remove the request from the queue
            // the iterator is no longer valid
            readQueue[mem_pkt->qosValue()].erase(to_read);
        }

        // switching to writes, either because the read queue is empty
        // and the writes have passed the low threshold (or we are
        // draining), or because the writes hit the high threshold
        if (switch_to_writes) {
            // transition to writing
            mem_intr->busStateNext = WRITE;
        }
    } else {

        bool write_found = false;
        MemPacketQueue::iterator to_write;
        uint8_t prio = numPriorities();

        for (auto queue = writeQueue.rbegin();
             queue != writeQueue.rend(); ++queue) {

            prio--;

            DPRINTF(QOS,
                    "Checking WRITE queue [%d] priority [%d elements]\n",
                    prio, queue->size());

            // If we are changing command type, incorporate the minimum
            // bus turnaround delay
            to_write = chooseNext((*queue),
                    switched_cmd_type ? minReadToWriteDataGap() : 0, mem_intr);

            if (to_write != queue->end()) {
                write_found = true;
                break;
            }
        }

        // if no writes to a rank that is available to service requests
        // (i.e. the rank is in refresh idle state) are found, then
        // return. There could be reads to the available ranks. However, to
        // avoid adding more complexity to the code, return at this point and
        // wait for a refresh event to kick things into action again.
        if (!write_found) {
            DPRINTF(MemCtrl, "No Writes Found - exiting\n");
            return;
        }

        auto mem_pkt = *to_write;

        // sanity check
        assert(pktSizeCheck(mem_pkt, mem_intr));

        Tick cmd_at = doBurstAccess(mem_pkt, mem_intr);
        DPRINTF(MemCtrl,
        "Command for %#x, issued at %lld.\n", mem_pkt->addr, cmd_at);

        isInWriteQueue.erase(burstAlign(mem_pkt->addr, mem_intr));

        // log the response
        logResponse(MemCtrl::WRITE, mem_pkt->requestorId(),
                    mem_pkt->qosValue(), mem_pkt->getAddr(), 1,
                    mem_pkt->readyTime - mem_pkt->entryTime);

        mem_intr->writeQueueSize--;

        // remove the request from the queue - the iterator is no longer valid
        writeQueue[mem_pkt->qosValue()].erase(to_write);

        delete mem_pkt;

        // If we emptied the write queue, or got sufficiently below the
        // threshold (using the minWritesPerSwitch as the hysteresis) and
        // are not draining, or we have reads waiting and have done enough
        // writes, then switch to reads.
        // If we are interfacing to NVM and have filled the writeRespQueue,
        // with only NVM writes in Q, then switch to reads
        bool below_threshold =
            mem_intr->writeQueueSize + minWritesPerSwitch < writeLowThreshold;

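        // Hysteresis example (values for illustration only): with
        // writeLowThreshold = 32 and minWritesPerSwitch = 16,
        // below_threshold only becomes true once fewer than 16 writes
        // remain queued, so each turnaround drains a batch of writes
        // instead of oscillating around the threshold.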
        if (mem_intr->writeQueueSize == 0 ||
            (below_threshold && drainState() != DrainState::Draining) ||
            (mem_intr->readQueueSize &&
             mem_intr->writesThisTime >= minWritesPerSwitch) ||
            (mem_intr->readQueueSize && (nvmWriteBlock(mem_intr)))) {

            // turn the bus back around for reads again
            mem_intr->busStateNext = MemCtrl::READ;

            // note that we switch back to reads also in the idle
            // case, which eventually will check for any draining and
            // also pause any further scheduling if there is really
            // nothing to do
        }
    }
    // It is possible that a refresh to another rank kicks things back into
    // action before reaching this point.
    if (!next_req_event.scheduled())
        schedule(next_req_event, std::max(mem_intr->nextReqTime, curTick()));

    if (retry_wr_req && mem_intr->writeQueueSize < writeBufferSize) {
        retry_wr_req = false;
        port.sendRetryReq();
    }
}

bool
MemCtrl::packetReady(MemPacket* pkt, MemInterface* mem_intr)
{
    return mem_intr->burstReady(pkt);
}

Tick
MemCtrl::minReadToWriteDataGap()
{
    return dram->minReadToWriteDataGap();
}

Tick
MemCtrl::minWriteToReadDataGap()
{
    return dram->minWriteToReadDataGap();
}

Addr
MemCtrl::burstAlign(Addr addr, MemInterface* mem_intr) const
{
    return (addr & ~(Addr(mem_intr->bytesPerBurst() - 1)));
}
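
// E.g. with a 64-byte burst, burstAlign(0x7C, ...) masks off the low six
// address bits and yields 0x40.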

bool
MemCtrl::pktSizeCheck(MemPacket* mem_pkt, MemInterface* mem_intr) const
{
    return (mem_pkt->size <= mem_intr->bytesPerBurst());
}

MemCtrl::CtrlStats::CtrlStats(MemCtrl &_ctrl)
    : statistics::Group(&_ctrl),
    ctrl(_ctrl),

    ADD_STAT(readReqs, statistics::units::Count::get(),
             "Number of read requests accepted"),
    ADD_STAT(writeReqs, statistics::units::Count::get(),
             "Number of write requests accepted"),

    ADD_STAT(readBursts, statistics::units::Count::get(),
             "Number of controller read bursts, including those serviced by "
             "the write queue"),
    ADD_STAT(writeBursts, statistics::units::Count::get(),
             "Number of controller write bursts, including those merged in "
             "the write queue"),
    ADD_STAT(servicedByWrQ, statistics::units::Count::get(),
             "Number of controller read bursts serviced by the write queue"),
    ADD_STAT(mergedWrBursts, statistics::units::Count::get(),
             "Number of controller write bursts merged with an existing one"),

    ADD_STAT(neitherReadNorWriteReqs, statistics::units::Count::get(),
             "Number of requests that are neither read nor write"),

    ADD_STAT(avgRdQLen, statistics::units::Rate<
                statistics::units::Count, statistics::units::Tick>::get(),
             "Average read queue length when enqueuing"),
    ADD_STAT(avgWrQLen, statistics::units::Rate<
                statistics::units::Count, statistics::units::Tick>::get(),
             "Average write queue length when enqueuing"),

    ADD_STAT(numRdRetry, statistics::units::Count::get(),
             "Number of times read queue was full causing retry"),
    ADD_STAT(numWrRetry, statistics::units::Count::get(),
             "Number of times write queue was full causing retry"),

    ADD_STAT(readPktSize, statistics::units::Count::get(),
             "Read request sizes (log2)"),
    ADD_STAT(writePktSize, statistics::units::Count::get(),
             "Write request sizes (log2)"),

    ADD_STAT(rdQLenPdf, statistics::units::Count::get(),
             "What read queue length does an incoming req see"),
    ADD_STAT(wrQLenPdf, statistics::units::Count::get(),
             "What write queue length does an incoming req see"),

    ADD_STAT(rdPerTurnAround, statistics::units::Count::get(),
             "Reads before turning the bus around for writes"),
    ADD_STAT(wrPerTurnAround, statistics::units::Count::get(),
             "Writes before turning the bus around for reads"),

    ADD_STAT(bytesReadWrQ, statistics::units::Byte::get(),
             "Total number of bytes read from write queue"),
    ADD_STAT(bytesReadSys, statistics::units::Byte::get(),
             "Total read bytes from the system interface side"),
    ADD_STAT(bytesWrittenSys, statistics::units::Byte::get(),
             "Total written bytes from the system interface side"),

    ADD_STAT(avgRdBWSys, statistics::units::Rate<
                statistics::units::Byte, statistics::units::Second>::get(),
             "Average system read bandwidth in Byte/s"),
    ADD_STAT(avgWrBWSys, statistics::units::Rate<
                statistics::units::Byte, statistics::units::Second>::get(),
             "Average system write bandwidth in Byte/s"),

    ADD_STAT(totGap, statistics::units::Tick::get(),
             "Total gap between requests"),
    ADD_STAT(avgGap, statistics::units::Rate<
                statistics::units::Tick, statistics::units::Count>::get(),
             "Average gap between requests"),

    ADD_STAT(requestorReadBytes, statistics::units::Byte::get(),
             "Per-requestor bytes read from memory"),
    ADD_STAT(requestorWriteBytes, statistics::units::Byte::get(),
             "Per-requestor bytes written to memory"),
    ADD_STAT(requestorReadRate, statistics::units::Rate<
                statistics::units::Byte, statistics::units::Second>::get(),
             "Per-requestor bytes read from memory rate"),
    ADD_STAT(requestorWriteRate, statistics::units::Rate<
                statistics::units::Byte, statistics::units::Second>::get(),
             "Per-requestor bytes written to memory rate"),
    ADD_STAT(requestorReadAccesses, statistics::units::Count::get(),
             "Per-requestor read serviced memory accesses"),
    ADD_STAT(requestorWriteAccesses, statistics::units::Count::get(),
             "Per-requestor write serviced memory accesses"),
    ADD_STAT(requestorReadTotalLat, statistics::units::Tick::get(),
             "Per-requestor read total memory access latency"),
    ADD_STAT(requestorWriteTotalLat, statistics::units::Tick::get(),
             "Per-requestor write total memory access latency"),
    ADD_STAT(requestorReadAvgLat, statistics::units::Rate<
                statistics::units::Tick, statistics::units::Count>::get(),
             "Per-requestor read average memory access latency"),
    ADD_STAT(requestorWriteAvgLat, statistics::units::Rate<
                statistics::units::Tick, statistics::units::Count>::get(),
             "Per-requestor write average memory access latency")
{
}

void
MemCtrl::CtrlStats::regStats()
{
    using namespace statistics;

    assert(ctrl.system());
    const auto max_requestors = ctrl.system()->maxRequestors();

    avgRdQLen.precision(2);
    avgWrQLen.precision(2);

    readPktSize.init(ceilLog2(ctrl.system()->cacheLineSize()) + 1);
    writePktSize.init(ceilLog2(ctrl.system()->cacheLineSize()) + 1);

    rdQLenPdf.init(ctrl.readBufferSize);
    wrQLenPdf.init(ctrl.writeBufferSize);

    rdPerTurnAround
        .init(ctrl.readBufferSize)
        .flags(nozero);
    wrPerTurnAround
        .init(ctrl.writeBufferSize)
        .flags(nozero);

    avgRdBWSys.precision(8);
    avgWrBWSys.precision(8);
    avgGap.precision(2);

    // per-requestor bytes read and written to memory
    requestorReadBytes
        .init(max_requestors)
        .flags(nozero | nonan);

    requestorWriteBytes
        .init(max_requestors)
        .flags(nozero | nonan);

    // per-requestor bytes read and written to memory rate
    requestorReadRate
        .flags(nozero | nonan)
        .precision(12);

    requestorReadAccesses
        .init(max_requestors)
        .flags(nozero);

    requestorWriteAccesses
        .init(max_requestors)
        .flags(nozero);

    requestorReadTotalLat
        .init(max_requestors)
        .flags(nozero | nonan);

    requestorReadAvgLat
        .flags(nonan)
        .precision(2);

    requestorWriteRate
        .flags(nozero | nonan)
        .precision(12);

    requestorWriteTotalLat
        .init(max_requestors)
        .flags(nozero | nonan);

    requestorWriteAvgLat
        .flags(nonan)
        .precision(2);

    for (int i = 0; i < max_requestors; i++) {
        const std::string requestor = ctrl.system()->getRequestorName(i);
        requestorReadBytes.subname(i, requestor);
        requestorReadRate.subname(i, requestor);
        requestorWriteBytes.subname(i, requestor);
        requestorWriteRate.subname(i, requestor);
        requestorReadAccesses.subname(i, requestor);
        requestorWriteAccesses.subname(i, requestor);
        requestorReadTotalLat.subname(i, requestor);
        requestorReadAvgLat.subname(i, requestor);
        requestorWriteTotalLat.subname(i, requestor);
        requestorWriteAvgLat.subname(i, requestor);
    }

    // Formula stats
    avgRdBWSys = (bytesReadSys) / simSeconds;
    avgWrBWSys = (bytesWrittenSys) / simSeconds;

    avgGap = totGap / (readReqs + writeReqs);

    requestorReadRate = requestorReadBytes / simSeconds;
    requestorWriteRate = requestorWriteBytes / simSeconds;
    requestorReadAvgLat = requestorReadTotalLat / requestorReadAccesses;
    requestorWriteAvgLat = requestorWriteTotalLat / requestorWriteAccesses;
}

void
MemCtrl::recvFunctional(PacketPtr pkt)
{
    bool found = recvFunctionalLogic(pkt, dram);

    panic_if(!found, "Can't handle address range for packet %s\n",
             pkt->print());
}

void
MemCtrl::recvMemBackdoorReq(const MemBackdoorReq &req,
        MemBackdoorPtr &backdoor)
{
    panic_if(!dram->getAddrRange().contains(req.range().start()),
             "Can't handle address range for backdoor %s.",
             req.range().to_string());

    dram->getBackdoor(backdoor);
}

bool
MemCtrl::recvFunctionalLogic(PacketPtr pkt, MemInterface* mem_intr)
{
    if (mem_intr->getAddrRange().contains(pkt->getAddr())) {
        // rely on the abstract memory
        mem_intr->functionalAccess(pkt);
        return true;
    } else {
        return false;
    }
}

Port &
MemCtrl::getPort(const std::string &if_name, PortID idx)
{
    if (if_name != "port") {
        return qos::MemCtrl::getPort(if_name, idx);
    } else {
        return port;
    }
}

bool
MemCtrl::allIntfDrained() const
{
    // DRAM: ensure dram is in power down and refresh IDLE states
    // NVM: No outstanding NVM writes
    // NVM: All other queues verified as needed with calling logic
    return dram->allRanksDrained();
}

DrainState
MemCtrl::drain()
{
    // if there is anything in any of our internal queues, keep track
    // of that as well
    if (totalWriteQueueSize || totalReadQueueSize || !respQueue.empty() ||
        !allIntfDrained()) {
        DPRINTF(Drain, "Memory controller not drained, write: %d, read: %d,"
                " resp: %d\n", totalWriteQueueSize, totalReadQueueSize,
                respQueue.size());

        // the only queue that is not drained automatically over time
        // is the write queue, thus kick things into action if needed
        if (!totalWriteQueueSize && !nextReqEvent.scheduled()) {
            DPRINTF(Drain,"Scheduling nextReqEvent from drain\n");
            schedule(nextReqEvent, curTick());
        }

        dram->drainRanks();

        return DrainState::Draining;
    } else {
        return DrainState::Drained;
    }
}

void
MemCtrl::drainResume()
{
    if (!isTimingMode && system()->isTimingMode()) {
        // if we switched to timing mode, kick things into action,
        // and behave as if we restored from a checkpoint
        startup();
        dram->startup();
    } else if (isTimingMode && !system()->isTimingMode()) {
        // if we switch from timing mode, stop the refresh events to
        // not cause issues with KVM
        dram->suspend();
    }

    // update the mode
    isTimingMode = system()->isTimingMode();
}

AddrRangeList
MemCtrl::getAddrRanges()
{
    AddrRangeList range;
    range.push_back(dram->getAddrRange());
    return range;
}

MemCtrl::MemoryPort::
MemoryPort(const std::string& name, MemCtrl& _ctrl)
    : QueuedResponsePort(name, queue), queue(_ctrl, *this, true),
      ctrl(_ctrl)
{ }

AddrRangeList
MemCtrl::MemoryPort::getAddrRanges() const
{
    return ctrl.getAddrRanges();
}

void
MemCtrl::MemoryPort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(ctrl.name());

    if (!queue.trySatisfyFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        ctrl.recvFunctional(pkt);
    } else {
        // The packet's request is satisfied by the queue, but the queue
        // does not call makeResponse.
        // Here, change the packet to the corresponding response
        pkt->makeResponse();
    }

    pkt->popLabel();
}

void
MemCtrl::MemoryPort::recvMemBackdoorReq(const MemBackdoorReq &req,
                                        MemBackdoorPtr &backdoor)
{
    ctrl.recvMemBackdoorReq(req, backdoor);
}

Tick
MemCtrl::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return ctrl.recvAtomic(pkt);
}

Tick
MemCtrl::MemoryPort::recvAtomicBackdoor(
        PacketPtr pkt, MemBackdoorPtr &backdoor)
{
    return ctrl.recvAtomicBackdoor(pkt, backdoor);
}

bool
MemCtrl::MemoryPort::recvTimingReq(PacketPtr pkt)
{
    // pass it to the memory controller
    return ctrl.recvTimingReq(pkt);
}

void
MemCtrl::MemoryPort::disableSanityCheck()
{
    queue.disableSanityCheck();
}

} // namespace memory
} // namespace gem5
#define DPRINTF(x,...)
Definition trace.hh:210
const AddrRange & range() const
Definition backdoor.hh:140
virtual std::string name() const
Definition named.hh:47
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition packet.hh:295
bool isRead() const
Definition packet.hh:593
Addr getAddr() const
Definition packet.hh:807
void pushLabel(const std::string &lbl)
Push label for PrintReq (safe to call unconditionally).
Definition packet.hh:1470
const std::string & cmdString() const
Return the string name of the cmd field (for debugging and tracing).
Definition packet.hh:588
bool isResponse() const
Definition packet.hh:598
void print(std::ostream &o, int verbosity=0, const std::string &prefix="") const
Definition packet.cc:368
bool needsResponse() const
Definition packet.hh:608
uint32_t payloadDelay
The extra pipelining delay from seeing the packet until the end of payload is transmitted by the comp...
Definition packet.hh:449
void makeResponse()
Take a request packet and modify it in place to be suitable for returning as a response to that reque...
Definition packet.hh:1062
RequestorID requestorId() const
Definition packet.hh:780
uint32_t headerDelay
The extra delay from seeing the packet until the header is transmitted.
Definition packet.hh:431
bool hasData() const
Definition packet.hh:614
uint8_t qosValue() const
QoS Value getter Returns 0 if QoS value was never set (constructor default).
Definition packet.hh:769
bool isWrite() const
Definition packet.hh:594
unsigned getSize() const
Definition packet.hh:817
void popLabel()
Pop label for PrintReq (safe to call unconditionally).
Definition packet.hh:1480
bool cacheResponding() const
Definition packet.hh:659
Ports are used to interface objects to each other.
Definition port.hh:62
bool isConnected() const
Is this port currently connected to a peer?
Definition port.hh:133
A queued port is a port that has an infinite queue for outgoing packets and thus decouples the module...
Definition qport.hh:62
void schedTimingResp(PacketPtr pkt, Tick when)
Schedule the sending of a timing response.
Definition qport.hh:94
void sendRangeChange() const
Called by the owner to send a range change.
Definition port.hh:380
void sendRetryReq()
Send a retry to the request port that previously attempted a sendTimingReq to this response port and ...
Definition port.hh:489
bool isTimingMode() const
Is the system in timing mode?
Definition system.hh:270
void access(PacketPtr pkt)
Perform an untimed memory access and update all the state (e.g.
void functionalAccess(PacketPtr pkt)
Perform an untimed memory read or write without changing anything but the memory itself.
void getBackdoor(MemBackdoorPtr &bd_ptr)
AddrRange getAddrRange() const
Get the address range.
A burst helper helps organize and manage a packet that is larger than the memory burst size.
Definition mem_ctrl.hh:80
unsigned int burstsServiced
Number of bursts serviced so far for a system packet.
Definition mem_ctrl.hh:87
const unsigned int burstCount
Number of bursts requred for a system packet.
Definition mem_ctrl.hh:84
void recvFunctional(PacketPtr pkt) override
Receive a functional request packet from the peer.
Definition mem_ctrl.cc:1490
MemoryPort(const std::string &name, MemCtrl &_ctrl)
Definition mem_ctrl.cc:1478
bool recvTimingReq(PacketPtr) override
Receive a timing request from the peer.
Definition mem_ctrl.cc:1530
AddrRangeList getAddrRanges() const override
Get a list of the non-overlapping address ranges the owner is responsible for.
Definition mem_ctrl.cc:1484
Tick recvAtomic(PacketPtr pkt) override
Receive an atomic request packet from the peer.
Definition mem_ctrl.cc:1517
Tick recvAtomicBackdoor(PacketPtr pkt, MemBackdoorPtr &backdoor) override
Receive an atomic request packet from the peer, and optionally provide a backdoor to the data being a...
Definition mem_ctrl.cc:1523
void recvMemBackdoorReq(const MemBackdoorReq &req, MemBackdoorPtr &backdoor) override
Receive a request for a back door to a range of memory.
Definition mem_ctrl.cc:1510
The memory controller is a single-channel memory controller capturing the most important timing const...
Definition mem_ctrl.hh:247
virtual void recvFunctional(PacketPtr pkt)
Definition mem_ctrl.cc:1375
virtual void pruneBurstTick()
Remove commands that have already issued from burstTicks.
Definition mem_ctrl.cc:662
uint32_t writeLowThreshold
Definition mem_ctrl.hh:517
enums::MemSched memSchedPolicy
Memory controller configuration initialized based on parameter values.
Definition mem_ctrl.hh:525
bool recvFunctionalLogic(PacketPtr pkt, MemInterface *mem_intr)
Definition mem_ctrl.cc:1395
bool inReadBusState(bool next_state, const MemInterface *mem_intr) const
Check the current direction of the memory channel.
Definition mem_ctrl.cc:770
bool retryRdReq
Remember if we have to retry a request when available.
Definition mem_ctrl.hh:293
void printQs() const
Used for debugging to observe the contents of the queues.
Definition mem_ctrl.cc:382
const uint32_t minReadsPerSwitch
Definition mem_ctrl.hh:519
virtual void startup() override
startup() is the final initialization call before simulation.
Definition mem_ctrl.cc:110
void addToWriteQueue(PacketPtr pkt, unsigned int pkt_count, MemInterface *mem_intr)
Decode the incoming pkt, create a mem_pkt and push to the back of the write queue.
Definition mem_ctrl.cc:304
Tick recvAtomicLogic(PacketPtr pkt, MemInterface *mem_intr)
Definition mem_ctrl.cc:136
std::deque< MemPacket * > respQueue
Response queue where read packets wait after we're done working with them, but it's not time to send ...
Definition mem_ctrl.hh:492
MemoryPort port
Our incoming port, for a multi-ported controller add a crossbar in front of it.
Definition mem_ctrl.hh:283
uint32_t writeHighThreshold
Definition mem_ctrl.hh:516
std::unique_ptr< Packet > pendingDelete
Upstream caches need this packet until true is returned, so hold it for deletion until a subsequent c...
Definition mem_ctrl.hh:628
std::vector< MemPacketQueue > writeQueue
Definition mem_ctrl.hh:473
virtual Tick verifyMultiCmd(Tick cmd_tick, Tick max_cmds_per_burst, Tick max_multi_cmd_split=0)
Check for command bus contention for multi-cycle (2 currently) command.
Definition mem_ctrl.cc:706
EventFunctionWrapper respondEvent
Definition mem_ctrl.hh:313
virtual MemPacketQueue::iterator chooseNext(MemPacketQueue &queue, Tick extra_col_delay, MemInterface *mem_intr)
The memory schduler/arbiter - picks which request needs to go next, based on the specified policy suc...
Definition mem_ctrl.cc:557
virtual std::pair< MemPacketQueue::iterator, Tick > chooseNextFRFCFS(MemPacketQueue &queue, Tick extra_col_delay, MemInterface *mem_intr)
For FR-FCFS policy reorder the read/write queue depending on row buffer hits and earliest bursts avai...
Definition mem_ctrl.cc:601
std::vector< MemPacketQueue > & selQueue(bool is_read)
Select either the read or write queue.
Definition mem_ctrl.hh:636
bool readQueueFull(unsigned int pkt_count) const
Check if the read queue has room for more entries.
Definition mem_ctrl.cc:166
virtual Tick doBurstAccess(MemPacket *mem_pkt, MemInterface *mem_intr)
Actually do the burst based on media specific access function.
Definition mem_ctrl.cc:796
MemInterface * dram
Definition mem_ctrl.hh:504
virtual void processNextReqEvent(MemInterface *mem_intr, MemPacketQueue &resp_queue, EventFunctionWrapper &resp_event, EventFunctionWrapper &next_req_event, bool &retry_wr_req)
Bunch of things requires to setup "events" in gem5 When event "respondEvent" occurs for example,...
Definition mem_ctrl.cc:881
bool addToReadQueue(PacketPtr pkt, unsigned int pkt_count, MemInterface *mem_intr)
When a new read comes in, first check if the write q has a pending request to the same address....
Definition mem_ctrl.cc:189
std::unordered_multiset< Tick > burstTicks
Holds count of commands issued in burst window starting at defined Tick.
Definition mem_ctrl.hh:499
virtual Addr burstAlign(Addr addr, MemInterface *mem_intr) const
Burst-align an address.
Definition mem_ctrl.cc:1170
bool inWriteBusState(bool next_state, const MemInterface *mem_intr) const
Check the current direction of the memory channel.
Definition mem_ctrl.cc:783
const uint32_t minWritesPerSwitch
Definition mem_ctrl.hh:518
bool writeQueueFull(unsigned int pkt_count) const
Check if the write queue has room for more entries.
Definition mem_ctrl.cc:178
virtual void init() override
init() is called after all C++ SimObjects have been created and all ports are connected.
Definition mem_ctrl.cc:100
const Tick backendLatency
Pipeline latency of the backend and PHY.
Definition mem_ctrl.hh:539
uint32_t readBufferSize
The following are basic design parameters of the memory controller, and are initialized based on para...
Definition mem_ctrl.hh:514
virtual void recvMemBackdoorReq(const MemBackdoorReq &req, MemBackdoorPtr &backdoor)
Definition mem_ctrl.cc:1384
const Tick frontendLatency
Pipeline latency of the controller frontend.
Definition mem_ctrl.hh:532
virtual bool respQEmpty()
Definition mem_ctrl.hh:641
virtual bool allIntfDrained() const
Ensure that all interfaced have drained commands.
Definition mem_ctrl.cc:1417
EventFunctionWrapper nextReqEvent
Definition mem_ctrl.hh:307
virtual bool packetReady(MemPacket *pkt, MemInterface *mem_intr)
Determine if there is a packet that can issue.
Definition mem_ctrl.cc:1152
virtual Tick verifySingleCmd(Tick cmd_tick, Tick max_cmds_per_burst, bool row_cmd)
Check for command bus contention for single cycle command.
Definition mem_ctrl.cc:683
virtual bool nvmWriteBlock(MemInterface *mem_intr)
Will check if all writes are for nvm interface and nvm's write resp queue is full.
Definition mem_ctrl.cc:859
virtual void processRespondEvent(MemInterface *mem_intr, MemPacketQueue &queue, EventFunctionWrapper &resp_event, bool &retry_rd_req)
Definition mem_ctrl.cc:488
virtual void accessAndRespond(PacketPtr pkt, Tick static_latency, MemInterface *mem_intr)
When a packet reaches its "readyTime" in the response Q, use the "access()" method in AbstractMemory ...
Definition mem_ctrl.cc:622
virtual Tick minWriteToReadDataGap()
Calculate the minimum delay used when scheduling a write-to-read transision.
Definition mem_ctrl.cc:1164
virtual bool recvTimingReq(PacketPtr pkt)
Definition mem_ctrl.cc:407
virtual Tick minReadToWriteDataGap()
Calculate the minimum delay used when scheduling a read-to-write transision.
Definition mem_ctrl.cc:1158
DrainState drain() override
Draining is the process of clearing out the states of SimObjects.These are the SimObjects that are pa...
Definition mem_ctrl.cc:1426
std::vector< MemPacketQueue > readQueue
The controller's main read and write queues, with support for QoS reordering.
Definition mem_ctrl.hh:472
bool isTimingMode
Remember if the memory system is in timing mode.
Definition mem_ctrl.hh:288
virtual Tick recvAtomic(PacketPtr pkt)
Definition mem_ctrl.cc:125
Port & getPort(const std::string &if_name, PortID idx=InvalidPortID) override
Get a port with a given name and index.
Definition mem_ctrl.cc:1407
virtual bool memBusy(MemInterface *mem_intr)
Checks if the memory interface is already busy.
Definition mem_ctrl.cc:838
virtual AddrRangeList getAddrRanges()
Definition mem_ctrl.cc:1470
MemCtrl(const MemCtrlParams &p)
Definition mem_ctrl.cc:60
virtual Tick recvAtomicBackdoor(PacketPtr pkt, MemBackdoorPtr &backdoor)
Definition mem_ctrl.cc:158
Tick getBurstWindow(Tick cmd_tick)
Calculate the burst-window-aligned tick.
Definition mem_ctrl.cc:675
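Window alignment is a modulo truncation against commandWindow (documented below); a sketch:

    Tick
    getBurstWindow(Tick cmd_tick)
    {
        // Start tick of the command window that contains cmd_tick.
        return cmd_tick - (cmd_tick % commandWindow);
    }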
std::unordered_set< Addr > isInWriteQueue
To avoid iterating over the write queue to check for overlapping transactions, maintain a set of burs...
Definition mem_ctrl.hh:482
virtual bool pktSizeCheck(MemPacket *mem_pkt, MemInterface *mem_intr) const
Check if mem pkt's size is sane.
Definition mem_ctrl.cc:1176
virtual void drainResume() override
Resume execution after a successful drain.
Definition mem_ctrl.cc:1452
const Tick commandWindow
Length of a command window, used to check command bandwidth.
Definition mem_ctrl.hh:545
virtual void nonDetermReads(MemInterface *mem_intr)
Accesses the memory interface and selects non-deterministic reads to issue.
Definition mem_ctrl.cc:866
General interface to a memory device. Includes functions and parameters shared across media types.
virtual void setupRank(const uint8_t rank, const bool is_read)=0
Setup the rank based on packet received.
virtual MemPacket * decodePacket(const PacketPtr pkt, Addr pkt_addr, unsigned int size, bool is_read, uint8_t pseudo_channel=0)
Address decoder to figure out physical mapping onto ranks, banks, and rows.
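An illustration of the kind of mapping decodePacket performs; the field order and the banksPerRank/ranksPerChannel names here are hypothetical, real interfaces derive them from the configured address mapping:

    // Hypothetical RoRaBaCo-style decode, assuming power-of-two geometry.
    Addr a = pkt_addr / burst_size;            // drop the offset within a burst
    const uint8_t bank = a % banksPerRank;     a /= banksPerRank;
    const uint8_t rank = a % ranksPerChannel;  a /= ranksPerChannel;
    const uint32_t row = a;                    // remaining bits select the row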
virtual void suspend()
This function is DRAM specific.
virtual std::pair< Tick, Tick > doBurstAccess(MemPacket *mem_pkt, Tick next_burst_at, const std::vector< MemPacketQueue > &queue)=0
This function performs the burst and updates stats.
virtual Tick commandOffset() const =0
uint8_t pseudoChannel
pseudo channel number used for HBM modeling
virtual bool burstReady(MemPacket *pkt) const =0
Check if a burst operation can be issued to the interface.
virtual void checkRefreshState(uint8_t rank)
This function is DRAM specific.
virtual bool isBusy(bool read_queue_empty, bool all_writes_nvm)=0
This function checks if ranks are busy.
uint32_t readsThisTime
Reads/writes performed by the controller for this interface before bus direction is switched.
uint32_t readQueueSize
Read/write packets in the read/write queue for this interface; qos/mem_ctrl.hh has similar counters,...
virtual void respondEvent(uint8_t rank)
This function is DRAM specific.
virtual Tick accessLatency() const =0
virtual bool writeRespQueueFull() const
This function is NVM specific.
virtual void chooseRead(MemPacketQueue &queue)
This function is NVM specific.
virtual std::pair< MemPacketQueue::iterator, Tick > chooseNextFRFCFS(MemPacketQueue &queue, Tick min_col_at) const =0
For the FR-FCFS policy, find the first command that can issue. Function will be overridden by the interface to sele...
MemCtrl::BusState busStateNext
Bus state for the next request event triggered.
Tick nextBurstAt
Tick until which the controller must wait before issuing the next RD/WR burst.
virtual bool readsWaitingToIssue() const
This function is NVM specific.
uint32_t numWritesQueued
NVM-specific variable, but declaring it here allows treating different interfaces in a more general wa...
virtual bool allRanksDrained() const =0
Check drain state of interface.
virtual void drainRanks()
This function is DRAM specific.
uint32_t bytesPerBurst() const
A memory packet stores a packet along with the timestamp of when it entered the queue,...
Definition mem_ctrl.hh:99
Tick readyTime
When will the request leave the controller.
Definition mem_ctrl.hh:106
void qosValue(const uint8_t qv)
Set the packet QoS value (interface compatibility with Packet)
Definition mem_ctrl.hh:163
const uint8_t pseudoChannel
pseudo channel number
Definition mem_ctrl.hh:120
BurstHelper * burstHelper
A pointer to the BurstHelper if this MemPacket is a split packet If not a split packet (common case),...
Definition mem_ctrl.hh:152
unsigned int size
The size of this DRAM packet in bytes. It is always equal to or smaller than the burst size.
Definition mem_ctrl.hh:146
Addr addr
The starting address of the packet.
Definition mem_ctrl.hh:140
bool isRead() const
Return true if it's a read packet (interface compatibility with Packet)
Definition mem_ctrl.hh:193
const Tick entryTime
When did the request enter the controller.
Definition mem_ctrl.hh:103
const PacketPtr pkt
This comes from the outside world.
Definition mem_ctrl.hh:109
RequestorID requestorId() const
Get the packet RequestorID (interface compatibility with Packet)
Definition mem_ctrl.hh:175
const uint8_t rank
Will be populated by address decoder.
Definition mem_ctrl.hh:123
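Together these fields let scheduling code reason about a queued burst; a small hypothetical helper for illustration:

    // Hypothetical: age-check a queued read using entryTime and isRead().
    bool
    isStale(const MemPacket *mem_pkt, Tick max_wait)
    {
        assert(mem_pkt->entryTime <= curTick());
        return mem_pkt->isRead() &&
               (curTick() - mem_pkt->entryTime) > max_wait;
    }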
void logResponse(BusState dir, RequestorID id, uint8_t _qos, Addr addr, uint64_t entries, double delay)
Called upon receiving a response; updates statistics and queue status.
Definition mem_ctrl.cc:148
uint8_t qosSchedule(std::initializer_list< Queues * > queues_ptr, uint64_t queue_entry_size, const PacketPtr pkt)
Assign priority to a packet by executing the configured QoS policy.
Definition mem_ctrl.hh:495
uint64_t totalWriteQueueSize
Total write request queue length, in packets.
Definition mem_ctrl.hh:133
void recordTurnaroundStats(BusState busState, BusState busStateNext)
Record statistics on turnarounds based on busStateNext and busState values.
Definition mem_ctrl.cc:358
BusState selectNextBusState()
Returns next bus direction (READ or WRITE) based on configured policy.
Definition mem_ctrl.cc:246
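The write thresholds and minWritesPerSwitch form a hysteresis of roughly this shape (a sketch, not the exact policy code; writesThisTime is assumed to mirror the readsThisTime counter above):

    // Drain writes until the queue falls below the low-water mark, but only
    // after performing at least minWritesPerSwitch writes; start draining
    // once the queue climbs past the high-water mark.
    if (busState == WRITE && writesThisTime >= minWritesPerSwitch &&
        totalWriteQueueSize < writeLowThreshold) {
        busStateNext = READ;
    } else if (busState == READ && totalWriteQueueSize > writeHighThreshold) {
        busStateNext = WRITE;
    }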
const std::unique_ptr< TurnaroundPolicy > turnPolicy
QoS Bus Turnaround Policy: selects the bus direction (READ/WRITE)
Definition mem_ctrl.hh:90
uint8_t numPriorities() const
Gets the total number of priority levels in the QoS memory controller.
Definition mem_ctrl.hh:366
uint64_t totalReadQueueSize
Total read request queue length, in packets.
Definition mem_ctrl.hh:130
System * system() const
Read the system pointer.
Definition mem_ctrl.hh:370
BusState busStateNext
Bus state for the next request event triggered.
Definition mem_ctrl.hh:142
uint8_t schedule(RequestorID id, uint64_t data)
Definition mem_ctrl.cc:217
void logRequest(BusState dir, RequestorID id, uint8_t _qos, Addr addr, uint64_t entries)
Called upon receiving a request; updates statistics and queue status.
Definition mem_ctrl.cc:91
void sample(const U &v, int n=1)
Add a value to the distribution n times.
Statistics container.
Definition group.hh:93
size_type size() const
Return the number of elements, always 1 for a scalar.
STL deque class.
Definition stl.hh:44
STL pair class.
Definition stl.hh:58
STL vector class.
Definition stl.hh:37
DRAMInterface declaration.
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Definition group.hh:75
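ADD_STAT is how the scalar and vector statistics listed below get registered; a representative constructor-initializer use (the description strings are illustrative, not the exact ones gem5 ships):

    MemCtrl::CtrlStats::CtrlStats(MemCtrl &_ctrl)
        : statistics::Group(&_ctrl),
          ADD_STAT(readReqs, statistics::units::Count::get(),
                   "Number of read requests accepted"),
          ADD_STAT(writeReqs, statistics::units::Count::get(),
                   "Number of write requests accepted")
    {
    }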
bool contains(const Addr &a) const
Determine if the range contains an address.
Addr start() const
Get the start address of the range.
std::string to_string() const
Get a string representation of the range.
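Typical use of these AddrRange helpers when checking where a packet lands; a sketch using gem5's DPRINTF with the MemCtrl debug flag:

    for (const auto &r : getAddrRanges()) {
        if (r.contains(pkt->getAddr())) {
            DPRINTF(MemCtrl, "Packet for %#x falls in range %s (start %#x)\n",
                    pkt->getAddr(), r.to_string(), r.start());
            break;
        }
    }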
static constexpr int ceilLog2(const T &n)
Definition intmath.hh:84
static constexpr T divCeil(const T &a, const U &b)
Definition intmath.hh:110
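divCeil is what splits a request into bursts: a packet that starts mid-burst needs ceil((offset + size) / burst_size) of them. A sketch, with burst_size a local standing in for the interface's burst width:

    // Offset of the request inside its first burst (power-of-two burst_size).
    const unsigned offset = pkt->getAddr() & (burst_size - 1);
    const unsigned int pkt_count = divCeil(offset + pkt->getSize(), burst_size);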
void signalDrainDone() const
Signal that an object is drained.
Definition drain.hh:305
DrainState drainState() const
Return the current drain state of an object.
Definition drain.hh:324
DrainState
Object drain/handover states.
Definition drain.hh:75
@ Draining
Draining buffers pending serialization/handover.
@ Drained
Buffers drained, ready for serialization/handover.
bool scheduled() const
Determine if the current event is scheduled.
Definition eventq.hh:458
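scheduled() underpins the usual gem5 idiom of conditionally arming an event, e.g. kicking the controller's nextReqEvent only when no tick is already pending:

    if (!nextReqEvent.scheduled()) {
        // Nothing queued yet: start processing requests this tick.
        schedule(nextReqEvent, curTick());
    }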
#define panic(...)
This implements a cprintf based panic() function.
Definition logging.hh:188
#define fatal(...)
This implements a cprintf based fatal() function.
Definition logging.hh:200
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
Definition logging.hh:214
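A representative panic_if guard of the kind used on this controller's request path (condition and message shown for illustration):

    // Unreachable by construction: the controller never sees such packets.
    panic_if(pkt->cacheResponding(),
             "Should not see packets where cache is responding");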
virtual void startup()
startup() is the final initialization call before simulation.
Definition sim_object.cc:96
virtual Port & getPort(const std::string &if_name, PortID idx=InvalidPortID)
Get a port with a given name and index.
MemCtrl declaration.
MemInterface declaration.
Bitfield< 7 > i
Definition misc_types.hh:67
Bitfield< 23, 0 > offset
Definition types.hh:144
Bitfield< 0 > p
Bitfield< 25 > vec
Definition misc.hh:113
Bitfield< 3 > addr
Definition types.hh:84
const FlagsType nonan
Don't print if this is NAN.
Definition info.hh:69
const FlagsType nozero
Don't print if this is zero.
Definition info.hh:67
Copyright (c) 2024 - Pranith Kumar Copyright (c) 2020 Inria All rights reserved.
Definition binary32.hh:36
Tick curTick()
The universal simulation clock.
Definition cur_tick.hh:46
uint64_t Addr
Address type. This will probably be moved somewhere else in the near future.
Definition types.hh:147
int16_t PortID
Port index/ID type, and a symbolic name for an invalid port id.
Definition types.hh:245
uint64_t Tick
Tick count type.
Definition types.hh:58
const Tick MaxTick
Definition types.hh:60
statistics::Formula & simSeconds
Definition stats.cc:45
NVMInterface declaration.
statistics::Scalar writeReqs
Definition mem_ctrl.hh:572
statistics::Scalar mergedWrBursts
Definition mem_ctrl.hh:576
statistics::Scalar readReqs
Definition mem_ctrl.hh:571
statistics::Scalar servicedByWrQ
Definition mem_ctrl.hh:575
statistics::Histogram rdPerTurnAround
Definition mem_ctrl.hh:588
statistics::Vector readPktSize
Definition mem_ctrl.hh:584
statistics::Scalar numWrRetry
Definition mem_ctrl.hh:583
statistics::Scalar numRdRetry
Definition mem_ctrl.hh:582
statistics::Scalar readBursts
Definition mem_ctrl.hh:573
void regStats() override
Callback to set stat parameters.
Definition mem_ctrl.cc:1279
statistics::Vector requestorReadTotalLat
Definition mem_ctrl.hh:614
statistics::Vector requestorWriteTotalLat
Definition mem_ctrl.hh:615
statistics::Vector requestorWriteBytes
Definition mem_ctrl.hh:603
statistics::Scalar writeBursts
Definition mem_ctrl.hh:574
statistics::Vector writePktSize
Definition mem_ctrl.hh:585
statistics::Histogram wrPerTurnAround
Definition mem_ctrl.hh:589
statistics::Vector requestorWriteAccesses
Definition mem_ctrl.hh:611
statistics::Scalar bytesReadSys
Definition mem_ctrl.hh:592
statistics::Average avgRdQLen
Definition mem_ctrl.hh:579
statistics::Vector requestorReadAccesses
Definition mem_ctrl.hh:610
statistics::Scalar bytesWrittenSys
Definition mem_ctrl.hh:593
statistics::Average avgWrQLen
Definition mem_ctrl.hh:580
statistics::Vector wrQLenPdf
Definition mem_ctrl.hh:587
statistics::Scalar bytesReadWrQ
Definition mem_ctrl.hh:591
statistics::Vector requestorReadBytes
Definition mem_ctrl.hh:602
statistics::Vector rdQLenPdf
Definition mem_ctrl.hh:586
Definition mem.h:38
const std::string & name()
Definition trace.cc:48
