gem5  v22.1.0.0
mem_ctrl.cc
/*
 * Copyright (c) 2010-2020 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2013 Amin Farmahini-Farahani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/mem_ctrl.hh"

#include "base/trace.hh"
#include "debug/DRAM.hh"
#include "debug/Drain.hh"
#include "debug/MemCtrl.hh"
#include "debug/NVM.hh"
#include "debug/QOS.hh"
#include "mem/dram_interface.hh"
#include "mem/mem_interface.hh"
#include "mem/nvm_interface.hh"
#include "sim/system.hh"

namespace gem5
{

namespace memory
{

MemCtrl::MemCtrl(const MemCtrlParams &p) :
    qos::MemCtrl(p),
    port(name() + ".port", *this), isTimingMode(false),
    retryRdReq(false), retryWrReq(false),
    nextReqEvent([this] {processNextReqEvent(dram, respQueue,
                          respondEvent, nextReqEvent, retryWrReq);}, name()),
    respondEvent([this] {processRespondEvent(dram, respQueue,
                          respondEvent, retryRdReq); }, name()),
    dram(p.dram),
    readBufferSize(dram->readBufferSize),
    writeBufferSize(dram->writeBufferSize),
    writeHighThreshold(writeBufferSize * p.write_high_thresh_perc / 100.0),
    writeLowThreshold(writeBufferSize * p.write_low_thresh_perc / 100.0),
    minWritesPerSwitch(p.min_writes_per_switch),
    minReadsPerSwitch(p.min_reads_per_switch),
    writesThisTime(0), readsThisTime(0),
    memSchedPolicy(p.mem_sched_policy),
    frontendLatency(p.static_frontend_latency),
    backendLatency(p.static_backend_latency),
    commandWindow(p.command_window),
    prevArrival(0),
    stats(*this)
{
    DPRINTF(MemCtrl, "Setting up controller\n");

    readQueue.resize(p.qos_priorities);
    writeQueue.resize(p.qos_priorities);

    dram->setCtrl(this, commandWindow);

    // perform a basic check of the write thresholds
    if (p.write_low_thresh_perc >= p.write_high_thresh_perc)
        fatal("Write buffer low threshold %d must be smaller than the "
              "high threshold %d\n", p.write_low_thresh_perc,
              p.write_high_thresh_perc);
    if (p.disable_sanity_check) {
        port.disableSanityCheck();
    }
}
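
// Worked example (illustrative values, not gem5 defaults): with a
// writeBufferSize of 64 entries, write_high_thresh_perc = 85 and
// write_low_thresh_perc = 50 give writeHighThreshold = 64 * 85 / 100.0
// = 54 (truncated to uint32_t) and writeLowThreshold = 32. The distance
// between the two thresholds provides the hysteresis used by the
// read/write bus turnaround decisions in processNextReqEvent().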

void
MemCtrl::init()
{
    if (!port.isConnected()) {
        fatal("MemCtrl %s is unconnected!\n", name());
    } else {
        port.sendRangeChange();
    }
}

void
MemCtrl::startup()
{
    // remember the memory system mode of operation
    isTimingMode = system()->isTimingMode();

    if (isTimingMode) {
        // shift the bus busy time sufficiently far ahead that we never
        // have to worry about negative values when computing the time for
        // the next request, this will add an insignificant bubble at the
        // start of simulation
        dram->nextBurstAt = curTick() + dram->commandOffset();
    }
}

Tick
MemCtrl::recvAtomic(PacketPtr pkt)
{
    if (!dram->getAddrRange().contains(pkt->getAddr())) {
        panic("Can't handle address range for packet %s\n", pkt->print());
    }

    return recvAtomicLogic(pkt, dram);
}


Tick
MemCtrl::recvAtomicLogic(PacketPtr pkt, MemInterface* mem_intr)
{
    DPRINTF(MemCtrl, "recvAtomic: %s 0x%x\n",
                     pkt->cmdString(), pkt->getAddr());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    // do the actual memory access and turn the packet into a response
    mem_intr->access(pkt);

    if (pkt->hasData()) {
        // this value is not supposed to be accurate, just enough to
        // keep things going, mimic a closed page
        // also this latency can't be 0
        return mem_intr->accessLatency();
    }

    return 0;
}

Tick
MemCtrl::recvAtomicBackdoor(PacketPtr pkt, MemBackdoorPtr &backdoor)
{
    Tick latency = recvAtomic(pkt);
    dram->getBackdoor(backdoor);
    return latency;
}

bool
MemCtrl::readQueueFull(unsigned int neededEntries) const
{
    DPRINTF(MemCtrl,
            "Read queue limit %d, current size %d, entries needed %d\n",
            readBufferSize, totalReadQueueSize + respQueue.size(),
            neededEntries);

    auto rdsize_new = totalReadQueueSize + respQueue.size() + neededEntries;
    return rdsize_new > readBufferSize;
}
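
// Worked example (illustrative, assuming readBufferSize = 32): with 20
// entries in the read queue, 10 responses still waiting in respQueue and
// 3 new entries needed, rdsize_new = 20 + 10 + 3 = 33 > 32, so the queue
// reports full. Packets in respQueue count against the limit because
// they still occupy controller buffering until they are sent back.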

bool
MemCtrl::writeQueueFull(unsigned int neededEntries) const
{
    DPRINTF(MemCtrl,
            "Write queue limit %d, current size %d, entries needed %d\n",
            writeBufferSize, totalWriteQueueSize, neededEntries);

    auto wrsize_new = (totalWriteQueueSize + neededEntries);
    return wrsize_new > writeBufferSize;
}

bool
MemCtrl::addToReadQueue(PacketPtr pkt,
                unsigned int pkt_count, MemInterface* mem_intr)
{
    // only add to the read queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(!pkt->isWrite());

    assert(pkt_count != 0);

    // if the request size is larger than burst size, the pkt is split into
    // multiple packets
    // Note if the pkt starting address is not aligned to burst size, the
    // address of first packet is kept unaligned. Subsequent packets
    // are aligned to burst size boundaries. This is to ensure we accurately
    // check read packets against packets in write queue.
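    // Worked example (illustrative, assuming a 32-byte burst size): a
    // 16-byte read starting at address 0x38 becomes two packets, an
    // unaligned 8-byte packet at 0x38 that stops at the 0x40 burst
    // boundary, followed by an aligned 8-byte packet at 0x40.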
    const Addr base_addr = pkt->getAddr();
    Addr addr = base_addr;
    unsigned pktsServicedByWrQ = 0;
    BurstHelper* burst_helper = NULL;

    uint32_t burst_size = mem_intr->bytesPerBurst();

    for (int cnt = 0; cnt < pkt_count; ++cnt) {
        unsigned size = std::min((addr | (burst_size - 1)) + 1,
                        base_addr + pkt->getSize()) - addr;
        stats.readPktSize[ceilLog2(size)]++;
        stats.readBursts++;
        stats.requestorReadAccesses[pkt->requestorId()]++;

        // First check write buffer to see if the data is already at
        // the controller
        bool foundInWrQ = false;
        Addr burst_addr = burstAlign(addr, mem_intr);
        // if the burst address is not present then there is no need
        // looking any further
        if (isInWriteQueue.find(burst_addr) != isInWriteQueue.end()) {
            for (const auto& vec : writeQueue) {
                for (const auto& p : vec) {
                    // check if the read is subsumed in the write queue
                    // packet we are looking at
                    if (p->addr <= addr &&
                       ((addr + size) <= (p->addr + p->size))) {

                        foundInWrQ = true;
                        stats.servicedByWrQ++;
                        pktsServicedByWrQ++;
                        DPRINTF(MemCtrl,
                                "Read to addr %#x with size %d serviced by "
                                "write queue\n",
                                addr, size);
                        stats.bytesReadWrQ += burst_size;
                        break;
                    }
                }
            }
        }

        // If not found in the write q, make a memory packet and
        // push it onto the read queue
        if (!foundInWrQ) {

            // Make the burst helper for split packets
            if (pkt_count > 1 && burst_helper == NULL) {
                DPRINTF(MemCtrl, "Read to addr %#x translates to %d "
                        "memory requests\n", pkt->getAddr(), pkt_count);
                burst_helper = new BurstHelper(pkt_count);
            }

            MemPacket* mem_pkt;
            mem_pkt = mem_intr->decodePacket(pkt, addr, size, true,
                                             mem_intr->pseudoChannel);

            // Increment read entries of the rank (dram)
            // Increment count to trigger issue of non-deterministic read (nvm)
            mem_intr->setupRank(mem_pkt->rank, true);
            // Default readyTime to Max; will be reset once read is issued
            mem_pkt->readyTime = MaxTick;
            mem_pkt->burstHelper = burst_helper;

            assert(!readQueueFull(1));
            stats.rdQLenPdf[totalReadQueueSize + respQueue.size()]++;

            DPRINTF(MemCtrl, "Adding to read queue\n");

            readQueue[mem_pkt->qosValue()].push_back(mem_pkt);

            // log packet
            logRequest(MemCtrl::READ, pkt->requestorId(),
                       pkt->qosValue(), mem_pkt->addr, 1);

            // Update stats
            stats.avgRdQLen = totalReadQueueSize + respQueue.size();
        }

        // Starting address of next memory pkt (aligned to burst boundary)
        addr = (addr | (burst_size - 1)) + 1;
    }

    // If all packets are serviced by write queue, we send the response back
    if (pktsServicedByWrQ == pkt_count) {
        accessAndRespond(pkt, frontendLatency, mem_intr);
        return true;
    }

    // Update how many split packets are serviced by write queue
    if (burst_helper != NULL)
        burst_helper->burstsServiced = pktsServicedByWrQ;

    // not all/any packets serviced by the write queue
    return false;
}

void
MemCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pkt_count,
                                MemInterface* mem_intr)
{
    // only add to the write queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(pkt->isWrite());

    // if the request size is larger than burst size, the pkt is split into
    // multiple packets
    const Addr base_addr = pkt->getAddr();
    Addr addr = base_addr;
    uint32_t burst_size = mem_intr->bytesPerBurst();

    for (int cnt = 0; cnt < pkt_count; ++cnt) {
        unsigned size = std::min((addr | (burst_size - 1)) + 1,
                        base_addr + pkt->getSize()) - addr;
        stats.writePktSize[ceilLog2(size)]++;
        stats.writeBursts++;
        stats.requestorWriteAccesses[pkt->requestorId()]++;

        // see if we can merge with an existing item in the write
        // queue and keep track of whether we have merged or not
        bool merged = isInWriteQueue.find(burstAlign(addr, mem_intr)) !=
            isInWriteQueue.end();

        // if the item was not merged we need to create a new write
        // and enqueue it
        if (!merged) {
            MemPacket* mem_pkt;
            mem_pkt = mem_intr->decodePacket(pkt, addr, size, false,
                                             mem_intr->pseudoChannel);
            // Default readyTime to Max if nvm interface;
            // will be reset once read is issued
            mem_pkt->readyTime = MaxTick;

            mem_intr->setupRank(mem_pkt->rank, false);

            assert(totalWriteQueueSize < writeBufferSize);
            stats.wrQLenPdf[totalWriteQueueSize]++;

            DPRINTF(MemCtrl, "Adding to write queue\n");

            writeQueue[mem_pkt->qosValue()].push_back(mem_pkt);
            isInWriteQueue.insert(burstAlign(addr, mem_intr));

            // log packet
            logRequest(MemCtrl::WRITE, pkt->requestorId(),
                       pkt->qosValue(), mem_pkt->addr, 1);

            assert(totalWriteQueueSize == isInWriteQueue.size());

            // Update stats
            stats.avgWrQLen = totalWriteQueueSize;

        } else {
            DPRINTF(MemCtrl,
                    "Merging write burst with existing queue entry\n");

            // keep track of the fact that this burst effectively
            // disappeared as it was merged with an existing one
            stats.mergedWrBursts++;
        }

        // Starting address of next memory pkt (aligned to burst_size boundary)
        addr = (addr | (burst_size - 1)) + 1;
    }

    // we do not wait for the writes to be sent to the actual memory,
    // but instead take responsibility for the consistency here and
    // snoop the write queue for any upcoming reads
    // @todo, if a pkt size is larger than burst size, we might need a
    // different front end latency
    accessAndRespond(pkt, frontendLatency, mem_intr);
}
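
// Worked example (illustrative, assuming a 64-byte burst size): if a
// write covering 0x1000 is already queued, a later write to 0x1020
// burst-aligns to the same address 0x1000, hits in isInWriteQueue and is
// merged instead of enqueued. The requestor still gets its response via
// accessAndRespond() above, since the controller has taken
// responsibility for read-after-write consistency.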

void
MemCtrl::printQs() const
{
#if TRACING_ON
    DPRINTF(MemCtrl, "===READ QUEUE===\n\n");
    for (const auto& queue : readQueue) {
        for (const auto& packet : queue) {
            DPRINTF(MemCtrl, "Read %#x\n", packet->addr);
        }
    }

    DPRINTF(MemCtrl, "\n===RESP QUEUE===\n\n");
    for (const auto& packet : respQueue) {
        DPRINTF(MemCtrl, "Response %#x\n", packet->addr);
    }

    DPRINTF(MemCtrl, "\n===WRITE QUEUE===\n\n");
    for (const auto& queue : writeQueue) {
        for (const auto& packet : queue) {
            DPRINTF(MemCtrl, "Write %#x\n", packet->addr);
        }
    }
#endif // TRACING_ON
}

bool
MemCtrl::recvTimingReq(PacketPtr pkt)
{
    // This is where we enter from the outside world
    DPRINTF(MemCtrl, "recvTimingReq: request %s addr %#x size %d\n",
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    panic_if(!(pkt->isRead() || pkt->isWrite()),
             "Should only see read and writes at memory controller\n");

    // Calc avg gap between requests
    if (prevArrival != 0) {
        stats.totGap += curTick() - prevArrival;
    }
    prevArrival = curTick();

    panic_if(!(dram->getAddrRange().contains(pkt->getAddr())),
             "Can't handle address range for packet %s\n", pkt->print());

    // Find out how many memory packets a pkt translates to
    // If the burst size is equal or larger than the pkt size, then a pkt
    // translates to only one memory packet. Otherwise, a pkt translates to
    // multiple memory packets
    unsigned size = pkt->getSize();
    uint32_t burst_size = dram->bytesPerBurst();

    unsigned offset = pkt->getAddr() & (burst_size - 1);
    unsigned int pkt_count = divCeil(offset + size, burst_size);
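
    // Worked example (illustrative): a 64-byte packet at address 0x7f0
    // with a 64-byte burst size has offset = 0x7f0 & 0x3f = 48, so
    // pkt_count = divCeil(48 + 64, 64) = 2; an access exactly one burst
    // in size can still straddle a burst boundary.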

    // run the QoS scheduler and assign a QoS priority value to the packet
    qosSchedule( { &readQueue, &writeQueue }, burst_size, pkt);

    // check local buffers and do not accept if full
    if (pkt->isWrite()) {
        assert(size != 0);
        if (writeQueueFull(pkt_count)) {
            DPRINTF(MemCtrl, "Write queue full, not accepting\n");
            // remember that we have to retry this port
            retryWrReq = true;
            stats.numWrRetry++;
            return false;
        } else {
            addToWriteQueue(pkt, pkt_count, dram);
            // If we are not already scheduled to get a request out of the
            // queue, do so now
            if (!nextReqEvent.scheduled()) {
                DPRINTF(MemCtrl, "Request scheduled immediately\n");
                schedule(nextReqEvent, curTick());
            }
            stats.writeReqs++;
            stats.bytesWrittenSys += size;
        }
    } else {
        assert(pkt->isRead());
        assert(size != 0);
        if (readQueueFull(pkt_count)) {
            DPRINTF(MemCtrl, "Read queue full, not accepting\n");
            // remember that we have to retry this port
            retryRdReq = true;
            stats.numRdRetry++;
            return false;
        } else {
            if (!addToReadQueue(pkt, pkt_count, dram)) {
                // If we are not already scheduled to get a request out of
                // the queue, do so now
                if (!nextReqEvent.scheduled()) {
                    DPRINTF(MemCtrl, "Request scheduled immediately\n");
                    schedule(nextReqEvent, curTick());
                }
            }
            stats.readReqs++;
            stats.bytesReadSys += size;
        }
    }

    return true;
}

void
MemCtrl::processRespondEvent(MemInterface* mem_intr,
                        MemPacketQueue& queue,
                        EventFunctionWrapper& resp_event,
                        bool& retry_rd_req)
{

    DPRINTF(MemCtrl,
            "processRespondEvent(): Some req has reached its readyTime\n");

    MemPacket* mem_pkt = queue.front();

    // media specific checks and functions when read response is complete
    // DRAM only
    mem_intr->respondEvent(mem_pkt->rank);

    if (mem_pkt->burstHelper) {
        // it is a split packet
        mem_pkt->burstHelper->burstsServiced++;
        if (mem_pkt->burstHelper->burstsServiced ==
            mem_pkt->burstHelper->burstCount) {
            // we have now serviced all children packets of a system packet
            // so we can now respond to the requestor
            // @todo we probably want to have a different front end and back
            // end latency for split packets
            accessAndRespond(mem_pkt->pkt, frontendLatency + backendLatency,
                             mem_intr);
            delete mem_pkt->burstHelper;
            mem_pkt->burstHelper = NULL;
        }
    } else {
        // it is not a split packet
        accessAndRespond(mem_pkt->pkt, frontendLatency + backendLatency,
                         mem_intr);
    }

    queue.pop_front();

    if (!queue.empty()) {
        assert(queue.front()->readyTime >= curTick());
        assert(!resp_event.scheduled());
        schedule(resp_event, queue.front()->readyTime);
    } else {
        // if there is nothing left in any queue, signal a drain
        if (drainState() == DrainState::Draining &&
            !totalWriteQueueSize && !totalReadQueueSize &&
            allIntfDrained()) {

            DPRINTF(Drain, "Controller done draining\n");
            signalDrainDone();
        } else {
            // check the refresh state and kick the refresh event loop
            // into action again if banks already closed and just waiting
            // for read to complete
            // DRAM only
            mem_intr->checkRefreshState(mem_pkt->rank);
        }
    }

    delete mem_pkt;

    // We have made a location in the queue available at this point,
    // so if there is a read that was forced to wait, retry now
    if (retry_rd_req) {
        retry_rd_req = false;
        port.sendRetryReq();
    }
}

MemPacketQueue::iterator
MemCtrl::chooseNext(MemPacketQueue& queue, Tick extra_col_delay,
                    MemInterface* mem_intr)
{
    // This method does the arbitration between requests.

    MemPacketQueue::iterator ret = queue.end();

    if (!queue.empty()) {
        if (queue.size() == 1) {
            // available rank corresponds to state refresh idle
            MemPacket* mem_pkt = *(queue.begin());
            if (mem_pkt->pseudoChannel != mem_intr->pseudoChannel) {
                return ret;
            }
            if (packetReady(mem_pkt, mem_intr)) {
                ret = queue.begin();
                DPRINTF(MemCtrl, "Single request, going to a free rank\n");
            } else {
                DPRINTF(MemCtrl, "Single request, going to a busy rank\n");
            }
        } else if (memSchedPolicy == enums::fcfs) {
            // check if there is a packet going to a free rank
            for (auto i = queue.begin(); i != queue.end(); ++i) {
                MemPacket* mem_pkt = *i;
                if (packetReady(mem_pkt, mem_intr)) {
                    ret = i;
                    break;
                }
            }
        } else if (memSchedPolicy == enums::frfcfs) {
            Tick col_allowed_at;
            std::tie(ret, col_allowed_at)
                    = chooseNextFRFCFS(queue, extra_col_delay, mem_intr);
        } else {
            panic("No scheduling policy chosen\n");
        }
    }
    return ret;
}

std::pair<MemPacketQueue::iterator, Tick>
MemCtrl::chooseNextFRFCFS(MemPacketQueue& queue, Tick extra_col_delay,
                          MemInterface* mem_intr)
{
    auto selected_pkt_it = queue.end();
    Tick col_allowed_at = MaxTick;

    // time we need to issue a column command to be seamless
    const Tick min_col_at = std::max(mem_intr->nextBurstAt + extra_col_delay,
                                     curTick());

    std::tie(selected_pkt_it, col_allowed_at) =
            mem_intr->chooseNextFRFCFS(queue, min_col_at);

    if (selected_pkt_it == queue.end()) {
        DPRINTF(MemCtrl, "%s no available packets found\n", __func__);
    }

    return std::make_pair(selected_pkt_it, col_allowed_at);
}

void
MemCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency,
                          MemInterface* mem_intr)
{
    DPRINTF(MemCtrl, "Responding to Address %#x.. \n", pkt->getAddr());

    bool needsResponse = pkt->needsResponse();
    // do the actual memory access which also turns the packet into a
    // response
    panic_if(!mem_intr->getAddrRange().contains(pkt->getAddr()),
             "Can't handle address range for packet %s\n", pkt->print());
    mem_intr->access(pkt);

    // turn packet around to go back to requestor if response expected
    if (needsResponse) {
        // access already turned the packet into a response
        assert(pkt->isResponse());
        // response_time consumes the static latency and is charged also
        // with headerDelay that takes into account the delay provided by
        // the xbar and also the payloadDelay that takes into account the
        // number of data beats.
        Tick response_time = curTick() + static_latency + pkt->headerDelay +
                             pkt->payloadDelay;
        // Here we reset the timing of the packet before sending it out.
        pkt->headerDelay = pkt->payloadDelay = 0;

        // queue the packet in the response queue to be sent out after
        // the static latency has passed
        port.schedTimingResp(pkt, response_time);
    } else {
        // @todo the packet is going to be deleted, and the MemPacket
        // is still having a pointer to it
        pendingDelete.reset(pkt);
    }

    DPRINTF(MemCtrl, "Done\n");

    return;
}

void
MemCtrl::pruneBurstTick()
{
    auto it = burstTicks.begin();
    while (it != burstTicks.end()) {
        auto current_it = it++;
        if (curTick() > *current_it) {
            DPRINTF(MemCtrl, "Removing burstTick for %d\n", *current_it);
            burstTicks.erase(current_it);
        }
    }
}

Tick
MemCtrl::getBurstWindow(Tick cmd_tick)
{
    // get tick aligned to burst window
    Tick burst_offset = cmd_tick % commandWindow;
    return (cmd_tick - burst_offset);
}
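
// Worked example (illustrative, assuming commandWindow = 5000 ticks): a
// command at tick 12500 has burst_offset = 12500 % 5000 = 2500 and is
// therefore assigned to the window starting at tick 10000.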

Tick
MemCtrl::verifySingleCmd(Tick cmd_tick, Tick max_cmds_per_burst, bool row_cmd)
{
    // start with assumption that there is no contention on command bus
    Tick cmd_at = cmd_tick;

    // get tick aligned to burst window
    Tick burst_tick = getBurstWindow(cmd_tick);

    // verify that we have command bandwidth to issue the command
    // if not, iterate over next window(s) until slot found
    while (burstTicks.count(burst_tick) >= max_cmds_per_burst) {
        DPRINTF(MemCtrl, "Contention found on command bus at %d\n",
                burst_tick);
        burst_tick += commandWindow;
        cmd_at = burst_tick;
    }

    // add command into burst window and return corresponding Tick
    burstTicks.insert(burst_tick);
    return cmd_at;
}
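
// Worked example (illustrative, assuming commandWindow = 5000 and
// max_cmds_per_burst = 2): a command requested at tick 12500 maps to the
// window at 10000; if that window already holds two commands, the loop
// advances one window and the command issues at tick 15000 instead.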

Tick
MemCtrl::verifyMultiCmd(Tick cmd_tick, Tick max_cmds_per_burst,
                        Tick max_multi_cmd_split)
{
    // start with assumption that there is no contention on command bus
    Tick cmd_at = cmd_tick;

    // get tick aligned to burst window
    Tick burst_tick = getBurstWindow(cmd_tick);

    // Command timing requirements are from 2nd command
    // Start with assumption that 2nd command will issue at cmd_at and
    // find prior slot for 1st command to issue
    // Given a maximum latency of max_multi_cmd_split between the commands,
    // find the burst at the maximum latency prior to cmd_at
    Tick burst_offset = 0;
    Tick first_cmd_offset = cmd_tick % commandWindow;
    while (max_multi_cmd_split > (first_cmd_offset + burst_offset)) {
        burst_offset += commandWindow;
    }
    // get the earliest burst aligned address for first command
    // ensure that the time does not go negative
    Tick first_cmd_tick = burst_tick - std::min(burst_offset, burst_tick);

    // Can required commands issue?
    bool first_can_issue = false;
    bool second_can_issue = false;
    // verify that we have command bandwidth to issue the command(s)
    while (!first_can_issue || !second_can_issue) {
        bool same_burst = (burst_tick == first_cmd_tick);
        auto first_cmd_count = burstTicks.count(first_cmd_tick);
        auto second_cmd_count = same_burst ? first_cmd_count + 1 :
                                   burstTicks.count(burst_tick);

        first_can_issue = first_cmd_count < max_cmds_per_burst;
        second_can_issue = second_cmd_count < max_cmds_per_burst;

        if (!second_can_issue) {
            DPRINTF(MemCtrl, "Contention (cmd2) found on command bus at %d\n",
                    burst_tick);
            burst_tick += commandWindow;
            cmd_at = burst_tick;
        }

        // Verify max_multi_cmd_split isn't violated when command 2 is shifted
        // If commands initially were issued in same burst, they are
        // now in consecutive bursts and can still issue B2B
        bool gap_violated = !same_burst &&
            ((burst_tick - first_cmd_tick) > max_multi_cmd_split);

        if (!first_can_issue || (!second_can_issue && gap_violated)) {
            DPRINTF(MemCtrl, "Contention (cmd1) found on command bus at %d\n",
                    first_cmd_tick);
            first_cmd_tick += commandWindow;
        }
    }

    // Add command to burstTicks
    burstTicks.insert(burst_tick);
    burstTicks.insert(first_cmd_tick);

    return cmd_at;
}
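
// Worked example (illustrative, assuming commandWindow = 5000 and
// max_multi_cmd_split = 5000): for a request at tick 12500 the second
// command lands in the window at 10000 and the first is tried one window
// earlier, at 5000; if either window lacks bandwidth, the windows are
// walked forward while keeping their separation within
// max_multi_cmd_split.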

bool
MemCtrl::inReadBusState(bool next_state) const
{
    // check the bus state
    if (next_state) {
        // use busStateNext to get the state that will be used
        // for the next burst
        return (busStateNext == MemCtrl::READ);
    } else {
        return (busState == MemCtrl::READ);
    }
}

bool
MemCtrl::inWriteBusState(bool next_state) const
{
    // check the bus state
    if (next_state) {
        // use busStateNext to get the state that will be used
        // for the next burst
        return (busStateNext == MemCtrl::WRITE);
    } else {
        return (busState == MemCtrl::WRITE);
    }
}

Tick
MemCtrl::doBurstAccess(MemPacket* mem_pkt, MemInterface* mem_intr)
{
    // first clean up the burstTick set, removing old entries
    // before adding new entries for next burst
    pruneBurstTick();

    // When was command issued?
    Tick cmd_at;

    // Issue the next burst and update bus state to reflect
    // when previous command was issued
    std::vector<MemPacketQueue>& queue = selQueue(mem_pkt->isRead());
    std::tie(cmd_at, mem_intr->nextBurstAt) =
            mem_intr->doBurstAccess(mem_pkt, mem_intr->nextBurstAt, queue);

    DPRINTF(MemCtrl, "Access to %#x, ready at %lld next burst at %lld.\n",
            mem_pkt->addr, mem_pkt->readyTime, mem_intr->nextBurstAt);

    // Update the minimum timing between the requests, this is a
    // conservative estimate of when we have to schedule the next
    // request to not introduce any unnecessary bubbles. In most cases
    // we will wake up sooner than we have to.
    mem_intr->nextReqTime = mem_intr->nextBurstAt - mem_intr->commandOffset();

    // Update the common bus stats
    if (mem_pkt->isRead()) {
        ++readsThisTime;
        // Update latency stats
        stats.requestorReadTotalLat[mem_pkt->requestorId()] +=
            mem_pkt->readyTime - mem_pkt->entryTime;
        stats.requestorReadBytes[mem_pkt->requestorId()] += mem_pkt->size;
    } else {
        ++writesThisTime;
        stats.requestorWriteBytes[mem_pkt->requestorId()] += mem_pkt->size;
        stats.requestorWriteTotalLat[mem_pkt->requestorId()] +=
            mem_pkt->readyTime - mem_pkt->entryTime;
    }

    return cmd_at;
}
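
// Worked example (illustrative): if the interface reports that the next
// burst may start at tick 20000 and commandOffset() is 3000 ticks of
// preparatory command time, nextReqTime is set to 17000, so the
// controller wakes early enough to issue those commands without adding
// bubbles on the data bus.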

bool
MemCtrl::memBusy(MemInterface* mem_intr) {

    // check ranks for refresh/wakeup - uses busStateNext, so done after
    // turnaround decisions
    // Default to busy status and update based on interface specifics
    // Default state of unused interface is 'true'
    bool mem_busy = true;
    bool all_writes_nvm = mem_intr->numWritesQueued == totalWriteQueueSize;
    bool read_queue_empty = totalReadQueueSize == 0;
    mem_busy = mem_intr->isBusy(read_queue_empty, all_writes_nvm);
    if (mem_busy) {
        // if all ranks are refreshing wait for them to finish
        // and stall this state machine without taking any further
        // action, and do not schedule a new nextReqEvent
        return true;
    } else {
        return false;
    }
}

bool
MemCtrl::nvmWriteBlock(MemInterface* mem_intr) {

    bool all_writes_nvm = mem_intr->numWritesQueued == totalWriteQueueSize;
    return (mem_intr->writeRespQueueFull() && all_writes_nvm);
}

void
MemCtrl::nonDetermReads(MemInterface* mem_intr) {

    for (auto queue = readQueue.rbegin();
            queue != readQueue.rend(); ++queue) {
        // select non-deterministic NVM read to issue
        // assume that we have the command bandwidth to issue this along
        // with additional RD/WR burst with needed bank operations
        if (mem_intr->readsWaitingToIssue()) {
            // select non-deterministic NVM read to issue
            mem_intr->chooseRead(*queue);
        }
    }
}

void
MemCtrl::processNextReqEvent(MemInterface* mem_intr,
                        MemPacketQueue& resp_queue,
                        EventFunctionWrapper& resp_event,
                        EventFunctionWrapper& next_req_event,
                        bool& retry_wr_req) {
    // transition is handled by QoS algorithm if enabled
    if (turnPolicy) {
        // select bus state - only done if QoS algorithms are in use
        busStateNext = selectNextBusState();
    }

    // detect bus state change
    bool switched_cmd_type = (busState != busStateNext);
    // record stats
    recordTurnaroundStats();

    DPRINTF(MemCtrl, "QoS Turnarounds selected state %s %s\n",
            (busState==MemCtrl::READ)?"READ":"WRITE",
            switched_cmd_type?"[turnaround triggered]":"");

    if (switched_cmd_type) {
        if (busState == MemCtrl::READ) {
            DPRINTF(QOS,
                    "Switching to writes after %d reads with %d reads "
                    "waiting\n", readsThisTime, totalReadQueueSize);
            stats.rdPerTurnAround.sample(readsThisTime);
            readsThisTime = 0;
        } else {
            DPRINTF(QOS,
                    "Switching to reads after %d writes with %d writes "
                    "waiting\n", writesThisTime, totalWriteQueueSize);
            stats.wrPerTurnAround.sample(writesThisTime);
            writesThisTime = 0;
        }
    }

    // updates current state
    busState = busStateNext;

    nonDetermReads(mem_intr);

    if (memBusy(mem_intr)) {
        return;
    }

    // when we get here it is either a read or a write
    if (busState == READ) {

        // track if we should switch or not
        bool switch_to_writes = false;

        if (totalReadQueueSize == 0) {
            // In the case there is no read request to go next,
            // trigger writes if we have passed the low threshold (or
            // if we are draining)
            if (!(totalWriteQueueSize == 0) &&
                (drainState() == DrainState::Draining ||
                 totalWriteQueueSize > writeLowThreshold)) {

                DPRINTF(MemCtrl,
                        "Switching to writes due to read queue empty\n");
                switch_to_writes = true;
            } else {
                // check if we are drained
                // not done draining until in PWR_IDLE state
                // ensuring all banks are closed and
                // have exited low power states
                if (drainState() == DrainState::Draining &&
                    respQEmpty() && allIntfDrained()) {

                    DPRINTF(Drain, "MemCtrl controller done draining\n");
                    signalDrainDone();
                }

                // nothing to do, not even any point in scheduling an
                // event for the next request
                return;
            }
        } else {

            bool read_found = false;
            MemPacketQueue::iterator to_read;
            uint8_t prio = numPriorities();

            for (auto queue = readQueue.rbegin();
                 queue != readQueue.rend(); ++queue) {

                prio--;

                DPRINTF(QOS,
                        "Checking READ queue [%d] priority [%d elements]\n",
                        prio, queue->size());

                // Figure out which read request goes next
                // If we are changing command type, incorporate the minimum
                // bus turnaround delay which will be rank to rank delay
                to_read = chooseNext((*queue), switched_cmd_type ?
                                     minWriteToReadDataGap() : 0, mem_intr);

                if (to_read != queue->end()) {
                    // candidate read found
                    read_found = true;
                    break;
                }
            }

            // if no read to an available rank is found then return
            // at this point. There could be writes to the available ranks
            // which are above the required threshold. However, to
            // avoid adding more complexity to the code, return and wait
            // for a refresh event to kick things into action again.
            if (!read_found) {
                DPRINTF(MemCtrl, "No Reads Found - exiting\n");
                return;
            }

            auto mem_pkt = *to_read;

            Tick cmd_at = doBurstAccess(mem_pkt, mem_intr);

            DPRINTF(MemCtrl,
                    "Command for %#x, issued at %lld.\n",
                    mem_pkt->addr, cmd_at);

            // sanity check
            assert(pktSizeCheck(mem_pkt, mem_intr));
            assert(mem_pkt->readyTime >= curTick());

            // log the response
            logResponse(MemCtrl::READ, (*to_read)->requestorId(),
                        mem_pkt->qosValue(), mem_pkt->getAddr(), 1,
                        mem_pkt->readyTime - mem_pkt->entryTime);

            // Insert into response queue. It will be sent back to the
            // requestor at its readyTime
            if (resp_queue.empty()) {
                assert(!resp_event.scheduled());
                schedule(resp_event, mem_pkt->readyTime);
            } else {
                assert(resp_queue.back()->readyTime <= mem_pkt->readyTime);
                assert(resp_event.scheduled());
            }

            resp_queue.push_back(mem_pkt);

            // we have so many writes that we have to transition
            // don't transition if the writeRespQueue is full and
            // there are no other writes that can issue
            // Also ensure that we've issued a minimum defined number
            // of reads before switching, or have emptied the readQ
            if ((totalWriteQueueSize > writeHighThreshold) &&
                (readsThisTime >= minReadsPerSwitch ||
                 totalReadQueueSize == 0)
                && !(nvmWriteBlock(mem_intr))) {
                switch_to_writes = true;
            }

            // remove the request from the queue
            // the iterator is no longer valid.
            readQueue[mem_pkt->qosValue()].erase(to_read);
        }

        // switching to writes, either because the read queue is empty
        // and the writes have passed the low threshold (or we are
        // draining), or because the writes hit the high threshold
        if (switch_to_writes) {
            // transition to writing
            busStateNext = WRITE;
        }
    } else {

        bool write_found = false;
        MemPacketQueue::iterator to_write;
        uint8_t prio = numPriorities();

        for (auto queue = writeQueue.rbegin();
             queue != writeQueue.rend(); ++queue) {

            prio--;

            DPRINTF(QOS,
                    "Checking WRITE queue [%d] priority [%d elements]\n",
                    prio, queue->size());

            // If we are changing command type, incorporate the minimum
            // bus turnaround delay
            to_write = chooseNext((*queue),
                    switched_cmd_type ? minReadToWriteDataGap() : 0, mem_intr);

            if (to_write != queue->end()) {
                write_found = true;
                break;
            }
        }

        // if there are no writes to a rank that is available to service
        // requests (i.e. rank is in refresh idle state) are found then
        // return. There could be reads to the available ranks. However, to
        // avoid adding more complexity to the code, return at this point and
        // wait for a refresh event to kick things into action again.
        if (!write_found) {
            DPRINTF(MemCtrl, "No Writes Found - exiting\n");
            return;
        }

        auto mem_pkt = *to_write;

        // sanity check
        assert(pktSizeCheck(mem_pkt, mem_intr));

        Tick cmd_at = doBurstAccess(mem_pkt, mem_intr);
        DPRINTF(MemCtrl,
                "Command for %#x, issued at %lld.\n", mem_pkt->addr, cmd_at);

        isInWriteQueue.erase(burstAlign(mem_pkt->addr, mem_intr));

        // log the response
        logResponse(MemCtrl::WRITE, mem_pkt->requestorId(),
                    mem_pkt->qosValue(), mem_pkt->getAddr(), 1,
                    mem_pkt->readyTime - mem_pkt->entryTime);

        // remove the request from the queue - the iterator is no longer valid
        writeQueue[mem_pkt->qosValue()].erase(to_write);

        delete mem_pkt;

        // If we emptied the write queue, or got sufficiently below the
        // threshold (using the minWritesPerSwitch as the hysteresis) and
        // are not draining, or we have reads waiting and have done enough
        // writes, then switch to reads.
        // If we are interfacing to NVM and have filled the writeRespQueue,
        // with only NVM writes in Q, then switch to reads
        bool below_threshold =
            totalWriteQueueSize + minWritesPerSwitch < writeLowThreshold;

        if (totalWriteQueueSize == 0 ||
            (below_threshold && drainState() != DrainState::Draining) ||
            (totalReadQueueSize && writesThisTime >= minWritesPerSwitch) ||
            (totalReadQueueSize && (nvmWriteBlock(mem_intr)))) {

            // turn the bus back around for reads again
            busStateNext = MemCtrl::READ;

            // note that we switch back to reads also in the idle
            // case, which eventually will check for any draining and
            // also pause any further scheduling if there is really
            // nothing to do
        }
    }
    // It is possible that a refresh to another rank kicks things back into
    // action before reaching this point.
    if (!next_req_event.scheduled())
        schedule(next_req_event, std::max(mem_intr->nextReqTime, curTick()));

    if (retry_wr_req && totalWriteQueueSize < writeBufferSize) {
        retry_wr_req = false;
        port.sendRetryReq();
    }
}

bool
MemCtrl::packetReady(MemPacket* pkt, MemInterface* mem_intr)
{
    return mem_intr->burstReady(pkt);
}

Tick
MemCtrl::minReadToWriteDataGap()
{
    return dram->minReadToWriteDataGap();
}

Tick
MemCtrl::minWriteToReadDataGap()
{
    return dram->minWriteToReadDataGap();
}

Addr
MemCtrl::burstAlign(Addr addr, MemInterface* mem_intr) const
{
    return (addr & ~(Addr(mem_intr->bytesPerBurst() - 1)));
}
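
// Worked example (illustrative, assuming a 64-byte burst): burstAlign
// clears the low-order bits, so address 0x1234 maps to burst address
// 0x1200 (0x1234 & ~Addr(0x3f)).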

bool
MemCtrl::pktSizeCheck(MemPacket* mem_pkt, MemInterface* mem_intr) const
{
    return (mem_pkt->size <= mem_intr->bytesPerBurst());
}

MemCtrl::CtrlStats::CtrlStats(MemCtrl &_ctrl)
    : statistics::Group(&_ctrl),
    ctrl(_ctrl),

    ADD_STAT(readReqs, statistics::units::Count::get(),
             "Number of read requests accepted"),
    ADD_STAT(writeReqs, statistics::units::Count::get(),
             "Number of write requests accepted"),

    ADD_STAT(readBursts, statistics::units::Count::get(),
             "Number of controller read bursts, including those serviced by "
             "the write queue"),
    ADD_STAT(writeBursts, statistics::units::Count::get(),
             "Number of controller write bursts, including those merged in "
             "the write queue"),
    ADD_STAT(servicedByWrQ, statistics::units::Count::get(),
             "Number of controller read bursts serviced by the write queue"),
    ADD_STAT(mergedWrBursts, statistics::units::Count::get(),
             "Number of controller write bursts merged with an existing one"),

    ADD_STAT(neitherReadNorWriteReqs, statistics::units::Count::get(),
             "Number of requests that are neither read nor write"),

    ADD_STAT(avgRdQLen, statistics::units::Rate<
                statistics::units::Count, statistics::units::Tick>::get(),
             "Average read queue length when enqueuing"),
    ADD_STAT(avgWrQLen, statistics::units::Rate<
                statistics::units::Count, statistics::units::Tick>::get(),
             "Average write queue length when enqueuing"),

    ADD_STAT(numRdRetry, statistics::units::Count::get(),
             "Number of times read queue was full causing retry"),
    ADD_STAT(numWrRetry, statistics::units::Count::get(),
             "Number of times write queue was full causing retry"),

    ADD_STAT(readPktSize, statistics::units::Count::get(),
             "Read request sizes (log2)"),
    ADD_STAT(writePktSize, statistics::units::Count::get(),
             "Write request sizes (log2)"),

    ADD_STAT(rdQLenPdf, statistics::units::Count::get(),
             "What read queue length does an incoming req see"),
    ADD_STAT(wrQLenPdf, statistics::units::Count::get(),
             "What write queue length does an incoming req see"),

    ADD_STAT(rdPerTurnAround, statistics::units::Count::get(),
             "Reads before turning the bus around for writes"),
    ADD_STAT(wrPerTurnAround, statistics::units::Count::get(),
             "Writes before turning the bus around for reads"),

    ADD_STAT(bytesReadWrQ, statistics::units::Byte::get(),
             "Total number of bytes read from write queue"),
    ADD_STAT(bytesReadSys, statistics::units::Byte::get(),
             "Total read bytes from the system interface side"),
    ADD_STAT(bytesWrittenSys, statistics::units::Byte::get(),
             "Total written bytes from the system interface side"),

    ADD_STAT(avgRdBWSys, statistics::units::Rate<
                statistics::units::Byte, statistics::units::Second>::get(),
             "Average system read bandwidth in Byte/s"),
    ADD_STAT(avgWrBWSys, statistics::units::Rate<
                statistics::units::Byte, statistics::units::Second>::get(),
             "Average system write bandwidth in Byte/s"),

    ADD_STAT(totGap, statistics::units::Tick::get(),
             "Total gap between requests"),
    ADD_STAT(avgGap, statistics::units::Rate<
                statistics::units::Tick, statistics::units::Count>::get(),
             "Average gap between requests"),

    ADD_STAT(requestorReadBytes, statistics::units::Byte::get(),
             "Per-requestor bytes read from memory"),
    ADD_STAT(requestorWriteBytes, statistics::units::Byte::get(),
             "Per-requestor bytes write to memory"),
    ADD_STAT(requestorReadRate, statistics::units::Rate<
                statistics::units::Byte, statistics::units::Second>::get(),
             "Per-requestor bytes read from memory rate"),
    ADD_STAT(requestorWriteRate, statistics::units::Rate<
                statistics::units::Byte, statistics::units::Second>::get(),
             "Per-requestor bytes write to memory rate"),
    ADD_STAT(requestorReadAccesses, statistics::units::Count::get(),
             "Per-requestor read serviced memory accesses"),
    ADD_STAT(requestorWriteAccesses, statistics::units::Count::get(),
             "Per-requestor write serviced memory accesses"),
    ADD_STAT(requestorReadTotalLat, statistics::units::Tick::get(),
             "Per-requestor read total memory access latency"),
    ADD_STAT(requestorWriteTotalLat, statistics::units::Tick::get(),
             "Per-requestor write total memory access latency"),
    ADD_STAT(requestorReadAvgLat, statistics::units::Rate<
                statistics::units::Tick, statistics::units::Count>::get(),
             "Per-requestor read average memory access latency"),
    ADD_STAT(requestorWriteAvgLat, statistics::units::Rate<
                statistics::units::Tick, statistics::units::Count>::get(),
             "Per-requestor write average memory access latency")
{
}

void
MemCtrl::CtrlStats::regStats()
{
    using namespace statistics;

    assert(ctrl.system());
    const auto max_requestors = ctrl.system()->maxRequestors();

    avgRdQLen.precision(2);
    avgWrQLen.precision(2);

    readPktSize.init(ceilLog2(ctrl.system()->cacheLineSize()) + 1);
    writePktSize.init(ceilLog2(ctrl.system()->cacheLineSize()) + 1);

    rdQLenPdf.init(ctrl.readBufferSize);
    wrQLenPdf.init(ctrl.writeBufferSize);

    rdPerTurnAround
        .init(ctrl.readBufferSize)
        .flags(nozero);
    wrPerTurnAround
        .init(ctrl.writeBufferSize)
        .flags(nozero);

    avgRdBWSys.precision(8);
    avgWrBWSys.precision(8);
    avgGap.precision(2);

    // per-requestor bytes read and written to memory
    requestorReadBytes
        .init(max_requestors)
        .flags(nozero | nonan);

    requestorWriteBytes
        .init(max_requestors)
        .flags(nozero | nonan);

    // per-requestor bytes read and written to memory rate
    requestorReadRate
        .flags(nozero | nonan)
        .precision(12);

    requestorReadAccesses
        .init(max_requestors)
        .flags(nozero);

    requestorWriteAccesses
        .init(max_requestors)
        .flags(nozero);

    requestorReadTotalLat
        .init(max_requestors)
        .flags(nozero | nonan);

    requestorReadAvgLat
        .flags(nonan)
        .precision(2);

    requestorWriteRate
        .flags(nozero | nonan)
        .precision(12);

    requestorWriteTotalLat
        .init(max_requestors)
        .flags(nozero | nonan);

    requestorWriteAvgLat
        .flags(nonan)
        .precision(2);

    for (int i = 0; i < max_requestors; i++) {
        const std::string requestor = ctrl.system()->getRequestorName(i);
        requestorReadBytes.subname(i, requestor);
        requestorReadRate.subname(i, requestor);
        requestorWriteBytes.subname(i, requestor);
        requestorWriteRate.subname(i, requestor);
        requestorReadAccesses.subname(i, requestor);
        requestorWriteAccesses.subname(i, requestor);
        requestorReadTotalLat.subname(i, requestor);
        requestorReadAvgLat.subname(i, requestor);
        requestorWriteTotalLat.subname(i, requestor);
        requestorWriteAvgLat.subname(i, requestor);
    }

    // Formula stats
    avgRdBWSys = (bytesReadSys) / simSeconds;
    avgWrBWSys = (bytesWrittenSys) / simSeconds;

    avgGap = totGap / (readReqs + writeReqs);

    requestorReadRate = requestorReadBytes / simSeconds;
    requestorWriteRate = requestorWriteBytes / simSeconds;
    requestorReadAvgLat = requestorReadTotalLat / requestorReadAccesses;
    requestorWriteAvgLat = requestorWriteTotalLat / requestorWriteAccesses;
}

void
MemCtrl::recvFunctional(PacketPtr pkt)
{
    bool found = recvFunctionalLogic(pkt, dram);

    panic_if(!found, "Can't handle address range for packet %s\n",
             pkt->print());
}

bool
MemCtrl::recvFunctionalLogic(PacketPtr pkt, MemInterface* mem_intr)
{
    if (mem_intr->getAddrRange().contains(pkt->getAddr())) {
        // rely on the abstract memory
        mem_intr->functionalAccess(pkt);
        return true;
    } else {
        return false;
    }
}

Port &
MemCtrl::getPort(const std::string &if_name, PortID idx)
{
    if (if_name != "port") {
        return qos::MemCtrl::getPort(if_name, idx);
    } else {
        return port;
    }
}

bool
MemCtrl::allIntfDrained() const
{
    // DRAM: ensure dram is in power down and refresh IDLE states
    // NVM: No outstanding NVM writes
    // NVM: All other queues verified as needed with calling logic
    return dram->allRanksDrained();
}

DrainState
MemCtrl::drain()
{
    // if there is anything in any of our internal queues, keep track
    // of that as well
    if (!(!totalWriteQueueSize && !totalReadQueueSize && respQueue.empty() &&
          allIntfDrained())) {

        DPRINTF(Drain, "Memory controller not drained, write: %d, read: %d,"
                " resp: %d\n", totalWriteQueueSize, totalReadQueueSize,
                respQueue.size());

        // the only queue that is not drained automatically over time
        // is the write queue, thus kick things into action if needed
        if (totalWriteQueueSize && !nextReqEvent.scheduled()) {
            schedule(nextReqEvent, curTick());
        }

        dram->drainRanks();

        return DrainState::Draining;
    } else {
        return DrainState::Drained;
    }
}

void
MemCtrl::drainResume()
{
    if (!isTimingMode && system()->isTimingMode()) {
        // if we switched to timing mode, kick things into action,
        // and behave as if we restored from a checkpoint
        startup();
        dram->startup();
    } else if (isTimingMode && !system()->isTimingMode()) {
        // if we switch from timing mode, stop the refresh events to
        // not cause issues with KVM
        dram->suspend();
    }

    // update the mode
    isTimingMode = system()->isTimingMode();
}

AddrRangeList
MemCtrl::getAddrRanges()
{
    AddrRangeList range;
    range.push_back(dram->getAddrRange());
    return range;
}

MemCtrl::MemoryPort::
MemoryPort(const std::string& name, MemCtrl& _ctrl)
    : QueuedResponsePort(name, &_ctrl, queue), queue(_ctrl, *this, true),
      ctrl(_ctrl)
{ }

AddrRangeList
MemCtrl::MemoryPort::getAddrRanges() const
{
    return ctrl.getAddrRanges();
}

void
MemCtrl::MemoryPort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(ctrl.name());

    if (!queue.trySatisfyFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        ctrl.recvFunctional(pkt);
    }

    pkt->popLabel();
}

Tick
MemCtrl::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return ctrl.recvAtomic(pkt);
}

Tick
MemCtrl::MemoryPort::recvAtomicBackdoor(
        PacketPtr pkt, MemBackdoorPtr &backdoor)
{
    return ctrl.recvAtomicBackdoor(pkt, backdoor);
}

bool
MemCtrl::MemoryPort::recvTimingReq(PacketPtr pkt)
{
    // pass it to the memory controller
    return ctrl.recvTimingReq(pkt);
}

void
MemCtrl::MemoryPort::disableSanityCheck()
{
    queue.disableSanityCheck();
}

} // namespace memory
} // namespace gem5
#define DPRINTF(x,...)
Definition: trace.hh:186
virtual std::string name() const
Definition: named.hh:47
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:294
bool isRead() const
Definition: packet.hh:592
Addr getAddr() const
Definition: packet.hh:805
void pushLabel(const std::string &lbl)
Push label for PrintReq (safe to call unconditionally).
Definition: packet.hh:1448
bool isResponse() const
Definition: packet.hh:597
void print(std::ostream &o, int verbosity=0, const std::string &prefix="") const
Definition: packet.cc:368
bool needsResponse() const
Definition: packet.hh:607
uint32_t payloadDelay
The extra pipelining delay from seeing the packet until the end of payload is transmitted by the comp...
Definition: packet.hh:448
RequestorID requestorId() const
Definition: packet.hh:778
uint32_t headerDelay
The extra delay from seeing the packet until the header is transmitted.
Definition: packet.hh:430
bool hasData() const
Definition: packet.hh:613
uint8_t qosValue() const
QoS Value getter Returns 0 if QoS value was never set (constructor default).
Definition: packet.hh:767
bool isWrite() const
Definition: packet.hh:593
const std::string & cmdString() const
Return the string name of the cmd field (for debugging and tracing).
Definition: packet.hh:587
unsigned getSize() const
Definition: packet.hh:815
void popLabel()
Pop label for PrintReq (safe to call unconditionally).
Definition: packet.hh:1458
bool cacheResponding() const
Definition: packet.hh:657
Ports are used to interface objects to each other.
Definition: port.hh:62
bool isConnected() const
Is this port currently connected to a peer?
Definition: port.hh:133
A queued port is a port that has an infinite queue for outgoing packets and thus decouples the module...
Definition: qport.hh:62
void schedTimingResp(PacketPtr pkt, Tick when)
Schedule the sending of a timing response.
Definition: qport.hh:93
void sendRangeChange() const
Called by the owner to send a range change.
Definition: port.hh:296
void sendRetryReq()
Send a retry to the request port that previously attempted a sendTimingReq to this response port and ...
Definition: port.hh:401
bool isTimingMode() const
Is the system in timing mode?
Definition: system.hh:273
void access(PacketPtr pkt)
Perform an untimed memory access and update all the state (e.g.
void functionalAccess(PacketPtr pkt)
Perform an untimed memory read or write without changing anything but the memory itself.
void getBackdoor(MemBackdoorPtr &bd_ptr)
AddrRange getAddrRange() const
Get the address range.
A burst helper helps organize and manage a packet that is larger than the memory burst size.
Definition: mem_ctrl.hh:80
unsigned int burstsServiced
Number of bursts serviced so far for a system packet.
Definition: mem_ctrl.hh:87
const unsigned int burstCount
Number of bursts requred for a system packet.
Definition: mem_ctrl.hh:84
void recvFunctional(PacketPtr pkt) override
Receive a functional request packet from the peer.
Definition: mem_ctrl.cc:1463
MemoryPort(const std::string &name, MemCtrl &_ctrl)
Definition: mem_ctrl.cc:1451
bool recvTimingReq(PacketPtr) override
Receive a timing request from the peer.
Definition: mem_ctrl.cc:1491
AddrRangeList getAddrRanges() const override
Get a list of the non-overlapping address ranges the owner is responsible for.
Definition: mem_ctrl.cc:1457
Tick recvAtomic(PacketPtr pkt) override
Receive an atomic request packet from the peer.
Definition: mem_ctrl.cc:1478
Tick recvAtomicBackdoor(PacketPtr pkt, MemBackdoorPtr &backdoor) override
Receive an atomic request packet from the peer, and optionally provide a backdoor to the data being a...
Definition: mem_ctrl.cc:1484
The memory controller is a single-channel memory controller capturing the most important timing const...
Definition: mem_ctrl.hh:247
virtual void recvFunctional(PacketPtr pkt)
Definition: mem_ctrl.cc:1359
virtual void pruneBurstTick()
Remove commands that have already issued from burstTicks.
Definition: mem_ctrl.cc:656
uint32_t writeLowThreshold
Definition: mem_ctrl.hh:515
enums::MemSched memSchedPolicy
Memory controller configuration initialized based on parameter values.
Definition: mem_ctrl.hh:525
bool recvFunctionalLogic(PacketPtr pkt, MemInterface *mem_intr)
Definition: mem_ctrl.cc:1368
bool retryRdReq
Remember if we have to retry a request when available.
Definition: mem_ctrl.hh:291
void printQs() const
Used for debugging to observe the contents of the queues.
Definition: mem_ctrl.cc:379
const uint32_t minReadsPerSwitch
Definition: mem_ctrl.hh:517
virtual void startup() override
startup() is the final initialization call before simulation.
Definition: mem_ctrl.cc:111
uint32_t readsThisTime
Definition: mem_ctrl.hh:519
uint32_t writesThisTime
Definition: mem_ctrl.hh:518
void addToWriteQueue(PacketPtr pkt, unsigned int pkt_count, MemInterface *mem_intr)
Decode the incoming pkt, create a mem_pkt and push to the back of the write queue.
Definition: mem_ctrl.cc:303
Tick recvAtomicLogic(PacketPtr pkt, MemInterface *mem_intr)
Definition: mem_ctrl.cc:137
std::deque< MemPacket * > respQueue
Response queue where read packets wait after we're done working with them, but it's not time to send ...
Definition: mem_ctrl.hh:490
MemoryPort port
Our incoming port, for a multi-ported controller add a crossbar in front of it.
Definition: mem_ctrl.hh:281
uint32_t writeHighThreshold
Definition: mem_ctrl.hh:514
std::unique_ptr< Packet > pendingDelete
Upstream caches need this packet until true is returned, so hold it for deletion until a subsequent c...
Definition: mem_ctrl.hh:628
std::vector< MemPacketQueue > writeQueue
Definition: mem_ctrl.hh:471
virtual Tick verifyMultiCmd(Tick cmd_tick, Tick max_cmds_per_burst, Tick max_multi_cmd_split=0)
Check for command bus contention for multi-cycle (2 currently) command.
Definition: mem_ctrl.cc:700
EventFunctionWrapper respondEvent
Definition: mem_ctrl.hh:311
virtual MemPacketQueue::iterator chooseNext(MemPacketQueue &queue, Tick extra_col_delay, MemInterface *mem_intr)
The memory schduler/arbiter - picks which request needs to go next, based on the specified policy suc...
Definition: mem_ctrl.cc:554
virtual std::pair< MemPacketQueue::iterator, Tick > chooseNextFRFCFS(MemPacketQueue &queue, Tick extra_col_delay, MemInterface *mem_intr)
For FR-FCFS policy reorder the read/write queue depending on row buffer hits and earliest bursts avai...
Definition: mem_ctrl.cc:595
bool readQueueFull(unsigned int pkt_count) const
Check if the read queue has room for more entries.
Definition: mem_ctrl.cc:167
virtual Tick doBurstAccess(MemPacket *mem_pkt, MemInterface *mem_intr)
Actually do the burst based on media specific access function.
Definition: mem_ctrl.cc:790
MemInterface * dram
Definition: mem_ctrl.hh:502
virtual void processNextReqEvent(MemInterface *mem_intr, MemPacketQueue &resp_queue, EventFunctionWrapper &resp_event, EventFunctionWrapper &next_req_event, bool &retry_wr_req)
Bunch of things requires to setup "events" in gem5 When event "respondEvent" occurs for example,...
Definition: mem_ctrl.cc:875
bool addToReadQueue(PacketPtr pkt, unsigned int pkt_count, MemInterface *mem_intr)
When a new read comes in, first check if the write q has a pending request to the same address....
Definition: mem_ctrl.cc:190
std::unordered_multiset< Tick > burstTicks
Holds count of commands issued in burst window starting at defined Tick.
Definition: mem_ctrl.hh:497
virtual Addr burstAlign(Addr addr, MemInterface *mem_intr) const
Burst-align an address.
Definition: mem_ctrl.cc:1154
const uint32_t minWritesPerSwitch
Definition: mem_ctrl.hh:516
bool writeQueueFull(unsigned int pkt_count) const
Check if the write queue has room for more entries.
Definition: mem_ctrl.cc:179
virtual void init() override
init() is called after all C++ SimObjects have been created and all ports are connected.
Definition: mem_ctrl.cc:101
const Tick backendLatency
Pipeline latency of the backend and PHY.
Definition: mem_ctrl.hh:539
uint32_t readBufferSize
The following are basic design parameters of the memory controller, and are initialized based on para...
Definition: mem_ctrl.hh:512
const Tick frontendLatency
Pipeline latency of the controller frontend.
Definition: mem_ctrl.hh:532
virtual bool respQEmpty()
Definition: mem_ctrl.hh:641
virtual bool allIntfDrained() const
Ensure that all interfaced have drained commands.
Definition: mem_ctrl.cc:1390
EventFunctionWrapper nextReqEvent
Definition: mem_ctrl.hh:305
virtual bool packetReady(MemPacket *pkt, MemInterface *mem_intr)
Determine if there is a packet that can issue.
Definition: mem_ctrl.cc:1136
virtual Tick verifySingleCmd(Tick cmd_tick, Tick max_cmds_per_burst, bool row_cmd)
Check for command bus contention for single cycle command.
Definition: mem_ctrl.cc:677
virtual bool nvmWriteBlock(MemInterface *mem_intr)
Will check if all writes are for nvm interface and nvm's write resp queue is full.
Definition: mem_ctrl.cc:853
virtual void processRespondEvent(MemInterface *mem_intr, MemPacketQueue &queue, EventFunctionWrapper &resp_event, bool &retry_rd_req)
Definition: mem_ctrl.cc:485
virtual void accessAndRespond(PacketPtr pkt, Tick static_latency, MemInterface *mem_intr)
When a packet reaches its "readyTime" in the response Q, use the "access()" method in AbstractMemory ...
Definition: mem_ctrl.cc:616
virtual Tick minWriteToReadDataGap()
Calculate the minimum delay used when scheduling a write-to-read transision.
Definition: mem_ctrl.cc:1148
virtual bool recvTimingReq(PacketPtr pkt)
Definition: mem_ctrl.cc:404
bool inReadBusState(bool next_state) const
Check the current direction of the memory channel.
Definition: mem_ctrl.cc:764
virtual Tick minReadToWriteDataGap()
Calculate the minimum delay used when scheduling a read-to-write transision.
Definition: mem_ctrl.cc:1142
DrainState drain() override
Draining is the process of clearing out the states of SimObjects.These are the SimObjects that are pa...
Definition: mem_ctrl.cc:1399
std::vector< MemPacketQueue > readQueue
The controller's main read and write queues, with support for QoS reordering.
Definition: mem_ctrl.hh:470
bool isTimingMode
Remember if the memory system is in timing mode.
Definition: mem_ctrl.hh:286
virtual Tick recvAtomic(PacketPtr pkt)
Definition: mem_ctrl.cc:126
Port & getPort(const std::string &if_name, PortID idx=InvalidPortID) override
Get a port with a given name and index.
Definition: mem_ctrl.cc:1380
virtual bool memBusy(MemInterface *mem_intr)
Checks if the memory interface is already busy.
Definition: mem_ctrl.cc:832
uint32_t writeBufferSize
Definition: mem_ctrl.hh:513
virtual AddrRangeList getAddrRanges()
Definition: mem_ctrl.cc:1443
MemCtrl(const MemCtrlParams &p)
Definition: mem_ctrl.cc:60
virtual Tick recvAtomicBackdoor(PacketPtr pkt, MemBackdoorPtr &backdoor)
Definition: mem_ctrl.cc:159
Tick getBurstWindow(Tick cmd_tick)
Calculate burst window aligned tick.
Definition: mem_ctrl.cc:669
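The alignment itself is a modulo round-down; a sketch assuming command_window is the window length in ticks:

    #include <cstdint>

    using Tick = std::uint64_t;

    // Round cmd_tick down to the start of the command window containing it.
    Tick getBurstWindow(Tick cmd_tick, Tick command_window)
    {
        return cmd_tick - (cmd_tick % command_window);
    }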
std::vector< MemPacketQueue > & selQueue(bool is_read)
Select either the read or write queue.
Definition: mem_ctrl.hh:636
std::unordered_set< Addr > isInWriteQueue
To avoid iterating over the write queue to check for overlapping transactions, maintain a set of burs...
Definition: mem_ctrl.hh:480
virtual bool pktSizeCheck(MemPacket *mem_pkt, MemInterface *mem_intr) const
Check if mem pkt's size is sane.
Definition: mem_ctrl.cc:1160
virtual void drainResume() override
Resume execution after a successful drain.
Definition: mem_ctrl.cc:1425
bool inWriteBusState(bool next_state) const
Check the current direction of the memory channel.
Definition: mem_ctrl.cc:777
const Tick commandWindow
Length of a command window, used to check command bandwidth.
Definition: mem_ctrl.hh:545
virtual void nonDetermReads(MemInterface *mem_intr)
Will access memory interface and select non-deterministic reads to issue.
Definition: mem_ctrl.cc:860
General interface to memory device Includes functions and parameters shared across media types.
virtual void setupRank(const uint8_t rank, const bool is_read)=0
Setup the rank based on packet received.
virtual std::pair< MemPacketQueue::iterator, Tick > chooseNextFRFCFS(MemPacketQueue &queue, Tick min_col_at) const =0
For the FR-FCFS policy, find the first command that can issue. Function will be overridden by the interface to sele...
virtual void suspend()
This function is DRAM specific.
virtual Tick commandOffset() const =0
uint8_t pseudoChannel
pseudo channel number used for HBM modeling
virtual bool burstReady(MemPacket *pkt) const =0
Check if a burst operation can be issued to the interface.
virtual void checkRefreshState(uint8_t rank)
This function is DRAM specific.
virtual bool isBusy(bool read_queue_empty, bool all_writes_nvm)=0
This function checks if ranks are busy.
virtual void respondEvent(uint8_t rank)
This function is DRAM specific.
virtual std::pair< Tick, Tick > doBurstAccess(MemPacket *mem_pkt, Tick next_burst_at, const std::vector< MemPacketQueue > &queue)=0
This function performs the burst and updates stats.
virtual MemPacket * decodePacket(const PacketPtr pkt, Addr pkt_addr, unsigned int size, bool is_read, uint8_t pseudo_channel=0)
Address decoder to figure out physical mapping onto ranks, banks, and rows.
virtual Tick accessLatency() const =0
virtual bool writeRespQueueFull() const
This function is NVM specific.
virtual void chooseRead(MemPacketQueue &queue)
This function is NVM specific.
Tick nextBurstAt
Until when must the controller wait before issuing the next RD/WR burst?
Tick minWriteToReadDataGap() const
virtual bool readsWaitingToIssue() const
This function is NVM specific.
uint32_t numWritesQueued
NVM-specific variable, but declaring it here allows treating different interfaces in a more general wa...
Tick minReadToWriteDataGap() const
virtual bool allRanksDrained() const =0
Check drain state of interface.
virtual void drainRanks()
This function is DRAM specific.
uint32_t bytesPerBurst() const
A memory packet stores a packet along with the timestamp of when the packet entered the queue,...
Definition: mem_ctrl.hh:99
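A condensed sketch of the bookkeeping such a packet carries, using the fields documented in the entries that follow (the real class also holds full decode state and QoS metadata):

    #include <cstdint>

    using Tick = std::uint64_t;
    using Addr = std::uint64_t;

    struct MemPacketSketch
    {
        Tick entryTime;             // when the request entered the controller
        Tick readyTime;             // when the response may leave the controller
        Addr addr;                  // starting address of the burst
        unsigned size;              // in bytes; never larger than the burst size
        std::uint8_t rank;          // populated by the address decoder
        std::uint8_t pseudoChannel; // pseudo channel number, for HBM modeling
    };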
Tick readyTime
When the request will leave the controller.
Definition: mem_ctrl.hh:106
void qosValue(const uint8_t qv)
Set the packet QoS value (interface compatibility with Packet)
Definition: mem_ctrl.hh:163
const uint8_t pseudoChannel
pseudo channel num
Definition: mem_ctrl.hh:120
BurstHelper * burstHelper
A pointer to the BurstHelper if this MemPacket is a split packet. If not a split packet (common case),...
Definition: mem_ctrl.hh:152
unsigned int size
The size of this DRAM packet in bytes. It is always equal to or smaller than the burst size.
Definition: mem_ctrl.hh:146
Addr addr
The starting address of the packet.
Definition: mem_ctrl.hh:140
bool isRead() const
Return true if it is a read packet (interface compatibility with Packet)
Definition: mem_ctrl.hh:193
const Tick entryTime
When the request entered the controller.
Definition: mem_ctrl.hh:103
const PacketPtr pkt
This comes from the outside world.
Definition: mem_ctrl.hh:109
RequestorID requestorId() const
Get the packet RequestorID (interface compatibility with Packet)
Definition: mem_ctrl.hh:175
const uint8_t rank
Will be populated by address decoder.
Definition: mem_ctrl.hh:123
void logResponse(BusState dir, RequestorID id, uint8_t _qos, Addr addr, uint64_t entries, double delay)
Called upon receiving a response; updates statistics and queue status.
Definition: mem_ctrl.cc:149
uint8_t qosSchedule(std::initializer_list< Queues * > queues_ptr, uint64_t queue_entry_size, const PacketPtr pkt)
Assign priority to a packet by executing the configured QoS policy.
Definition: mem_ctrl.hh:496
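A hypothetical sketch of the clamping such a policy hook implies: take the policy's choice when one is configured, otherwise keep the packet's own QoS value, bounded by the number of priority levels (all names here are illustrative):

    #include <algorithm>
    #include <cstdint>

    std::uint8_t schedulePriority(bool has_policy, std::uint8_t policy_qos,
                                  std::uint8_t pkt_qos,
                                  std::uint8_t num_priorities)
    {
        const std::uint8_t q = has_policy ? policy_qos : pkt_qos;
        return std::min<std::uint8_t>(q, num_priorities - 1);
    }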
uint64_t totalWriteQueueSize
Total write request queue length in #packets.
Definition: mem_ctrl.hh:134
void recordTurnaroundStats()
Record statistics on turnarounds based on busStateNext and busState values.
Definition: mem_ctrl.cc:359
BusState selectNextBusState()
Returns next bus direction (READ or WRITE) based on configured policy.
Definition: mem_ctrl.cc:247
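A hedged sketch of a threshold-driven default, assuming the high/low write-watermark scheme the controller's parameters describe; a configured turnaround policy would replace this logic:

    #include <cstddef>

    enum class BusState { READ, WRITE };

    // Switch to writes when reads run dry or the write queue passes the high
    // watermark; switch back once it drains below the low watermark.
    BusState selectNext(BusState cur, std::size_t read_q, std::size_t write_q,
                        std::size_t high_thresh, std::size_t low_thresh)
    {
        if (cur == BusState::READ && (read_q == 0 || write_q > high_thresh))
            return BusState::WRITE;
        if (cur == BusState::WRITE && write_q < low_thresh && read_q != 0)
            return BusState::READ;
        return cur;
    }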
const std::unique_ptr< TurnaroundPolicy > turnPolicy
QoS Bus Turnaround Policy: selects the bus direction (READ/WRITE)
Definition: mem_ctrl.hh:91
System * system() const
read the system pointer
Definition: mem_ctrl.hh:371
uint8_t numPriorities() const
Gets the total number of priority levels in the QoS memory controller.
Definition: mem_ctrl.hh:367
BusState busState
Bus state used to control the read/write switching and drive the scheduling of the next request.
Definition: mem_ctrl.hh:140
uint64_t totalReadQueueSize
Total read request queue length in #packets.
Definition: mem_ctrl.hh:131
BusState busStateNext
bus state for next request event triggered
Definition: mem_ctrl.hh:143
uint8_t schedule(RequestorID id, uint64_t data)
Definition: mem_ctrl.cc:218
void logRequest(BusState dir, RequestorID id, uint8_t _qos, Addr addr, uint64_t entries)
Called upon receiving a request; updates statistics and queue status.
Definition: mem_ctrl.cc:92
void sample(const U &v, int n=1)
Add a value to the distribution n times.
Definition: statistics.hh:1328
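Typical usage, as with the per-turnaround histograms listed among the stats below (a fragment in controller context):

    // At a write-to-read turnaround, record how many writes the finished
    // write phase issued, then reset the counter.
    stats.wrPerTurnAround.sample(writesThisTime);
    writesThisTime = 0;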
Statistics container.
Definition: group.hh:94
STL deque class.
Definition: stl.hh:44
STL pair class.
Definition: stl.hh:58
STL vector class.
Definition: stl.hh:37
DRAMInterface declaration.
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Definition: group.hh:75
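Typical usage in a stats-group constructor initializer list (the description strings are illustrative):

    ADD_STAT(readReqs, statistics::units::Count::get(),
             "Number of read requests accepted"),
    ADD_STAT(writeReqs, statistics::units::Count::get(),
             "Number of write requests accepted"),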
bool contains(const Addr &a) const
Determine if the range contains an address.
Definition: addr_range.hh:471
static constexpr int ceilLog2(const T &n)
Definition: intmath.hh:84
static constexpr T divCeil(const T &a, const U &b)
Definition: intmath.hh:110
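divCeil is the helper that turns a packet size into a burst count; a sketch consistent with the signature above:

    // Round an integer division up.
    template <typename T, typename U>
    static constexpr T divCeil(const T &a, const U &b)
    {
        return (a + b - 1) / b;
    }

    // Example: a 100-byte packet with 64-byte bursts needs
    // divCeil(100, 64) == 2 bursts.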
void signalDrainDone() const
Signal that an object is drained.
Definition: drain.hh:305
DrainState drainState() const
Return the current drain state of an object.
Definition: drain.hh:324
DrainState
Object drain/handover states.
Definition: drain.hh:75
@ Draining
Draining buffers pending serialization/handover.
@ Drained
Buffers drained, ready for serialization/handover.
bool scheduled() const
Determine if the current event is scheduled.
Definition: eventq.hh:465
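The usual guard around (re)arming an event, as done with nextReqEvent and respondEvent in this controller (a fragment in SimObject context):

    // Avoid double-scheduling: only arm the event if it is not pending.
    if (!nextReqEvent.scheduled())
        schedule(nextReqEvent, curTick());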
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:178
#define fatal(...)
This implements a cprintf based fatal() function.
Definition: logging.hh:190
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
Definition: logging.hh:204
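Typical usage with cprintf-style formatting, matching how the controller validates incoming packets:

    panic_if(!(pkt->isRead() || pkt->isWrite()),
             "Should only see read and writes at memory controller\n");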
virtual void startup()
startup() is the final initialization call before simulation.
Definition: sim_object.cc:99
virtual Port & getPort(const std::string &if_name, PortID idx=InvalidPortID)
Get a port with a given name and index.
Definition: sim_object.cc:126
MemCtrl declaration.
MemInterface declaration.
Bitfield< 7 > i
Definition: misc_types.hh:67
Bitfield< 23, 0 > offset
Definition: types.hh:144
Bitfield< 25 > vec
Definition: misc.hh:113
Bitfield< 54 > p
Definition: pagetable.hh:70
Bitfield< 3 > addr
Definition: types.hh:84
const FlagsType nonan
Don't print if this is NAN.
Definition: info.hh:70
const FlagsType nozero
Don't print if this is zero.
Definition: info.hh:68
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
Tick curTick()
The universal simulation clock.
Definition: cur_tick.hh:46
uint64_t Addr
Address type. This will probably be moved somewhere else in the near future.
Definition: types.hh:147
int16_t PortID
Port index/ID type, and a symbolic name for an invalid port id.
Definition: types.hh:245
uint64_t Tick
Tick count type.
Definition: types.hh:58
const Tick MaxTick
Definition: types.hh:60
statistics::Formula & simSeconds
Definition: stats.cc:45
NVMInterface declaration.
statistics::Scalar writeReqs
Definition: mem_ctrl.hh:572
statistics::Scalar mergedWrBursts
Definition: mem_ctrl.hh:576
statistics::Scalar readReqs
Definition: mem_ctrl.hh:571
statistics::Scalar servicedByWrQ
Definition: mem_ctrl.hh:575
statistics::Histogram rdPerTurnAround
Definition: mem_ctrl.hh:588
statistics::Vector readPktSize
Definition: mem_ctrl.hh:584
statistics::Scalar numWrRetry
Definition: mem_ctrl.hh:583
statistics::Scalar totGap
Definition: mem_ctrl.hh:598
statistics::Scalar numRdRetry
Definition: mem_ctrl.hh:582
statistics::Scalar readBursts
Definition: mem_ctrl.hh:573
void regStats() override
Callback to set stat parameters.
Definition: mem_ctrl.cc:1263
statistics::Vector requestorReadTotalLat
Definition: mem_ctrl.hh:614
statistics::Vector requestorWriteTotalLat
Definition: mem_ctrl.hh:615
statistics::Vector requestorWriteBytes
Definition: mem_ctrl.hh:603
statistics::Scalar writeBursts
Definition: mem_ctrl.hh:574
statistics::Vector writePktSize
Definition: mem_ctrl.hh:585
statistics::Histogram wrPerTurnAround
Definition: mem_ctrl.hh:589
statistics::Vector requestorWriteAccesses
Definition: mem_ctrl.hh:611
statistics::Scalar bytesReadSys
Definition: mem_ctrl.hh:592
statistics::Average avgRdQLen
Definition: mem_ctrl.hh:579
statistics::Vector requestorReadAccesses
Definition: mem_ctrl.hh:610
statistics::Scalar bytesWrittenSys
Definition: mem_ctrl.hh:593
statistics::Average avgWrQLen
Definition: mem_ctrl.hh:580
statistics::Vector wrQLenPdf
Definition: mem_ctrl.hh:587
statistics::Scalar bytesReadWrQ
Definition: mem_ctrl.hh:591
statistics::Vector requestorReadBytes
Definition: mem_ctrl.hh:602
statistics::Vector rdQLenPdf
Definition: mem_ctrl.hh:586
const std::string & name()
Definition: trace.cc:49
