gem5  v21.1.0.2
mem_ctrl.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2010-2020 ARM Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Copyright (c) 2013 Amin Farmahini-Farahani
15  * All rights reserved.
16  *
17  * Redistribution and use in source and binary forms, with or without
18  * modification, are permitted provided that the following conditions are
19  * met: redistributions of source code must retain the above copyright
20  * notice, this list of conditions and the following disclaimer;
21  * redistributions in binary form must reproduce the above copyright
22  * notice, this list of conditions and the following disclaimer in the
23  * documentation and/or other materials provided with the distribution;
24  * neither the name of the copyright holders nor the names of its
25  * contributors may be used to endorse or promote products derived from
26  * this software without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39  */
40 
41 #include "mem/mem_ctrl.hh"
42 
43 #include "base/trace.hh"
44 #include "debug/DRAM.hh"
45 #include "debug/Drain.hh"
46 #include "debug/MemCtrl.hh"
47 #include "debug/NVM.hh"
48 #include "debug/QOS.hh"
49 #include "mem/mem_interface.hh"
50 #include "sim/system.hh"
51 
52 namespace gem5
53 {
54 
55 namespace memory
56 {
57 
58 MemCtrl::MemCtrl(const MemCtrlParams &p) :
59  qos::MemCtrl(p),
60  port(name() + ".port", *this), isTimingMode(false),
61  retryRdReq(false), retryWrReq(false),
62  nextReqEvent([this]{ processNextReqEvent(); }, name()),
63  respondEvent([this]{ processRespondEvent(); }, name()),
64  dram(p.dram), nvm(p.nvm),
65  readBufferSize((dram ? dram->readBufferSize : 0) +
66  (nvm ? nvm->readBufferSize : 0)),
67  writeBufferSize((dram ? dram->writeBufferSize : 0) +
68  (nvm ? nvm->writeBufferSize : 0)),
69  writeHighThreshold(writeBufferSize * p.write_high_thresh_perc / 100.0),
70  writeLowThreshold(writeBufferSize * p.write_low_thresh_perc / 100.0),
71  minWritesPerSwitch(p.min_writes_per_switch),
72  writesThisTime(0), readsThisTime(0),
73  memSchedPolicy(p.mem_sched_policy),
74  frontendLatency(p.static_frontend_latency),
75  backendLatency(p.static_backend_latency),
76  commandWindow(p.command_window),
77  nextBurstAt(0), prevArrival(0),
78  nextReqTime(0),
79  stats(*this)
80 {
81  DPRINTF(MemCtrl, "Setting up controller\n");
82  readQueue.resize(p.qos_priorities);
83  writeQueue.resize(p.qos_priorities);
84 
85  // Hook up interfaces to the controller
86  if (dram)
87  dram->setCtrl(this, commandWindow);
88  if (nvm)
89  nvm->setCtrl(this, commandWindow);
90 
91  fatal_if(!dram && !nvm, "Memory controller must have an interface");
92 
93  // perform a basic check of the write thresholds
94  if (p.write_low_thresh_perc >= p.write_high_thresh_perc)
95  fatal("Write buffer low threshold %d must be smaller than the "
96  "high threshold %d\n", p.write_low_thresh_perc,
97  p.write_high_thresh_perc);
98 }
99 
100 void
102 {
103  if (!port.isConnected()) {
104  fatal("MemCtrl %s is unconnected!\n", name());
105  } else {
107  }
108 }
109 
110 void
112 {
113  // remember the memory system mode of operation
115 
116  if (isTimingMode) {
117  // shift the bus busy time sufficiently far ahead that we never
118  // have to worry about negative values when computing the time for
119  // the next request, this will add an insignificant bubble at the
120  // start of simulation
122  nvm->commandOffset());
123  }
124 }
125 
126 Tick
128 {
129  DPRINTF(MemCtrl, "recvAtomic: %s 0x%x\n",
130  pkt->cmdString(), pkt->getAddr());
131 
132  panic_if(pkt->cacheResponding(), "Should not see packets where cache "
133  "is responding");
134 
135  Tick latency = 0;
136  // do the actual memory access and turn the packet into a response
137  if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
138  dram->access(pkt);
139 
140  if (pkt->hasData()) {
141  // this value is not supposed to be accurate, just enough to
142  // keep things going, mimic a closed page
143  latency = dram->accessLatency();
144  }
145  } else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
146  nvm->access(pkt);
147 
148  if (pkt->hasData()) {
149  // this value is not supposed to be accurate, just enough to
150  // keep things going, mimic a closed page
151  latency = nvm->accessLatency();
152  }
153  } else {
154  panic("Can't handle address range for packet %s\n",
155  pkt->print());
156  }
157 
158  return latency;
159 }
160 
161 Tick
163 {
164  Tick latency = recvAtomic(pkt);
165  if (dram) {
166  dram->getBackdoor(backdoor);
167  } else if (nvm) {
168  nvm->getBackdoor(backdoor);
169  }
170  return latency;
171 }
172 
173 bool
174 MemCtrl::readQueueFull(unsigned int neededEntries) const
175 {
177  "Read queue limit %d, current size %d, entries needed %d\n",
179  neededEntries);
180 
181  auto rdsize_new = totalReadQueueSize + respQueue.size() + neededEntries;
182  return rdsize_new > readBufferSize;
183 }
184 
185 bool
186 MemCtrl::writeQueueFull(unsigned int neededEntries) const
187 {
189  "Write queue limit %d, current size %d, entries needed %d\n",
190  writeBufferSize, totalWriteQueueSize, neededEntries);
191 
192  auto wrsize_new = (totalWriteQueueSize + neededEntries);
193  return wrsize_new > writeBufferSize;
194 }
195 
196 void
197 MemCtrl::addToReadQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram)
198 {
199  // only add to the read queue here. whenever the request is
200  // eventually done, set the readyTime, and call schedule()
201  assert(!pkt->isWrite());
202 
203  assert(pkt_count != 0);
204 
205  // if the request size is larger than burst size, the pkt is split into
206  // multiple packets
207  // Note if the pkt starting address is not aligened to burst size, the
208  // address of first packet is kept unaliged. Subsequent packets
209  // are aligned to burst size boundaries. This is to ensure we accurately
210  // check read packets against packets in write queue.
211  const Addr base_addr = pkt->getAddr();
212  Addr addr = base_addr;
213  unsigned pktsServicedByWrQ = 0;
214  BurstHelper* burst_helper = NULL;
215 
216  uint32_t burst_size = is_dram ? dram->bytesPerBurst() :
217  nvm->bytesPerBurst();
218  for (int cnt = 0; cnt < pkt_count; ++cnt) {
219  unsigned size = std::min((addr | (burst_size - 1)) + 1,
220  base_addr + pkt->getSize()) - addr;
221  stats.readPktSize[ceilLog2(size)]++;
222  stats.readBursts++;
224 
225  // First check write buffer to see if the data is already at
226  // the controller
227  bool foundInWrQ = false;
228  Addr burst_addr = burstAlign(addr, is_dram);
229  // if the burst address is not present then there is no need
230  // looking any further
231  if (isInWriteQueue.find(burst_addr) != isInWriteQueue.end()) {
232  for (const auto& vec : writeQueue) {
233  for (const auto& p : vec) {
234  // check if the read is subsumed in the write queue
235  // packet we are looking at
236  if (p->addr <= addr &&
237  ((addr + size) <= (p->addr + p->size))) {
238 
239  foundInWrQ = true;
241  pktsServicedByWrQ++;
243  "Read to addr %#x with size %d serviced by "
244  "write queue\n",
245  addr, size);
246  stats.bytesReadWrQ += burst_size;
247  break;
248  }
249  }
250  }
251  }
252 
253  // If not found in the write q, make a memory packet and
254  // push it onto the read queue
255  if (!foundInWrQ) {
256 
257  // Make the burst helper for split packets
258  if (pkt_count > 1 && burst_helper == NULL) {
259  DPRINTF(MemCtrl, "Read to addr %#x translates to %d "
260  "memory requests\n", pkt->getAddr(), pkt_count);
261  burst_helper = new BurstHelper(pkt_count);
262  }
263 
264  MemPacket* mem_pkt;
265  if (is_dram) {
266  mem_pkt = dram->decodePacket(pkt, addr, size, true, true);
267  // increment read entries of the rank
268  dram->setupRank(mem_pkt->rank, true);
269  } else {
270  mem_pkt = nvm->decodePacket(pkt, addr, size, true, false);
271  // Increment count to trigger issue of non-deterministic read
272  nvm->setupRank(mem_pkt->rank, true);
273  // Default readyTime to Max; will be reset once read is issued
274  mem_pkt->readyTime = MaxTick;
275  }
276  mem_pkt->burstHelper = burst_helper;
277 
278  assert(!readQueueFull(1));
280 
281  DPRINTF(MemCtrl, "Adding to read queue\n");
282 
283  readQueue[mem_pkt->qosValue()].push_back(mem_pkt);
284 
285  // log packet
287  mem_pkt->addr, 1);
288 
289  // Update stats
291  }
292 
293  // Starting address of next memory pkt (aligned to burst boundary)
294  addr = (addr | (burst_size - 1)) + 1;
295  }
296 
297  // If all packets are serviced by write queue, we send the repsonse back
298  if (pktsServicedByWrQ == pkt_count) {
300  return;
301  }
302 
303  // Update how many split packets are serviced by write queue
304  if (burst_helper != NULL)
305  burst_helper->burstsServiced = pktsServicedByWrQ;
306 
307  // If we are not already scheduled to get a request out of the
308  // queue, do so now
309  if (!nextReqEvent.scheduled()) {
310  DPRINTF(MemCtrl, "Request scheduled immediately\n");
312  }
313 }
314 
315 void
316 MemCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram)
317 {
318  // only add to the write queue here. whenever the request is
319  // eventually done, set the readyTime, and call schedule()
320  assert(pkt->isWrite());
321 
322  // if the request size is larger than burst size, the pkt is split into
323  // multiple packets
324  const Addr base_addr = pkt->getAddr();
325  Addr addr = base_addr;
326  uint32_t burst_size = is_dram ? dram->bytesPerBurst() :
327  nvm->bytesPerBurst();
328  for (int cnt = 0; cnt < pkt_count; ++cnt) {
329  unsigned size = std::min((addr | (burst_size - 1)) + 1,
330  base_addr + pkt->getSize()) - addr;
331  stats.writePktSize[ceilLog2(size)]++;
332  stats.writeBursts++;
334 
335  // see if we can merge with an existing item in the write
336  // queue and keep track of whether we have merged or not
337  bool merged = isInWriteQueue.find(burstAlign(addr, is_dram)) !=
338  isInWriteQueue.end();
339 
340  // if the item was not merged we need to create a new write
341  // and enqueue it
342  if (!merged) {
343  MemPacket* mem_pkt;
344  if (is_dram) {
345  mem_pkt = dram->decodePacket(pkt, addr, size, false, true);
346  dram->setupRank(mem_pkt->rank, false);
347  } else {
348  mem_pkt = nvm->decodePacket(pkt, addr, size, false, false);
349  nvm->setupRank(mem_pkt->rank, false);
350  }
353 
354  DPRINTF(MemCtrl, "Adding to write queue\n");
355 
356  writeQueue[mem_pkt->qosValue()].push_back(mem_pkt);
357  isInWriteQueue.insert(burstAlign(addr, is_dram));
358 
359  // log packet
361  mem_pkt->addr, 1);
362 
363  assert(totalWriteQueueSize == isInWriteQueue.size());
364 
365  // Update stats
367 
368  } else {
370  "Merging write burst with existing queue entry\n");
371 
372  // keep track of the fact that this burst effectively
373  // disappeared as it was merged with an existing one
375  }
376 
377  // Starting address of next memory pkt (aligned to burst_size boundary)
378  addr = (addr | (burst_size - 1)) + 1;
379  }
380 
381  // we do not wait for the writes to be send to the actual memory,
382  // but instead take responsibility for the consistency here and
383  // snoop the write queue for any upcoming reads
384  // @todo, if a pkt size is larger than burst size, we might need a
385  // different front end latency
387 
388  // If we are not already scheduled to get a request out of the
389  // queue, do so now
390  if (!nextReqEvent.scheduled()) {
391  DPRINTF(MemCtrl, "Request scheduled immediately\n");
393  }
394 }
395 
396 void
398 {
399 #if TRACING_ON
400  DPRINTF(MemCtrl, "===READ QUEUE===\n\n");
401  for (const auto& queue : readQueue) {
402  for (const auto& packet : queue) {
403  DPRINTF(MemCtrl, "Read %#x\n", packet->addr);
404  }
405  }
406 
407  DPRINTF(MemCtrl, "\n===RESP QUEUE===\n\n");
408  for (const auto& packet : respQueue) {
409  DPRINTF(MemCtrl, "Response %#x\n", packet->addr);
410  }
411 
412  DPRINTF(MemCtrl, "\n===WRITE QUEUE===\n\n");
413  for (const auto& queue : writeQueue) {
414  for (const auto& packet : queue) {
415  DPRINTF(MemCtrl, "Write %#x\n", packet->addr);
416  }
417  }
418 #endif // TRACING_ON
419 }
420 
421 bool
423 {
424  // This is where we enter from the outside world
425  DPRINTF(MemCtrl, "recvTimingReq: request %s addr %#x size %d\n",
426  pkt->cmdString(), pkt->getAddr(), pkt->getSize());
427 
428  panic_if(pkt->cacheResponding(), "Should not see packets where cache "
429  "is responding");
430 
431  panic_if(!(pkt->isRead() || pkt->isWrite()),
432  "Should only see read and writes at memory controller\n");
433 
434  // Calc avg gap between requests
435  if (prevArrival != 0) {
437  }
438  prevArrival = curTick();
439 
440  // What type of media does this packet access?
441  bool is_dram;
442  if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
443  is_dram = true;
444  } else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
445  is_dram = false;
446  } else {
447  panic("Can't handle address range for packet %s\n",
448  pkt->print());
449  }
450 
451 
452  // Find out how many memory packets a pkt translates to
453  // If the burst size is equal or larger than the pkt size, then a pkt
454  // translates to only one memory packet. Otherwise, a pkt translates to
455  // multiple memory packets
456  unsigned size = pkt->getSize();
457  uint32_t burst_size = is_dram ? dram->bytesPerBurst() :
458  nvm->bytesPerBurst();
459  unsigned offset = pkt->getAddr() & (burst_size - 1);
460  unsigned int pkt_count = divCeil(offset + size, burst_size);
461 
462  // run the QoS scheduler and assign a QoS priority value to the packet
463  qosSchedule( { &readQueue, &writeQueue }, burst_size, pkt);
464 
465  // check local buffers and do not accept if full
466  if (pkt->isWrite()) {
467  assert(size != 0);
468  if (writeQueueFull(pkt_count)) {
469  DPRINTF(MemCtrl, "Write queue full, not accepting\n");
470  // remember that we have to retry this port
471  retryWrReq = true;
472  stats.numWrRetry++;
473  return false;
474  } else {
475  addToWriteQueue(pkt, pkt_count, is_dram);
476  stats.writeReqs++;
477  stats.bytesWrittenSys += size;
478  }
479  } else {
480  assert(pkt->isRead());
481  assert(size != 0);
482  if (readQueueFull(pkt_count)) {
483  DPRINTF(MemCtrl, "Read queue full, not accepting\n");
484  // remember that we have to retry this port
485  retryRdReq = true;
486  stats.numRdRetry++;
487  return false;
488  } else {
489  addToReadQueue(pkt, pkt_count, is_dram);
490  stats.readReqs++;
491  stats.bytesReadSys += size;
492  }
493  }
494 
495  return true;
496 }
497 
498 void
500 {
502  "processRespondEvent(): Some req has reached its readyTime\n");
503 
504  MemPacket* mem_pkt = respQueue.front();
505 
506  if (mem_pkt->isDram()) {
507  // media specific checks and functions when read response is complete
508  dram->respondEvent(mem_pkt->rank);
509  }
510 
511  if (mem_pkt->burstHelper) {
512  // it is a split packet
513  mem_pkt->burstHelper->burstsServiced++;
514  if (mem_pkt->burstHelper->burstsServiced ==
515  mem_pkt->burstHelper->burstCount) {
516  // we have now serviced all children packets of a system packet
517  // so we can now respond to the requestor
518  // @todo we probably want to have a different front end and back
519  // end latency for split packets
521  delete mem_pkt->burstHelper;
522  mem_pkt->burstHelper = NULL;
523  }
524  } else {
525  // it is not a split packet
527  }
528 
529  respQueue.pop_front();
530 
531  if (!respQueue.empty()) {
532  assert(respQueue.front()->readyTime >= curTick());
533  assert(!respondEvent.scheduled());
534  schedule(respondEvent, respQueue.front()->readyTime);
535  } else {
536  // if there is nothing left in any queue, signal a drain
537  if (drainState() == DrainState::Draining &&
539  allIntfDrained()) {
540 
541  DPRINTF(Drain, "Controller done draining\n");
542  signalDrainDone();
543  } else if (mem_pkt->isDram()) {
544  // check the refresh state and kick the refresh event loop
545  // into action again if banks already closed and just waiting
546  // for read to complete
547  dram->checkRefreshState(mem_pkt->rank);
548  }
549  }
550 
551  delete mem_pkt;
552 
553  // We have made a location in the queue available at this point,
554  // so if there is a read that was forced to wait, retry now
555  if (retryRdReq) {
556  retryRdReq = false;
557  port.sendRetryReq();
558  }
559 }
560 
561 MemPacketQueue::iterator
562 MemCtrl::chooseNext(MemPacketQueue& queue, Tick extra_col_delay)
563 {
564  // This method does the arbitration between requests.
565 
566  MemPacketQueue::iterator ret = queue.end();
567 
568  if (!queue.empty()) {
569  if (queue.size() == 1) {
570  // available rank corresponds to state refresh idle
571  MemPacket* mem_pkt = *(queue.begin());
572  if (packetReady(mem_pkt)) {
573  ret = queue.begin();
574  DPRINTF(MemCtrl, "Single request, going to a free rank\n");
575  } else {
576  DPRINTF(MemCtrl, "Single request, going to a busy rank\n");
577  }
578  } else if (memSchedPolicy == enums::fcfs) {
579  // check if there is a packet going to a free rank
580  for (auto i = queue.begin(); i != queue.end(); ++i) {
581  MemPacket* mem_pkt = *i;
582  if (packetReady(mem_pkt)) {
583  ret = i;
584  break;
585  }
586  }
587  } else if (memSchedPolicy == enums::frfcfs) {
588  ret = chooseNextFRFCFS(queue, extra_col_delay);
589  } else {
590  panic("No scheduling policy chosen\n");
591  }
592  }
593  return ret;
594 }
595 
596 MemPacketQueue::iterator
598 {
599  auto selected_pkt_it = queue.end();
600  Tick col_allowed_at = MaxTick;
601 
602  // time we need to issue a column command to be seamless
603  const Tick min_col_at = std::max(nextBurstAt + extra_col_delay, curTick());
604 
605  // find optimal packet for each interface
606  if (dram && nvm) {
607  // create 2nd set of parameters for NVM
608  auto nvm_pkt_it = queue.end();
609  Tick nvm_col_at = MaxTick;
610 
611  // Select packet by default to give priority if both
612  // can issue at the same time or seamlessly
613  std::tie(selected_pkt_it, col_allowed_at) =
614  dram->chooseNextFRFCFS(queue, min_col_at);
615  std::tie(nvm_pkt_it, nvm_col_at) =
616  nvm->chooseNextFRFCFS(queue, min_col_at);
617 
618  // Compare DRAM and NVM and select NVM if it can issue
619  // earlier than the DRAM packet
620  if (col_allowed_at > nvm_col_at) {
621  selected_pkt_it = nvm_pkt_it;
622  }
623  } else if (dram) {
624  std::tie(selected_pkt_it, col_allowed_at) =
625  dram->chooseNextFRFCFS(queue, min_col_at);
626  } else if (nvm) {
627  std::tie(selected_pkt_it, col_allowed_at) =
628  nvm->chooseNextFRFCFS(queue, min_col_at);
629  }
630 
631  if (selected_pkt_it == queue.end()) {
632  DPRINTF(MemCtrl, "%s no available packets found\n", __func__);
633  }
634 
635  return selected_pkt_it;
636 }
637 
638 void
640 {
641  DPRINTF(MemCtrl, "Responding to Address %#x.. \n", pkt->getAddr());
642 
643  bool needsResponse = pkt->needsResponse();
644  // do the actual memory access which also turns the packet into a
645  // response
646  if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
647  dram->access(pkt);
648  } else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
649  nvm->access(pkt);
650  } else {
651  panic("Can't handle address range for packet %s\n",
652  pkt->print());
653  }
654 
655  // turn packet around to go back to requestor if response expected
656  if (needsResponse) {
657  // access already turned the packet into a response
658  assert(pkt->isResponse());
659  // response_time consumes the static latency and is charged also
660  // with headerDelay that takes into account the delay provided by
661  // the xbar and also the payloadDelay that takes into account the
662  // number of data beats.
663  Tick response_time = curTick() + static_latency + pkt->headerDelay +
664  pkt->payloadDelay;
665  // Here we reset the timing of the packet before sending it out.
666  pkt->headerDelay = pkt->payloadDelay = 0;
667 
668  // queue the packet in the response queue to be sent out after
669  // the static latency has passed
670  port.schedTimingResp(pkt, response_time);
671  } else {
672  // @todo the packet is going to be deleted, and the MemPacket
673  // is still having a pointer to it
674  pendingDelete.reset(pkt);
675  }
676 
677  DPRINTF(MemCtrl, "Done\n");
678 
679  return;
680 }
681 
682 void
684 {
685  auto it = burstTicks.begin();
686  while (it != burstTicks.end()) {
687  auto current_it = it++;
688  if (curTick() > *current_it) {
689  DPRINTF(MemCtrl, "Removing burstTick for %d\n", *current_it);
690  burstTicks.erase(current_it);
691  }
692  }
693 }
694 
695 Tick
697 {
698  // get tick aligned to burst window
699  Tick burst_offset = cmd_tick % commandWindow;
700  return (cmd_tick - burst_offset);
701 }
702 
703 Tick
704 MemCtrl::verifySingleCmd(Tick cmd_tick, Tick max_cmds_per_burst)
705 {
706  // start with assumption that there is no contention on command bus
707  Tick cmd_at = cmd_tick;
708 
709  // get tick aligned to burst window
710  Tick burst_tick = getBurstWindow(cmd_tick);
711 
712  // verify that we have command bandwidth to issue the command
713  // if not, iterate over next window(s) until slot found
714  while (burstTicks.count(burst_tick) >= max_cmds_per_burst) {
715  DPRINTF(MemCtrl, "Contention found on command bus at %d\n",
716  burst_tick);
717  burst_tick += commandWindow;
718  cmd_at = burst_tick;
719  }
720 
721  // add command into burst window and return corresponding Tick
722  burstTicks.insert(burst_tick);
723  return cmd_at;
724 }
725 
726 Tick
727 MemCtrl::verifyMultiCmd(Tick cmd_tick, Tick max_cmds_per_burst,
728  Tick max_multi_cmd_split)
729 {
730  // start with assumption that there is no contention on command bus
731  Tick cmd_at = cmd_tick;
732 
733  // get tick aligned to burst window
734  Tick burst_tick = getBurstWindow(cmd_tick);
735 
736  // Command timing requirements are from 2nd command
737  // Start with assumption that 2nd command will issue at cmd_at and
738  // find prior slot for 1st command to issue
739  // Given a maximum latency of max_multi_cmd_split between the commands,
740  // find the burst at the maximum latency prior to cmd_at
741  Tick burst_offset = 0;
742  Tick first_cmd_offset = cmd_tick % commandWindow;
743  while (max_multi_cmd_split > (first_cmd_offset + burst_offset)) {
744  burst_offset += commandWindow;
745  }
746  // get the earliest burst aligned address for first command
747  // ensure that the time does not go negative
748  Tick first_cmd_tick = burst_tick - std::min(burst_offset, burst_tick);
749 
750  // Can required commands issue?
751  bool first_can_issue = false;
752  bool second_can_issue = false;
753  // verify that we have command bandwidth to issue the command(s)
754  while (!first_can_issue || !second_can_issue) {
755  bool same_burst = (burst_tick == first_cmd_tick);
756  auto first_cmd_count = burstTicks.count(first_cmd_tick);
757  auto second_cmd_count = same_burst ? first_cmd_count + 1 :
758  burstTicks.count(burst_tick);
759 
760  first_can_issue = first_cmd_count < max_cmds_per_burst;
761  second_can_issue = second_cmd_count < max_cmds_per_burst;
762 
763  if (!second_can_issue) {
764  DPRINTF(MemCtrl, "Contention (cmd2) found on command bus at %d\n",
765  burst_tick);
766  burst_tick += commandWindow;
767  cmd_at = burst_tick;
768  }
769 
770  // Verify max_multi_cmd_split isn't violated when command 2 is shifted
771  // If commands initially were issued in same burst, they are
772  // now in consecutive bursts and can still issue B2B
773  bool gap_violated = !same_burst &&
774  ((burst_tick - first_cmd_tick) > max_multi_cmd_split);
775 
776  if (!first_can_issue || (!second_can_issue && gap_violated)) {
777  DPRINTF(MemCtrl, "Contention (cmd1) found on command bus at %d\n",
778  first_cmd_tick);
779  first_cmd_tick += commandWindow;
780  }
781  }
782 
783  // Add command to burstTicks
784  burstTicks.insert(burst_tick);
785  burstTicks.insert(first_cmd_tick);
786 
787  return cmd_at;
788 }
789 
790 bool
791 MemCtrl::inReadBusState(bool next_state) const
792 {
793  // check the bus state
794  if (next_state) {
795  // use busStateNext to get the state that will be used
796  // for the next burst
797  return (busStateNext == MemCtrl::READ);
798  } else {
799  return (busState == MemCtrl::READ);
800  }
801 }
802 
803 bool
804 MemCtrl::inWriteBusState(bool next_state) const
805 {
806  // check the bus state
807  if (next_state) {
808  // use busStateNext to get the state that will be used
809  // for the next burst
810  return (busStateNext == MemCtrl::WRITE);
811  } else {
812  return (busState == MemCtrl::WRITE);
813  }
814 }
815 
816 void
818 {
819  // first clean up the burstTick set, removing old entries
820  // before adding new entries for next burst
821  pruneBurstTick();
822 
823  // When was command issued?
824  Tick cmd_at;
825 
826  // Issue the next burst and update bus state to reflect
827  // when previous command was issued
828  if (mem_pkt->isDram()) {
829  std::vector<MemPacketQueue>& queue = selQueue(mem_pkt->isRead());
830  std::tie(cmd_at, nextBurstAt) =
831  dram->doBurstAccess(mem_pkt, nextBurstAt, queue);
832 
833  // Update timing for NVM ranks if NVM is configured on this channel
834  if (nvm)
835  nvm->addRankToRankDelay(cmd_at);
836 
837  } else {
838  std::tie(cmd_at, nextBurstAt) =
839  nvm->doBurstAccess(mem_pkt, nextBurstAt);
840 
841  // Update timing for NVM ranks if NVM is configured on this channel
842  if (dram)
843  dram->addRankToRankDelay(cmd_at);
844 
845  }
846 
847  DPRINTF(MemCtrl, "Access to %#x, ready at %lld next burst at %lld.\n",
848  mem_pkt->addr, mem_pkt->readyTime, nextBurstAt);
849 
850  // Update the minimum timing between the requests, this is a
851  // conservative estimate of when we have to schedule the next
852  // request to not introduce any unecessary bubbles. In most cases
853  // we will wake up sooner than we have to.
855  nvm->commandOffset());
856 
857 
858  // Update the common bus stats
859  if (mem_pkt->isRead()) {
860  ++readsThisTime;
861  // Update latency stats
863  mem_pkt->readyTime - mem_pkt->entryTime;
864  stats.requestorReadBytes[mem_pkt->requestorId()] += mem_pkt->size;
865  } else {
866  ++writesThisTime;
867  stats.requestorWriteBytes[mem_pkt->requestorId()] += mem_pkt->size;
869  mem_pkt->readyTime - mem_pkt->entryTime;
870  }
871 }
872 
873 void
875 {
876  // transition is handled by QoS algorithm if enabled
877  if (turnPolicy) {
878  // select bus state - only done if QoS algorithms are in use
880  }
881 
882  // detect bus state change
883  bool switched_cmd_type = (busState != busStateNext);
884  // record stats
886 
887  DPRINTF(MemCtrl, "QoS Turnarounds selected state %s %s\n",
888  (busState==MemCtrl::READ)?"READ":"WRITE",
889  switched_cmd_type?"[turnaround triggered]":"");
890 
891  if (switched_cmd_type) {
892  if (busState == MemCtrl::READ) {
894  "Switching to writes after %d reads with %d reads "
895  "waiting\n", readsThisTime, totalReadQueueSize);
897  readsThisTime = 0;
898  } else {
900  "Switching to reads after %d writes with %d writes "
901  "waiting\n", writesThisTime, totalWriteQueueSize);
903  writesThisTime = 0;
904  }
905  }
906 
907  // updates current state
909 
910  if (nvm) {
911  for (auto queue = readQueue.rbegin();
912  queue != readQueue.rend(); ++queue) {
913  // select non-deterministic NVM read to issue
914  // assume that we have the command bandwidth to issue this along
915  // with additional RD/WR burst with needed bank operations
916  if (nvm->readsWaitingToIssue()) {
917  // select non-deterministic NVM read to issue
918  nvm->chooseRead(*queue);
919  }
920  }
921  }
922 
923  // check ranks for refresh/wakeup - uses busStateNext, so done after
924  // turnaround decisions
925  // Default to busy status and update based on interface specifics
926  bool dram_busy = dram ? dram->isBusy() : true;
927  bool nvm_busy = true;
928  bool all_writes_nvm = false;
929  if (nvm) {
930  all_writes_nvm = nvm->numWritesQueued == totalWriteQueueSize;
931  bool read_queue_empty = totalReadQueueSize == 0;
932  nvm_busy = nvm->isBusy(read_queue_empty, all_writes_nvm);
933  }
934  // Default state of unused interface is 'true'
935  // Simply AND the busy signals to determine if system is busy
936  if (dram_busy && nvm_busy) {
937  // if all ranks are refreshing wait for them to finish
938  // and stall this state machine without taking any further
939  // action, and do not schedule a new nextReqEvent
940  return;
941  }
942 
943  // when we get here it is either a read or a write
944  if (busState == READ) {
945 
946  // track if we should switch or not
947  bool switch_to_writes = false;
948 
949  if (totalReadQueueSize == 0) {
950  // In the case there is no read request to go next,
951  // trigger writes if we have passed the low threshold (or
952  // if we are draining)
953  if (!(totalWriteQueueSize == 0) &&
956 
958  "Switching to writes due to read queue empty\n");
959  switch_to_writes = true;
960  } else {
961  // check if we are drained
962  // not done draining until in PWR_IDLE state
963  // ensuring all banks are closed and
964  // have exited low power states
965  if (drainState() == DrainState::Draining &&
966  respQueue.empty() && allIntfDrained()) {
967 
968  DPRINTF(Drain, "MemCtrl controller done draining\n");
969  signalDrainDone();
970  }
971 
972  // nothing to do, not even any point in scheduling an
973  // event for the next request
974  return;
975  }
976  } else {
977 
978  bool read_found = false;
979  MemPacketQueue::iterator to_read;
980  uint8_t prio = numPriorities();
981 
982  for (auto queue = readQueue.rbegin();
983  queue != readQueue.rend(); ++queue) {
984 
985  prio--;
986 
987  DPRINTF(QOS,
988  "Checking READ queue [%d] priority [%d elements]\n",
989  prio, queue->size());
990 
991  // Figure out which read request goes next
992  // If we are changing command type, incorporate the minimum
993  // bus turnaround delay which will be rank to rank delay
994  to_read = chooseNext((*queue), switched_cmd_type ?
995  minWriteToReadDataGap() : 0);
996 
997  if (to_read != queue->end()) {
998  // candidate read found
999  read_found = true;
1000  break;
1001  }
1002  }
1003 
1004  // if no read to an available rank is found then return
1005  // at this point. There could be writes to the available ranks
1006  // which are above the required threshold. However, to
1007  // avoid adding more complexity to the code, return and wait
1008  // for a refresh event to kick things into action again.
1009  if (!read_found) {
1010  DPRINTF(MemCtrl, "No Reads Found - exiting\n");
1011  return;
1012  }
1013 
1014  auto mem_pkt = *to_read;
1015 
1016  doBurstAccess(mem_pkt);
1017 
1018  // sanity check
1019  assert(mem_pkt->size <= (mem_pkt->isDram() ?
1020  dram->bytesPerBurst() :
1021  nvm->bytesPerBurst()) );
1022  assert(mem_pkt->readyTime >= curTick());
1023 
1024  // log the response
1025  logResponse(MemCtrl::READ, (*to_read)->requestorId(),
1026  mem_pkt->qosValue(), mem_pkt->getAddr(), 1,
1027  mem_pkt->readyTime - mem_pkt->entryTime);
1028 
1029 
1030  // Insert into response queue. It will be sent back to the
1031  // requestor at its readyTime
1032  if (respQueue.empty()) {
1033  assert(!respondEvent.scheduled());
1034  schedule(respondEvent, mem_pkt->readyTime);
1035  } else {
1036  assert(respQueue.back()->readyTime <= mem_pkt->readyTime);
1037  assert(respondEvent.scheduled());
1038  }
1039 
1040  respQueue.push_back(mem_pkt);
1041 
1042  // we have so many writes that we have to transition
1043  // don't transition if the writeRespQueue is full and
1044  // there are no other writes that can issue
1046  !(nvm && all_writes_nvm && nvm->writeRespQueueFull())) {
1047  switch_to_writes = true;
1048  }
1049 
1050  // remove the request from the queue
1051  // the iterator is no longer valid .
1052  readQueue[mem_pkt->qosValue()].erase(to_read);
1053  }
1054 
1055  // switching to writes, either because the read queue is empty
1056  // and the writes have passed the low threshold (or we are
1057  // draining), or because the writes hit the hight threshold
1058  if (switch_to_writes) {
1059  // transition to writing
1060  busStateNext = WRITE;
1061  }
1062  } else {
1063 
1064  bool write_found = false;
1065  MemPacketQueue::iterator to_write;
1066  uint8_t prio = numPriorities();
1067 
1068  for (auto queue = writeQueue.rbegin();
1069  queue != writeQueue.rend(); ++queue) {
1070 
1071  prio--;
1072 
1073  DPRINTF(QOS,
1074  "Checking WRITE queue [%d] priority [%d elements]\n",
1075  prio, queue->size());
1076 
1077  // If we are changing command type, incorporate the minimum
1078  // bus turnaround delay
1079  to_write = chooseNext((*queue),
1080  switched_cmd_type ? minReadToWriteDataGap() : 0);
1081 
1082  if (to_write != queue->end()) {
1083  write_found = true;
1084  break;
1085  }
1086  }
1087 
1088  // if there are no writes to a rank that is available to service
1089  // requests (i.e. rank is in refresh idle state) are found then
1090  // return. There could be reads to the available ranks. However, to
1091  // avoid adding more complexity to the code, return at this point and
1092  // wait for a refresh event to kick things into action again.
1093  if (!write_found) {
1094  DPRINTF(MemCtrl, "No Writes Found - exiting\n");
1095  return;
1096  }
1097 
1098  auto mem_pkt = *to_write;
1099 
1100  // sanity check
1101  assert(mem_pkt->size <= (mem_pkt->isDram() ?
1102  dram->bytesPerBurst() :
1103  nvm->bytesPerBurst()) );
1104 
1105  doBurstAccess(mem_pkt);
1106 
1107  isInWriteQueue.erase(burstAlign(mem_pkt->addr, mem_pkt->isDram()));
1108 
1109  // log the response
1110  logResponse(MemCtrl::WRITE, mem_pkt->requestorId(),
1111  mem_pkt->qosValue(), mem_pkt->getAddr(), 1,
1112  mem_pkt->readyTime - mem_pkt->entryTime);
1113 
1114 
1115  // remove the request from the queue - the iterator is no longer valid
1116  writeQueue[mem_pkt->qosValue()].erase(to_write);
1117 
1118  delete mem_pkt;
1119 
1120  // If we emptied the write queue, or got sufficiently below the
1121  // threshold (using the minWritesPerSwitch as the hysteresis) and
1122  // are not draining, or we have reads waiting and have done enough
1123  // writes, then switch to reads.
1124  // If we are interfacing to NVM and have filled the writeRespQueue,
1125  // with only NVM writes in Q, then switch to reads
1126  bool below_threshold =
1128 
1129  if (totalWriteQueueSize == 0 ||
1130  (below_threshold && drainState() != DrainState::Draining) ||
1133  all_writes_nvm)) {
1134 
1135  // turn the bus back around for reads again
1137 
1138  // note that the we switch back to reads also in the idle
1139  // case, which eventually will check for any draining and
1140  // also pause any further scheduling if there is really
1141  // nothing to do
1142  }
1143  }
1144  // It is possible that a refresh to another rank kicks things back into
1145  // action before reaching this point.
1146  if (!nextReqEvent.scheduled())
1147  schedule(nextReqEvent, std::max(nextReqTime, curTick()));
1148 
1149  // If there is space available and we have writes waiting then let
1150  // them retry. This is done here to ensure that the retry does not
1151  // cause a nextReqEvent to be scheduled before we do so as part of
1152  // the next request processing
1154  retryWrReq = false;
1155  port.sendRetryReq();
1156  }
1157 }
1158 
1159 bool
1161 {
1162  return (pkt->isDram() ?
1163  dram->burstReady(pkt) : nvm->burstReady(pkt));
1164 }
1165 
1166 Tick
1168 {
1169  Tick dram_min = dram ? dram->minReadToWriteDataGap() : MaxTick;
1170  Tick nvm_min = nvm ? nvm->minReadToWriteDataGap() : MaxTick;
1171  return std::min(dram_min, nvm_min);
1172 }
1173 
1174 Tick
1176 {
1177  Tick dram_min = dram ? dram->minWriteToReadDataGap() : MaxTick;
1178  Tick nvm_min = nvm ? nvm->minWriteToReadDataGap() : MaxTick;
1179  return std::min(dram_min, nvm_min);
1180 }
1181 
1182 Addr
1183 MemCtrl::burstAlign(Addr addr, bool is_dram) const
1184 {
1185  if (is_dram)
1186  return (addr & ~(Addr(dram->bytesPerBurst() - 1)));
1187  else
1188  return (addr & ~(Addr(nvm->bytesPerBurst() - 1)));
1189 }
1190 
1192  : statistics::Group(&_ctrl),
1193  ctrl(_ctrl),
1194 
1195  ADD_STAT(readReqs, statistics::units::Count::get(),
1196  "Number of read requests accepted"),
1197  ADD_STAT(writeReqs, statistics::units::Count::get(),
1198  "Number of write requests accepted"),
1199 
1200  ADD_STAT(readBursts, statistics::units::Count::get(),
1201  "Number of controller read bursts, including those serviced by "
1202  "the write queue"),
1203  ADD_STAT(writeBursts, statistics::units::Count::get(),
1204  "Number of controller write bursts, including those merged in "
1205  "the write queue"),
1206  ADD_STAT(servicedByWrQ, statistics::units::Count::get(),
1207  "Number of controller read bursts serviced by the write queue"),
1208  ADD_STAT(mergedWrBursts, statistics::units::Count::get(),
1209  "Number of controller write bursts merged with an existing one"),
1210 
1211  ADD_STAT(neitherReadNorWriteReqs, statistics::units::Count::get(),
1212  "Number of requests that are neither read nor write"),
1213 
1214  ADD_STAT(avgRdQLen, statistics::units::Rate<
1215  statistics::units::Count, statistics::units::Tick>::get(),
1216  "Average read queue length when enqueuing"),
1217  ADD_STAT(avgWrQLen, statistics::units::Rate<
1218  statistics::units::Count, statistics::units::Tick>::get(),
1219  "Average write queue length when enqueuing"),
1220 
1221  ADD_STAT(numRdRetry, statistics::units::Count::get(),
1222  "Number of times read queue was full causing retry"),
1223  ADD_STAT(numWrRetry, statistics::units::Count::get(),
1224  "Number of times write queue was full causing retry"),
1225 
1226  ADD_STAT(readPktSize, statistics::units::Count::get(),
1227  "Read request sizes (log2)"),
1228  ADD_STAT(writePktSize, statistics::units::Count::get(),
1229  "Write request sizes (log2)"),
1230 
1231  ADD_STAT(rdQLenPdf, statistics::units::Count::get(),
1232  "What read queue length does an incoming req see"),
1233  ADD_STAT(wrQLenPdf, statistics::units::Count::get(),
1234  "What write queue length does an incoming req see"),
1235 
1236  ADD_STAT(rdPerTurnAround, statistics::units::Count::get(),
1237  "Reads before turning the bus around for writes"),
1238  ADD_STAT(wrPerTurnAround, statistics::units::Count::get(),
1239  "Writes before turning the bus around for reads"),
1240 
1241  ADD_STAT(bytesReadWrQ, statistics::units::Byte::get(),
1242  "Total number of bytes read from write queue"),
1243  ADD_STAT(bytesReadSys, statistics::units::Byte::get(),
1244  "Total read bytes from the system interface side"),
1245  ADD_STAT(bytesWrittenSys, statistics::units::Byte::get(),
1246  "Total written bytes from the system interface side"),
1247 
1248  ADD_STAT(avgRdBWSys, statistics::units::Rate<
1249  statistics::units::Byte, statistics::units::Second>::get(),
1250  "Average system read bandwidth in Byte/s"),
1251  ADD_STAT(avgWrBWSys, statistics::units::Rate<
1252  statistics::units::Byte, statistics::units::Second>::get(),
1253  "Average system write bandwidth in Byte/s"),
1254 
1255  ADD_STAT(totGap, statistics::units::Tick::get(), "Total gap between requests"),
1256  ADD_STAT(avgGap, statistics::units::Rate<
1257  statistics::units::Tick, statistics::units::Count>::get(),
1258  "Average gap between requests"),
1259 
1260  ADD_STAT(requestorReadBytes, statistics::units::Byte::get(),
1261  "Per-requestor bytes read from memory"),
1262  ADD_STAT(requestorWriteBytes, statistics::units::Byte::get(),
1263  "Per-requestor bytes write to memory"),
1264  ADD_STAT(requestorReadRate, statistics::units::Rate<
1265  statistics::units::Byte, statistics::units::Second>::get(),
1266  "Per-requestor bytes read from memory rate"),
1267  ADD_STAT(requestorWriteRate, statistics::units::Rate<
1268  statistics::units::Byte, statistics::units::Second>::get(),
1269  "Per-requestor bytes write to memory rate"),
1270  ADD_STAT(requestorReadAccesses, statistics::units::Count::get(),
1271  "Per-requestor read serviced memory accesses"),
1272  ADD_STAT(requestorWriteAccesses, statistics::units::Count::get(),
1273  "Per-requestor write serviced memory accesses"),
1274  ADD_STAT(requestorReadTotalLat, statistics::units::Tick::get(),
1275  "Per-requestor read total memory access latency"),
1276  ADD_STAT(requestorWriteTotalLat, statistics::units::Tick::get(),
1277  "Per-requestor write total memory access latency"),
1278  ADD_STAT(requestorReadAvgLat, statistics::units::Rate<
1279  statistics::units::Tick, statistics::units::Count>::get(),
1280  "Per-requestor read average memory access latency"),
1281  ADD_STAT(requestorWriteAvgLat, statistics::units::Rate<
1282  statistics::units::Tick, statistics::units::Count>::get(),
1283  "Per-requestor write average memory access latency")
1284 {
1285 }
1286 
1287 void
1289 {
1290  using namespace statistics;
1291 
1292  assert(ctrl.system());
1293  const auto max_requestors = ctrl.system()->maxRequestors();
1294 
1295  avgRdQLen.precision(2);
1296  avgWrQLen.precision(2);
1297 
1298  readPktSize.init(ceilLog2(ctrl.system()->cacheLineSize()) + 1);
1299  writePktSize.init(ceilLog2(ctrl.system()->cacheLineSize()) + 1);
1300 
1301  rdQLenPdf.init(ctrl.readBufferSize);
1302  wrQLenPdf.init(ctrl.writeBufferSize);
1303 
1304  rdPerTurnAround
1305  .init(ctrl.readBufferSize)
1306  .flags(nozero);
1307  wrPerTurnAround
1308  .init(ctrl.writeBufferSize)
1309  .flags(nozero);
1310 
1311  avgRdBWSys.precision(8);
1312  avgWrBWSys.precision(8);
1313  avgGap.precision(2);
1314 
1315  // per-requestor bytes read and written to memory
1316  requestorReadBytes
1317  .init(max_requestors)
1318  .flags(nozero | nonan);
1319 
1320  requestorWriteBytes
1321  .init(max_requestors)
1322  .flags(nozero | nonan);
1323 
1324  // per-requestor bytes read and written to memory rate
1325  requestorReadRate
1326  .flags(nozero | nonan)
1327  .precision(12);
1328 
1329  requestorReadAccesses
1330  .init(max_requestors)
1331  .flags(nozero);
1332 
1333  requestorWriteAccesses
1334  .init(max_requestors)
1335  .flags(nozero);
1336 
1337  requestorReadTotalLat
1338  .init(max_requestors)
1339  .flags(nozero | nonan);
1340 
1341  requestorReadAvgLat
1342  .flags(nonan)
1343  .precision(2);
1344 
1345  requestorWriteRate
1346  .flags(nozero | nonan)
1347  .precision(12);
1348 
1349  requestorWriteTotalLat
1350  .init(max_requestors)
1351  .flags(nozero | nonan);
1352 
1353  requestorWriteAvgLat
1354  .flags(nonan)
1355  .precision(2);
1356 
1357  for (int i = 0; i < max_requestors; i++) {
1358  const std::string requestor = ctrl.system()->getRequestorName(i);
1359  requestorReadBytes.subname(i, requestor);
1360  requestorReadRate.subname(i, requestor);
1361  requestorWriteBytes.subname(i, requestor);
1362  requestorWriteRate.subname(i, requestor);
1363  requestorReadAccesses.subname(i, requestor);
1364  requestorWriteAccesses.subname(i, requestor);
1365  requestorReadTotalLat.subname(i, requestor);
1366  requestorReadAvgLat.subname(i, requestor);
1367  requestorWriteTotalLat.subname(i, requestor);
1368  requestorWriteAvgLat.subname(i, requestor);
1369  }
1370 
1371  // Formula stats
1372  avgRdBWSys = (bytesReadSys) / simSeconds;
1373  avgWrBWSys = (bytesWrittenSys) / simSeconds;
1374 
1375  avgGap = totGap / (readReqs + writeReqs);
1376 
1377  requestorReadRate = requestorReadBytes / simSeconds;
1378  requestorWriteRate = requestorWriteBytes / simSeconds;
1379  requestorReadAvgLat = requestorReadTotalLat / requestorReadAccesses;
1380  requestorWriteAvgLat = requestorWriteTotalLat / requestorWriteAccesses;
1381 }
1382 
1383 void
1385 {
1386  if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
1387  // rely on the abstract memory
1388  dram->functionalAccess(pkt);
1389  } else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
1390  // rely on the abstract memory
1391  nvm->functionalAccess(pkt);
1392  } else {
1393  panic("Can't handle address range for packet %s\n",
1394  pkt->print());
1395  }
1396 }
1397 
1398 Port &
1399 MemCtrl::getPort(const std::string &if_name, PortID idx)
1400 {
1401  if (if_name != "port") {
1402  return qos::MemCtrl::getPort(if_name, idx);
1403  } else {
1404  return port;
1405  }
1406 }
1407 
1408 bool
1410 {
1411  // ensure dram is in power down and refresh IDLE states
1412  bool dram_drained = !dram || dram->allRanksDrained();
1413  // No outstanding NVM writes
1414  // All other queues verified as needed with calling logic
1415  bool nvm_drained = !nvm || nvm->allRanksDrained();
1416  return (dram_drained && nvm_drained);
1417 }
1418 
1419 DrainState
1421 {
1422  // if there is anything in any of our internal queues, keep track
1423  // of that as well
1424  if (!(!totalWriteQueueSize && !totalReadQueueSize && respQueue.empty() &&
1425  allIntfDrained())) {
1426 
1427  DPRINTF(Drain, "Memory controller not drained, write: %d, read: %d,"
1428  " resp: %d\n", totalWriteQueueSize, totalReadQueueSize,
1429  respQueue.size());
1430 
1431  // the only queue that is not drained automatically over time
1432  // is the write queue, thus kick things into action if needed
1435  }
1436 
1437  if (dram)
1438  dram->drainRanks();
1439 
1440  return DrainState::Draining;
1441  } else {
1442  return DrainState::Drained;
1443  }
1444 }
1445 
1446 void
1448 {
1449  if (!isTimingMode && system()->isTimingMode()) {
1450  // if we switched to timing mode, kick things into action,
1451  // and behave as if we restored from a checkpoint
1452  startup();
1453  dram->startup();
1454  } else if (isTimingMode && !system()->isTimingMode()) {
1455  // if we switch from timing mode, stop the refresh events to
1456  // not cause issues with KVM
1457  if (dram)
1458  dram->suspend();
1459  }
1460 
1461  // update the mode
1463 }
1464 
1465 MemCtrl::MemoryPort::MemoryPort(const std::string& name, MemCtrl& _ctrl)
1466  : QueuedResponsePort(name, &_ctrl, queue), queue(_ctrl, *this, true),
1467  ctrl(_ctrl)
1468 { }
1469 
1472 {
1473  AddrRangeList ranges;
1474  if (ctrl.dram) {
1475  DPRINTF(DRAM, "Pushing DRAM ranges to port\n");
1476  ranges.push_back(ctrl.dram->getAddrRange());
1477  }
1478  if (ctrl.nvm) {
1479  DPRINTF(NVM, "Pushing NVM ranges to port\n");
1480  ranges.push_back(ctrl.nvm->getAddrRange());
1481  }
1482  return ranges;
1483 }
1484 
1485 void
1487 {
1488  pkt->pushLabel(ctrl.name());
1489 
1490  if (!queue.trySatisfyFunctional(pkt)) {
1491  // Default implementation of SimpleTimingPort::recvFunctional()
1492  // calls recvAtomic() and throws away the latency; we can save a
1493  // little here by just not calculating the latency.
1494  ctrl.recvFunctional(pkt);
1495  }
1496 
1497  pkt->popLabel();
1498 }
1499 
1500 Tick
1502 {
1503  return ctrl.recvAtomic(pkt);
1504 }
1505 
1506 Tick
1508  PacketPtr pkt, MemBackdoorPtr &backdoor)
1509 {
1510  return ctrl.recvAtomicBackdoor(pkt, backdoor);
1511 }
1512 
1513 bool
1515 {
1516  // pass it to the memory controller
1517  return ctrl.recvTimingReq(pkt);
1518 }
1519 
1520 } // namespace memory
1521 } // namespace gem5
gem5::memory::MemCtrl::readBufferSize
const uint32_t readBufferSize
The following are basic design parameters of the memory controller, and are initialized based on para...
Definition: mem_ctrl.hh:485
gem5::curTick
Tick curTick()
The universal simulation clock.
Definition: cur_tick.hh:46
fatal
#define fatal(...)
This implements a cprintf based fatal() function.
Definition: logging.hh:189
gem5::PortID
int16_t PortID
Port index/ID type, and a symbolic name for an invalid port id.
Definition: types.hh:252
gem5::Packet::cmdString
const std::string & cmdString() const
Return the string name of the cmd field (for debugging and tracing).
Definition: packet.hh:577
gem5::memory::qos::MemCtrl::totalReadQueueSize
uint64_t totalReadQueueSize
Total read request packets queue length in #packets.
Definition: mem_ctrl.hh:131
gem5::memory::NVMInterface::chooseNextFRFCFS
std::pair< MemPacketQueue::iterator, Tick > chooseNextFRFCFS(MemPacketQueue &queue, Tick min_col_at) const override
For FR-FCFS policy, find first NVM command that can issue default to first command to prepped region.
Definition: mem_interface.cc:2107
gem5::SimObject::getPort
virtual Port & getPort(const std::string &if_name, PortID idx=InvalidPortID)
Get a port with a given name and index.
Definition: sim_object.cc:126
gem5::memory::DRAMInterface::burstReady
bool burstReady(MemPacket *pkt) const override
Check if a burst operation can be issued to the DRAM.
Definition: mem_interface.hh:978
gem5::memory::MemCtrl::backendLatency
const Tick backendLatency
Pipeline latency of the backend and PHY.
Definition: mem_ctrl.hh:511
gem5::memory::DRAMInterface::respondEvent
void respondEvent(uint8_t rank)
Complete response process for DRAM when read burst is complete This will update the counters and chec...
Definition: mem_interface.cc:946
gem5::memory::MemCtrl::dram
DRAMInterface *const dram
Create pointer to interface of the actual dram media when connected.
Definition: mem_ctrl.hh:472
gem5::memory::MemCtrl::CtrlStats::bytesReadWrQ
statistics::Scalar bytesReadWrQ
Definition: mem_ctrl.hh:563
gem5::memory::MemCtrl::nextReqEvent
EventFunctionWrapper nextReqEvent
Definition: mem_ctrl.hh:296
gem5::memory::qos::MemCtrl::logResponse
void logResponse(BusState dir, RequestorID id, uint8_t _qos, Addr addr, uint64_t entries, double delay)
Called upon receiving a response, updates statistics and updates queues status.
Definition: mem_ctrl.cc:149
gem5::memory::NVMInterface::accessLatency
Tick accessLatency() const override
Definition: mem_interface.hh:1235
system.hh
gem5::memory::MemCtrl::MemoryPort::recvTimingReq
bool recvTimingReq(PacketPtr) override
Receive a timing request from the peer.
Definition: mem_ctrl.cc:1514
gem5::memory::MemPacket::burstHelper
BurstHelper * burstHelper
A pointer to the BurstHelper if this MemPacket is a split packet If not a split packet (common case),...
Definition: mem_ctrl.hh:148
gem5::ResponsePort::sendRetryReq
void sendRetryReq()
Send a retry to the request port that previously attempted a sendTimingReq to this response port and ...
Definition: port.hh:401
gem5::memory::NVMInterface::numWritesQueued
uint32_t numWritesQueued
Definition: mem_interface.hh:1155
gem5::memory::MemPacket::readyTime
Tick readyTime
When will request leave the controller.
Definition: mem_ctrl.hh:105
gem5::memory::MemPacket::size
unsigned int size
The size of this dram packet in bytes It is always equal or smaller than the burst size.
Definition: mem_ctrl.hh:142
gem5::memory::MemCtrl::readsThisTime
uint32_t readsThisTime
Definition: mem_ctrl.hh:491
gem5::memory::MemCtrl::isInWriteQueue
std::unordered_set< Addr > isInWriteQueue
To avoid iterating over the write queue to check for overlapping transactions, maintain a set of burs...
Definition: mem_ctrl.hh:450
gem5::Drainable::drainState
DrainState drainState() const
Return the current drain state of an object.
Definition: drain.hh:324
memory
Definition: mem.h:38
gem5::memory::MemCtrl::recvAtomicBackdoor
Tick recvAtomicBackdoor(PacketPtr pkt, MemBackdoorPtr &backdoor)
Definition: mem_ctrl.cc:162
gem5::memory::NVMInterface::isBusy
bool isBusy(bool read_queue_empty, bool all_writes_nvm)
This function checks if ranks are busy.
Definition: mem_interface.cc:2503
gem5::memory::MemCtrl::CtrlStats::requestorWriteAccesses
statistics::Vector requestorWriteAccesses
Definition: mem_ctrl.hh:583
gem5::memory::MemCtrl::MemoryPort::MemoryPort
MemoryPort(const std::string &name, MemCtrl &_ctrl)
Definition: mem_ctrl.cc:1465
gem5::memory::DRAMInterface::addRankToRankDelay
void addRankToRankDelay(Tick cmd_at) override
Add rank to rank delay to bus timing to all DRAM banks in alli ranks when access to an alternate inte...
Definition: mem_interface.cc:723
gem5::memory::MemCtrl::addToWriteQueue
void addToWriteQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram)
Decode the incoming pkt, create a mem_pkt and push to the back of the write queue.
Definition: mem_ctrl.cc:316
gem5::memory::AbstractMemory::getAddrRange
AddrRange getAddrRange() const
Get the address range.
Definition: abstract_mem.cc:250
gem5::memory::qos::MemCtrl::READ
@ READ
Definition: mem_ctrl.hh:84
gem5::AddrRange::contains
bool contains(const Addr &a) const
Determine if the range contains an address.
Definition: addr_range.hh:438
gem5::memory::MemCtrl::processRespondEvent
void processRespondEvent()
Definition: mem_ctrl.cc:499
gem5::statistics::nozero
const FlagsType nozero
Don't print if this is zero.
Definition: info.hh:68
gem5::memory::MemCtrl::verifySingleCmd
Tick verifySingleCmd(Tick cmd_tick, Tick max_cmds_per_burst)
Check for command bus contention for single cycle command.
Definition: mem_ctrl.cc:704
gem5::memory::MemCtrl::CtrlStats::requestorReadTotalLat
statistics::Vector requestorReadTotalLat
Definition: mem_ctrl.hh:586
gem5::memory::DRAMInterface::chooseNextFRFCFS
std::pair< MemPacketQueue::iterator, Tick > chooseNextFRFCFS(MemPacketQueue &queue, Tick min_col_at) const override
For FR-FCFS policy, find first DRAM command that can issue.
Definition: mem_interface.cc:175
gem5::memory::MemPacket::pkt
const PacketPtr pkt
This comes from the outside world.
Definition: mem_ctrl.hh:108
gem5::Packet::qosValue
uint8_t qosValue() const
QoS Value getter Returns 0 if QoS value was never set (constructor default).
Definition: packet.hh:756
gem5::memory::MemCtrl
The memory controller is a single-channel memory controller capturing the most important timing const...
Definition: mem_ctrl.hh:242
gem5::memory::NVMInterface::addRankToRankDelay
void addRankToRankDelay(Tick cmd_at) override
Add rank to rank delay to bus timing to all NVM banks in alli ranks when access to an alternate inter...
Definition: mem_interface.cc:2486
gem5::Packet::cacheResponding
bool cacheResponding() const
Definition: packet.hh:646
gem5::memory::NVMInterface::readsWaitingToIssue
bool readsWaitingToIssue() const
Definition: mem_interface.hh:1249
gem5::memory::NVMInterface::allRanksDrained
bool allRanksDrained() const override
Check drain state of NVM interface.
Definition: mem_interface.hh:1176
gem5::memory::DRAMInterface::allRanksDrained
bool allRanksDrained() const override
Return true once refresh is complete for all ranks and there are no additional commands enqueued.
Definition: mem_interface.cc:1020
gem5::MaxTick
const Tick MaxTick
Definition: types.hh:60
gem5::Packet::isWrite
bool isWrite() const
Definition: packet.hh:583
gem5::memory::MemCtrl::writeBufferSize
const uint32_t writeBufferSize
Definition: mem_ctrl.hh:486
gem5::memory::NVMInterface::setupRank
void setupRank(const uint8_t rank, const bool is_read) override
Setup the rank based on packet received.
Definition: mem_interface.cc:2095
std::vector
STL vector class.
Definition: stl.hh:37
gem5::memory::MemCtrl::respondEvent
EventFunctionWrapper respondEvent
Definition: mem_ctrl.hh:299
gem5::statistics::nonan
const FlagsType nonan
Don't print if this is NAN.
Definition: info.hh:70
gem5::memory::MemCtrl::startup
virtual void startup() override
startup() is the final initialization call before simulation.
Definition: mem_ctrl.cc:111
gem5::memory::MemCtrl::CtrlStats::requestorWriteTotalLat
statistics::Vector requestorWriteTotalLat
Definition: mem_ctrl.hh:587
gem5::memory::MemCtrl::prevArrival
Tick prevArrival
Definition: mem_ctrl.hh:524
gem5::memory::MemCtrl::selQueue
std::vector< MemPacketQueue > & selQueue(bool is_read)
Select either the read or write queue.
Definition: mem_ctrl.hh:608
gem5::memory::MemCtrl::memSchedPolicy
enums::MemSched memSchedPolicy
Memory controller configuration initialized based on parameter values.
Definition: mem_ctrl.hh:497
gem5::memory::MemCtrl::packetReady
bool packetReady(MemPacket *pkt)
Determine if there is a packet that can issue.
Definition: mem_ctrl.cc:1160
gem5::memory::NVMInterface::writeRespQueueFull
bool writeRespQueueFull() const
Check if the write response queue has reached defined threshold.
Definition: mem_interface.hh:1243
gem5::ArmISA::i
Bitfield< 7 > i
Definition: misc_types.hh:66
gem5::memory::MemCtrl::CtrlStats::readPktSize
statistics::Vector readPktSize
Definition: mem_ctrl.hh:556
gem5::Packet::headerDelay
uint32_t headerDelay
The extra delay from seeing the packet until the header is transmitted.
Definition: packet.hh:420
gem5::memory::MemCtrl::MemCtrl
MemCtrl(const MemCtrlParams &p)
Definition: mem_ctrl.cc:58
gem5::memory::DRAMInterface::drainRanks
void drainRanks()
Iterate through dram ranks to exit self-refresh in order to drain.
Definition: mem_interface.cc:1006
gem5::memory::DRAMInterface::accessLatency
Tick accessLatency() const override
Definition: mem_interface.hh:939
gem5::memory::qos::MemCtrl::busStateNext
BusState busStateNext
bus state for next request event triggered
Definition: mem_ctrl.hh:143
gem5::memory::qos::MemCtrl::busState
BusState busState
Bus state used to control the read/write switching and drive the scheduling of the next request.
Definition: mem_ctrl.hh:140
gem5::memory::qos::MemCtrl::qosSchedule
uint8_t qosSchedule(std::initializer_list< Queues * > queues_ptr, uint64_t queue_entry_size, const PacketPtr pkt)
Assign priority to a packet by executing the configured QoS policy.
Definition: mem_ctrl.hh:496
gem5::statistics::DistBase::sample
void sample(const U &v, int n=1)
Add a value to the distribtion n times.
Definition: statistics.hh:1325
gem5::memory::MemCtrl::CtrlStats::bytesReadSys
statistics::Scalar bytesReadSys
Definition: mem_ctrl.hh:564
gem5::memory::DRAMInterface::setupRank
void setupRank(const uint8_t rank, const bool is_read) override
Setup the rank based on packet received.
Definition: mem_interface.cc:935
gem5::memory::DRAMInterface::suspend
void suspend()
Iterate through DRAM ranks and suspend them.
Definition: mem_interface.cc:1035
gem5::memory::MemCtrl::inReadBusState
bool inReadBusState(bool next_state) const
Check the current direction of the memory channel.
Definition: mem_ctrl.cc:791
gem5::memory::MemCtrl::retryRdReq
bool retryRdReq
Remember if we have to retry a request when available.
Definition: mem_ctrl.hh:286
gem5::memory::MemCtrl::CtrlStats::requestorReadAccesses
statistics::Vector requestorReadAccesses
Definition: mem_ctrl.hh:582
gem5::memory::MemPacket::entryTime
const Tick entryTime
When did request enter the controller.
Definition: mem_ctrl.hh:102
gem5::Packet::hasData
bool hasData() const
Definition: packet.hh:603
gem5::memory::MemInterface::minWriteToReadDataGap
Tick minWriteToReadDataGap() const
Definition: mem_interface.hh:271
gem5::memory::qos::MemCtrl::turnPolicy
const std::unique_ptr< TurnaroundPolicy > turnPolicy
QoS Bus Turnaround Policy: selects the bus direction (READ/WRITE)
Definition: mem_ctrl.hh:91
gem5::memory::MemCtrl::nvm
NVMInterface *const nvm
Create pointer to interface of the actual nvm media when connected.
Definition: mem_ctrl.hh:477
gem5::memory::MemCtrl::CtrlStats::avgWrQLen
statistics::Average avgWrQLen
Definition: mem_ctrl.hh:552
gem5::Packet::payloadDelay
uint32_t payloadDelay
The extra pipelining delay from seeing the packet until the end of payload is transmitted by the comp...
Definition: packet.hh:438
gem5::memory::MemCtrl::recvAtomic
Tick recvAtomic(PacketPtr pkt)
Definition: mem_ctrl.cc:127
gem5::memory::MemCtrl::CtrlStats::writePktSize
statistics::Vector writePktSize
Definition: mem_ctrl.hh:557
gem5::memory::MemCtrl::writeQueue
std::vector< MemPacketQueue > writeQueue
Definition: mem_ctrl.hh:441
gem5::memory::MemCtrl::writeHighThreshold
const uint32_t writeHighThreshold
Definition: mem_ctrl.hh:487
gem5::DrainState
DrainState
Object drain/handover states.
Definition: drain.hh:74
gem5::memory::MemCtrl::inWriteBusState
bool inWriteBusState(bool next_state) const
Check the current direction of the memory channel.
Definition: mem_ctrl.cc:804
gem5::memory::qos::MemCtrl::schedule
uint8_t schedule(RequestorID id, uint64_t data)
Definition: mem_ctrl.cc:218
gem5::memory::MemCtrl::accessAndRespond
void accessAndRespond(PacketPtr pkt, Tick static_latency)
When a packet reaches its "readyTime" in the response Q, use the "access()" method in AbstractMemory ...
Definition: mem_ctrl.cc:639
mem_interface.hh
gem5::Packet::isRead
bool isRead() const
Definition: packet.hh:582
gem5::memory::MemPacket::addr
Addr addr
The starting address of the packet.
Definition: mem_ctrl.hh:136
gem5::memory::MemCtrl::minWriteToReadDataGap
Tick minWriteToReadDataGap()
Calculate the minimum delay used when scheduling a write-to-read transision.
Definition: mem_ctrl.cc:1175
gem5::memory::DRAMInterface::isBusy
bool isBusy()
This function checks if ranks are actively refreshing and therefore busy.
Definition: mem_interface.cc:898
gem5::memory::MemCtrl::port
MemoryPort port
Our incoming port, for a multi-ported controller add a crossbar in front of it.
Definition: mem_ctrl.hh:276
gem5::memory::MemCtrl::CtrlStats::CtrlStats
CtrlStats(MemCtrl &ctrl)
Definition: mem_ctrl.cc:1191
gem5::memory::MemCtrl::nextBurstAt
Tick nextBurstAt
Till when must we wait before issuing next RD/WR burst?
Definition: mem_ctrl.hh:522
gem5::memory::MemCtrl::processNextReqEvent
void processNextReqEvent()
Bunch of things requires to setup "events" in gem5 When event "respondEvent" occurs for example,...
Definition: mem_ctrl.cc:874
gem5::memory::MemCtrl::CtrlStats::requestorReadBytes
statistics::Vector requestorReadBytes
Definition: mem_ctrl.hh:574
gem5::memory::AbstractMemory::functionalAccess
void functionalAccess(PacketPtr pkt)
Perform an untimed memory read or write without changing anything but the memory itself.
Definition: abstract_mem.cc:482
gem5::memory::MemCtrl::CtrlStats::numWrRetry
statistics::Scalar numWrRetry
Definition: mem_ctrl.hh:555
gem5::Named::name
virtual std::string name() const
Definition: named.hh:47
gem5::Packet::requestorId
RequestorID requestorId() const
Definition: packet.hh:767
gem5::Packet::print
void print(std::ostream &o, int verbosity=0, const std::string &prefix="") const
Definition: packet.cc:373
gem5::memory::qos::MemCtrl::selectNextBusState
BusState selectNextBusState()
Returns next bus direction (READ or WRITE) based on configured policy.
Definition: mem_ctrl.cc:247
gem5::QueuedResponsePort
A queued port is a port that has an infinite queue for outgoing packets and thus decouples the module...
Definition: qport.hh:61
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:186
ADD_STAT
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Definition: group.hh:75
gem5::Packet
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:283
gem5::memory::MemCtrl::readQueueFull
bool readQueueFull(unsigned int pkt_count) const
Check if the read queue has room for more entries.
Definition: mem_ctrl.cc:174
gem5::MipsISA::p
Bitfield< 0 > p
Definition: pra_constants.hh:326
gem5::Tick
uint64_t Tick
Tick count type.
Definition: types.hh:58
gem5::memory::MemPacket::isDram
bool isDram() const
Return true if its a DRAM access.
Definition: mem_ctrl.hh:200
gem5::memory::MemCtrl::recvFunctional
void recvFunctional(PacketPtr pkt)
Definition: mem_ctrl.cc:1384
gem5::memory::DRAMInterface::commandOffset
Tick commandOffset() const override
Definition: mem_interface.hh:934
gem5::memory::MemCtrl::recvTimingReq
bool recvTimingReq(PacketPtr pkt)
Definition: mem_ctrl.cc:422
gem5::memory::MemPacket
A memory packet stores packets along with the timestamp of when the packet entered the queue,...
Definition: mem_ctrl.hh:97
gem5::memory::MemCtrl::stats
CtrlStats stats
Definition: mem_ctrl.hh:594
gem5::memory::MemCtrl::drain
DrainState drain() override
Draining is the process of clearing out the states of SimObjects.These are the SimObjects that are pa...
Definition: mem_ctrl.cc:1420
gem5::memory::qos::MemCtrl::system
System * system() const
read the system pointer
Definition: mem_ctrl.hh:371
gem5::memory::MemCtrl::init
virtual void init() override
init() is called after all C++ SimObjects have been created and all ports are connected.
Definition: mem_ctrl.cc:101
gem5::memory::MemPacket::qosValue
void qosValue(const uint8_t qv)
Set the packet QoS value (interface compatibility with Packet)
Definition: mem_ctrl.hh:159
gem5::memory::MemCtrl::pruneBurstTick
void pruneBurstTick()
Remove commands that have already issued from burstTicks.
Definition: mem_ctrl.cc:683
gem5::memory::MemCtrl::pendingDelete
std::unique_ptr< Packet > pendingDelete
Upstream caches need this packet until true is returned, so hold it for deletion until a subsequent c...
Definition: mem_ctrl.hh:600
gem5::memory::MemCtrl::retryWrReq
bool retryWrReq
Definition: mem_ctrl.hh:287
gem5::memory::qos::MemCtrl::totalWriteQueueSize
uint64_t totalWriteQueueSize
Total write request packets queue length in #packets.
Definition: mem_ctrl.hh:134
gem5::memory::DRAMInterface::checkRefreshState
void checkRefreshState(uint8_t rank)
Check the refresh state to determine if refresh needs to be kicked back into action after a read resp...
Definition: mem_interface.cc:993
gem5::ArmISA::offset
Bitfield< 23, 0 > offset
Definition: types.hh:144
gem5::memory::MemCtrl::allIntfDrained
bool allIntfDrained() const
Ensure that all interfaces have drained their commands.
Definition: mem_ctrl.cc:1409
gem5::memory::MemCtrl::CtrlStats::rdQLenPdf
statistics::Vector rdQLenPdf
Definition: mem_ctrl.hh:558
gem5::memory::MemCtrl::chooseNextFRFCFS
MemPacketQueue::iterator chooseNextFRFCFS(MemPacketQueue &queue, Tick extra_col_delay)
For FR-FCFS policy reorder the read/write queue depending on row buffer hits and earliest bursts avai...
Definition: mem_ctrl.cc:597
gem5::Port::isConnected
bool isConnected() const
Is this port currently connected to a peer?
Definition: port.hh:133
gem5::memory::MemCtrl::CtrlStats::bytesWrittenSys
statistics::Scalar bytesWrittenSys
Definition: mem_ctrl.hh:565
mem_ctrl.hh
gem5::memory::MemCtrl::MemoryPort::recvFunctional
void recvFunctional(PacketPtr pkt) override
Receive a functional request packet from the peer.
Definition: mem_ctrl.cc:1486
gem5::memory::MemCtrl::CtrlStats::writeReqs
statistics::Scalar writeReqs
Definition: mem_ctrl.hh:544
gem5::memory::BurstHelper::burstsServiced
unsigned int burstsServiced
Number of bursts serviced so far for a system packet.
Definition: mem_ctrl.hh:86
gem5::memory::MemCtrl::CtrlStats::regStats
void regStats() override
Callback to set stat parameters.
Definition: mem_ctrl.cc:1288
gem5::memory::MemPacket::requestorId
RequestorID requestorId() const
Get the packet RequestorID (interface compatibility with Packet)
Definition: mem_ctrl.hh:171
gem5::DrainState::Drained
@ Drained
Buffers drained, ready for serialization/handover.
gem5::System::isTimingMode
bool isTimingMode() const
Is the system in timing mode?
Definition: system.hh:276
gem5::memory::MemInterface::minReadToWriteDataGap
Tick minReadToWriteDataGap() const
Definition: mem_interface.hh:265
gem5::memory::MemCtrl::isTimingMode
bool isTimingMode
Remember if the memory system is in timing mode.
Definition: mem_ctrl.hh:281
gem5::Packet::pushLabel
void pushLabel(const std::string &lbl)
Push label for PrintReq (safe to call unconditionally).
Definition: packet.hh:1420
gem5::Packet::popLabel
void popLabel()
Pop label for PrintReq (safe to call unconditionally).
Definition: packet.hh:1430
gem5::Packet::needsResponse
bool needsResponse() const
Definition: packet.hh:597
gem5::memory::MemCtrl::CtrlStats::numRdRetry
statistics::Scalar numRdRetry
Definition: mem_ctrl.hh:554
gem5::Addr
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:147
gem5::memory::MemCtrl::respQueue
std::deque< MemPacket * > respQueue
Response queue where read packets wait after we're done working with them, but it's not time to send ...
Definition: mem_ctrl.hh:460
gem5::memory::MemCtrl::frontendLatency
const Tick frontendLatency
Pipeline latency of the controller frontend.
Definition: mem_ctrl.hh:504
gem5::memory::MemCtrl::CtrlStats::wrQLenPdf
statistics::Vector wrQLenPdf
Definition: mem_ctrl.hh:559
gem5::memory::MemCtrl::MemoryPort::recvAtomicBackdoor
Tick recvAtomicBackdoor(PacketPtr pkt, MemBackdoorPtr &backdoor) override
Receive an atomic request packet from the peer, and optionally provide a backdoor to the data being a...
Definition: mem_ctrl.cc:1507
name
const std::string & name()
Definition: trace.cc:49
gem5::MemBackdoor
Definition: backdoor.hh:41
gem5::memory::MemCtrl::CtrlStats::servicedByWrQ
statistics::Scalar servicedByWrQ
Definition: mem_ctrl.hh:547
gem5::memory::MemCtrl::CtrlStats::totGap
statistics::Scalar totGap
Definition: mem_ctrl.hh:570
gem5::ResponsePort::sendRangeChange
void sendRangeChange() const
Called by the owner to send a range change.
Definition: port.hh:296
gem5::memory::NVMInterface::burstReady
bool burstReady(MemPacket *pkt) const override
Check if a burst operation can be issued to the NVM.
Definition: mem_interface.cc:2303
gem5::memory::AbstractMemory::getBackdoor
void getBackdoor(MemBackdoorPtr &bd_ptr)
Definition: abstract_mem.hh:238
gem5::memory::NVMInterface::chooseRead
void chooseRead(MemPacketQueue &queue)
Select read command to issue asynchronously.
Definition: mem_interface.cc:2163
gem5::memory::MemCtrl::readQueue
std::vector< MemPacketQueue > readQueue
The controller's main read and write queues, with support for QoS reordering.
Definition: mem_ctrl.hh:440
gem5::memory::MemCtrl::CtrlStats::readBursts
statistics::Scalar readBursts
Definition: mem_ctrl.hh:545
gem5::divCeil
static constexpr T divCeil(const T &a, const U &b)
Definition: intmath.hh:110
gem5::memory::MemCtrl::burstTicks
std::unordered_multiset< Tick > burstTicks
Holds count of commands issued in burst window starting at defined Tick.
Definition: mem_ctrl.hh:467
gem5::Drainable::signalDrainDone
void signalDrainDone() const
Signal that an object is drained.
Definition: drain.hh:305
gem5::memory::NVMInterface::commandOffset
Tick commandOffset() const override
Definition: mem_interface.hh:1181
panic_if
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
Definition: logging.hh:203
gem5::memory::MemCtrl::CtrlStats::readReqs
statistics::Scalar readReqs
Definition: mem_ctrl.hh:543
gem5::memory::MemCtrl::doBurstAccess
void doBurstAccess(MemPacket *mem_pkt)
Actually do the burst based on media specific access function.
Definition: mem_ctrl.cc:817
gem5::memory::NVMInterface::doBurstAccess
std::pair< Tick, Tick > doBurstAccess(MemPacket *pkt, Tick next_burst_at)
Actually do the burst and update stats.
Definition: mem_interface.cc:2312
gem5::memory::MemCtrl::minReadToWriteDataGap
Tick minReadToWriteDataGap()
Calculate the minimum delay used when scheduling a read-to-write transition.
Definition: mem_ctrl.cc:1167
gem5::memory::DRAMInterface::doBurstAccess
std::pair< Tick, Tick > doBurstAccess(MemPacket *mem_pkt, Tick next_burst_at, const std::vector< MemPacketQueue > &queue)
Actually do the burst - figure out the latency it will take to service the req based on bank state,...
Definition: mem_interface.cc:462
gem5::ceilLog2
static constexpr int ceilLog2(const T &n)
Definition: intmath.hh:84
gem5::memory::MemCtrl::writeLowThreshold
const uint32_t writeLowThreshold
Definition: mem_ctrl.hh:488
gem5::memory::MemCtrl::MemoryPort::getAddrRanges
AddrRangeList getAddrRanges() const override
Get a list of the non-overlapping address ranges the owner is responsible for.
Definition: mem_ctrl.cc:1471
gem5::memory::MemCtrl::verifyMultiCmd
Tick verifyMultiCmd(Tick cmd_tick, Tick max_cmds_per_burst, Tick max_multi_cmd_split=0)
Check for command bus contention for multi-cycle (2 currently) command.
Definition: mem_ctrl.cc:727
gem5::memory::MemCtrl::getPort
Port & getPort(const std::string &if_name, PortID idx=InvalidPortID) override
Get a port with a given name and index.
Definition: mem_ctrl.cc:1399
gem5::memory::MemCtrl::MemoryPort::recvAtomic
Tick recvAtomic(PacketPtr pkt) override
Receive an atomic request packet from the peer.
Definition: mem_ctrl.cc:1501
gem5::Port
Ports are used to interface objects to each other.
Definition: port.hh:61
gem5::memory::MemPacket::rank
const uint8_t rank
Will be populated by address decoder.
Definition: mem_ctrl.hh:119
gem5::memory::MemCtrl::burstAlign
Addr burstAlign(Addr addr, bool is_dram) const
Burst-align an address.
Definition: mem_ctrl.cc:1183
gem5::PowerISA::vec
Bitfield< 25 > vec
Definition: misc.hh:108
std::deque
STL deque class.
Definition: stl.hh:44
gem5::memory::MemCtrl::writeQueueFull
bool writeQueueFull(unsigned int pkt_count) const
Check if the write queue has room for more entries.
Definition: mem_ctrl.cc:186
gem5::memory::qos::MemCtrl::numPriorities
uint8_t numPriorities() const
Gets the total number of priority levels in the QoS memory controller.
Definition: mem_ctrl.hh:367
gem5::simSeconds
statistics::Formula & simSeconds
Definition: stats.cc:45
gem5::memory::MemCtrl::CtrlStats::writeBursts
statistics::Scalar writeBursts
Definition: mem_ctrl.hh:546
gem5::memory::MemCtrl::CtrlStats::rdPerTurnAround
statistics::Histogram rdPerTurnAround
Definition: mem_ctrl.hh:560
gem5::memory::BurstHelper
A burst helper helps organize and manage a packet that is larger than the memory burst size.
Definition: mem_ctrl.hh:78
gem5::statistics::Group
Statistics container.
Definition: group.hh:93
gem5::memory::qos::MemCtrl::recordTurnaroundStats
void recordTurnaroundStats()
Record statistics on turnarounds based on busStateNext and busState values.
Definition: mem_ctrl.cc:359
gem5::memory::qos::MemCtrl::WRITE
@ WRITE
Definition: mem_ctrl.hh:84
gem5::memory::MemCtrl::getBurstWindow
Tick getBurstWindow(Tick cmd_tick)
Calculate burst window aligned tick.
Definition: mem_ctrl.cc:696
gem5::memory::MemCtrl::chooseNext
MemPacketQueue::iterator chooseNext(MemPacketQueue &queue, Tick extra_col_delay)
The memory scheduler/arbiter - picks which request needs to go next, based on the specified policy suc...
Definition: mem_ctrl.cc:562
gem5::memory::MemInterface::decodePacket
MemPacket * decodePacket(const PacketPtr pkt, Addr pkt_addr, unsigned int size, bool is_read, bool is_dram)
Address decoder to figure out physical mapping onto ranks, banks, and rows.
Definition: mem_interface.cc:89
trace.hh
gem5::memory::MemCtrl::minWritesPerSwitch
const uint32_t minWritesPerSwitch
Definition: mem_ctrl.hh:489
gem5::memory::MemInterface::bytesPerBurst
uint32_t bytesPerBurst() const
Definition: mem_interface.hh:240
gem5::memory::MemCtrl::drainResume
virtual void drainResume() override
Resume execution after a successful drain.
Definition: mem_ctrl.cc:1447
std::list< AddrRange >
gem5::Packet::getAddr
Addr getAddr() const
Definition: packet.hh:781
gem5::memory::MemCtrl::commandWindow
const Tick commandWindow
Length of a command window, used to check command bandwidth.
Definition: mem_ctrl.hh:517
gem5::memory::MemCtrl::CtrlStats::mergedWrBursts
statistics::Scalar mergedWrBursts
Definition: mem_ctrl.hh:548
gem5::QueuedResponsePort::schedTimingResp
void schedTimingResp(PacketPtr pkt, Tick when)
Schedule the sending of a timing response.
Definition: qport.hh:93
gem5::memory::MemCtrl::printQs
void printQs() const
Used for debugging to observe the contents of the queues.
Definition: mem_ctrl.cc:397
fatal_if
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condi...
Definition: logging.hh:225
gem5::memory::AbstractMemory::access
void access(PacketPtr pkt)
Perform an untimed memory access and update all the state (e.g.
Definition: abstract_mem.cc:380
gem5::memory::MemCtrl::nextReqTime
Tick nextReqTime
The soonest you have to start thinking about the next request is the longest access time that can occ...
Definition: mem_ctrl.hh:532
gem5::memory::BurstHelper::burstCount
const unsigned int burstCount
Number of bursts required for a system packet.
Definition: mem_ctrl.hh:83
gem5
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
Definition: decoder.cc:40
gem5::memory::MemPacket::isRead
bool isRead() const
Return true if its a read packet (interface compatibility with Packet)
Definition: mem_ctrl.hh:189
gem5::memory::MemCtrl::CtrlStats::avgRdQLen
statistics::Average avgRdQLen
Definition: mem_ctrl.hh:551
gem5::memory::qos::MemCtrl::logRequest
void logRequest(BusState dir, RequestorID id, uint8_t _qos, Addr addr, uint64_t entries)
Called upon receiving a request; updates statistics and queue status.
Definition: mem_ctrl.cc:92
gem5::memory::MemCtrl::addToReadQueue
void addToReadQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram)
When a new read comes in, first check if the write q has a pending request to the same address....
Definition: mem_ctrl.cc:197
gem5::memory::MemCtrl::CtrlStats::wrPerTurnAround
statistics::Histogram wrPerTurnAround
Definition: mem_ctrl.hh:561
gem5::memory::MemCtrl::writesThisTime
uint32_t writesThisTime
Definition: mem_ctrl.hh:490
gem5::Packet::isResponse
bool isResponse() const
Definition: packet.hh:587
gem5::Packet::getSize
unsigned getSize() const
Definition: packet.hh:791
gem5::memory::MemCtrl::CtrlStats::requestorWriteBytes
statistics::Vector requestorWriteBytes
Definition: mem_ctrl.hh:575
gem5::DrainState::Draining
@ Draining
Draining buffers pending serialization/handover.
gem5::Event::scheduled
bool scheduled() const
Determine if the current event is scheduled.
Definition: eventq.hh:465
gem5::memory::DRAMInterface::startup
void startup() override
Iterate through dram ranks and instantiate per rank startup routine.
Definition: mem_interface.cc:885
panic
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:177
gem5::X86ISA::addr
Bitfield< 3 > addr
Definition: types.hh:84

Generated on Tue Sep 21 2021 12:25:32 for gem5 by doxygen 1.8.17