gem5  v20.1.0.0
mem_ctrl.cc
/*
 * Copyright (c) 2010-2020 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2013 Amin Farmahini-Farahani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/mem_ctrl.hh"

#include "base/trace.hh"
#include "debug/DRAM.hh"
#include "debug/Drain.hh"
#include "debug/MemCtrl.hh"
#include "debug/NVM.hh"
#include "debug/QOS.hh"
#include "mem/mem_interface.hh"
#include "sim/system.hh"

using namespace std;

MemCtrl::MemCtrl(const MemCtrlParams* p) :
    QoS::MemCtrl(p),
    port(name() + ".port", *this), isTimingMode(false),
    retryRdReq(false), retryWrReq(false),
    nextReqEvent([this]{ processNextReqEvent(); }, name()),
    respondEvent([this]{ processRespondEvent(); }, name()),
    dram(p->dram), nvm(p->nvm),
    readBufferSize((dram ? dram->readBufferSize : 0) +
                   (nvm ? nvm->readBufferSize : 0)),
    writeBufferSize((dram ? dram->writeBufferSize : 0) +
                    (nvm ? nvm->writeBufferSize : 0)),
    writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0),
    writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0),
    minWritesPerSwitch(p->min_writes_per_switch),
    writesThisTime(0), readsThisTime(0),
    memSchedPolicy(p->mem_sched_policy),
    frontendLatency(p->static_frontend_latency),
    backendLatency(p->static_backend_latency),
    commandWindow(p->command_window),
    nextBurstAt(0), prevArrival(0),
    nextReqTime(0),
    stats(*this)
{
    DPRINTF(MemCtrl, "Setting up controller\n");
    readQueue.resize(p->qos_priorities);
    writeQueue.resize(p->qos_priorities);

    // Hook up interfaces to the controller
    if (dram)
        dram->setCtrl(this, commandWindow);
    if (nvm)
        nvm->setCtrl(this, commandWindow);

    fatal_if(!dram && !nvm, "Memory controller must have an interface");

    // perform a basic check of the write thresholds
    if (p->write_low_thresh_perc >= p->write_high_thresh_perc)
        fatal("Write buffer low threshold %d must be smaller than the "
              "high threshold %d\n", p->write_low_thresh_perc,
              p->write_high_thresh_perc);
}
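
// Worked example of the threshold math above (illustrative values, not
// asserted defaults): with a combined writeBufferSize of 64 entries,
// write_high_thresh_perc = 85 and write_low_thresh_perc = 50 give
// writeHighThreshold = 64 * 85 / 100.0 = 54.4, stored as 54 in the
// uint32_t member, and writeLowThreshold = 32; the low threshold then
// governs when the bus turns back around to reads.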

void
MemCtrl::init()
{
    if (!port.isConnected()) {
        fatal("MemCtrl %s is unconnected!\n", name());
    } else {
        port.sendRangeChange();
    }
}

void
MemCtrl::startup()
{
    // remember the memory system mode of operation
    isTimingMode = system()->isTimingMode();

    if (isTimingMode) {
        // shift the bus busy time sufficiently far ahead that we never
        // have to worry about negative values when computing the time for
        // the next request, this will add an insignificant bubble at the
        // start of simulation
        nextBurstAt = curTick() + (dram ? dram->commandOffset() :
                                          nvm->commandOffset());
    }
}

Tick
MemCtrl::recvAtomic(PacketPtr pkt)
{
    DPRINTF(MemCtrl, "recvAtomic: %s 0x%x\n",
            pkt->cmdString(), pkt->getAddr());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    Tick latency = 0;
    // do the actual memory access and turn the packet into a response
    if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
        dram->access(pkt);

        if (pkt->hasData()) {
            // this value is not supposed to be accurate, just enough to
            // keep things going, mimic a closed page
            latency = dram->accessLatency();
        }
    } else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
        nvm->access(pkt);

        if (pkt->hasData()) {
            // this value is not supposed to be accurate, just enough to
            // keep things going, mimic a closed page
            latency = nvm->accessLatency();
        }
    } else {
        panic("Can't handle address range for packet %s\n",
              pkt->print());
    }

    return latency;
}

bool
MemCtrl::readQueueFull(unsigned int neededEntries) const
{
    DPRINTF(MemCtrl,
            "Read queue limit %d, current size %d, entries needed %d\n",
            readBufferSize, totalReadQueueSize + respQueue.size(),
            neededEntries);

    auto rdsize_new = totalReadQueueSize + respQueue.size() + neededEntries;
    return rdsize_new > readBufferSize;
}

bool
MemCtrl::writeQueueFull(unsigned int neededEntries) const
{
    DPRINTF(MemCtrl,
            "Write queue limit %d, current size %d, entries needed %d\n",
            writeBufferSize, totalWriteQueueSize, neededEntries);

    auto wrsize_new = (totalWriteQueueSize + neededEntries);
    return wrsize_new > writeBufferSize;
}

void
MemCtrl::addToReadQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram)
{
    // only add to the read queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(!pkt->isWrite());

    assert(pkt_count != 0);

    // if the request size is larger than burst size, the pkt is split into
    // multiple packets
    // Note if the pkt starting address is not aligned to burst size, the
    // address of first packet is kept unaligned. Subsequent packets
    // are aligned to burst size boundaries. This is to ensure we accurately
    // check read packets against packets in write queue.
    const Addr base_addr = pkt->getAddr();
    Addr addr = base_addr;
    unsigned pktsServicedByWrQ = 0;
    BurstHelper* burst_helper = NULL;

    uint32_t burst_size = is_dram ? dram->bytesPerBurst() :
                                    nvm->bytesPerBurst();
    for (int cnt = 0; cnt < pkt_count; ++cnt) {
        unsigned size = std::min((addr | (burst_size - 1)) + 1,
                        base_addr + pkt->getSize()) - addr;
        stats.readPktSize[ceilLog2(size)]++;
        stats.readBursts++;
        stats.requestorReadAccesses[pkt->requestorId()]++;

        // First check write buffer to see if the data is already at
        // the controller
        bool foundInWrQ = false;
        Addr burst_addr = burstAlign(addr, is_dram);
        // if the burst address is not present then there is no need
        // looking any further
        if (isInWriteQueue.find(burst_addr) != isInWriteQueue.end()) {
            for (const auto& vec : writeQueue) {
                for (const auto& p : vec) {
                    // check if the read is subsumed in the write queue
                    // packet we are looking at
                    if (p->addr <= addr &&
                        ((addr + size) <= (p->addr + p->size))) {

                        foundInWrQ = true;
                        stats.servicedByWrQ++;
                        pktsServicedByWrQ++;
                        DPRINTF(MemCtrl,
                                "Read to addr %lld with size %d serviced by "
                                "write queue\n",
                                addr, size);
                        stats.bytesReadWrQ += burst_size;
                        break;
                    }
                }
            }
        }

        // If not found in the write q, make a memory packet and
        // push it onto the read queue
        if (!foundInWrQ) {

            // Make the burst helper for split packets
            if (pkt_count > 1 && burst_helper == NULL) {
                DPRINTF(MemCtrl, "Read to addr %lld translates to %d "
                        "memory requests\n", pkt->getAddr(), pkt_count);
                burst_helper = new BurstHelper(pkt_count);
            }

            MemPacket* mem_pkt;
            if (is_dram) {
                mem_pkt = dram->decodePacket(pkt, addr, size, true, true);
                // increment read entries of the rank
                dram->setupRank(mem_pkt->rank, true);
            } else {
                mem_pkt = nvm->decodePacket(pkt, addr, size, true, false);
                // Increment count to trigger issue of non-deterministic read
                nvm->setupRank(mem_pkt->rank, true);
                // Default readyTime to Max; will be reset once read is issued
                mem_pkt->readyTime = MaxTick;
            }
            mem_pkt->burstHelper = burst_helper;

            assert(!readQueueFull(1));
            stats.rdQLenPdf[totalReadQueueSize + respQueue.size()]++;

            DPRINTF(MemCtrl, "Adding to read queue\n");

            readQueue[mem_pkt->qosValue()].push_back(mem_pkt);

            // log packet
            logRequest(MemCtrl::READ, pkt->requestorId(), pkt->qosValue(),
                       mem_pkt->addr, 1);

            // Update stats
            stats.avgRdQLen = totalReadQueueSize + respQueue.size();
        }

        // Starting address of next memory pkt (aligned to burst boundary)
        addr = (addr | (burst_size - 1)) + 1;
    }

    // If all packets are serviced by write queue, we send the response back
    if (pktsServicedByWrQ == pkt_count) {
        accessAndRespond(pkt, frontendLatency);
        return;
    }

    // Update how many split packets are serviced by write queue
    if (burst_helper != NULL)
        burst_helper->burstsServiced = pktsServicedByWrQ;

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(MemCtrl, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}
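
// Illustrative walk-through of the splitting logic above (hypothetical
// values): a 128-byte read at address 0x40 with a 64-byte burst size
// gives pkt_count = 2, bursts [0x40, 0x7F] and [0x80, 0xBF]. If a queued
// write fully covers [0x80, 0xBF], that burst is serviced directly from
// the write queue; only the first burst enters the read queue, and the
// shared BurstHelper tracks when both halves of the system packet are
// done so a single response can be sent.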

void
MemCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram)
{
    // only add to the write queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(pkt->isWrite());

    // if the request size is larger than burst size, the pkt is split into
    // multiple packets
    const Addr base_addr = pkt->getAddr();
    Addr addr = base_addr;
    uint32_t burst_size = is_dram ? dram->bytesPerBurst() :
                                    nvm->bytesPerBurst();
    for (int cnt = 0; cnt < pkt_count; ++cnt) {
        unsigned size = std::min((addr | (burst_size - 1)) + 1,
                        base_addr + pkt->getSize()) - addr;
        stats.writePktSize[ceilLog2(size)]++;
        stats.writeBursts++;
        stats.requestorWriteAccesses[pkt->requestorId()]++;

        // see if we can merge with an existing item in the write
        // queue and keep track of whether we have merged or not
        bool merged = isInWriteQueue.find(burstAlign(addr, is_dram)) !=
            isInWriteQueue.end();

        // if the item was not merged we need to create a new write
        // and enqueue it
        if (!merged) {
            MemPacket* mem_pkt;
            if (is_dram) {
                mem_pkt = dram->decodePacket(pkt, addr, size, false, true);
                dram->setupRank(mem_pkt->rank, false);
            } else {
                mem_pkt = nvm->decodePacket(pkt, addr, size, false, false);
                nvm->setupRank(mem_pkt->rank, false);
            }
            assert(totalWriteQueueSize < writeBufferSize);
            stats.wrQLenPdf[totalWriteQueueSize]++;

            DPRINTF(MemCtrl, "Adding to write queue\n");

            writeQueue[mem_pkt->qosValue()].push_back(mem_pkt);
            isInWriteQueue.insert(burstAlign(addr, is_dram));

            // log packet
            logRequest(MemCtrl::WRITE, pkt->requestorId(), pkt->qosValue(),
                       mem_pkt->addr, 1);

            assert(totalWriteQueueSize == isInWriteQueue.size());

            // Update stats
            stats.avgWrQLen = totalWriteQueueSize;

        } else {
            DPRINTF(MemCtrl,
                    "Merging write burst with existing queue entry\n");

            // keep track of the fact that this burst effectively
            // disappeared as it was merged with an existing one
            stats.mergedWrBursts++;
        }

        // Starting address of next memory pkt (aligned to burst_size boundary)
        addr = (addr | (burst_size - 1)) + 1;
    }

    // we do not wait for the writes to be sent to the actual memory,
    // but instead take responsibility for the consistency here and
    // snoop the write queue for any upcoming reads
    // @todo, if a pkt size is larger than burst size, we might need a
    // different front end latency
    accessAndRespond(pkt, frontendLatency);

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(MemCtrl, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}

void
MemCtrl::printQs() const
{
#if TRACING_ON
    DPRINTF(MemCtrl, "===READ QUEUE===\n\n");
    for (const auto& queue : readQueue) {
        for (const auto& packet : queue) {
            DPRINTF(MemCtrl, "Read %lu\n", packet->addr);
        }
    }

    DPRINTF(MemCtrl, "\n===RESP QUEUE===\n\n");
    for (const auto& packet : respQueue) {
        DPRINTF(MemCtrl, "Response %lu\n", packet->addr);
    }

    DPRINTF(MemCtrl, "\n===WRITE QUEUE===\n\n");
    for (const auto& queue : writeQueue) {
        for (const auto& packet : queue) {
            DPRINTF(MemCtrl, "Write %lu\n", packet->addr);
        }
    }
#endif // TRACING_ON
}

bool
MemCtrl::recvTimingReq(PacketPtr pkt)
{
    // This is where we enter from the outside world
    DPRINTF(MemCtrl, "recvTimingReq: request %s addr %lld size %d\n",
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    panic_if(!(pkt->isRead() || pkt->isWrite()),
             "Should only see read and writes at memory controller\n");

    // Calc avg gap between requests
    if (prevArrival != 0) {
        stats.totGap += curTick() - prevArrival;
    }
    prevArrival = curTick();

    // What type of media does this packet access?
    bool is_dram;
    if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
        is_dram = true;
    } else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
        is_dram = false;
    } else {
        panic("Can't handle address range for packet %s\n",
              pkt->print());
    }


    // Find out how many memory packets a pkt translates to
    // If the burst size is equal or larger than the pkt size, then a pkt
    // translates to only one memory packet. Otherwise, a pkt translates to
    // multiple memory packets
    unsigned size = pkt->getSize();
    uint32_t burst_size = is_dram ? dram->bytesPerBurst() :
                                    nvm->bytesPerBurst();
    unsigned offset = pkt->getAddr() & (burst_size - 1);
    unsigned int pkt_count = divCeil(offset + size, burst_size);

    // run the QoS scheduler and assign a QoS priority value to the packet
    qosSchedule( { &readQueue, &writeQueue }, burst_size, pkt);

    // check local buffers and do not accept if full
    if (pkt->isWrite()) {
        assert(size != 0);
        if (writeQueueFull(pkt_count)) {
            DPRINTF(MemCtrl, "Write queue full, not accepting\n");
            // remember that we have to retry this port
            retryWrReq = true;
            stats.numWrRetry++;
            return false;
        } else {
            addToWriteQueue(pkt, pkt_count, is_dram);
            stats.writeReqs++;
            stats.bytesWrittenSys += size;
        }
    } else {
        assert(pkt->isRead());
        assert(size != 0);
        if (readQueueFull(pkt_count)) {
            DPRINTF(MemCtrl, "Read queue full, not accepting\n");
            // remember that we have to retry this port
            retryRdReq = true;
            stats.numRdRetry++;
            return false;
        } else {
            addToReadQueue(pkt, pkt_count, is_dram);
            stats.readReqs++;
            stats.bytesReadSys += size;
        }
    }

    return true;
}
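
// Worked example of the burst-count arithmetic above (hypothetical
// values): a 64-byte packet at address 0x7C with a 64-byte burst size
// has offset = 0x7C & 0x3F = 60, so pkt_count = divCeil(60 + 64, 64) = 2;
// the access straddles a burst boundary and becomes two bursts.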

void
MemCtrl::processRespondEvent()
{
    DPRINTF(MemCtrl,
            "processRespondEvent(): Some req has reached its readyTime\n");

    MemPacket* mem_pkt = respQueue.front();

    if (mem_pkt->isDram()) {
        // media specific checks and functions when read response is complete
        dram->respondEvent(mem_pkt->rank);
    }

    if (mem_pkt->burstHelper) {
        // it is a split packet
        mem_pkt->burstHelper->burstsServiced++;
        if (mem_pkt->burstHelper->burstsServiced ==
            mem_pkt->burstHelper->burstCount) {
            // we have now serviced all children packets of a system packet
            // so we can now respond to the requestor
            // @todo we probably want to have a different front end and back
            // end latency for split packets
            accessAndRespond(mem_pkt->pkt, frontendLatency + backendLatency);
            delete mem_pkt->burstHelper;
            mem_pkt->burstHelper = NULL;
        }
    } else {
        // it is not a split packet
        accessAndRespond(mem_pkt->pkt, frontendLatency + backendLatency);
    }

    delete respQueue.front();
    respQueue.pop_front();

    if (!respQueue.empty()) {
        assert(respQueue.front()->readyTime >= curTick());
        assert(!respondEvent.scheduled());
        schedule(respondEvent, respQueue.front()->readyTime);
    } else {
        // if there is nothing left in any queue, signal a drain
        if (drainState() == DrainState::Draining &&
            !totalWriteQueueSize && !totalReadQueueSize &&
            allIntfDrained()) {

            DPRINTF(Drain, "Controller done draining\n");
            signalDrainDone();
        } else if (mem_pkt->isDram()) {
            // check the refresh state and kick the refresh event loop
            // into action again if banks already closed and just waiting
            // for read to complete
            dram->checkRefreshState(mem_pkt->rank);
        }
    }

    // We have made a location in the queue available at this point,
    // so if there is a read that was forced to wait, retry now
    if (retryRdReq) {
        retryRdReq = false;
        port.sendRetryReq();
    }
}

MemPacketQueue::iterator
MemCtrl::chooseNext(MemPacketQueue& queue, Tick extra_col_delay)
{
    // This method does the arbitration between requests.

    MemPacketQueue::iterator ret = queue.end();

    if (!queue.empty()) {
        if (queue.size() == 1) {
            // available rank corresponds to state refresh idle
            MemPacket* mem_pkt = *(queue.begin());
            if (packetReady(mem_pkt)) {
                ret = queue.begin();
                DPRINTF(MemCtrl, "Single request, going to a free rank\n");
            } else {
                DPRINTF(MemCtrl, "Single request, going to a busy rank\n");
            }
        } else if (memSchedPolicy == Enums::fcfs) {
            // check if there is a packet going to a free rank
            for (auto i = queue.begin(); i != queue.end(); ++i) {
                MemPacket* mem_pkt = *i;
                if (packetReady(mem_pkt)) {
                    ret = i;
                    break;
                }
            }
        } else if (memSchedPolicy == Enums::frfcfs) {
            ret = chooseNextFRFCFS(queue, extra_col_delay);
        } else {
            panic("No scheduling policy chosen\n");
        }
    }
    return ret;
}

MemPacketQueue::iterator
MemCtrl::chooseNextFRFCFS(MemPacketQueue& queue, Tick extra_col_delay)
{
    auto selected_pkt_it = queue.end();
    Tick col_allowed_at = MaxTick;

    // time we need to issue a column command to be seamless
    const Tick min_col_at = std::max(nextBurstAt + extra_col_delay, curTick());

    // find optimal packet for each interface
    if (dram && nvm) {
        // create 2nd set of parameters for NVM
        auto nvm_pkt_it = queue.end();
        Tick nvm_col_at = MaxTick;

        // Select packet by default to give priority if both
        // can issue at the same time or seamlessly
        std::tie(selected_pkt_it, col_allowed_at) =
                 dram->chooseNextFRFCFS(queue, min_col_at);
        std::tie(nvm_pkt_it, nvm_col_at) =
                 nvm->chooseNextFRFCFS(queue, min_col_at);

        // Compare DRAM and NVM and select NVM if it can issue
        // earlier than the DRAM packet
        if (col_allowed_at > nvm_col_at) {
            selected_pkt_it = nvm_pkt_it;
        }
    } else if (dram) {
        std::tie(selected_pkt_it, col_allowed_at) =
                 dram->chooseNextFRFCFS(queue, min_col_at);
    } else if (nvm) {
        std::tie(selected_pkt_it, col_allowed_at) =
                 nvm->chooseNextFRFCFS(queue, min_col_at);
    }

    if (selected_pkt_it == queue.end()) {
        DPRINTF(MemCtrl, "%s no available packets found\n", __func__);
    }

    return selected_pkt_it;
}

void
MemCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency)
{
    DPRINTF(MemCtrl, "Responding to Address %lld.. \n", pkt->getAddr());

    bool needsResponse = pkt->needsResponse();
    // do the actual memory access which also turns the packet into a
    // response
    if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
        dram->access(pkt);
    } else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
        nvm->access(pkt);
    } else {
        panic("Can't handle address range for packet %s\n",
              pkt->print());
    }

    // turn packet around to go back to requestor if response expected
    if (needsResponse) {
        // access already turned the packet into a response
        assert(pkt->isResponse());
        // response_time consumes the static latency and is charged also
        // with headerDelay that takes into account the delay provided by
        // the xbar and also the payloadDelay that takes into account the
        // number of data beats.
        Tick response_time = curTick() + static_latency + pkt->headerDelay +
                             pkt->payloadDelay;
        // Here we reset the timing of the packet before sending it out.
        pkt->headerDelay = pkt->payloadDelay = 0;

        // queue the packet in the response queue to be sent out after
        // the static latency has passed
        port.schedTimingResp(pkt, response_time);
    } else {
        // @todo the packet is going to be deleted, and the MemPacket
        // is still having a pointer to it
        pendingDelete.reset(pkt);
    }

    DPRINTF(MemCtrl, "Done\n");

    return;
}

void
MemCtrl::pruneBurstTick()
{
    auto it = burstTicks.begin();
    while (it != burstTicks.end()) {
        auto current_it = it++;
        if (curTick() > *current_it) {
            DPRINTF(MemCtrl, "Removing burstTick for %d\n", *current_it);
            burstTicks.erase(current_it);
        }
    }
}

Tick
MemCtrl::getBurstWindow(Tick cmd_tick)
{
    // get tick aligned to burst window
    Tick burst_offset = cmd_tick % commandWindow;
    return (cmd_tick - burst_offset);
}
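
// Worked example (illustrative tick values): with commandWindow = 5000
// ticks, a command at cmd_tick = 12345 has burst_offset = 2345 and is
// aligned to the window starting at tick 10000; every command issued in
// [10000, 15000) shares that window's command bandwidth budget.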

Tick
MemCtrl::verifySingleCmd(Tick cmd_tick, Tick max_cmds_per_burst)
{
    // start with assumption that there is no contention on command bus
    Tick cmd_at = cmd_tick;

    // get tick aligned to burst window
    Tick burst_tick = getBurstWindow(cmd_tick);

    // verify that we have command bandwidth to issue the command
    // if not, iterate over next window(s) until slot found
    while (burstTicks.count(burst_tick) >= max_cmds_per_burst) {
        DPRINTF(MemCtrl, "Contention found on command bus at %d\n",
                burst_tick);
        burst_tick += commandWindow;
        cmd_at = burst_tick;
    }

    // add command into burst window and return corresponding Tick
    burstTicks.insert(burst_tick);
    return cmd_at;
}

Tick
MemCtrl::verifyMultiCmd(Tick cmd_tick, Tick max_cmds_per_burst,
                        Tick max_multi_cmd_split)
{
    // start with assumption that there is no contention on command bus
    Tick cmd_at = cmd_tick;

    // get tick aligned to burst window
    Tick burst_tick = getBurstWindow(cmd_tick);

    // Command timing requirements are from 2nd command
    // Start with assumption that 2nd command will issue at cmd_at and
    // find prior slot for 1st command to issue
    // Given a maximum latency of max_multi_cmd_split between the commands,
    // find the burst at the maximum latency prior to cmd_at
    Tick burst_offset = 0;
    Tick first_cmd_offset = cmd_tick % commandWindow;
    while (max_multi_cmd_split > (first_cmd_offset + burst_offset)) {
        burst_offset += commandWindow;
    }
    // get the earliest burst aligned address for first command
    // ensure that the time does not go negative
    Tick first_cmd_tick = burst_tick - std::min(burst_offset, burst_tick);

    // Can required commands issue?
    bool first_can_issue = false;
    bool second_can_issue = false;
    // verify that we have command bandwidth to issue the command(s)
    while (!first_can_issue || !second_can_issue) {
        bool same_burst = (burst_tick == first_cmd_tick);
        auto first_cmd_count = burstTicks.count(first_cmd_tick);
        auto second_cmd_count = same_burst ? first_cmd_count + 1 :
                                   burstTicks.count(burst_tick);

        first_can_issue = first_cmd_count < max_cmds_per_burst;
        second_can_issue = second_cmd_count < max_cmds_per_burst;

        if (!second_can_issue) {
            DPRINTF(MemCtrl, "Contention (cmd2) found on command bus at %d\n",
                    burst_tick);
            burst_tick += commandWindow;
            cmd_at = burst_tick;
        }

        // Verify max_multi_cmd_split isn't violated when command 2 is shifted
        // If commands initially were issued in same burst, they are
        // now in consecutive bursts and can still issue B2B
        bool gap_violated = !same_burst &&
            ((burst_tick - first_cmd_tick) > max_multi_cmd_split);

        if (!first_can_issue || (!second_can_issue && gap_violated)) {
            DPRINTF(MemCtrl, "Contention (cmd1) found on command bus at %d\n",
                    first_cmd_tick);
            first_cmd_tick += commandWindow;
        }
    }

    // Add command to burstTicks
    burstTicks.insert(burst_tick);
    burstTicks.insert(first_cmd_tick);

    return cmd_at;
}
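
// Illustrative trace of the slot search above (hypothetical values):
// with commandWindow = 5000, max_multi_cmd_split = 5000 and
// cmd_tick = 12500, the second command's window is burst_tick = 10000
// and the first command's earliest window is first_cmd_tick = 5000,
// i.e. the two sub-commands land in adjacent windows that still respect
// the maximum split; contention in either window shifts that command by
// one full commandWindow and the check repeats.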

bool
MemCtrl::inReadBusState(bool next_state) const
{
    // check the bus state
    if (next_state) {
        // use busStateNext to get the state that will be used
        // for the next burst
        return (busStateNext == MemCtrl::READ);
    } else {
        return (busState == MemCtrl::READ);
    }
}

bool
MemCtrl::inWriteBusState(bool next_state) const
{
    // check the bus state
    if (next_state) {
        // use busStateNext to get the state that will be used
        // for the next burst
        return (busStateNext == MemCtrl::WRITE);
    } else {
        return (busState == MemCtrl::WRITE);
    }
}

void
MemCtrl::doBurstAccess(MemPacket* mem_pkt)
{
    // first clean up the burstTick set, removing old entries
    // before adding new entries for next burst
    pruneBurstTick();

    // When was command issued?
    Tick cmd_at;

    // Issue the next burst and update bus state to reflect
    // when previous command was issued
    if (mem_pkt->isDram()) {
        std::vector<MemPacketQueue>& queue = selQueue(mem_pkt->isRead());
        std::tie(cmd_at, nextBurstAt) =
                 dram->doBurstAccess(mem_pkt, nextBurstAt, queue);

        // Update timing for NVM ranks if NVM is configured on this channel
        if (nvm)
            nvm->addRankToRankDelay(cmd_at);

    } else {
        std::tie(cmd_at, nextBurstAt) =
                 nvm->doBurstAccess(mem_pkt, nextBurstAt);

        // Update timing for DRAM ranks if DRAM is configured on this channel
        if (dram)
            dram->addRankToRankDelay(cmd_at);

    }

    DPRINTF(MemCtrl, "Access to %lld, ready at %lld next burst at %lld.\n",
            mem_pkt->addr, mem_pkt->readyTime, nextBurstAt);

    // Update the minimum timing between the requests, this is a
    // conservative estimate of when we have to schedule the next
    // request to not introduce any unnecessary bubbles. In most cases
    // we will wake up sooner than we have to.
    nextReqTime = nextBurstAt - (dram ? dram->commandOffset() :
                                        nvm->commandOffset());


    // Update the common bus stats
    if (mem_pkt->isRead()) {
        ++readsThisTime;
        // Update latency stats
        stats.requestorReadTotalLat[mem_pkt->requestorId()] +=
            mem_pkt->readyTime - mem_pkt->entryTime;
        stats.requestorReadBytes[mem_pkt->requestorId()] += mem_pkt->size;
    } else {
        ++writesThisTime;
        stats.requestorWriteBytes[mem_pkt->requestorId()] += mem_pkt->size;
        stats.requestorWriteTotalLat[mem_pkt->requestorId()] +=
            mem_pkt->readyTime - mem_pkt->entryTime;
    }
}

void
MemCtrl::processNextReqEvent()
{
    // transition is handled by QoS algorithm if enabled
    if (turnPolicy) {
        // select bus state - only done if QoS algorithms are in use
        busStateNext = selectNextBusState();
    }

    // detect bus state change
    bool switched_cmd_type = (busState != busStateNext);
    // record stats
    recordTurnaroundStats();

    DPRINTF(MemCtrl, "QoS Turnarounds selected state %s %s\n",
            (busState==MemCtrl::READ)?"READ":"WRITE",
            switched_cmd_type?"[turnaround triggered]":"");

    if (switched_cmd_type) {
        if (busState == MemCtrl::READ) {
            DPRINTF(MemCtrl,
                    "Switching to writes after %d reads with %d reads "
                    "waiting\n", readsThisTime, totalReadQueueSize);
            stats.rdPerTurnAround.sample(readsThisTime);
            readsThisTime = 0;
        } else {
            DPRINTF(MemCtrl,
                    "Switching to reads after %d writes with %d writes "
                    "waiting\n", writesThisTime, totalWriteQueueSize);
            stats.wrPerTurnAround.sample(writesThisTime);
            writesThisTime = 0;
        }
    }

    // updates current state
    busState = busStateNext;

    if (nvm) {
        for (auto queue = readQueue.rbegin();
             queue != readQueue.rend(); ++queue) {
            // select non-deterministic NVM read to issue
            // assume that we have the command bandwidth to issue this along
            // with additional RD/WR burst with needed bank operations
            if (nvm->readsWaitingToIssue()) {
                // select non-deterministic NVM read to issue
                nvm->chooseRead(*queue);
            }
        }
    }

    // check ranks for refresh/wakeup - uses busStateNext, so done after
    // turnaround decisions
    // Default to busy status and update based on interface specifics
    bool dram_busy = dram ? dram->isBusy() : true;
    bool nvm_busy = true;
    bool all_writes_nvm = false;
    if (nvm) {
        all_writes_nvm = nvm->numWritesQueued == totalWriteQueueSize;
        bool read_queue_empty = totalReadQueueSize == 0;
        nvm_busy = nvm->isBusy(read_queue_empty, all_writes_nvm);
    }
    // Default state of unused interface is 'true'
    // Simply AND the busy signals to determine if system is busy
    if (dram_busy && nvm_busy) {
        // if all ranks are refreshing wait for them to finish
        // and stall this state machine without taking any further
        // action, and do not schedule a new nextReqEvent
        return;
    }

    // when we get here it is either a read or a write
    if (busState == READ) {

        // track if we should switch or not
        bool switch_to_writes = false;

        if (totalReadQueueSize == 0) {
            // In the case there is no read request to go next,
            // trigger writes if we have passed the low threshold (or
            // if we are draining)
            if (!(totalWriteQueueSize == 0) &&
                (drainState() == DrainState::Draining ||
                 totalWriteQueueSize > writeLowThreshold)) {

                DPRINTF(MemCtrl,
                        "Switching to writes due to read queue empty\n");
                switch_to_writes = true;
            } else {
                // check if we are drained
                // not done draining until in PWR_IDLE state
                // ensuring all banks are closed and
                // have exited low power states
                if (drainState() == DrainState::Draining &&
                    respQueue.empty() && allIntfDrained()) {

                    DPRINTF(Drain, "MemCtrl controller done draining\n");
                    signalDrainDone();
                }

                // nothing to do, not even any point in scheduling an
                // event for the next request
                return;
            }
        } else {

            bool read_found = false;
            MemPacketQueue::iterator to_read;
            uint8_t prio = numPriorities();

            for (auto queue = readQueue.rbegin();
                 queue != readQueue.rend(); ++queue) {

                prio--;

                DPRINTF(QOS,
                        "Checking READ queue [%d] priority [%d elements]\n",
                        prio, queue->size());

                // Figure out which read request goes next
                // If we are changing command type, incorporate the minimum
                // bus turnaround delay which will be rank to rank delay
                to_read = chooseNext((*queue), switched_cmd_type ?
                                               minWriteToReadDataGap() : 0);

                if (to_read != queue->end()) {
                    // candidate read found
                    read_found = true;
                    break;
                }
            }

            // if no read to an available rank is found then return
            // at this point. There could be writes to the available ranks
            // which are above the required threshold. However, to
            // avoid adding more complexity to the code, return and wait
            // for a refresh event to kick things into action again.
            if (!read_found) {
                DPRINTF(MemCtrl, "No Reads Found - exiting\n");
                return;
            }

            auto mem_pkt = *to_read;

            doBurstAccess(mem_pkt);

            // sanity check
            assert(mem_pkt->size <= (mem_pkt->isDram() ?
                                     dram->bytesPerBurst() :
                                     nvm->bytesPerBurst()) );
            assert(mem_pkt->readyTime >= curTick());

            // log the response
            logResponse(MemCtrl::READ, (*to_read)->requestorId(),
                        mem_pkt->qosValue(), mem_pkt->getAddr(), 1,
                        mem_pkt->readyTime - mem_pkt->entryTime);


            // Insert into response queue. It will be sent back to the
            // requestor at its readyTime
            if (respQueue.empty()) {
                assert(!respondEvent.scheduled());
                schedule(respondEvent, mem_pkt->readyTime);
            } else {
                assert(respQueue.back()->readyTime <= mem_pkt->readyTime);
                assert(respondEvent.scheduled());
            }

            respQueue.push_back(mem_pkt);

            // we have so many writes that we have to transition
            // don't transition if the writeRespQueue is full and
            // there are no other writes that can issue
            if ((totalWriteQueueSize > writeHighThreshold) &&
                !(nvm && all_writes_nvm && nvm->writeRespQueueFull())) {
                switch_to_writes = true;
            }

            // remove the request from the queue
            // the iterator is no longer valid.
            readQueue[mem_pkt->qosValue()].erase(to_read);
        }

        // switching to writes, either because the read queue is empty
        // and the writes have passed the low threshold (or we are
        // draining), or because the writes hit the high threshold
        if (switch_to_writes) {
            // transition to writing
            busStateNext = WRITE;
        }
    } else {

        bool write_found = false;
        MemPacketQueue::iterator to_write;
        uint8_t prio = numPriorities();

        for (auto queue = writeQueue.rbegin();
             queue != writeQueue.rend(); ++queue) {

            prio--;

            DPRINTF(QOS,
                    "Checking WRITE queue [%d] priority [%d elements]\n",
                    prio, queue->size());

            // If we are changing command type, incorporate the minimum
            // bus turnaround delay
            to_write = chooseNext((*queue),
                                  switched_cmd_type ? minReadToWriteDataGap() : 0);

            if (to_write != queue->end()) {
                write_found = true;
                break;
            }
        }

        // if there are no writes to a rank that is available to service
        // requests (i.e. rank is in refresh idle state) are found then
        // return. There could be reads to the available ranks. However, to
        // avoid adding more complexity to the code, return at this point and
        // wait for a refresh event to kick things into action again.
        if (!write_found) {
            DPRINTF(MemCtrl, "No Writes Found - exiting\n");
            return;
        }

        auto mem_pkt = *to_write;

        // sanity check
        assert(mem_pkt->size <= (mem_pkt->isDram() ?
                                 dram->bytesPerBurst() :
                                 nvm->bytesPerBurst()) );

        doBurstAccess(mem_pkt);

        isInWriteQueue.erase(burstAlign(mem_pkt->addr, mem_pkt->isDram()));

        // log the response
        logResponse(MemCtrl::WRITE, mem_pkt->requestorId(),
                    mem_pkt->qosValue(), mem_pkt->getAddr(), 1,
                    mem_pkt->readyTime - mem_pkt->entryTime);


        // remove the request from the queue - the iterator is no longer valid
        writeQueue[mem_pkt->qosValue()].erase(to_write);

        delete mem_pkt;

        // If we emptied the write queue, or got sufficiently below the
        // threshold (using the minWritesPerSwitch as the hysteresis) and
        // are not draining, or we have reads waiting and have done enough
        // writes, then switch to reads.
        // If we are interfacing to NVM and have filled the writeRespQueue,
        // with only NVM writes in Q, then switch to reads
        bool below_threshold =
            totalWriteQueueSize + minWritesPerSwitch < writeLowThreshold;

        if (totalWriteQueueSize == 0 ||
            (below_threshold && drainState() != DrainState::Draining) ||
            (totalReadQueueSize && writesThisTime >= minWritesPerSwitch) ||
            (totalReadQueueSize && nvm && nvm->writeRespQueueFull() &&
             all_writes_nvm)) {

            // turn the bus back around for reads again
            busStateNext = MemCtrl::READ;

            // note that we switch back to reads also in the idle
            // case, which eventually will check for any draining and
            // also pause any further scheduling if there is really
            // nothing to do
        }
    }
    // It is possible that a refresh to another rank kicks things back into
    // action before reaching this point.
    if (!nextReqEvent.scheduled())
        schedule(nextReqEvent, std::max(nextReqTime, curTick()));

    // If there is space available and we have writes waiting then let
    // them retry. This is done here to ensure that the retry does not
    // cause a nextReqEvent to be scheduled before we do so as part of
    // the next request processing
    if (retryWrReq && totalWriteQueueSize < writeBufferSize) {
        retryWrReq = false;
        port.sendRetryReq();
    }
}

bool
MemCtrl::packetReady(MemPacket* pkt)
{
    return (pkt->isDram() ?
            dram->burstReady(pkt) : nvm->burstReady(pkt));
}

Tick
MemCtrl::minReadToWriteDataGap()
{
    Tick dram_min = dram ? dram->minReadToWriteDataGap() : MaxTick;
    Tick nvm_min = nvm ? nvm->minReadToWriteDataGap() : MaxTick;
    return std::min(dram_min, nvm_min);
}

Tick
MemCtrl::minWriteToReadDataGap()
{
    Tick dram_min = dram ? dram->minWriteToReadDataGap() : MaxTick;
    Tick nvm_min = nvm ? nvm->minWriteToReadDataGap() : MaxTick;
    return std::min(dram_min, nvm_min);
}

Addr
MemCtrl::burstAlign(Addr addr, bool is_dram) const
{
    if (is_dram)
        return (addr & ~(Addr(dram->bytesPerBurst() - 1)));
    else
        return (addr & ~(Addr(nvm->bytesPerBurst() - 1)));
}
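
// Worked example (hypothetical 64-byte burst size): burstAlign(0x1234)
// masks the low six bits, 0x1234 & ~0x3F = 0x1200, so every address
// within one burst maps to the same key in the isInWriteQueue set used
// for read-after-write detection.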

MemCtrl::CtrlStats::CtrlStats(MemCtrl &_ctrl)
    : Stats::Group(&_ctrl),
    ctrl(_ctrl),

    ADD_STAT(readReqs, "Number of read requests accepted"),
    ADD_STAT(writeReqs, "Number of write requests accepted"),

    ADD_STAT(readBursts,
             "Number of controller read bursts, "
             "including those serviced by the write queue"),
    ADD_STAT(writeBursts,
             "Number of controller write bursts, "
             "including those merged in the write queue"),
    ADD_STAT(servicedByWrQ,
             "Number of controller read bursts serviced by the write queue"),
    ADD_STAT(mergedWrBursts,
             "Number of controller write bursts merged with an existing one"),

    ADD_STAT(neitherReadNorWriteReqs,
             "Number of requests that are neither read nor write"),

    ADD_STAT(avgRdQLen, "Average read queue length when enqueuing"),
    ADD_STAT(avgWrQLen, "Average write queue length when enqueuing"),

    ADD_STAT(numRdRetry, "Number of times read queue was full causing retry"),
    ADD_STAT(numWrRetry, "Number of times write queue was full causing retry"),

    ADD_STAT(readPktSize, "Read request sizes (log2)"),
    ADD_STAT(writePktSize, "Write request sizes (log2)"),

    ADD_STAT(rdQLenPdf, "What read queue length does an incoming req see"),
    ADD_STAT(wrQLenPdf, "What write queue length does an incoming req see"),

    ADD_STAT(rdPerTurnAround,
             "Reads before turning the bus around for writes"),
    ADD_STAT(wrPerTurnAround,
             "Writes before turning the bus around for reads"),

    ADD_STAT(bytesReadWrQ, "Total number of bytes read from write queue"),
    ADD_STAT(bytesReadSys, "Total read bytes from the system interface side"),
    ADD_STAT(bytesWrittenSys,
             "Total written bytes from the system interface side"),

    ADD_STAT(avgRdBWSys, "Average system read bandwidth in MiByte/s"),
    ADD_STAT(avgWrBWSys, "Average system write bandwidth in MiByte/s"),

    ADD_STAT(totGap, "Total gap between requests"),
    ADD_STAT(avgGap, "Average gap between requests"),

    ADD_STAT(requestorReadBytes, "Per-requestor bytes read from memory"),
    ADD_STAT(requestorWriteBytes, "Per-requestor bytes write to memory"),
    ADD_STAT(requestorReadRate,
             "Per-requestor bytes read from memory rate (Bytes/sec)"),
    ADD_STAT(requestorWriteRate,
             "Per-requestor bytes write to memory rate (Bytes/sec)"),
    ADD_STAT(requestorReadAccesses,
             "Per-requestor read serviced memory accesses"),
    ADD_STAT(requestorWriteAccesses,
             "Per-requestor write serviced memory accesses"),
    ADD_STAT(requestorReadTotalLat,
             "Per-requestor read total memory access latency"),
    ADD_STAT(requestorWriteTotalLat,
             "Per-requestor write total memory access latency"),
    ADD_STAT(requestorReadAvgLat,
             "Per-requestor read average memory access latency"),
    ADD_STAT(requestorWriteAvgLat,
             "Per-requestor write average memory access latency")

{
}

void
MemCtrl::CtrlStats::regStats()
{
    using namespace Stats;

    assert(ctrl.system());
    const auto max_requestors = ctrl.system()->maxRequestors();

    avgRdQLen.precision(2);
    avgWrQLen.precision(2);

    readPktSize.init(ceilLog2(ctrl.system()->cacheLineSize()) + 1);
    writePktSize.init(ceilLog2(ctrl.system()->cacheLineSize()) + 1);

    rdQLenPdf.init(ctrl.readBufferSize);
    wrQLenPdf.init(ctrl.writeBufferSize);

    rdPerTurnAround
        .init(ctrl.readBufferSize)
        .flags(nozero);
    wrPerTurnAround
        .init(ctrl.writeBufferSize)
        .flags(nozero);

    avgRdBWSys.precision(2);
    avgWrBWSys.precision(2);
    avgGap.precision(2);

    // per-requestor bytes read and written to memory
    requestorReadBytes
        .init(max_requestors)
        .flags(nozero | nonan);

    requestorWriteBytes
        .init(max_requestors)
        .flags(nozero | nonan);

    // per-requestor bytes read and written to memory rate
    requestorReadRate
        .flags(nozero | nonan)
        .precision(12);

    requestorReadAccesses
        .init(max_requestors)
        .flags(nozero);

    requestorWriteAccesses
        .init(max_requestors)
        .flags(nozero);

    requestorReadTotalLat
        .init(max_requestors)
        .flags(nozero | nonan);

    requestorReadAvgLat
        .flags(nonan)
        .precision(2);

    requestorWriteRate
        .flags(nozero | nonan)
        .precision(12);

    requestorWriteTotalLat
        .init(max_requestors)
        .flags(nozero | nonan);

    requestorWriteAvgLat
        .flags(nonan)
        .precision(2);

    for (int i = 0; i < max_requestors; i++) {
        const std::string requestor = ctrl.system()->getRequestorName(i);
        requestorReadBytes.subname(i, requestor);
        requestorReadRate.subname(i, requestor);
        requestorWriteBytes.subname(i, requestor);
        requestorWriteRate.subname(i, requestor);
        requestorReadAccesses.subname(i, requestor);
        requestorWriteAccesses.subname(i, requestor);
        requestorReadTotalLat.subname(i, requestor);
        requestorReadAvgLat.subname(i, requestor);
        requestorWriteTotalLat.subname(i, requestor);
        requestorWriteAvgLat.subname(i, requestor);
    }

    // Formula stats
    avgRdBWSys = (bytesReadSys / 1000000) / simSeconds;
    avgWrBWSys = (bytesWrittenSys / 1000000) / simSeconds;

    avgGap = totGap / (readReqs + writeReqs);

    requestorReadRate = requestorReadBytes / simSeconds;
    requestorWriteRate = requestorWriteBytes / simSeconds;
    requestorReadAvgLat = requestorReadTotalLat / requestorReadAccesses;
    requestorWriteAvgLat = requestorWriteTotalLat / requestorWriteAccesses;
}

void
MemCtrl::recvFunctional(PacketPtr pkt)
{
    if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
        // rely on the abstract memory
        dram->functionalAccess(pkt);
    } else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
        // rely on the abstract memory
        nvm->functionalAccess(pkt);
    } else {
        panic("Can't handle address range for packet %s\n",
              pkt->print());
    }
}

Port &
MemCtrl::getPort(const string &if_name, PortID idx)
{
    if (if_name != "port") {
        return QoS::MemCtrl::getPort(if_name, idx);
    } else {
        return port;
    }
}

bool
MemCtrl::allIntfDrained() const
{
    // ensure dram is in power down and refresh IDLE states
    bool dram_drained = !dram || dram->allRanksDrained();
    // No outstanding NVM writes
    // All other queues verified as needed with calling logic
    bool nvm_drained = !nvm || nvm->allRanksDrained();
    return (dram_drained && nvm_drained);
}

DrainState
MemCtrl::drain()
{
    // if there is anything in any of our internal queues, keep track
    // of that as well
    if (!(!totalWriteQueueSize && !totalReadQueueSize && respQueue.empty() &&
          allIntfDrained())) {

        DPRINTF(Drain, "Memory controller not drained, write: %d, read: %d,"
                " resp: %d\n", totalWriteQueueSize, totalReadQueueSize,
                respQueue.size());

        // the only queue that is not drained automatically over time
        // is the write queue, thus kick things into action if needed
        if (!totalWriteQueueSize && !nextReqEvent.scheduled()) {
            schedule(nextReqEvent, curTick());
        }

        if (dram)
            dram->drainRanks();

        return DrainState::Draining;
    } else {
        return DrainState::Drained;
    }
}

void
MemCtrl::drainResume()
{
    if (!isTimingMode && system()->isTimingMode()) {
        // if we switched to timing mode, kick things into action,
        // and behave as if we restored from a checkpoint
        startup();
        dram->startup();
    } else if (isTimingMode && !system()->isTimingMode()) {
        // if we switch from timing mode, stop the refresh events to
        // not cause issues with KVM
        if (dram)
            dram->suspend();
    }

    // update the mode
    isTimingMode = system()->isTimingMode();
}

MemCtrl::MemoryPort::MemoryPort(const std::string& name, MemCtrl& _ctrl)
    : QueuedResponsePort(name, &_ctrl, queue), queue(_ctrl, *this, true),
      ctrl(_ctrl)
{ }

AddrRangeList
MemCtrl::MemoryPort::getAddrRanges() const
{
    AddrRangeList ranges;
    if (ctrl.dram) {
        DPRINTF(DRAM, "Pushing DRAM ranges to port\n");
        ranges.push_back(ctrl.dram->getAddrRange());
    }
    if (ctrl.nvm) {
        DPRINTF(NVM, "Pushing NVM ranges to port\n");
        ranges.push_back(ctrl.nvm->getAddrRange());
    }
    return ranges;
}

void
MemCtrl::MemoryPort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(ctrl.name());

    if (!queue.trySatisfyFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        ctrl.recvFunctional(pkt);
    }

    pkt->popLabel();
}

Tick
MemCtrl::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return ctrl.recvAtomic(pkt);
}

bool
MemCtrl::MemoryPort::recvTimingReq(PacketPtr pkt)
{
    // pass it to the memory controller
    return ctrl.recvTimingReq(pkt);
}

MemCtrl*
MemCtrlParams::create()
{
    return new MemCtrl(this);
}
BurstHelper::burstCount
const unsigned int burstCount
Number of bursts requred for a system packet.
Definition: mem_ctrl.hh:77
MemPacket::rank
const uint8_t rank
Will be populated by address decoder.
Definition: mem_ctrl.hh:113
fatal
#define fatal(...)
This implements a cprintf based fatal() function.
Definition: logging.hh:183
BurstHelper::burstsServiced
unsigned int burstsServiced
Number of bursts serviced so far for a system packet.
Definition: mem_ctrl.hh:80
MemPacket::requestorId
RequestorID requestorId() const
Get the packet RequestorID (interface compatibility with Packet)
Definition: mem_ctrl.hh:165
MemCtrl::processNextReqEvent
void processNextReqEvent()
Bunch of things requires to setup "events" in gem5 When event "respondEvent" occurs for example,...
Definition: mem_ctrl.cc:857
QoS
Definition: mem_ctrl.cc:42
Event::scheduled
bool scheduled() const
Determine if the current event is scheduled.
Definition: eventq.hh:460
MemCtrl::nextReqEvent
EventFunctionWrapper nextReqEvent
Definition: mem_ctrl.hh:288
NVMInterface::numWritesQueued
uint32_t numWritesQueued
Definition: mem_interface.hh:1148
MemCtrl::CtrlStats::avgRdQLen
Stats::Average avgRdQLen
Definition: mem_ctrl.hh:543
Packet::isResponse
bool isResponse() const
Definition: packet.hh:560
MemPacket::burstHelper
BurstHelper * burstHelper
A pointer to the BurstHelper if this MemPacket is a split packet If not a split packet (common case),...
Definition: mem_ctrl.hh:142
AbstractMemory::getAddrRange
AddrRange getAddrRange() const
Get the address range.
Definition: abstract_mem.cc:242
MemCtrl::CtrlStats::writePktSize
Stats::Vector writePktSize
Definition: mem_ctrl.hh:549
MemInterface::minWriteToReadDataGap
Tick minWriteToReadDataGap() const
Definition: mem_interface.hh:264
system.hh
DRAMInterface::doBurstAccess
std::pair< Tick, Tick > doBurstAccess(MemPacket *mem_pkt, Tick next_burst_at, const std::vector< MemPacketQueue > &queue)
Actually do the burst - figure out the latency it will take to service the req based on bank state,...
Definition: mem_interface.cc:456
Packet::cacheResponding
bool cacheResponding() const
Definition: packet.hh:619
MemCtrl::startup
virtual void startup() override
startup() is the final initialization call before simulation.
Definition: mem_ctrl.cc:107
MemCtrl::writeQueueFull
bool writeQueueFull(unsigned int pkt_count) const
Check if the write queue has room for more entries.
Definition: mem_ctrl.cc:170
MemCtrl::CtrlStats::writeReqs
Stats::Scalar writeReqs
Definition: mem_ctrl.hh:536
QoS::MemCtrl::WRITE
@ WRITE
Definition: mem_ctrl.hh:63
System::isTimingMode
bool isTimingMode() const
Is the system in timing mode?
Definition: system.hh:269
MemCtrl::writeLowThreshold
const uint32_t writeLowThreshold
Definition: mem_ctrl.hh:480
MemPacket::isDram
bool isDram() const
Return true if its a DRAM access.
Definition: mem_ctrl.hh:194
Packet::getAddr
Addr getAddr() const
Definition: packet.hh:754
MemCtrl::CtrlStats::requestorWriteBytes
Stats::Vector requestorWriteBytes
Definition: mem_ctrl.hh:567
QoS::MemCtrl::schedule
uint8_t schedule(RequestorID id, uint64_t data)
Definition: mem_ctrl.cc:207
MemPacket::readyTime
Tick readyTime
When will request leave the controller.
Definition: mem_ctrl.hh:99
QoS::MemCtrl::recordTurnaroundStats
void recordTurnaroundStats()
Record statistics on turnarounds based on busStateNext and busState values.
Definition: mem_ctrl.cc:348
Packet::payloadDelay
uint32_t payloadDelay
The extra pipelining delay from seeing the packet until the end of payload is transmitted by the comp...
Definition: packet.hh:412
MemCtrl::CtrlStats::bytesReadSys
Stats::Scalar bytesReadSys
Definition: mem_ctrl.hh:556
ArmISA::i
Bitfield< 7 > i
Definition: miscregs_types.hh:63
MemCtrl::CtrlStats::requestorReadAccesses
Stats::Vector requestorReadAccesses
Definition: mem_ctrl.hh:574
MemCtrl::doBurstAccess
void doBurstAccess(MemPacket *mem_pkt)
Actually do the burst based on media specific access function.
Definition: mem_ctrl.cc:800
MemCtrl::CtrlStats::writeBursts
Stats::Scalar writeBursts
Definition: mem_ctrl.hh:538
NVMInterface::chooseNextFRFCFS
std::pair< MemPacketQueue::iterator, Tick > chooseNextFRFCFS(MemPacketQueue &queue, Tick min_col_at) const override
For FR-FCFS policy, find first NVM command that can issue default to first command to prepped region.
Definition: mem_interface.cc:2073
MemCtrl::writeQueue
std::vector< MemPacketQueue > writeQueue
Definition: mem_ctrl.hh:433
MemCtrl::getPort
Port & getPort(const std::string &if_name, PortID idx=InvalidPortID) override
Get a port with a given name and index.
Definition: mem_ctrl.cc:1357
QoS::MemCtrl::numPriorities
uint8_t numPriorities() const
Gets the total number of priority levels in the QoS memory controller.
Definition: mem_ctrl.hh:346
MemCtrl::drain
DrainState drain() override
Draining is the process of clearing out the states of SimObjects.These are the SimObjects that are pa...
Definition: mem_ctrl.cc:1378
DRAMInterface::respondEvent
void respondEvent(uint8_t rank)
Complete response process for DRAM when read burst is complete This will update the counters and chec...
Definition: mem_interface.cc:940
Packet::isRead
bool isRead() const
Definition: packet.hh:556
Tick
uint64_t Tick
Tick count type.
Definition: types.hh:63
DRAMInterface::isBusy
bool isBusy()
This function checks if ranks are actively refreshing and therefore busy.
Definition: mem_interface.cc:892
DRAMInterface::setupRank
void setupRank(const uint8_t rank, const bool is_read) override
Setup the rank based on packet received.
Definition: mem_interface.cc:929
MemCtrl::CtrlStats::numRdRetry
Stats::Scalar numRdRetry
Definition: mem_ctrl.hh:546
PortID
int16_t PortID
Port index/ID type, and a symbolic name for an invalid port id.
Definition: types.hh:237
Packet::pushLabel
void pushLabel(const std::string &lbl)
Push label for PrintReq (safe to call unconditionally).
Definition: packet.hh:1393
AddrRange::contains
bool contains(const Addr &a) const
Determine if the range contains an address.
Definition: addr_range.hh:435
QoS::MemCtrl::READ
@ READ
Definition: mem_ctrl.hh:63
Packet::requestorId
RequestorID requestorId() const
Definition: packet.hh:740
std::vector
STL vector class.
Definition: stl.hh:37
MemCtrl::isInWriteQueue
std::unordered_set< Addr > isInWriteQueue
To avoid iterating over the write queue to check for overlapping transactions, maintain a set of burs...
Definition: mem_ctrl.hh:442
Packet::getSize
unsigned getSize() const
Definition: packet.hh:764
MemCtrl::drainResume
virtual void drainResume() override
Resume execution after a successful drain.
Definition: mem_ctrl.cc:1405
MemCtrl::burstAlign
Addr burstAlign(Addr addr, bool is_dram) const
Burst-align an address.
Definition: mem_ctrl.cc:1166
MemCtrl::CtrlStats::servicedByWrQ
Stats::Scalar servicedByWrQ
Definition: mem_ctrl.hh:539
QueuedResponsePort::schedTimingResp
void schedTimingResp(PacketPtr pkt, Tick when)
Schedule the sending of a timing response.
Definition: qport.hh:90
MemCtrl::CtrlStats::readPktSize
Stats::Vector readPktSize
Definition: mem_ctrl.hh:548
MemCtrl::CtrlStats::regStats
void regStats() override
Callback to set stat parameters.
Definition: mem_ctrl.cc:1246
MemCtrl::readBufferSize
const uint32_t readBufferSize
The following are basic design parameters of the memory controller, and are initialized based on para...
Definition: mem_ctrl.hh:477
NVMInterface::accessLatency
Tick accessLatency() const override
Definition: mem_interface.hh:1228
Packet::headerDelay
uint32_t headerDelay
The extra delay from seeing the packet until the header is transmitted.
Definition: packet.hh:394
Packet::print
void print(std::ostream &o, int verbosity=0, const std::string &prefix="") const
Definition: packet.cc:389
AbstractMemory::functionalAccess
void functionalAccess(PacketPtr pkt)
Perform an untimed memory read or write without changing anything but the memory itself.
Definition: abstract_mem.cc:475
MemCtrl::selQueue
std::vector< MemPacketQueue > & selQueue(bool is_read)
Select either the read or write queue.
Definition: mem_ctrl.hh:600
MemPacket::entryTime
const Tick entryTime
When did request enter the controller.
Definition: mem_ctrl.hh:96
MemCtrl::retryWrReq
bool retryWrReq
Definition: mem_ctrl.hh:279
DrainState::Drained
@ Drained
Buffers drained, ready for serialization/handover.
QoS::MemCtrl::qosSchedule
uint8_t qosSchedule(std::initializer_list< Queues * > queues_ptr, uint64_t queue_entry_size, const PacketPtr pkt)
Assign priority to a packet by executing the configured QoS policy.
Definition: mem_ctrl.hh:475
MemCtrl::allIntfDrained
bool allIntfDrained() const
Ensure that all interfaced have drained commands.
Definition: mem_ctrl.cc:1367
NVMInterface::setupRank
void setupRank(const uint8_t rank, const bool is_read) override
Setup the rank based on packet received.
Definition: mem_interface.cc:2061
MemCtrl::CtrlStats::wrQLenPdf
Stats::Vector wrQLenPdf
Definition: mem_ctrl.hh:551
MemCtrl::readsThisTime
uint32_t readsThisTime
Definition: mem_ctrl.hh:483
MemCtrl::CtrlStats::requestorWriteAccesses
Stats::Vector requestorWriteAccesses
Definition: mem_ctrl.hh:575
DrainState
DrainState
Object drain/handover states.
Definition: drain.hh:71
MemCtrl::writesThisTime
uint32_t writesThisTime
Definition: mem_ctrl.hh:482
NVMInterface::allRanksDrained
bool allRanksDrained() const override
Check drain state of NVM interface.
Definition: mem_interface.hh:1169
QoS::MemCtrl::logResponse
void logResponse(BusState dir, RequestorID id, uint8_t qos, Addr addr, uint64_t entries, double delay)
Called upon receiving a response, updates statistics and updates queues status.
Definition: mem_ctrl.cc:138
MemCtrl::CtrlStats::rdQLenPdf
Stats::Vector rdQLenPdf
Definition: mem_ctrl.hh:550
NVMInterface::writeRespQueueFull
bool writeRespQueueFull() const
Check if the write response queue has reached defined threshold.
Definition: mem_interface.hh:1236
MemCtrl::prevArrival
Tick prevArrival
Definition: mem_ctrl.hh:516
MemCtrl::MemoryPort::recvFunctional
void recvFunctional(PacketPtr pkt)
Receive a functional request packet from the peer.
Definition: mem_ctrl.cc:1444
QoS::MemCtrl::totalWriteQueueSize
uint64_t totalWriteQueueSize
Total write request packets queue length in #packets.
Definition: mem_ctrl.hh:113
MemCtrl::CtrlStats::bytesWrittenSys
Stats::Scalar bytesWrittenSys
Definition: mem_ctrl.hh:557
MemCtrl::CtrlStats::mergedWrBursts
Stats::Scalar mergedWrBursts
Definition: mem_ctrl.hh:540
ceilLog2
int ceilLog2(const T &n)
Definition: intmath.hh:88
Packet::qosValue
uint8_t qosValue() const
QoS Value getter. Returns 0 if QoS value was never set (constructor default).
Definition: packet.hh:729
divCeil
T divCeil(const T &a, const U &b)
Definition: intmath.hh:114
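A standalone sketch of the typical use of divCeil in a memory controller: computing how many bursts a request spans. divCeil is re-declared here so the sketch is self-contained; the sizes are made-up values.

```cpp
#include <cstdint>
#include <iostream>

// Ceiling division, matching the signature shown above.
template <typename T, typename U>
T divCeil(const T &a, const U &b) { return (a + b - 1) / b; }

int main()
{
    const std::uint32_t bytes_per_burst = 64;   // hypothetical burst size
    const std::uint32_t pkt_size = 200;         // hypothetical request size

    // A 200-byte request needs ceil(200 / 64) = 4 bursts.
    std::cout << divCeil(pkt_size, bytes_per_burst) << " bursts\n";
    return 0;
}
```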
mem_interface.hh
AbstractMemory::access
void access(PacketPtr pkt)
Perform an untimed memory access and update all the state (e.g.
Definition: abstract_mem.cc:373
MemCtrl::minReadToWriteDataGap
Tick minReadToWriteDataGap()
Calculate the minimum delay used when scheduling a read-to-write transition.
Definition: mem_ctrl.cc:1150
DRAMInterface::burstReady
bool burstReady(MemPacket *pkt) const override
Check if a burst operation can be issued to the DRAM.
Definition: mem_interface.hh:971
MemCtrl::CtrlStats::requestorReadTotalLat
Stats::Vector requestorReadTotalLat
Definition: mem_ctrl.hh:578
MemCtrl::minWriteToReadDataGap
Tick minWriteToReadDataGap()
Calculate the minimum delay used when scheduling a write-to-read transition.
Definition: mem_ctrl.cc:1158
DRAMInterface::checkRefreshState
void checkRefreshState(uint8_t rank)
Check the refresh state to determine if refresh needs to be kicked back into action after a read resp...
Definition: mem_interface.cc:987
QoS::MemCtrl::logRequest
void logRequest(BusState dir, RequestorID id, uint8_t qos, Addr addr, uint64_t entries)
Called upon receiving a request, updates statistics and updates queue status.
Definition: mem_ctrl.cc:81
QoS::MemCtrl::busStateNext
BusState busStateNext
Bus state for the next request event triggered.
Definition: mem_ctrl.hh:122
SimObject::getPort
virtual Port & getPort(const std::string &if_name, PortID idx=InvalidPortID)
Get a port with a given name and index.
Definition: sim_object.cc:123
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:234
ADD_STAT
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Definition: group.hh:67
MemCtrl::frontendLatency
const Tick frontendLatency
Pipeline latency of the controller frontend.
Definition: mem_ctrl.hh:496
simSeconds
Stats::Formula simSeconds
Definition: stat_control.cc:61
MemCtrl::inWriteBusState
bool inWriteBusState(bool next_state) const
Check the current direction of the memory channel.
Definition: mem_ctrl.cc:787
MemCtrl::respondEvent
EventFunctionWrapper respondEvent
Definition: mem_ctrl.hh:291
MemCtrl::stats
CtrlStats stats
Definition: mem_ctrl.hh:586
Packet::cmdString
const std::string & cmdString() const
Return the string name of the cmd field (for debugging and tracing).
Definition: packet.hh:551
MemCtrl::memSchedPolicy
Enums::MemSched memSchedPolicy
Memory controller configuration initialized based on parameter values.
Definition: mem_ctrl.hh:489
MemCtrl::nextBurstAt
Tick nextBurstAt
Until when must we wait before issuing the next RD/WR burst?
Definition: mem_ctrl.hh:514
Port
Ports are used to interface objects to each other.
Definition: port.hh:56
Packet::needsResponse
bool needsResponse() const
Definition: packet.hh:570
QoS::MemCtrl::totalReadQueueSize
uint64_t totalReadQueueSize
Total read request packets queue length in #packets.
Definition: mem_ctrl.hh:110
MemCtrl::CtrlStats::readReqs
Stats::Scalar readReqs
Definition: mem_ctrl.hh:535
DRAMInterface::commandOffset
Tick commandOffset() const override
Definition: mem_interface.hh:927
MemCtrl::pruneBurstTick
void pruneBurstTick()
Remove commands that have already issued from burstTicks.
Definition: mem_ctrl.cc:666
MemPacket::size
unsigned int size
The size of this DRAM packet in bytes. It is always equal to or smaller than the burst size.
Definition: mem_ctrl.hh:136
MemCtrl::CtrlStats::rdPerTurnAround
Stats::Histogram rdPerTurnAround
Definition: mem_ctrl.hh:552
NVMInterface::commandOffset
Tick commandOffset() const override
Definition: mem_interface.hh:1174
MemCtrl::MemoryPort::getAddrRanges
virtual AddrRangeList getAddrRanges() const
Get a list of the non-overlapping address ranges the owner is responsible for.
Definition: mem_ctrl.cc:1429
MemCtrl::CtrlStats::avgWrQLen
Stats::Average avgWrQLen
Definition: mem_ctrl.hh:544
Drainable::signalDrainDone
void signalDrainDone() const
Signal that an object is drained.
Definition: drain.hh:301
MemCtrl::printQs
void printQs() const
Used for debugging to observe the contents of the queues.
Definition: mem_ctrl.cc:381
MemCtrl::dram
DRAMInterface *const dram
Create pointer to interface of the actual dram media when connected.
Definition: mem_ctrl.hh:464
MemCtrl::processRespondEvent
void processRespondEvent()
Definition: mem_ctrl.cc:483
MemPacket
A memory packet stores packets along with the timestamp of when the packet entered the queue,...
Definition: mem_ctrl.hh:91
QueuedResponsePort
A queued port is a port that has an infinite queue for outgoing packets and thus decouples the module...
Definition: qport.hh:58
NVMInterface::isBusy
bool isBusy(bool read_queue_empty, bool all_writes_nvm)
This function checks if ranks are busy.
Definition: mem_interface.cc:2469
mem_ctrl.hh
MemCtrl::MemoryPort::recvTimingReq
bool recvTimingReq(PacketPtr)
Receive a timing request from the peer.
Definition: mem_ctrl.cc:1465
MemCtrl::writeHighThreshold
const uint32_t writeHighThreshold
Definition: mem_ctrl.hh:479
DRAMInterface::startup
void startup() override
Iterate through dram ranks and instantiate per rank startup routine.
Definition: mem_interface.cc:879
Addr
uint64_t Addr
Address type. This will probably be moved somewhere else in the near future.
Definition: types.hh:142
MemCtrl::nvm
NVMInterface *const nvm
Create pointer to interface of the actual nvm media when connected.
Definition: mem_ctrl.hh:469
DRAMInterface::allRanksDrained
bool allRanksDrained() const override
Return true once refresh is complete for all ranks and there are no additional commands enqueued.
Definition: mem_interface.cc:1014
name
const std::string & name()
Definition: trace.cc:50
MemCtrl::packetReady
bool packetReady(MemPacket *pkt)
Determine if there is a packet that can issue.
Definition: mem_ctrl.cc:1143
Stats::nozero
const FlagsType nozero
Don't print if this is zero.
Definition: info.hh:57
MemCtrl::init
virtual void init() override
init() is called after all C++ SimObjects have been created and all ports are connected.
Definition: mem_ctrl.cc:97
Packet::hasData
bool hasData() const
Definition: packet.hh:576
Drainable::drainState
DrainState drainState() const
Return the current drain state of an object.
Definition: drain.hh:320
MemCtrl::retryRdReq
bool retryRdReq
Remember if we have to retry a request when available.
Definition: mem_ctrl.hh:278
MemCtrl::commandWindow
const Tick commandWindow
Length of a command window, used to check command bandwidth.
Definition: mem_ctrl.hh:509
MemCtrl::isTimingMode
bool isTimingMode
Remember if the memory system is in timing mode.
Definition: mem_ctrl.hh:273
SimObject::name
virtual const std::string name() const
Definition: sim_object.hh:133
MemCtrl::respQueue
std::deque< MemPacket * > respQueue
Response queue where read packets wait after we're done working with them, but it's not time to send ...
Definition: mem_ctrl.hh:452
MemCtrl::verifySingleCmd
Tick verifySingleCmd(Tick cmd_tick, Tick max_cmds_per_burst)
Check for command bus contention for single cycle command.
Definition: mem_ctrl.cc:687
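A simplified, standalone sketch of the single-cycle contention check: align the command tick to its burst window, defer whole windows while the window is already at its command budget, and book the slot. The container and values are hypothetical simplifications of the scheme described above.

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <unordered_set>

using Tick = std::uint64_t;

Tick verifySingleCmd(std::unordered_multiset<Tick> &burst_ticks,
                     Tick cmd_tick, std::size_t max_cmds_per_burst,
                     Tick window)
{
    // Align to the containing burst window, then defer whole windows
    // until one has a free command slot.
    Tick burst_tick = cmd_tick - (cmd_tick % window);
    while (burst_ticks.count(burst_tick) >= max_cmds_per_burst)
        burst_tick += window;
    burst_ticks.insert(burst_tick);          // book the slot
    return std::max(cmd_tick, burst_tick);   // never issue earlier
}

int main()
{
    std::unordered_multiset<Tick> burst_ticks;
    // With a budget of 2 commands per 5000-tick window, the third
    // command at tick 12345 slides to the next window (15000).
    for (int i = 0; i < 3; ++i)
        std::cout << verifySingleCmd(burst_ticks, 12345, 2, 5000) << '\n';
    return 0;
}
```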
QoS::MemCtrl::turnPolicy
const std::unique_ptr< TurnaroundPolicy > turnPolicy
QoS Bus Turnaround Policy: selects the bus direction (READ/WRITE)
Definition: mem_ctrl.hh:70
MemCtrl::writeBufferSize
const uint32_t writeBufferSize
Definition: mem_ctrl.hh:478
MemCtrl::pendingDelete
std::unique_ptr< Packet > pendingDelete
Upstream caches need this packet until true is returned, so hold it for deletion until a subsequent c...
Definition: mem_ctrl.hh:592
DRAMInterface::addRankToRankDelay
void addRankToRankDelay(Tick cmd_at) override
Add rank to rank delay to bus timing to all DRAM banks in all ranks when access to an alternate inte...
Definition: mem_interface.cc:717
MemCtrl::CtrlStats::bytesReadWrQ
Stats::Scalar bytesReadWrQ
Definition: mem_ctrl.hh:555
panic_if
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
Definition: logging.hh:197
MemCtrl::addToWriteQueue
void addToWriteQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram)
Decode the incoming pkt, create a mem_pkt and push to the back of the write queue.
Definition: mem_ctrl.cc:300
MemCtrl::accessAndRespond
void accessAndRespond(PacketPtr pkt, Tick static_latency)
When a packet reaches its "readyTime" in the response Q, use the "access()" method in AbstractMemory ...
Definition: mem_ctrl.cc:622
MemPacket::pkt
const PacketPtr pkt
This comes from the outside world.
Definition: mem_ctrl.hh:102
MemCtrl
The memory controller is a single-channel memory controller capturing the most important timing const...
Definition: mem_ctrl.hh:236
MemCtrl::verifyMultiCmd
Tick verifyMultiCmd(Tick cmd_tick, Tick max_cmds_per_burst, Tick max_multi_cmd_split=0)
Check for command bus contention for multi-cycle (2 currently) command.
Definition: mem_ctrl.cc:710
MemInterface::bytesPerBurst
uint32_t bytesPerBurst() const
Definition: mem_interface.hh:233
MemCtrl::minWritesPerSwitch
const uint32_t minWritesPerSwitch
Definition: mem_ctrl.hh:481
MemCtrl::CtrlStats::CtrlStats
CtrlStats(MemCtrl &ctrl)
Definition: mem_ctrl.cc:1174
std
Overload hash function for BasicBlockRange type.
Definition: vec_reg.hh:587
MemPacket::qosValue
void qosValue(const uint8_t qv)
Set the packet QoS value (interface compatibility with Packet)
Definition: mem_ctrl.hh:153
MemCtrl::recvTimingReq
bool recvTimingReq(PacketPtr pkt)
Definition: mem_ctrl.cc:406
NVMInterface::addRankToRankDelay
void addRankToRankDelay(Tick cmd_at) override
Add rank to rank delay to bus timing to all NVM banks in all ranks when access to an alternate inter...
Definition: mem_interface.cc:2452
MemCtrl::port
MemoryPort port
Our incoming port, for a multi-ported controller add a crossbar in front of it.
Definition: mem_ctrl.hh:268
MemCtrl::MemCtrl
MemCtrl(const MemCtrlParams *p)
Definition: mem_ctrl.cc:54
Packet
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:257
MemInterface::minReadToWriteDataGap
Tick minReadToWriteDataGap() const
Definition: mem_interface.hh:258
std::deque< MemPacket * >
MemCtrl::inReadBusState
bool inReadBusState(bool next_state) const
Check the current direction of the memory channel.
Definition: mem_ctrl.cc:774
MemCtrl::addToReadQueue
void addToReadQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram)
When a new read comes in, first check if the write queue has a pending request to the same address....
Definition: mem_ctrl.cc:181
MemCtrl::readQueue
std::vector< MemPacketQueue > readQueue
The controller's main read and write queues, with support for QoS reordering.
Definition: mem_ctrl.hh:432
MemCtrl::CtrlStats::numWrRetry
Stats::Scalar numWrRetry
Definition: mem_ctrl.hh:547
DRAMInterface::chooseNextFRFCFS
std::pair< MemPacketQueue::iterator, Tick > chooseNextFRFCFS(MemPacketQueue &queue, Tick min_col_at) const override
For FR-FCFS policy, find first DRAM command that can issue.
Definition: mem_interface.cc:169
addr
ip6_addr_t addr
Definition: inet.hh:423
Port::isConnected
bool isConnected() const
Is this port currently connected to a peer?
Definition: port.hh:128
MemCtrl::CtrlStats::totGap
Stats::Scalar totGap
Definition: mem_ctrl.hh:562
Stats::DistBase::sample
void sample(const U &v, int n=1)
Add a value to the distribution n times.
Definition: statistics.hh:1924
Packet::isWrite
bool isWrite() const
Definition: packet.hh:557
MemPacket::addr
Addr addr
The starting address of the packet.
Definition: mem_ctrl.hh:130
MemCtrl::CtrlStats::requestorReadBytes
Stats::Vector requestorReadBytes
Definition: mem_ctrl.hh:566
MemCtrl::CtrlStats::wrPerTurnAround
Stats::Histogram wrPerTurnAround
Definition: mem_ctrl.hh:553
MemCtrl::burstTicks
std::unordered_multiset< Tick > burstTicks
Holds count of commands issued in burst window starting at defined Tick.
Definition: mem_ctrl.hh:459
Stats
Definition: statistics.cc:61
QoS::MemCtrl::system
System * system() const
read the system pointer
Definition: mem_ctrl.hh:350
MemCtrl::getBurstWindow
Tick getBurstWindow(Tick cmd_tick)
Calculate burst window aligned tick.
Definition: mem_ctrl.cc:679
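The alignment itself is simple enough to show in isolation; a sketch assuming a hypothetical 5000-tick command window:

```cpp
#include <cstdint>
#include <iostream>

using Tick = std::uint64_t;

// Round a command tick down to the start of its command window.
Tick getBurstWindow(Tick cmd_tick, Tick command_window)
{
    return cmd_tick - (cmd_tick % command_window);
}

int main()
{
    // With a 5000-tick window, tick 12345 falls in the window
    // starting at 10000.
    std::cout << getBurstWindow(12345, 5000) << '\n';   // prints 10000
    return 0;
}
```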
trace.hh
MemCtrl::recvFunctional
void recvFunctional(PacketPtr pkt)
Definition: mem_ctrl.cc:1342
ResponsePort::sendRetryReq
void sendRetryReq()
Send a retry to the request port that previously attempted a sendTimingReq to this response port and ...
Definition: port.hh:398
MemCtrl::CtrlStats::readBursts
Stats::Scalar readBursts
Definition: mem_ctrl.hh:537
DRAMInterface::drainRanks
void drainRanks()
Iterate through dram ranks to exit self-refresh in order to drain.
Definition: mem_interface.cc:1000
MemCtrl::chooseNextFRFCFS
MemPacketQueue::iterator chooseNextFRFCFS(MemPacketQueue &queue, Tick extra_col_delay)
For FR-FCFS policy reorder the read/write queue depending on row buffer hits and earliest bursts avai...
Definition: mem_ctrl.cc:580
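A toy sketch of the FR-FCFS preference this entry describes: pick the oldest request that hits an open row, and fall back to plain FCFS when there is none. The row_hit flag abstracts away the real bank-state lookup; everything here is hypothetical.

```cpp
#include <iostream>
#include <vector>

struct Req { int id; bool row_hit; };   // hypothetical queue entry

const Req *chooseNextFRFCFS(const std::vector<Req> &queue)
{
    // The queue is in arrival order, so the first hit is the oldest hit.
    for (const auto &r : queue)
        if (r.row_hit)
            return &r;
    // No row hit anywhere: fall back to the oldest request (FCFS).
    return queue.empty() ? nullptr : &queue.front();
}

int main()
{
    std::vector<Req> q{{1, false}, {2, true}, {3, true}};
    std::cout << "pick request " << chooseNextFRFCFS(q)->id << '\n'; // 2
    return 0;
}
```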
MemCtrl::MemoryPort::MemoryPort
MemoryPort(const std::string &name, MemCtrl &_ctrl)
Definition: mem_ctrl.cc:1423
MipsISA::p
Bitfield< 0 > p
Definition: pra_constants.hh:323
std::list< AddrRange >
MemCtrl::backendLatency
const Tick backendLatency
Pipeline latency of the backend and PHY.
Definition: mem_ctrl.hh:503
MemCtrl::CtrlStats::requestorWriteTotalLat
Stats::Vector requestorWriteTotalLat
Definition: mem_ctrl.hh:579
fatal_if
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condi...
Definition: logging.hh:219
NVMInterface::chooseRead
void chooseRead(MemPacketQueue &queue)
Select read command to issue asynchronously.
Definition: mem_interface.cc:2129
MemCtrl::chooseNext
MemPacketQueue::iterator chooseNext(MemPacketQueue &queue, Tick extra_col_delay)
The memory scheduler/arbiter - picks which request needs to go next, based on the specified policy suc...
Definition: mem_ctrl.cc:545
DRAMInterface::accessLatency
Tick accessLatency() const override
Definition: mem_interface.hh:932
ResponsePort::sendRangeChange
void sendRangeChange() const
Called by the owner to send a range change.
Definition: port.hh:293
MemCtrl::recvAtomic
Tick recvAtomic(PacketPtr pkt)
Definition: mem_ctrl.cc:123
QoS::MemCtrl::busState
BusState busState
Bus state used to control the read/write switching and drive the scheduling of the next request.
Definition: mem_ctrl.hh:119
QoS::MemCtrl::selectNextBusState
BusState selectNextBusState()
Returns next bus direction (READ or WRITE) based on configured policy.
Definition: mem_ctrl.cc:236
MemInterface::decodePacket
MemPacket * decodePacket(const PacketPtr pkt, Addr pkt_addr, unsigned int size, bool is_read, bool is_dram)
Address decoder to figure out physical mapping onto ranks, banks, and rows.
Definition: mem_interface.cc:83
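For intuition, a standalone sketch of the kind of bit slicing such a decoder performs, assuming a made-up row/rank/bank/column layout with illustrative field widths rather than a real device geometry:

```cpp
#include <cstdint>
#include <iostream>

int main()
{
    const std::uint64_t addr = 0x12345678;   // hypothetical address

    const unsigned col_bits  = 7;   // 128 columns (illustrative)
    const unsigned bank_bits = 3;   // 8 banks
    const unsigned rank_bits = 1;   // 2 ranks

    std::uint64_t a = addr >> 6;    // drop the 64B burst offset
    std::uint64_t col  = a & ((1u << col_bits) - 1);  a >>= col_bits;
    std::uint64_t bank = a & ((1u << bank_bits) - 1); a >>= bank_bits;
    std::uint64_t rank = a & ((1u << rank_bits) - 1); a >>= rank_bits;
    std::uint64_t row  = a;         // remaining high bits select the row

    std::cout << "rank " << rank << " bank " << bank
              << " row " << row << " col " << col << '\n';
    return 0;
}
```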
DRAMInterface::suspend
void suspend()
Iterate through DRAM ranks and suspend them.
Definition: mem_interface.cc:1029
MaxTick
const Tick MaxTick
Definition: types.hh:65
NVMInterface::readsWaitingToIssue
bool readsWaitingToIssue() const
Definition: mem_interface.hh:1242
MemCtrl::MemoryPort::recvAtomic
Tick recvAtomic(PacketPtr pkt)
Receive an atomic request packet from the peer.
Definition: mem_ctrl.cc:1459
MemPacket::isRead
bool isRead() const
Return true if its a read packet (interface compatibility with Packet)
Definition: mem_ctrl.hh:183
Stats::nonan
const FlagsType nonan
Don't print if this is NAN.
Definition: info.hh:59
NVMInterface::doBurstAccess
std::pair< Tick, Tick > doBurstAccess(MemPacket *pkt, Tick next_burst_at)
Actually do the burst and update stats.
Definition: mem_interface.cc:2278
Packet::popLabel
void popLabel()
Pop label for PrintReq (safe to call unconditionally).
Definition: packet.hh:1403
DrainState::Draining
@ Draining
Draining buffers pending serialization/handover.
MemCtrl::readQueueFull
bool readQueueFull(unsigned int pkt_count) const
Check if the read queue has room for more entries.
Definition: mem_ctrl.cc:158
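A sketch of the admission arithmetic this check implies: the incoming packet count is added to reads already queued and reads waiting in the response queue, then compared against the read buffer size. All sizes below are hypothetical.

```cpp
#include <cstdint>
#include <iostream>

bool readQueueFull(std::uint64_t queued_reads, std::uint64_t queued_resps,
                   unsigned int pkt_count, std::uint32_t read_buffer_size)
{
    // Reject if accepting pkt_count more bursts would overflow the
    // buffer shared by the read and response queues.
    return queued_reads + queued_resps + pkt_count > read_buffer_size;
}

int main()
{
    // 30 queued + 2 awaiting response + 4 incoming > 32 entries -> full.
    std::cout << std::boolalpha
              << readQueueFull(30, 2, 4, 32) << '\n';   // true
    return 0;
}
```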
MemCtrl::nextReqTime
Tick nextReqTime
The soonest you have to start thinking about the next request is the longest access time that can occ...
Definition: mem_ctrl.hh:524
panic
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:171
NVMInterface::burstReady
bool burstReady(MemPacket *pkt) const override
Check if a burst operation can be issued to the NVM.
Definition: mem_interface.cc:2269
BurstHelper
A burst helper helps organize and manage a packet that is larger than the memory burst size.
Definition: mem_ctrl.hh:72
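A minimal sketch of the burst-helper idea: a request larger than one burst is split into N bursts, and the original packet responds only once all of them have been serviced. The field names mirror the description above, but the code is purely illustrative.

```cpp
#include <iostream>

struct BurstHelper {
    const unsigned burst_count;     // bursts this packet was split into
    unsigned bursts_serviced = 0;   // bursts completed so far
    explicit BurstHelper(unsigned n) : burst_count(n) {}
};

int main()
{
    BurstHelper helper(4);          // e.g. a 256B packet with 64B bursts
    for (unsigned i = 0; i < 4; ++i)
        if (++helper.bursts_serviced == helper.burst_count)
            std::cout << "all bursts done, respond to requestor\n";
    return 0;
}
```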
curTick
Tick curTick()
The current simulated tick.
Definition: core.hh:45
ArmISA::offset
Bitfield< 23, 0 > offset
Definition: types.hh:153
