gem5  v21.0.0.0
mem_ctrl.cc
/*
 * Copyright (c) 2010-2020 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2013 Amin Farmahini-Farahani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/mem_ctrl.hh"

#include "base/trace.hh"
#include "debug/DRAM.hh"
#include "debug/Drain.hh"
#include "debug/MemCtrl.hh"
#include "debug/NVM.hh"
#include "debug/QOS.hh"
#include "mem/mem_interface.hh"
#include "sim/system.hh"

MemCtrl::MemCtrl(const MemCtrlParams &p) :
    QoS::MemCtrl(p),
    port(name() + ".port", *this), isTimingMode(false),
    retryRdReq(false), retryWrReq(false),
    nextReqEvent([this]{ processNextReqEvent(); }, name()),
    respondEvent([this]{ processRespondEvent(); }, name()),
    dram(p.dram), nvm(p.nvm),
    readBufferSize((dram ? dram->readBufferSize : 0) +
                   (nvm ? nvm->readBufferSize : 0)),
    writeBufferSize((dram ? dram->writeBufferSize : 0) +
                    (nvm ? nvm->writeBufferSize : 0)),
    writeHighThreshold(writeBufferSize * p.write_high_thresh_perc / 100.0),
    writeLowThreshold(writeBufferSize * p.write_low_thresh_perc / 100.0),
    minWritesPerSwitch(p.min_writes_per_switch),
    writesThisTime(0), readsThisTime(0),
    memSchedPolicy(p.mem_sched_policy),
    frontendLatency(p.static_frontend_latency),
    backendLatency(p.static_backend_latency),
    commandWindow(p.command_window),
    nextBurstAt(0), prevArrival(0),
    nextReqTime(0),
    stats(*this)
{
    DPRINTF(MemCtrl, "Setting up controller\n");
    readQueue.resize(p.qos_priorities);
    writeQueue.resize(p.qos_priorities);

    // Hook up interfaces to the controller
    if (dram)
        dram->setCtrl(this, commandWindow);
    if (nvm)
        nvm->setCtrl(this, commandWindow);

    fatal_if(!dram && !nvm, "Memory controller must have an interface");

    // perform a basic check of the write thresholds
    if (p.write_low_thresh_perc >= p.write_high_thresh_perc)
        fatal("Write buffer low threshold %d must be smaller than the "
              "high threshold %d\n", p.write_low_thresh_perc,
              p.write_high_thresh_perc);
}
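
// Worked example (illustrative, assuming the stock 64-entry write buffer
// with write_high_thresh_perc = 85 and write_low_thresh_perc = 50): the
// expressions above evaluate to writeHighThreshold = 54 and
// writeLowThreshold = 32 entries.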

void
MemCtrl::init()
{
    if (!port.isConnected()) {
        fatal("MemCtrl %s is unconnected!\n", name());
    } else {
        port.sendRangeChange();
    }
}

void
MemCtrl::startup()
{
    // remember the memory system mode of operation
    isTimingMode = system()->isTimingMode();

    if (isTimingMode) {
        // shift the bus busy time sufficiently far ahead that we never
        // have to worry about negative values when computing the time for
        // the next request; this will add an insignificant bubble at the
        // start of simulation
        nextBurstAt = curTick() + (dram ? dram->commandOffset() :
                                          nvm->commandOffset());
    }
}

Tick
MemCtrl::recvAtomic(PacketPtr pkt)
{
    DPRINTF(MemCtrl, "recvAtomic: %s 0x%x\n",
            pkt->cmdString(), pkt->getAddr());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    Tick latency = 0;
    // do the actual memory access and turn the packet into a response
    if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
        dram->access(pkt);

        if (pkt->hasData()) {
            // this value is not supposed to be accurate, just enough to
            // keep things going, mimic a closed page
            latency = dram->accessLatency();
        }
    } else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
        nvm->access(pkt);

        if (pkt->hasData()) {
            // this value is not supposed to be accurate, just enough to
            // keep things going, mimic a closed page
            latency = nvm->accessLatency();
        }
    } else {
        panic("Can't handle address range for packet %s\n",
              pkt->print());
    }

    return latency;
}

Tick
MemCtrl::recvAtomicBackdoor(PacketPtr pkt, MemBackdoorPtr &backdoor)
{
    Tick latency = recvAtomic(pkt);
    if (dram) {
        dram->getBackdoor(backdoor);
    } else if (nvm) {
        nvm->getBackdoor(backdoor);
    }
    return latency;
}

bool
MemCtrl::readQueueFull(unsigned int neededEntries) const
{
    DPRINTF(MemCtrl,
            "Read queue limit %d, current size %d, entries needed %d\n",
            readBufferSize, totalReadQueueSize + respQueue.size(),
            neededEntries);

    auto rdsize_new = totalReadQueueSize + respQueue.size() + neededEntries;
    return rdsize_new > readBufferSize;
}
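
// Worked example (illustrative values): with readBufferSize = 64, 60
// queued reads and 3 pending responses, readQueueFull(2) reports full,
// since 60 + 3 + 2 = 65 exceeds 64.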

bool
MemCtrl::writeQueueFull(unsigned int neededEntries) const
{
    DPRINTF(MemCtrl,
            "Write queue limit %d, current size %d, entries needed %d\n",
            writeBufferSize, totalWriteQueueSize, neededEntries);

    auto wrsize_new = (totalWriteQueueSize + neededEntries);
    return wrsize_new > writeBufferSize;
}

void
MemCtrl::addToReadQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram)
{
    // only add to the read queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(!pkt->isWrite());

    assert(pkt_count != 0);

    // if the request size is larger than burst size, the pkt is split into
    // multiple packets
    // Note if the pkt starting address is not aligned to burst size, the
    // address of the first packet is kept unaligned. Subsequent packets
    // are aligned to burst size boundaries. This is to ensure we accurately
    // check read packets against packets in the write queue.
    const Addr base_addr = pkt->getAddr();
    Addr addr = base_addr;
    unsigned pktsServicedByWrQ = 0;
    BurstHelper* burst_helper = NULL;

    uint32_t burst_size = is_dram ? dram->bytesPerBurst() :
                                    nvm->bytesPerBurst();
    for (int cnt = 0; cnt < pkt_count; ++cnt) {
        unsigned size = std::min((addr | (burst_size - 1)) + 1,
                        base_addr + pkt->getSize()) - addr;
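        // Worked example (illustrative values): with a 32 B burst_size, a
        // 64 B request starting at addr 0x48 is carved into bursts of
        // 24 B at 0x48, 32 B at 0x60 and 8 B at 0x80;
        // (addr | (burst_size - 1)) + 1 is the next burst boundary above
        // addr, clipped to the end of the request.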
        stats.readPktSize[ceilLog2(size)]++;
        stats.readBursts++;
        stats.requestorReadAccesses[pkt->requestorId()]++;

        // First check write buffer to see if the data is already at
        // the controller
        bool foundInWrQ = false;
        Addr burst_addr = burstAlign(addr, is_dram);
        // if the burst address is not present then there is no need
        // looking any further
        if (isInWriteQueue.find(burst_addr) != isInWriteQueue.end()) {
            for (const auto& vec : writeQueue) {
                for (const auto& p : vec) {
                    // check if the read is subsumed in the write queue
                    // packet we are looking at
                    if (p->addr <= addr &&
                        ((addr + size) <= (p->addr + p->size))) {

                        foundInWrQ = true;
                        stats.servicedByWrQ++;
                        pktsServicedByWrQ++;
                        DPRINTF(MemCtrl,
                                "Read to addr %lld with size %d serviced by "
                                "write queue\n",
                                addr, size);
                        stats.bytesReadWrQ += burst_size;
                        break;
                    }
                }
            }
        }

        // If not found in the write q, make a memory packet and
        // push it onto the read queue
        if (!foundInWrQ) {

            // Make the burst helper for split packets
            if (pkt_count > 1 && burst_helper == NULL) {
                DPRINTF(MemCtrl, "Read to addr %lld translates to %d "
                        "memory requests\n", pkt->getAddr(), pkt_count);
                burst_helper = new BurstHelper(pkt_count);
            }

            MemPacket* mem_pkt;
            if (is_dram) {
                mem_pkt = dram->decodePacket(pkt, addr, size, true, true);
                // increment read entries of the rank
                dram->setupRank(mem_pkt->rank, true);
            } else {
                mem_pkt = nvm->decodePacket(pkt, addr, size, true, false);
                // Increment count to trigger issue of non-deterministic read
                nvm->setupRank(mem_pkt->rank, true);
                // Default readyTime to Max; will be reset once read is issued
                mem_pkt->readyTime = MaxTick;
            }
            mem_pkt->burstHelper = burst_helper;

            assert(!readQueueFull(1));
            stats.rdQLenPdf[totalReadQueueSize + respQueue.size()]++;

            DPRINTF(MemCtrl, "Adding to read queue\n");

            readQueue[mem_pkt->qosValue()].push_back(mem_pkt);

            // log packet
            logRequest(MemCtrl::READ, pkt->requestorId(), pkt->qosValue(),
                       mem_pkt->addr, 1);

            // Update stats
            stats.avgRdQLen = totalReadQueueSize + respQueue.size();
        }

        // Starting address of next memory pkt (aligned to burst boundary)
        addr = (addr | (burst_size - 1)) + 1;
    }

    // If all packets are serviced by write queue, we send the response back
    if (pktsServicedByWrQ == pkt_count) {
        accessAndRespond(pkt, frontendLatency);
        return;
    }

    // Update how many split packets are serviced by write queue
    if (burst_helper != NULL)
        burst_helper->burstsServiced = pktsServicedByWrQ;

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(MemCtrl, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}

void
MemCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram)
{
    // only add to the write queue here. whenever the request is
    // eventually done, set the readyTime, and call schedule()
    assert(pkt->isWrite());

    // if the request size is larger than burst size, the pkt is split into
    // multiple packets
    const Addr base_addr = pkt->getAddr();
    Addr addr = base_addr;
    uint32_t burst_size = is_dram ? dram->bytesPerBurst() :
                                    nvm->bytesPerBurst();
    for (int cnt = 0; cnt < pkt_count; ++cnt) {
        unsigned size = std::min((addr | (burst_size - 1)) + 1,
                        base_addr + pkt->getSize()) - addr;
        stats.writePktSize[ceilLog2(size)]++;
        stats.writeBursts++;
        stats.requestorWriteAccesses[pkt->requestorId()]++;

        // see if we can merge with an existing item in the write
        // queue and keep track of whether we have merged or not
        bool merged = isInWriteQueue.find(burstAlign(addr, is_dram)) !=
            isInWriteQueue.end();

        // if the item was not merged we need to create a new write
        // and enqueue it
        if (!merged) {
            MemPacket* mem_pkt;
            if (is_dram) {
                mem_pkt = dram->decodePacket(pkt, addr, size, false, true);
                dram->setupRank(mem_pkt->rank, false);
            } else {
                mem_pkt = nvm->decodePacket(pkt, addr, size, false, false);
                nvm->setupRank(mem_pkt->rank, false);
            }
            assert(totalWriteQueueSize < writeBufferSize);
            stats.wrQLenPdf[totalWriteQueueSize]++;

            DPRINTF(MemCtrl, "Adding to write queue\n");

            writeQueue[mem_pkt->qosValue()].push_back(mem_pkt);
            isInWriteQueue.insert(burstAlign(addr, is_dram));

            // log packet
            logRequest(MemCtrl::WRITE, pkt->requestorId(), pkt->qosValue(),
                       mem_pkt->addr, 1);

            assert(totalWriteQueueSize == isInWriteQueue.size());

            // Update stats
            stats.avgWrQLen = totalWriteQueueSize;

        } else {
            DPRINTF(MemCtrl,
                    "Merging write burst with existing queue entry\n");

            // keep track of the fact that this burst effectively
            // disappeared as it was merged with an existing one
            stats.mergedWrBursts++;
        }

        // Starting address of next memory pkt (aligned to burst_size boundary)
        addr = (addr | (burst_size - 1)) + 1;
    }

    // we do not wait for the writes to be sent to the actual memory,
    // but instead take responsibility for the consistency here and
    // snoop the write queue for any upcoming reads
    // @todo, if a pkt size is larger than burst size, we might need a
    // different front end latency
    accessAndRespond(pkt, frontendLatency);

    // If we are not already scheduled to get a request out of the
    // queue, do so now
    if (!nextReqEvent.scheduled()) {
        DPRINTF(MemCtrl, "Request scheduled immediately\n");
        schedule(nextReqEvent, curTick());
    }
}

void
MemCtrl::printQs() const
{
#if TRACING_ON
    DPRINTF(MemCtrl, "===READ QUEUE===\n\n");
    for (const auto& queue : readQueue) {
        for (const auto& packet : queue) {
            DPRINTF(MemCtrl, "Read %lu\n", packet->addr);
        }
    }

    DPRINTF(MemCtrl, "\n===RESP QUEUE===\n\n");
    for (const auto& packet : respQueue) {
        DPRINTF(MemCtrl, "Response %lu\n", packet->addr);
    }

    DPRINTF(MemCtrl, "\n===WRITE QUEUE===\n\n");
    for (const auto& queue : writeQueue) {
        for (const auto& packet : queue) {
            DPRINTF(MemCtrl, "Write %lu\n", packet->addr);
        }
    }
#endif // TRACING_ON
}

bool
MemCtrl::recvTimingReq(PacketPtr pkt)
{
    // This is where we enter from the outside world
    DPRINTF(MemCtrl, "recvTimingReq: request %s addr %lld size %d\n",
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    panic_if(!(pkt->isRead() || pkt->isWrite()),
             "Should only see read and writes at memory controller\n");

    // Calc avg gap between requests
    if (prevArrival != 0) {
        stats.totGap += curTick() - prevArrival;
    }
    prevArrival = curTick();

    // What type of media does this packet access?
    bool is_dram;
    if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
        is_dram = true;
    } else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
        is_dram = false;
    } else {
        panic("Can't handle address range for packet %s\n",
              pkt->print());
    }

    // Find out how many memory packets a pkt translates to
    // If the burst size is equal or larger than the pkt size, then a pkt
    // translates to only one memory packet. Otherwise, a pkt translates to
    // multiple memory packets
    unsigned size = pkt->getSize();
    uint32_t burst_size = is_dram ? dram->bytesPerBurst() :
                                    nvm->bytesPerBurst();
    unsigned offset = pkt->getAddr() & (burst_size - 1);
    unsigned int pkt_count = divCeil(offset + size, burst_size);
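    // Worked example (illustrative values): a 64 B packet whose address
    // sits 8 B into a 32 B burst spans divCeil(8 + 64, 32) = 3 bursts,
    // matching the three sub-bursts that addToReadQueue() and
    // addToWriteQueue() would create for it.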

    // run the QoS scheduler and assign a QoS priority value to the packet
    qosSchedule( { &readQueue, &writeQueue }, burst_size, pkt);

    // check local buffers and do not accept if full
    if (pkt->isWrite()) {
        assert(size != 0);
        if (writeQueueFull(pkt_count)) {
            DPRINTF(MemCtrl, "Write queue full, not accepting\n");
            // remember that we have to retry this port
            retryWrReq = true;
            stats.numWrRetry++;
            return false;
        } else {
            addToWriteQueue(pkt, pkt_count, is_dram);
            stats.writeReqs++;
            stats.bytesWrittenSys += size;
        }
    } else {
        assert(pkt->isRead());
        assert(size != 0);
        if (readQueueFull(pkt_count)) {
            DPRINTF(MemCtrl, "Read queue full, not accepting\n");
            // remember that we have to retry this port
            retryRdReq = true;
            stats.numRdRetry++;
            return false;
        } else {
            addToReadQueue(pkt, pkt_count, is_dram);
            stats.readReqs++;
            stats.bytesReadSys += size;
        }
    }

    return true;
}

void
MemCtrl::processRespondEvent()
{
    DPRINTF(MemCtrl,
            "processRespondEvent(): Some req has reached its readyTime\n");

    MemPacket* mem_pkt = respQueue.front();

    if (mem_pkt->isDram()) {
        // media specific checks and functions when read response is complete
        dram->respondEvent(mem_pkt->rank);
    }

    if (mem_pkt->burstHelper) {
        // it is a split packet
        mem_pkt->burstHelper->burstsServiced++;
        if (mem_pkt->burstHelper->burstsServiced ==
            mem_pkt->burstHelper->burstCount) {
            // we have now serviced all children packets of a system packet
            // so we can now respond to the requestor
            // @todo we probably want to have a different front end and back
            // end latency for split packets
            accessAndRespond(mem_pkt->pkt, frontendLatency + backendLatency);
            delete mem_pkt->burstHelper;
            mem_pkt->burstHelper = NULL;
        }
    } else {
        // it is not a split packet
        accessAndRespond(mem_pkt->pkt, frontendLatency + backendLatency);
    }

    delete respQueue.front();
    respQueue.pop_front();

    if (!respQueue.empty()) {
        assert(respQueue.front()->readyTime >= curTick());
        assert(!respondEvent.scheduled());
        schedule(respondEvent, respQueue.front()->readyTime);
    } else {
        // if there is nothing left in any queue, signal a drain
        if (drainState() == DrainState::Draining &&
            !totalWriteQueueSize && !totalReadQueueSize &&
            allIntfDrained()) {

            DPRINTF(Drain, "Controller done draining\n");
            signalDrainDone();
        } else if (mem_pkt->isDram()) {
            // check the refresh state and kick the refresh event loop
            // into action again if banks already closed and just waiting
            // for read to complete
            dram->checkRefreshState(mem_pkt->rank);
        }
    }

    // We have made a location in the queue available at this point,
    // so if there is a read that was forced to wait, retry now
    if (retryRdReq) {
        retryRdReq = false;
        port.sendRetryReq();
    }
}

MemPacketQueue::iterator
MemCtrl::chooseNext(MemPacketQueue& queue, Tick extra_col_delay)
{
    // This method does the arbitration between requests.

    MemPacketQueue::iterator ret = queue.end();

    if (!queue.empty()) {
        if (queue.size() == 1) {
            // available rank corresponds to state refresh idle
            MemPacket* mem_pkt = *(queue.begin());
            if (packetReady(mem_pkt)) {
                ret = queue.begin();
                DPRINTF(MemCtrl, "Single request, going to a free rank\n");
            } else {
                DPRINTF(MemCtrl, "Single request, going to a busy rank\n");
            }
        } else if (memSchedPolicy == Enums::fcfs) {
            // check if there is a packet going to a free rank
            for (auto i = queue.begin(); i != queue.end(); ++i) {
                MemPacket* mem_pkt = *i;
                if (packetReady(mem_pkt)) {
                    ret = i;
                    break;
                }
            }
        } else if (memSchedPolicy == Enums::frfcfs) {
            ret = chooseNextFRFCFS(queue, extra_col_delay);
        } else {
            panic("No scheduling policy chosen\n");
        }
    }
    return ret;
}
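
// Illustrative note: fcfs above simply takes the oldest packet whose rank
// can accept it, while frfcfs (the stock default policy) defers to
// chooseNextFRFCFS() below, which additionally favours the packet that can
// issue its column command earliest.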

MemPacketQueue::iterator
MemCtrl::chooseNextFRFCFS(MemPacketQueue& queue, Tick extra_col_delay)
{
    auto selected_pkt_it = queue.end();
    Tick col_allowed_at = MaxTick;

    // time we need to issue a column command to be seamless
    const Tick min_col_at = std::max(nextBurstAt + extra_col_delay, curTick());

    // find optimal packet for each interface
    if (dram && nvm) {
        // create 2nd set of parameters for NVM
        auto nvm_pkt_it = queue.end();
        Tick nvm_col_at = MaxTick;

        // Select the DRAM packet by default to give it priority if both
        // can issue at the same time or seamlessly
        std::tie(selected_pkt_it, col_allowed_at) =
            dram->chooseNextFRFCFS(queue, min_col_at);
        std::tie(nvm_pkt_it, nvm_col_at) =
            nvm->chooseNextFRFCFS(queue, min_col_at);

        // Compare DRAM and NVM and select NVM if it can issue
        // earlier than the DRAM packet
        if (col_allowed_at > nvm_col_at) {
            selected_pkt_it = nvm_pkt_it;
        }
    } else if (dram) {
        std::tie(selected_pkt_it, col_allowed_at) =
            dram->chooseNextFRFCFS(queue, min_col_at);
    } else if (nvm) {
        std::tie(selected_pkt_it, col_allowed_at) =
            nvm->chooseNextFRFCFS(queue, min_col_at);
    }

    if (selected_pkt_it == queue.end()) {
        DPRINTF(MemCtrl, "%s no available packets found\n", __func__);
    }

    return selected_pkt_it;
}

void
MemCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency)
{
    DPRINTF(MemCtrl, "Responding to Address %lld.. \n", pkt->getAddr());

    bool needsResponse = pkt->needsResponse();
    // do the actual memory access which also turns the packet into a
    // response
    if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
        dram->access(pkt);
    } else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
        nvm->access(pkt);
    } else {
        panic("Can't handle address range for packet %s\n",
              pkt->print());
    }

    // turn packet around to go back to requestor if response expected
    if (needsResponse) {
        // access already turned the packet into a response
        assert(pkt->isResponse());
        // response_time consumes the static latency and is charged also
        // with headerDelay that takes into account the delay provided by
        // the xbar and also the payloadDelay that takes into account the
        // number of data beats.
        Tick response_time = curTick() + static_latency + pkt->headerDelay +
                             pkt->payloadDelay;
        // Here we reset the timing of the packet before sending it out.
        pkt->headerDelay = pkt->payloadDelay = 0;

        // queue the packet in the response queue to be sent out after
        // the static latency has passed
        port.schedTimingResp(pkt, response_time);
    } else {
        // @todo the packet is going to be deleted, and the MemPacket
        // still has a pointer to it
        pendingDelete.reset(pkt);
    }

    DPRINTF(MemCtrl, "Done\n");

    return;
}

void
MemCtrl::pruneBurstTick()
{
    auto it = burstTicks.begin();
    while (it != burstTicks.end()) {
        auto current_it = it++;
        if (curTick() > *current_it) {
            DPRINTF(MemCtrl, "Removing burstTick for %d\n", *current_it);
            burstTicks.erase(current_it);
        }
    }
}

Tick
MemCtrl::getBurstWindow(Tick cmd_tick)
{
    // get tick aligned to burst window
    Tick burst_offset = cmd_tick % commandWindow;
    return (cmd_tick - burst_offset);
}
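
// Worked example (illustrative values): with commandWindow = 5000 ticks,
// getBurstWindow(12345) returns 12345 - (12345 % 5000) = 10000, the start
// of the window the command falls into.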

Tick
MemCtrl::verifySingleCmd(Tick cmd_tick, Tick max_cmds_per_burst)
{
    // start with assumption that there is no contention on command bus
    Tick cmd_at = cmd_tick;

    // get tick aligned to burst window
    Tick burst_tick = getBurstWindow(cmd_tick);

    // verify that we have command bandwidth to issue the command
    // if not, iterate over next window(s) until slot found
    while (burstTicks.count(burst_tick) >= max_cmds_per_burst) {
        DPRINTF(MemCtrl, "Contention found on command bus at %d\n",
                burst_tick);
        burst_tick += commandWindow;
        cmd_at = burst_tick;
    }

    // add command into burst window and return corresponding Tick
    burstTicks.insert(burst_tick);
    return cmd_at;
}
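
// Worked example for verifySingleCmd() (illustrative values): with
// commandWindow = 5000 and max_cmds_per_burst = 2, a command requested at
// tick 11000 maps to window 10000; if that window already holds two
// commands, the loop slides to window 15000 and returns cmd_at = 15000.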

Tick
MemCtrl::verifyMultiCmd(Tick cmd_tick, Tick max_cmds_per_burst,
                        Tick max_multi_cmd_split)
{
    // start with assumption that there is no contention on command bus
    Tick cmd_at = cmd_tick;

    // get tick aligned to burst window
    Tick burst_tick = getBurstWindow(cmd_tick);

    // Command timing requirements are from 2nd command
    // Start with assumption that 2nd command will issue at cmd_at and
    // find prior slot for 1st command to issue
    // Given a maximum latency of max_multi_cmd_split between the commands,
    // find the burst at the maximum latency prior to cmd_at
    Tick burst_offset = 0;
    Tick first_cmd_offset = cmd_tick % commandWindow;
    while (max_multi_cmd_split > (first_cmd_offset + burst_offset)) {
        burst_offset += commandWindow;
    }
    // get the earliest burst aligned address for first command
    // ensure that the time does not go negative
    Tick first_cmd_tick = burst_tick - std::min(burst_offset, burst_tick);

    // Can required commands issue?
    bool first_can_issue = false;
    bool second_can_issue = false;
    // verify that we have command bandwidth to issue the command(s)
    while (!first_can_issue || !second_can_issue) {
        bool same_burst = (burst_tick == first_cmd_tick);
        auto first_cmd_count = burstTicks.count(first_cmd_tick);
        auto second_cmd_count = same_burst ? first_cmd_count + 1 :
                                   burstTicks.count(burst_tick);

        first_can_issue = first_cmd_count < max_cmds_per_burst;
        second_can_issue = second_cmd_count < max_cmds_per_burst;

        if (!second_can_issue) {
            DPRINTF(MemCtrl, "Contention (cmd2) found on command bus at %d\n",
                    burst_tick);
            burst_tick += commandWindow;
            cmd_at = burst_tick;
        }

        // Verify max_multi_cmd_split isn't violated when command 2 is shifted
        // If commands initially were issued in same burst, they are
        // now in consecutive bursts and can still issue B2B
        bool gap_violated = !same_burst &&
            ((burst_tick - first_cmd_tick) > max_multi_cmd_split);

        if (!first_can_issue || (!second_can_issue && gap_violated)) {
            DPRINTF(MemCtrl, "Contention (cmd1) found on command bus at %d\n",
                    first_cmd_tick);
            first_cmd_tick += commandWindow;
        }
    }

    // Add command to burstTicks
    burstTicks.insert(burst_tick);
    burstTicks.insert(first_cmd_tick);

    return cmd_at;
}
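
// Sketch of the two-command search above (illustrative values): with
// commandWindow = 5000 and max_multi_cmd_split = 5000, a second command at
// tick 12000 starts in window 10000 and the first command is tried in
// window 5000; each is then slid forward one window at a time until both
// find bandwidth without violating the allowed split.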

bool
MemCtrl::inReadBusState(bool next_state) const
{
    // check the bus state
    if (next_state) {
        // use busStateNext to get the state that will be used
        // for the next burst
        return (busStateNext == MemCtrl::READ);
    } else {
        return (busState == MemCtrl::READ);
    }
}

bool
MemCtrl::inWriteBusState(bool next_state) const
{
    // check the bus state
    if (next_state) {
        // use busStateNext to get the state that will be used
        // for the next burst
        return (busStateNext == MemCtrl::WRITE);
    } else {
        return (busState == MemCtrl::WRITE);
    }
}

void
MemCtrl::doBurstAccess(MemPacket* mem_pkt)
{
    // first clean up the burstTick set, removing old entries
    // before adding new entries for next burst
    pruneBurstTick();

    // When was command issued?
    Tick cmd_at;

    // Issue the next burst and update bus state to reflect
    // when previous command was issued
    if (mem_pkt->isDram()) {
        std::vector<MemPacketQueue>& queue = selQueue(mem_pkt->isRead());
        std::tie(cmd_at, nextBurstAt) =
            dram->doBurstAccess(mem_pkt, nextBurstAt, queue);

        // Update timing for NVM ranks if NVM is configured on this channel
        if (nvm)
            nvm->addRankToRankDelay(cmd_at);

    } else {
        std::tie(cmd_at, nextBurstAt) =
            nvm->doBurstAccess(mem_pkt, nextBurstAt);

        // Update timing for DRAM ranks if DRAM is configured on this channel
        if (dram)
            dram->addRankToRankDelay(cmd_at);

    }

    DPRINTF(MemCtrl, "Access to %lld, ready at %lld next burst at %lld.\n",
            mem_pkt->addr, mem_pkt->readyTime, nextBurstAt);

    // Update the minimum timing between the requests, this is a
    // conservative estimate of when we have to schedule the next
    // request to not introduce any unnecessary bubbles. In most cases
    // we will wake up sooner than we have to.
    nextReqTime = nextBurstAt - (dram ? dram->commandOffset() :
                                        nvm->commandOffset());

    // Update the common bus stats
    if (mem_pkt->isRead()) {
        ++readsThisTime;
        // Update latency stats
        stats.requestorReadTotalLat[mem_pkt->requestorId()] +=
            mem_pkt->readyTime - mem_pkt->entryTime;
        stats.requestorReadBytes[mem_pkt->requestorId()] += mem_pkt->size;
    } else {
        ++writesThisTime;
        stats.requestorWriteBytes[mem_pkt->requestorId()] += mem_pkt->size;
        stats.requestorWriteTotalLat[mem_pkt->requestorId()] +=
            mem_pkt->readyTime - mem_pkt->entryTime;
    }
}

void
MemCtrl::processNextReqEvent()
{
    // transition is handled by QoS algorithm if enabled
    if (turnPolicy) {
        // select bus state - only done if QoS algorithms are in use
        busStateNext = selectNextBusState();
    }

    // detect bus state change
    bool switched_cmd_type = (busState != busStateNext);
    // record stats
    recordTurnaroundStats();

    DPRINTF(MemCtrl, "QoS Turnarounds selected state %s %s\n",
            (busState==MemCtrl::READ)?"READ":"WRITE",
            switched_cmd_type?"[turnaround triggered]":"");

    if (switched_cmd_type) {
        if (busState == MemCtrl::READ) {
            DPRINTF(MemCtrl,
                    "Switching to writes after %d reads with %d reads "
                    "waiting\n", readsThisTime, totalReadQueueSize);
            stats.rdPerTurnAround.sample(readsThisTime);
            readsThisTime = 0;
        } else {
            DPRINTF(MemCtrl,
                    "Switching to reads after %d writes with %d writes "
                    "waiting\n", writesThisTime, totalWriteQueueSize);
            stats.wrPerTurnAround.sample(writesThisTime);
            writesThisTime = 0;
        }
    }

    // updates current state
    busState = busStateNext;

    if (nvm) {
        for (auto queue = readQueue.rbegin();
             queue != readQueue.rend(); ++queue) {
            // select non-deterministic NVM read to issue
            // assume that we have the command bandwidth to issue this along
            // with additional RD/WR burst with needed bank operations
            if (nvm->readsWaitingToIssue()) {
                // select non-deterministic NVM read to issue
                nvm->chooseRead(*queue);
            }
        }
    }

    // check ranks for refresh/wakeup - uses busStateNext, so done after
    // turnaround decisions
    // Default to busy status and update based on interface specifics
    bool dram_busy = dram ? dram->isBusy() : true;
    bool nvm_busy = true;
    bool all_writes_nvm = false;
    if (nvm) {
        all_writes_nvm = nvm->numWritesQueued == totalWriteQueueSize;
        bool read_queue_empty = totalReadQueueSize == 0;
        nvm_busy = nvm->isBusy(read_queue_empty, all_writes_nvm);
    }
    // Default state of unused interface is 'true'
    // Simply AND the busy signals to determine if system is busy
    if (dram_busy && nvm_busy) {
        // if all ranks are refreshing wait for them to finish
        // and stall this state machine without taking any further
        // action, and do not schedule a new nextReqEvent
        return;
    }

    // when we get here it is either a read or a write
    if (busState == READ) {

        // track if we should switch or not
        bool switch_to_writes = false;

        if (totalReadQueueSize == 0) {
            // In the case there is no read request to go next,
            // trigger writes if we have passed the low threshold (or
            // if we are draining)
            if (!(totalWriteQueueSize == 0) &&
                (drainState() == DrainState::Draining ||
                 totalWriteQueueSize > writeLowThreshold)) {

                DPRINTF(MemCtrl,
                        "Switching to writes due to read queue empty\n");
                switch_to_writes = true;
            } else {
                // check if we are drained
                // not done draining until in PWR_IDLE state
                // ensuring all banks are closed and
                // have exited low power states
                if (drainState() == DrainState::Draining &&
                    respQueue.empty() && allIntfDrained()) {

                    DPRINTF(Drain, "MemCtrl controller done draining\n");
                    signalDrainDone();
                }

                // nothing to do, not even any point in scheduling an
                // event for the next request
                return;
            }
        } else {

            bool read_found = false;
            MemPacketQueue::iterator to_read;
            uint8_t prio = numPriorities();

            for (auto queue = readQueue.rbegin();
                 queue != readQueue.rend(); ++queue) {

                prio--;

                DPRINTF(QOS,
                        "Checking READ queue [%d] priority [%d elements]\n",
                        prio, queue->size());

                // Figure out which read request goes next
                // If we are changing command type, incorporate the minimum
                // bus turnaround delay which will be rank to rank delay
                to_read = chooseNext((*queue), switched_cmd_type ?
                                     minWriteToReadDataGap() : 0);

                if (to_read != queue->end()) {
                    // candidate read found
                    read_found = true;
                    break;
                }
            }

            // if no read to an available rank is found then return
            // at this point. There could be writes to the available ranks
            // which are above the required threshold. However, to
            // avoid adding more complexity to the code, return and wait
            // for a refresh event to kick things into action again.
            if (!read_found) {
                DPRINTF(MemCtrl, "No Reads Found - exiting\n");
                return;
            }

            auto mem_pkt = *to_read;

            doBurstAccess(mem_pkt);

            // sanity check
            assert(mem_pkt->size <= (mem_pkt->isDram() ?
                                     dram->bytesPerBurst() :
                                     nvm->bytesPerBurst()) );
            assert(mem_pkt->readyTime >= curTick());

            // log the response
            logResponse(MemCtrl::READ, (*to_read)->requestorId(),
                        mem_pkt->qosValue(), mem_pkt->getAddr(), 1,
                        mem_pkt->readyTime - mem_pkt->entryTime);

            // Insert into response queue. It will be sent back to the
            // requestor at its readyTime
            if (respQueue.empty()) {
                assert(!respondEvent.scheduled());
                schedule(respondEvent, mem_pkt->readyTime);
            } else {
                assert(respQueue.back()->readyTime <= mem_pkt->readyTime);
                assert(respondEvent.scheduled());
            }

            respQueue.push_back(mem_pkt);

            // we have so many writes that we have to transition
            // don't transition if the writeRespQueue is full and
            // there are no other writes that can issue
            if ((totalWriteQueueSize > writeHighThreshold) &&
                !(nvm && all_writes_nvm && nvm->writeRespQueueFull())) {
                switch_to_writes = true;
            }

            // remove the request from the queue
            // the iterator is no longer valid
            readQueue[mem_pkt->qosValue()].erase(to_read);
        }

        // switching to writes, either because the read queue is empty
        // and the writes have passed the low threshold (or we are
        // draining), or because the writes hit the high threshold
        if (switch_to_writes) {
            // transition to writing
            busStateNext = WRITE;
        }
    } else {

        bool write_found = false;
        MemPacketQueue::iterator to_write;
        uint8_t prio = numPriorities();

        for (auto queue = writeQueue.rbegin();
             queue != writeQueue.rend(); ++queue) {

            prio--;

            DPRINTF(QOS,
                    "Checking WRITE queue [%d] priority [%d elements]\n",
                    prio, queue->size());

            // If we are changing command type, incorporate the minimum
            // bus turnaround delay
            to_write = chooseNext((*queue),
                                  switched_cmd_type ? minReadToWriteDataGap() : 0);

            if (to_write != queue->end()) {
                write_found = true;
                break;
            }
        }

        // if no writes to a rank that is available to service requests
        // (i.e. the rank is in refresh idle state) are found then
        // return. There could be reads to the available ranks. However, to
        // avoid adding more complexity to the code, return at this point and
        // wait for a refresh event to kick things into action again.
        if (!write_found) {
            DPRINTF(MemCtrl, "No Writes Found - exiting\n");
            return;
        }

        auto mem_pkt = *to_write;

        // sanity check
        assert(mem_pkt->size <= (mem_pkt->isDram() ?
                                 dram->bytesPerBurst() :
                                 nvm->bytesPerBurst()) );

        doBurstAccess(mem_pkt);

        isInWriteQueue.erase(burstAlign(mem_pkt->addr, mem_pkt->isDram()));

        // log the response
        logResponse(MemCtrl::WRITE, mem_pkt->requestorId(),
                    mem_pkt->qosValue(), mem_pkt->getAddr(), 1,
                    mem_pkt->readyTime - mem_pkt->entryTime);

        // remove the request from the queue - the iterator is no longer valid
        writeQueue[mem_pkt->qosValue()].erase(to_write);

        delete mem_pkt;

        // If we emptied the write queue, or got sufficiently below the
        // threshold (using the minWritesPerSwitch as the hysteresis) and
        // are not draining, or we have reads waiting and have done enough
        // writes, then switch to reads.
        // If we are interfacing to NVM and have filled the writeRespQueue,
        // with only NVM writes in Q, then switch to reads
        bool below_threshold =
            totalWriteQueueSize + minWritesPerSwitch < writeLowThreshold;

        if (totalWriteQueueSize == 0 ||
            (below_threshold && drainState() != DrainState::Draining) ||
            (totalReadQueueSize && writesThisTime >= minWritesPerSwitch) ||
            (totalReadQueueSize && nvm && nvm->writeRespQueueFull() &&
             all_writes_nvm)) {

            // turn the bus back around for reads again
            busStateNext = MemCtrl::READ;

            // note that we switch back to reads also in the idle
            // case, which eventually will check for any draining and
            // also pause any further scheduling if there is really
            // nothing to do
        }
    }
    // It is possible that a refresh to another rank kicks things back into
    // action before reaching this point.
    if (!nextReqEvent.scheduled())
        schedule(nextReqEvent, std::max(nextReqTime, curTick()));

    // If there is space available and we have writes waiting then let
    // them retry. This is done here to ensure that the retry does not
    // cause a nextReqEvent to be scheduled before we do so as part of
    // the next request processing
    if (retryWrReq && totalWriteQueueSize < writeBufferSize) {
        retryWrReq = false;
        port.sendRetryReq();
    }
}
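
// Illustrative walk-through of the switching hysteresis above (assuming
// stock values writeLowThreshold = 32, writeHighThreshold = 54 and
// minWritesPerSwitch = 16): a read phase flips to writes once more than 54
// writes are buffered; the write phase then drains until fewer than
// 32 - 16 = 16 entries remain (or 16 writes have issued while reads are
// waiting), at which point the bus turns back around for reads.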

bool
MemCtrl::packetReady(MemPacket* pkt)
{
    return (pkt->isDram() ?
            dram->burstReady(pkt) : nvm->burstReady(pkt));
}

Tick
MemCtrl::minReadToWriteDataGap()
{
    Tick dram_min = dram ? dram->minReadToWriteDataGap() : MaxTick;
    Tick nvm_min = nvm ? nvm->minReadToWriteDataGap() : MaxTick;
    return std::min(dram_min, nvm_min);
}

Tick
MemCtrl::minWriteToReadDataGap()
{
    Tick dram_min = dram ? dram->minWriteToReadDataGap() : MaxTick;
    Tick nvm_min = nvm ? nvm->minWriteToReadDataGap() : MaxTick;
    return std::min(dram_min, nvm_min);
}

Addr
MemCtrl::burstAlign(Addr addr, bool is_dram) const
{
    if (is_dram)
        return (addr & ~(Addr(dram->bytesPerBurst() - 1)));
    else
        return (addr & ~(Addr(nvm->bytesPerBurst() - 1)));
}
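
// Worked example (illustrative values): with a 64 B DRAM burst,
// burstAlign(0x1047, true) clears the low six address bits and returns
// 0x1040, the key used by the isInWriteQueue merge check above.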

MemCtrl::CtrlStats::CtrlStats(MemCtrl &_ctrl)
    : Stats::Group(&_ctrl),
    ctrl(_ctrl),

    ADD_STAT(readReqs, UNIT_COUNT, "Number of read requests accepted"),
    ADD_STAT(writeReqs, UNIT_COUNT, "Number of write requests accepted"),

    ADD_STAT(readBursts, UNIT_COUNT,
             "Number of controller read bursts, including those serviced by "
             "the write queue"),
    ADD_STAT(writeBursts, UNIT_COUNT,
             "Number of controller write bursts, including those merged in "
             "the write queue"),
    ADD_STAT(servicedByWrQ, UNIT_COUNT,
             "Number of controller read bursts serviced by the write queue"),
    ADD_STAT(mergedWrBursts, UNIT_COUNT,
             "Number of controller write bursts merged with an existing one"),

    ADD_STAT(neitherReadNorWriteReqs, UNIT_COUNT,
             "Number of requests that are neither read nor write"),

    ADD_STAT(avgRdQLen,
             UNIT_RATE(Stats::Units::Count, Stats::Units::Tick),
             "Average read queue length when enqueuing"),
    ADD_STAT(avgWrQLen,
             UNIT_RATE(Stats::Units::Count, Stats::Units::Tick),
             "Average write queue length when enqueuing"),

    ADD_STAT(numRdRetry, UNIT_COUNT,
             "Number of times read queue was full causing retry"),
    ADD_STAT(numWrRetry, UNIT_COUNT,
             "Number of times write queue was full causing retry"),

    ADD_STAT(readPktSize, UNIT_COUNT, "Read request sizes (log2)"),
    ADD_STAT(writePktSize, UNIT_COUNT, "Write request sizes (log2)"),

    ADD_STAT(rdQLenPdf, UNIT_COUNT,
             "What read queue length does an incoming req see"),
    ADD_STAT(wrQLenPdf, UNIT_COUNT,
             "What write queue length does an incoming req see"),

    ADD_STAT(rdPerTurnAround, UNIT_COUNT,
             "Reads before turning the bus around for writes"),
    ADD_STAT(wrPerTurnAround, UNIT_COUNT,
             "Writes before turning the bus around for reads"),

    ADD_STAT(bytesReadWrQ, UNIT_BYTE,
             "Total number of bytes read from write queue"),
    ADD_STAT(bytesReadSys, UNIT_BYTE,
             "Total read bytes from the system interface side"),
    ADD_STAT(bytesWrittenSys, UNIT_BYTE,
             "Total written bytes from the system interface side"),

    ADD_STAT(avgRdBWSys, UNIT_RATE(Stats::Units::Byte, Stats::Units::Second),
             "Average system read bandwidth in Byte/s"),
    ADD_STAT(avgWrBWSys, UNIT_RATE(Stats::Units::Byte, Stats::Units::Second),
             "Average system write bandwidth in Byte/s"),

    ADD_STAT(totGap, UNIT_TICK, "Total gap between requests"),
    ADD_STAT(avgGap, UNIT_RATE(Stats::Units::Tick, Stats::Units::Count),
             "Average gap between requests"),

    ADD_STAT(requestorReadBytes, UNIT_BYTE,
             "Per-requestor bytes read from memory"),
    ADD_STAT(requestorWriteBytes, UNIT_BYTE,
             "Per-requestor bytes written to memory"),
    ADD_STAT(requestorReadRate,
             UNIT_RATE(Stats::Units::Byte, Stats::Units::Second),
             "Per-requestor bytes read from memory rate"),
    ADD_STAT(requestorWriteRate,
             UNIT_RATE(Stats::Units::Byte, Stats::Units::Second),
             "Per-requestor bytes written to memory rate"),
    ADD_STAT(requestorReadAccesses, UNIT_COUNT,
             "Per-requestor read serviced memory accesses"),
    ADD_STAT(requestorWriteAccesses, UNIT_COUNT,
             "Per-requestor write serviced memory accesses"),
    ADD_STAT(requestorReadTotalLat, UNIT_TICK,
             "Per-requestor read total memory access latency"),
    ADD_STAT(requestorWriteTotalLat, UNIT_TICK,
             "Per-requestor write total memory access latency"),
    ADD_STAT(requestorReadAvgLat,
             UNIT_RATE(Stats::Units::Tick, Stats::Units::Count),
             "Per-requestor read average memory access latency"),
    ADD_STAT(requestorWriteAvgLat,
             UNIT_RATE(Stats::Units::Tick, Stats::Units::Count),
             "Per-requestor write average memory access latency")

{
}

void
MemCtrl::CtrlStats::regStats()
{
    using namespace Stats;

    assert(ctrl.system());
    const auto max_requestors = ctrl.system()->maxRequestors();

    avgRdQLen.precision(2);
    avgWrQLen.precision(2);

    readPktSize.init(ceilLog2(ctrl.system()->cacheLineSize()) + 1);
    writePktSize.init(ceilLog2(ctrl.system()->cacheLineSize()) + 1);

    rdQLenPdf.init(ctrl.readBufferSize);
    wrQLenPdf.init(ctrl.writeBufferSize);

    rdPerTurnAround
        .init(ctrl.readBufferSize)
        .flags(nozero);
    wrPerTurnAround
        .init(ctrl.writeBufferSize)
        .flags(nozero);

    avgRdBWSys.precision(8);
    avgWrBWSys.precision(8);
    avgGap.precision(2);

    // per-requestor bytes read and written to memory
    requestorReadBytes
        .init(max_requestors)
        .flags(nozero | nonan);

    requestorWriteBytes
        .init(max_requestors)
        .flags(nozero | nonan);

    // per-requestor bytes read and written to memory rate
    requestorReadRate
        .flags(nozero | nonan)
        .precision(12);

    requestorReadAccesses
        .init(max_requestors)
        .flags(nozero);

    requestorWriteAccesses
        .init(max_requestors)
        .flags(nozero);

    requestorReadTotalLat
        .init(max_requestors)
        .flags(nozero | nonan);

    requestorReadAvgLat
        .flags(nonan)
        .precision(2);

    requestorWriteRate
        .flags(nozero | nonan)
        .precision(12);

    requestorWriteTotalLat
        .init(max_requestors)
        .flags(nozero | nonan);

    requestorWriteAvgLat
        .flags(nonan)
        .precision(2);

    for (int i = 0; i < max_requestors; i++) {
        const std::string requestor = ctrl.system()->getRequestorName(i);
        requestorReadBytes.subname(i, requestor);
        requestorReadRate.subname(i, requestor);
        requestorWriteBytes.subname(i, requestor);
        requestorWriteRate.subname(i, requestor);
        requestorReadAccesses.subname(i, requestor);
        requestorWriteAccesses.subname(i, requestor);
        requestorReadTotalLat.subname(i, requestor);
        requestorReadAvgLat.subname(i, requestor);
        requestorWriteTotalLat.subname(i, requestor);
        requestorWriteAvgLat.subname(i, requestor);
    }

    // Formula stats
    avgRdBWSys = (bytesReadSys) / simSeconds;
    avgWrBWSys = (bytesWrittenSys) / simSeconds;

    avgGap = totGap / (readReqs + writeReqs);

    requestorReadRate = requestorReadBytes / simSeconds;
    requestorWriteRate = requestorWriteBytes / simSeconds;
    requestorReadAvgLat = requestorReadTotalLat / requestorReadAccesses;
    requestorWriteAvgLat = requestorWriteTotalLat / requestorWriteAccesses;
}

void
MemCtrl::recvFunctional(PacketPtr pkt)
{
    if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
        // rely on the abstract memory
        dram->functionalAccess(pkt);
    } else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
        // rely on the abstract memory
        nvm->functionalAccess(pkt);
    } else {
        panic("Can't handle address range for packet %s\n",
              pkt->print());
    }
}

Port &
MemCtrl::getPort(const std::string &if_name, PortID idx)
{
    if (if_name != "port") {
        return QoS::MemCtrl::getPort(if_name, idx);
    } else {
        return port;
    }
}

bool
MemCtrl::allIntfDrained() const
{
    // ensure dram is in power down and refresh IDLE states
    bool dram_drained = !dram || dram->allRanksDrained();
    // No outstanding NVM writes
    // All other queues verified as needed with calling logic
    bool nvm_drained = !nvm || nvm->allRanksDrained();
    return (dram_drained && nvm_drained);
}

DrainState
MemCtrl::drain()
{
    // if there is anything in any of our internal queues, keep track
    // of that as well
    if (!(!totalWriteQueueSize && !totalReadQueueSize && respQueue.empty() &&
          allIntfDrained())) {

        DPRINTF(Drain, "Memory controller not drained, write: %d, read: %d,"
                " resp: %d\n", totalWriteQueueSize, totalReadQueueSize,
                respQueue.size());

        // the only queue that is not drained automatically over time
        // is the write queue, thus kick things into action if needed
        if (totalWriteQueueSize && !nextReqEvent.scheduled()) {
            schedule(nextReqEvent, curTick());
        }

        if (dram)
            dram->drainRanks();

        return DrainState::Draining;
    } else {
        return DrainState::Drained;
    }
}

void
MemCtrl::drainResume()
{
    if (!isTimingMode && system()->isTimingMode()) {
        // if we switched to timing mode, kick things into action,
        // and behave as if we restored from a checkpoint
        startup();
        dram->startup();
    } else if (isTimingMode && !system()->isTimingMode()) {
        // if we switch from timing mode, stop the refresh events to
        // not cause issues with KVM
        if (dram)
            dram->suspend();
    }

    // update the mode
    isTimingMode = system()->isTimingMode();
}

MemCtrl::MemoryPort::MemoryPort(const std::string& name, MemCtrl& _ctrl)
    : QueuedResponsePort(name, &_ctrl, queue), queue(_ctrl, *this, true),
      ctrl(_ctrl)
{ }

AddrRangeList
MemCtrl::MemoryPort::getAddrRanges() const
{
    AddrRangeList ranges;
    if (ctrl.dram) {
        DPRINTF(DRAM, "Pushing DRAM ranges to port\n");
        ranges.push_back(ctrl.dram->getAddrRange());
    }
    if (ctrl.nvm) {
        DPRINTF(NVM, "Pushing NVM ranges to port\n");
        ranges.push_back(ctrl.nvm->getAddrRange());
    }
    return ranges;
}

void
MemCtrl::MemoryPort::recvFunctional(PacketPtr pkt)
{
    pkt->pushLabel(ctrl.name());

    if (!queue.trySatisfyFunctional(pkt)) {
        // Default implementation of SimpleTimingPort::recvFunctional()
        // calls recvAtomic() and throws away the latency; we can save a
        // little here by just not calculating the latency.
        ctrl.recvFunctional(pkt);
    }

    pkt->popLabel();
}

Tick
MemCtrl::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return ctrl.recvAtomic(pkt);
}

Tick
MemCtrl::MemoryPort::recvAtomicBackdoor(
        PacketPtr pkt, MemBackdoorPtr &backdoor)
{
    return ctrl.recvAtomicBackdoor(pkt, backdoor);
}

bool
MemCtrl::MemoryPort::recvTimingReq(PacketPtr pkt)
{
    // pass it to the memory controller
    return ctrl.recvTimingReq(pkt);
}
BurstHelper::burstCount
const unsigned int burstCount
Number of bursts requred for a system packet.
Definition: mem_ctrl.hh:77
MemPacket::rank
const uint8_t rank
Will be populated by address decoder.
Definition: mem_ctrl.hh:113
fatal
#define fatal(...)
This implements a cprintf based fatal() function.
Definition: logging.hh:183
BurstHelper::burstsServiced
unsigned int burstsServiced
Number of bursts serviced so far for a system packet.
Definition: mem_ctrl.hh:80
MemPacket::requestorId
RequestorID requestorId() const
Get the packet RequestorID (interface compatibility with Packet)
Definition: mem_ctrl.hh:165
MemCtrl::processNextReqEvent
void processNextReqEvent()
Bunch of things requires to setup "events" in gem5 When event "respondEvent" occurs for example,...
Definition: mem_ctrl.cc:867
QoS
Definition: mem_ctrl.cc:42
Event::scheduled
bool scheduled() const
Determine if the current event is scheduled.
Definition: eventq.hh:462
MemCtrl::nextReqEvent
EventFunctionWrapper nextReqEvent
Definition: mem_ctrl.hh:290
NVMInterface::numWritesQueued
uint32_t numWritesQueued
Definition: mem_interface.hh:1148
UNIT_BYTE
#define UNIT_BYTE
Definition: units.hh:43
MemCtrl::CtrlStats::avgRdQLen
Stats::Average avgRdQLen
Definition: mem_ctrl.hh:545
Packet::isResponse
bool isResponse() const
Definition: packet.hh:561
MemCtrl::MemoryPort::recvFunctional
void recvFunctional(PacketPtr pkt) override
Receive a functional request packet from the peer.
Definition: mem_ctrl.cc:1473
MemBackdoor
Definition: backdoor.hh:38
MemPacket::burstHelper
BurstHelper * burstHelper
A pointer to the BurstHelper if this MemPacket is a split packet If not a split packet (common case),...
Definition: mem_ctrl.hh:142
AbstractMemory::getAddrRange
AddrRange getAddrRange() const
Get the address range.
Definition: abstract_mem.cc:238
MemCtrl::CtrlStats::writePktSize
Stats::Vector writePktSize
Definition: mem_ctrl.hh:551
MemInterface::minWriteToReadDataGap
Tick minWriteToReadDataGap() const
Definition: mem_interface.hh:264
system.hh
Packet::cacheResponding
bool cacheResponding() const
Definition: packet.hh:620
MemCtrl::startup
virtual void startup() override
startup() is the final initialization call before simulation.
Definition: mem_ctrl.cc:105
MemCtrl::writeQueueFull
bool writeQueueFull(unsigned int pkt_count) const
Check if the write queue has room for more entries.
Definition: mem_ctrl.cc:180
MemCtrl::CtrlStats::writeReqs
Stats::Scalar writeReqs
Definition: mem_ctrl.hh:538
QoS::MemCtrl::WRITE
@ WRITE
Definition: mem_ctrl.hh:63
System::isTimingMode
bool isTimingMode() const
Is the system in timing mode?
Definition: system.hh:264
MemCtrl::writeLowThreshold
const uint32_t writeLowThreshold
Definition: mem_ctrl.hh:482
MemPacket::isDram
bool isDram() const
Return true if its a DRAM access.
Definition: mem_ctrl.hh:194
Packet::getAddr
Addr getAddr() const
Definition: packet.hh:755
MemCtrl::CtrlStats::requestorWriteBytes
Stats::Vector requestorWriteBytes
Definition: mem_ctrl.hh:569
QoS::MemCtrl::schedule
uint8_t schedule(RequestorID id, uint64_t data)
Definition: mem_ctrl.cc:207
MemPacket::readyTime
Tick readyTime
When will request leave the controller.
Definition: mem_ctrl.hh:99
QoS::MemCtrl::recordTurnaroundStats
void recordTurnaroundStats()
Record statistics on turnarounds based on busStateNext and busState values.
Definition: mem_ctrl.cc:348
Packet::payloadDelay
uint32_t payloadDelay
The extra pipelining delay from seeing the packet until the end of payload is transmitted by the comp...
Definition: packet.hh:413
MemCtrl::CtrlStats::bytesReadSys
Stats::Scalar bytesReadSys
Definition: mem_ctrl.hh:558
ArmISA::i
Bitfield< 7 > i
Definition: miscregs_types.hh:63
MemCtrl::CtrlStats::requestorReadAccesses
Stats::Vector requestorReadAccesses
Definition: mem_ctrl.hh:576
MemCtrl::doBurstAccess
void doBurstAccess(MemPacket *mem_pkt)
Actually do the burst based on media specific access function.
Definition: mem_ctrl.cc:810
MemCtrl::CtrlStats::writeBursts
Stats::Scalar writeBursts
Definition: mem_ctrl.hh:540
MemCtrl::writeQueue
std::vector< MemPacketQueue > writeQueue
Definition: mem_ctrl.hh:435
MemCtrl::getPort
Port & getPort(const std::string &if_name, PortID idx=InvalidPortID) override
Get a port with a given name and index.
Definition: mem_ctrl.cc:1386
QoS::MemCtrl::numPriorities
uint8_t numPriorities() const
Gets the total number of priority levels in the QoS memory controller.
Definition: mem_ctrl.hh:346
MemCtrl::drain
DrainState drain() override
Draining is the process of clearing out the states of SimObjects.These are the SimObjects that are pa...
Definition: mem_ctrl.cc:1407
DRAMInterface::respondEvent
void respondEvent(uint8_t rank)
Complete response process for DRAM when read burst is complete This will update the counters and chec...
Definition: mem_interface.cc:940
Packet::isRead
bool isRead() const
Definition: packet.hh:557
UNIT_TICK
#define UNIT_TICK
Definition: units.hh:40
Tick
uint64_t Tick
Tick count type.
Definition: types.hh:59
DRAMInterface::isBusy
bool isBusy()
This function checks if ranks are actively refreshing and therefore busy.
Definition: mem_interface.cc:892
DRAMInterface::setupRank
void setupRank(const uint8_t rank, const bool is_read) override
Setup the rank based on packet received.
Definition: mem_interface.cc:929
MemCtrl::CtrlStats::numRdRetry
Stats::Scalar numRdRetry
Definition: mem_ctrl.hh:548
PortID
int16_t PortID
Port index/ID type, and a symbolic name for an invalid port id.
Definition: types.hh:243
Packet::pushLabel
void pushLabel(const std::string &lbl)
Push label for PrintReq (safe to call unconditionally).
Definition: packet.hh:1394
AddrRange::contains
bool contains(const Addr &a) const
Determine if the range contains an address.
Definition: addr_range.hh:435
QoS::MemCtrl::READ
@ READ
Definition: mem_ctrl.hh:63
Packet::requestorId
RequestorID requestorId() const
Definition: packet.hh:741
std::vector
STL vector class.
Definition: stl.hh:37
MemCtrl::isInWriteQueue
std::unordered_set< Addr > isInWriteQueue
To avoid iterating over the write queue to check for overlapping transactions, maintain a set of burs...
Definition: mem_ctrl.hh:444
Packet::getSize
unsigned getSize() const
Definition: packet.hh:765
MemCtrl::drainResume
virtual void drainResume() override
Resume execution after a successful drain.
Definition: mem_ctrl.cc:1434
MemCtrl::burstAlign
Addr burstAlign(Addr addr, bool is_dram) const
Burst-align an address.
Definition: mem_ctrl.cc:1176
MemCtrl::CtrlStats::servicedByWrQ
Stats::Scalar servicedByWrQ
Definition: mem_ctrl.hh:541
QueuedResponsePort::schedTimingResp
void schedTimingResp(PacketPtr pkt, Tick when)
Schedule the sending of a timing response.
Definition: qport.hh:90
MemCtrl::CtrlStats::readPktSize
Stats::Vector readPktSize
Definition: mem_ctrl.hh:550
MemCtrl::CtrlStats::regStats
void regStats() override
Callback to set stat parameters.
Definition: mem_ctrl.cc:1275
MemCtrl::readBufferSize
const uint32_t readBufferSize
The following are basic design parameters of the memory controller, and are initialized based on para...
Definition: mem_ctrl.hh:479
NVMInterface::accessLatency
Tick accessLatency() const override
Definition: mem_interface.hh:1228
Packet::headerDelay
uint32_t headerDelay
The extra delay from seeing the packet until the header is transmitted.
Definition: packet.hh:395
Packet::print
void print(std::ostream &o, int verbosity=0, const std::string &prefix="") const
Definition: packet.cc:389
AbstractMemory::functionalAccess
void functionalAccess(PacketPtr pkt)
Perform an untimed memory read or write without changing anything but the memory itself.
Definition: abstract_mem.cc:470
MemCtrl::selQueue
std::vector< MemPacketQueue > & selQueue(bool is_read)
Select either the read or write queue.
Definition: mem_ctrl.hh:602
MemPacket::entryTime
const Tick entryTime
When did request enter the controller.
Definition: mem_ctrl.hh:96
MemCtrl::retryWrReq
bool retryWrReq
Definition: mem_ctrl.hh:281
DrainState::Drained
@ Drained
Buffers drained, ready for serialization/handover.
QoS::MemCtrl::qosSchedule
uint8_t qosSchedule(std::initializer_list< Queues * > queues_ptr, uint64_t queue_entry_size, const PacketPtr pkt)
Assign priority to a packet by executing the configured QoS policy.
Definition: mem_ctrl.hh:475
MemCtrl::allIntfDrained
bool allIntfDrained() const
Ensure that all interfaced have drained commands.
Definition: mem_ctrl.cc:1396
NVMInterface::setupRank
void setupRank(const uint8_t rank, const bool is_read) override
Setup the rank based on packet received.
Definition: mem_interface.cc:2068
MemCtrl::CtrlStats::wrQLenPdf
Stats::Vector wrQLenPdf
Definition: mem_ctrl.hh:553
MemCtrl::readsThisTime
uint32_t readsThisTime
Definition: mem_ctrl.hh:485
MemCtrl::CtrlStats::requestorWriteAccesses
Stats::Vector requestorWriteAccesses
Definition: mem_ctrl.hh:577
DrainState
DrainState
Object drain/handover states.
Definition: drain.hh:71
MemCtrl::writesThisTime
uint32_t writesThisTime
Definition: mem_ctrl.hh:484
NVMInterface::allRanksDrained
bool allRanksDrained() const override
Check drain state of NVM interface.
Definition: mem_interface.hh:1169
QoS::MemCtrl::logResponse
void logResponse(BusState dir, RequestorID id, uint8_t qos, Addr addr, uint64_t entries, double delay)
Called upon receiving a response; updates statistics and queue status.
Definition: mem_ctrl.cc:138
MemCtrl::CtrlStats::rdQLenPdf
Stats::Vector rdQLenPdf
Definition: mem_ctrl.hh:552
NVMInterface::writeRespQueueFull
bool writeRespQueueFull() const
Check if the write response queue has reached defined threshold.
Definition: mem_interface.hh:1236
MemCtrl::prevArrival
Tick prevArrival
Definition: mem_ctrl.hh:518
QoS::MemCtrl::totalWriteQueueSize
uint64_t totalWriteQueueSize
Total write request packets queue length in #packets.
Definition: mem_ctrl.hh:113
MemCtrl::CtrlStats::bytesWrittenSys
Stats::Scalar bytesWrittenSys
Definition: mem_ctrl.hh:559
MemCtrl::CtrlStats::mergedWrBursts
Stats::Scalar mergedWrBursts
Definition: mem_ctrl.hh:542
ceilLog2
int ceilLog2(const T &n)
Definition: intmath.hh:88
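For illustration, a straightforward reading of this helper, assuming n >= 1; the actual intmath.hh version may compute it differently:

    #include <cstdint>

    // Smallest k with (1 << k) >= n, e.g. ceilLog2(8) == 3 and ceilLog2(9) == 4.
    int ceilLog2(uint64_t n)
    {
        int k = 0;
        while ((uint64_t{1} << k) < n)
            ++k;
        return k;
    }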
MemCtrl::MemoryPort::recvAtomic
Tick recvAtomic(PacketPtr pkt) override
Receive an atomic request packet from the peer.
Definition: mem_ctrl.cc:1488
Packet::qosValue
uint8_t qosValue() const
QoS value getter. Returns 0 if the QoS value was never set (constructor default).
Definition: packet.hh:730
divCeil
T divCeil(const T &a, const U &b)
Definition: intmath.hh:114
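divCeil is round-up integer division; the controller uses it, for example, to turn a request size into a burst count. A sketch matching the signature above:

    // Round-up integer division, e.g. divCeil(100, 64) == 2:
    // a 100-byte request needs two 64-byte bursts.
    template <typename T, typename U>
    T divCeil(const T &a, const U &b)
    {
        return (a + b - 1) / b;
    }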
mem_interface.hh
AbstractMemory::access
void access(PacketPtr pkt)
Perform an untimed memory access and update all the state (e.g.
Definition: abstract_mem.cc:368
MemCtrl::minReadToWriteDataGap
Tick minReadToWriteDataGap()
Calculate the minimum delay used when scheduling a read-to-write transition.
Definition: mem_ctrl.cc:1160
DRAMInterface::burstReady
bool burstReady(MemPacket *pkt) const override
Check if a burst operation can be issued to the DRAM.
Definition: mem_interface.hh:971
MemCtrl::CtrlStats::requestorReadTotalLat
Stats::Vector requestorReadTotalLat
Definition: mem_ctrl.hh:580
MemCtrl::minWriteToReadDataGap
Tick minWriteToReadDataGap()
Calculate the minimum delay used when scheduling a write-to-read transition.
Definition: mem_ctrl.cc:1168
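Both gap helpers plausibly combine the per-interface constraints indexed above; a sketch assuming an absent interface contributes MaxTick, so a single connected medium simply supplies its own gap:

    #include <algorithm>
    #include <cstdint>

    using Tick = uint64_t;
    const Tick MaxTick = ~Tick(0);      // stand-in; see types.hh:61

    // Combine the DRAM and NVM turnaround constraints; pass MaxTick for a
    // medium that is not connected.
    Tick minDataGap(Tick dram_gap, Tick nvm_gap)
    {
        return std::min(dram_gap, nvm_gap);
    }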
DRAMInterface::checkRefreshState
void checkRefreshState(uint8_t rank)
Check the refresh state to determine if refresh needs to be kicked back into action after a read resp...
Definition: mem_interface.cc:987
QoS::MemCtrl::logRequest
void logRequest(BusState dir, RequestorID id, uint8_t qos, Addr addr, uint64_t entries)
Called upon receiving a request; updates statistics and queue status.
Definition: mem_ctrl.cc:81
QoS::MemCtrl::busStateNext
BusState busStateNext
bus state for next request event triggered
Definition: mem_ctrl.hh:122
DRAMInterface::chooseNextFRFCFS
std::pair< MemPacketQueue::iterator, Tick > chooseNextFRFCFS(MemPacketQueue &queue, Tick min_col_at) const override
For FR-FCFS policy, find first DRAM command that can issue.
Definition: mem_interface.cc:169
SimObject::getPort
virtual Port & getPort(const std::string &if_name, PortID idx=InvalidPortID)
Get a port with a given name and index.
Definition: sim_object.cc:120
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:237
ADD_STAT
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Definition: group.hh:71
MemCtrl::frontendLatency
const Tick frontendLatency
Pipeline latency of the controller frontend.
Definition: mem_ctrl.hh:498
MemCtrl::inWriteBusState
bool inWriteBusState(bool next_state) const
Check the current direction of the memory channel.
Definition: mem_ctrl.cc:797
MemCtrl::respondEvent
EventFunctionWrapper respondEvent
Definition: mem_ctrl.hh:293
MemCtrl::stats
CtrlStats stats
Definition: mem_ctrl.hh:588
Packet::cmdString
const std::string & cmdString() const
Return the string name of the cmd field (for debugging and tracing).
Definition: packet.hh:552
MemCtrl::memSchedPolicy
Enums::MemSched memSchedPolicy
Memory controller configuration initialized based on parameter values.
Definition: mem_ctrl.hh:491
MemCtrl::nextBurstAt
Tick nextBurstAt
Until when must we wait before issuing the next RD/WR burst?
Definition: mem_ctrl.hh:516
Port
Ports are used to interface objects to each other.
Definition: port.hh:56
DRAMInterface::doBurstAccess
std::pair< Tick, Tick > doBurstAccess(MemPacket *mem_pkt, Tick next_burst_at, const std::vector< MemPacketQueue > &queue)
Actually do the burst - figure out the latency it will take to service the req based on bank state,...
Definition: mem_interface.cc:456
Packet::needsResponse
bool needsResponse() const
Definition: packet.hh:571
QoS::MemCtrl::totalReadQueueSize
uint64_t totalReadQueueSize
Total read request packets queue length in #packets.
Definition: mem_ctrl.hh:110
MemCtrl::CtrlStats::readReqs
Stats::Scalar readReqs
Definition: mem_ctrl.hh:537
MemCtrl::MemoryPort::getAddrRanges
AddrRangeList getAddrRanges() const override
Get a list of the non-overlapping address ranges the owner is responsible for.
Definition: mem_ctrl.cc:1458
DRAMInterface::commandOffset
Tick commandOffset() const override
Definition: mem_interface.hh:927
MemCtrl::pruneBurstTick
void pruneBurstTick()
Remove commands that have already issued from burstTicks.
Definition: mem_ctrl.cc:676
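A sketch of the pruning idea, assuming burstTicks holds one entry per issued command, keyed by its window-start tick (see burstTicks below):

    #include <cstdint>
    #include <unordered_set>

    using Tick = uint64_t;

    // Drop bookkeeping for command windows that lie entirely in the past.
    void pruneBurstTicks(std::unordered_multiset<Tick> &burstTicks, Tick now)
    {
        for (auto it = burstTicks.begin(); it != burstTicks.end(); ) {
            if (*it < now)
                it = burstTicks.erase(it);  // window closed; no longer needed
            else
                ++it;
        }
    }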
MemPacket::size
unsigned int size
The size of this DRAM packet in bytes. It is always equal to or smaller than the burst size.
Definition: mem_ctrl.hh:136
MemCtrl::CtrlStats::rdPerTurnAround
Stats::Histogram rdPerTurnAround
Definition: mem_ctrl.hh:554
NVMInterface::commandOffset
Tick commandOffset() const override
Definition: mem_interface.hh:1174
MemCtrl::CtrlStats::avgWrQLen
Stats::Average avgWrQLen
Definition: mem_ctrl.hh:546
Drainable::signalDrainDone
void signalDrainDone() const
Signal that an object is drained.
Definition: drain.hh:301
MemCtrl::printQs
void printQs() const
Used for debugging to observe the contents of the queues.
Definition: mem_ctrl.cc:391
MemCtrl::dram
DRAMInterface *const dram
Create pointer to interface of the actual dram media when connected.
Definition: mem_ctrl.hh:466
MemCtrl::processRespondEvent
void processRespondEvent()
Definition: mem_ctrl.cc:493
MemPacket
A memory packet stores packets along with the timestamp of when the packet entered the queue,...
Definition: mem_ctrl.hh:91
QueuedResponsePort
A queued port is a port that has an infinite queue for outgoing packets and thus decouples the module...
Definition: qport.hh:58
NVMInterface::isBusy
bool isBusy(bool read_queue_empty, bool all_writes_nvm)
This function checks if ranks are busy.
Definition: mem_interface.cc:2476
mem_ctrl.hh
MemCtrl::writeHighThreshold
const uint32_t writeHighThreshold
Definition: mem_ctrl.hh:481
DRAMInterface::startup
void startup() override
Iterate through dram ranks and instantiate per rank startup routine.
Definition: mem_interface.cc:879
UNIT_COUNT
#define UNIT_COUNT
Definition: units.hh:49
NVMInterface::doBurstAccess
std::pair< Tick, Tick > doBurstAccess(MemPacket *pkt, Tick next_burst_at)
Actually do the burst and update stats.
Definition: mem_interface.cc:2285
Addr
uint64_t Addr
Address type. This will probably be moved somewhere else in the near future.
Definition: types.hh:148
MemCtrl::nvm
NVMInterface *const nvm
Create pointer to interface of the actual nvm media when connected.
Definition: mem_ctrl.hh:471
simSeconds
Stats::Formula & simSeconds
Definition: stats.cc:42
DRAMInterface::allRanksDrained
bool allRanksDrained() const override
Return true once refresh is complete for all ranks and there are no additional commands enqueued.
Definition: mem_interface.cc:1014
name
const std::string & name()
Definition: trace.cc:48
MemCtrl::packetReady
bool packetReady(MemPacket *pkt)
Determine if there is a packet that can issue.
Definition: mem_ctrl.cc:1153
LaneSize::Byte
@ Byte
Stats::nozero
const FlagsType nozero
Don't print if this is zero.
Definition: info.hh:58
MemCtrl::init
virtual void init() override
init() is called after all C++ SimObjects have been created and all ports are connected.
Definition: mem_ctrl.cc:95
Packet::hasData
bool hasData() const
Definition: packet.hh:577
Drainable::drainState
DrainState drainState() const
Return the current drain state of an object.
Definition: drain.hh:320
MemCtrl::retryRdReq
bool retryRdReq
Remember if we have to retry a request when available.
Definition: mem_ctrl.hh:280
MemCtrl::commandWindow
const Tick commandWindow
Length of a command window, used to check command bandwidth.
Definition: mem_ctrl.hh:511
MemCtrl::isTimingMode
bool isTimingMode
Remember if the memory system is in timing mode.
Definition: mem_ctrl.hh:275
X86ISA::addr
Bitfield< 3 > addr
Definition: types.hh:80
SimObject::name
virtual const std::string name() const
Definition: sim_object.hh:182
MemCtrl::respQueue
std::deque< MemPacket * > respQueue
Response queue where read packets wait after we're done working with them, but it's not time to send ...
Definition: mem_ctrl.hh:454
MemCtrl::verifySingleCmd
Tick verifySingleCmd(Tick cmd_tick, Tick max_cmds_per_burst)
Check for command bus contention for single cycle command.
Definition: mem_ctrl.cc:697
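A simplified sketch of the single-command check, assuming commands are bucketed by window-start tick as in the burstTicks bookkeeping; the real definition at mem_ctrl.cc:697 carries more bookkeeping:

    #include <cstddef>
    #include <cstdint>
    #include <unordered_set>

    using Tick = uint64_t;

    // If the window that would hold cmd_tick is already full, slip the
    // command to the start of the next free window.
    Tick verifySingleCmd(std::unordered_multiset<Tick> &burstTicks,
                         Tick cmd_tick, Tick window,
                         std::size_t max_cmds_per_window)
    {
        Tick win = cmd_tick - (cmd_tick % window);
        while (burstTicks.count(win) >= max_cmds_per_window) {
            win += window;
            cmd_tick = win;             // command delayed to a free window
        }
        burstTicks.insert(win);         // claim a slot in the chosen window
        return cmd_tick;
    }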
QoS::MemCtrl::turnPolicy
const std::unique_ptr< TurnaroundPolicy > turnPolicy
QoS Bus Turnaround Policy: selects the bus direction (READ/WRITE)
Definition: mem_ctrl.hh:70
UNIT_RATE
#define UNIT_RATE(T1, T2)
Definition: units.hh:47
MemCtrl::writeBufferSize
const uint32_t writeBufferSize
Definition: mem_ctrl.hh:480
MemCtrl::pendingDelete
std::unique_ptr< Packet > pendingDelete
Upstream caches need this packet until true is returned, so hold it for deletion until a subsequent c...
Definition: mem_ctrl.hh:594
DRAMInterface::addRankToRankDelay
void addRankToRankDelay(Tick cmd_at) override
Add rank to rank delay to bus timing to all DRAM banks in all ranks when access to an alternate inte...
Definition: mem_interface.cc:717
MemCtrl::CtrlStats::bytesReadWrQ
Stats::Scalar bytesReadWrQ
Definition: mem_ctrl.hh:557
AbstractMemory::getBackdoor
void getBackdoor(MemBackdoorPtr &bd_ptr)
Definition: abstract_mem.hh:231
panic_if
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
Definition: logging.hh:197
MemCtrl::addToWriteQueue
void addToWriteQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram)
Decode the incoming pkt, create a mem_pkt and push to the back of the write queue.
Definition: mem_ctrl.cc:310
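A self-contained sketch of the enqueue loop's merge behaviour, with plain addresses standing in for MemPacket objects:

    #include <cstdint>
    #include <deque>
    #include <unordered_set>

    using Addr = uint64_t;

    // Split a write of 'size' bytes into burst-sized pieces; a burst whose
    // aligned address is already outstanding is merged rather than re-queued.
    void addToWriteQueue(Addr addr, unsigned size, uint32_t burst_size,
                         std::deque<Addr> &writeQueue,
                         std::unordered_set<Addr> &isInWriteQueue)
    {
        const Addr end = addr + size;
        while (addr < end) {
            const Addr burst_addr = addr & ~Addr(burst_size - 1);
            if (isInWriteQueue.count(burst_addr) == 0) {
                writeQueue.push_back(burst_addr);   // new outstanding burst
                isInWriteQueue.insert(burst_addr);
            }                                       // else: merged write burst
            addr = burst_addr + burst_size;
        }
    }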
MemCtrl::accessAndRespond
void accessAndRespond(PacketPtr pkt, Tick static_latency)
When a packet reaches its "readyTime" in the response Q, use the "access()" method in AbstractMemory ...
Definition: mem_ctrl.cc:632
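In outline (simplified from the definition at mem_ctrl.cc:632; 'mem' is a stand-in for whichever interface, dram or nvm, owns the address):

    // Do the untimed access, then schedule the response after the static
    // pipeline latency plus any delay already carried by the packet.
    void accessAndRespond(PacketPtr pkt, Tick static_latency)
    {
        bool needs_response = pkt->needsResponse();
        mem->access(pkt);   // untimed access; turns pkt into a response in place
        if (needs_response) {
            Tick response_time = curTick() + static_latency
                               + pkt->headerDelay + pkt->payloadDelay;
            pkt->headerDelay = pkt->payloadDelay = 0;   // now accounted for
            port.schedTimingResp(pkt, response_time);
        } else {
            // the requestor needs the packet until the request call returns,
            // so defer deletion (see pendingDelete above)
            pendingDelete.reset(pkt);
        }
    }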
MemPacket::pkt
const PacketPtr pkt
This comes from the outside world.
Definition: mem_ctrl.hh:102
MemCtrl
The memory controller is a single-channel memory controller capturing the most important timing const...
Definition: mem_ctrl.hh:236
MemCtrl::verifyMultiCmd
Tick verifyMultiCmd(Tick cmd_tick, Tick max_cmds_per_burst, Tick max_multi_cmd_split=0)
Check for command bus contention for multi-cycle (2 currently) command.
Definition: mem_ctrl.cc:720
MemInterface::bytesPerBurst
uint32_t bytesPerBurst() const
Definition: mem_interface.hh:233
MemCtrl::minWritesPerSwitch
const uint32_t minWritesPerSwitch
Definition: mem_ctrl.hh:483
MemCtrl::CtrlStats::CtrlStats
CtrlStats(MemCtrl &ctrl)
Definition: mem_ctrl.cc:1184
MemPacket::qosValue
void qosValue(const uint8_t qv)
Set the packet QoS value (interface compatibility with Packet)
Definition: mem_ctrl.hh:153
MemCtrl::recvTimingReq
bool recvTimingReq(PacketPtr pkt)
Definition: mem_ctrl.cc:416
NVMInterface::addRankToRankDelay
void addRankToRankDelay(Tick cmd_at) override
Add rank to rank delay to bus timing to all NVM banks in all ranks when access to an alternate inter...
Definition: mem_interface.cc:2459
MemCtrl::port
MemoryPort port
Our incoming port; for a multi-ported controller, add a crossbar in front of it.
Definition: mem_ctrl.hh:270
Packet
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:258
MemInterface::minReadToWriteDataGap
Tick minReadToWriteDataGap() const
Definition: mem_interface.hh:258
std::deque< MemPacket * >
MemCtrl::inReadBusState
bool inReadBusState(bool next_state) const
Check the current direction of the memory channel.
Definition: mem_ctrl.cc:784
MemCtrl::addToReadQueue
void addToReadQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram)
When a new read comes in, first check if the write q has a pending request to the same address....
Definition: mem_ctrl.cc:191
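The quick filter behind that check can be sketched as below; note this is a simplification, since a set hit only justifies the detailed byte-range comparison against the matching write packet:

    #include <cstdint>
    #include <unordered_set>

    using Addr = uint64_t;

    // A new read is checked burst-by-burst against the write queue; if every
    // burst is covered by queued write data, no memory access is needed.
    bool servicedByWriteQueue(Addr addr, unsigned size, uint32_t burst_size,
                              const std::unordered_set<Addr> &isInWriteQueue)
    {
        const Addr end = addr + size;
        while (addr < end) {
            const Addr burst_addr = addr & ~Addr(burst_size - 1);
            if (isInWriteQueue.count(burst_addr) == 0)
                return false;           // this burst must go to memory
            addr = burst_addr + burst_size;
        }
        return true;                    // counted in servicedByWrQ
    }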
MemCtrl::readQueue
std::vector< MemPacketQueue > readQueue
The controller's main read and write queues, with support for QoS reordering.
Definition: mem_ctrl.hh:434
MemCtrl::CtrlStats::numWrRetry
Stats::Scalar numWrRetry
Definition: mem_ctrl.hh:549
Port::isConnected
bool isConnected() const
Is this port currently connected to a peer?
Definition: port.hh:128
MemCtrl::CtrlStats::totGap
Stats::Scalar totGap
Definition: mem_ctrl.hh:564
Stats::DistBase::sample
void sample(const U &v, int n=1)
Add a value to the distribution n times.
Definition: statistics.hh:1323
Packet::isWrite
bool isWrite() const
Definition: packet.hh:558
NVMInterface::chooseNextFRFCFS
std::pair< MemPacketQueue::iterator, Tick > chooseNextFRFCFS(MemPacketQueue &queue, Tick min_col_at) const override
For FR-FCFS policy, find the first NVM command that can issue; defaults to the first command to a prepped region.
Definition: mem_interface.cc:2080
MemPacket::addr
Addr addr
The starting address of the packet.
Definition: mem_ctrl.hh:130
MemCtrl::CtrlStats::requestorReadBytes
Stats::Vector requestorReadBytes
Definition: mem_ctrl.hh:568
MemCtrl::CtrlStats::wrPerTurnAround
Stats::Histogram wrPerTurnAround
Definition: mem_ctrl.hh:555
MemCtrl::burstTicks
std::unordered_multiset< Tick > burstTicks
Holds a count of the commands issued in the burst window starting at a given Tick.
Definition: mem_ctrl.hh:461
Stats
Definition: statistics.cc:53
QoS::MemCtrl::system
System * system() const
read the system pointer
Definition: mem_ctrl.hh:350
MemCtrl::getBurstWindow
Tick getBurstWindow(Tick cmd_tick)
Calculate burst window aligned tick.
Definition: mem_ctrl.cc:689
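A sketch of the window alignment, matching the commandWindow parameter listed above:

    #include <cstdint>

    using Tick = uint64_t;

    // Align a tick down to the start of its command window.
    Tick getBurstWindow(Tick cmd_tick, Tick commandWindow)
    {
        return cmd_tick - (cmd_tick % commandWindow);
    }
    // e.g. with a window of 6 ticks, ticks 0..5 map to 0 and ticks 6..11 to 6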
curTick
Tick curTick()
The universal simulation clock.
Definition: cur_tick.hh:43
trace.hh
MemCtrl::recvFunctional
void recvFunctional(PacketPtr pkt)
Definition: mem_ctrl.cc:1371
ResponsePort::sendRetryReq
void sendRetryReq()
Send a retry to the request port that previously attempted a sendTimingReq to this response port and ...
Definition: port.hh:398
MemCtrl::CtrlStats::readBursts
Stats::Scalar readBursts
Definition: mem_ctrl.hh:539
DRAMInterface::drainRanks
void drainRanks()
Iterate through dram ranks to exit self-refresh in order to drain.
Definition: mem_interface.cc:1000
MemCtrl::chooseNextFRFCFS
MemPacketQueue::iterator chooseNextFRFCFS(MemPacketQueue &queue, Tick extra_col_delay)
For FR-FCFS policy reorder the read/write queue depending on row buffer hits and earliest bursts avai...
Definition: mem_ctrl.cc:590
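When both media are present, a plausible outline of the arbitration is: ask each interface for its best FR-FCFS candidate, then keep whichever can start its column access first (member names as indexed on this page; the full definition also handles single-interface configurations):

    // Outline for the two-media case; min_col_at is the earliest tick at
    // which a column command could still issue seamlessly.
    Tick min_col_at = std::max(nextBurstAt + extra_col_delay, curTick());
    auto [dram_it, dram_col_at] = dram->chooseNextFRFCFS(queue, min_col_at);
    auto [nvm_it, nvm_col_at]   = nvm->chooseNextFRFCFS(queue, min_col_at);
    // take whichever medium can issue its burst sooner (ties favor DRAM)
    auto chosen = (dram_col_at <= nvm_col_at) ? dram_it : nvm_it;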
MemCtrl::MemoryPort::MemoryPort
MemoryPort(const std::string &name, MemCtrl &_ctrl)
Definition: mem_ctrl.cc:1452
MipsISA::p
Bitfield< 0 > p
Definition: pra_constants.hh:323
std::list< AddrRange >
MemCtrl::backendLatency
const Tick backendLatency
Pipeline latency of the backend and PHY.
Definition: mem_ctrl.hh:505
MemCtrl::CtrlStats::requestorWriteTotalLat
Stats::Vector requestorWriteTotalLat
Definition: mem_ctrl.hh:581
fatal_if
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condi...
Definition: logging.hh:219
MemCtrl::recvAtomicBackdoor
Tick recvAtomicBackdoor(PacketPtr pkt, MemBackdoorPtr &backdoor)
Definition: mem_ctrl.cc:156
NVMInterface::chooseRead
void chooseRead(MemPacketQueue &queue)
Select read command to issue asynchronously.
Definition: mem_interface.cc:2136
MemCtrl::chooseNext
MemPacketQueue::iterator chooseNext(MemPacketQueue &queue, Tick extra_col_delay)
The memory scheduler/arbiter - picks which request needs to go next, based on the specified policy suc...
Definition: mem_ctrl.cc:555
DRAMInterface::accessLatency
Tick accessLatency() const override
Definition: mem_interface.hh:932
ResponsePort::sendRangeChange
void sendRangeChange() const
Called by the owner to send a range change.
Definition: port.hh:293
MemCtrl::MemoryPort::recvAtomicBackdoor
Tick recvAtomicBackdoor(PacketPtr pkt, MemBackdoorPtr &backdoor) override
Receive an atomic request packet from the peer, and optionally provide a backdoor to the data being a...
Definition: mem_ctrl.cc:1494
MemCtrl::recvAtomic
Tick recvAtomic(PacketPtr pkt)
Definition: mem_ctrl.cc:121
QoS::MemCtrl::busState
BusState busState
Bus state used to control the read/write switching and drive the scheduling of the next request.
Definition: mem_ctrl.hh:119
QoS::MemCtrl::selectNextBusState
BusState selectNextBusState()
Returns next bus direction (READ or WRITE) based on configured policy.
Definition: mem_ctrl.cc:236
MemInterface::decodePacket
MemPacket * decodePacket(const PacketPtr pkt, Addr pkt_addr, unsigned int size, bool is_read, bool is_dram)
Address decoder to figure out physical mapping onto ranks, banks, and rows.
Definition: mem_interface.cc:83
DRAMInterface::suspend
void suspend()
Iterate through DRAM ranks and suspend them.
Definition: mem_interface.cc:1029
MaxTick
const Tick MaxTick
Definition: types.hh:61
NVMInterface::readsWaitingToIssue
bool readsWaitingToIssue() const
Definition: mem_interface.hh:1242
MemPacket::isRead
bool isRead() const
Return true if its a read packet (interface compatibility with Packet)
Definition: mem_ctrl.hh:183
Stats::nonan
const FlagsType nonan
Don't print if this is NAN.
Definition: info.hh:60
MemCtrl::MemoryPort::recvTimingReq
bool recvTimingReq(PacketPtr) override
Receive a timing request from the peer.
Definition: mem_ctrl.cc:1501
Packet::popLabel
void popLabel()
Pop label for PrintReq (safe to call unconditionally).
Definition: packet.hh:1404
DrainState::Draining
@ Draining
Draining buffers pending serialization/handover.
MemCtrl::readQueueFull
bool readQueueFull(unsigned int pkt_count) const
Check if the read queue has room for more entries.
Definition: mem_ctrl.cc:168
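A sketch consistent with the brief above: responses not yet sent still occupy read-buffer entries, so they count too:

    #include <cstddef>
    #include <cstdint>

    // Reads in flight include both queued reads and pending responses.
    bool readQueueFull(std::uint64_t read_q_size, std::size_t resp_q_size,
                       unsigned needed_entries, std::uint32_t read_buffer_size)
    {
        return read_q_size + resp_q_size + needed_entries > read_buffer_size;
    }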
MemCtrl::nextReqTime
Tick nextReqTime
The soonest you have to start thinking about the next request is the longest access time that can occ...
Definition: mem_ctrl.hh:526
panic
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:171
NVMInterface::burstReady
bool burstReady(MemPacket *pkt) const override
Check if a burst operation can be issued to the NVM.
Definition: mem_interface.cc:2276
MemCtrl::MemCtrl
MemCtrl(const MemCtrlParams &p)
Definition: mem_ctrl.cc:52
BurstHelper
A burst helper helps organize and manage a packet that is larger than the memory burst size.
Definition: mem_ctrl.hh:72
ArmISA::offset
Bitfield< 23, 0 > offset
Definition: types.hh:153

Generated on Tue Mar 23 2021 19:41:27 for gem5 by doxygen 1.8.17