gem5  v22.0.0.2
nvm_interface.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2010-2020 ARM Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Copyright (c) 2013 Amin Farmahini-Farahani
15  * All rights reserved.
16  *
17  * Redistribution and use in source and binary forms, with or without
18  * modification, are permitted provided that the following conditions are
19  * met: redistributions of source code must retain the above copyright
20  * notice, this list of conditions and the following disclaimer;
21  * redistributions in binary form must reproduce the above copyright
22  * notice, this list of conditions and the following disclaimer in the
23  * documentation and/or other materials provided with the distribution;
24  * neither the name of the copyright holders nor the names of its
25  * contributors may be used to endorse or promote products derived from
26  * this software without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39  */
40 
41 #include "mem/nvm_interface.hh"
42 
43 #include "base/bitfield.hh"
44 #include "base/cprintf.hh"
45 #include "base/trace.hh"
46 #include "debug/NVM.hh"
47 #include "sim/system.hh"
48 
49 namespace gem5
50 {
51 
52 namespace memory
53 {
54 
55 NVMInterface::NVMInterface(const NVMInterfaceParams &_p)
56  : MemInterface(_p),
57  maxPendingWrites(_p.max_pending_writes),
58  maxPendingReads(_p.max_pending_reads),
59  twoCycleRdWr(_p.two_cycle_rdwr),
60  tREAD(_p.tREAD), tWRITE(_p.tWRITE), tSEND(_p.tSEND),
61  stats(*this),
62  writeRespondEvent([this]{ processWriteRespondEvent(); }, name()),
63  readReadyEvent([this]{ processReadReadyEvent(); }, name()),
64  nextReadAt(0), numPendingReads(0), numReadDataReady(0),
65  numReadsToIssue(0)
66 {
67  DPRINTF(NVM, "Setting up NVM Interface\n");
68 
69  fatal_if(!isPowerOf2(burstSize), "NVM burst size %d is not allowed, "
70  "must be a power of two\n", burstSize);
71 
72  // sanity check the ranks since we rely on bit slicing for the
73  // address decoding
74  fatal_if(!isPowerOf2(ranksPerChannel), "NVM rank count of %d is "
75  "not allowed, must be a power of two\n", ranksPerChannel);
76 
77  for (int i =0; i < ranksPerChannel; i++) {
78  // Add NVM ranks to the system
79  DPRINTF(NVM, "Creating NVM rank %d \n", i);
80  Rank* rank = new Rank(_p, i, *this);
81  ranks.push_back(rank);
82  }
83 
84  uint64_t capacity = 1ULL << ceilLog2(AbstractMemory::size());
85 
86  DPRINTF(NVM, "NVM capacity %lld (%lld) bytes\n", capacity,
88 
89  rowsPerBank = capacity / (rowBufferSize *
90  banksPerRank * ranksPerChannel);
91 }
92 
93 NVMInterface::Rank::Rank(const NVMInterfaceParams &_p,
94  int _rank, NVMInterface& _nvm)
95  : EventManager(&_nvm), rank(_rank), banks(_p.banks_per_rank)
96 {
97  for (int b = 0; b < _p.banks_per_rank; b++) {
98  banks[b].bank = b;
99  // No bank groups; simply assign to bank number
100  banks[b].bankgr = b;
101  }
102 }
103 
104 void
106 {
108 }
109 
110 void NVMInterface::setupRank(const uint8_t rank, const bool is_read)
111 {
112  if (is_read) {
113  // increment count to trigger read and track number of reads in Q
114  numReadsToIssue++;
115  } else {
116  // increment count to track number of writes in Q
117  numWritesQueued++;
118  }
119 }
120 
121 MemPacket*
123  unsigned size, bool is_read, uint8_t pseudo_channel)
124 {
125  // decode the address based on the address mapping scheme, with
126  // Ro, Ra, Co, Ba and Ch denoting row, rank, column, bank and
127  // channel, respectively
128  uint8_t rank;
129  uint8_t bank;
130  // use a 64-bit unsigned during the computations as the row is
131  // always the top bits, and check before creating the packet
132  uint64_t row;
133 
134  // Get packed address, starting at 0
135  Addr addr = getCtrlAddr(pkt_addr);
136 
137  // truncate the address to a memory burst, which makes it unique to
138  // a specific buffer, row, bank, rank and channel
139  addr = addr / burstSize;
140 
141  // we have removed the lowest order address bits that denote the
142  // position within the column
143  if (addrMapping == enums::RoRaBaChCo || addrMapping == enums::RoRaBaCoCh) {
144  // the lowest order bits denote the column to ensure that
145  // sequential cache lines occupy the same row
147 
148  // after the channel bits, get the bank bits to interleave
149  // over the banks
150  bank = addr % banksPerRank;
151  addr = addr / banksPerRank;
152 
153  // after the bank, we get the rank bits which thus interleaves
154  // over the ranks
155  rank = addr % ranksPerChannel;
157 
158  // lastly, get the row bits, no need to remove them from addr
159  row = addr % rowsPerBank;
160  } else if (addrMapping == enums::RoCoRaBaCh) {
161  // with emerging technologies, could have small page size with
162  // interleaving granularity greater than row buffer
164  // remove column bits which are a subset of burstsPerStripe
166  } else {
167  // remove lower column bits below channel bits
169  }
170 
171  // start with the bank bits, as this provides the maximum
172  // opportunity for parallelism between requests
173  bank = addr % banksPerRank;
174  addr = addr / banksPerRank;
175 
176  // next get the rank bits
177  rank = addr % ranksPerChannel;
179 
180  // next, the higher-order column bites
183  }
184 
185  // lastly, get the row bits, no need to remove them from addr
186  row = addr % rowsPerBank;
187  } else
188  panic("Unknown address mapping policy chosen!");
189 
190  assert(rank < ranksPerChannel);
191  assert(bank < banksPerRank);
192  assert(row < rowsPerBank);
193  assert(row < Bank::NO_ROW);
194 
195  DPRINTF(NVM, "Address: %#x Rank %d Bank %d Row %d\n",
196  pkt_addr, rank, bank, row);
197 
198  // create the corresponding memory packet with the entry time and
199  // ready time set to the current tick, the latter will be updated
200  // later
201  uint16_t bank_id = banksPerRank * rank + bank;
202 
203  return new MemPacket(pkt, is_read, false, pseudo_channel, rank, bank, row,
204  bank_id, pkt_addr, size);
205 }
206 
209 {
210  // remember if we found a hit, but one that cannit issue seamlessly
211  bool found_prepped_pkt = false;
212 
213  auto selected_pkt_it = queue.end();
214  Tick selected_col_at = MaxTick;
215 
216  for (auto i = queue.begin(); i != queue.end() ; ++i) {
217  MemPacket* pkt = *i;
218 
219  // select optimal NVM packet in Q
220  if (!pkt->isDram()) {
221  const Bank& bank = ranks[pkt->rank]->banks[pkt->bank];
222  const Tick col_allowed_at = pkt->isRead() ? bank.rdAllowedAt :
223  bank.wrAllowedAt;
224 
225  // check if rank is not doing a refresh and thus is available,
226  // if not, jump to the next packet
227  if (burstReady(pkt)) {
228  DPRINTF(NVM, "%s bank %d - Rank %d available\n", __func__,
229  pkt->bank, pkt->rank);
230 
231  // no additional rank-to-rank or media delays
232  if (col_allowed_at <= min_col_at) {
233  // FCFS within entries that can issue without
234  // additional delay, such as same rank accesses
235  // or media delay requirements
236  selected_pkt_it = i;
237  selected_col_at = col_allowed_at;
238  // no need to look through the remaining queue entries
239  DPRINTF(NVM, "%s Seamless buffer hit\n", __func__);
240  break;
241  } else if (!found_prepped_pkt) {
242  // packet is to prepped region but cannnot issue
243  // seamlessly; remember this one and continue
244  selected_pkt_it = i;
245  selected_col_at = col_allowed_at;
246  DPRINTF(NVM, "%s Prepped packet found \n", __func__);
247  found_prepped_pkt = true;
248  }
249  } else {
250  DPRINTF(NVM, "%s bank %d - Rank %d not available\n", __func__,
251  pkt->bank, pkt->rank);
252  }
253  }
254  }
255 
256  if (selected_pkt_it == queue.end()) {
257  DPRINTF(NVM, "%s no available NVM ranks found\n", __func__);
258  }
259 
260  return std::make_pair(selected_pkt_it, selected_col_at);
261 }
262 
263 void
265 {
266  Tick cmd_at = std::max(curTick(), nextReadAt);
267 
268  // This method does the arbitration between non-deterministic read
269  // requests to NVM. The chosen packet is not removed from the queue
270  // at this time. Removal from the queue will occur when the data is
271  // ready and a separate SEND command is issued to retrieve it via the
272  // chooseNext function in the top-level controller.
273  assert(!queue.empty());
274 
275  assert(numReadsToIssue > 0);
276  numReadsToIssue--;
277  // For simplicity, issue non-deterministic reads in order (fcfs)
278  for (auto i = queue.begin(); i != queue.end() ; ++i) {
279  MemPacket* pkt = *i;
280 
281  // Find 1st NVM read packet that hasn't issued read command
282  if (pkt->readyTime == MaxTick && !pkt->isDram() && pkt->isRead()) {
283  // get the bank
284  Bank& bank_ref = ranks[pkt->rank]->banks[pkt->bank];
285 
286  // issueing a read, inc counter and verify we haven't overrun
287  numPendingReads++;
288  assert(numPendingReads <= maxPendingReads);
289 
290  // increment the bytes accessed and the accesses per row
291  bank_ref.bytesAccessed += burstSize;
292 
293  // Verify command bandiwth to issue
294  // Host can issue read immediately uith buffering closer
295  // to the NVM. The actual execution at the NVM may be delayed
296  // due to busy resources
297  if (twoCycleRdWr) {
298  cmd_at = ctrl->verifyMultiCmd(cmd_at,
300  } else {
301  cmd_at = ctrl->verifySingleCmd(cmd_at,
302  maxCommandsPerWindow, false);
303  }
304 
305  // Update delay to next read
306  // Ensures single read command issued per cycle
307  nextReadAt = cmd_at + tCK;
308 
309  // If accessing a new location in this bank, update timing
310  // and stats
311  if (bank_ref.openRow != pkt->row) {
312  // update the open bank, re-using row field
313  bank_ref.openRow = pkt->row;
314 
315  // sample the bytes accessed to a buffer in this bank
316  // here when we are re-buffering the data
318  // start counting anew
319  bank_ref.bytesAccessed = 0;
320 
321  // holdoff next command to this bank until the read completes
322  // and the data has been successfully buffered
323  // can pipeline accesses to the same bank, sending them
324  // across the interface B2B, but will incur full access
325  // delay between data ready responses to different buffers
326  // in a bank
327  bank_ref.actAllowedAt = std::max(cmd_at,
328  bank_ref.actAllowedAt) + tREAD;
329  }
330  // update per packet readyTime to holdoff burst read operation
331  // overloading readyTime, which will be updated again when the
332  // burst is issued
333  pkt->readyTime = std::max(cmd_at, bank_ref.actAllowedAt);
334 
335  DPRINTF(NVM, "Issuing NVM Read to bank %d at tick %d. "
336  "Data ready at %d\n",
337  bank_ref.bank, cmd_at, pkt->readyTime);
338 
339  // Insert into read ready queue. It will be handled after
340  // the media delay has been met
341  if (readReadyQueue.empty()) {
342  assert(!readReadyEvent.scheduled());
344  } else if (readReadyEvent.when() > pkt->readyTime) {
345  // move it sooner in time, to the first read with data
347  } else {
348  assert(readReadyEvent.scheduled());
349  }
350  readReadyQueue.push_back(pkt->readyTime);
351 
352  // found an NVM read to issue - break out
353  break;
354  }
355  }
356 }
357 
358 void
360 {
361  // signal that there is read data ready to be transmitted
363 
364  DPRINTF(NVM,
365  "processReadReadyEvent(): Data for an NVM read is ready. "
366  "numReadDataReady is %d\t numPendingReads is %d\n",
368 
369  // Find lowest ready time and verify it is equal to curTick
370  // also find the next lowest to schedule next event
371  // Done with this response, erase entry
372  auto ready_it = readReadyQueue.begin();
373  Tick next_ready_at = MaxTick;
374  for (auto i = readReadyQueue.begin(); i != readReadyQueue.end() ; ++i) {
375  if (*ready_it > *i) {
376  next_ready_at = *ready_it;
377  ready_it = i;
378  } else if ((next_ready_at > *i) && (i != ready_it)) {
379  next_ready_at = *i;
380  }
381  }
382 
383  // Verify we found the time of this event and remove it
384  assert(*ready_it == curTick());
385  readReadyQueue.erase(ready_it);
386 
387  if (!readReadyQueue.empty()) {
388  assert(readReadyQueue.front() >= curTick());
389  assert(!readReadyEvent.scheduled());
390  schedule(readReadyEvent, next_ready_at);
391  }
392 
393  // It is possible that a new command kicks things back into
394  // action before reaching this point but need to ensure that we
395  // continue to process new commands as read data becomes ready
396  // This will also trigger a drain if needed
397  if (!ctrl->requestEventScheduled()) {
398  DPRINTF(NVM, "Restart controller scheduler immediately\n");
400  }
401 }
402 
403 bool
405  bool read_rdy = pkt->isRead() && (ctrl->inReadBusState(true)) &&
406  (pkt->readyTime <= curTick()) && (numReadDataReady > 0);
407  bool write_rdy = !pkt->isRead() && !ctrl->inReadBusState(true) &&
409  return (read_rdy || write_rdy);
410 }
411 
414  const std::vector<MemPacketQueue>& queue)
415 {
416  DPRINTF(NVM, "NVM Timing access to addr %#x, rank/bank/row %d %d %d\n",
417  pkt->addr, pkt->rank, pkt->bank, pkt->row);
418 
419  // get the bank
420  Bank& bank_ref = ranks[pkt->rank]->banks[pkt->bank];
421 
422  // respect any constraints on the command
423  const Tick bst_allowed_at = pkt->isRead() ?
424  bank_ref.rdAllowedAt : bank_ref.wrAllowedAt;
425 
426  // we need to wait until the bus is available before we can issue
427  // the command; need minimum of tBURST between commands
428  Tick cmd_at = std::max(bst_allowed_at, curTick());
429 
430  // we need to wait until the bus is available before we can issue
431  // the command; need minimum of tBURST between commands
432  cmd_at = std::max(cmd_at, next_burst_at);
433 
434  // Verify there is command bandwidth to issue
435  // Read burst (send command) is a simple data access and only requires
436  // one command cycle
437  // Write command may require multiple cycles to enable larger address space
438  if (pkt->isRead() || !twoCycleRdWr) {
439  cmd_at = ctrl->verifySingleCmd(cmd_at, maxCommandsPerWindow, false);
440  } else {
441  cmd_at = ctrl->verifyMultiCmd(cmd_at, maxCommandsPerWindow, tCK);
442  }
443  // update the packet ready time to reflect when data will be transferred
444  // Use the same bus delays defined for NVM
445  pkt->readyTime = cmd_at + tSEND + tBURST;
446 
447  Tick dly_to_rd_cmd;
448  Tick dly_to_wr_cmd;
449  for (auto n : ranks) {
450  for (int i = 0; i < banksPerRank; i++) {
451  // base delay is a function of tBURST and bus turnaround
452  dly_to_rd_cmd = pkt->isRead() ? tBURST : writeToReadDelay();
453  dly_to_wr_cmd = pkt->isRead() ? readToWriteDelay() : tBURST;
454 
455  if (pkt->rank != n->rank) {
456  // adjust timing for different ranks
457  // Need to account for rank-to-rank switching with tCS
458  dly_to_wr_cmd = rankToRankDelay();
459  dly_to_rd_cmd = rankToRankDelay();
460  }
461  n->banks[i].rdAllowedAt = std::max(cmd_at + dly_to_rd_cmd,
462  n->banks[i].rdAllowedAt);
463 
464  n->banks[i].wrAllowedAt = std::max(cmd_at + dly_to_wr_cmd,
465  n->banks[i].wrAllowedAt);
466  }
467  }
468 
469  DPRINTF(NVM, "NVM Access to %#x, ready at %lld.\n",
470  pkt->addr, pkt->readyTime);
471 
472  if (pkt->isRead()) {
473  // completed the read, decrement counters
474  assert(numPendingReads != 0);
475  assert(numReadDataReady != 0);
476 
477  numPendingReads--;
479  } else {
480  // Adjust number of NVM writes in Q
481  assert(numWritesQueued > 0);
482  numWritesQueued--;
483 
484  // increment the bytes accessed and the accesses per row
485  // only increment for writes as the reads are handled when
486  // the non-deterministic read is issued, before the data transfer
487  bank_ref.bytesAccessed += burstSize;
488 
489  // Commands will be issued serially when accessing the same bank
490  // Commands can issue in parallel to different banks
491  if ((bank_ref.bank == pkt->bank) &&
492  (bank_ref.openRow != pkt->row)) {
493  // update the open buffer, re-using row field
494  bank_ref.openRow = pkt->row;
495 
496  // sample the bytes accessed to a buffer in this bank
497  // here when we are re-buffering the data
499  // start counting anew
500  bank_ref.bytesAccessed = 0;
501  }
502 
503  // Determine when write will actually complete, assuming it is
504  // scheduled to push to NVM immediately
505  // update actAllowedAt to serialize next command completion that
506  // accesses this bank; must wait until this write completes
507  // Data accesses to the same buffer in this bank
508  // can issue immediately after actAllowedAt expires, without
509  // waiting additional delay of tWRITE. Can revisit this
510  // assumption/simplification in the future.
511  bank_ref.actAllowedAt = std::max(pkt->readyTime,
512  bank_ref.actAllowedAt) + tWRITE;
513 
514  // Need to track number of outstanding writes to
515  // ensure 'buffer' on media controller does not overflow
516  assert(!writeRespQueueFull());
517 
518  // Insert into write done queue. It will be handled after
519  // the media delay has been met
520  if (writeRespQueueEmpty()) {
521  assert(!writeRespondEvent.scheduled());
523  } else {
524  assert(writeRespondEvent.scheduled());
525  }
526  writeRespQueue.push_back(bank_ref.actAllowedAt);
527  writeRespQueue.sort();
528  if (writeRespondEvent.when() > bank_ref.actAllowedAt) {
529  DPRINTF(NVM, "Rescheduled respond event from %lld to %11d\n",
530  writeRespondEvent.when(), bank_ref.actAllowedAt);
531  DPRINTF(NVM, "Front of response queue is %11d\n",
532  writeRespQueue.front());
534  }
535 
536  }
537 
538  // Update the stats
539  if (pkt->isRead()) {
540  stats.readBursts++;
542  stats.perBankRdBursts[pkt->bankId]++;
544 
545  // Update latency stats
546  stats.totMemAccLat += pkt->readyTime - pkt->entryTime;
548  stats.totQLat += cmd_at - pkt->entryTime;
549  } else {
550  stats.writeBursts++;
552  stats.perBankWrBursts[pkt->bankId]++;
553  }
554 
555  return std::make_pair(cmd_at, cmd_at + tBURST);
556 }
557 
558 void
560 {
561  DPRINTF(NVM,
562  "processWriteRespondEvent(): A NVM write reached its readyTime. "
563  "%d remaining pending NVM writes\n", writeRespQueue.size());
564 
565  // Update stat to track histogram of pending writes
567 
568  // Done with this response, pop entry
569  writeRespQueue.pop_front();
570 
571  if (!writeRespQueue.empty()) {
572  assert(writeRespQueue.front() >= curTick());
573  assert(!writeRespondEvent.scheduled());
575  }
576 
577  // It is possible that a new command kicks things back into
578  // action before reaching this point but need to ensure that we
579  // continue to process new commands as writes complete at the media and
580  // credits become available. This will also trigger a drain if needed
581  if (!ctrl->requestEventScheduled()) {
582  DPRINTF(NVM, "Restart controller scheduler immediately\n");
584  }
585 }
586 
587 void
589 {
590  // update timing for NVM ranks due to bursts issued
591  // to ranks for other media interfaces
592  for (auto n : ranks) {
593  for (int i = 0; i < banksPerRank; i++) {
594  // different rank by default
595  // Need to only account for rank-to-rank switching
596  n->banks[i].rdAllowedAt = std::max(cmd_at + rankToRankDelay(),
597  n->banks[i].rdAllowedAt);
598  n->banks[i].wrAllowedAt = std::max(cmd_at + rankToRankDelay(),
599  n->banks[i].wrAllowedAt);
600  }
601  }
602 }
603 
604 bool
605 NVMInterface::isBusy(bool read_queue_empty, bool all_writes_nvm)
606 {
607  DPRINTF(NVM,"isBusy: numReadDataReady = %d\n", numReadDataReady);
608  // Determine NVM is busy and cannot issue a burst
609  // A read burst cannot issue when data is not ready from the NVM
610  // Also check that we have reads queued to ensure we can change
611  // bus direction to service potential write commands.
612  // A write cannot issue once we've reached MAX pending writes
613  // Only assert busy for the write case when there are also
614  // no reads in Q and the write queue only contains NVM commands
615  // This allows the bus state to switch and service reads
616  return (ctrl->inReadBusState(true) ?
617  (numReadDataReady == 0) && !read_queue_empty :
618  writeRespQueueFull() && read_queue_empty &&
619  all_writes_nvm);
620 }
621 
623  : statistics::Group(&_nvm),
624  nvm(_nvm),
625 
626  ADD_STAT(readBursts, statistics::units::Count::get(),
627  "Number of NVM read bursts"),
628  ADD_STAT(writeBursts, statistics::units::Count::get(),
629  "Number of NVM write bursts"),
630 
631  ADD_STAT(perBankRdBursts, statistics::units::Count::get(),
632  "Per bank write bursts"),
633  ADD_STAT(perBankWrBursts, statistics::units::Count::get(),
634  "Per bank write bursts"),
635 
636  ADD_STAT(totQLat, statistics::units::Tick::get(),
637  "Total ticks spent queuing"),
638  ADD_STAT(totBusLat, statistics::units::Tick::get(),
639  "Total ticks spent in databus transfers"),
640  ADD_STAT(totMemAccLat, statistics::units::Tick::get(),
641  "Total ticks spent from burst creation until serviced "
642  "by the NVM"),
643  ADD_STAT(avgQLat, statistics::units::Rate<
644  statistics::units::Tick, statistics::units::Count>::get(),
645  "Average queueing delay per NVM burst"),
646  ADD_STAT(avgBusLat, statistics::units::Rate<
647  statistics::units::Tick, statistics::units::Count>::get(),
648  "Average bus latency per NVM burst"),
649  ADD_STAT(avgMemAccLat, statistics::units::Rate<
650  statistics::units::Tick, statistics::units::Count>::get(),
651  "Average memory access latency per NVM burst"),
652 
653  ADD_STAT(avgRdBW, statistics::units::Rate<
654  statistics::units::Byte, statistics::units::Second>::get(),
655  "Average DRAM read bandwidth in MiBytes/s"),
656  ADD_STAT(avgWrBW, statistics::units::Rate<
657  statistics::units::Byte, statistics::units::Second>::get(),
658  "Average DRAM write bandwidth in MiBytes/s"),
659  ADD_STAT(peakBW, statistics::units::Rate<
660  statistics::units::Byte, statistics::units::Second>::get(),
661  "Theoretical peak bandwidth in MiByte/s"),
662  ADD_STAT(busUtil, statistics::units::Ratio::get(),
663  "NVM Data bus utilization in percentage"),
664  ADD_STAT(busUtilRead, statistics::units::Ratio::get(),
665  "NVM Data bus read utilization in percentage"),
666  ADD_STAT(busUtilWrite, statistics::units::Ratio::get(),
667  "NVM Data bus write utilization in percentage"),
668 
669  ADD_STAT(pendingReads, statistics::units::Count::get(),
670  "Reads issued to NVM for which data has not been transferred"),
671  ADD_STAT(pendingWrites, statistics::units::Count::get(),
672  "Number of outstanding writes to NVM"),
673  ADD_STAT(bytesPerBank, statistics::units::Byte::get(),
674  "Bytes read within a bank before loading new bank")
675 
676 {
677 }
678 
679 void
681 {
682  using namespace statistics;
683 
684  perBankRdBursts.init(nvm.ranksPerChannel == 0 ? 1 :
685  nvm.banksPerRank * nvm.ranksPerChannel);
686 
687  perBankWrBursts.init(nvm.ranksPerChannel == 0 ? 1 :
688  nvm.banksPerRank * nvm.ranksPerChannel);
689 
690  avgQLat.precision(2);
691  avgBusLat.precision(2);
692  avgMemAccLat.precision(2);
693 
694  avgRdBW.precision(2);
695  avgWrBW.precision(2);
696  peakBW.precision(2);
697 
698  busUtil.precision(2);
699  busUtilRead.precision(2);
700  busUtilWrite.precision(2);
701 
702  pendingReads
703  .init(nvm.maxPendingReads)
704  .flags(nozero);
705 
706  pendingWrites
707  .init(nvm.maxPendingWrites)
708  .flags(nozero);
709 
710  bytesPerBank
711  .init(nvm.rowBufferSize)
712  .flags(nozero);
713 
714  avgQLat = totQLat / readBursts;
715  avgBusLat = totBusLat / readBursts;
716  avgMemAccLat = totMemAccLat / readBursts;
717 
718  avgRdBW = (bytesRead / 1000000) / simSeconds;
719  avgWrBW = (bytesWritten / 1000000) / simSeconds;
720  peakBW = (sim_clock::Frequency / nvm.tBURST) *
721  nvm.burstSize / 1000000;
722 
723  busUtil = (avgRdBW + avgWrBW) / peakBW * 100;
724  busUtilRead = avgRdBW / peakBW * 100;
725  busUtilWrite = avgWrBW / peakBW * 100;
726 }
727 
728 } // namespace memory
729 } // namespace gem5
gem5::curTick
Tick curTick()
The universal simulation clock.
Definition: cur_tick.hh:46
gem5::memory::NVMInterface::chooseNextFRFCFS
std::pair< MemPacketQueue::iterator, Tick > chooseNextFRFCFS(MemPacketQueue &queue, Tick min_col_at) const override
For FR-FCFS policy, find first NVM command that can issue default to first command to prepped region.
Definition: nvm_interface.cc:208
gem5::memory::MemInterface::rowsPerBank
uint32_t rowsPerBank
Definition: mem_interface.hh:142
gem5::memory::NVMInterface
Interface to NVM devices with media specific parameters, statistics, and functions.
Definition: nvm_interface.hh:64
gem5::memory::MemInterface::Bank::bank
uint8_t bank
Definition: mem_interface.hh:93
gem5::Event::when
Tick when() const
Get the time that the event is scheduled.
Definition: eventq.hh:508
gem5::memory::NVMInterface::maxPendingReads
const uint32_t maxPendingReads
Definition: nvm_interface.hh:93
gem5::memory::MemInterface::readToWriteDelay
Tick readToWriteDelay() const
Definition: mem_interface.hh:161
gem5::memory::MemCtrl::verifySingleCmd
virtual Tick verifySingleCmd(Tick cmd_tick, Tick max_cmds_per_burst, bool row_cmd)
Check for command bus contention for single cycle command.
Definition: mem_ctrl.cc:674
system.hh
gem5::memory::MemPacket::readyTime
Tick readyTime
When will request leave the controller.
Definition: mem_ctrl.hh:106
gem5::memory::MemInterface::numWritesQueued
uint32_t numWritesQueued
NVM specific variable, but declaring it here allows treating different interfaces in a more general way.
Definition: mem_interface.hh:184
gem5::memory::MemInterface::tBURST
const Tick tBURST
Definition: mem_interface.hh:149
nvm_interface.hh
gem5::memory::NVMInterface::writeRespQueueFull
bool writeRespQueueFull() const override
Check if the write response queue has reached defined threshold.
Definition: nvm_interface.hh:286
memory
Definition: mem.h:38
gem5::memory::NVMInterface::NVMInterface
NVMInterface(const NVMInterfaceParams &_p)
Definition: nvm_interface.cc:55
gem5::memory::NVMInterface::readReadyEvent
EventFunctionWrapper readReadyEvent
Definition: nvm_interface.hh:150
gem5::memory::NVMInterface::chooseRead
void chooseRead(MemPacketQueue &queue) override
Select read command to issue asynchronously.
Definition: nvm_interface.cc:264
gem5::memory::NVMInterface::Rank::banks
std::vector< Bank > banks
Vector of NVM banks.
Definition: nvm_interface.hh:83
gem5::statistics::nozero
const FlagsType nozero
Don't print if this is zero.
Definition: info.hh:68
gem5::SimObject::init
virtual void init()
init() is called after all C++ SimObjects have been created and all ports are connected.
Definition: sim_object.cc:76
gem5::memory::NVMInterface::Rank::Rank
Rank(const NVMInterfaceParams &_p, int _rank, NVMInterface &_nvm)
Definition: nvm_interface.cc:93
gem5::memory::MemCtrl::restartScheduler
virtual void restartScheduler(Tick tick, uint8_t pseudo_channel=0)
Restart the controller. This can be used by interfaces to restart the scheduler after maintenance commands complete.
Definition: mem_ctrl.hh:748
gem5::memory::MemInterface::Bank
A basic class to track the bank state, i.e.
Definition: mem_interface.hh:86
gem5::memory::NVMInterface::addRankToRankDelay
void addRankToRankDelay(Tick cmd_at) override
Add rank to rank delay to bus timing to all NVM banks in alli ranks when access to an alternate inter...
Definition: nvm_interface.cc:588
gem5::memory::MemInterface::burstsPerStripe
const uint32_t burstsPerStripe
Definition: mem_interface.hh:139
gem5::sim_clock::Frequency
Tick Frequency
The simulated frequency of curTick(). (In ticks per second)
Definition: core.cc:48
gem5::memory::MemCtrl::requestEventScheduled
virtual bool requestEventScheduled(uint8_t pseudo_channel=0) const
Is there a read/write burst Event scheduled?
Definition: mem_ctrl.hh:733
gem5::MaxTick
const Tick MaxTick
Definition: types.hh:60
gem5::memory::MemInterface::Bank::wrAllowedAt
Tick wrAllowedAt
Definition: mem_interface.hh:97
gem5::memory::NVMInterface::processWriteRespondEvent
void processWriteRespondEvent()
Definition: nvm_interface.cc:559
gem5::memory::NVMInterface::setupRank
void setupRank(const uint8_t rank, const bool is_read) override
Setup the rank based on packet received.
Definition: nvm_interface.cc:110
gem5::memory::NVMInterface::numReadsToIssue
uint16_t numReadsToIssue
Definition: nvm_interface.hh:188
gem5::memory::NVMInterface::NVMStats::pendingReads
statistics::Histogram pendingReads
NVM stats.
Definition: nvm_interface.hh:140
gem5::EventManager::schedule
void schedule(Event &event, Tick when)
Definition: eventq.hh:1019
std::vector
STL vector class.
Definition: stl.hh:37
gem5::memory::NVMInterface::tREAD
const Tick tREAD
NVM specific timing requirements.
Definition: nvm_interface.hh:99
gem5::ArmISA::i
Bitfield< 7 > i
Definition: misc_types.hh:67
gem5::memory::NVMInterface::twoCycleRdWr
const bool twoCycleRdWr
Definition: nvm_interface.hh:94
gem5::isPowerOf2
static constexpr bool isPowerOf2(const T &n)
Definition: intmath.hh:98
gem5::statistics::DistBase::sample
void sample(const U &v, int n=1)
Add a value to the distribtion n times.
Definition: statistics.hh:1328
gem5::memory::MemInterface::burstsPerRowBuffer
const uint32_t burstsPerRowBuffer
Definition: mem_interface.hh:138
gem5::memory::NVMInterface::tWRITE
const Tick tWRITE
Definition: nvm_interface.hh:100
gem5::memory::NVMInterface::readReadyQueue
std::deque< Tick > readReadyQueue
Definition: nvm_interface.hh:167
gem5::memory::MemCtrl::inReadBusState
bool inReadBusState(bool next_state) const
Check the current direction of the memory channel.
Definition: mem_ctrl.cc:761
gem5::memory::NVMInterface::NVMStats::totBusLat
statistics::Scalar totBusLat
Definition: nvm_interface.hh:120
gem5::memory::MemPacket::entryTime
const Tick entryTime
When did request enter the controller.
Definition: mem_ctrl.hh:103
gem5::memory::MemInterface::addrMapping
enums::AddrMap addrMapping
Memory controller configuration initialized based on parameter values.
Definition: mem_interface.hh:126
gem5::EventManager
Definition: eventq.hh:987
gem5::memory::NVMInterface::NVMStats::totMemAccLat
statistics::Scalar totMemAccLat
Definition: nvm_interface.hh:121
gem5::memory::MemInterface::Bank::bytesAccessed
uint32_t bytesAccessed
Definition: mem_interface.hh:102
gem5::memory::MemPacket::addr
Addr addr
The starting address of the packet.
Definition: mem_ctrl.hh:140
gem5::memory::MemInterface::Bank::rdAllowedAt
Tick rdAllowedAt
Definition: mem_interface.hh:96
gem5::ArmISA::b
Bitfield< 7 > b
Definition: misc_types.hh:382
gem5::memory::MemInterface
General interface to memory device Includes functions and parameters shared across media types.
Definition: mem_interface.hh:74
bitfield.hh
gem5::memory::NVMInterface::NVMStats::bytesRead
statistics::Scalar bytesRead
Definition: nvm_interface.hh:128
gem5::memory::NVMInterface::numPendingReads
uint16_t numPendingReads
Definition: nvm_interface.hh:183
gem5::memory::MemInterface::banksPerRank
const uint32_t banksPerRank
Definition: mem_interface.hh:141
gem5::memory::NVMInterface::decodePacket
MemPacket * decodePacket(const PacketPtr pkt, Addr pkt_addr, unsigned int size, bool is_read, uint8_t pseudo_channel=0) override
Address decoder to figure out physical mapping onto ranks, banks, and rows.
Definition: nvm_interface.cc:122
gem5::memory::MemPacket::row
const uint32_t row
Definition: mem_ctrl.hh:125
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:186
ADD_STAT
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Definition: group.hh:75
gem5::Packet
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:291
gem5::memory::NVMInterface::writeRespondEvent
EventFunctionWrapper writeRespondEvent
Definition: nvm_interface.hh:147
gem5::Tick
uint64_t Tick
Tick count type.
Definition: types.hh:58
gem5::memory::MemPacket::isDram
bool isDram() const
Return true if its a DRAM access.
Definition: mem_ctrl.hh:204
gem5::memory::MemPacket
A memory packet stores packets along with the timestamp of when the packet entered the queue,...
Definition: mem_ctrl.hh:98
gem5::memory::NVMInterface::tSEND
const Tick tSEND
Definition: nvm_interface.hh:101
gem5::memory::NVMInterface::NVMStats::totQLat
statistics::Scalar totQLat
Definition: nvm_interface.hh:119
gem5::memory::NVMInterface::NVMStats::NVMStats
NVMStats(NVMInterface &nvm)
Definition: nvm_interface.cc:622
gem5::memory::NVMInterface::processReadReadyEvent
void processReadReadyEvent()
Definition: nvm_interface.cc:359
gem5::EventManager::reschedule
void reschedule(Event &event, Tick when, bool always=false)
Definition: eventq.hh:1037
cprintf.hh
std::pair
STL pair class.
Definition: stl.hh:58
gem5::memory::MemInterface::ctrl
MemCtrl * ctrl
A pointer to the parent memory controller instance.
Definition: mem_interface.hh:114
gem5::memory::NVMInterface::isBusy
bool isBusy(bool read_queue_empty, bool all_writes_nvm) override
This function checks if ranks are busy.
Definition: nvm_interface.cc:605
gem5::Addr
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:147
name
const std::string & name()
Definition: trace.cc:49
gem5::memory::MemInterface::rankToRankDelay
Tick rankToRankDelay() const
Definition: mem_interface.hh:166
gem5::memory::MemInterface::writeToReadDelay
virtual Tick writeToReadDelay() const
Definition: mem_interface.hh:156
gem5::memory::NVMInterface::init
void init() override
Initialize the NVM interface and verify parameters.
Definition: nvm_interface.cc:105
gem5::memory::NVMInterface::NVMStats::writeBursts
statistics::Scalar writeBursts
Definition: nvm_interface.hh:113
gem5::memory::NVMInterface::burstReady
bool burstReady(MemPacket *pkt) const override
Check if a burst operation can be issued to the NVM.
Definition: nvm_interface.cc:404
gem5::memory::MemInterface::Bank::openRow
uint32_t openRow
Definition: mem_interface.hh:92
gem5::memory::AbstractMemory::size
uint64_t size() const
Get the memory size.
Definition: abstract_mem.hh:301
gem5::memory::NVMInterface::NVMStats::bytesPerBank
statistics::Histogram bytesPerBank
Definition: nvm_interface.hh:142
gem5::memory::MemInterface::ranksPerChannel
const uint32_t ranksPerChannel
Definition: mem_interface.hh:140
gem5::memory::MemPacket::bank
const uint8_t bank
Definition: mem_ctrl.hh:124
gem5::ceilLog2
static constexpr int ceilLog2(const T &n)
Definition: intmath.hh:84
gem5::memory::NVMInterface::ranks
std::vector< Rank * > ranks
Vector of nvm ranks.
Definition: nvm_interface.hh:155
gem5::memory::NVMInterface::doBurstAccess
std::pair< Tick, Tick > doBurstAccess(MemPacket *pkt, Tick next_burst_at, const std::vector< MemPacketQueue > &queue) override
Actually do the burst and update stats.
Definition: nvm_interface.cc:413
gem5::memory::MemCtrl::verifyMultiCmd
virtual Tick verifyMultiCmd(Tick cmd_tick, Tick max_cmds_per_burst, Tick max_multi_cmd_split=0)
Check for command bus contention for multi-cycle (2 currently) command.
Definition: mem_ctrl.cc:697
gem5::memory::NVMInterface::NVMStats::perBankWrBursts
statistics::Vector perBankWrBursts
Definition: nvm_interface.hh:116
gem5::ArmISA::n
Bitfield< 31 > n
Definition: misc_types.hh:456
gem5::memory::NVMInterface::NVMStats::regStats
void regStats() override
Callback to set stat parameters.
Definition: nvm_interface.cc:680
gem5::memory::MemPacket::rank
const uint8_t rank
Will be populated by address decoder.
Definition: mem_ctrl.hh:123
std::deque
STL deque class.
Definition: stl.hh:44
gem5::memory::NVMInterface::NVMStats::readBursts
statistics::Scalar readBursts
NVM stats.
Definition: nvm_interface.hh:112
gem5::simSeconds
statistics::Formula & simSeconds
Definition: stats.cc:45
gem5::memory::NVMInterface::stats
NVMStats stats
Definition: nvm_interface.hh:144
gem5::statistics::Group
Statistics container.
Definition: group.hh:93
gem5::memory::NVMInterface::NVMStats::perBankRdBursts
statistics::Vector perBankRdBursts
Definition: nvm_interface.hh:115
gem5::memory::NVMInterface::NVMStats::pendingWrites
statistics::Histogram pendingWrites
Definition: nvm_interface.hh:141
gem5::memory::MemInterface::getCtrlAddr
Addr getCtrlAddr(Addr addr)
Get an address in a dense range which starts from 0.
Definition: mem_interface.hh:216
gem5::memory::NVMInterface::nextReadAt
Tick nextReadAt
Till when must we wait before issuing next read command?
Definition: nvm_interface.hh:179
gem5::memory::MemInterface::Bank::NO_ROW
static const uint32_t NO_ROW
Definition: mem_interface.hh:90
trace.hh
gem5::memory::NVMInterface::writeRespQueue
std::list< Tick > writeRespQueue
Holding queue for non-deterministic write commands, which maintains writes that have been issued but ...
Definition: nvm_interface.hh:165
gem5::memory::MemInterface::Bank::actAllowedAt
Tick actAllowedAt
Definition: mem_interface.hh:99
fatal_if
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condi...
Definition: logging.hh:226
gem5
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
Definition: gpu_translation_state.hh:37
gem5::memory::MemInterface::maxCommandsPerWindow
unsigned int maxCommandsPerWindow
Number of commands that can issue in the defined controller command window, used to verify command ba...
Definition: mem_interface.hh:120
gem5::memory::MemPacket::isRead
bool isRead() const
Return true if its a read packet (interface compatibility with Packet)
Definition: mem_ctrl.hh:193
gem5::memory::NVMInterface::numReadDataReady
uint16_t numReadDataReady
Definition: nvm_interface.hh:184
gem5::memory::NVMInterface::NVMStats::bytesWritten
statistics::Scalar bytesWritten
Definition: nvm_interface.hh:129
gem5::memory::MemInterface::burstSize
const uint32_t burstSize
General device and channel characteristics The rowsPerBank is determined based on the capacity,...
Definition: mem_interface.hh:133
gem5::memory::NVMInterface::writeRespQueueEmpty
bool writeRespQueueEmpty() const
Check if the write response queue is empty.
Definition: nvm_interface.hh:174
gem5::memory::MemPacket::bankId
const uint16_t bankId
Bank id is calculated considering banks in all the ranks eg: 2 ranks each with 8 banks,...
Definition: mem_ctrl.hh:132
gem5::memory::MemInterface::tCK
const GEM5_CLASS_VAR_USED Tick tCK
General timing requirements.
Definition: mem_interface.hh:147
gem5::Event::scheduled
bool scheduled() const
Determine if the current event is scheduled.
Definition: eventq.hh:465
panic
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:178
gem5::X86ISA::addr
Bitfield< 3 > addr
Definition: types.hh:84

Generated on Thu Jul 28 2022 13:32:34 for gem5 by doxygen 1.8.17