dram_interface.cc
1 /*
2  * Copyright (c) 2010-2020 ARM Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Copyright (c) 2013 Amin Farmahini-Farahani
15  * All rights reserved.
16  *
17  * Redistribution and use in source and binary forms, with or without
18  * modification, are permitted provided that the following conditions are
19  * met: redistributions of source code must retain the above copyright
20  * notice, this list of conditions and the following disclaimer;
21  * redistributions in binary form must reproduce the above copyright
22  * notice, this list of conditions and the following disclaimer in the
23  * documentation and/or other materials provided with the distribution;
24  * neither the name of the copyright holders nor the names of its
25  * contributors may be used to endorse or promote products derived from
26  * this software without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39  */
40 
41 #include "mem/dram_interface.hh"
42 
43 #include "base/bitfield.hh"
44 #include "base/cprintf.hh"
45 #include "base/trace.hh"
46 #include "debug/DRAM.hh"
47 #include "debug/DRAMPower.hh"
48 #include "debug/DRAMState.hh"
49 #include "sim/system.hh"
50 
51 namespace gem5
52 {
53 
54 using namespace Data;
55 
56 namespace memory
57 {
58 
59 std::pair<MemPacketQueue::iterator, Tick>
60 DRAMInterface::chooseNextFRFCFS(MemPacketQueue& queue, Tick min_col_at) const
61 {
62  std::vector<uint32_t> earliest_banks(ranksPerChannel, 0);
63 
64  // Has minBankPrep been called to populate earliest_banks?
65  bool filled_earliest_banks = false;
66  // can the PRE/ACT sequence be done without impacting utilization?
67  bool hidden_bank_prep = false;
68 
69  // search for seamless row hits first, if no seamless row hit is
70  // found then determine if there are other packets that can be issued
71  // without incurring additional bus delay due to bank timing
72  // Will select closed rows first to enable more open row possibilities
73  // in future selections
74  bool found_hidden_bank = false;
75 
76  // remember if we found a row hit, not seamless, but bank prepped
77  // and ready
78  bool found_prepped_pkt = false;
79 
80  // if we have no row hit, prepped or not, and no seamless packet,
81  // just go for the earliest possible
82  bool found_earliest_pkt = false;
83 
84  Tick selected_col_at = MaxTick;
85  auto selected_pkt_it = queue.end();
86 
87  for (auto i = queue.begin(); i != queue.end() ; ++i) {
88  MemPacket* pkt = *i;
89 
90  // select optimal DRAM packet in Q
91  if (pkt->isDram() && (pkt->pseudoChannel == pseudoChannel)) {
92  const Bank& bank = ranks[pkt->rank]->banks[pkt->bank];
93  const Tick col_allowed_at = pkt->isRead() ? bank.rdAllowedAt :
94  bank.wrAllowedAt;
95 
96  DPRINTF(DRAM, "%s checking DRAM packet in bank %d, row %d\n",
97  __func__, pkt->bank, pkt->row);
98 
99  // check if rank is not doing a refresh and thus is available,
100  // if not, jump to the next packet
101  if (burstReady(pkt)) {
102 
103  DPRINTF(DRAM,
104  "%s bank %d - Rank %d available\n", __func__,
105  pkt->bank, pkt->rank);
106 
107  // check if it is a row hit
108  if (bank.openRow == pkt->row) {
109  // no additional rank-to-rank or same bank-group
110  // delays, or we switched read/write and might as well
111  // go for the row hit
112  if (col_allowed_at <= min_col_at) {
113  // FCFS within the hits, giving priority to
114  // commands that can issue seamlessly, without
115  // additional delay, such as same rank accesses
116  // and/or different bank-group accesses
117  DPRINTF(DRAM, "%s Seamless buffer hit\n", __func__);
118  selected_pkt_it = i;
119  selected_col_at = col_allowed_at;
120  // no need to look through the remaining queue entries
121  break;
122  } else if (!found_hidden_bank && !found_prepped_pkt) {
123  // if we did not find a packet to a closed row that can
124  // issue the bank commands without incurring delay, and
125  // did not yet find a packet to a prepped row, remember
126  // the current one
127  selected_pkt_it = i;
128  selected_col_at = col_allowed_at;
129  found_prepped_pkt = true;
130  DPRINTF(DRAM, "%s Prepped row buffer hit\n", __func__);
131  }
132  } else if (!found_earliest_pkt) {
133  // if we have not initialised the bank status, do it
134  // now, and only once per scheduling decision
135  if (!filled_earliest_banks) {
136  // determine entries with earliest bank delay
137  std::tie(earliest_banks, hidden_bank_prep) =
138  minBankPrep(queue, min_col_at);
139  filled_earliest_banks = true;
140  }
141 
142  // bank is amongst first available banks
143  // minBankPrep will give priority to packets that can
144  // issue seamlessly
145  if (bits(earliest_banks[pkt->rank],
146  pkt->bank, pkt->bank)) {
147  found_earliest_pkt = true;
148  found_hidden_bank = hidden_bank_prep;
149 
150  // give priority to packets that can issue
151  // bank commands 'behind the scenes'
152  // any additional delay if any will be due to
153  // col-to-col command requirements
154  if (hidden_bank_prep || !found_prepped_pkt) {
155  selected_pkt_it = i;
156  selected_col_at = col_allowed_at;
157  }
158  }
159  }
160  } else {
161  DPRINTF(DRAM, "%s bank %d - Rank %d not available\n", __func__,
162  pkt->bank, pkt->rank);
163  }
164  }
165  }
166 
167  if (selected_pkt_it == queue.end()) {
168  DPRINTF(DRAM, "%s no available DRAM ranks found\n", __func__);
169  }
170 
171  return std::make_pair(selected_pkt_it, selected_col_at);
172 }
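// A minimal, self-contained sketch of the FR-FCFS priority order implemented
// above; SketchReq and chooseFrfcfsSketch are hypothetical names, not gem5
// API, and the bank-preparation tier is collapsed into a single
// earliest-issue fallback. Seamless row hits win outright, then the first
// prepped row hit, then the request that can issue earliest; scanning the
// queue front to back gives the FCFS tie-break.
#include <cstddef>
#include <cstdint>
#include <vector>

struct SketchReq { bool rowHit; uint64_t colAllowedAt; };

static std::size_t
chooseFrfcfsSketch(const std::vector<SketchReq>& q, uint64_t min_col_at)
{
    std::size_t prepped = q.size();  // first ready-but-not-seamless row hit
    std::size_t earliest = q.size(); // earliest-issue fallback
    uint64_t earliest_at = UINT64_MAX;
    for (std::size_t i = 0; i < q.size(); i++) {
        if (q[i].rowHit) {
            if (q[i].colAllowedAt <= min_col_at)
                return i; // seamless row hit: stop searching, as above
            if (prepped == q.size())
                prepped = i; // remember the first prepped row hit
        }
        if (q[i].colAllowedAt < earliest_at) {
            earliest = i;
            earliest_at = q[i].colAllowedAt;
        }
    }
    return prepped != q.size() ? prepped : earliest;
}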
173 
174 void
175 DRAMInterface::activateBank(Rank& rank_ref, Bank& bank_ref,
176  Tick act_tick, uint32_t row)
177 {
178  assert(rank_ref.actTicks.size() == activationLimit);
179 
180  // verify that we have command bandwidth to issue the activate
181  // if not, shift to next burst window
182  Tick act_at;
183  if (twoCycleActivate)
184  act_at = ctrl->verifyMultiCmd(act_tick, maxCommandsPerWindow, tAAD);
185  else
186  act_at = ctrl->verifySingleCmd(act_tick, maxCommandsPerWindow, true);
187 
188  DPRINTF(DRAM, "Activate at tick %d\n", act_at);
189 
190  // update the open row
191  assert(bank_ref.openRow == Bank::NO_ROW);
192  bank_ref.openRow = row;
193 
194  // start counting anew, this covers both the case when we
195  // auto-precharged, and when this access is forced to
196  // precharge
197  bank_ref.bytesAccessed = 0;
198  bank_ref.rowAccesses = 0;
199 
200  ++rank_ref.numBanksActive;
201  assert(rank_ref.numBanksActive <= banksPerRank);
202 
203  DPRINTF(DRAM, "Activate bank %d, rank %d at tick %lld, now got "
204  "%d active\n", bank_ref.bank, rank_ref.rank, act_at,
205  ranks[rank_ref.rank]->numBanksActive);
206 
207  rank_ref.cmdList.push_back(Command(MemCommand::ACT, bank_ref.bank,
208  act_at));
209 
210  DPRINTF(DRAMPower, "%llu,ACT,%d,%d\n", divCeil(act_at, tCK) -
211  timeStampOffset, bank_ref.bank, rank_ref.rank);
212 
213  // The next access has to respect tRAS for this bank
214  bank_ref.preAllowedAt = act_at + tRAS;
215 
216  // Respect the row-to-column command delay for both read and write cmds
217  bank_ref.rdAllowedAt = std::max(act_at + tRCD_RD, bank_ref.rdAllowedAt);
218  bank_ref.wrAllowedAt = std::max(act_at + tRCD_WR, bank_ref.wrAllowedAt);
219 
220  // start by enforcing tRRD
221  for (int i = 0; i < banksPerRank; i++) {
222  // next activate to any bank in this rank must not happen
223  // before tRRD
224  if (bankGroupArch && (bank_ref.bankgr == rank_ref.banks[i].bankgr)) {
225  // bank group architecture requires longer delays between
226  // ACT commands within the same bank group. Use tRRD_L
227  // in this case
228  rank_ref.banks[i].actAllowedAt = std::max(act_at + tRRD_L,
229  rank_ref.banks[i].actAllowedAt);
230  } else {
231  // use shorter tRRD value when either
232  // 1) bank group architecture is not supported
233  // 2) bank is in a different bank group
234  rank_ref.banks[i].actAllowedAt = std::max(act_at + tRRD,
235  rank_ref.banks[i].actAllowedAt);
236  }
237  }
238 
239  // next, we deal with tXAW, if the activation limit is disabled
240  // then we directly schedule an activate power event
241  if (!rank_ref.actTicks.empty()) {
242  // sanity check
243  if (rank_ref.actTicks.back() &&
244  (act_at - rank_ref.actTicks.back()) < tXAW) {
245  panic("Got %d activates in window %d (%llu - %llu) which "
246  "is smaller than %llu\n", activationLimit, act_at -
247  rank_ref.actTicks.back(), act_at,
248  rank_ref.actTicks.back(), tXAW);
249  }
250 
251  // shift the times used for the bookkeeping, the last element
252  // (highest index) is the oldest one and hence the lowest value
253  rank_ref.actTicks.pop_back();
254 
255  // record a new activation (in the future)
256  rank_ref.actTicks.push_front(act_at);
257 
258  // cannot activate more than X times in time window tXAW, push the
259  // next one (the X + 1'st activate) to be tXAW away from the
260  // oldest in our window of X
261  if (rank_ref.actTicks.back() &&
262  (act_at - rank_ref.actTicks.back()) < tXAW) {
263  DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate "
264  "no earlier than %llu\n", activationLimit,
265  rank_ref.actTicks.back() + tXAW);
266  for (int j = 0; j < banksPerRank; j++)
267  // next activate must not happen before end of window
268  rank_ref.banks[j].actAllowedAt =
269  std::max(rank_ref.actTicks.back() + tXAW,
270  rank_ref.banks[j].actAllowedAt);
271  }
272  }
273 
274  // at the point when this activate takes place, make sure we
275  // transition to the active power state
276  if (!rank_ref.activateEvent.scheduled())
277  schedule(rank_ref.activateEvent, act_at);
278  else if (rank_ref.activateEvent.when() > act_at)
279  // move it sooner in time
280  reschedule(rank_ref.activateEvent, act_at);
281 }
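// A short sketch of the tXAW sliding-window bookkeeping in activateBank()
// above, in plain standard C++ (ActWindowSketch is an illustrative name, not
// gem5 API). With an activation limit X (e.g. X = 4 for tFAW), the window
// holds the ticks of the last X activates; the (X + 1)'st ACT may not start
// until tXAW after the oldest of them.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <deque>

struct ActWindowSketch
{
    std::deque<uint64_t> acts; // newest at front, oldest at back
    std::size_t limit;         // X, the activation limit
    uint64_t tXAW;             // rolling window length in ticks

    // earliest tick at which the next ACT may issue
    uint64_t nextAllowed(uint64_t now) const
    {
        if (acts.size() < limit)
            return now;                           // window not yet full
        return std::max(now, acts.back() + tXAW); // respect the oldest ACT
    }

    void record(uint64_t act_at)
    {
        if (acts.size() == limit)
            acts.pop_back();     // drop the oldest, like pop_back() above
        acts.push_front(act_at); // record the new ACT, like push_front() above
    }
};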
282 
283 void
284 DRAMInterface::prechargeBank(Rank& rank_ref, Bank& bank, Tick pre_tick,
285  bool auto_or_preall, bool trace)
286 {
287  // make sure the bank has an open row
288  assert(bank.openRow != Bank::NO_ROW);
289 
290  // sample the bytes per activate here since we are closing
291  // the page
292  stats.bytesPerActivate.sample(bank.bytesAccessed);
293 
294  bank.openRow = Bank::NO_ROW;
295 
296  Tick pre_at = pre_tick;
297  if (auto_or_preall) {
298  // no precharge allowed before this one
299  bank.preAllowedAt = pre_at;
300  } else {
301  // Issuing an explicit PRE command
302  // Verify that we have command bandwidth to issue the precharge
303  // if not, shift to next burst window
304  pre_at = ctrl->verifySingleCmd(pre_tick, maxCommandsPerWindow, true);
305  // enforce tPPD
306  for (int i = 0; i < banksPerRank; i++) {
307  rank_ref.banks[i].preAllowedAt = std::max(pre_at + tPPD,
308  rank_ref.banks[i].preAllowedAt);
309  }
310  }
311 
312  Tick pre_done_at = pre_at + tRP;
313 
314  bank.actAllowedAt = std::max(bank.actAllowedAt, pre_done_at);
315 
316  assert(rank_ref.numBanksActive != 0);
317  --rank_ref.numBanksActive;
318 
319  DPRINTF(DRAM, "Precharging bank %d, rank %d at tick %lld, now got "
320  "%d active\n", bank.bank, rank_ref.rank, pre_at,
321  rank_ref.numBanksActive);
322 
323  if (trace) {
324 
325  rank_ref.cmdList.push_back(Command(MemCommand::PRE, bank.bank,
326  pre_at));
327  DPRINTF(DRAMPower, "%llu,PRE,%d,%d\n", divCeil(pre_at, tCK) -
328  timeStampOffset, bank.bank, rank_ref.rank);
329  }
330 
331  // if we look at the current number of active banks we might be
332  // tempted to think the DRAM is now idle, however this can be
333  // undone by an activate that is scheduled to happen before we
334  // would have reached the idle state, so schedule an event and
335  // rather check once we actually make it to the point in time when
336  // the (last) precharge takes place
337  if (!rank_ref.prechargeEvent.scheduled()) {
338  schedule(rank_ref.prechargeEvent, pre_done_at);
339  // New event, increment count
340  ++rank_ref.outstandingEvents;
341  } else if (rank_ref.prechargeEvent.when() < pre_done_at) {
342  reschedule(rank_ref.prechargeEvent, pre_done_at);
343  }
344 }
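// A worked sketch of the row-cycle constraint enforced jointly by
// activateBank() and prechargeBank() above, with illustrative values (names
// are hypothetical, not gem5 API): an ACT at tick a forbids a PRE before
// a + tRAS, and the next ACT to the bank must wait a further tRP after the
// PRE, reproducing the classic row cycle tRC = tRAS + tRP. For example,
// tRAS = 35 and tRP = 15 give an earliest re-activation 50 ticks after a.
#include <algorithm>
#include <cstdint>

static uint64_t
nextActAfterPreSketch(uint64_t act_at, uint64_t pre_req,
                      uint64_t tRAS, uint64_t tRP)
{
    const uint64_t pre_at = std::max(pre_req, act_at + tRAS); // tRAS floor
    return pre_at + tRP;                                      // PRE takes tRP
}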
345 
346 std::pair<Tick, Tick>
347 DRAMInterface::doBurstAccess(MemPacket* mem_pkt, Tick next_burst_at,
348  const std::vector<MemPacketQueue>& queue)
349 {
350  DPRINTF(DRAM, "Timing access to addr %#x, rank/bank/row %d %d %d\n",
351  mem_pkt->addr, mem_pkt->rank, mem_pkt->bank, mem_pkt->row);
352 
353  // get the rank
354  Rank& rank_ref = *ranks[mem_pkt->rank];
355 
356  assert(rank_ref.inRefIdleState());
357 
358  // are we in or transitioning to a low-power state and have not scheduled
359  // a power-up event?
360  // if so, wake up from power down to issue RD/WR burst
361  if (rank_ref.inLowPowerState) {
362  assert(rank_ref.pwrState != PWR_SREF);
363  rank_ref.scheduleWakeUpEvent(tXP);
364  }
365 
366  // get the bank
367  Bank& bank_ref = rank_ref.banks[mem_pkt->bank];
368 
369  // for the state we need to track if it is a row hit or not
370  bool row_hit = true;
371 
372  // Determine the access latency and update the bank state
373  if (bank_ref.openRow == mem_pkt->row) {
374  // nothing to do
375  } else {
376  row_hit = false;
377 
378  // If there is a page open, precharge it.
379  if (bank_ref.openRow != Bank::NO_ROW) {
380  prechargeBank(rank_ref, bank_ref, std::max(bank_ref.preAllowedAt,
381  curTick()));
382  }
383 
384  // next we need to account for the delay in activating the page
385  Tick act_tick = std::max(bank_ref.actAllowedAt, curTick());
386 
387  // Record the activation and deal with all the global timing
388  // constraints caused by a new activation (tRRD and tXAW)
389  activateBank(rank_ref, bank_ref, act_tick, mem_pkt->row);
390  }
391 
392  // respect any constraints on the command (e.g. tRCD or tCCD)
393  const Tick col_allowed_at = mem_pkt->isRead() ?
394  bank_ref.rdAllowedAt : bank_ref.wrAllowedAt;
395 
396  // we need to wait until the bus is available before we can issue
397  // the command; need to ensure minimum bus delay requirement is met
398  Tick cmd_at = std::max({col_allowed_at, next_burst_at, curTick()});
399 
400  // verify that we have command bandwidth to issue the burst
401  // if not, shift to next burst window
402  Tick max_sync = clkResyncDelay + (mem_pkt->isRead() ? tRL : tWL);
403  if (dataClockSync && ((cmd_at - rank_ref.lastBurstTick) > max_sync))
404  cmd_at = ctrl->verifyMultiCmd(cmd_at, maxCommandsPerWindow, tCK);
405  else
406  cmd_at = ctrl->verifySingleCmd(cmd_at, maxCommandsPerWindow, false);
407 
408  // if we are interleaving bursts, ensure that
409  // 1) we don't double interleave on next burst issue
410  // 2) we are at an interleave boundary; if not, shift to next boundary
411  Tick burst_gap = tBURST_MIN;
412  if (burstInterleave) {
413  if (cmd_at == (rank_ref.lastBurstTick + tBURST_MIN)) {
414  // already interleaving, push next command to end of full burst
415  burst_gap = tBURST;
416  } else if (cmd_at < (rank_ref.lastBurstTick + tBURST)) {
417  // not at an interleave boundary after bandwidth check
418  // Shift command to tBURST boundary to avoid data contention
419  // Command will remain in the same burst window given that
420  // tBURST is less than tBURST_MAX
421  cmd_at = rank_ref.lastBurstTick + tBURST;
422  }
423  }
424  DPRINTF(DRAM, "Schedule RD/WR burst at tick %d\n", cmd_at);
425 
426  // update the packet ready time
427  if (mem_pkt->isRead()) {
428  mem_pkt->readyTime = cmd_at + tRL + tBURST;
429  } else {
430  mem_pkt->readyTime = cmd_at + tWL + tBURST;
431  }
432 
433  rank_ref.lastBurstTick = cmd_at;
434 
435  // update the time for the next read/write burst for each
436  // bank (add a max with tCCD/tCCD_L/tCCD_L_WR here)
437  Tick dly_to_rd_cmd;
438  Tick dly_to_wr_cmd;
439  for (int j = 0; j < ranksPerChannel; j++) {
440  for (int i = 0; i < banksPerRank; i++) {
441  if (mem_pkt->rank == j) {
442  if (bankGroupArch &&
443  (bank_ref.bankgr == ranks[j]->banks[i].bankgr)) {
444  // bank group architecture requires longer delays between
445  // RD/WR burst commands to the same bank group.
446  // tCCD_L is default requirement for same BG timing
447  // tCCD_L_WR is required for write-to-write
448  // Need to also take bus turnaround delays into account
449  dly_to_rd_cmd = mem_pkt->isRead() ?
450  tCCD_L : std::max(tCCD_L, wrToRdDlySameBG);
451  dly_to_wr_cmd = mem_pkt->isRead() ?
452  std::max(tCCD_L, rdToWrDlySameBG) :
453  tCCD_L_WR;
454  } else {
455  // tBURST is default requirement for diff BG timing
456  // Need to also take bus turnaround delays into account
457  dly_to_rd_cmd = mem_pkt->isRead() ? burst_gap :
458  writeToReadDelay();
459  dly_to_wr_cmd = mem_pkt->isRead() ? readToWriteDelay() :
460  burst_gap;
461  }
462  } else {
463  // different rank is by default in a different bank group and
464  // doesn't require longer tCCD or additional RTW, WTR delays
465  // Need to account for rank-to-rank switching
466  dly_to_wr_cmd = rankToRankDelay();
467  dly_to_rd_cmd = rankToRankDelay();
468  }
469  ranks[j]->banks[i].rdAllowedAt = std::max(cmd_at + dly_to_rd_cmd,
470  ranks[j]->banks[i].rdAllowedAt);
471  ranks[j]->banks[i].wrAllowedAt = std::max(cmd_at + dly_to_wr_cmd,
472  ranks[j]->banks[i].wrAllowedAt);
473  }
474  }
475 
476  // Save rank of current access
477  activeRank = mem_pkt->rank;
478 
479  // If this is a write, we also need to respect the write recovery
480  // time before a precharge, in the case of a read, respect the
481  // read to precharge constraint
482  bank_ref.preAllowedAt = std::max(bank_ref.preAllowedAt,
483  mem_pkt->isRead() ? cmd_at + tRTP :
484  mem_pkt->readyTime + tWR);
485 
486  // increment the bytes accessed and the accesses per row
487  bank_ref.bytesAccessed += burstSize;
488  ++bank_ref.rowAccesses;
489 
490  // if we reached the max, then issue with an auto-precharge
491  bool auto_precharge = pageMgmt == enums::close ||
492  bank_ref.rowAccesses == maxAccessesPerRow;
493 
494  // if we did not hit the limit, we might still want to
495  // auto-precharge
496  if (!auto_precharge &&
497  (pageMgmt == enums::open_adaptive ||
498  pageMgmt == enums::close_adaptive)) {
499  // a twist on the open and close page policies:
500  // 1) open_adaptive page policy does not blindly keep the
501  // page open, but closes it if there are no row hits and there
502  // are bank conflicts in the queue
503  // 2) close_adaptive page policy does not blindly close the
504  // page, but closes it only if there are no row hits in the queue.
505  // In this case, only force an auto precharge when there
506  // are no same page hits in the queue
507  bool got_more_hits = false;
508  bool got_bank_conflict = false;
509 
510  for (uint8_t i = 0; i < ctrl->numPriorities(); ++i) {
511  auto p = queue[i].begin();
512  // keep on looking until we find a hit or reach the end of the
513  // queue
514  // 1) if a hit is found, then both open and close adaptive
515  // policies keep the page open
516  // 2) if no hit is found, got_bank_conflict is set to true if a
517  // bank conflict request is waiting in the queue
518  // 3) make sure we are not considering the packet that we are
519  // currently dealing with
520  while (!got_more_hits && p != queue[i].end()) {
521 
522  if ((*p)->pseudoChannel != pseudoChannel) {
523  // only consider if this pkt belongs to this interface
524  ++p;
525  continue;
526  }
527 
528  if (mem_pkt != (*p)) {
529  bool same_rank_bank = (mem_pkt->rank == (*p)->rank) &&
530  (mem_pkt->bank == (*p)->bank);
531 
532  bool same_row = mem_pkt->row == (*p)->row;
533  got_more_hits |= same_rank_bank && same_row;
534  got_bank_conflict |= same_rank_bank && !same_row;
535  }
536  ++p;
537  }
538 
539  if (got_more_hits)
540  break;
541  }
542 
543  // auto pre-charge when either
544  // 1) open_adaptive policy, we have not got any more hits, and
545  // have a bank conflict
546  // 2) close_adaptive policy and we have not got any more hits
547  auto_precharge = !got_more_hits &&
548  (got_bank_conflict || pageMgmt == enums::close_adaptive);
549  }
550 
551  // DRAMPower trace command to be written
552  std::string mem_cmd = mem_pkt->isRead() ? "RD" : "WR";
553 
554  // MemCommand required for DRAMPower library
555  MemCommand::cmds command = (mem_cmd == "RD") ? MemCommand::RD :
556  MemCommand::WR;
557 
558  rank_ref.cmdList.push_back(Command(command, mem_pkt->bank, cmd_at));
559 
560  DPRINTF(DRAMPower, "%llu,%s,%d,%d\n", divCeil(cmd_at, tCK) -
561  timeStampOffset, mem_cmd, mem_pkt->bank, mem_pkt->rank);
562 
563  // if this access should use auto-precharge, then we are
564  // closing the row after the read/write burst
565  if (auto_precharge) {
566  // if auto-precharge push a PRE command at the correct tick to the
567  // list used by DRAMPower library to calculate power
568  prechargeBank(rank_ref, bank_ref, std::max(curTick(),
569  bank_ref.preAllowedAt), true);
570 
571  DPRINTF(DRAM, "Auto-precharged bank: %d\n", mem_pkt->bankId);
572  }
573 
574  // Update the stats and schedule the next request
575  if (mem_pkt->isRead()) {
576  // Every read response will generate an event; increment count
577  ++rank_ref.outstandingEvents;
578 
579  stats.readBursts++;
580  if (row_hit)
581  stats.readRowHits++;
582  stats.bytesRead += burstSize;
583  stats.perBankRdBursts[mem_pkt->bankId]++;
584 
585  // Update latency stats
586  stats.totMemAccLat += mem_pkt->readyTime - mem_pkt->entryTime;
587  stats.totQLat += cmd_at - mem_pkt->entryTime;
588  stats.totBusLat += tBURST;
589  } else {
590  // Schedule write done event to decrement event count
591  // after the readyTime has been reached
592  // Only schedule latest write event to minimize events
593  // required; only need to ensure that final event scheduled covers
594  // the time that writes are outstanding and bus is active
595  // to hold off power-down entry events
596  if (!rank_ref.writeDoneEvent.scheduled()) {
597  schedule(rank_ref.writeDoneEvent, mem_pkt->readyTime);
598  // New event, increment count
599  ++rank_ref.outstandingEvents;
600 
601  } else if (rank_ref.writeDoneEvent.when() < mem_pkt->readyTime) {
602  reschedule(rank_ref.writeDoneEvent, mem_pkt->readyTime);
603  }
604  // will remove write from queue when returned to parent function
605  // decrement count for DRAM rank
606  --rank_ref.writeEntries;
607 
608  stats.writeBursts++;
609  if (row_hit)
610  stats.writeRowHits++;
611  stats.bytesWritten += burstSize;
612  stats.perBankWrBursts[mem_pkt->bankId]++;
613 
614  }
615  // Update bus state to reflect when previous command was issued
616  return std::make_pair(cmd_at, cmd_at + burst_gap);
617 }
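// A worked example of the ready-time computation in doBurstAccess() above,
// with illustrative numbers rather than datasheet values: a read burst
// scheduled at cmd_at = 1000 with tRL = 14 and tBURST = 4 completes at
// 1000 + 14 + 4 = 1018; a write substitutes tWL for tRL.
#include <cstdint>

static inline uint64_t
readyTimeSketch(uint64_t cmd_at, bool is_read,
                uint64_t tRL, uint64_t tWL, uint64_t tBURST)
{
    return cmd_at + (is_read ? tRL : tWL) + tBURST;
}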
618 
619 void
620 DRAMInterface::addRankToRankDelay(Tick cmd_at)
621 {
622  // update timing for DRAM ranks due to bursts issued
623  // to ranks on other media interfaces
624  for (auto n : ranks) {
625  for (int i = 0; i < banksPerRank; i++) {
626  // different rank by default
627  // Need to only account for rank-to-rank switching
628  n->banks[i].rdAllowedAt = std::max(cmd_at + rankToRankDelay(),
629  n->banks[i].rdAllowedAt);
630  n->banks[i].wrAllowedAt = std::max(cmd_at + rankToRankDelay(),
631  n->banks[i].wrAllowedAt);
632  }
633  }
634 }
635 
636 DRAMInterface::DRAMInterface(const DRAMInterfaceParams &_p)
637  : MemInterface(_p),
638  bankGroupsPerRank(_p.bank_groups_per_rank),
639  bankGroupArch(_p.bank_groups_per_rank > 0),
640  tRL(_p.tCL),
641  tWL(_p.tCWL),
642  tBURST_MIN(_p.tBURST_MIN), tBURST_MAX(_p.tBURST_MAX),
643  tCCD_L_WR(_p.tCCD_L_WR), tCCD_L(_p.tCCD_L),
644  tRCD_RD(_p.tRCD), tRCD_WR(_p.tRCD_WR),
645  tRP(_p.tRP), tRAS(_p.tRAS), tWR(_p.tWR), tRTP(_p.tRTP),
646  tRFC(_p.tRFC), tREFI(_p.tREFI), tRRD(_p.tRRD), tRRD_L(_p.tRRD_L),
647  tPPD(_p.tPPD), tAAD(_p.tAAD),
648  tXAW(_p.tXAW), tXP(_p.tXP), tXS(_p.tXS),
649  clkResyncDelay(_p.tBURST_MAX),
650  dataClockSync(_p.data_clock_sync),
651  burstInterleave(tBURST != tBURST_MIN),
652  twoCycleActivate(_p.two_cycle_activate),
653  activationLimit(_p.activation_limit),
654  wrToRdDlySameBG(tWL + _p.tBURST_MAX + _p.tWTR_L),
655  rdToWrDlySameBG(_p.tRTW + _p.tBURST_MAX),
656  pageMgmt(_p.page_policy),
657  maxAccessesPerRow(_p.max_accesses_per_row),
658  timeStampOffset(0), activeRank(0),
659  enableDRAMPowerdown(_p.enable_dram_powerdown),
660  lastStatsResetTick(0),
661  stats(*this)
662 {
663  DPRINTF(DRAM, "Setting up DRAM Interface\n");
664 
665  fatal_if(!isPowerOf2(burstSize), "DRAM burst size %d is not allowed, "
666  "must be a power of two\n", burstSize);
667 
668  // sanity check the ranks since we rely on bit slicing for the
669  // address decoding
670  fatal_if(!isPowerOf2(ranksPerChannel), "DRAM rank count of %d is "
671  "not allowed, must be a power of two\n", ranksPerChannel);
672 
673  for (int i = 0; i < ranksPerChannel; i++) {
674  DPRINTF(DRAM, "Creating DRAM rank %d \n", i);
675  Rank* rank = new Rank(_p, i, *this);
676  ranks.push_back(rank);
677  }
678 
679  // determine the actual DRAM capacity from the DRAM config, in Mbytes
680  uint64_t deviceCapacity = deviceSize / (1024 * 1024) * devicesPerRank *
681  ranksPerChannel;
682 
683  uint64_t capacity = 1ULL << ceilLog2(AbstractMemory::size());
684 
685  DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,
686  AbstractMemory::size());
687 
688  // if actual DRAM size does not match memory capacity in system warn!
689  if (deviceCapacity != capacity / (1024 * 1024))
690  warn("DRAM device capacity (%d Mbytes) does not match the "
691  "address range assigned (%d Mbytes)\n", deviceCapacity,
692  capacity / (1024 * 1024));
693 
694  DPRINTF(DRAM, "Row buffer size %d bytes with %d bursts per row buffer\n",
695  rowBufferSize, burstsPerRowBuffer);
696 
697  rowsPerBank = capacity / (rowBufferSize * banksPerRank * ranksPerChannel);
698 
699  // some basic sanity checks
700  if (tREFI <= tRP || tREFI <= tRFC) {
701  fatal("tREFI (%d) must be larger than tRP (%d) and tRFC (%d)\n",
702  tREFI, tRP, tRFC);
703  }
704 
705  // basic bank group architecture checks ->
706  if (bankGroupArch) {
707  // must have at least one bank per bank group
708  if (bankGroupsPerRank > banksPerRank) {
709  fatal("banks per rank (%d) must be equal to or larger than "
710  "bank groups per rank (%d)\n",
711  banksPerRank, bankGroupsPerRank);
712  }
713  // must have same number of banks in each bank group
714  if ((banksPerRank % bankGroupsPerRank) != 0) {
715  fatal("Banks per rank (%d) must be evenly divisible by bank "
716  "groups per rank (%d) for equal banks per bank group\n",
717  banksPerRank, bankGroupsPerRank);
718  }
719  // tCCD_L should be greater than minimal, back-to-back burst delay
720  if (tCCD_L <= tBURST) {
721  fatal("tCCD_L (%d) should be larger than the minimum bus delay "
722  "(%d) when bank groups per rank (%d) is greater than 1\n",
723  tCCD_L, tBURST, bankGroupsPerRank);
724  }
725  // tCCD_L_WR should be greater than minimal, back-to-back burst delay
726  if (tCCD_L_WR <= tBURST) {
727  fatal("tCCD_L_WR (%d) should be larger than the minimum bus delay "
728  " (%d) when bank groups per rank (%d) is greater than 1\n",
729  tCCD_L_WR, tBURST, bankGroupsPerRank);
730  }
731  // tRRD_L is greater than minimal, same bank group ACT-to-ACT delay
732  // some datasheets might specify it equal to tRRD
733  if (tRRD_L < tRRD) {
734  fatal("tRRD_L (%d) should be larger than tRRD (%d) when "
735  "bank groups per rank (%d) is greater than 1\n",
736  tRRD_L, tRRD, bankGroupsPerRank);
737  }
738  }
739 }
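// A worked example of the capacity cross-check in the constructor above,
// under hypothetical parameters: eight 128 MByte (1 Gbit) devices per rank
// and two ranks give 128 * 8 * 2 = 2048 MBytes, which must match the address
// range assigned to the interface or the warning above fires.
#include <cstdint>

static inline uint64_t
deviceCapacityMBytesSketch(uint64_t device_bytes, uint32_t devices_per_rank,
                           uint32_t ranks_per_channel)
{
    return device_bytes / (1024 * 1024) * devices_per_rank *
        ranks_per_channel;
}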
740 
741 void
742 DRAMInterface::init()
743 {
744  AbstractMemory::init();
745 
746  // a few sanity checks on the interleaving, saved for here to
747  // ensure that the system pointer is initialised
748  if (range.interleaved()) {
749  if (addrMapping == enums::RoRaBaChCo) {
750  if (rowBufferSize != range.granularity()) {
751  fatal("Channel interleaving of %s doesn't match RoRaBaChCo "
752  "address map\n", name());
753  }
754  } else if (addrMapping == enums::RoRaBaCoCh ||
755  addrMapping == enums::RoCoRaBaCh) {
756  // for the interleavings with channel bits in the bottom,
757  // if the system uses a channel striping granularity that
758  // is larger than the DRAM burst size, then map the
759  // sequential accesses within a stripe to a number of
760  // columns in the DRAM, effectively placing some of the
761  // lower-order column bits as the least-significant bits
762  // of the address (above the ones denoting the burst size)
763  assert(burstsPerStripe >= 1);
764 
765  // channel striping has to be done at a granularity that
766  // is equal to or larger than a cache line
767  if (system()->cacheLineSize() > range.granularity()) {
768  fatal("Channel interleaving of %s must be at least as large "
769  "as the cache line size\n", name());
770  }
771 
772  // ...and equal to or smaller than the row-buffer size
773  if (rowBufferSize < range.granularity()) {
774  fatal("Channel interleaving of %s must be at most as large "
775  "as the row-buffer size\n", name());
776  }
777  // this is essentially the check above, so just to be sure
778  assert(burstsPerRowBuffer >= burstsPerStripe);
779  }
780  }
781 }
782 
783 void
784 DRAMInterface::startup()
785 {
786  if (system()->isTimingMode()) {
787  // timestamp offset should be in clock cycles for DRAMPower
788  timeStampOffset = divCeil(curTick(), tCK);
789 
790  for (auto r : ranks) {
791  r->startup(curTick() + tREFI - tRP);
792  }
793  }
794 }
795 
796 bool
797 DRAMInterface::isBusy(bool read_queue_empty, bool all_writes_nvm)
798 {
799  int busy_ranks = 0;
800  for (auto r : ranks) {
801  if (!r->inRefIdleState()) {
802  if (r->pwrState != PWR_SREF) {
803  // rank is busy refreshing
804  DPRINTF(DRAMState, "Rank %d is not available\n", r->rank);
805  busy_ranks++;
806 
807  // let the rank know that if it was waiting to drain, it
808  // is now done and ready to proceed
809  r->checkDrainDone();
810  }
811 
812  // check if we were in self-refresh and haven't started
813  // to transition out
814  if ((r->pwrState == PWR_SREF) && r->inLowPowerState) {
815  DPRINTF(DRAMState, "Rank %d is in self-refresh\n", r->rank);
816  // if we have commands queued to this rank and we don't have
817  // a minimum number of active commands enqueued,
818  // exit self-refresh
819  if (r->forceSelfRefreshExit()) {
820  DPRINTF(DRAMState, "rank %d was in self refresh and"
821  " should wake up\n", r->rank);
822  //wake up from self-refresh
823  r->scheduleWakeUpEvent(tXS);
824  // things are brought back into action once a refresh is
825  // performed after self-refresh
826  // continue with selection for other ranks
827  }
828  }
829  }
830  }
831  return (busy_ranks == ranksPerChannel);
832 }
833 
834 MemPacket*
835 DRAMInterface::decodePacket(const PacketPtr pkt, Addr pkt_addr,
836  unsigned size, bool is_read, uint8_t pseudo_channel)
837 {
838  // decode the address based on the address mapping scheme, with
839  // Ro, Ra, Co, Ba and Ch denoting row, rank, column, bank and
840  // channel, respectively
841  uint8_t rank;
842  uint8_t bank;
843  // use a 64-bit unsigned during the computations as the row is
844  // always the top bits, and check before creating the packet
845  uint64_t row;
846 
847  // Get packed address, starting at 0
848  Addr addr = getCtrlAddr(pkt_addr);
849 
850  // truncate the address to a memory burst, which makes it unique to
851  // a specific buffer, row, bank, rank and channel
852  addr = addr / burstSize;
853 
854  // we have removed the lowest order address bits that denote the
855  // position within the column
856  if (addrMapping == enums::RoRaBaChCo || addrMapping == enums::RoRaBaCoCh) {
857  // the lowest order bits denote the column to ensure that
858  // sequential cache lines occupy the same row
859  addr = addr / burstsPerRowBuffer;
860 
861  // after the channel bits, get the bank bits to interleave
862  // over the banks
863  bank = addr % banksPerRank;
864  addr = addr / banksPerRank;
865 
866  // after the bank, we get the rank bits which thus interleaves
867  // over the ranks
868  rank = addr % ranksPerChannel;
869  addr = addr / ranksPerChannel;
870 
871  // lastly, get the row bits, no need to remove them from addr
872  row = addr % rowsPerBank;
873  } else if (addrMapping == enums::RoCoRaBaCh) {
874  // with emerging technologies, could have small page size with
875  // interleaving granularity greater than row buffer
876  if (burstsPerStripe > burstsPerRowBuffer) {
877  // remove column bits which are a subset of burstsPerStripe
878  addr = addr / burstsPerRowBuffer;
879  } else {
880  // remove lower column bits below channel bits
881  addr = addr / burstsPerStripe;
882  }
883 
884  // start with the bank bits, as this provides the maximum
885  // opportunity for parallelism between requests
886  bank = addr % banksPerRank;
887  addr = addr / banksPerRank;
888 
889  // next get the rank bits
890  rank = addr % ranksPerChannel;
891  addr = addr / ranksPerChannel;
892 
893  // next, the higher-order column bits
894  if (burstsPerStripe > burstsPerRowBuffer) {
895  addr = addr / (burstsPerStripe / burstsPerRowBuffer);
896  }
897 
898  // lastly, get the row bits, no need to remove them from addr
899  row = addr % rowsPerBank;
900  } else
901  panic("Unknown address mapping policy chosen!");
902 
903  assert(rank < ranksPerChannel);
904  assert(bank < banksPerRank);
905  assert(row < rowsPerBank);
906  assert(row < Bank::NO_ROW);
907 
908  DPRINTF(DRAM, "Address: %#x Rank %d Bank %d Row %d\n",
909  pkt_addr, rank, bank, row);
910 
911  // create the corresponding memory packet with the entry time and
912  // ready time set to the current tick, the latter will be updated
913  // later
914  uint16_t bank_id = banksPerRank * rank + bank;
915 
916  return new MemPacket(pkt, is_read, true, pseudo_channel, rank, bank, row,
917  bank_id, pkt_addr, size);
918 }
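// A worked example of the RoRaBaChCo-style decomposition in decodePacket()
// above, using plain C++ and hypothetical geometry: burst_size = 64,
// bursts_per_row_buffer = 32, banks_per_rank = 8, ranks_per_channel = 2.
// Byte address 0x400000 decodes as 0x400000 / 64 = 65536 bursts, / 32 = 2048
// after stripping the low column bits, bank = 2048 % 8 = 0, rank = 256 % 2
// = 0, row = 128 (for rows_per_bank > 128). DecodedSketch mirrors the
// successive modulo/divide steps; it is an illustration, not gem5 API.
#include <cstdint>

struct DecodedSketch { uint8_t rank, bank; uint64_t row; };

static DecodedSketch
decodeRoRaBaChCoSketch(uint64_t addr, uint64_t burst_size,
                       uint64_t bursts_per_row_buffer,
                       uint64_t banks_per_rank, uint64_t ranks_per_channel,
                       uint64_t rows_per_bank)
{
    DecodedSketch d;
    addr /= burst_size;                // drop the offset within a burst
    addr /= bursts_per_row_buffer;     // drop the low-order column bits
    d.bank = addr % banks_per_rank;    // bank bits: maximum parallelism
    addr /= banks_per_rank;
    d.rank = addr % ranks_per_channel; // then the rank bits
    addr /= ranks_per_channel;
    d.row = addr % rows_per_bank;      // the remaining top bits are the row
    return d;
}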
919 
920 void DRAMInterface::setupRank(const uint8_t rank, const bool is_read)
921 {
922  // increment entry count of the rank based on packet type
923  if (is_read) {
924  ++ranks[rank]->readEntries;
925  } else {
926  ++ranks[rank]->writeEntries;
927  }
928 }
929 
930 void
931 DRAMInterface::respondEvent(uint8_t rank)
932 {
933  Rank& rank_ref = *ranks[rank];
934 
935  // if a read has reached its ready-time, decrement the number of reads
936  // At this point the packet has been handled and there is a possibility
937  // to switch to low-power mode if no other packet is available
938  --rank_ref.readEntries;
939  DPRINTF(DRAM, "number of read entries for rank %d is %d\n",
940  rank, rank_ref.readEntries);
941 
942  // counter should at least indicate one outstanding request
943  // for this read
944  assert(rank_ref.outstandingEvents > 0);
945  // read response received, decrement count
946  --rank_ref.outstandingEvents;
947 
948  // at this moment should not have transitioned to a low-power state
949  assert((rank_ref.pwrState != PWR_SREF) &&
950  (rank_ref.pwrState != PWR_PRE_PDN) &&
951  (rank_ref.pwrState != PWR_ACT_PDN));
952 
953  // track if this is the last packet before idling
954  // and that there are no outstanding commands to this rank
955  if (rank_ref.isQueueEmpty() && rank_ref.outstandingEvents == 0 &&
956  rank_ref.inRefIdleState() && enableDRAMPowerdown) {
957  // verify that there are no events scheduled
958  assert(!rank_ref.activateEvent.scheduled());
959  assert(!rank_ref.prechargeEvent.scheduled());
960 
961  // if coming from active state, schedule power event to
962  // active power-down else go to precharge power-down
963  DPRINTF(DRAMState, "Rank %d sleep at tick %d; current power state is "
964  "%d\n", rank, curTick(), rank_ref.pwrState);
965 
966  // default to ACT power-down unless already in IDLE state
967  // could be in IDLE if PRE issued before data returned
968  PowerState next_pwr_state = PWR_ACT_PDN;
969  if (rank_ref.pwrState == PWR_IDLE) {
970  next_pwr_state = PWR_PRE_PDN;
971  }
972 
973  rank_ref.powerDownSleep(next_pwr_state, curTick());
974  }
975 }
976 
977 void
978 DRAMInterface::checkRefreshState(uint8_t rank)
979 {
980  Rank& rank_ref = *ranks[rank];
981 
982  if ((rank_ref.refreshState == REF_PRE) &&
983  !rank_ref.prechargeEvent.scheduled()) {
984  // kick the refresh event loop into action again if banks already
985  // closed and just waiting for read to complete
986  schedule(rank_ref.refreshEvent, curTick());
987  }
988 }
989 
990 void
991 DRAMInterface::drainRanks()
992 {
993  // also need to kick off events to exit self-refresh
994  for (auto r : ranks) {
995  // force self-refresh exit, which in turn will issue auto-refresh
996  if (r->pwrState == PWR_SREF) {
997  DPRINTF(DRAM,"Rank%d: Forcing self-refresh wakeup in drain\n",
998  r->rank);
999  r->scheduleWakeUpEvent(tXS);
1000  }
1001  }
1002 }
1003 
1004 bool
1005 DRAMInterface::allRanksDrained() const
1006 {
1007  // true until proven false
1008  bool all_ranks_drained = true;
1009  for (auto r : ranks) {
1010  // then verify that the power state is IDLE ensuring all banks are
1011  // closed and rank is not in a low power state. Also verify that rank
1012  // is idle from a refresh point of view.
1013  all_ranks_drained = r->inPwrIdleState() && r->inRefIdleState() &&
1014  all_ranks_drained;
1015  }
1016  return all_ranks_drained;
1017 }
1018 
1019 void
1020 DRAMInterface::suspend()
1021 {
1022  for (auto r : ranks) {
1023  r->suspend();
1024  }
1025 }
1026 
1027 std::pair<std::vector<uint32_t>, bool>
1028 DRAMInterface::minBankPrep(const MemPacketQueue& queue,
1029  Tick min_col_at) const
1030 {
1031  Tick min_act_at = MaxTick;
1032  std::vector<uint32_t> bank_mask(ranksPerChannel, 0);
1033 
1034  // Flag condition when burst can issue back-to-back with previous burst
1035  bool found_seamless_bank = false;
1036 
1037  // Flag condition when bank can be opened without incurring additional
1038  // delay on the data bus
1039  bool hidden_bank_prep = false;
1040 
1041  // determine if we have queued transactions targeting the
1042  // bank in question
1043  std::vector<bool> got_waiting(ranksPerChannel * banksPerRank, false);
1044  for (const auto& p : queue) {
1045  if (p->pseudoChannel != pseudoChannel)
1046  continue;
1047  if (p->isDram() && ranks[p->rank]->inRefIdleState())
1048  got_waiting[p->bankId] = true;
1049  }
1050 
1051  // Find command with optimal bank timing
1052  // Will prioritize commands that can issue seamlessly.
1053  for (int i = 0; i < ranksPerChannel; i++) {
1054  for (int j = 0; j < banksPerRank; j++) {
1055  uint16_t bank_id = i * banksPerRank + j;
1056 
1057  // if we have waiting requests for the bank, and it is
1058  // amongst the first available, update the mask
1059  if (got_waiting[bank_id]) {
1060  // make sure this rank is not currently refreshing.
1061  assert(ranks[i]->inRefIdleState());
1062  // simplistic approximation of when the bank can issue
1063  // an activate, ignoring any rank-to-rank switching
1064  // cost in this calculation
1065  Tick act_at = ranks[i]->banks[j].openRow == Bank::NO_ROW ?
1066  std::max(ranks[i]->banks[j].actAllowedAt, curTick()) :
1067  std::max(ranks[i]->banks[j].preAllowedAt, curTick()) + tRP;
1068 
1069  // latest Tick for which ACT can occur without
1070  // incurring additional delay on the data bus
1071  const Tick tRCD = ctrl->inReadBusState(false, this) ?
1072  tRCD_RD : tRCD_WR;
1073  const Tick hidden_act_max =
1074  std::max(min_col_at - tRCD, curTick());
1075 
1076  // When is the earliest the R/W burst can issue?
1077  const Tick col_allowed_at = ctrl->inReadBusState(false,
1078  this) ?
1079  ranks[i]->banks[j].rdAllowedAt :
1080  ranks[i]->banks[j].wrAllowedAt;
1081  Tick col_at = std::max(col_allowed_at, act_at + tRCD);
1082 
1083  // bank can issue burst back-to-back (seamlessly) with
1084  // previous burst
1085  bool new_seamless_bank = col_at <= min_col_at;
1086 
1087  // if we found a new seamless bank or we have no
1088  // seamless banks, and got a bank with an earlier
1089  // activate time, it should be added to the bit mask
1090  if (new_seamless_bank ||
1091  (!found_seamless_bank && act_at <= min_act_at)) {
1092  // if we did not have a seamless bank before, and
1093  // we do now, reset the bank mask, also reset it
1094  // if we have not yet found a seamless bank and
1095  // the activate time is smaller than what we have
1096  // seen so far
1097  if (!found_seamless_bank &&
1098  (new_seamless_bank || act_at < min_act_at)) {
1099  std::fill(bank_mask.begin(), bank_mask.end(), 0);
1100  }
1101 
1102  found_seamless_bank |= new_seamless_bank;
1103 
1104  // ACT can occur 'behind the scenes'
1105  hidden_bank_prep = act_at <= hidden_act_max;
1106 
1107  // set the bit corresponding to the available bank
1108  replaceBits(bank_mask[i], j, j, 1);
1109  min_act_at = act_at;
1110  }
1111  }
1112  }
1113  }
1114 
1115  return std::make_pair(bank_mask, hidden_bank_prep);
1116 }
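// A minimal sketch of the per-rank bank mask returned by minBankPrep()
// above: one uint32_t per rank with bit b set when bank b is among the
// earliest available. chooseNextFRFCFS() then tests a single bit, which is
// what bits(earliest_banks[pkt->rank], pkt->bank, pkt->bank) does. The
// helper names are illustrative, not gem5 API.
#include <cstdint>

static inline void
markBankSketch(uint32_t& rank_mask, unsigned bank)
{
    rank_mask |= (1u << bank); // same effect as replaceBits(mask, b, b, 1)
}

static inline bool
bankMarkedSketch(uint32_t rank_mask, unsigned bank)
{
    return (rank_mask >> bank) & 0x1; // same test as bits(mask, b, b)
}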
1117 
1118 DRAMInterface::Rank::Rank(const DRAMInterfaceParams &_p,
1119  int _rank, DRAMInterface& _dram)
1120  : EventManager(&_dram), dram(_dram),
1121  pwrStateTrans(PWR_IDLE), pwrStatePostRefresh(PWR_IDLE),
1122  pwrStateTick(0), refreshDueAt(0), pwrState(PWR_IDLE),
1123  refreshState(REF_IDLE), inLowPowerState(false), rank(_rank),
1124  readEntries(0), writeEntries(0), outstandingEvents(0),
1125  wakeUpAllowedAt(0), power(_p, false), banks(_p.banks_per_rank),
1126  numBanksActive(0), actTicks(_p.activation_limit, 0), lastBurstTick(0),
1127  writeDoneEvent([this]{ processWriteDoneEvent(); }, name()),
1128  activateEvent([this]{ processActivateEvent(); }, name()),
1129  prechargeEvent([this]{ processPrechargeEvent(); }, name()),
1130  refreshEvent([this]{ processRefreshEvent(); }, name()),
1131  powerEvent([this]{ processPowerEvent(); }, name()),
1132  wakeUpEvent([this]{ processWakeUpEvent(); }, name()),
1133  stats(_dram, *this)
1134 {
1135  for (int b = 0; b < _p.banks_per_rank; b++) {
1136  banks[b].bank = b;
1137  // GDDR addressing of banks to BG is linear.
1138  // Here we assume that all DRAM generations address bank groups as
1139  // follows:
1140  if (_p.bank_groups_per_rank > 0) {
1141  // Simply assign lower bits to bank group in order to
1142  // rotate across bank groups as banks are incremented
1143  // e.g. with 4 banks per bank group and 16 banks total:
1144  // banks 0,4,8,12 are in bank group 0
1145  // banks 1,5,9,13 are in bank group 1
1146  // banks 2,6,10,14 are in bank group 2
1147  // banks 3,7,11,15 are in bank group 3
1148  banks[b].bankgr = b % _p.bank_groups_per_rank;
1149  } else {
1150  // No bank groups; simply assign to bank number
1151  banks[b].bankgr = b;
1152  }
1153  }
1154 }
1155 
1156 void
1157 DRAMInterface::Rank::startup(Tick ref_tick)
1158 {
1159  assert(ref_tick > curTick());
1160 
1161  pwrStateTick = curTick();
1162 
1163  // kick off the refresh, and give ourselves enough time to
1164  // precharge
1165  schedule(refreshEvent, ref_tick);
1166 }
1167 
1168 void
1169 DRAMInterface::Rank::suspend()
1170 {
1171  deschedule(refreshEvent);
1172 
1173  // Update the stats
1174  updatePowerStats();
1175 
1176  // don't automatically transition back to LP state after next REF
1177  pwrStatePostRefresh = PWR_IDLE;
1178 }
1179 
1180 bool
1181 DRAMInterface::Rank::isQueueEmpty() const
1182 {
1183  // check commands in Q based on current bus direction
1184  bool no_queued_cmds = (dram.ctrl->inReadBusState(true, &(this->dram))
1185  && (readEntries == 0)) ||
1186  (dram.ctrl->inWriteBusState(true, &(this->dram))
1187  && (writeEntries == 0));
1188  return no_queued_cmds;
1189 }
1190 
1191 void
1192 DRAMInterface::Rank::checkDrainDone()
1193 {
1194  // if this rank was waiting to drain it is now able to proceed to
1195  // precharge
1196  if (refreshState == REF_DRAIN) {
1197  DPRINTF(DRAM, "Refresh drain done, now precharging\n");
1198 
1199  refreshState = REF_PD_EXIT;
1200 
1201  // hand control back to the refresh event loop
1202  schedule(refreshEvent, curTick());
1203  }
1204 }
1205 
1206 void
1207 DRAMInterface::Rank::flushCmdList()
1208 {
1209  // at the moment sort the list of commands and update the counters
1210  // for DRAMPower library when doing a refresh
1211  sort(cmdList.begin(), cmdList.end(), DRAMInterface::sortTime);
1212 
1213  auto next_iter = cmdList.begin();
1214  // push the commands to DRAMPower
1215  for ( ; next_iter != cmdList.end() ; ++next_iter) {
1216  Command cmd = *next_iter;
1217  if (cmd.timeStamp <= curTick()) {
1218  // Move all commands at or before curTick to DRAMPower
1219  power.powerlib.doCommand(cmd.type, cmd.bank,
1220  divCeil(cmd.timeStamp, dram.tCK) -
1221  dram.timeStampOffset);
1222  } else {
1223  // done - found all commands at or before curTick()
1224  // next_iter references the 1st command after curTick
1225  break;
1226  }
1227  }
1228  // reset cmdList to only contain commands after curTick
1229  // if there are no commands after curTick, updated cmdList will be empty
1230  // in this case, next_iter is cmdList.end()
1231  cmdList.assign(next_iter, cmdList.end());
1232 }
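// DRAMPower expects timestamps in clock cycles, not ticks. A small sketch of
// the tick-to-cycle conversion used throughout this file,
// divCeil(t, tCK) - timeStampOffset, assuming integer tick and period values:
#include <cstdint>

static inline uint64_t
tickToDramPowerCycleSketch(uint64_t tick, uint64_t tCK,
                           uint64_t timestamp_offset)
{
    const uint64_t cycle = (tick + tCK - 1) / tCK; // divCeil(tick, tCK)
    return cycle - timestamp_offset;
}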
1233 
1234 void
1235 DRAMInterface::Rank::processActivateEvent()
1236 {
1237  // we should transition to the active state as soon as any bank is active
1238  if (pwrState != PWR_ACT)
1239  // note that at this point numBanksActive could be back at
1240  // zero again due to a precharge scheduled in the future
1241  schedulePowerEvent(PWR_ACT, curTick());
1242 }
1243 
1244 void
1245 DRAMInterface::Rank::processPrechargeEvent()
1246 {
1247  // counter should at least indicate one outstanding request
1248  // for this precharge
1249  assert(outstandingEvents > 0);
1250  // precharge complete, decrement count
1251  --outstandingEvents;
1252 
1253  // if we reached zero, then special conditions apply as we track
1254  // if all banks are precharged for the power models
1255  if (numBanksActive == 0) {
1256  // no reads to this rank in the Q and no pending
1257  // RD/WR or refresh commands
1258  if (isQueueEmpty() && outstandingEvents == 0 &&
1259  dram.enableDRAMPowerdown) {
1260  // should still be in ACT state since bank still open
1261  assert(pwrState == PWR_ACT);
1262 
1263  // All banks closed - switch to precharge power down state.
1264  DPRINTF(DRAMState, "Rank %d sleep at tick %d\n",
1265  rank, curTick());
1266  powerDownSleep(PWR_PRE_PDN, curTick());
1267  } else {
1268  // we should transition to the idle state when the last bank
1269  // is precharged
1270  schedulePowerEvent(PWR_IDLE, curTick());
1271  }
1272  }
1273 }
1274 
1275 void
1276 DRAMInterface::Rank::processWriteDoneEvent()
1277 {
1278  // counter should at least indicate one outstanding request
1279  // for this write
1280  assert(outstandingEvents > 0);
1281  // Write transfer on bus has completed
1282  // decrement per rank counter
1283  --outstandingEvents;
1284 }
1285 
1286 void
1287 DRAMInterface::Rank::processRefreshEvent()
1288 {
1289  // when first preparing the refresh, remember when it was due
1290  if ((refreshState == REF_IDLE) || (refreshState == REF_SREF_EXIT)) {
1291  // remember when the refresh is due
1292  refreshDueAt = curTick();
1293 
1294  // proceed to drain
1295  refreshState = REF_DRAIN;
1296 
1297  // make nonzero while refresh is pending to ensure
1298  // power down and self-refresh are not entered
1299  ++outstandingEvents;
1300 
1301  DPRINTF(DRAM, "Refresh due\n");
1302  }
1303 
1304  // let any scheduled read or write to the same rank go ahead,
1305  // after which it will
1306  // hand control back to this event loop
1307  if (refreshState == REF_DRAIN) {
1308  // if a request is at the moment being handled and this request is
1309  // accessing the current rank then wait for it to finish
1310  if ((rank == dram.activeRank)
1311  && (dram.ctrl->requestEventScheduled(dram.pseudoChannel))) {
1312  // hand control over to the request loop until it is
1313  // evaluated next
1314  DPRINTF(DRAM, "Refresh awaiting draining\n");
1315  return;
1316  } else {
1317  refreshState = REF_PD_EXIT;
1318  }
1319  }
1320 
1321  // at this point, ensure that rank is not in a power-down state
1322  if (refreshState == REF_PD_EXIT) {
1323  // if rank was sleeping and we haven't started the exit process,
1324  // wake-up for refresh
1325  if (inLowPowerState) {
1326  DPRINTF(DRAM, "Wake Up for refresh\n");
1327  // save state and return after refresh completes
1328  scheduleWakeUpEvent(dram.tXP);
1329  return;
1330  } else {
1331  refreshState = REF_PRE;
1332  }
1333  }
1334 
1335  // at this point, ensure that all banks are precharged
1336  if (refreshState == REF_PRE) {
1337  // precharge any active bank
1338  if (numBanksActive != 0) {
1339  // at the moment, we use a precharge all even if there is
1340  // only a single bank open
1341  DPRINTF(DRAM, "Precharging all\n");
1342 
1343  // first determine when we can precharge
1344  Tick pre_at = curTick();
1345 
1346  for (auto &b : banks) {
1347  // respect both causality and any existing bank
1348  // constraints, some banks could already have a
1349  // (auto) precharge scheduled
1350  pre_at = std::max(b.preAllowedAt, pre_at);
1351  }
1352 
1353  // make sure all banks per rank are precharged, and for those that
1354  // already are, update their availability
1355  Tick act_allowed_at = pre_at + dram.tRP;
1356 
1357  for (auto &b : banks) {
1358  if (b.openRow != Bank::NO_ROW) {
1359  dram.prechargeBank(*this, b, pre_at, true, false);
1360  } else {
1361  b.actAllowedAt = std::max(b.actAllowedAt, act_allowed_at);
1362  b.preAllowedAt = std::max(b.preAllowedAt, pre_at);
1363  }
1364  }
1365 
1366  // precharge all banks in rank
1367  cmdList.push_back(Command(MemCommand::PREA, 0, pre_at));
1368 
1369  DPRINTF(DRAMPower, "%llu,PREA,0,%d\n",
1370  divCeil(pre_at, dram.tCK) -
1371  dram.timeStampOffset, rank);
1372  } else if ((pwrState == PWR_IDLE) && (outstandingEvents == 1)) {
1373  // Banks are closed, have transitioned to IDLE state, and
1374  // no outstanding ACT,RD/WR,Auto-PRE sequence scheduled
1375  DPRINTF(DRAM, "All banks already precharged, starting refresh\n");
1376 
1377  // go ahead and kick the power state machine into gear since
1378  // we are already idle
1379  schedulePowerEvent(PWR_REF, curTick());
1380  } else {
1381  // banks are closed, but pwrState has not yet transitioned to IDLE
1382  // or have outstanding ACT,RD/WR,Auto-PRE sequence scheduled
1383  // should have outstanding precharge or read response event
1384  assert(prechargeEvent.scheduled() ||
1385  dram.ctrl->respondEventScheduled(dram.pseudoChannel));
1386  // will start refresh when pwrState transitions to IDLE
1387  }
1388 
1389  assert(numBanksActive == 0);
1390 
1391  // wait for all banks to be precharged or read to complete
1392  // When precharge commands are done, power state machine will
1393  // transition to the idle state, and automatically move to a
1394  // refresh, at that point it will also call this method to get
1395  // the refresh event loop going again
1396  // Similarly, when read response completes, if all banks are
1397  // precharged, will call this method to get loop re-started
1398  return;
1399  }
1400 
1401  // last but not least we perform the actual refresh
1402  if (refreshState == REF_START) {
1403  // should never get here with any banks active
1404  assert(numBanksActive == 0);
1405  assert(pwrState == PWR_REF);
1406 
1407  Tick ref_done_at = curTick() + dram.tRFC;
1408 
1409  for (auto &b : banks) {
1410  b.actAllowedAt = ref_done_at;
1411  }
1412 
1413  // at the moment this affects all ranks
1414  cmdList.push_back(Command(MemCommand::REF, 0, curTick()));
1415 
1416  // Update the stats
1417  updatePowerStats();
1418 
1419  DPRINTF(DRAMPower, "%llu,REF,0,%d\n", divCeil(curTick(), dram.tCK) -
1420  dram.timeStampOffset, rank);
1421 
1422  // Update for next refresh
1423  refreshDueAt += dram.tREFI;
1424 
1425  // make sure we did not wait so long that we cannot make up
1426  // for it
1427  if (refreshDueAt < ref_done_at) {
1428  fatal("Refresh was delayed so long we cannot catch up\n");
1429  }
1430 
1431  // Run the refresh and schedule event to transition power states
1432  // when refresh completes
1433  refreshState = REF_RUN;
1434  schedule(refreshEvent, ref_done_at);
1435  return;
1436  }
1437 
1438  if (refreshState == REF_RUN) {
1439  // should never get here with any banks active
1440  assert(numBanksActive == 0);
1441  assert(pwrState == PWR_REF);
1442 
1443  assert(!powerEvent.scheduled());
1444 
1445  if ((dram.ctrl->drainState() == DrainState::Draining) ||
1446  (dram.ctrl->drainState() == DrainState::Drained)) {
1447  // if draining, do not re-enter low-power mode.
1448  // simply go to IDLE and wait
1449  schedulePowerEvent(PWR_IDLE, curTick());
1450  } else {
1451  // At the moment, we sleep when the refresh ends and wait to be
1452  // woken up again if previously in a low-power state.
1453  if (pwrStatePostRefresh != PWR_IDLE) {
1454  // power State should be power Refresh
1455  assert(pwrState == PWR_REF);
1456  DPRINTF(DRAMState, "Rank %d sleeping after refresh and was in "
1457  "power state %d before refreshing\n", rank,
1458  pwrStatePostRefresh);
1459  powerDownSleep(pwrState, curTick());
1460 
1461  // Force PRE power-down if there are no outstanding commands
1462  // in Q after refresh.
1463  } else if (isQueueEmpty() && dram.enableDRAMPowerdown) {
1464  // still have refresh event outstanding but there should
1465  // be no other events outstanding
1466  assert(outstandingEvents == 1);
1467  DPRINTF(DRAMState, "Rank %d sleeping after refresh but was NOT"
1468  " in a low power state before refreshing\n", rank);
1469  powerDownSleep(PWR_PRE_PDN, curTick());
1470 
1471  } else {
1472  // move to the idle power state once the refresh is done, this
1473  // will also move the refresh state machine to the refresh
1474  // idle state
1475  schedulePowerEvent(PWR_IDLE, curTick());
1476  }
1477  }
1478 
1479  // At this point, we have completed the current refresh.
1480  // In the SREF bypass case, we do not get to this state in the
1481  // refresh STM and therefore can always schedule next event.
1482  // Compensate for the delay in actually performing the refresh
1483  // when scheduling the next one
1484  schedule(refreshEvent, refreshDueAt - dram.tRP);
1485 
1486  DPRINTF(DRAMState, "Refresh done at %llu and next refresh"
1487  " at %llu\n", curTick(), refreshDueAt);
1488  }
1489 }
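// A worked sketch of the refresh cadence maintained above, with illustrative
// JEDEC-like numbers (tREFI = 7.8 us, tRFC = 350 ns): each refresh becomes
// due tREFI after the previous due time (refreshDueAt += tREFI), and the
// refreshEvent is scheduled tRP early so the precharge-all can complete
// first. If a refresh would finish after the next one is already due, the
// model cannot catch up and the fatal above fires.
#include <cstdint>

static inline uint64_t
nextRefreshEventTickSketch(uint64_t refresh_due_at, uint64_t tRP)
{
    // mirrors schedule(refreshEvent, refreshDueAt - dram.tRP) above
    return refresh_due_at - tRP;
}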
1490 
1491 void
1492 DRAMInterface::Rank::schedulePowerEvent(PowerState pwr_state, Tick tick)
1493 {
1494  // respect causality
1495  assert(tick >= curTick());
1496 
1497  if (!powerEvent.scheduled()) {
1498  DPRINTF(DRAMState, "Scheduling power event at %llu to state %d\n",
1499  tick, pwr_state);
1500 
1501  // insert the new transition
1502  pwrStateTrans = pwr_state;
1503 
1504  schedule(powerEvent, tick);
1505  } else {
1506  panic("Scheduled power event at %llu to state %d, "
1507  "with scheduled event at %llu to %d\n", tick, pwr_state,
1508  powerEvent.when(), pwrStateTrans);
1509  }
1510 }
1511 
1512 void
1513 DRAMInterface::Rank::powerDownSleep(PowerState pwr_state, Tick tick)
1514 {
1515  // if the low power state is active power-down, schedule the transition
1516  // to that state. In reality tCKE is needed to enter active power-down;
1517  // this is neglected here and could be added in the future.
1518  if (pwr_state == PWR_ACT_PDN) {
1519  schedulePowerEvent(pwr_state, tick);
1520  // push command to DRAMPower
1521  cmdList.push_back(Command(MemCommand::PDN_F_ACT, 0, tick));
1522  DPRINTF(DRAMPower, "%llu,PDN_F_ACT,0,%d\n", divCeil(tick,
1523  dram.tCK) - dram.timeStampOffset, rank);
1524  } else if (pwr_state == PWR_PRE_PDN) {
1525  // if low power state is precharge low, schedule to precharge low
1526  // power state. In reality tCKE is needed to enter active low power.
1527  // This is neglected here.
1528  schedulePowerEvent(pwr_state, tick);
1529  //push Command to DRAMPower
1530  cmdList.push_back(Command(MemCommand::PDN_F_PRE, 0, tick));
1531  DPRINTF(DRAMPower, "%llu,PDN_F_PRE,0,%d\n", divCeil(tick,
1532  dram.tCK) - dram.timeStampOffset, rank);
1533  } else if (pwr_state == PWR_REF) {
1534  // if a refresh just occurred
1535  // transition to PRE_PDN now that all banks are closed
1536  // precharge power down requires tCKE to enter. For simplicity
1537  // this is not considered.
1538  schedulePowerEvent(PWR_PRE_PDN, tick);
1539  //push Command to DRAMPower
1540  cmdList.push_back(Command(MemCommand::PDN_F_PRE, 0, tick));
1541  DPRINTF(DRAMPower, "%llu,PDN_F_PRE,0,%d\n", divCeil(tick,
1542  dram.tCK) - dram.timeStampOffset, rank);
1543  } else if (pwr_state == PWR_SREF) {
1544  // should only enter SREF after PRE-PD wakeup to do a refresh
1545  assert(pwrStatePostRefresh == PWR_PRE_PDN);
1546  // self refresh requires time tCKESR to enter. For simplicity,
1547  // this is not considered.
1548  schedulePowerEvent(PWR_SREF, tick);
1549  // push Command to DRAMPower
1550  cmdList.push_back(Command(MemCommand::SREN, 0, tick));
1551  DPRINTF(DRAMPower, "%llu,SREN,0,%d\n", divCeil(tick,
1552  dram.tCK) - dram.timeStampOffset, rank);
1553  }
1554  // Ensure that we don't power-down and back up in same tick
1555  // Once we commit to PD entry, do it and wait for at least 1tCK
1556  // This could be replaced with tCKE if/when that is added to the model
1557  wakeUpAllowedAt = tick + dram.tCK;
1558 
1559  // Transitioning to a low power state, set flag
1560  inLowPowerState = true;
1561 }
1562 
1563 void
1564 DRAMInterface::Rank::scheduleWakeUpEvent(Tick exit_delay)
1565 {
1566  Tick wake_up_tick = std::max(curTick(), wakeUpAllowedAt);
1567 
1568  DPRINTF(DRAMState, "Scheduling wake-up for rank %d at tick %d\n",
1569  rank, wake_up_tick);
1570 
1571  // if waking for refresh, hold previous state
1572  // else reset state back to IDLE
1573  if (refreshState == REF_PD_EXIT) {
1574  pwrStatePostRefresh = pwrState;
1575  } else {
1576  // don't automatically transition back to LP state after next REF
1577  pwrStatePostRefresh = PWR_IDLE;
1578  }
1579 
1580  // schedule the wake-up via an event to ensure the power-down entry
1581  // has completed before we try to wake up
1582  schedule(wakeUpEvent, wake_up_tick);
1583 
1584  for (auto &b : banks) {
1585  // respect both causality and any existing bank
1586  // constraints, some banks could already have a
1587  // (auto) precharge scheduled
1588  b.wrAllowedAt = std::max(wake_up_tick + exit_delay, b.wrAllowedAt);
1589  b.rdAllowedAt = std::max(wake_up_tick + exit_delay, b.rdAllowedAt);
1590  b.preAllowedAt = std::max(wake_up_tick + exit_delay, b.preAllowedAt);
1591  b.actAllowedAt = std::max(wake_up_tick + exit_delay, b.actAllowedAt);
1592  }
1593  // Transitioning out of low power state, clear flag
1594  inLowPowerState = false;
1595 
1596  // push to DRAMPower
1597  // use pwrStateTrans for cases where we have a power event scheduled
1598  // to enter low power that has not yet been processed
1599  if (pwrStateTrans == PWR_ACT_PDN) {
1600  cmdList.push_back(Command(MemCommand::PUP_ACT, 0, wake_up_tick));
1601  DPRINTF(DRAMPower, "%llu,PUP_ACT,0,%d\n", divCeil(wake_up_tick,
1602  dram.tCK) - dram.timeStampOffset, rank);
1603 
1604  } else if (pwrStateTrans == PWR_PRE_PDN) {
1605  cmdList.push_back(Command(MemCommand::PUP_PRE, 0, wake_up_tick));
1606  DPRINTF(DRAMPower, "%llu,PUP_PRE,0,%d\n", divCeil(wake_up_tick,
1607  dram.tCK) - dram.timeStampOffset, rank);
1608  } else if (pwrStateTrans == PWR_SREF) {
1609  cmdList.push_back(Command(MemCommand::SREX, 0, wake_up_tick));
1610  DPRINTF(DRAMPower, "%llu,SREX,0,%d\n", divCeil(wake_up_tick,
1611  dram.tCK) - dram.timeStampOffset, rank);
1612  }
1613 }
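
// The exit_delay argument is the wake-up latency of the state being
// left (callers pass, e.g., tXP when leaving power-down), which is why
// each per-bank timing above is pushed out to at least
// wake_up_tick + exit_delay.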
1614 
1615 void
1616 DRAMInterface::Rank::processWakeUpEvent()
1617 {
1618  // Should be in a power-down or self-refresh state
1619  assert((pwrState == PWR_ACT_PDN) || (pwrState == PWR_PRE_PDN) ||
1620  (pwrState == PWR_SREF));
1621 
1622  // Check current state to determine transition state
1623  if (pwrState == PWR_ACT_PDN) {
1624  // banks still open, transition to PWR_ACT
1625  schedulePowerEvent(PWR_ACT, curTick());
1626  } else {
1627  // transitioning from a precharge power-down or self-refresh state
1628  // banks are closed - transition to PWR_IDLE
1629  schedulePowerEvent(PWR_IDLE, curTick());
1630  }
1631 }
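
// Note that this event only selects the target state; the actual
// bookkeeping (stats, refresh restart, chained transitions) happens in
// processPowerEvent once the powerEvent scheduled here fires.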
1632 
1633 void
1634 DRAMInterface::Rank::processPowerEvent()
1635 {
1636  assert(curTick() >= pwrStateTick);
1637  // remember where we were, and for how long
1638  Tick duration = curTick() - pwrStateTick;
1639  PowerState prev_state = pwrState;
1640 
1641  // update the accounting
1642  stats.pwrStateTime[prev_state] += duration;
1643 
1644  // add to the total idle time
1645  if ((prev_state == PWR_PRE_PDN) || (prev_state == PWR_ACT_PDN) ||
1646  (prev_state == PWR_SREF)) {
1647  stats.totalIdleTime += duration;
1648  }
1649 
1650  pwrState = pwrStateTrans;
1651  pwrStateTick = curTick();
1652 
1653  // if rank was refreshing, make sure to start scheduling requests again
1654  if (prev_state == PWR_REF) {
1655  // bus IDLED prior to REF
1656  // counter should be one for refresh command only
1657  assert(outstandingEvents == 1);
1658  // REF complete, decrement count and go back to IDLE
1659  --outstandingEvents;
1660  refreshState = REF_IDLE;
1661 
1662  DPRINTF(DRAMState, "Was refreshing for %llu ticks\n", duration);
1663  // if moving back to power-down after refresh
1664  if (pwrState != PWR_IDLE) {
1665  assert(pwrState == PWR_PRE_PDN);
1666  DPRINTF(DRAMState, "Switching to power down state after refreshing"
1667  " rank %d at %llu tick\n", rank, curTick());
1668  }
1669 
1670  // completed refresh event, ensure next request is scheduled
1671  if (!(dram.ctrl->requestEventScheduled(dram.pseudoChannel))) {
1672  DPRINTF(DRAM, "Scheduling next request after refreshing"
1673  " rank %d, PC %d \n", rank, dram.pseudoChannel);
1674  dram.ctrl->restartScheduler(curTick(), dram.pseudoChannel);
1675  }
1676  }
1677 
1678  if ((pwrState == PWR_ACT) && (refreshState == REF_PD_EXIT)) {
1679  // have exited ACT PD
1680  assert(prev_state == PWR_ACT_PDN);
1681 
1682  // go back to REF event and close banks
1683  refreshState = REF_PRE;
1684  schedule(refreshEvent, curTick());
1685  } else if (pwrState == PWR_IDLE) {
1686  DPRINTF(DRAMState, "All banks precharged\n");
1687  if (prev_state == PWR_SREF) {
1688  // set refresh state to REF_SREF_EXIT, ensuring inRefIdleState
1689  // continues to return false during tXS after SREF exit
1690  // Schedule a refresh which kicks things back into action
1691  // when it finishes
1692  refreshState = REF_SREF_EXIT;
1693  schedule(refreshEvent, curTick() + dram.tXS);
1694  } else {
1695  // if we have a pending refresh and are now moving to the idle
1696  // state, directly transition to, or schedule, the refresh
1697  if ((refreshState == REF_PRE) || (refreshState == REF_PD_EXIT)) {
1698  // ensure refresh is restarted only after final PRE command.
1699  // do not restart refresh if controller is in an intermediate
1700  // state, after PRE_PDN exit, when banks are IDLE but an
1701  // ACT is scheduled.
1702  if (!activateEvent.scheduled()) {
1703  // there should be nothing waiting at this point
1704  assert(!powerEvent.scheduled());
1705  if (refreshState == REF_PD_EXIT) {
1706  // exiting PRE PD, will be in IDLE until tXP expires
1707  // and then should transition to PWR_REF state
1708  assert(prev_state == PWR_PRE_PDN);
1709  schedulePowerEvent(PWR_REF, curTick() + dram.tXP);
1710  } else if (refreshState == REF_PRE) {
1711  // can directly move to PWR_REF state and proceed below
1712  pwrState = PWR_REF;
1713  }
1714  } else {
1715  // must have PRE scheduled to transition back to IDLE
1716  // and re-kick off refresh
1717  assert(prechargeEvent.scheduled());
1718  }
1719  }
1720  }
1721  }
1722 
1723  // transition to the refresh state and re-start refresh process
1724  // refresh state machine will schedule the next power state transition
1725  if (pwrState == PWR_REF) {
1726  // completed final PRE for refresh or exiting power-down
1727  assert(refreshState == REF_PRE || refreshState == REF_PD_EXIT);
1728 
1729  // exited PRE PD for refresh, with no pending commands
1730  // bypass auto-refresh and go straight to SREF, where memory
1731  // will issue refresh immediately upon entry
1732  if (pwrStatePostRefresh == PWR_PRE_PDN && isQueueEmpty() &&
1733  (dram.ctrl->drainState() != DrainState::Draining) &&
1734  (dram.ctrl->drainState() != DrainState::Drained) &&
1735  dram.enableDRAMPowerdown) {
1736  DPRINTF(DRAMState, "Rank %d bypassing refresh and transitioning "
1737  "to self refresh at %11u tick\n", rank, curTick());
1738  powerDownSleep(PWR_SREF, curTick());
1739 
1740  // Since refresh was bypassed, remove event by decrementing count
1741  assert(outstandingEvents == 1);
1742  --outstandingEvents;
1743 
1744  // reset state back to IDLE temporarily until SREF is entered
1745  pwrState = PWR_IDLE;
1746 
1747  // Not bypassing refresh for SREF entry
1748  } else {
1749  DPRINTF(DRAMState, "Refreshing\n");
1750 
1751  // there should be nothing waiting at this point
1752  assert(!powerEvent.scheduled());
1753 
1754  // kick the refresh event loop into action again, and that
1755  // in turn will schedule a transition to the idle power
1756  // state once the refresh is done
1757  schedule(refreshEvent, curTick());
1758 
1759  // Banks transitioned to IDLE, start REF
1760  refreshState = REF_START;
1761  }
1762  }
1763 
1764 }
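
// A rough sketch of the transitions handled above (not exhaustive):
//   PWR_REF done              -> REF_IDLE, restart request scheduling
//   PWR_ACT with REF_PD_EXIT  -> back to REF_PRE to close banks
//   PWR_IDLE after PWR_SREF   -> wait tXS, then REF_SREF_EXIT refresh
//   PWR_IDLE with pending REF -> move (or schedule a move) to PWR_REF
//   PWR_REF, idle queue, powerdown enabled -> bypass REF, enter SREF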
1765 
1766 void
1767 DRAMInterface::Rank::updatePowerStats()
1768 {
1769  // All commands up to refresh have completed
1770  // flush cmdList to DRAMPower
1771  flushCmdList();
1772 
1773  // Call the function that calculates window energy at intermediate
1774  // update events such as refresh, stats dump, and simulation exit.
1775  // The window starts at the last time calcWindowEnergy was called
1776  // and runs up to the current time.
1777  power.powerlib.calcWindowEnergy(divCeil(curTick(), dram.tCK) -
1778  dram.timeStampOffset);
1779 
1780  // Get the energy from DRAMPower
1781  Data::MemoryPowerModel::Energy energy = power.powerlib.getEnergy();
1782 
1783  // The energy components inside the power lib are calculated over
1784  // the window so accumulate into the corresponding gem5 stat
1785  stats.actEnergy += energy.act_energy * dram.devicesPerRank;
1786  stats.preEnergy += energy.pre_energy * dram.devicesPerRank;
1787  stats.readEnergy += energy.read_energy * dram.devicesPerRank;
1788  stats.writeEnergy += energy.write_energy * dram.devicesPerRank;
1789  stats.refreshEnergy += energy.ref_energy * dram.devicesPerRank;
1790  stats.actBackEnergy += energy.act_stdby_energy * dram.devicesPerRank;
1791  stats.preBackEnergy += energy.pre_stdby_energy * dram.devicesPerRank;
1792  stats.actPowerDownEnergy += energy.f_act_pd_energy * dram.devicesPerRank;
1793  stats.prePowerDownEnergy += energy.f_pre_pd_energy * dram.devicesPerRank;
1794  stats.selfRefreshEnergy += energy.sref_energy * dram.devicesPerRank;
1795 
1796  // Accumulate window energy into the total energy.
1797  stats.totalEnergy += energy.window_energy * dram.devicesPerRank;
1798  // Average power must not be accumulated but calculated over the time
1799  // since the last stats reset. sim_clock::Frequency is the tick
1800  // frequency in ticks per second.
1801  //              energy (pJ)   tick_frequency
1802  // power (mW) = ----------- * --------------
1803  //              time (tick)        1e9
1804  stats.averagePower = (stats.totalEnergy.value() /
1805  (curTick() - dram.lastStatsResetTick)) *
1806  (sim_clock::Frequency / 1000000000.0);
1807 }
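
// Worked example of the formula above (illustrative values only): with
// totalEnergy = 2e6 pJ accumulated over 1e9 ticks and
// sim_clock::Frequency = 1e12 ticks/s,
//   power = (2e6 / 1e9) * (1e12 / 1e9) = 2e-3 * 1e3 = 2 mW,
// i.e. 2e-6 J dissipated over the 1 ms the window spans.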
1808 
1809 void
1810 DRAMInterface::Rank::computeStats()
1811 {
1812  DPRINTF(DRAM,"Computing stats due to a dump callback\n");
1813 
1814  // Update the stats
1815  updatePowerStats();
1816 
1817  // final update of power state times
1818  stats.pwrStateTime[pwrState] += (curTick() - pwrStateTick);
1819  pwrStateTick = curTick();
1820 }
1821 
1822 void
1823 DRAMInterface::Rank::resetStats() {
1824  // The only way to clear the counters in DRAMPower is to call
1825  // calcWindowEnergy function as that then calls clearCounters. The
1826  // clearCounters method itself is private.
1827  power.powerlib.calcWindowEnergy(divCeil(curTick(), dram.tCK) -
1828  dram.timeStampOffset);
1829 
1830 }
1831 
1832 bool
1833 DRAMInterface::Rank::forceSelfRefreshExit() const {
1834  return (readEntries != 0) ||
1835  (dram.ctrl->inWriteBusState(true, &(this->dram))
1836  && (writeEntries != 0));
1837 }
1838 
1839 void
1840 DRAMInterface::DRAMStats::resetStats()
1841 {
1842  dram.lastStatsResetTick = curTick();
1843 }
1844 
1845 DRAMInterface::DRAMStats::DRAMStats(DRAMInterface &_dram)
1846  : statistics::Group(&_dram),
1847  dram(_dram),
1848 
1849  ADD_STAT(readBursts, statistics::units::Count::get(),
1850  "Number of DRAM read bursts"),
1851  ADD_STAT(writeBursts, statistics::units::Count::get(),
1852  "Number of DRAM write bursts"),
1853 
1854  ADD_STAT(perBankRdBursts, statistics::units::Count::get(),
1855  "Per bank read bursts"),
1856  ADD_STAT(perBankWrBursts, statistics::units::Count::get(),
1857  "Per bank write bursts"),
1858 
1859  ADD_STAT(totQLat, statistics::units::Tick::get(),
1860  "Total ticks spent queuing"),
1861  ADD_STAT(totBusLat, statistics::units::Tick::get(),
1862  "Total ticks spent in databus transfers"),
1863  ADD_STAT(totMemAccLat, statistics::units::Tick::get(),
1864  "Total ticks spent from burst creation until serviced "
1865  "by the DRAM"),
1866 
1867  ADD_STAT(avgQLat, statistics::units::Rate<
1868  statistics::units::Tick, statistics::units::Count>::get(),
1869  "Average queueing delay per DRAM burst"),
1870  ADD_STAT(avgBusLat, statistics::units::Rate<
1871  statistics::units::Tick, statistics::units::Count>::get(),
1872  "Average bus latency per DRAM burst"),
1873  ADD_STAT(avgMemAccLat, statistics::units::Rate<
1874  statistics::units::Tick, statistics::units::Count>::get(),
1875  "Average memory access latency per DRAM burst"),
1876 
1877  ADD_STAT(readRowHits, statistics::units::Count::get(),
1878  "Number of row buffer hits during reads"),
1879  ADD_STAT(writeRowHits, statistics::units::Count::get(),
1880  "Number of row buffer hits during writes"),
1881  ADD_STAT(readRowHitRate, statistics::units::Ratio::get(),
1882  "Row buffer hit rate for reads"),
1883  ADD_STAT(writeRowHitRate, statistics::units::Ratio::get(),
1884  "Row buffer hit rate for writes"),
1885 
1886  ADD_STAT(bytesPerActivate, statistics::units::Byte::get(),
1887  "Bytes accessed per row activation"),
1888  ADD_STAT(bytesRead, statistics::units::Byte::get(),
1889  "Total bytes read"),
1890  ADD_STAT(bytesWritten, statistics::units::Byte::get(),
1891  "Total bytes written"),
1892 
1893  ADD_STAT(avgRdBW, statistics::units::Rate<
1894  statistics::units::Byte, statistics::units::Second>::get(),
1895  "Average DRAM read bandwidth in MBytes/s"),
1896  ADD_STAT(avgWrBW, statistics::units::Rate<
1897  statistics::units::Byte, statistics::units::Second>::get(),
1898  "Average DRAM write bandwidth in MBytes/s"),
1899  ADD_STAT(peakBW, statistics::units::Rate<
1900  statistics::units::Byte, statistics::units::Second>::get(),
1901  "Theoretical peak bandwidth in MByte/s"),
1902 
1903  ADD_STAT(busUtil, statistics::units::Ratio::get(),
1904  "Data bus utilization in percentage"),
1905  ADD_STAT(busUtilRead, statistics::units::Ratio::get(),
1906  "Data bus utilization in percentage for reads"),
1907  ADD_STAT(busUtilWrite, statistics::units::Ratio::get(),
1908  "Data bus utilization in percentage for writes"),
1909 
1910  ADD_STAT(pageHitRate, statistics::units::Ratio::get(),
1911  "Row buffer hit rate, read and write combined")
1912 
1913 {
1914 }
1915 
1916 void
1917 DRAMInterface::DRAMStats::regStats()
1918 {
1919  using namespace statistics;
1920 
1921  avgQLat.precision(2);
1922  avgBusLat.precision(2);
1923  avgMemAccLat.precision(2);
1924 
1925  readRowHitRate.precision(2);
1926  writeRowHitRate.precision(2);
1927 
1928  perBankRdBursts.init(dram.banksPerRank * dram.ranksPerChannel);
1929  perBankWrBursts.init(dram.banksPerRank * dram.ranksPerChannel);
1930 
1931  bytesPerActivate
1932  .init(dram.maxAccessesPerRow ?
1933  dram.maxAccessesPerRow : dram.rowBufferSize)
1934  .flags(nozero);
1935 
1936  peakBW.precision(2);
1937  busUtil.precision(2);
1938  busUtilWrite.precision(2);
1939  busUtilRead.precision(2);
1940 
1941  pageHitRate.precision(2);
1942 
1943  // Formula stats
1944  avgQLat = totQLat / readBursts;
1945  avgBusLat = totBusLat / readBursts;
1946  avgMemAccLat = totMemAccLat / readBursts;
1947 
1948  readRowHitRate = (readRowHits / readBursts) * 100;
1949  writeRowHitRate = (writeRowHits / writeBursts) * 100;
1950 
1951  avgRdBW = (bytesRead / 1000000) / simSeconds;
1952  avgWrBW = (bytesWritten / 1000000) / simSeconds;
1953  peakBW = (sim_clock::Frequency / dram.burstDelay()) *
1954  dram.bytesPerBurst() / 1000000;
1955 
1956  busUtil = (avgRdBW + avgWrBW) / peakBW * 100;
1957  busUtilRead = avgRdBW / peakBW * 100;
1958  busUtilWrite = avgWrBW / peakBW * 100;
1959 
1960  pageHitRate = (writeRowHits + readRowHits) /
1961  (writeBursts + readBursts) * 100;
1962 }
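
// Worked example for the bandwidth formulas above (illustrative values,
// not from any real device): with sim_clock::Frequency = 1e12 ticks/s,
// burstDelay() = 2000 ticks and bytesPerBurst() = 32,
//   peakBW = (1e12 / 2000) * 32 / 1e6 = 16000 MBytes/s,
// and avgRdBW + avgWrBW = 8000 MBytes/s would give busUtil = 50.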
1963 
1964 DRAMInterface::RankStats::RankStats(DRAMInterface &_dram, Rank &_rank)
1965  : statistics::Group(&_dram, csprintf("rank%d", _rank.rank).c_str()),
1966  rank(_rank),
1967 
1968  ADD_STAT(actEnergy, statistics::units::Joule::get(),
1969  "Energy for activate commands per rank (pJ)"),
1970  ADD_STAT(preEnergy, statistics::units::Joule::get(),
1971  "Energy for precharge commands per rank (pJ)"),
1972  ADD_STAT(readEnergy, statistics::units::Joule::get(),
1973  "Energy for read commands per rank (pJ)"),
1974  ADD_STAT(writeEnergy, statistics::units::Joule::get(),
1975  "Energy for write commands per rank (pJ)"),
1976  ADD_STAT(refreshEnergy, statistics::units::Joule::get(),
1977  "Energy for refresh commands per rank (pJ)"),
1978  ADD_STAT(actBackEnergy, statistics::units::Joule::get(),
1979  "Energy for active background per rank (pJ)"),
1980  ADD_STAT(preBackEnergy, statistics::units::Joule::get(),
1981  "Energy for precharge background per rank (pJ)"),
1982  ADD_STAT(actPowerDownEnergy, statistics::units::Joule::get(),
1983  "Energy for active power-down per rank (pJ)"),
1984  ADD_STAT(prePowerDownEnergy, statistics::units::Joule::get(),
1985  "Energy for precharge power-down per rank (pJ)"),
1986  ADD_STAT(selfRefreshEnergy, statistics::units::Joule::get(),
1987  "Energy for self refresh per rank (pJ)"),
1988 
1989  ADD_STAT(totalEnergy, statistics::units::Joule::get(),
1990  "Total energy per rank (pJ)"),
1991  ADD_STAT(averagePower, statistics::units::Watt::get(),
1992  "Core power per rank (mW)"),
1993 
1994  ADD_STAT(totalIdleTime, statistics::units::Tick::get(),
1995  "Total Idle time Per DRAM Rank"),
1996  ADD_STAT(pwrStateTime, statistics::units::Tick::get(),
1997  "Time in different power states")
1998 {
1999 }
2000 
2001 void
2002 DRAMInterface::RankStats::regStats()
2003 {
2004  statistics::Group::regStats();
2005 
2006  pwrStateTime
2007  .init(6)
2008  .subname(0, "IDLE")
2009  .subname(1, "REF")
2010  .subname(2, "SREF")
2011  .subname(3, "PRE_PDN")
2012  .subname(4, "ACT")
2013  .subname(5, "ACT_PDN");
2014 }
2015 
2016 void
2017 DRAMInterface::RankStats::resetStats()
2018 {
2019  statistics::Group::resetStats();
2020 
2021  rank.resetStats();
2022 }
2023 
2024 void
2025 DRAMInterface::RankStats::preDumpStats()
2026 {
2027  statistics::Group::preDumpStats();
2028 
2029  rank.computeStats();
2030 }
2031 
2032 } // namespace memory
2033 } // namespace gem5