#include "debug/DRAM.hh"
#include "debug/DRAMPower.hh"
#include "debug/DRAMState.hh"
    bool filled_earliest_banks = false;
    bool hidden_bank_prep = false;

    bool found_hidden_bank = false;

    bool found_prepped_pkt = false;

    bool found_earliest_pkt = false;

    auto selected_pkt_it = queue.end();
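    // Editor's note: the loop below is the FR-FCFS-style selection. Packets
    // whose column command can issue seamlessly (no added bus idle time) are
    // preferred, then packets whose row is already open and prepped, and
    // finally the packet targeting a bank that can be activated earliest,
    // ideally with the activate hidden behind the column access in flight.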
    for (auto i = queue.begin(); i != queue.end(); ++i) {
        DPRINTF(DRAM, "%s checking DRAM packet in bank %d, row %d\n",

        if (burstReady(pkt)) {
            DPRINTF(DRAM, "%s bank %d - Rank %d available\n", __func__,

            if (col_allowed_at <= min_col_at) {
                DPRINTF(DRAM, "%s Seamless buffer hit\n", __func__);
                selected_col_at = col_allowed_at;
            } else if (!found_hidden_bank && !found_prepped_pkt) {
                selected_col_at = col_allowed_at;
                found_prepped_pkt = true;
                DPRINTF(DRAM, "%s Prepped row buffer hit\n", __func__);
            } else if (!found_earliest_pkt) {
                if (!filled_earliest_banks) {
                    std::tie(earliest_banks, hidden_bank_prep) =
                        minBankPrep(queue, min_col_at);
                    filled_earliest_banks = true;
                }

                found_earliest_pkt = true;
                found_hidden_bank = hidden_bank_prep;

                if (hidden_bank_prep || !found_prepped_pkt) {
                    selected_col_at = col_allowed_at;
                }
            }
        } else {
            DPRINTF(DRAM, "%s bank %d - Rank %d not available\n", __func__,
        }
    }

    if (selected_pkt_it == queue.end()) {
        DPRINTF(DRAM, "%s no available DRAM ranks found\n", __func__);
    }

    return std::make_pair(selected_pkt_it, selected_col_at);
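// Editor's note: activateBank() opens a row with an ACT command. It first
// checks command-bus bandwidth (two command-bus cycles when two_cycle_activate
// is set, one otherwise), then propagates the activate timing constraints:
// tRRD/tRRD_L to the other banks and the rolling tXAW activation window
// across the rank.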
DRAMInterface::activateBank(Rank& rank_ref, Bank& bank_ref,
                            Tick act_tick, uint32_t row)

    assert(rank_ref.actTicks.size() == activationLimit);

    if (twoCycleActivate)
        act_at = ctrl->verifyMultiCmd(act_tick, maxCommandsPerWindow, tAAD);
    else
        act_at = ctrl->verifySingleCmd(act_tick, maxCommandsPerWindow, true);

    DPRINTF(DRAM, "Activate at tick %d\n", act_at);

    assert(bank_ref.openRow == Bank::NO_ROW);

    DPRINTF(DRAM, "Activate bank %d, rank %d at tick %lld, now got "
            "%d active\n", bank_ref.bank, rank_ref.rank, act_at,
            ranks[rank_ref.rank]->numBanksActive);

            timeStampOffset, bank_ref.bank, rank_ref.rank);
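    // Editor's note: space out subsequent activates to the other banks in
    // this rank. With a bank-group architecture, activates within the same
    // bank group must be separated by the longer tRRD_L; otherwise the
    // shorter tRRD applies.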
    for (int i = 0; i < banksPerRank; i++) {
        if (bankGroupArch && (bank_ref.bankgr == rank_ref.banks[i].bankgr)) {
            rank_ref.banks[i].actAllowedAt = std::max(act_at + tRRD_L,
                                             rank_ref.banks[i].actAllowedAt);
        } else {
            rank_ref.banks[i].actAllowedAt = std::max(act_at + tRRD,
                                             rank_ref.banks[i].actAllowedAt);
        }
    }

            (act_at - rank_ref.actTicks.back()) < tXAW) {
            panic("Got %d activates in window %d (%llu - %llu) which "
                  "is smaller than %llu\n", activationLimit, act_at -

        rank_ref.actTicks.push_front(act_at);

            (act_at - rank_ref.actTicks.back()) < tXAW) {
            DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate "
                    "no earlier than %llu\n", activationLimit,

            for (int j = 0; j < banksPerRank; j++)
                rank_ref.banks[j].actAllowedAt =
                    std::max(rank_ref.actTicks.back() + tXAW,
                             rank_ref.banks[j].actAllowedAt);
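// Editor's note: prechargeBank() closes the open row in a bank. Command-bus
// bandwidth is verified when an explicit PRE is issued, tPPD spaces
// precharges to the other banks, and the bank stays busy until pre_at + tRP.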
DRAMInterface::prechargeBank(Rank& rank_ref, Bank& bank, Tick pre_tick,
                             bool auto_or_preall, bool trace)

    assert(bank.openRow != Bank::NO_ROW);

    Tick pre_at = pre_tick;
    if (auto_or_preall) {

    } else {
        pre_at = ctrl->verifySingleCmd(pre_tick, maxCommandsPerWindow, true);

        for (int i = 0; i < banksPerRank; i++) {
            rank_ref.banks[i].preAllowedAt = std::max(pre_at + tPPD,
                                             rank_ref.banks[i].preAllowedAt);
        }
    }

    Tick pre_done_at = pre_at + tRP;

    DPRINTF(DRAM, "Precharging bank %d, rank %d at tick %lld, now got "
            "%d active\n", bank.bank, rank_ref.rank, pre_at,

            timeStampOffset, bank.bank, rank_ref.rank);
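// Editor's note: doBurstAccess() schedules an actual read/write burst: close
// a conflicting open row if needed, activate the target row, pick the
// earliest tick at which the column command may issue, and then propagate
// the resulting timing constraints to every bank in the channel.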
    DPRINTF(DRAM, "Timing access to addr %#x, rank/bank/row %d %d %d\n",

    Rank& rank_ref = *ranks[mem_pkt->rank];

    assert(rank_ref.pwrState != PWR_SREF);

    if (bank_ref.openRow != Bank::NO_ROW) {
        prechargeBank(rank_ref, bank_ref, std::max(bank_ref.preAllowedAt,

    activateBank(rank_ref, bank_ref, act_tick, mem_pkt->row);

    const Tick col_allowed_at = mem_pkt->isRead() ?

    Tick cmd_at = std::max({col_allowed_at, next_burst_at, curTick()});

    Tick max_sync = clkResyncDelay + (mem_pkt->isRead() ? tRL : tWL);
    if (dataClockSync && ((cmd_at - rank_ref.lastBurstTick) > max_sync))
        cmd_at = ctrl->verifyMultiCmd(cmd_at, maxCommandsPerWindow, tCK);
    else
        cmd_at = ctrl->verifySingleCmd(cmd_at, maxCommandsPerWindow, false);

    Tick burst_gap = tBURST_MIN;
    if (burstInterleave) {

    DPRINTF(DRAM, "Schedule RD/WR burst at tick %d\n", cmd_at);

    if (mem_pkt->isRead()) {
        mem_pkt->readyTime = cmd_at + tRL + tBURST;
    } else {
        mem_pkt->readyTime = cmd_at + tWL + tBURST;
    }
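    // Editor's note: with cmd_at fixed, update when each bank may next accept
    // a read or a write. Banks in the same rank and bank group use the long
    // CAS-to-CAS delay (tCCD_L and its write-to-read/read-to-write variants),
    // other banks in the same rank use the minimum burst gap or the
    // read/write turnaround, and banks in other ranks pay the rank-to-rank
    // (bus turnaround) delay.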
    for (int j = 0; j < ranksPerChannel; j++) {
        for (int i = 0; i < banksPerRank; i++) {
            if (mem_pkt->rank == j) {
                if (bankGroupArch &&
                    (bank_ref.bankgr == ranks[j]->banks[i].bankgr)) {
                    dly_to_rd_cmd = mem_pkt->isRead() ?
                        tCCD_L : std::max(tCCD_L, wrToRdDlySameBG);
                    dly_to_wr_cmd = mem_pkt->isRead() ?
                        std::max(tCCD_L, rdToWrDlySameBG) :

                    dly_to_rd_cmd = mem_pkt->isRead() ? burst_gap :
                    dly_to_wr_cmd = mem_pkt->isRead() ? readToWriteDelay() :

                dly_to_wr_cmd = rankToRankDelay();
                dly_to_rd_cmd = rankToRankDelay();

            ranks[j]->banks[i].rdAllowedAt = std::max(cmd_at + dly_to_rd_cmd,
                                             ranks[j]->banks[i].rdAllowedAt);
            ranks[j]->banks[i].wrAllowedAt = std::max(cmd_at + dly_to_wr_cmd,
                                             ranks[j]->banks[i].wrAllowedAt);

    activeRank = mem_pkt->rank;

        mem_pkt->isRead() ? cmd_at + tRTP :

    bool auto_precharge = pageMgmt == enums::close ||

    if (!auto_precharge &&
        (pageMgmt == enums::open_adaptive ||
         pageMgmt == enums::close_adaptive)) {
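        // Editor's note: for the adaptive page policies, peek at the queues
        // before deciding on an auto-precharge: if another request to the
        // same row is waiting, keep the row open; if only bank conflicts (or
        // close_adaptive) are seen, close it.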
        bool got_more_hits = false;
        bool got_bank_conflict = false;

        for (uint8_t i = 0; i < ctrl->numPriorities(); ++i) {
            auto p = queue[i].begin();

            while (!got_more_hits && p != queue[i].end()) {
                if ((*p)->pseudoChannel != pseudoChannel) {

                if (mem_pkt != (*p)) {
                    bool same_rank_bank = (mem_pkt->rank == (*p)->rank) &&
                                          (mem_pkt->bank == (*p)->bank);

                    bool same_row = mem_pkt->row == (*p)->row;
                    got_more_hits |= same_rank_bank && same_row;
                    got_bank_conflict |= same_rank_bank && !same_row;

        auto_precharge = !got_more_hits &&
            (got_bank_conflict || pageMgmt == enums::close_adaptive);

    std::string mem_cmd = mem_pkt->isRead() ? "RD" : "WR";

    MemCommand::cmds command = (mem_cmd == "RD") ? MemCommand::RD :

            timeStampOffset, mem_cmd, mem_pkt->bank, mem_pkt->rank);

    if (auto_precharge) {
        prechargeBank(rank_ref, bank_ref, std::max(curTick(),

        DPRINTF(DRAM, "Auto-precharged bank: %d\n", mem_pkt->bankId);

    stats.bytesRead += burstSize;
    stats.perBankRdBursts[mem_pkt->bankId]++;

    stats.totQLat += cmd_at - mem_pkt->entryTime;
    stats.totBusLat += tBURST;

    stats.writeRowHits++;
    stats.bytesWritten += burstSize;
    stats.perBankWrBursts[mem_pkt->bankId]++;

    return std::make_pair(cmd_at, cmd_at + burst_gap);
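// Editor's note: addRankToRankDelay() appears to be invoked when a burst is
// issued elsewhere on the shared channel; every bank of every rank here must
// then honour the rank-to-rank bus turnaround before its next column command.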
DRAMInterface::addRankToRankDelay(Tick cmd_at)

    for (auto n : ranks) {
        for (int i = 0; i < banksPerRank; i++) {
            n->banks[i].rdAllowedAt = std::max(cmd_at + rankToRankDelay(),
                                               n->banks[i].rdAllowedAt);
            n->banks[i].wrAllowedAt = std::max(cmd_at + rankToRankDelay(),
                                               n->banks[i].wrAllowedAt);
DRAMInterface::DRAMInterface(const DRAMInterfaceParams &_p)

      bankGroupsPerRank(_p.bank_groups_per_rank),
      bankGroupArch(_p.bank_groups_per_rank > 0),
      tBURST_MIN(_p.tBURST_MIN), tBURST_MAX(_p.tBURST_MAX),
      tCCD_L_WR(_p.tCCD_L_WR), tCCD_L(_p.tCCD_L),
      tRCD_RD(_p.tRCD), tRCD_WR(_p.tRCD_WR),
      tRP(_p.tRP), tRAS(_p.tRAS), tWR(_p.tWR), tRTP(_p.tRTP),
      tRFC(_p.tRFC), tREFI(_p.tREFI), tRRD(_p.tRRD), tRRD_L(_p.tRRD_L),
      tPPD(_p.tPPD), tAAD(_p.tAAD),
      tXAW(_p.tXAW), tXP(_p.tXP), tXS(_p.tXS),
      clkResyncDelay(_p.tBURST_MAX),
      dataClockSync(_p.data_clock_sync),
      burstInterleave(tBURST != tBURST_MIN),
      twoCycleActivate(_p.two_cycle_activate),
      activationLimit(_p.activation_limit),
      wrToRdDlySameBG(tWL + _p.tBURST_MAX + _p.tWTR_L),
      rdToWrDlySameBG(_p.tRTW + _p.tBURST_MAX),
      pageMgmt(_p.page_policy),
      maxAccessesPerRow(_p.max_accesses_per_row),
      timeStampOffset(0), activeRank(0),
      enableDRAMPowerdown(_p.enable_dram_powerdown),
      lastStatsResetTick(0),

    DPRINTF(DRAM, "Setting up DRAM Interface\n");

    DPRINTF(DRAM, "Creating DRAM rank %d \n", i);
    ranks.push_back(rank);

    DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,

    if (deviceCapacity != capacity / (1024 * 1024))
        warn("DRAM device capacity (%d Mbytes) does not match the "
             "address range assigned (%d Mbytes)\n", deviceCapacity,
             capacity / (1024 * 1024));
    DPRINTF(DRAM, "Row buffer size %d bytes with %d bursts per row buffer\n",

        fatal("tREFI (%d) must be larger than tRP (%d) and tRFC (%d)\n",

        fatal("banks per rank (%d) must be equal to or larger than "
              "bank groups per rank (%d)\n",

        fatal("Banks per rank (%d) must be evenly divisible by bank "
              "groups per rank (%d) for equal banks per bank group\n",

        fatal("tCCD_L (%d) should be larger than the minimum bus delay "
              "(%d) when bank groups per rank (%d) is greater than 1\n",

        fatal("tCCD_L_WR (%d) should be larger than the minimum bus delay "
              "(%d) when bank groups per rank (%d) is greater than 1\n",

        fatal("tRRD_L (%d) should be larger than tRRD (%d) when "
              "bank groups per rank (%d) is greater than 1\n",

        fatal("Channel interleaving of %s doesn't match RoRaBaChCo "
              "address map\n", name());

        fatal("Channel interleaving of %s must be at least as large "
              "as the cache line size\n", name());

        fatal("Channel interleaving of %s must be at most as large "
              "as the row-buffer size\n", name());
    if (system()->isTimingMode()) {

        if (!r->inRefIdleState()) {
            DPRINTF(DRAMState, "Rank %d is not available\n", r->rank);

        if ((r->pwrState == PWR_SREF) && r->inLowPowerState) {
            DPRINTF(DRAMState, "Rank %d is in self-refresh\n", r->rank);

            if (r->forceSelfRefreshExit()) {
                DPRINTF(DRAMState, "rank %d was in self refresh and"
                        " should wake up\n", r->rank);

                r->scheduleWakeUpEvent(tXS);

                           unsigned size, bool is_read, uint8_t pseudo_channel)

        panic("Unknown address mapping policy chosen!");

    DPRINTF(DRAM, "Address: %#x Rank %d Bank %d Row %d\n",
            pkt_addr, rank, bank, row);

    return new MemPacket(pkt, is_read, true, pseudo_channel, rank, bank, row,
                         bank_id, pkt_addr, size);

        ++ranks[rank]->readEntries;

        ++ranks[rank]->writeEntries;

    DPRINTF(DRAM, "number of read entries for rank %d is %d\n",

    DPRINTF(DRAMState, "Rank %d sleep at tick %d; current power state is "

        DPRINTF(DRAM, "Rank%d: Forcing self-refresh wakeup in drain\n",

        r->scheduleWakeUpEvent(tXS);

    bool all_ranks_drained = true;

        all_ranks_drained = r->inPwrIdleState() && r->inRefIdleState() &&

    return all_ranks_drained;
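// Editor's note: minBankPrep() builds a bit mask of the banks that could be
// activated soonest for the queued packets, and reports whether that activate
// can be issued early enough that the row is open by min_col_at, i.e. hidden
// behind the column access already in flight.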
                           Tick min_col_at) const

    bool found_seamless_bank = false;

    bool hidden_bank_prep = false;

    for (const auto& p : queue) {
        if (p->isDram() && ranks[p->rank]->inRefIdleState())
            got_waiting[p->bankId] = true;

            if (got_waiting[bank_id]) {
                assert(ranks[i]->inRefIdleState());

                const Tick hidden_act_max =
                    std::max(min_col_at - tRCD, curTick());

                    ranks[i]->banks[j].rdAllowedAt :
                    ranks[i]->banks[j].wrAllowedAt;
                Tick col_at = std::max(col_allowed_at, act_at + tRCD);

                bool new_seamless_bank = col_at <= min_col_at;

                if (new_seamless_bank ||
                    (!found_seamless_bank && act_at <= min_act_at)) {

                    if (!found_seamless_bank &&
                        (new_seamless_bank || act_at < min_act_at)) {
                        std::fill(bank_mask.begin(), bank_mask.end(), 0);

                    found_seamless_bank |= new_seamless_bank;

                    hidden_bank_prep = act_at <= hidden_act_max;

                    min_act_at = act_at;

    return std::make_pair(bank_mask, hidden_bank_prep);
      pwrStateTrans(PWR_IDLE), pwrStatePostRefresh(PWR_IDLE),
      pwrStateTick(0), refreshDueAt(0), pwrState(PWR_IDLE),
      refreshState(REF_IDLE), inLowPowerState(false), rank(_rank),
      readEntries(0), writeEntries(0), outstandingEvents(0),
      wakeUpAllowedAt(0), power(_p, false), banks(_p.banks_per_rank),
      numBanksActive(0), actTicks(_p.activation_limit, 0), lastBurstTick(0),

      activateEvent([this]{ processActivateEvent(); }, name()),
      prechargeEvent([this]{ processPrechargeEvent(); }, name()),
      refreshEvent([this]{ processRefreshEvent(); }, name()),
      powerEvent([this]{ processPowerEvent(); }, name()),
      wakeUpEvent([this]{ processWakeUpEvent(); }, name()),

    for (int b = 0; b < _p.banks_per_rank; b++) {

        if (_p.bank_groups_per_rank > 0) {
            banks[b].bankgr = b % _p.bank_groups_per_rank;
        } else {
            banks[b].bankgr = b;
        }
    }

    bool no_queued_cmds = (dram.ctrl->inReadBusState(true, &(this->dram))
                           && (readEntries == 0)) ||
                          (dram.ctrl->inWriteBusState(true, &(this->dram))
                           && (writeEntries == 0));
    return no_queued_cmds;

    DPRINTF(DRAM, "Refresh drain done, now precharging\n");
    auto next_iter = cmdList.begin();

    for (; next_iter != cmdList.end(); ++next_iter) {

            power.powerlib.doCommand(cmd.type, cmd.bank,
                                     dram.timeStampOffset);

    cmdList.assign(next_iter, cmdList.end());

    assert(outstandingEvents > 0);

    --outstandingEvents;

    if (numBanksActive == 0) {

        if (isQueueEmpty() && outstandingEvents == 0 &&
            dram.enableDRAMPowerdown) {

            DPRINTF(DRAMState, "Rank %d sleep at tick %d\n",

    assert(outstandingEvents > 0);

    --outstandingEvents;

    ++outstandingEvents;

    DPRINTF(DRAM, "Refresh due\n");
    if ((rank == dram.activeRank)
        && (dram.ctrl->requestEventScheduled(dram.pseudoChannel))) {

        DPRINTF(DRAM, "Refresh awaiting draining\n");

        if (inLowPowerState) {
            DPRINTF(DRAM, "Wake Up for refresh\n");

            scheduleWakeUpEvent(dram.tXP);

    if (refreshState == REF_PRE) {

        if (numBanksActive != 0) {

            DPRINTF(DRAM, "Precharging all\n");

            for (auto &b : banks) {
                pre_at = std::max(b.preAllowedAt, pre_at);

            Tick act_allowed_at = pre_at + dram.tRP;

            for (auto &b : banks) {
                dram.prechargeBank(*this, b, pre_at, true, false);

                b.actAllowedAt = std::max(b.actAllowedAt, act_allowed_at);
                b.preAllowedAt = std::max(b.preAllowedAt, pre_at);

            cmdList.push_back(Command(MemCommand::PREA, 0, pre_at));

                    dram.timeStampOffset, rank);
        } else if ((pwrState == PWR_IDLE) && (outstandingEvents == 1)) {

            DPRINTF(DRAM, "All banks already precharged, starting refresh\n");

            assert(prechargeEvent.scheduled() ||
                   dram.ctrl->respondEventScheduled(dram.pseudoChannel));

            assert(numBanksActive == 0);

    assert(numBanksActive == 0);

    for (auto &b : banks) {
        b.actAllowedAt = ref_done_at;

            dram.timeStampOffset, rank);

    refreshDueAt += dram.tREFI;

    if (refreshDueAt < ref_done_at) {
        fatal("Refresh was delayed so long we cannot catch up\n");

    schedule(refreshEvent, ref_done_at);
    if (refreshState == REF_RUN) {

        assert(numBanksActive == 0);

        assert(!powerEvent.scheduled());

        if (pwrStatePostRefresh != PWR_IDLE) {

            DPRINTF(DRAMState, "Rank %d sleeping after refresh and was in "
                    "power state %d before refreshing\n", rank,
                    pwrStatePostRefresh);
            powerDownSleep(pwrState, curTick());

        } else if (isQueueEmpty() && dram.enableDRAMPowerdown) {

            assert(outstandingEvents == 1);
            DPRINTF(DRAMState, "Rank %d sleeping after refresh but was NOT"
                    " in a low power state before refreshing\n", rank);

    schedule(refreshEvent, refreshDueAt - dram.tRP);

    DPRINTF(DRAMState, "Refresh done at %llu and next refresh"
            " at %llu\n", curTick(), refreshDueAt);
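// Editor's note: schedulePowerEvent() records the requested target state in
// pwrStateTrans and arms powerEvent for the given tick; only one power-state
// transition may be in flight per rank, hence the panic below if an event is
// already scheduled.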
    if (!powerEvent.scheduled()) {
        DPRINTF(DRAMState, "Scheduling power event at %llu to state %d\n",

        pwrStateTrans = pwr_state;

        panic("Scheduled power event at %llu to state %d, "
              "with scheduled event at %llu to %d\n", tick, pwr_state,
              powerEvent.when(), pwrStateTrans);

        schedulePowerEvent(pwr_state, tick);

        cmdList.push_back(Command(MemCommand::PDN_F_ACT, 0, tick));

                dram.tCK) - dram.timeStampOffset, rank);

        schedulePowerEvent(pwr_state, tick);

        cmdList.push_back(Command(MemCommand::PDN_F_PRE, 0, tick));

                dram.tCK) - dram.timeStampOffset, rank);
    } else if (pwr_state == PWR_REF) {

        cmdList.push_back(Command(MemCommand::PDN_F_PRE, 0, tick));

                dram.tCK) - dram.timeStampOffset, rank);
    } else if (pwr_state == PWR_SREF) {

        cmdList.push_back(Command(MemCommand::SREN, 0, tick));

                dram.tCK) - dram.timeStampOffset, rank);

    wakeUpAllowedAt = tick + dram.tCK;

    inLowPowerState = true;
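    // Editor's note: scheduleWakeUpEvent() picks the earliest legal wake-up
    // tick, schedules the wake-up, records the matching power-up (or
    // self-refresh exit) command for DRAMPower, and blocks bank commands
    // until wake-up plus the exit delay.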
    Tick wake_up_tick = std::max(curTick(), wakeUpAllowedAt);

    DPRINTF(DRAMState, "Scheduling wake-up for rank %d at tick %d\n",
            rank, wake_up_tick);

    pwrStatePostRefresh = pwrState;

    schedule(wakeUpEvent, wake_up_tick);

    for (auto &b : banks) {
        b.wrAllowedAt = std::max(wake_up_tick + exit_delay, b.wrAllowedAt);
        b.rdAllowedAt = std::max(wake_up_tick + exit_delay, b.rdAllowedAt);
        b.preAllowedAt = std::max(wake_up_tick + exit_delay, b.preAllowedAt);
        b.actAllowedAt = std::max(wake_up_tick + exit_delay, b.actAllowedAt);

    inLowPowerState = false;

        cmdList.push_back(Command(MemCommand::PUP_ACT, 0, wake_up_tick));

                dram.tCK) - dram.timeStampOffset, rank);

        cmdList.push_back(Command(MemCommand::PUP_PRE, 0, wake_up_tick));

                dram.tCK) - dram.timeStampOffset, rank);
    } else if (pwrStateTrans == PWR_SREF) {
        cmdList.push_back(Command(MemCommand::SREX, 0, wake_up_tick));

                dram.tCK) - dram.timeStampOffset, rank);
    assert(curTick() >= pwrStateTick);

    stats.pwrStateTime[prev_state] += duration;

        stats.totalIdleTime += duration;

    pwrState = pwrStateTrans;

        assert(outstandingEvents == 1);

        --outstandingEvents;

        DPRINTF(DRAMState, "Was refreshing for %llu ticks\n", duration);

            DPRINTF(DRAMState, "Switching to power down state after refreshing"
                    " rank %d at %llu tick\n", rank, curTick());

        if (!(dram.ctrl->requestEventScheduled(dram.pseudoChannel))) {
            DPRINTF(DRAM, "Scheduling next request after refreshing"
                    " rank %d, PC %d \n", rank, dram.pseudoChannel);

            dram.ctrl->restartScheduler(curTick(), dram.pseudoChannel);

        DPRINTF(DRAMState, "All banks precharged\n");

            if (!activateEvent.scheduled()) {

                assert(!powerEvent.scheduled());

        } else if (refreshState == REF_PRE) {

            assert(prechargeEvent.scheduled());

        if (pwrStatePostRefresh == PWR_PRE_PDN && isQueueEmpty() &&
            dram.enableDRAMPowerdown) {

            DPRINTF(DRAMState, "Rank %d bypassing refresh and transitioning "
                    "to self refresh at %llu tick\n", rank, curTick());

            assert(outstandingEvents == 1);
            --outstandingEvents;

            DPRINTF(DRAMState, "Refreshing\n");

            assert(!powerEvent.scheduled());

                              dram.timeStampOffset);
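    // Editor's note: the energy numbers reported by DRAMPower for the elapsed
    // window are scaled by the number of devices making up the rank and
    // accumulated into the gem5 statistics below.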
    Data::MemoryPowerModel::Energy energy = power.powerlib.getEnergy();

    stats.actEnergy += energy.act_energy * dram.devicesPerRank;
    stats.preEnergy += energy.pre_energy * dram.devicesPerRank;
    stats.readEnergy += energy.read_energy * dram.devicesPerRank;
    stats.writeEnergy += energy.write_energy * dram.devicesPerRank;
    stats.refreshEnergy += energy.ref_energy * dram.devicesPerRank;
    stats.actBackEnergy += energy.act_stdby_energy * dram.devicesPerRank;
    stats.preBackEnergy += energy.pre_stdby_energy * dram.devicesPerRank;
    stats.actPowerDownEnergy += energy.f_act_pd_energy * dram.devicesPerRank;
    stats.prePowerDownEnergy += energy.f_pre_pd_energy * dram.devicesPerRank;
    stats.selfRefreshEnergy += energy.sref_energy * dram.devicesPerRank;

    stats.totalEnergy += energy.window_energy * dram.devicesPerRank;

    stats.averagePower = (stats.totalEnergy.value() /
                          (curTick() - dram.lastStatsResetTick)) *

    DPRINTF(DRAM, "Computing stats due to a dump callback\n");

    stats.pwrStateTime[pwrState] += (curTick() - pwrStateTick);

                              dram.timeStampOffset);

    return (readEntries != 0) ||
           (dram.ctrl->inWriteBusState(true, &(this->dram))
            && (writeEntries != 0));

    dram.lastStatsResetTick = curTick();
    : statistics::Group(&_dram),

             "Number of DRAM read bursts"),
             "Number of DRAM write bursts"),

             "Per bank read bursts"),
             "Per bank write bursts"),

             "Total ticks spent queuing"),
             "Total ticks spent in databus transfers"),
             "Total ticks spent from burst creation until serviced "

      ADD_STAT(avgQLat, statistics::units::Rate<
                statistics::units::Tick, statistics::units::Count>::get(),
               "Average queueing delay per DRAM burst"),
      ADD_STAT(avgBusLat, statistics::units::Rate<
                statistics::units::Tick, statistics::units::Count>::get(),
               "Average bus latency per DRAM burst"),
      ADD_STAT(avgMemAccLat, statistics::units::Rate<
                statistics::units::Tick, statistics::units::Count>::get(),
               "Average memory access latency per DRAM burst"),

             "Number of row buffer hits during reads"),
             "Number of row buffer hits during writes"),
      ADD_STAT(readRowHitRate, statistics::units::Ratio::get(),
               "Row buffer hit rate for reads"),
      ADD_STAT(writeRowHitRate, statistics::units::Ratio::get(),
               "Row buffer hit rate for writes"),

      ADD_STAT(bytesPerActivate, statistics::units::Byte::get(),
               "Bytes accessed per row activation"),
      ADD_STAT(bytesRead, statistics::units::Byte::get(),
               "Total bytes read"),
      ADD_STAT(bytesWritten, statistics::units::Byte::get(),
               "Total bytes written"),

      ADD_STAT(avgRdBW, statistics::units::Rate<
                statistics::units::Byte, statistics::units::Second>::get(),
               "Average DRAM read bandwidth in MiBytes/s"),
      ADD_STAT(avgWrBW, statistics::units::Rate<
                statistics::units::Byte, statistics::units::Second>::get(),
               "Average DRAM write bandwidth in MiBytes/s"),
      ADD_STAT(peakBW, statistics::units::Rate<
                statistics::units::Byte, statistics::units::Second>::get(),
               "Theoretical peak bandwidth in MiByte/s"),

      ADD_STAT(busUtil, statistics::units::Ratio::get(),
               "Data bus utilization in percentage"),
      ADD_STAT(busUtilRead, statistics::units::Ratio::get(),
               "Data bus utilization in percentage for reads"),
      ADD_STAT(busUtilWrite, statistics::units::Ratio::get(),
               "Data bus utilization in percentage for writes"),

      ADD_STAT(pageHitRate, statistics::units::Ratio::get(),
               "Row buffer hit rate, read and write combined")
    using namespace statistics;

    avgQLat.precision(2);
    avgBusLat.precision(2);
    avgMemAccLat.precision(2);

    readRowHitRate.precision(2);
    writeRowHitRate.precision(2);

    perBankRdBursts.init(dram.banksPerRank * dram.ranksPerChannel);
    perBankWrBursts.init(dram.banksPerRank * dram.ranksPerChannel);

        .init(dram.maxAccessesPerRow ?
              dram.maxAccessesPerRow : dram.rowBufferSize)

    peakBW.precision(2);
    busUtil.precision(2);
    busUtilWrite.precision(2);
    busUtilRead.precision(2);

    pageHitRate.precision(2);
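    // Editor's note: the derived statistics below are formula stats evaluated
    // when the stats are dumped, e.g. avgQLat = totQLat / readBursts and
    // busUtil = (avgRdBW + avgWrBW) / peakBW * 100.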
    avgQLat = totQLat / readBursts;
    avgBusLat = totBusLat / readBursts;
    avgMemAccLat = totMemAccLat / readBursts;

    readRowHitRate = (readRowHits / readBursts) * 100;
    writeRowHitRate = (writeRowHits / writeBursts) * 100;

    avgRdBW = (bytesRead / 1000000) / simSeconds;
    avgWrBW = (bytesWritten / 1000000) / simSeconds;

              dram.bytesPerBurst() / 1000000;

    busUtil = (avgRdBW + avgWrBW) / peakBW * 100;
    busUtilRead = avgRdBW / peakBW * 100;
    busUtilWrite = avgWrBW / peakBW * 100;

    pageHitRate = (writeRowHits + readRowHits) /
                  (writeBursts + readBursts) * 100;
    : statistics::Group(&_dram, csprintf("rank%d", _rank.rank).c_str()),

      ADD_STAT(actEnergy, statistics::units::Joule::get(),
               "Energy for activate commands per rank (pJ)"),
      ADD_STAT(preEnergy, statistics::units::Joule::get(),
               "Energy for precharge commands per rank (pJ)"),
      ADD_STAT(readEnergy, statistics::units::Joule::get(),
               "Energy for read commands per rank (pJ)"),
      ADD_STAT(writeEnergy, statistics::units::Joule::get(),
               "Energy for write commands per rank (pJ)"),
      ADD_STAT(refreshEnergy, statistics::units::Joule::get(),
               "Energy for refresh commands per rank (pJ)"),
      ADD_STAT(actBackEnergy, statistics::units::Joule::get(),
               "Energy for active background per rank (pJ)"),
      ADD_STAT(preBackEnergy, statistics::units::Joule::get(),
               "Energy for precharge background per rank (pJ)"),
      ADD_STAT(actPowerDownEnergy, statistics::units::Joule::get(),
               "Energy for active power-down per rank (pJ)"),
      ADD_STAT(prePowerDownEnergy, statistics::units::Joule::get(),
               "Energy for precharge power-down per rank (pJ)"),
      ADD_STAT(selfRefreshEnergy, statistics::units::Joule::get(),
               "Energy for self refresh per rank (pJ)"),

      ADD_STAT(totalEnergy, statistics::units::Joule::get(),
               "Total energy per rank (pJ)"),
      ADD_STAT(averagePower, statistics::units::Watt::get(),
               "Core power per rank (mW)"),

      ADD_STAT(totalIdleTime, statistics::units::Tick::get(),
               "Total idle time per DRAM rank"),

               "Time in different power states")

        .subname(3, "PRE_PDN")

        .subname(5, "ACT_PDN");

    rank.computeStats();