#include "debug/DRAM.hh"
#include "debug/DRAMPower.hh"
#include "debug/DRAMState.hh"
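// FR-FCFS candidate selection (this excerpt appears to be from gem5's
// DRAMInterface::chooseNextFRFCFS). The scan below prefers, in order:
// (1) a "seamless" row hit that can issue without extra bus delay,
// (2) a row hit to an already-prepped bank, and (3) the packet whose
// bank can be activated earliest, ideally with the activate hidden
// behind the current burst (as reported by minBankPrep()).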
    bool filled_earliest_banks = false;
    bool hidden_bank_prep = false;
    bool found_hidden_bank = false;
    bool found_prepped_pkt = false;
    bool found_earliest_pkt = false;

    auto selected_pkt_it = queue.end();
    for (auto i = queue.begin(); i != queue.end(); ++i) {
        DPRINTF(DRAM,
                "%s checking DRAM packet in bank %d, row %d\n",
        if (burstReady(pkt)) {
                "%s bank %d - Rank %d available\n", __func__,
            if (col_allowed_at <= min_col_at) {
                DPRINTF(DRAM, "%s Seamless buffer hit\n", __func__);
                selected_col_at = col_allowed_at;
            } else if (!found_hidden_bank && !found_prepped_pkt) {
                selected_col_at = col_allowed_at;
                found_prepped_pkt = true;
                DPRINTF(DRAM, "%s Prepped row buffer hit\n", __func__);
            } else if (!found_earliest_pkt) {
                if (!filled_earliest_banks) {
                    std::tie(earliest_banks, hidden_bank_prep) =
                        minBankPrep(queue, min_col_at);
                    filled_earliest_banks = true;
                found_earliest_pkt = true;
                found_hidden_bank = hidden_bank_prep;
                if (hidden_bank_prep || !found_prepped_pkt) {
                    selected_col_at = col_allowed_at;
        DPRINTF(DRAM,
                "%s bank %d - Rank %d not available\n", __func__,

    if (selected_pkt_it == queue.end()) {
        DPRINTF(DRAM, "%s no available DRAM ranks found\n", __func__);

    return std::make_pair(selected_pkt_it, selected_col_at);
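// activateBank() records an ACT that opens 'row' in the given bank. It
// first checks command-bus bandwidth (one or two command cycles per ACT),
// then enforces ACT-to-ACT spacing: tRRD_L within the same bank group,
// tRRD otherwise, and the rolling tXAW window of at most activationLimit
// activates per rank.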
DRAMInterface::activateBank(Rank& rank_ref, Bank& bank_ref,
                            Tick act_tick, uint32_t row)
    assert(rank_ref.actTicks.size() == activationLimit);
    if (twoCycleActivate)
        act_at = ctrl->verifyMultiCmd(act_tick, maxCommandsPerWindow, tAAD);
    else
        act_at = ctrl->verifySingleCmd(act_tick, maxCommandsPerWindow, true);

    DPRINTF(DRAM, "Activate at tick %d\n", act_at);

    assert(bank_ref.openRow == Bank::NO_ROW);
    DPRINTF(DRAM, "Activate bank %d, rank %d at tick %lld, now got "
            "%d active\n", bank_ref.bank, rank_ref.rank, act_at,
            ranks[rank_ref.rank]->numBanksActive);

            timeStampOffset, bank_ref.bank, rank_ref.rank);
    for (int i = 0; i < banksPerRank; i++) {
        if (bankGroupArch && (bank_ref.bankgr == rank_ref.banks[i].bankgr)) {
            rank_ref.banks[i].actAllowedAt = std::max(act_at + tRRD_L,
                                             rank_ref.banks[i].actAllowedAt);
        } else {
            rank_ref.banks[i].actAllowedAt = std::max(act_at + tRRD,
                                             rank_ref.banks[i].actAllowedAt);
        (act_at - rank_ref.actTicks.back()) < tXAW) {
        panic("Got %d activates in window %d (%llu - %llu) which "
              "is smaller than %llu\n", activationLimit, act_at -

    rank_ref.actTicks.push_front(act_at);

        (act_at - rank_ref.actTicks.back()) < tXAW) {
        DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate "
                "no earlier than %llu\n", activationLimit,

        for (int j = 0; j < banksPerRank; j++)
            rank_ref.banks[j].actAllowedAt =
                std::max(rank_ref.actTicks.back() + tXAW,
                         rank_ref.banks[j].actAllowedAt);
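// prechargeBank() closes the open row of a bank. Where a real PRE command
// has to be issued it checks command bandwidth first; tPPD is propagated
// to the other banks of the rank, and the bank becomes usable again tRP
// after the precharge is issued.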
DRAMInterface::prechargeBank(Rank& rank_ref, Bank& bank, Tick pre_tick,
                             bool auto_or_preall, bool trace)
    assert(bank.openRow != Bank::NO_ROW);

    Tick pre_at = pre_tick;
    if (auto_or_preall) {

        pre_at = ctrl->verifySingleCmd(pre_tick, maxCommandsPerWindow, true);

        for (int i = 0; i < banksPerRank; i++) {
            rank_ref.banks[i].preAllowedAt = std::max(pre_at + tPPD,
                                             rank_ref.banks[i].preAllowedAt);

    Tick pre_done_at = pre_at + tRP;

    DPRINTF(DRAM, "Precharging bank %d, rank %d at tick %lld, now got "
            "%d active\n", bank.bank, rank_ref.rank, pre_at,

            timeStampOffset, bank.bank, rank_ref.rank);
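// Burst access timing (DRAMInterface::doBurstAccess): precharge a
// conflicting open row, activate the target row if needed, pick the
// earliest legal command tick (respecting command bandwidth and, when
// dataClockSync is set, data-clock resynchronisation), and then push out
// rd/wrAllowedAt for every bank using the longer same-bank-group delays
// (tCCD_L and friends) or rank-to-rank delays as appropriate.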
    DPRINTF(DRAM, "Timing access to addr %#x, rank/bank/row %d %d %d\n",

    Rank& rank_ref = *ranks[mem_pkt->rank];

    assert(rank_ref.pwrState != PWR_SREF);

    if (bank_ref.openRow != Bank::NO_ROW) {
        prechargeBank(rank_ref, bank_ref, std::max(bank_ref.preAllowedAt,

    activateBank(rank_ref, bank_ref, act_tick, mem_pkt->row);

    const Tick col_allowed_at = mem_pkt->isRead() ?

    Tick cmd_at = std::max({col_allowed_at, next_burst_at, curTick()});

    Tick max_sync = clkResyncDelay + (mem_pkt->isRead() ? tRL : tWL);
    if (dataClockSync && ((cmd_at - rank_ref.lastBurstTick) > max_sync))
        cmd_at = ctrl->verifyMultiCmd(cmd_at, maxCommandsPerWindow, tCK);
    else
        cmd_at = ctrl->verifySingleCmd(cmd_at, maxCommandsPerWindow, false);

    Tick burst_gap = tBURST_MIN;
    if (burstInterleave) {

    DPRINTF(DRAM, "Schedule RD/WR burst at tick %d\n", cmd_at);

    if (mem_pkt->isRead()) {
        mem_pkt->readyTime = cmd_at + tRL + tBURST;
    } else {
        mem_pkt->readyTime = cmd_at + tWL + tBURST;
    }
    for (int j = 0; j < ranksPerChannel; j++) {
        for (int i = 0; i < banksPerRank; i++) {
            if (mem_pkt->rank == j) {
                    (bank_ref.bankgr == ranks[j]->banks[i].bankgr)) {
                    dly_to_rd_cmd = mem_pkt->isRead() ?
                        tCCD_L : std::max(tCCD_L, wrToRdDlySameBG);
                    dly_to_wr_cmd = mem_pkt->isRead() ?
                        std::max(tCCD_L, rdToWrDlySameBG) :
                    dly_to_rd_cmd = mem_pkt->isRead() ? burst_gap :
                    dly_to_wr_cmd = mem_pkt->isRead() ? readToWriteDelay() :
                dly_to_wr_cmd = rankToRankDelay();
                dly_to_rd_cmd = rankToRankDelay();
            ranks[j]->banks[i].rdAllowedAt = std::max(cmd_at + dly_to_rd_cmd,
                                             ranks[j]->banks[i].rdAllowedAt);
            ranks[j]->banks[i].wrAllowedAt = std::max(cmd_at + dly_to_wr_cmd,
                                             ranks[j]->banks[i].wrAllowedAt);

    activeRank = mem_pkt->rank;

        mem_pkt->isRead() ? cmd_at + tRTP :
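// Auto-precharge decision: always precharge under the closed-page policy;
// under the adaptive policies only precharge when the queues contain no
// further row hits to this bank but do contain a bank conflict.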
    bool auto_precharge = pageMgmt == enums::close ||

    if (!auto_precharge &&
        (pageMgmt == enums::open_adaptive ||
         pageMgmt == enums::close_adaptive)) {

        bool got_more_hits = false;
        bool got_bank_conflict = false;

        for (uint8_t i = 0; i < ctrl->numPriorities(); ++i) {
            auto p = queue[i].begin();

            while (!got_more_hits && p != queue[i].end()) {
                if ((*p)->pseudoChannel != pseudoChannel) {

                if (mem_pkt != (*p)) {
                    bool same_rank_bank = (mem_pkt->rank == (*p)->rank) &&
                                          (mem_pkt->bank == (*p)->bank);
                    bool same_row = mem_pkt->row == (*p)->row;
                    got_more_hits |= same_rank_bank && same_row;
                    got_bank_conflict |= same_rank_bank && !same_row;

        auto_precharge = !got_more_hits &&
            (got_bank_conflict || pageMgmt == enums::close_adaptive);

    std::string mem_cmd = mem_pkt->isRead() ? "RD" : "WR";

            timeStampOffset, mem_cmd, mem_pkt->bank, mem_pkt->rank);

    if (auto_precharge) {
        prechargeBank(rank_ref, bank_ref, std::max(curTick(),

        DPRINTF(DRAM, "Auto-precharged bank: %d\n", mem_pkt->bankId);

        stats.bytesRead += burstSize;
        stats.perBankRdBursts[mem_pkt->bankId]++;

        stats.totQLat += cmd_at - mem_pkt->entryTime;
        stats.totBusLat += tBURST;

        stats.writeRowHits++;
        stats.bytesWritten += burstSize;
        stats.perBankWrBursts[mem_pkt->bankId]++;

    return std::make_pair(cmd_at, cmd_at + burst_gap);
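// addRankToRankDelay() pushes out rd/wrAllowedAt of every bank in every
// rank by the rank-to-rank switching delay, relative to the given
// command tick.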
DRAMInterface::addRankToRankDelay(Tick cmd_at)
    for (auto n : ranks) {
        for (int i = 0; i < banksPerRank; i++) {
            n->banks[i].rdAllowedAt = std::max(cmd_at + rankToRankDelay(),
                                               n->banks[i].rdAllowedAt);
            n->banks[i].wrAllowedAt = std::max(cmd_at + rankToRankDelay(),
                                               n->banks[i].wrAllowedAt);
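// The DRAMInterface constructor copies timing and architectural
// parameters from the Python-side DRAMInterfaceParams and then sanity
// checks the configuration (bank groups, tREFI vs tRP/tRFC, channel
// interleaving vs the address map, and so on).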
DRAMInterface::DRAMInterface(const DRAMInterfaceParams &_p)
      bankGroupsPerRank(_p.bank_groups_per_rank),
      bankGroupArch(_p.bank_groups_per_rank > 0),
      tBURST_MIN(_p.tBURST_MIN), tBURST_MAX(_p.tBURST_MAX),
      tCCD_L_WR(_p.tCCD_L_WR), tCCD_L(_p.tCCD_L),
      tRCD_RD(_p.tRCD), tRCD_WR(_p.tRCD_WR),
      tRP(_p.tRP), tRAS(_p.tRAS), tWR(_p.tWR), tRTP(_p.tRTP),
      tRFC(_p.tRFC), tREFI(_p.tREFI), tRRD(_p.tRRD), tRRD_L(_p.tRRD_L),
      tPPD(_p.tPPD), tAAD(_p.tAAD),
      tXAW(_p.tXAW), tXP(_p.tXP), tXS(_p.tXS),
      clkResyncDelay(_p.tBURST_MAX),
      dataClockSync(_p.data_clock_sync),
      burstInterleave(tBURST != tBURST_MIN),
      twoCycleActivate(_p.two_cycle_activate),
      activationLimit(_p.activation_limit),
      wrToRdDlySameBG(tWL + _p.tBURST_MAX + _p.tWTR_L),
      rdToWrDlySameBG(_p.tRTW + _p.tBURST_MAX),
      pageMgmt(_p.page_policy),
      maxAccessesPerRow(_p.max_accesses_per_row),
      timeStampOffset(0), activeRank(0),
      enableDRAMPowerdown(_p.enable_dram_powerdown),
      lastStatsResetTick(0),

    DPRINTF(DRAM, "Setting up DRAM Interface\n");

        DPRINTF(DRAM, "Creating DRAM rank %d \n", i);
        ranks.push_back(rank);

    DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,

    if (deviceCapacity != capacity / (1024 * 1024))
        warn("DRAM device capacity (%d Mbytes) does not match the "
             "address range assigned (%d Mbytes)\n", deviceCapacity,
             capacity / (1024 * 1024));

    DPRINTF(DRAM,
            "Row buffer size %d bytes with %d bursts per row buffer\n",

        fatal("tREFI (%d) must be larger than tRP (%d) and tRFC (%d)\n",
        fatal("banks per rank (%d) must be equal to or larger than "
              "bank groups per rank (%d)\n",
        fatal("Banks per rank (%d) must be evenly divisible by bank "
              "groups per rank (%d) for equal banks per bank group\n",

        fatal("tCCD_L (%d) should be larger than the minimum bus delay "
              "(%d) when bank groups per rank (%d) is greater than 1\n",

        fatal("tCCD_L_WR (%d) should be larger than the minimum bus delay "
              " (%d) when bank groups per rank (%d) is greater than 1\n",

        fatal("tRRD_L (%d) should be larger than tRRD (%d) when "
              "bank groups per rank (%d) is greater than 1\n",

        fatal("Channel interleaving of %s doesn't match RoRaBaChCo "
              "address map\n", name());

        fatal("Channel interleaving of %s must be at least as large "
              "as the cache line size\n", name());

        fatal("Channel interleaving of %s must be at most as large "
              "as the row-buffer size\n", name());
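// Rank availability checks: a rank that is still refreshing is reported
// as not available, and a rank parked in self-refresh is scheduled to
// wake up (with the tXS exit latency) once it has work pending.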
    if (system()->isTimingMode()) {

        if (!r->inRefIdleState()) {
            DPRINTF(DRAMState, "Rank %d is not available\n", r->rank);

        if ((r->pwrState == PWR_SREF) && r->inLowPowerState) {
            DPRINTF(DRAMState, "Rank %d is in self-refresh\n", r->rank);

            if (r->forceSelfRefreshExit()) {
                DPRINTF(DRAMState, "rank %d was in self refresh and"
                        " should wake up\n", r->rank);
                r->scheduleWakeUpEvent(tXS);
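// Address decode (DRAMInterface::decodePacket): translate the packet
// address into rank/bank/row coordinates according to the configured
// address mapping and wrap the request in a MemPacket for the controller
// queues.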
                           unsigned size, bool is_read, uint8_t pseudo_channel)

        panic("Unknown address mapping policy chosen!");

    DPRINTF(DRAM, "Address: %#x Rank %d Bank %d Row %d\n",
            pkt_addr, rank, bank, row);

    return new MemPacket(pkt, is_read, true, pseudo_channel, rank, bank, row,
                         bank_id, pkt_addr, size);

        ++ranks[rank]->readEntries;

        ++ranks[rank]->writeEntries;

    DPRINTF(DRAM, "number of read entries for rank %d is %d\n",

    DPRINTF(DRAMState, "Rank %d sleep at tick %d; current power state is "

        DPRINTF(DRAM, "Rank%d: Forcing self-refresh wakeup in drain\n",
        r->scheduleWakeUpEvent(tXS);

    bool all_ranks_drained = true;

        all_ranks_drained = r->inPwrIdleState() && r->inRefIdleState() &&

    return all_ranks_drained;
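// minBankPrep() returns a per-rank bit mask of the banks that can be
// activated soonest for the packets still waiting in the queue, together
// with a flag saying whether that activate can be hidden behind the
// burst currently occupying the data bus (i.e. finish before min_col_at).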
                            Tick min_col_at) const

    bool found_seamless_bank = false;

    bool hidden_bank_prep = false;

    for (const auto& p : queue) {
        if (p->isDram() && ranks[p->rank]->inRefIdleState())
            got_waiting[p->bankId] = true;

            if (got_waiting[bank_id]) {
                assert(ranks[i]->inRefIdleState());

                const Tick hidden_act_max =
                    std::max(min_col_at - tRCD, curTick());

                    ranks[i]->banks[j].rdAllowedAt :
                    ranks[i]->banks[j].wrAllowedAt;
                Tick col_at = std::max(col_allowed_at, act_at + tRCD);

                bool new_seamless_bank = col_at <= min_col_at;

                if (new_seamless_bank ||
                    (!found_seamless_bank && act_at <= min_act_at)) {

                    if (!found_seamless_bank &&
                        (new_seamless_bank || act_at < min_act_at)) {
                        std::fill(bank_mask.begin(), bank_mask.end(), 0);

                    found_seamless_bank |= new_seamless_bank;

                    hidden_bank_prep = act_at <= hidden_act_max;

                    min_act_at = act_at;

    return std::make_pair(bank_mask, hidden_bank_prep);
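// Each Rank is a small state machine of its own: it starts idle, tracks
// open banks and outstanding activate/precharge/refresh/power events,
// and drives refresh and power-state transitions through the event
// handlers registered below.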
      pwrStateTrans(PWR_IDLE), pwrStatePostRefresh(PWR_IDLE),
      pwrStateTick(0), refreshDueAt(0), pwrState(PWR_IDLE),
      refreshState(REF_IDLE), inLowPowerState(false), rank(_rank),
      readEntries(0), writeEntries(0), outstandingEvents(0),
      wakeUpAllowedAt(0), power(_p, false), banks(_p.banks_per_rank),
      numBanksActive(0), actTicks(_p.activation_limit, 0), lastBurstTick(0),
      activateEvent([this]{ processActivateEvent(); }, name()),
      prechargeEvent([this]{ processPrechargeEvent(); }, name()),
      refreshEvent([this]{ processRefreshEvent(); }, name()),
      powerEvent([this]{ processPowerEvent(); }, name()),
      wakeUpEvent([this]{ processWakeUpEvent(); }, name()),
    for (int b = 0; b < _p.banks_per_rank; b++) {
        if (_p.bank_groups_per_rank > 0) {
            banks[b].bankgr = b % _p.bank_groups_per_rank;
        } else {
            banks[b].bankgr = b;
    bool no_queued_cmds = (dram.ctrl->inReadBusState(true) &&
                           (readEntries == 0))
                          || (dram.ctrl->inWriteBusState(true) &&
                              (writeEntries == 0));
    return no_queued_cmds;
    DPRINTF(DRAM, "Refresh drain done, now precharging\n");

    auto next_iter = cmdList.begin();
    for (; next_iter != cmdList.end(); ++next_iter) {
            power.powerlib.doCommand(cmd.type, cmd.bank,
                                     dram.timeStampOffset);

    cmdList.assign(next_iter, cmdList.end());

    assert(outstandingEvents > 0);
    --outstandingEvents;

    if (numBanksActive == 0) {
        if (isQueueEmpty() && outstandingEvents == 0 &&
            dram.enableDRAMPowerdown) {
            DPRINTF(DRAMState, "Rank %d sleep at tick %d\n",

    assert(outstandingEvents > 0);
    --outstandingEvents;
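// Refresh handling: the rank first drains outstanding accesses and wakes
// up from any power-down state, then precharges all banks (REF_PRE),
// performs the refresh itself, and finally schedules the next refresh
// relative to refreshDueAt.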
        ++outstandingEvents;

        DPRINTF(DRAM, "Refresh due\n");

        if ((rank == dram.activeRank)
            && (dram.ctrl->requestEventScheduled(dram.pseudoChannel))) {
            DPRINTF(DRAM, "Refresh awaiting draining\n");

        if (inLowPowerState) {
            DPRINTF(DRAM, "Wake Up for refresh\n");
            scheduleWakeUpEvent(dram.tXP);

    if (refreshState == REF_PRE) {
        if (numBanksActive != 0) {
            DPRINTF(DRAM, "Precharging all\n");

            for (auto &b : banks) {
                pre_at = std::max(b.preAllowedAt, pre_at);

            Tick act_allowed_at = pre_at + dram.tRP;

            for (auto &b : banks) {
                dram.prechargeBank(*this, b, pre_at, true, false);
                b.actAllowedAt = std::max(b.actAllowedAt, act_allowed_at);
                b.preAllowedAt = std::max(b.preAllowedAt, pre_at);

            cmdList.push_back(Command(MemCommand::PREA, 0, pre_at));
                    dram.timeStampOffset, rank);
        } else if ((pwrState == PWR_IDLE) && (outstandingEvents == 1)) {
            DPRINTF(DRAM, "All banks already precharged, starting refresh\n");
        assert(prechargeEvent.scheduled() ||
               dram.ctrl->respondEventScheduled());

        assert(numBanksActive == 0);

        assert(numBanksActive == 0);

        for (auto &b : banks) {
            b.actAllowedAt = ref_done_at;

                dram.timeStampOffset, rank);

        refreshDueAt += dram.tREFI;

        if (refreshDueAt < ref_done_at) {
            fatal("Refresh was delayed so long we cannot catch up\n");

        schedule(refreshEvent, ref_done_at);

    if (refreshState == REF_RUN) {
        assert(numBanksActive == 0);

        assert(!powerEvent.scheduled());

        if (pwrStatePostRefresh != PWR_IDLE) {
            DPRINTF(DRAMState, "Rank %d sleeping after refresh and was in "
                    "power state %d before refreshing\n", rank,
                    pwrStatePostRefresh);
            powerDownSleep(pwrState, curTick());

        } else if (isQueueEmpty() && dram.enableDRAMPowerdown) {
            assert(outstandingEvents == 1);
            DPRINTF(DRAMState, "Rank %d sleeping after refresh but was NOT"
                    " in a low power state before refreshing\n", rank);

        schedule(refreshEvent, refreshDueAt - dram.tRP);

        DPRINTF(DRAMState, "Refresh done at %llu and next refresh"
                " at %llu\n", curTick(), refreshDueAt);
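// Power management: schedulePowerEvent() queues a transition to a new
// power state, powerDownSleep() records the matching DRAMPower command
// (PDN_F_ACT, PDN_F_PRE or SREN) and flags the rank as being in a
// low-power state, and the wake-up path applies the exit latency to all
// banks before they become usable again.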
    if (!powerEvent.scheduled()) {
        DPRINTF(DRAMState, "Scheduling power event at %llu to state %d\n",

        pwrStateTrans = pwr_state;

        panic("Scheduled power event at %llu to state %d, "
              "with scheduled event at %llu to %d\n", tick, pwr_state,
              powerEvent.when(), pwrStateTrans);

        schedulePowerEvent(pwr_state, tick);
        cmdList.push_back(Command(MemCommand::PDN_F_ACT, 0, tick));
                dram.tCK) - dram.timeStampOffset, rank);

        schedulePowerEvent(pwr_state, tick);
        cmdList.push_back(Command(MemCommand::PDN_F_PRE, 0, tick));
                dram.tCK) - dram.timeStampOffset, rank);
    } else if (pwr_state == PWR_REF) {
        cmdList.push_back(Command(MemCommand::PDN_F_PRE, 0, tick));
                dram.tCK) - dram.timeStampOffset, rank);
    } else if (pwr_state == PWR_SREF) {
        cmdList.push_back(Command(MemCommand::SREN, 0, tick));
                dram.tCK) - dram.timeStampOffset, rank);

    wakeUpAllowedAt = tick + dram.tCK;

    inLowPowerState = true;
    Tick wake_up_tick = std::max(curTick(), wakeUpAllowedAt);

    DPRINTF(DRAMState, "Scheduling wake-up for rank %d at tick %d\n",
            rank, wake_up_tick);

    pwrStatePostRefresh = pwrState;

    schedule(wakeUpEvent, wake_up_tick);

    for (auto &b : banks) {
        b.wrAllowedAt = std::max(wake_up_tick + exit_delay, b.wrAllowedAt);
        b.rdAllowedAt = std::max(wake_up_tick + exit_delay, b.rdAllowedAt);
        b.preAllowedAt = std::max(wake_up_tick + exit_delay, b.preAllowedAt);
        b.actAllowedAt = std::max(wake_up_tick + exit_delay, b.actAllowedAt);

    inLowPowerState = false;

        cmdList.push_back(Command(MemCommand::PUP_ACT, 0, wake_up_tick));
                dram.tCK) - dram.timeStampOffset, rank);

        cmdList.push_back(Command(MemCommand::PUP_PRE, 0, wake_up_tick));
                dram.tCK) - dram.timeStampOffset, rank);
    } else if (pwrStateTrans == PWR_SREF) {
        cmdList.push_back(Command(MemCommand::SREX, 0, wake_up_tick));
                dram.tCK) - dram.timeStampOffset, rank);

    assert(curTick() >= pwrStateTick);

    stats.pwrStateTime[prev_state] += duration;

        stats.totalIdleTime += duration;

    pwrState = pwrStateTrans;

        assert(outstandingEvents == 1);
        --outstandingEvents;

        DPRINTF(DRAMState, "Was refreshing for %llu ticks\n", duration);

        DPRINTF(DRAMState, "Switching to power down state after refreshing"
                " rank %d at %llu tick\n", rank, curTick());

        if (!(dram.ctrl->requestEventScheduled(dram.pseudoChannel))) {
            DPRINTF(DRAM, "Scheduling next request after refreshing"
                    " rank %d\n", rank);
            dram.ctrl->restartScheduler(curTick(), dram.pseudoChannel);

        DPRINTF(DRAMState, "All banks precharged\n");

        if (!activateEvent.scheduled()) {
            assert(!powerEvent.scheduled());

        } else if (refreshState == REF_PRE) {
            assert(prechargeEvent.scheduled());

        if (pwrStatePostRefresh == PWR_PRE_PDN && isQueueEmpty() &&
            dram.enableDRAMPowerdown) {
            DPRINTF(DRAMState, "Rank %d bypassing refresh and transitioning "
                    "to self refresh at %11u tick\n", rank, curTick());

            assert(outstandingEvents == 1);
            --outstandingEvents;

            DPRINTF(DRAMState, "Refreshing\n");

            assert(!powerEvent.scheduled());

                           dram.timeStampOffset);
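// Energy accounting: the per-rank DRAMPower model is queried for the
// energy consumed by each command class in the last window; every value
// is scaled by devicesPerRank because DRAMPower models a single device.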
    Data::MemoryPowerModel::Energy energy = power.powerlib.getEnergy();

    stats.actEnergy += energy.act_energy * dram.devicesPerRank;
    stats.preEnergy += energy.pre_energy * dram.devicesPerRank;
    stats.readEnergy += energy.read_energy * dram.devicesPerRank;
    stats.writeEnergy += energy.write_energy * dram.devicesPerRank;
    stats.refreshEnergy += energy.ref_energy * dram.devicesPerRank;
    stats.actBackEnergy += energy.act_stdby_energy * dram.devicesPerRank;
    stats.preBackEnergy += energy.pre_stdby_energy * dram.devicesPerRank;
    stats.actPowerDownEnergy += energy.f_act_pd_energy * dram.devicesPerRank;
    stats.prePowerDownEnergy += energy.f_pre_pd_energy * dram.devicesPerRank;
    stats.selfRefreshEnergy += energy.sref_energy * dram.devicesPerRank;

    stats.totalEnergy += energy.window_energy * dram.devicesPerRank;

    stats.averagePower = (stats.totalEnergy.value() /
                          (curTick() - dram.lastStatsResetTick)) *

    DPRINTF(DRAM, "Computing stats due to a dump callback\n");

    stats.pwrStateTime[pwrState] += (curTick() - pwrStateTick);

            dram.timeStampOffset);

    return (readEntries != 0) ||
           (dram.ctrl->inWriteBusState(true) && (writeEntries != 0));

    dram.lastStatsResetTick = curTick();
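// DRAMStats collects the interface-level statistics (burst counts,
// latencies, row-hit rates, bandwidth and bus utilisation); the derived
// quantities are wired up as formula stats in regStats() below.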
    : statistics::Group(&_dram),

    ADD_STAT(readBursts, statistics::units::Count::get(),
             "Number of DRAM read bursts"),
    ADD_STAT(writeBursts, statistics::units::Count::get(),
             "Number of DRAM write bursts"),
    ADD_STAT(perBankRdBursts, statistics::units::Count::get(),
             "Per bank read bursts"),
    ADD_STAT(perBankWrBursts, statistics::units::Count::get(),
             "Per bank write bursts"),

             "Total ticks spent queuing"),
             "Total ticks spent in databus transfers"),
             "Total ticks spent from burst creation until serviced "

    ADD_STAT(avgQLat, statistics::units::Rate<
                statistics::units::Tick, statistics::units::Count>::get(),
             "Average queueing delay per DRAM burst"),
    ADD_STAT(avgBusLat, statistics::units::Rate<
                statistics::units::Tick, statistics::units::Count>::get(),
             "Average bus latency per DRAM burst"),
    ADD_STAT(avgMemAccLat, statistics::units::Rate<
                statistics::units::Tick, statistics::units::Count>::get(),
             "Average memory access latency per DRAM burst"),

    ADD_STAT(readRowHits, statistics::units::Count::get(),
             "Number of row buffer hits during reads"),
    ADD_STAT(writeRowHits, statistics::units::Count::get(),
             "Number of row buffer hits during writes"),
    ADD_STAT(readRowHitRate, statistics::units::Ratio::get(),
             "Row buffer hit rate for reads"),
    ADD_STAT(writeRowHitRate, statistics::units::Ratio::get(),
             "Row buffer hit rate for writes"),

    ADD_STAT(bytesPerActivate, statistics::units::Byte::get(),
             "Bytes accessed per row activation"),
    ADD_STAT(bytesRead, statistics::units::Byte::get(),
             "Total bytes read"),
    ADD_STAT(bytesWritten, statistics::units::Byte::get(),
             "Total bytes written"),

    ADD_STAT(avgRdBW, statistics::units::Rate<
                statistics::units::Byte, statistics::units::Second>::get(),
             "Average DRAM read bandwidth in MiBytes/s"),
    ADD_STAT(avgWrBW, statistics::units::Rate<
                statistics::units::Byte, statistics::units::Second>::get(),
             "Average DRAM write bandwidth in MiBytes/s"),
    ADD_STAT(peakBW, statistics::units::Rate<
                statistics::units::Byte, statistics::units::Second>::get(),
             "Theoretical peak bandwidth in MiByte/s"),

    ADD_STAT(busUtil, statistics::units::Ratio::get(),
             "Data bus utilization in percentage"),
    ADD_STAT(busUtilRead, statistics::units::Ratio::get(),
             "Data bus utilization in percentage for reads"),
    ADD_STAT(busUtilWrite, statistics::units::Ratio::get(),
             "Data bus utilization in percentage for writes"),

    ADD_STAT(pageHitRate, statistics::units::Ratio::get(),
             "Row buffer hit rate, read and write combined")
    using namespace statistics;

    avgQLat.precision(2);
    avgBusLat.precision(2);
    avgMemAccLat.precision(2);

    readRowHitRate.precision(2);
    writeRowHitRate.precision(2);

    perBankRdBursts.init(dram.banksPerRank * dram.ranksPerChannel);
    perBankWrBursts.init(dram.banksPerRank * dram.ranksPerChannel);

        .init(dram.maxAccessesPerRow ?
              dram.maxAccessesPerRow : dram.rowBufferSize)

    peakBW.precision(2);
    busUtil.precision(2);
    busUtilWrite.precision(2);
    busUtilRead.precision(2);

    pageHitRate.precision(2);
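// The formula stats below are evaluated at stat-dump time. As a worked
// example with hypothetical numbers: bytesRead = 64e6 over
// simSeconds = 0.01 gives avgRdBW = (64e6 / 1000000) / 0.01 = 6400; with
// peakBW = 12800 and avgWrBW = 0, busUtil = (6400 + 0) / 12800 * 100 = 50%.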
    avgQLat = totQLat / readBursts;
    avgBusLat = totBusLat / readBursts;
    avgMemAccLat = totMemAccLat / readBursts;

    readRowHitRate = (readRowHits / readBursts) * 100;
    writeRowHitRate = (writeRowHits / writeBursts) * 100;

    avgRdBW = (bytesRead / 1000000) / simSeconds;
    avgWrBW = (bytesWritten / 1000000) / simSeconds;

             dram.bytesPerBurst() / 1000000;

    busUtil = (avgRdBW + avgWrBW) / peakBW * 100;
    busUtilRead = avgRdBW / peakBW * 100;
    busUtilWrite = avgWrBW / peakBW * 100;

    pageHitRate = (writeRowHits + readRowHits) /
                  (writeBursts + readBursts) * 100;
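// RankStats holds the per-rank energy breakdown reported by DRAMPower
// (in pJ), the derived average core power (mW), total idle time, and the
// time spent in each power state.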
    : statistics::Group(&_dram, csprintf("rank%d", _rank.rank).c_str()),

    ADD_STAT(actEnergy, statistics::units::Joule::get(),
             "Energy for activate commands per rank (pJ)"),
    ADD_STAT(preEnergy, statistics::units::Joule::get(),
             "Energy for precharge commands per rank (pJ)"),
    ADD_STAT(readEnergy, statistics::units::Joule::get(),
             "Energy for read commands per rank (pJ)"),
    ADD_STAT(writeEnergy, statistics::units::Joule::get(),
             "Energy for write commands per rank (pJ)"),
    ADD_STAT(refreshEnergy, statistics::units::Joule::get(),
             "Energy for refresh commands per rank (pJ)"),
    ADD_STAT(actBackEnergy, statistics::units::Joule::get(),
             "Energy for active background per rank (pJ)"),
    ADD_STAT(preBackEnergy, statistics::units::Joule::get(),
             "Energy for precharge background per rank (pJ)"),
    ADD_STAT(actPowerDownEnergy, statistics::units::Joule::get(),
             "Energy for active power-down per rank (pJ)"),
    ADD_STAT(prePowerDownEnergy, statistics::units::Joule::get(),
             "Energy for precharge power-down per rank (pJ)"),
    ADD_STAT(selfRefreshEnergy, statistics::units::Joule::get(),
             "Energy for self refresh per rank (pJ)"),

    ADD_STAT(totalEnergy, statistics::units::Joule::get(),
             "Total energy per rank (pJ)"),
    ADD_STAT(averagePower, statistics::units::Watt::get(),
             "Core power per rank (mW)"),

    ADD_STAT(totalIdleTime, statistics::units::Tick::get(),
             "Total Idle time Per DRAM Rank"),

             "Time in different power states")

        .subname(3, "PRE_PDN")
        .subname(5, "ACT_PDN");

    rank.computeStats();