#include "debug/DRAM.hh"
#include "debug/DRAMPower.hh"
#include "debug/DRAMState.hh"
#include "debug/Drain.hh"
#include "debug/QOS.hh"

    port(name() + ".port", *this), isTimingMode(false),
    retryRdReq(false), retryWrReq(false),
    ranks.push_back(rank);
    if (p->write_low_thresh_perc >= p->write_high_thresh_perc)
        fatal("Write buffer low threshold %d must be smaller than the "
              "high threshold %d\n", p->write_low_thresh_perc,
              p->write_high_thresh_perc);
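For orientation, a minimal standalone sketch (not the gem5 source) of how percentage thresholds like these are commonly turned into absolute write-queue entry counts with hysteresis; the scaling by writeBufferSize is an assumption here, only the low-below-high relationship is taken from the check above.

    #include <cassert>
    #include <cstdint>

    struct WriteThresholds {
        uint32_t low;   // draining may stop once the fill level drops below this
        uint32_t high;  // force a switch to writes at or above this fill level
    };

    WriteThresholds makeThresholds(uint32_t writeBufferSize,
                                   uint32_t lowPerc, uint32_t highPerc)
    {
        assert(lowPerc < highPerc); // mirrors the fatal() check above
        return { writeBufferSize * lowPerc / 100,
                 writeBufferSize * highPerc / 100 };
    }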
    if (deviceCapacity != capacity / (1024 * 1024))
        warn("DRAM device capacity (%d Mbytes) does not match the "
             "address range assigned (%d Mbytes)\n", deviceCapacity,
             capacity / (1024 * 1024));
    DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,

    DPRINTF(DRAM, "Row buffer size %d bytes with %d columns per row buffer\n",
        fatal("tREFI (%d) must be larger than tRP (%d) and tRFC (%d)\n",

        fatal("banks per rank (%d) must be equal to or larger than "
              "bank groups per rank (%d)\n",

        fatal("Banks per rank (%d) must be evenly divisible by bank groups "
              "per rank (%d) for equal banks per bank group\n",

        fatal("tCCD_L (%d) should be larger than tBURST (%d) when "
              "bank groups per rank (%d) is greater than 1\n",

        fatal("tCCD_L_WR (%d) should be larger than tBURST (%d) when "
              "bank groups per rank (%d) is greater than 1\n",

        fatal("tRRD_L (%d) should be larger than tRRD (%d) when "
              "bank groups per rank (%d) is greater than 1\n",

        fatal("DRAMCtrl %s is unconnected!\n", name());

        fatal("Channel interleaving of %s doesn't match RoRaBaChCo "
              "address map\n", name());

        fatal("Channel interleaving of %s must be at least as large "
              "as the cache line size\n", name());

        fatal("Channel interleaving of %s must be at most as large "
              "as the row-buffer size\n", name());
    DPRINTF(DRAM, "Read queue limit %d, current size %d, entries needed %d\n",

    DPRINTF(DRAM, "Write queue limit %d, current size %d, entries needed %d\n",
        panic("Unknown address mapping policy chosen!");

    DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n",
            dramPktAddr, rank, bank, row);

    return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr,
                          size, ranks[rank]->banks[bank], *ranks[rank]);
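An illustrative sketch (assumed, simplified field names, not the gem5 code) of the flat bank-id convention used by DRAMPacket, where two ranks of eight banks map to ids 0..15 as described in the bankId reference entry further down.

    #include <cstdint>

    // bankId = rank * banksPerRank + bank, e.g. rank1/bank0 -> 8 with 8 banks per rank
    uint16_t flatBankId(uint8_t rank, uint8_t bank, uint32_t banksPerRank)
    {
        return static_cast<uint16_t>(rank * banksPerRank + bank);
    }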
    assert(pktCount != 0);

    unsigned pktsServicedByWrQ = 0;

    for (int cnt = 0; cnt < pktCount; ++cnt) {
                                    base_addr + pkt->getSize()) - addr;

        bool foundInWrQ = false;

            for (const auto& p : vec) {
                if (p->addr <= addr &&
                    ((addr + size) <= (p->addr + p->size))) {

                        "Read to addr %lld with size %d serviced by "

    if (pktCount > 1 && burst_helper == NULL) {
        DPRINTF(DRAM, "Read to addr %lld translates to %d "
                "dram requests\n", pkt->getAddr(), pktCount);

            DPRINTF(DRAM, "Adding to read queue\n");

    if (pktsServicedByWrQ == pktCount) {

    if (burst_helper != NULL)

        DPRINTF(DRAM, "Request scheduled immediately\n");
    for (int cnt = 0; cnt < pktCount; ++cnt) {
                                    base_addr + pkt->getSize()) - addr;

            DPRINTF(DRAM, "Adding to write queue\n");

            DPRINTF(DRAM, "Merging write burst with existing queue entry\n");

        DPRINTF(DRAM, "Request scheduled immediately\n");
    DPRINTF(DRAM, "===READ QUEUE===\n\n");
    for (const auto& packet : queue) {
        DPRINTF(DRAM, "Read %lu\n", packet->addr);

    DPRINTF(DRAM, "\n===RESP QUEUE===\n\n");
        DPRINTF(DRAM, "Response %lu\n", packet->addr);

    DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n");
    for (const auto& packet : queue) {
        DPRINTF(DRAM, "Write %lu\n", packet->addr);
    DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",

             "Should only see read and writes at memory controller\n");

            DPRINTF(DRAM, "Write queue full, not accepting\n");

            DPRINTF(DRAM, "Read queue full, not accepting\n");
653 "processRespondEvent(): Some req has reached its readyTime\n");
661 DPRINTF(DRAM,
"number of read entries for rank %d is %d\n",
685 DPRINTF(DRAMState,
"Rank %d sleep at tick %d; current power state is " 728 DPRINTF(Drain,
"DRAM controller done draining\n");
DRAMCtrl::DRAMPacketQueue::iterator

    DRAMCtrl::DRAMPacketQueue::iterator ret = queue.end();

    if (!queue.empty()) {
        if (queue.size() == 1) {

            if (ranks[dram_pkt->rank]->inRefIdleState()) {

                DPRINTF(DRAM, "Single request, going to a free rank\n");

                DPRINTF(DRAM, "Single request, going to a busy rank\n");

            for (auto i = queue.begin(); i != queue.end(); ++i) {

                if (ranks[dram_pkt->rank]->inRefIdleState()) {

            panic("No scheduling policy chosen\n");
DRAMCtrl::DRAMPacketQueue::iterator

    bool filled_earliest_banks = false;

    bool hidden_bank_prep = false;

    bool found_hidden_bank = false;

    bool found_prepped_pkt = false;

    bool found_earliest_pkt = false;

    auto selected_pkt_it = queue.end();

    for (auto i = queue.begin(); i != queue.end() ; ++i) {

        DPRINTF(DRAM, "%s checking packet in bank %d\n",

                "%s bank %d - Rank %d available\n", __func__,

            if (col_allowed_at <= min_col_at) {

                DPRINTF(DRAM, "%s Seamless row buffer hit\n", __func__);

            } else if (!found_hidden_bank && !found_prepped_pkt) {

                found_prepped_pkt = true;
                DPRINTF(DRAM, "%s Prepped row buffer hit\n", __func__);

            } else if (!found_earliest_pkt) {

                if (!filled_earliest_banks) {
                    std::tie(earliest_banks, hidden_bank_prep) =

                    filled_earliest_banks = true;

                if (bits(earliest_banks[dram_pkt->rank],

                    found_earliest_pkt = true;
                    found_hidden_bank = hidden_bank_prep;

                    if (hidden_bank_prep || !found_prepped_pkt)

            DPRINTF(DRAM, "%s bank %d - Rank %d not available\n", __func__,

    if (selected_pkt_it == queue.end()) {
        DPRINTF(DRAM, "%s no available ranks found\n", __func__);

    return selected_pkt_it;
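A compact sketch of the FR-FCFS preference order used above, with no timing math: seamless row hits win outright, otherwise the first prepped (open-row) hit, otherwise the first packet whose bank can be activated earliest. The enum and function are illustrative, not part of gem5.

    #include <cstddef>
    #include <vector>

    // Candidate categories in decreasing priority, mirroring the logic above.
    enum FrfcfsClass { SEAMLESS_HIT, PREPPED_HIT, EARLIEST_BANK, NOT_READY };

    std::size_t pickFrfcfs(const std::vector<FrfcfsClass> &cls)
    {
        std::size_t best = cls.size();
        int bestClass = NOT_READY;
        for (std::size_t i = 0; i < cls.size(); ++i) {
            if (cls[i] == SEAMLESS_HIT)
                return i;              // a seamless row hit ends the search
            if (cls[i] < bestClass) {  // keep the first packet of a better class
                bestClass = cls[i];
                best = i;
            }
        }
        return best;                   // cls.size() means no rank was available
    }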
                           Tick act_tick, uint32_t row)

    DPRINTF(DRAM, "Activate at tick %d\n", act_tick);

    DPRINTF(DRAM, "Activate bank %d, rank %d at tick %lld, now got %d active\n",
            bank_ref.bank, rank_ref.rank, act_tick,

                rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD_L,
                                             rank_ref.banks[i].actAllowedAt);

                rank_ref.banks[i].actAllowedAt = std::max(act_tick + tRRD,
                                             rank_ref.banks[i].actAllowedAt);

            panic("Got %d activates in window %d (%llu - %llu) which "

        rank_ref.actTicks.push_front(act_tick);

            DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate "

                rank_ref.banks[j].actAllowedAt =
                                             rank_ref.banks[j].actAllowedAt);
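An illustrative sketch, under assumptions, of the rolling activation-window idea behind the actTicks history above: with at most activationLimit ACTs per tXAW window, a new ACT is only allowed tXAW after the oldest ACT still in the window. The zero-initialised history and push-front/back ordering are assumptions modelled on the actTicks description below.

    #include <cstdint>
    #include <deque>

    using Tick = uint64_t;

    // Earliest tick at which a new ACT may issue, given the recent ACT history
    // (newest at the front, oldest at the back, zeros meaning "not yet used").
    Tick nextActAllowed(const std::deque<Tick> &actTicks, Tick tXAW)
    {
        if (actTicks.empty() || actTicks.back() == 0)
            return 0;                       // window not yet filled
        return actTicks.back() + tXAW;      // oldest ACT in the window plus tXAW
    }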
    Tick pre_done_at = pre_at + tRP;

    DPRINTF(DRAM, "Precharging bank %d, rank %d at tick %lld, now got "
            "%d active\n", bank.bank, rank_ref.rank, pre_at,
    DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",

    bool row_hit = true;

    const Tick col_allowed_at = dram_pkt->isRead() ?

            if (dram_pkt->rank == j) {

                    dly_to_rd_cmd = dram_pkt->isRead() ?

                    dly_to_wr_cmd = dram_pkt->isRead() ?

            ranks[j]->banks[i].rdAllowedAt = std::max(cmd_at + dly_to_rd_cmd,
                                             ranks[j]->banks[i].rdAllowedAt);
            ranks[j]->banks[i].wrAllowedAt = std::max(cmd_at + dly_to_wr_cmd,
                                             ranks[j]->banks[i].wrAllowedAt);

    bool auto_precharge = pageMgmt == Enums::close ||

    if (!auto_precharge &&
        (pageMgmt == Enums::open_adaptive ||
         pageMgmt == Enums::close_adaptive)) {

        bool got_more_hits = false;
        bool got_bank_conflict = false;

            auto p = queue[i].begin();

            while (!got_more_hits && p != queue[i].end()) {
                if (dram_pkt != (*p)) {
                    bool same_rank_bank = (dram_pkt->rank == (*p)->rank) &&
                                          (dram_pkt->bank == (*p)->bank);

                    bool same_row = dram_pkt->row == (*p)->row;
                    got_more_hits |= same_rank_bank && same_row;
                    got_bank_conflict |= same_rank_bank && !same_row;
        auto_precharge = !got_more_hits &&
            (got_bank_conflict || pageMgmt == Enums::close_adaptive);
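The adaptive page-policy decision above reduces to a small predicate; this sketch simply restates it outside the controller so the condition is easy to read (the parameter names are illustrative).

    // Close the row unless more hits to it are pending; under open_adaptive the
    // row is only closed when a bank conflict is also waiting, while
    // close_adaptive closes it whenever no further hits are queued.
    bool shouldAutoPrecharge(bool gotMoreHits, bool gotBankConflict,
                             bool closeAdaptive)
    {
        return !gotMoreHits && (gotBankConflict || closeAdaptive);
    }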
    std::string mem_cmd = dram_pkt->isRead() ? "RD" : "WR";
    DPRINTF(DRAM, "Access to %lld, ready at %lld next burst at %lld.\n",

    if (auto_precharge) {

        DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId);

    if (dram_pkt->isRead()) {
    DPRINTF(DRAM, "QoS Turnarounds selected state %s %s\n",
            (busState == MemCtrl::READ) ? "READ" : "WRITE",
            switched_cmd_type ? "[turnaround triggered]" : "");

    if (switched_cmd_type) {

                "Switching to writes after %d reads with %d reads "

                "Switching to reads after %d writes with %d writes "

        if (!r->inRefIdleState()) {

            DPRINTF(DRAMState, "Rank %d is not available\n", r->rank);

            r->checkDrainDone();

            if ((r->pwrState == PWR_SREF) && r->inLowPowerState) {
                DPRINTF(DRAMState, "Rank %d is in self-refresh\n", r->rank);

                if (r->forceSelfRefreshExit()) {
                    DPRINTF(DRAMState, "rank %d was in self refresh and"
                            " should wake up\n", r->rank);

                    r->scheduleWakeUpEvent(tXS);

        bool switch_to_writes = false;

                DPRINTF(DRAM, "Switching to writes due to read queue empty\n");
                switch_to_writes = true;

                    DPRINTF(Drain, "DRAM controller done draining\n");
        bool read_found = false;
        DRAMPacketQueue::iterator to_read;

                    "DRAM controller checking READ queue [%d] priority [%d elements]\n",
                    prio, queue->size());

            to_read = chooseNext((*queue), switched_cmd_type ? tCS : 0);

            if (to_read != queue->end()) {

            DPRINTF(DRAM, "No Reads Found - exiting\n");

        auto dram_pkt = *to_read;

        assert(dram_pkt->rankRef.inRefIdleState());

        ++dram_pkt->rankRef.outstandingEvents;

        assert(dram_pkt->readyTime >= curTick());

        logResponse(MemCtrl::READ, (*to_read)->masterId(),
                    dram_pkt->qosValue(), dram_pkt->getAddr(), 1,
                    dram_pkt->readyTime - dram_pkt->entryTime);

            assert(respQueue.back()->readyTime <= dram_pkt->readyTime);

            switch_to_writes = true;

        readQueue[dram_pkt->qosValue()].erase(to_read);
    if (switch_to_writes) {

        bool write_found = false;
        DRAMPacketQueue::iterator to_write;

                    "DRAM controller checking WRITE queue [%d] priority [%d elements]\n",
                    prio, queue->size());

                                   switched_cmd_type ? std::min(tRTW, tCS) : 0);

            if (to_write != queue->end()) {

            DPRINTF(DRAM, "No Writes Found - exiting\n");

        auto dram_pkt = *to_write;

        assert(dram_pkt->rankRef.inRefIdleState());

        --dram_pkt->rankRef.writeEntries;

        if (!dram_pkt->rankRef.writeDoneEvent.scheduled()) {
            schedule(dram_pkt->rankRef.writeDoneEvent, dram_pkt->readyTime);

            ++dram_pkt->rankRef.outstandingEvents;

        } else if (dram_pkt->rankRef.writeDoneEvent.when() <
                   dram_pkt->readyTime) {

            reschedule(dram_pkt->rankRef.writeDoneEvent, dram_pkt->readyTime);

                    dram_pkt->qosValue(), dram_pkt->getAddr(), 1,
                    dram_pkt->readyTime - dram_pkt->entryTime);

        writeQueue[dram_pkt->qosValue()].erase(to_write);
        bool below_threshold =

                      Tick min_col_at) const

    bool found_seamless_bank = false;

    bool hidden_bank_prep = false;
    for (const auto& p : queue) {
        if (p->rankRef.inRefIdleState())
            got_waiting[p->bankId] = true;

            uint16_t bank_id = i * banksPerRank + j;

            if (got_waiting[bank_id]) {

                assert(ranks[i]->inRefIdleState());

                    ranks[i]->banks[j].rdAllowedAt :
                    ranks[i]->banks[j].wrAllowedAt;

                Tick col_at = std::max(col_allowed_at, act_at + tRCD);

                bool new_seamless_bank = col_at <= min_col_at;

                if (new_seamless_bank ||
                    (!found_seamless_bank && act_at <= min_act_at)) {

                    if (!found_seamless_bank &&
                        (new_seamless_bank || act_at < min_act_at)) {
                        std::fill(bank_mask.begin(), bank_mask.end(), 0);

                    found_seamless_bank |= new_seamless_bank;

                    hidden_bank_prep = act_at <= hidden_act_max;

                    min_act_at = act_at;

    return make_pair(bank_mask, hidden_bank_prep);
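A small illustrative sketch of the per-rank bank bitmask that minBankPrep() returns, one uint32_t per rank with one bit per bank, which the FR-FCFS loop above then tests with bits(). The helper names are hypothetical; the only assumption is at most 32 banks per rank.

    #include <cstdint>
    #include <vector>

    // Mark bank `bank` of rank `rank` as an earliest-ready candidate.
    void markBank(std::vector<uint32_t> &bankMask, unsigned rank, unsigned bank)
    {
        bankMask[rank] |= (1u << bank);   // assumes bank < 32
    }

    bool bankMarked(const std::vector<uint32_t> &bankMask,
                    unsigned rank, unsigned bank)
    {
        return (bankMask[rank] >> bank) & 1u;
    }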
      refreshState(REF_IDLE), inLowPowerState(false), rank(rank),
      readEntries(0), writeEntries(0), outstandingEvents(0),
      wakeUpAllowedAt(0), power(_p, false), banks(_p->banks_per_rank),
      numBanksActive(0), actTicks(_p->activation_limit, 0),
      stats(_memory, *this)

    for (int b = 0; b < _p->banks_per_rank; b++) {

        if (_p->bank_groups_per_rank > 0) {

            banks[b].bankgr = b % _p->bank_groups_per_rank;

    return no_queued_cmds;
    DPRINTF(DRAM, "Refresh drain done, now precharging\n");

    auto next_iter = cmdList.begin();

    for ( ; next_iter != cmdList.end() ; ++next_iter) {

        memory.enableDRAMPowerdown) {

        DPRINTF(DRAMState, "Rank %d sleep at tick %d\n",

        DPRINTF(DRAM, "Refresh due\n");

            && (memory.nextReqEvent.scheduled())) {

            DPRINTF(DRAM, "Refresh awaiting draining\n");

            DPRINTF(DRAM, "Wake Up for refresh\n");

        DPRINTF(DRAM, "Precharging all\n");

                pre_at = std::max(b.preAllowedAt, pre_at);

            for (auto &b : banks) {

                memory.prechargeBank(*this, b, pre_at, false);

                b.actAllowedAt = std::max(b.actAllowedAt, act_allowed_at);
                b.preAllowedAt = std::max(b.preAllowedAt, pre_at);

            DPRINTF(DRAM, "All banks already precharged, starting refresh\n");

            b.actAllowedAt = ref_done_at;

            fatal("Refresh was delayed so long we cannot catch up\n");
        DPRINTF(DRAMState, "Rank %d sleeping after refresh and was in "
                "power state %d before refreshing\n", rank,

        DPRINTF(DRAMState, "Rank %d sleeping after refresh but was NOT"
                " in a low power state before refreshing\n", rank);

    DPRINTF(DRAMState, "Refresh done at %llu and next refresh"

    DPRINTF(DRAMState, "Scheduling power event at %llu to state %d\n",

        panic("Scheduled power event at %llu to state %d, "
              "with scheduled event at %llu to %d\n", tick, pwr_state,

    } else if (pwr_state == PWR_REF) {

    } else if (pwr_state == PWR_SREF) {

    DPRINTF(DRAMState, "Scheduling wake-up for rank %d at tick %d\n",
            rank, wake_up_tick);

        b.wrAllowedAt = std::max(wake_up_tick + exit_delay, b.wrAllowedAt);
        b.rdAllowedAt = std::max(wake_up_tick + exit_delay, b.rdAllowedAt);
        b.preAllowedAt = std::max(wake_up_tick + exit_delay, b.preAllowedAt);
        b.actAllowedAt = std::max(wake_up_tick + exit_delay, b.actAllowedAt);

        cmdList.push_back(Command(MemCommand::PUP_ACT, 0, wake_up_tick));

        cmdList.push_back(Command(MemCommand::PUP_PRE, 0, wake_up_tick));

    DPRINTF(DRAMState, "Was refreshing for %llu ticks\n", duration);

    DPRINTF(DRAMState, "Switching to power down state after refreshing"

    if (!memory.nextReqEvent.scheduled()) {
        DPRINTF(DRAM, "Scheduling next request after refreshing"
                " rank %d\n", rank);
    DPRINTF(DRAMState, "All banks precharged\n");

        memory.enableDRAMPowerdown) {

        DPRINTF(DRAMState, "Rank %d bypassing refresh and transitioning "
                "to self refresh at %llu tick\n", rank, curTick());

    DPRINTF(DRAMState, "Refreshing\n");
    Data::MemoryPowerModel::Energy energy = power.powerlib.getEnergy();

    DPRINTF(DRAM, "Computing stats due to a dump callback\n");
    ADD_STAT(readReqs, "Number of read requests accepted"),
    ADD_STAT(writeReqs, "Number of write requests accepted"),

             "Number of DRAM read bursts, "
             "including those serviced by the write queue"),
             "Number of DRAM write bursts, "
             "including those merged in the write queue"),
             "Number of DRAM read bursts serviced by the write queue"),
             "Number of DRAM write bursts merged with an existing one"),
             "Number of requests that are neither read nor write"),
    ADD_STAT(perBankRdBursts, "Per bank read bursts"),
    ADD_STAT(perBankWrBursts, "Per bank write bursts"),
    ADD_STAT(avgRdQLen, "Average read queue length when enqueuing"),
    ADD_STAT(avgWrQLen, "Average write queue length when enqueuing"),

    ADD_STAT(totQLat, "Total ticks spent queuing"),
    ADD_STAT(totBusLat, "Total ticks spent in databus transfers"),
             "Total ticks spent from burst creation until serviced "
    ADD_STAT(avgQLat, "Average queueing delay per DRAM burst"),
    ADD_STAT(avgBusLat, "Average bus latency per DRAM burst"),
    ADD_STAT(avgMemAccLat, "Average memory access latency per DRAM burst"),

    ADD_STAT(numRdRetry, "Number of times read queue was full causing retry"),
    ADD_STAT(numWrRetry, "Number of times write queue was full causing retry"),

    ADD_STAT(readRowHits, "Number of row buffer hits during reads"),
    ADD_STAT(writeRowHits, "Number of row buffer hits during writes"),
    ADD_STAT(readRowHitRate, "Row buffer hit rate for reads"),
    ADD_STAT(writeRowHitRate, "Row buffer hit rate for writes"),

    ADD_STAT(readPktSize, "Read request sizes (log2)"),
    ADD_STAT(writePktSize, "Write request sizes (log2)"),

    ADD_STAT(rdQLenPdf, "What read queue length does an incoming req see"),
    ADD_STAT(wrQLenPdf, "What write queue length does an incoming req see"),

    ADD_STAT(bytesPerActivate, "Bytes accessed per row activation"),
             "Reads before turning the bus around for writes"),
             "Writes before turning the bus around for reads"),

    ADD_STAT(bytesReadDRAM, "Total number of bytes read from DRAM"),
    ADD_STAT(bytesReadWrQ, "Total number of bytes read from write queue"),
    ADD_STAT(bytesWritten, "Total number of bytes written to DRAM"),
    ADD_STAT(bytesReadSys, "Total read bytes from the system interface side"),
             "Total written bytes from the system interface side"),

    ADD_STAT(avgRdBW, "Average DRAM read bandwidth in MiByte/s"),
    ADD_STAT(avgWrBW, "Average achieved write bandwidth in MiByte/s"),
    ADD_STAT(avgRdBWSys, "Average system read bandwidth in MiByte/s"),
    ADD_STAT(avgWrBWSys, "Average system write bandwidth in MiByte/s"),
    ADD_STAT(peakBW, "Theoretical peak bandwidth in MiByte/s"),

    ADD_STAT(busUtil, "Data bus utilization in percentage"),
    ADD_STAT(busUtilRead, "Data bus utilization in percentage for reads"),
    ADD_STAT(busUtilWrite, "Data bus utilization in percentage for writes"),

    ADD_STAT(totGap, "Total gap between requests"),
    ADD_STAT(avgGap, "Average gap between requests"),

    ADD_STAT(masterReadBytes, "Per-master bytes read from memory"),
    ADD_STAT(masterWriteBytes, "Per-master bytes write to memory"),
             "Per-master bytes read from memory rate (Bytes/sec)"),
             "Per-master bytes write to memory rate (Bytes/sec)"),
             "Per-master read serviced memory accesses"),
             "Per-master write serviced memory accesses"),
             "Per-master read total memory access latency"),
             "Per-master write total memory access latency"),
             "Per-master read average memory access latency"),
             "Per-master write average memory access latency"),

    ADD_STAT(pageHitRate, "Row buffer hit rate, read and write combined")
    using namespace Stats;

    for (int i = 0; i < max_masters; i++) {
    ADD_STAT(actEnergy, "Energy for activate commands per rank (pJ)"),
    ADD_STAT(preEnergy, "Energy for precharge commands per rank (pJ)"),
    ADD_STAT(readEnergy, "Energy for read commands per rank (pJ)"),
    ADD_STAT(writeEnergy, "Energy for write commands per rank (pJ)"),
    ADD_STAT(refreshEnergy, "Energy for refresh commands per rank (pJ)"),
    ADD_STAT(actBackEnergy, "Energy for active background per rank (pJ)"),
    ADD_STAT(preBackEnergy, "Energy for precharge background per rank (pJ)"),
             "Energy for active power-down per rank (pJ)"),
             "Energy for precharge power-down per rank (pJ)"),
    ADD_STAT(selfRefreshEnergy, "Energy for self refresh per rank (pJ)"),

    ADD_STAT(totalEnergy, "Total energy per rank (pJ)"),
    ADD_STAT(averagePower, "Core power per rank (mW)"),

    ADD_STAT(totalIdleTime, "Total Idle time Per DRAM Rank"),
    ADD_STAT(memoryStateTime, "Time in different power states")
    if (if_name != "port") {

        DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d,"

                DPRINTF(DRAM, "Rank%d: Forcing self-refresh wakeup in drain\n",

                r->scheduleWakeUpEvent(tXS);

    bool all_ranks_drained = true;

        all_ranks_drained = r->inPwrIdleState() && r->inRefIdleState() &&

    return all_ranks_drained;

    : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this, true),

    ranges.push_back(memory.getAddrRange());

    memory.recvFunctional(pkt);

    return memory.recvAtomic(pkt);

    return memory.recvTimingReq(pkt);

DRAMCtrlParams::create()
#define panic(...)
This implements a cprintf based panic() function.
void logResponse(BusState dir, MasterID m_id, uint8_t qos, Addr addr, uint64_t entries, double delay)
Called upon receiving a response; updates statistics and queue status.
Enums::PageManage pageMgmt
void functionalAccess(PacketPtr pkt)
Perform an untimed memory read or write without changing anything but the memory itself.
bool enableDRAMPowerdown
Enable or disable DRAM powerdown states.
const uint32_t writeLowThreshold
const uint32_t activationLimit
Ports are used to interface objects to each other.
PowerState
The power state captures the different operational states of the DRAM and interacts with the bus read...
Stats::Scalar mergedWrBursts
const Tick entryTime
When did request enter the controller.
virtual Port & getPort(const std::string &if_name, PortID idx=InvalidPortID)
Get a port with a given name and index.
Stats::Scalar totalEnergy
void resetStats()
Reset stats on a stats event.
Stats::Scalar bytesReadDRAM
void sendRangeChange() const
Called by the owner to send a range change.
Derived & subname(off_type index, const std::string &name)
Set the subfield name for the given index, and marks this stat to print at the end of simulation...
void prechargeBank(Rank &rank_ref, Bank &bank_ref, Tick pre_at, bool trace=true)
Precharge a given bank and also update when the precharge is done.
#define fatal(...)
This implements a cprintf based fatal() function.
bool retryRdReq
Remember if we have to retry a request when available.
BusState busStateNext
bus state for next request event triggered
Stats::Scalar bytesWrittenSys
const std::string & name()
virtual AddrRangeList getAddrRanges() const
Get a list of the non-overlapping address ranges the owner is responsible for.
std::vector< Command > cmdList
List of commands issued, to be sent to DRAMPpower at refresh and stats dump.
std::vector< Rank * > ranks
Vector of ranks.
std::string getMasterName(MasterID master_id)
Get the name of an object for a given request id.
DrainState
Object drain/handover states.
void doDRAMAccess(DRAMPacket *dram_pkt)
Actually do the DRAM access - figure out the latency it will take to service the req based on bank st...
EventFunctionWrapper nextReqEvent
Tick when() const
Get the time that the event is scheduled.
Stats::Formula busUtilRead
uint64_t granularity() const
Determing the interleaving granularity of the range.
uint64_t totalReadQueueSize
Total read request packets queue length in #packets.
std::pair< std::vector< uint32_t >, bool > minBankPrep(const DRAMPacketQueue &queue, Tick min_col_at) const
Find which are the earliest banks ready to issue an activate for the enqueued requests.
A DRAM packet stores packets along with the timestamp of when the packet entered the queue...
const FlagsType nonan
Don't print if this is NAN.
uint32_t writeEntries
Track number of packets in write queue going to this rank.
Stats::Vector perBankRdBursts
DRAMPacketQueue::iterator chooseNextFRFCFS(DRAMPacketQueue &queue, Tick extra_col_delay)
For FR-FCFS policy reorder the read/write queue depending on row buffer hits and earliest bursts avai...
uint32_t readEntries
Track number of packets in read queue going to this rank.
Tick lastStatsResetTick
The time when stats were last reset used to calculate average power.
MemoryPort(const std::string &name, DRAMCtrl &_memory)
const Tick frontendLatency
Pipeline latency of the controller frontend.
Stats::Vector masterReadTotalLat
Stats::Formula pageHitRate
Stats::Scalar selfRefreshEnergy
Stats::Histogram wrPerTurnAround
bool cacheResponding() const
DrainState drainState() const
Return the current drain state of an object.
bool recvTimingReq(PacketPtr pkt)
Stats::Formula avgWrBWSys
DRAMCtrl(const DRAMCtrlParams *p)
DrainState drain() override
Notify an object that it needs to drain its state.
void signalDrainDone() const
Signal that an object is drained.
MasterID masterId() const
Get the packet MasterID (interface compatibility with Packet)
std::deque< DRAMPacket * > respQueue
Response queue where read packets wait after we're done working with them, but it's not time to send ...
Rank(DRAMCtrl &_memory, const DRAMCtrlParams *_p, int rank)
virtual void regStats()
Callback to set stat parameters.
unsigned int burstsServiced
Number of DRAM bursts serviced so far for a system packet.
Stats::Vector memoryStateTime
Track time spent in each power state.
RefreshState refreshState
current refresh state
Addr addr
The starting address of the DRAM packet.
Histogram & init(size_type size)
Set the parameters of this histogram.
bool writeQueueFull(unsigned int pktCount) const
Check if the write queue has room for more entries.
Tick refreshDueAt
Keep track of when a refresh is due.
bool isConnected() const
Is this port currently connected to a peer?
A queued port is a port that has an infinite queue for outgoing packets and thus decouples the module...
The DRAM controller is a single-channel memory controller capturing the most important timing constra...
Stats::Vector writePktSize
uint8_t rank
Current Rank index.
uint8_t numPriorities() const
Gets the total number of priority levels in the QoS memory controller.
const uint32_t burstLength
uint8_t qosSchedule(std::initializer_list< Queues *> queues_ptr, uint64_t queue_entry_size, const PacketPtr pkt)
Assign priority to a packet by executing the configured QoS policy.
void pushLabel(const std::string &lbl)
Push label for PrintReq (safe to call unconditionally).
void regStats() override
Callback to set stat parameters.
const uint32_t ranksPerChannel
Tick Frequency
The simulated frequency of curTick(). (In ticks per second)
bool readQueueFull(unsigned int pktCount) const
Check if the read queue has room for more entries.
Derived & flags(Flags _flags)
Set the flags and marks this stat to print at the end of simulation.
Stats::Formula simSeconds
const std::unique_ptr< TurnaroundPolicy > turnPolicy
QoS Bus Turnaround Policy: selects the bus direction (READ/WRITE)
const uint32_t deviceRowBufferSize
A burst helper helps organize and manage a packet that is larger than the DRAM burst size...
Derived & init(size_type size)
Set this vector to have the given size.
const uint16_t bankId
Bank id is calculated considering banks in all the ranks eg: 2 ranks each with 8 banks, then bankId = 0 –> rank0, bank0 and bankId = 8 –> rank1, bank0.
void deschedule(Event &event)
void computeStats()
Computes stats just prior to dump event.
virtual void init() override
Initialise this memory.
uint8_t outstandingEvents
Number of ACT, RD, and WR events currently scheduled. Incremented when a refresh event is started, as w...
EventFunctionWrapper activateEvent
Stats::Scalar writeEnergy
void startup(Tick ref_tick)
Kick off accounting for power and refresh states and schedule initial refresh.
bool isQueueEmpty() const
Check if the command queue of current rank is idle.
unsigned int numBanksActive
To track number of banks which are currently active for this rank.
Stats::Vector perBankWrBursts
void recvFunctional(PacketPtr pkt)
Receive a functional request packet from the peer.
bool isTimingMode
Remember if the memory system is in timing mode.
void processWriteDoneEvent()
void regStats() override
Callback to set stat parameters.
A basic class to track the bank state, i.e.
Addr getCtrlAddr(Addr addr)
Get an address in a dense range which starts from 0.
const uint32_t maxAccessesPerRow
Max column accesses (read and write) per row, before forcefully closing it.
const uint32_t bankGroupsPerRank
Tick curTick()
The current simulated tick.
Tick recvAtomic(PacketPtr pkt)
Receive an atomic request packet from the peer.
Stats::Vector masterReadBytes
Enums::MemSched memSchedPolicy
Memory controller configuration initialized based on parameter values.
std::string csprintf(const char *format, const Args &...args)
bool needsResponse() const
bool scheduled() const
Determine if the current event is scheduled.
const Tick backendLatency
Pipeline latency of the backend and PHY.
EventFunctionWrapper prechargeEvent
uint32_t headerDelay
The extra delay from seeing the packet until the header is transmitted.
Stats::Scalar servicedByWrQ
void schedTimingResp(PacketPtr pkt, Tick when)
Schedule the sending of a timing response.
BurstHelper * burstHelper
A pointer to the BurstHelper if this DRAMPacket is a split packet If not a split packet (common case)...
std::vector< Bank > banks
Vector of Banks.
uint64_t Tick
Tick count type.
uint64_t power(uint32_t n, uint32_t e)
uint8_t qosValue() const
QoS Value getter Returns 0 if QoS value was never set (constructor default).
void qosValue(const uint8_t qv)
Set the packet QoS value (interface compatibility with Packet)
void popLabel()
Pop label for PrintReq (safe to call unconditionally).
PowerState pwrState
Current power state.
void checkDrainDone()
Let the rank check if it was waiting for requests to drain to allow it to transition states...
EventFunctionWrapper respondEvent
void replaceBits(T &val, int first, int last, B bit_val)
A convenience function to replace bits first to last of val with bit_val in place.
std::deque< Tick > actTicks
List to keep track of activate ticks.
Stats::Vector masterWriteAccesses
virtual void preDumpStats()
Callback before stats are dumped.
DRAMStats(DRAMCtrl &dram)
void access(PacketPtr pkt)
Perform an untimed memory access and update all the state (e.g.
Stats::Formula readRowHitRate
virtual void resetStats()
Callback to reset stats.
std::vector< DRAMPacketQueue > readQueue
The controller's main read and write queues, with support for QoS reordering.
Tick recvAtomic(PacketPtr pkt)
MasterID masterId() const
bool isPowerOf2(const T &n)
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condi...
BusState busState
Bus state used to control the read/write switching and drive the scheduling of the next request...
void recordTurnaroundStats()
Record statistics on turnarounds based on busStateNext and busState values.
Stats::Formula masterReadAvgLat
Tick pwrStateTick
Track when we transitioned to the current power state.
Stats::Vector masterReadAccesses
static bool sortTime(const Command &cmd, const Command &cmd_next)
Function for sorting Command structures based on timeStamp.
uint64_t totalWriteQueueSize
Total write request packets queue length in #packets.
Stats::Scalar prePowerDownEnergy
void resetStats() override
Callback to reset stats.
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Stats::Scalar bytesReadWrQ
Stats::Formula avgMemAccLat
Derived & precision(int _precision)
Set the precision and marks this stat to print at the end of simulation.
Draining buffers pending serialization/handover.
#define ULL(N)
uint64_t constant
uint32_t payloadDelay
The extra pipelining delay from seeing the packet until the end of payload is transmitted by the comp...
virtual const std::string name() const
const uint8_t rank
Will be populated by address decoder.
void activateBank(Rank &rank_ref, Bank &bank_ref, Tick act_tick, uint32_t row)
Keep track of when row activations happen, in order to enforce the maximum number of activations in t...
PowerState pwrStatePostRefresh
Previous low-power state, which will be re-entered after refresh.
A Packet is used to encapsulate a transfer between two objects in the memory system (e...
const Tick M5_CLASS_VAR_USED tCK
Basic memory timing parameters initialized based on parameter values.
bool isRead() const
Return true if its a read packet (interface compatibility with Packet)
void processRefreshEvent()
Stats::Scalar averagePower
const unsigned int burstCount
Number of DRAM bursts requred for a system packet.
std::unique_ptr< Packet > pendingDelete
Upstream caches need this packet until true is returned, so hold it for deletion until a subsequent c...
void addToWriteQueue(PacketPtr pkt, unsigned int pktCount)
Decode the incoming pkt, create a dram_pkt and push to the back of the write queue.
static const uint32_t NO_ROW
const uint32_t rowBufferSize
RankStats(DRAMCtrl &memory, Rank &rank)
bool inRefIdleState() const
Check if there is no refresh and no preparation of refresh ongoing i.e.
const uint32_t devicesPerRank
MasterID maxMasters()
Get the number of masters registered in the system.
PowerState pwrStateTrans
Since we are taking decisions out of order, we need to keep track of what power transition is happeni...
const uint32_t minWritesPerSwitch
void suspend()
Stop the refresh events.
Stats::Formula masterWriteRate
void schedulePowerEvent(PowerState pwr_state, Tick tick)
Schedule a power state transition in the future, and potentially override an already scheduled transi...
void accessAndRespond(PacketPtr pkt, Tick static_latency)
When a packet reaches its "readyTime" in the response Q, use the "access()" method in AbstractMemory ...
const uint32_t deviceBusWidth
Stats::Scalar bytesReadSys
Stats::Formula avgRdBWSys
void updatePowerStats()
Function to update Power Stats.
Enums::PwrState pwrState() const
EventFunctionWrapper powerEvent
void reschedule(Event &event, Tick when, bool always=false)
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
const uint32_t writeHighThreshold
Enums::AddrMap addrMapping
Tick nextReqTime
The soonest you have to start thinking about the next request is the longest access time that can occ...
bool allRanksDrained() const
Return true once refresh is complete for all ranks and there are no additional commands enqueued...
void processPrechargeEvent()
void powerDownSleep(PowerState pwr_state, Tick tick)
Schedule a transition to power-down (sleep)
System * system() const
read the system pointer Implemented for completeness with the setter
Stats::Scalar preBackEnergy
bool interleaved() const
Determine if the range is interleaved or not.
EventFunctionWrapper wakeUpEvent
const std::string name() const
void processActivateEvent()
DRAMPacket * decodeAddr(const PacketPtr pkt, Addr dramPktAddr, unsigned int size, bool isRead) const
Address decoder to figure out physical mapping onto ranks, banks, and rows.
DRAMPower power
One DRAMPower instance per rank.
void sendRetryReq()
Send a retry to the master port that previously attempted a sendTimingReq to this slave port and fail...
Stats::Scalar writeRowHits
Stats::Scalar totalIdleTime
Stat to track total DRAM idle time.
void logRequest(BusState dir, MasterID m_id, uint8_t qos, Addr addr, uint64_t entries)
Called upon receiving a request; updates statistics and queue status.
uint64_t size() const
Get the memory size.
Tick wakeUpAllowedAt
delay power-down and self-refresh exit until this requirement is met
Stats::Formula busUtilWrite
Stats::Formula writeRowHitRate
void resetStats() override
Callback to reset stats.
void processNextReqEvent()
Helpers required to set up "events" in gem5. When the event "respondEvent" occurs, for example...
System * _system
Pointer to the System object.
std::unordered_set< Addr > isInWriteQueue
To avoid iterating over the write queue to check for overlapping transactions, maintain a set of burs...
const uint32_t columnsPerRowBuffer
void recvFunctional(PacketPtr pkt)
Stats::Histogram bytesPerActivate
T divCeil(const T &a, const U &b)
Stats::Formula masterWriteAvgLat
void schedule(Event &event, Tick when)
Stats::Scalar actBackEnergy
EventFunctionWrapper refreshEvent
Stats::Scalar readRowHits
Simple structure to hold the values needed to keep track of commands for DRAMPower.
Stats::Formula masterReadRate
void processWakeUpEvent()
const uint32_t deviceSize
The following are basic design parameters of the memory controller, and are initialized based on para...
std::vector< DRAMPacketQueue > writeQueue
int16_t PortID
Port index/ID type, and a symbolic name for an invalid port id.
Tick readyTime
When will request leave the controller.
const uint32_t columnsPerStripe
Stats::Scalar totMemAccLat
Data::MemCommand::cmds type
void preDumpStats() override
Callback before stats are dumped.
bool trySatisfyFunctional(PacketPtr pkt)
Check the list of buffered packets against the supplied functional request.
bool isTimingMode() const
Is the system in timing mode?
Tick nextBurstAt
Till when must we wait before issuing next RD/WR burst?
T bits(T val, int first, int last)
Extract the bitfield from position 'first' to 'last' (inclusive) from 'val' and right justify it...
const std::string & cmdString() const
Return the string name of the cmd field (for debugging and tracing).
void processRespondEvent()
const uint32_t writeBufferSize
const uint32_t banksPerRank
const uint32_t readBufferSize
DRAMPower is a standalone tool which calculates the power consumed by a DRAM in the system...
const FlagsType nozero
Don't print if this is zero.
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
virtual void drainResume() override
Resume execution after a successful drain.
virtual void startup() override
startup() is the final initialization call before simulation.
Addr burstAlign(Addr addr) const
Burst-align an address.
Stats::Vector masterWriteBytes
Counter value() const
Return the current value of this stat as its base type.
Stats::Vector readPktSize
void addToReadQueue(PacketPtr pkt, unsigned int pktCount)
When a new read comes in, first check if the write q has a pending request to the same address...
Stats::Scalar actPowerDownEnergy
void printQs() const
Used for debugging to observe the contents of the queues.
uint8_t schedule(MasterID m_id, uint64_t data)
unsigned int size
The size of this dram packet in bytes It is always equal or smaller than DRAM burst size...
bool recvTimingReq(PacketPtr)
Receive a timing request from the peer.
void sample(const U &v, int n=1)
Add a value to the distribtion n times.
const FlagsType init
This Stat is Initialized.
Stats::Vector masterWriteTotalLat
Port & getPort(const std::string &if_name, PortID idx=InvalidPortID) override
Get a port with a given name and index.
Stats::Scalar writeBursts
const PacketPtr pkt
This comes from the outside world.
void scheduleWakeUpEvent(Tick exit_delay)
Schedule an event to wake up from power-down or self-refresh and update bank timing parameters ...
Stats::Histogram rdPerTurnAround
Rank class includes a vector of banks.
Stats::Scalar bytesWritten
bool inLowPowerState
rank is in or transitioning to power-down or self-refresh
BusState selectNextBusState()
Returns next bus direction (READ or WRITE) based on configured policy.
MemoryPort port
Our incoming port, for a multi-ported controller add a crossbar in front of it.
void flushCmdList()
Push commands out of the cmdList queue that are scheduled at or before curTick() to the DRAMPower library. All ...
Stats::Scalar refreshEnergy
DRAMPacketQueue::iterator chooseNext(DRAMPacketQueue &queue, Tick extra_col_delay)
The memory scheduler/arbiter - picks which request needs to go next, based on the specified policy suc...