#include "debug/DRAM.hh"
#include "debug/DRAMPower.hh"
#include "debug/DRAMState.hh"
#include "debug/Drain.hh"
#include "debug/QOS.hh"

    port(name() + ".port", *this), isTimingMode(false),
    retryRdReq(false), retryWrReq(false),
    ranks.push_back(rank);

    if (p->write_low_thresh_perc >= p->write_high_thresh_perc)
        fatal("Write buffer low threshold %d must be smaller than the "
              "high threshold %d\n", p->write_low_thresh_perc,
              p->write_high_thresh_perc);

    if (deviceCapacity != capacity / (1024 * 1024))
        warn("DRAM device capacity (%d Mbytes) does not match the "
             "address range assigned (%d Mbytes)\n", deviceCapacity,
             capacity / (1024 * 1024));
    DPRINTF(DRAM, "Memory capacity %lld (%lld) bytes\n", capacity,
            AbstractMemory::size());

    DPRINTF(DRAM, "Row buffer size %d bytes with %d columns per row buffer\n",
            rowBufferSize, columnsPerRowBuffer);

        fatal("tREFI (%d) must be larger than tRP (%d) and tRFC (%d)\n",
              tREFI, tRP, tRFC);

        fatal("banks per rank (%d) must be equal to or larger than "
              "bank groups per rank (%d)\n",
              banksPerRank, bankGroupsPerRank);

        fatal("Banks per rank (%d) must be evenly divisible by bank groups "
              "per rank (%d) for equal banks per bank group\n",
              banksPerRank, bankGroupsPerRank);

        fatal("tCCD_L (%d) should be larger than tBURST (%d) when "
              "bank groups per rank (%d) is greater than 1\n",
              tCCD_L, tBURST, bankGroupsPerRank);

        fatal("tCCD_L_WR (%d) should be larger than tBURST (%d) when "
              "bank groups per rank (%d) is greater than 1\n",
              tCCD_L_WR, tBURST, bankGroupsPerRank);

        fatal("tRRD_L (%d) should be larger than tRRD (%d) when "
              "bank groups per rank (%d) is greater than 1\n",
              tRRD_L, tRRD, bankGroupsPerRank);
        fatal("DRAMCtrl %s is unconnected!\n", name());

            fatal("Channel interleaving of %s doesn't match RoRaBaChCo "
                  "address map\n", name());

                fatal("Channel interleaving of %s must be at least as large "
                      "as the cache line size\n", name());

                fatal("Channel interleaving of %s must be at most as large "
                      "as the row-buffer size\n", name());
    DPRINTF(DRAM,
            "Read queue limit %d, current size %d, entries needed %d\n",
            ...);

    DPRINTF(DRAM,
            "Write queue limit %d, current size %d, entries needed %d\n",
            ...);
        panic("Unknown address mapping policy chosen!");

    DPRINTF(DRAM, "Address: %lld Rank %d Bank %d Row %d\n",
            dramPktAddr, rank, bank, row);

    return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr,
                          size, ranks[rank]->banks[bank], *ranks[rank]);
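The decodeAddr() fragment above only shows the tail of the decoder. As a rough, standalone sketch of the divide/modulo arithmetic such a decoder performs for a RoRaBaCo-style mapping (all sizes and the layout below are assumptions for illustration, not the configured gem5 mapping):

// Illustrative sketch (not the gem5 implementation): decompose a burst-aligned
// address into column/bank/rank/row fields for a RoRaBaCo-style mapping.
#include <cstdint>
#include <cstdio>

int main()
{
    const uint64_t burstSize = 64;            // bytes per DRAM burst (assumed)
    const uint64_t columnsPerRowBuffer = 128; // row-buffer bytes / burst size
    const uint64_t banksPerRank = 8;
    const uint64_t ranksPerChannel = 2;

    uint64_t addr = 0x12345678;   // example physical address
    addr /= burstSize;            // drop the offset within a burst

    uint64_t col  = addr % columnsPerRowBuffer; addr /= columnsPerRowBuffer;
    uint64_t bank = addr % banksPerRank;        addr /= banksPerRank;
    uint64_t rank = addr % ranksPerChannel;     addr /= ranksPerChannel;
    uint64_t row  = addr;         // whatever remains selects the row

    std::printf("rank %llu bank %llu row %llu col %llu\n",
                (unsigned long long)rank, (unsigned long long)bank,
                (unsigned long long)row, (unsigned long long)col);
    return 0;
}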
    assert(pktCount != 0);

    unsigned pktsServicedByWrQ = 0;

    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                                 base_addr + pkt->getSize()) - addr;

        bool foundInWrQ = false;

            for (const auto& p : vec) {
                if (p->addr <= addr &&
                    ((addr + size) <= (p->addr + p->size))) {

                    DPRINTF(DRAM,
                            "Read to addr %lld with size %d serviced by "
                            "write queue\n", addr, size);

    if (pktCount > 1 && burst_helper == NULL) {
        DPRINTF(DRAM, "Read to addr %lld translates to %d "
                "dram requests\n", pkt->getAddr(), pktCount);

            DPRINTF(DRAM, "Adding to read queue\n");

    if (pktsServicedByWrQ == pktCount) {

    if (burst_helper != NULL)

        DPRINTF(DRAM, "Request scheduled immediately\n");
    for (int cnt = 0; cnt < pktCount; ++cnt) {
        unsigned size = std::min((addr | (burstSize - 1)) + 1,
                                 base_addr + pkt->getSize()) - addr;

            DPRINTF(DRAM, "Adding to write queue\n");

            DPRINTF(DRAM, "Merging write burst with existing queue entry\n");

        DPRINTF(DRAM, "Request scheduled immediately\n");
    DPRINTF(DRAM, "===READ QUEUE===\n\n");
    for (const auto& packet : queue) {
        DPRINTF(DRAM, "Read %lu\n", packet->addr);

    DPRINTF(DRAM, "\n===RESP QUEUE===\n\n");
        DPRINTF(DRAM, "Response %lu\n", packet->addr);

    DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n");
    for (const auto& packet : queue) {
        DPRINTF(DRAM, "Write %lu\n", packet->addr);
    DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    panic_if(!(pkt->isRead() || pkt->isWrite()),
             "Should only see read and writes at memory controller\n");

            DPRINTF(DRAM, "Write queue full, not accepting\n");

            DPRINTF(DRAM, "Read queue full, not accepting\n");
655 "processRespondEvent(): Some req has reached its readyTime\n");
663 DPRINTF(DRAM,
"number of read entries for rank %d is %d\n",
688 DPRINTF(DRAMState,
"Rank %d sleep at tick %d; current power state is " 731 DPRINTF(Drain,
"DRAM controller done draining\n");
DRAMCtrl::DRAMPacketQueue::iterator
DRAMCtrl::chooseNext(DRAMPacketQueue& queue, Tick extra_col_delay)

    DRAMCtrl::DRAMPacketQueue::iterator ret = queue.end();

    if (!queue.empty()) {
        if (queue.size() == 1) {
            if (ranks[dram_pkt->rank]->inRefIdleState()) {
                DPRINTF(DRAM, "Single request, going to a free rank\n");

                DPRINTF(DRAM, "Single request, going to a busy rank\n");

            for (auto i = queue.begin(); i != queue.end(); ++i) {
                if (ranks[dram_pkt->rank]->inRefIdleState()) {

            panic("No scheduling policy chosen\n");
DRAMCtrl::DRAMPacketQueue::iterator
DRAMCtrl::chooseNextFRFCFS(DRAMPacketQueue& queue, Tick extra_col_delay)

    bool filled_earliest_banks = false;
    bool hidden_bank_prep = false;

    bool found_hidden_bank = false;

    bool found_prepped_pkt = false;

    bool found_earliest_pkt = false;

    auto selected_pkt_it = queue.end();

    for (auto i = queue.begin(); i != queue.end() ; ++i) {

        DPRINTF(DRAM, "%s checking packet in bank %d, row %d\n",
                ...);

            DPRINTF(DRAM, "%s bank %d - Rank %d available\n", __func__,
                    ...);

            if (col_allowed_at <= min_col_at) {

                DPRINTF(DRAM, "%s Seamless row buffer hit\n", __func__);

            } else if (!found_hidden_bank && !found_prepped_pkt) {

                found_prepped_pkt = true;
                DPRINTF(DRAM, "%s Prepped row buffer hit\n", __func__);

            } else if (!found_earliest_pkt) {

                if (!filled_earliest_banks) {
                    std::tie(earliest_banks, hidden_bank_prep) =
                        minBankPrep(queue, min_col_at);
                    filled_earliest_banks = true;

                if (bits(earliest_banks[dram_pkt->rank],
                         dram_pkt->bank, dram_pkt->bank)) {
                    found_earliest_pkt = true;
                    found_hidden_bank = hidden_bank_prep;

                    if (hidden_bank_prep || !found_prepped_pkt)

            DPRINTF(DRAM, "%s bank %d - Rank %d not available\n", __func__,
                    ...);

    if (selected_pkt_it == queue.end()) {
        DPRINTF(DRAM, "%s no available ranks found\n", __func__);

    return selected_pkt_it;
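The FR-FCFS walk above classifies each queued packet as a seamless row hit, a prepped row hit, or a candidate for the earliest bank, and keeps the oldest packet of the best class. A simplified standalone model of that pick (illustrative structure and names, not the gem5 code):

// Simplified FR-FCFS style pick: prefer the oldest "seamless" row hit, then
// any prepped row hit, then anything else. The queue is ordered oldest first.
#include <cstdint>
#include <cstdio>
#include <vector>

struct Req { bool rowHit; uint64_t colAllowedAt; };

int pickFRFCFS(const std::vector<Req>& q, uint64_t minColAt)
{
    int best = -1;
    int bestClass = 3;            // 0: seamless hit, 1: prepped hit, 2: other
    for (int i = 0; i < (int)q.size(); ++i) {
        int cls = q[i].rowHit ? (q[i].colAllowedAt <= minColAt ? 0 : 1) : 2;
        if (cls < bestClass) {    // first (oldest) request of a better class wins
            bestClass = cls;
            best = i;
            if (cls == 0)
                break;            // oldest seamless hit: nothing can beat it
        }
    }
    return best;
}

int main()
{
    std::vector<Req> q = { {false, 0}, {true, 200}, {true, 90} };
    std::printf("selected index %d\n", pickFRFCFS(q, 100));  // expects 2
    return 0;
}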
    auto current_it = it++;

    DPRINTF(DRAM, "Removing burstTick for %d\n", *current_it);

    return (cmd_tick - burst_offset);

    Tick cmd_at = cmd_tick;

        DPRINTF(DRAM, "Contention found on command bus at %d\n", burst_tick);

    Tick cmd_at = cmd_tick;

    Tick burst_offset = 0;

    while (max_multi_cmd_split > (first_cmd_offset + burst_offset)) {

    Tick first_cmd_tick = burst_tick - std::min(burst_offset, burst_tick);

    bool first_can_issue = false;
    bool second_can_issue = false;

    while (!first_can_issue || !second_can_issue) {
        bool same_burst = (burst_tick == first_cmd_tick);
        auto first_cmd_count = burstTicks.count(first_cmd_tick);
        auto second_cmd_count = same_burst ? first_cmd_count + 1 :
            burstTicks.count(burst_tick);

        if (!second_can_issue) {
            DPRINTF(DRAM, "Contention (cmd2) found on command bus at %d\n",
                    burst_tick);
            cmd_at = burst_tick;

        bool gap_violated = !same_burst &&
            ((burst_tick - first_cmd_tick) > max_multi_cmd_split);

        if (!first_can_issue || (!second_can_issue && gap_violated)) {
            DPRINTF(DRAM, "Contention (cmd1) found on command bus at %d\n",
                    first_cmd_tick);
void
DRAMCtrl::activateBank(Rank& rank_ref, Bank& bank_ref,
                       Tick act_tick, uint32_t row)

    DPRINTF(DRAM, "Activate at tick %d\n", act_at);

    DPRINTF(DRAM, "Activate bank %d, rank %d at tick %lld, now got %d active\n",
            bank_ref.bank, rank_ref.rank, act_at,
            ...);

            rank_ref.banks[i].actAllowedAt = std::max(act_at + tRRD_L,
                                             rank_ref.banks[i].actAllowedAt);

            rank_ref.banks[i].actAllowedAt = std::max(act_at + tRRD,
                                             rank_ref.banks[i].actAllowedAt);

            panic("Got %d activates in window %d (%llu - %llu) which "
                  ...);

        rank_ref.actTicks.push_front(act_at);

            DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate "
                    ...);

                rank_ref.banks[j].actAllowedAt =
                    std::max(...,
                             rank_ref.banks[j].actAllowedAt);
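activateBank() also enforces the rolling four-activate window (tXAW/tFAW) by remembering recent activate times in the actTicks deque. A small standalone sketch of the same idea with assumed parameters:

// Remember the last N activate times; delay a new ACT until it falls outside
// the window spanned by the oldest of them.
#include <cstdint>
#include <cstdio>
#include <deque>

int main()
{
    using Tick = uint64_t;
    const unsigned activationLimit = 4;   // e.g. four-activate window (tFAW)
    const Tick tXAW = 100;                // window length in ticks (assumed)

    std::deque<Tick> actTicks(activationLimit, 0);  // most recent at the front

    auto activate = [&](Tick wantAt) {
        Tick oldest = actTicks.back();
        Tick actAt = wantAt;
        if (oldest != 0 && wantAt < oldest + tXAW)
            actAt = oldest + tXAW;        // push ACT out of the saturated window
        actTicks.pop_back();
        actTicks.push_front(actAt);
        return actAt;
    };

    for (Tick t : {10, 20, 30, 40, 50})
        std::printf("requested %llu -> issued %llu\n",
                    (unsigned long long)t,
                    (unsigned long long)activate(t));
    return 0;
}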
void
DRAMCtrl::prechargeBank(Rank& rank_ref, Bank& bank, Tick pre_tick,
                        bool auto_or_preall, bool trace)

    Tick pre_at = pre_tick;
    if (auto_or_preall) {

            rank_ref.banks[i].preAllowedAt = std::max(pre_at + tPPD,
                                             rank_ref.banks[i].preAllowedAt);

    Tick pre_done_at = pre_at + tRP;

    DPRINTF(DRAM, "Precharging bank %d, rank %d at tick %lld, now got "
            "%d active\n", bank.bank, rank_ref.rank, pre_at,
            ...);
    DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",
            dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);

    bool row_hit = true;

    const Tick col_allowed_at = dram_pkt->isRead() ?
        bank.rdAllowedAt : bank.wrAllowedAt;

    DPRINTF(DRAM, "Schedule RD/WR burst at tick %d\n", cmd_at);

            if (dram_pkt->rank == j) {

                dly_to_rd_cmd = dram_pkt->isRead() ? ... : ...;
                dly_to_wr_cmd = dram_pkt->isRead() ? ... : ...;

            ranks[j]->banks[i].rdAllowedAt = std::max(cmd_at + dly_to_rd_cmd,
                                             ranks[j]->banks[i].rdAllowedAt);
            ranks[j]->banks[i].wrAllowedAt = std::max(cmd_at + dly_to_wr_cmd,
                                             ranks[j]->banks[i].wrAllowedAt);

    bool auto_precharge = pageMgmt == Enums::close || ...;

    if (!auto_precharge &&
        (pageMgmt == Enums::open_adaptive ||
         pageMgmt == Enums::close_adaptive)) {

        bool got_more_hits = false;
        bool got_bank_conflict = false;

            auto p = queue[i].begin();

            while (!got_more_hits && p != queue[i].end()) {
                if (dram_pkt != (*p)) {
                    bool same_rank_bank = (dram_pkt->rank == (*p)->rank) &&
                                          (dram_pkt->bank == (*p)->bank);

                    bool same_row = dram_pkt->row == (*p)->row;
                    got_more_hits |= same_rank_bank && same_row;
                    got_bank_conflict |= same_rank_bank && !same_row;

        auto_precharge = !got_more_hits &&
            (got_bank_conflict || pageMgmt == Enums::close_adaptive);

    std::string mem_cmd = dram_pkt->isRead() ? "RD" : "WR";

    DPRINTF(DRAM, "Access to %lld, ready at %lld next burst at %lld.\n",
            ...);

    if (auto_precharge) {

        DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId);

    if (dram_pkt->isRead()) {
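At its core, doDRAMAccess() decides when the column command can issue: a row hit uses the open row, a miss first pays precharge and activate, and either way the command also waits for the shared bus. A rough standalone model with assumed timing values (not the gem5 code path):

#include <cstdint>
#include <algorithm>
#include <cstdio>

int main()
{
    using Tick = uint64_t;
    // assumed timing parameters (ticks)
    const Tick tRP = 15, tRCD = 15, tCL = 15, tBURST = 4;

    Tick now = 1000;
    Tick nextBurstAt = 1002;      // when the data bus is free again
    Tick bankPreAllowedAt = 990;  // earliest allowed precharge
    Tick openRow = 7, reqRow = 9; // currently open row vs. requested row

    Tick colAllowedAt;
    if (openRow == reqRow) {
        colAllowedAt = now;                         // row hit
    } else {
        Tick preAt = std::max(now, bankPreAllowedAt);
        Tick actAt = preAt + tRP;                   // close old row, open new one
        colAllowedAt = actAt + tRCD;
    }

    Tick cmdAt = std::max(colAllowedAt, nextBurstAt);  // also wait for the bus
    Tick readyAt = cmdAt + tCL + tBURST;               // data returned

    std::printf("cmd at %llu, data ready at %llu\n",
                (unsigned long long)cmdAt, (unsigned long long)readyAt);
    return 0;
}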
    DPRINTF(DRAM, "QoS Turnarounds selected state %s %s\n",
            (busState == MemCtrl::READ) ? "READ" : "WRITE",
            switched_cmd_type ? "[turnaround triggered]" : "");

    if (switched_cmd_type) {

            DPRINTF(DRAM, "Switching to writes after %d reads with %d reads "
                    ...);

            DPRINTF(DRAM, "Switching to reads after %d writes with %d writes "
                    ...);

        if (!r->inRefIdleState()) {

            DPRINTF(DRAMState, "Rank %d is not available\n", r->rank);

            r->checkDrainDone();

            if ((r->pwrState == PWR_SREF) && r->inLowPowerState) {

                DPRINTF(DRAMState, "Rank %d is in self-refresh\n", r->rank);

                if (r->forceSelfRefreshExit()) {
                    DPRINTF(DRAMState, "rank %d was in self refresh and"
                            " should wake up\n", r->rank);

                    r->scheduleWakeUpEvent(tXS);
    bool switch_to_writes = false;

            DPRINTF(DRAM, "Switching to writes due to read queue empty\n");
            switch_to_writes = true;

                DPRINTF(Drain, "DRAM controller done draining\n");

        bool read_found = false;
        DRAMPacketQueue::iterator to_read;

            DPRINTF(QOS,
                    "DRAM controller checking READ queue [%d] priority [%d elements]\n",
                    prio, queue->size());

            to_read = chooseNext((*queue), switched_cmd_type ? tCS : 0);

            if (to_read != queue->end()) {

            DPRINTF(DRAM, "No Reads Found - exiting\n");

        auto dram_pkt = *to_read;

        assert(dram_pkt->rankRef.inRefIdleState());

        ++dram_pkt->rankRef.outstandingEvents;

        assert(dram_pkt->readyTime >= curTick());

        logResponse(MemCtrl::READ, (*to_read)->masterId(),
                    dram_pkt->qosValue(), dram_pkt->getAddr(), 1,
                    dram_pkt->readyTime - dram_pkt->entryTime);

            assert(respQueue.back()->readyTime <= dram_pkt->readyTime);

            switch_to_writes = true;

        readQueue[dram_pkt->qosValue()].erase(to_read);

        if (switch_to_writes) {

        bool write_found = false;
        DRAMPacketQueue::iterator to_write;

            DPRINTF(QOS,
                    "DRAM controller checking WRITE queue [%d] priority [%d elements]\n",
                    prio, queue->size());

            to_write = chooseNext((*queue),
                                  switched_cmd_type ? std::min(tRTW, tCS) : 0);

            if (to_write != queue->end()) {

            DPRINTF(DRAM, "No Writes Found - exiting\n");

        auto dram_pkt = *to_write;

        assert(dram_pkt->rankRef.inRefIdleState());

        --dram_pkt->rankRef.writeEntries;

        if (!dram_pkt->rankRef.writeDoneEvent.scheduled()) {
            schedule(dram_pkt->rankRef.writeDoneEvent, dram_pkt->readyTime);

            ++dram_pkt->rankRef.outstandingEvents;

        } else if (dram_pkt->rankRef.writeDoneEvent.when() <
                   dram_pkt->readyTime) {

            reschedule(dram_pkt->rankRef.writeDoneEvent, dram_pkt->readyTime);

        logResponse(MemCtrl::WRITE, (*to_write)->masterId(),
                    dram_pkt->qosValue(), dram_pkt->getAddr(), 1,
                    dram_pkt->readyTime - dram_pkt->entryTime);

        writeQueue[dram_pkt->qosValue()].erase(to_write);

    bool below_threshold = ...;
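The controller switches the bus to writes when the write queue crosses a high watermark and back to reads once it drains below a low watermark. A toy model of that hysteresis with assumed thresholds (not the exact gem5 policy):

#include <cstdio>

int main()
{
    const unsigned writeHighThreshold = 28;  // start draining writes
    const unsigned writeLowThreshold = 16;   // stop draining writes
    unsigned writeQueueSize = 0;
    bool servicingWrites = false;

    for (int step = 0; step < 60; ++step) {
        if (!servicingWrites) {
            ++writeQueueSize;                // reads serviced, writes pile up
            if (writeQueueSize >= writeHighThreshold)
                servicingWrites = true;
        } else {
            --writeQueueSize;                // drain one write per step
            if (writeQueueSize < writeLowThreshold)
                servicingWrites = false;
        }
    }
    std::printf("final write queue size %u, servicing %s\n",
                writeQueueSize, servicingWrites ? "writes" : "reads");
    return 0;
}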
std::pair<std::vector<uint32_t>, bool>
DRAMCtrl::minBankPrep(const DRAMPacketQueue& queue, Tick min_col_at) const

    bool found_seamless_bank = false;

    bool hidden_bank_prep = false;

    for (const auto& p : queue) {
        if (p->rankRef.inRefIdleState())
            got_waiting[p->bankId] = true;

            uint16_t bank_id = i * banksPerRank + j;

            if (got_waiting[bank_id]) {

                assert(ranks[i]->inRefIdleState());

                const Tick col_allowed_at = (busState == READ) ?
                    ranks[i]->banks[j].rdAllowedAt :
                    ranks[i]->banks[j].wrAllowedAt;
                Tick col_at = std::max(col_allowed_at, act_at + tRCD);

                bool new_seamless_bank = col_at <= min_col_at;

                if (new_seamless_bank ||
                    (!found_seamless_bank && act_at <= min_act_at)) {

                    if (!found_seamless_bank &&
                        (new_seamless_bank || act_at < min_act_at)) {
                        std::fill(bank_mask.begin(), bank_mask.end(), 0);

                    found_seamless_bank |= new_seamless_bank;

                    hidden_bank_prep = act_at <= hidden_act_max;

                    min_act_at = act_at;

    return make_pair(bank_mask, hidden_bank_prep);
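minBankPrep() returns one mask word per rank with a bit set for each candidate bank. A small sketch of that per-rank bank-mask bookkeeping (illustrative values):

#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
    const unsigned ranksPerChannel = 2;
    std::vector<uint32_t> bank_mask(ranksPerChannel, 0);

    auto markBank = [&](unsigned rank, unsigned bank) {
        bank_mask[rank] |= (1u << bank);        // set the candidate bit
    };
    auto isMarked = [&](unsigned rank, unsigned bank) {
        return (bank_mask[rank] >> bank) & 1u;  // test a single bit
    };

    markBank(0, 3);
    markBank(1, 7);
    std::printf("rank0/bank3: %u, rank1/bank2: %u\n",
                isMarked(0, 3), isMarked(1, 2));  // 1, 0
    return 0;
}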
      pwrStateTick(0), refreshDueAt(0), pwrState(PWR_IDLE),
      refreshState(REF_IDLE), inLowPowerState(false), rank(rank),
      readEntries(0), writeEntries(0), outstandingEvents(0),
      wakeUpAllowedAt(0), power(_p, false), banks(_p->banks_per_rank),
      numBanksActive(0), actTicks(_p->activation_limit, 0), lastBurstTick(0),
      stats(_memory, *this)

    for (int b = 0; b < _p->banks_per_rank; b++) {

        if (_p->bank_groups_per_rank > 0) {

            banks[b].bankgr = b % _p->bank_groups_per_rank;

    return no_queued_cmds;
    DPRINTF(DRAM, "Refresh drain done, now precharging\n");

    auto next_iter = cmdList.begin();

    for ( ; next_iter != cmdList.end() ; ++next_iter) {

        ... memory.enableDRAMPowerdown) {

        DPRINTF(DRAMState, "Rank %d sleep at tick %d\n",
                ...);

        DPRINTF(DRAM, "Refresh due\n");

            ... && (memory.nextReqEvent.scheduled())) {

            DPRINTF(DRAM, "Refresh awaiting draining\n");

        DPRINTF(DRAM, "Wake Up for refresh\n");

        DPRINTF(DRAM, "Precharging all\n");

                pre_at = std::max(b.preAllowedAt, pre_at);

            for (auto &b : banks) {

                memory.prechargeBank(*this, b, pre_at, true, false);

                b.actAllowedAt = std::max(b.actAllowedAt, act_allowed_at);
                b.preAllowedAt = std::max(b.preAllowedAt, pre_at);

            DPRINTF(DRAM, "All banks already precharged, starting refresh\n");

            ... memory.respondEvent.scheduled());

            b.actAllowedAt = ref_done_at;

            fatal("Refresh was delayed so long we cannot catch up\n");
            DPRINTF(DRAMState, "Rank %d sleeping after refresh and was in "
                    "power state %d before refreshing\n", rank,
                    ...);

            DPRINTF(DRAMState, "Rank %d sleeping after refresh but was NOT"
                    " in a low power state before refreshing\n", rank);

        DPRINTF(DRAMState, "Refresh done at %llu and next refresh"
                ...);

    DPRINTF(DRAMState, "Scheduling power event at %llu to state %d\n",
            ...);

        panic("Scheduled power event at %llu to state %d, "
              "with scheduled event at %llu to %d\n", tick, pwr_state,
              ...);

    } else if (pwr_state == PWR_REF) {

    } else if (pwr_state == PWR_SREF) {

    DPRINTF(DRAMState, "Scheduling wake-up for rank %d at tick %d\n",
            rank, wake_up_tick);

        b.wrAllowedAt = std::max(wake_up_tick + exit_delay, b.wrAllowedAt);
        b.rdAllowedAt = std::max(wake_up_tick + exit_delay, b.rdAllowedAt);
        b.preAllowedAt = std::max(wake_up_tick + exit_delay, b.preAllowedAt);
        b.actAllowedAt = std::max(wake_up_tick + exit_delay, b.actAllowedAt);

        cmdList.push_back(Command(MemCommand::PUP_ACT, 0, wake_up_tick));

        cmdList.push_back(Command(MemCommand::PUP_PRE, 0, wake_up_tick));
    DPRINTF(DRAMState, "Was refreshing for %llu ticks\n", duration);

        DPRINTF(DRAMState, "Switching to power down state after refreshing"
                ...);

        if (!memory.nextReqEvent.scheduled()) {
            DPRINTF(DRAM, "Scheduling next request after refreshing"
                    " rank %d\n", rank);

        DPRINTF(DRAMState, "All banks precharged\n");

            ... memory.enableDRAMPowerdown) {

            DPRINTF(DRAMState, "Rank %d bypassing refresh and transitioning "
                    "to self refresh at %11u tick\n", rank, curTick());

        DPRINTF(DRAMState, "Refreshing\n");

    Data::MemoryPowerModel::Energy energy = power.powerlib.getEnergy();

    DPRINTF(DRAM, "Computing stats due to a dump callback\n");
      ADD_STAT(readReqs, "Number of read requests accepted"),
      ADD_STAT(writeReqs, "Number of write requests accepted"),

      ADD_STAT(readBursts, "Number of DRAM read bursts, "
               "including those serviced by the write queue"),
      ADD_STAT(writeBursts, "Number of DRAM write bursts, "
               "including those merged in the write queue"),
      ADD_STAT(servicedByWrQ,
               "Number of DRAM read bursts serviced by the write queue"),
      ADD_STAT(mergedWrBursts,
               "Number of DRAM write bursts merged with an existing one"),

      ADD_STAT(neitherReadNorWriteReqs,
               "Number of requests that are neither read nor write"),

      ADD_STAT(perBankRdBursts, "Per bank read bursts"),
      ADD_STAT(perBankWrBursts, "Per bank write bursts"),

      ADD_STAT(avgRdQLen, "Average read queue length when enqueuing"),
      ADD_STAT(avgWrQLen, "Average write queue length when enqueuing"),

      ADD_STAT(totQLat, "Total ticks spent queuing"),
      ADD_STAT(totBusLat, "Total ticks spent in databus transfers"),
      ADD_STAT(totMemAccLat,
               "Total ticks spent from burst creation until serviced "
               ...),
      ADD_STAT(avgQLat, "Average queueing delay per DRAM burst"),
      ADD_STAT(avgBusLat, "Average bus latency per DRAM burst"),
      ADD_STAT(avgMemAccLat, "Average memory access latency per DRAM burst"),

      ADD_STAT(numRdRetry, "Number of times read queue was full causing retry"),
      ADD_STAT(numWrRetry, "Number of times write queue was full causing retry"),

      ADD_STAT(readRowHits, "Number of row buffer hits during reads"),
      ADD_STAT(writeRowHits, "Number of row buffer hits during writes"),
      ADD_STAT(readRowHitRate, "Row buffer hit rate for reads"),
      ADD_STAT(writeRowHitRate, "Row buffer hit rate for writes"),

      ADD_STAT(readPktSize, "Read request sizes (log2)"),
      ADD_STAT(writePktSize, "Write request sizes (log2)"),

      ADD_STAT(rdQLenPdf, "What read queue length does an incoming req see"),
      ADD_STAT(wrQLenPdf, "What write queue length does an incoming req see"),

      ADD_STAT(bytesPerActivate, "Bytes accessed per row activation"),

      ADD_STAT(rdPerTurnAround,
               "Reads before turning the bus around for writes"),
      ADD_STAT(wrPerTurnAround,
               "Writes before turning the bus around for reads"),

      ADD_STAT(bytesReadDRAM, "Total number of bytes read from DRAM"),
      ADD_STAT(bytesReadWrQ, "Total number of bytes read from write queue"),
      ADD_STAT(bytesWritten, "Total number of bytes written to DRAM"),
      ADD_STAT(bytesReadSys, "Total read bytes from the system interface side"),
      ADD_STAT(bytesWrittenSys,
               "Total written bytes from the system interface side"),

      ADD_STAT(avgRdBW, "Average DRAM read bandwidth in MiByte/s"),
      ADD_STAT(avgWrBW, "Average achieved write bandwidth in MiByte/s"),
      ADD_STAT(avgRdBWSys, "Average system read bandwidth in MiByte/s"),
      ADD_STAT(avgWrBWSys, "Average system write bandwidth in MiByte/s"),
      ADD_STAT(peakBW, "Theoretical peak bandwidth in MiByte/s"),

      ADD_STAT(busUtil, "Data bus utilization in percentage"),
      ADD_STAT(busUtilRead, "Data bus utilization in percentage for reads"),
      ADD_STAT(busUtilWrite, "Data bus utilization in percentage for writes"),

      ADD_STAT(totGap, "Total gap between requests"),
      ADD_STAT(avgGap, "Average gap between requests"),

      ADD_STAT(masterReadBytes, "Per-master bytes read from memory"),
      ADD_STAT(masterWriteBytes, "Per-master bytes written to memory"),
      ADD_STAT(masterReadRate,
               "Per-master bytes read from memory rate (Bytes/sec)"),
      ADD_STAT(masterWriteRate,
               "Per-master bytes written to memory rate (Bytes/sec)"),
      ADD_STAT(masterReadAccesses,
               "Per-master read serviced memory accesses"),
      ADD_STAT(masterWriteAccesses,
               "Per-master write serviced memory accesses"),
      ADD_STAT(masterReadTotalLat,
               "Per-master read total memory access latency"),
      ADD_STAT(masterWriteTotalLat,
               "Per-master write total memory access latency"),
      ADD_STAT(masterReadAvgLat,
               "Per-master read average memory access latency"),
      ADD_STAT(masterWriteAvgLat,
               "Per-master write average memory access latency"),

      ADD_STAT(pageHitRate, "Row buffer hit rate, read and write combined")
    using namespace Stats;

    for (int i = 0; i < max_masters; i++) {
      ADD_STAT(actEnergy, "Energy for activate commands per rank (pJ)"),
      ADD_STAT(preEnergy, "Energy for precharge commands per rank (pJ)"),
      ADD_STAT(readEnergy, "Energy for read commands per rank (pJ)"),
      ADD_STAT(writeEnergy, "Energy for write commands per rank (pJ)"),
      ADD_STAT(refreshEnergy, "Energy for refresh commands per rank (pJ)"),
      ADD_STAT(actBackEnergy, "Energy for active background per rank (pJ)"),
      ADD_STAT(preBackEnergy, "Energy for precharge background per rank (pJ)"),
      ADD_STAT(actPowerDownEnergy,
               "Energy for active power-down per rank (pJ)"),
      ADD_STAT(prePowerDownEnergy,
               "Energy for precharge power-down per rank (pJ)"),
      ADD_STAT(selfRefreshEnergy, "Energy for self refresh per rank (pJ)"),

      ADD_STAT(totalEnergy, "Total energy per rank (pJ)"),
      ADD_STAT(averagePower, "Core power per rank (mW)"),

      ADD_STAT(totalIdleTime, "Total Idle time Per DRAM Rank"),
      ADD_STAT(memoryStateTime, "Time in different power states")
    if (if_name != "port") {

    DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d,"
            ...);

            DPRINTF(DRAM, "Rank%d: Forcing self-refresh wakeup in drain\n",
                    r->rank);
            r->scheduleWakeUpEvent(tXS);

    bool all_ranks_drained = true;

        all_ranks_drained = r->inPwrIdleState() && r->inRefIdleState() &&
                            all_ranks_drained;

    return all_ranks_drained;

    : QueuedSlavePort(name, &_memory, queue), queue(_memory, *this, true),

    ranges.push_back(memory.getAddrRange());

    memory.recvFunctional(pkt);

    return memory.recvAtomic(pkt);

    return memory.recvTimingReq(pkt);

DRAMCtrlParams::create()
#define panic(...)
This implements a cprintf based panic() function.
void logResponse(BusState dir, MasterID m_id, uint8_t qos, Addr addr, uint64_t entries, double delay)
Called upon receiving a response, updates statistics and updates queues status.
void pruneBurstTick()
Remove commands that have already issued from burstTicks.
Enums::PageManage pageMgmt
void functionalAccess(PacketPtr pkt)
Perform an untimed memory read or write without changing anything but the memory itself.
bool enableDRAMPowerdown
Enable or disable DRAM powerdown states.
const uint32_t writeLowThreshold
const uint32_t activationLimit
Ports are used to interface objects to each other.
Stats::Scalar mergedWrBursts
const Tick entryTime
When did request enter the controller.
Stats::Scalar totalEnergy
void resetStats()
Reset stats on a stats event.
Stats::Scalar bytesReadDRAM
virtual void resetStats()
Callback to reset stats.
void sendRangeChange() const
Called by the owner to send a range change.
Derived & subname(off_type index, const std::string &name)
Set the subfield name for the given index, and marks this stat to print at the end of simulation...
#define fatal(...)
This implements a cprintf based fatal() function.
bool retryRdReq
Remember if we have to retry a request when available.
BusState busStateNext
bus state for next request event triggered
Stats::Scalar bytesWrittenSys
const std::string & name()
unsigned int maxCommandsPerBurst
virtual AddrRangeList getAddrRanges() const
Get a list of the non-overlapping address ranges the owner is responsible for.
const Tick rdToWrDlySameBG
std::vector< Command > cmdList
List of commands issued, to be sent to DRAMPower at refresh and stats dump.
std::vector< Rank * > ranks
Vector of ranks.
std::string getMasterName(MasterID master_id)
Get the name of an object for a given request id.
void doDRAMAccess(DRAMPacket *dram_pkt)
Actually do the DRAM access - figure out the latency it will take to service the req based on bank st...
EventFunctionWrapper nextReqEvent
const bool burstInterleave
Stats::Formula busUtilRead
uint64_t granularity() const
Determine the interleaving granularity of the range.
uint64_t totalReadQueueSize
Total read request packets queue length in #packets.
std::pair< std::vector< uint32_t >, bool > minBankPrep(const DRAMPacketQueue &queue, Tick min_col_at) const
Find which are the earliest banks ready to issue an activate for the enqueued requests.
A DRAM packet stores packets along with the timestamp of when the packet entered the queue...
const FlagsType nonan
Don't print if this is NAN.
uint32_t writeEntries
Track number of packets in write queue going to this rank.
Stats::Vector perBankRdBursts
Tick verifySingleCmd(Tick cmd_tick)
Check for command bus contention for single cycle command.
DRAMPacketQueue::iterator chooseNextFRFCFS(DRAMPacketQueue &queue, Tick extra_col_delay)
For FR-FCFS policy reorder the read/write queue depending on row buffer hits and earliest bursts avai...
uint32_t readEntries
Track number of packets in read queue going to this rank.
Tick lastStatsResetTick
The time when stats were last reset used to calculate average power.
MemoryPort(const std::string &name, DRAMCtrl &_memory)
const Tick frontendLatency
Pipeline latency of the controller frontend.
Stats::Vector masterReadTotalLat
Stats::Formula pageHitRate
Stats::Scalar selfRefreshEnergy
Stats::Histogram wrPerTurnAround
bool cacheResponding() const
bool recvTimingReq(PacketPtr pkt)
Stats::Formula avgWrBWSys
DRAMCtrl(const DRAMCtrlParams *p)
DrainState drain() override
Notify an object that it needs to drain its state.
MasterID masterId() const
Get the packet MasterID (interface compatibility with Packet)
std::deque< DRAMPacket * > respQueue
Response queue where read packets wait after we're done working with them, but it's not time to send ...
Rank(DRAMCtrl &_memory, const DRAMCtrlParams *_p, int rank)
unsigned int burstsServiced
Number of DRAM bursts serviced so far for a system packet.
Stats::Vector memoryStateTime
Track time spent in each power state.
RefreshState refreshState
current refresh state
Addr addr
The starting address of the DRAM packet.
Histogram & init(size_type size)
Set the parameters of this histogram.
bool writeQueueFull(unsigned int pktCount) const
Check if the write queue has room for more entries.
Tick refreshDueAt
Keep track of when a refresh is due.
bool isConnected() const
Is this port currently connected to a peer?
A queued port is a port that has an infinite queue for outgoing packets and thus decouples the module...
The DRAM controller is a single-channel memory controller capturing the most important timing constra...
Stats::Vector writePktSize
uint8_t rank
Current Rank index.
Helper class for objects that have power states.
uint8_t numPriorities() const
Gets the total number of priority levels in the QoS memory controller.
const uint32_t burstLength
uint8_t qosSchedule(std::initializer_list< Queues *> queues_ptr, uint64_t queue_entry_size, const PacketPtr pkt)
Assign priority to a packet by executing the configured QoS policy.
void pushLabel(const std::string &lbl)
Push label for PrintReq (safe to call unconditionally).
void regStats() override
Callback to set stat parameters.
const uint32_t ranksPerChannel
Tick Frequency
The simulated frequency of curTick(). (In ticks per second)
bool readQueueFull(unsigned int pktCount) const
Check if the read queue has room for more entries.
Derived & flags(Flags _flags)
Set the flags and marks this stat to print at the end of simulation.
Stats::Formula simSeconds
const std::unique_ptr< TurnaroundPolicy > turnPolicy
QoS Bus Turnaround Policy: selects the bus direction (READ/WRITE)
const uint32_t deviceRowBufferSize
DrainState
Object drain/handover states.
A burst helper helps organize and manage a packet that is larger than the DRAM burst size...
Derived & init(size_type size)
Set this vector to have the given size.
const uint16_t bankId
Bank id is calculated considering banks in all the ranks, e.g. with 2 ranks of 8 banks each, bankId = 0 -> rank0, bank0 and bankId = 8 -> rank1, bank0.
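A tiny worked sketch of that (rank, bank) flattening, using illustrative values:

#include <cstdio>

int main()
{
    const unsigned banksPerRank = 8;
    // flatten (rank, bank) into a single bank id and recover it again
    auto bankId = [&](unsigned rank, unsigned bank) {
        return rank * banksPerRank + bank;
    };
    unsigned id = bankId(1, 0);
    std::printf("bankId %u -> rank %u, bank %u\n",
                id, id / banksPerRank, id % banksPerRank);  // 8 -> rank 1, bank 0
    return 0;
}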
void computeStats()
Computes stats just prior to dump event.
virtual void init() override
init() is called after all C++ SimObjects have been created and all ports are connected.
uint8_t outstandingEvents
Number of ACT, RD, and WR events currently scheduled Incremented when a refresh event is started as w...
EventFunctionWrapper activateEvent
Stats::Scalar writeEnergy
void startup(Tick ref_tick)
Kick off accounting for power and refresh states and schedule initial refresh.
bool isQueueEmpty() const
Check if the command queue of current rank is idle.
unsigned int numBanksActive
To track number of banks which are currently active for this rank.
Stats::Vector perBankWrBursts
void recvFunctional(PacketPtr pkt)
Receive a functional request packet from the peer.
bool isTimingMode
Remember if the memory system is in timing mode.
void processWriteDoneEvent()
DrainState drainState() const
Return the current drain state of an object.
void regStats() override
Callback to set stat parameters.
A basic class to track the bank state, i.e.
Addr getCtrlAddr(Addr addr)
Get an address in a dense range which starts from 0.
Draining buffers pending serialization/handover.
virtual void preDumpStats()
Callback before stats are dumped.
const uint32_t maxAccessesPerRow
Max column accesses (read and write) per row, before forcefully closing it.
const uint32_t bankGroupsPerRank
Tick lastBurstTick
Track when we issued the last read/write burst.
Tick curTick()
The current simulated tick.
Tick recvAtomic(PacketPtr pkt)
Receive an atomic request packet from the peer.
Stats::Vector masterReadBytes
virtual Port & getPort(const std::string &if_name, PortID idx=InvalidPortID)
Get a port with a given name and index.
Enums::MemSched memSchedPolicy
Memory controller configuration initialized based on parameter values.
std::string csprintf(const char *format, const Args &...args)
bool needsResponse() const
const Tick backendLatency
Pipeline latency of the backend and PHY.
EventFunctionWrapper prechargeEvent
uint32_t headerDelay
The extra delay from seeing the packet until the header is transmitted.
Stats::Scalar servicedByWrQ
void schedTimingResp(PacketPtr pkt, Tick when)
Schedule the sending of a timing response.
BurstHelper * burstHelper
A pointer to the BurstHelper if this DRAMPacket is a split packet If not a split packet (common case)...
void prechargeBank(Rank &rank_ref, Bank &bank_ref, Tick pre_tick, bool auto_or_preall=false, bool trace=true)
Precharge a given bank and also update when the precharge is done.
std::vector< Bank > banks
Vector of Banks.
uint64_t Tick
Tick count type.
uint64_t power(uint32_t n, uint32_t e)
uint8_t qosValue() const
QoS Value getter Returns 0 if QoS value was never set (constructor default).
void qosValue(const uint8_t qv)
Set the packet QoS value (interface compatibility with Packet)
void popLabel()
Pop label for PrintReq (safe to call unconditionally).
PowerState pwrState
Current power state.
void checkDrainDone()
Let the rank check if it was waiting for requests to drain to allow it to transition states...
EventFunctionWrapper respondEvent
void replaceBits(T &val, int first, int last, B bit_val)
A convenience function to replace bits first to last of val with bit_val in place.
std::deque< Tick > actTicks
List to keep track of activate ticks.
Stats::Vector masterWriteAccesses
DRAMStats(DRAMCtrl &dram)
void access(PacketPtr pkt)
Perform an untimed memory access and update all the state (e.g.
Stats::Formula readRowHitRate
const uint8_t twoCycleActivate
std::vector< DRAMPacketQueue > readQueue
The controller's main read and write queues, with support for QoS reordering.
void deschedule(Event &event)
Tick recvAtomic(PacketPtr pkt)
MasterID masterId() const
bool isPowerOf2(const T &n)
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condi...
void schedule(Event &event, Tick when)
BusState busState
Bus state used to control the read/write switching and drive the scheduling of the next request...
void recordTurnaroundStats()
Record statistics on turnarounds based on busStateNext and busState values.
Stats::Formula masterReadAvgLat
Tick pwrStateTick
Track when we transitioned to the current power state.
const Tick clkResyncDelay
Stats::Vector masterReadAccesses
static bool sortTime(const Command &cmd, const Command &cmd_next)
Function for sorting Command structures based on timeStamp.
void reschedule(Event &event, Tick when, bool always=false)
uint64_t totalWriteQueueSize
Total write request packets queue length in #packets.
Stats::Scalar prePowerDownEnergy
const Tick wrToRdDlySameBG
void resetStats() override
Callback to reset stats.
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Stats::Scalar bytesReadWrQ
Stats::Formula avgMemAccLat
Derived & precision(int _precision)
Set the precision and marks this stat to print at the end of simulation.
#define ULL(N)
uint64_t constant
uint32_t payloadDelay
The extra pipelining delay from seeing the packet until the end of payload is transmitted by the comp...
const uint8_t rank
Will be populated by address decoder.
void activateBank(Rank &rank_ref, Bank &bank_ref, Tick act_tick, uint32_t row)
Keep track of when row activations happen, in order to enforce the maximum number of activations in t...
PowerState pwrStatePostRefresh
Previous low-power state, which will be re-entered after refresh.
A Packet is used to encapsulate a transfer between two objects in the memory system (e...
const Tick M5_CLASS_VAR_USED tCK
Basic memory timing parameters initialized based on parameter values.
bool isRead() const
Return true if its a read packet (interface compatibility with Packet)
void processRefreshEvent()
Stats::Scalar averagePower
const unsigned int burstCount
Number of DRAM bursts required for a system packet.
std::unique_ptr< Packet > pendingDelete
Upstream caches need this packet until true is returned, so hold it for deletion until a subsequent c...
void addToWriteQueue(PacketPtr pkt, unsigned int pktCount)
Decode the incoming pkt, create a dram_pkt and push to the back of the write queue.
static const uint32_t NO_ROW
const uint32_t rowBufferSize
RankStats(DRAMCtrl &memory, Rank &rank)
bool inRefIdleState() const
Check if there is no refresh and no preparation of refresh ongoing i.e.
const uint32_t devicesPerRank
MasterID maxMasters()
Get the number of masters registered in the system.
PowerState pwrStateTrans
Since we are taking decisions out of order, we need to keep track of what power transition is happeni...
const uint32_t minWritesPerSwitch
void suspend()
Stop the refresh events.
Stats::Formula masterWriteRate
void schedulePowerEvent(PowerState pwr_state, Tick tick)
Schedule a power state transition in the future, and potentially override an already scheduled transi...
bool scheduled() const
Determine if the current event is scheduled.
void accessAndRespond(PacketPtr pkt, Tick static_latency)
When a packet reaches its "readyTime" in the response Q, use the "access()" method in AbstractMemory ...
const uint32_t deviceBusWidth
Stats::Scalar bytesReadSys
Stats::Formula avgRdBWSys
void updatePowerStats()
Function to update Power Stats.
EventFunctionWrapper powerEvent
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
const uint32_t writeHighThreshold
std::unordered_multiset< Tick > burstTicks
Holds count of commands issued in burst window starting at defined Tick.
virtual const std::string name() const
Enums::AddrMap addrMapping
Tick nextReqTime
The soonest you have to start thinking about the next request is the longest access time that can occ...
bool allRanksDrained() const
Return true once refresh is complete for all ranks and there are no additional commands enqueued...
void processPrechargeEvent()
void powerDownSleep(PowerState pwr_state, Tick tick)
Schedule a transition to power-down (sleep)
System * system() const
read the system pointer Implemented for completeness with the setter
Stats::Scalar preBackEnergy
Tick getBurstWindow(Tick cmd_tick)
Calculate burst window aligned tick.
bool interleaved() const
Determine if the range is interleaved or not.
EventFunctionWrapper wakeUpEvent
const std::string name() const
void processActivateEvent()
DRAMPacket * decodeAddr(const PacketPtr pkt, Addr dramPktAddr, unsigned int size, bool isRead) const
Address decoder to figure out physical mapping onto ranks, banks, and rows.
DRAMPower power
One DRAMPower instance per rank.
void sendRetryReq()
Send a retry to the master port that previously attempted a sendTimingReq to this slave port and fail...
Stats::Scalar writeRowHits
Stats::Scalar totalIdleTime
Stat to track total DRAM idle time.
void logRequest(BusState dir, MasterID m_id, uint8_t qos, Addr addr, uint64_t entries)
Called upon receiving a request or updates statistics and updates queues status.
uint64_t size() const
Get the memory size.
Tick wakeUpAllowedAt
delay power-down and self-refresh exit until this requirement is met
Stats::Formula busUtilWrite
Stats::Formula writeRowHitRate
void resetStats() override
Callback to reset stats.
void processNextReqEvent()
Various things are required to set up "events" in gem5. When the "respondEvent" occurs, for example...
System * _system
Pointer to the System object.
std::unordered_set< Addr > isInWriteQueue
To avoid iterating over the write queue to check for overlapping transactions, maintain a set of burs...
const uint32_t columnsPerRowBuffer
void signalDrainDone() const
Signal that an object is drained.
void recvFunctional(PacketPtr pkt)
Stats::Histogram bytesPerActivate
Tick verifyMultiCmd(Tick cmd_tick, Tick max_multi_cmd_split=0)
Check for command bus contention for multi-cycle (2 currently) command.
T divCeil(const T &a, const U &b)
Stats::Formula masterWriteAvgLat
Stats::Scalar actBackEnergy
EventFunctionWrapper refreshEvent
Stats::Scalar readRowHits
Simple structure to hold the values needed to keep track of commands for DRAMPower.
Stats::Formula masterReadRate
void processWakeUpEvent()
const uint32_t deviceSize
The following are basic design parameters of the memory controller, and are initialized based on para...
std::vector< DRAMPacketQueue > writeQueue
int16_t PortID
Port index/ID type, and a symbolic name for an invalid port id.
Tick readyTime
When will request leave the controller.
const uint32_t columnsPerStripe
Stats::Scalar totMemAccLat
Data::MemCommand::cmds type
virtual void regStats()
Callback to set stat parameters.
void preDumpStats() override
Callback before stats are dumped.
bool trySatisfyFunctional(PacketPtr pkt)
Check the list of buffered packets against the supplied functional request.
bool isTimingMode() const
Is the system in timing mode?
Tick nextBurstAt
Till when must we wait before issuing next RD/WR burst?
T bits(T val, int first, int last)
Extract the bitfield from position 'first' to 'last' (inclusive) from 'val' and right justify it...
const std::string & cmdString() const
Return the string name of the cmd field (for debugging and tracing).
void processRespondEvent()
const uint32_t writeBufferSize
const uint32_t banksPerRank
const uint32_t readBufferSize
DRAMPower is a standalone tool which calculates the power consumed by a DRAM in the system...
const FlagsType nozero
Don't print if this is zero.
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
const Tick burstDataCycles
virtual void drainResume() override
Resume execution after a successful drain.
Tick when() const
Get the time that the event is scheduled.
virtual void startup() override
startup() is the final initialization call before simulation.
Addr burstAlign(Addr addr) const
Burst-align an address.
Stats::Vector masterWriteBytes
Counter value() const
Return the current value of this stat as its base type.
Stats::Vector readPktSize
void addToReadQueue(PacketPtr pkt, unsigned int pktCount)
When a new read comes in, first check if the write q has a pending request to the same address...
Stats::Scalar actPowerDownEnergy
void printQs() const
Used for debugging to observe the contents of the queues.
uint8_t schedule(MasterID m_id, uint64_t data)
unsigned int size
The size of this dram packet in bytes It is always equal or smaller than DRAM burst size...
bool recvTimingReq(PacketPtr)
Receive a timing request from the peer.
void sample(const U &v, int n=1)
Add a value to the distribtion n times.
const FlagsType init
This Stat is Initialized.
Stats::Vector masterWriteTotalLat
Port & getPort(const std::string &if_name, PortID idx=InvalidPortID) override
Get a port with a given name and index.
Stats::Scalar writeBursts
const PacketPtr pkt
This comes from the outside world.
void scheduleWakeUpEvent(Tick exit_delay)
schedule and event to wake-up from power-down or self-refresh and update bank timing parameters ...
Stats::Histogram rdPerTurnAround
Rank class includes a vector of banks.
Stats::Scalar bytesWritten
bool inLowPowerState
rank is in or transitioning to power-down or self-refresh
BusState selectNextBusState()
Returns next bus direction (READ or WRITE) based on configured policy.
MemoryPort port
Our incoming port, for a multi-ported controller add a crossbar in front of it.
void flushCmdList()
Push command out of cmdList queue that are scheduled at or before curTick() to DRAMPower library All ...
Stats::Scalar refreshEnergy
DRAMPacketQueue::iterator chooseNext(DRAMPacketQueue &queue, Tick extra_col_delay)
The memory scheduler/arbiter - picks which request needs to go next, based on the specified policy suc...