#include "debug/DRAM.hh"
#include "debug/Drain.hh"
#include "debug/MemCtrl.hh"
#include "debug/NVM.hh"
#include "debug/QOS.hh"
    port(name() + ".port", *this), isTimingMode(false),
    retryRdReq(false), retryWrReq(false),

    respondEvent([this] { processRespondEvent(dram, respQueue,
                          respondEvent, retryRdReq); }, name()),

    readBufferSize(dram->readBufferSize),
    writeBufferSize(dram->writeBufferSize),
    writeHighThreshold(writeBufferSize * p.write_high_thresh_perc / 100.0),
    writeLowThreshold(writeBufferSize * p.write_low_thresh_perc / 100.0),
    minWritesPerSwitch(p.min_writes_per_switch),
    minReadsPerSwitch(p.min_reads_per_switch),
    memSchedPolicy(p.mem_sched_policy),
    frontendLatency(p.static_frontend_latency),
    backendLatency(p.static_backend_latency),
    commandWindow(p.command_window),
    DPRINTF(MemCtrl, "Setting up controller\n");

    readQueue.resize(p.qos_priorities);
    writeQueue.resize(p.qos_priorities);

    dram->setCtrl(this, commandWindow);
    if (p.write_low_thresh_perc >= p.write_high_thresh_perc)
        fatal("Write buffer low threshold %d must be smaller than the "
              "high threshold %d\n", p.write_low_thresh_perc,
              p.write_high_thresh_perc);

    if (p.disable_sanity_check) {
        port.disableSanityCheck();
        fatal("MemCtrl %s is unconnected!\n", name());

    panic("Can't handle address range for packet %s\n", pkt->print());
169 "Read queue limit %d, current size %d, entries needed %d\n",
181 "Write queue limit %d, current size %d, entries needed %d\n",
196 assert(pkt_count != 0);
206 unsigned pktsServicedByWrQ = 0;
    for (int cnt = 0; cnt < pkt_count; ++cnt) {
        unsigned size = std::min((addr | (burst_size - 1)) + 1,

        bool foundInWrQ = false;

        for (const auto& p : vec) {
            if (p->addr <= addr &&
                ((addr + size) <= (p->addr + p->size))) {

                    "Read to addr %#x with size %d serviced by "

        if (pkt_count > 1 && burst_helper == NULL) {
                "memory requests\n", pkt->getAddr(), pkt_count);

        addr = (addr | (burst_size - 1)) + 1;
    if (pktsServicedByWrQ == pkt_count) {

    if (burst_helper != NULL)

    for (int cnt = 0; cnt < pkt_count; ++cnt) {
        unsigned size = std::min((addr | (burst_size - 1)) + 1,

            "Merging write burst with existing queue entry\n");

        addr = (addr | (burst_size - 1)) + 1;
    for (const auto& packet : queue) {

    for (const auto& packet : queue) {

          "Should only see read and writes at memory controller\n");

             "Can't handle address range for packet %s\n", pkt->print());

    unsigned size = pkt->getSize();
495 "processRespondEvent(): Some req has reached its readyTime\n");
525 if (!queue.empty()) {
526 assert(queue.front()->readyTime >=
curTick());
528 schedule(resp_event, queue.front()->readyTime);
535 DPRINTF(Drain,
"Controller done draining\n");
551 retry_rd_req =
false;
MemPacketQueue::iterator

    MemPacketQueue::iterator ret = queue.end();

    if (!queue.empty()) {
        if (queue.size() == 1) {

            for (auto i = queue.begin(); i != queue.end(); ++i) {

            std::tie(ret, col_allowed_at)

            panic("No scheduling policy chosen\n");
    auto selected_pkt_it = queue.end();

    const Tick min_col_at = std::max(mem_intr->nextBurstAt + extra_col_delay,

    std::tie(selected_pkt_it, col_allowed_at) =

    if (selected_pkt_it == queue.end()) {

    return std::make_pair(selected_pkt_it, col_allowed_at);

             "Can't handle address range for packet %s\n", pkt->print());
        auto current_it = it++;

    return (cmd_tick - burst_offset);

    Tick cmd_at = cmd_tick;

    while (burstTicks.count(burst_tick) >= max_cmds_per_burst) {

                         Tick max_multi_cmd_split)

    Tick cmd_at = cmd_tick;

    Tick burst_offset = 0;

    while (max_multi_cmd_split > (first_cmd_offset + burst_offset)) {

    Tick first_cmd_tick = burst_tick - std::min(burst_offset, burst_tick);

    bool first_can_issue = false;
    bool second_can_issue = false;
    while (!first_can_issue || !second_can_issue) {
        bool same_burst = (burst_tick == first_cmd_tick);
        auto first_cmd_count = burstTicks.count(first_cmd_tick);
        auto second_cmd_count = same_burst ? first_cmd_count + 1 :

        first_can_issue = first_cmd_count < max_cmds_per_burst;
        second_can_issue = second_cmd_count < max_cmds_per_burst;

        if (!second_can_issue) {
            DPRINTF(MemCtrl, "Contention (cmd2) found on command bus at %d\n",

        bool gap_violated = !same_burst &&
            ((burst_tick - first_cmd_tick) > max_multi_cmd_split);

        if (!first_can_issue || (!second_can_issue && gap_violated)) {
            DPRINTF(MemCtrl, "Contention (cmd1) found on command bus at %d\n",

    DPRINTF(MemCtrl, "Access to %#x, ready at %lld next burst at %lld.\n",
    bool mem_busy = true;

    mem_busy = mem_intr->isBusy(read_queue_empty, all_writes_nvm);

                             bool& retry_wr_req) {

            switched_cmd_type ? "[turnaround triggered]" : "");
    if (switched_cmd_type) {

            "Switching to writes after %d reads with %d reads "

            "Switching to reads after %d writes with %d writes "

            DPRINTF(Drain, "MemCtrl controller done draining\n");

    bool switch_to_writes = false;

            "Switching to writes due to read queue empty\n");
        switch_to_writes = true;

            DPRINTF(Drain, "MemCtrl controller done draining\n");
        bool read_found = false;
        MemPacketQueue::iterator to_read;

                "Checking READ queue [%d] priority [%d elements]\n",
                prio, queue->size());

            to_read = chooseNext((*queue), switched_cmd_type ?

            if (to_read != queue->end()) {

        auto mem_pkt = *to_read;

            "Command for %#x, issued at %lld.\n", mem_pkt->addr, cmd_at);

        assert(mem_pkt->readyTime >= curTick());

                    mem_pkt->qosValue(), mem_pkt->getAddr(), 1,
                    mem_pkt->readyTime - mem_pkt->entryTime);

        if (resp_queue.empty()) {
            schedule(resp_event, mem_pkt->readyTime);

            assert(resp_queue.back()->readyTime <= mem_pkt->readyTime);

        resp_queue.push_back(mem_pkt);

            switch_to_writes = true;

        readQueue[mem_pkt->qosValue()].erase(to_read);
        if (switch_to_writes) {

        bool write_found = false;
        MemPacketQueue::iterator to_write;

                "Checking WRITE queue [%d] priority [%d elements]\n",
                prio, queue->size());

            if (to_write != queue->end()) {

        auto mem_pkt = *to_write;

            "Command for %#x, issued at %lld.\n", mem_pkt->addr, cmd_at);

                    mem_pkt->qosValue(), mem_pkt->getAddr(), 1,
                    mem_pkt->readyTime - mem_pkt->entryTime);

        writeQueue[mem_pkt->qosValue()].erase(to_write);

        bool below_threshold =

        retry_wr_req = false;
    : statistics::Group(&_ctrl),

    ADD_STAT(readReqs, statistics::units::Count::get(),
             "Number of read requests accepted"),
    ADD_STAT(writeReqs, statistics::units::Count::get(),
             "Number of write requests accepted"),

    ADD_STAT(readBursts, statistics::units::Count::get(),
             "Number of controller read bursts, including those serviced by "
    ADD_STAT(writeBursts, statistics::units::Count::get(),
             "Number of controller write bursts, including those merged in "

    ADD_STAT(servicedByWrQ, statistics::units::Count::get(),
             "Number of controller read bursts serviced by the write queue"),
    ADD_STAT(mergedWrBursts, statistics::units::Count::get(),
             "Number of controller write bursts merged with an existing one"),

    ADD_STAT(neitherReadNorWriteReqs, statistics::units::Count::get(),
             "Number of requests that are neither read nor write"),

    ADD_STAT(avgRdQLen, statistics::units::Rate<
                statistics::units::Count, statistics::units::Tick>::get(),
             "Average read queue length when enqueuing"),
    ADD_STAT(avgWrQLen, statistics::units::Rate<
                statistics::units::Count, statistics::units::Tick>::get(),
             "Average write queue length when enqueuing"),
    ADD_STAT(numRdRetry, statistics::units::Count::get(),
             "Number of times read queue was full causing retry"),
    ADD_STAT(numWrRetry, statistics::units::Count::get(),
             "Number of times write queue was full causing retry"),

    ADD_STAT(readPktSize, statistics::units::Count::get(),
             "Read request sizes (log2)"),
    ADD_STAT(writePktSize, statistics::units::Count::get(),
             "Write request sizes (log2)"),

    ADD_STAT(rdQLenPdf, statistics::units::Count::get(),
             "What read queue length does an incoming req see"),
    ADD_STAT(wrQLenPdf, statistics::units::Count::get(),
             "What write queue length does an incoming req see"),

    ADD_STAT(rdPerTurnAround, statistics::units::Count::get(),
             "Reads before turning the bus around for writes"),
    ADD_STAT(wrPerTurnAround, statistics::units::Count::get(),
             "Writes before turning the bus around for reads"),

    ADD_STAT(bytesReadWrQ, statistics::units::Byte::get(),
             "Total number of bytes read from write queue"),
    ADD_STAT(bytesReadSys, statistics::units::Byte::get(),
             "Total read bytes from the system interface side"),
    ADD_STAT(bytesWrittenSys, statistics::units::Byte::get(),
             "Total written bytes from the system interface side"),

    ADD_STAT(avgRdBWSys, statistics::units::Rate<
                statistics::units::Byte, statistics::units::Second>::get(),
             "Average system read bandwidth in Byte/s"),
    ADD_STAT(avgWrBWSys, statistics::units::Rate<
                statistics::units::Byte, statistics::units::Second>::get(),
             "Average system write bandwidth in Byte/s"),

             "Total gap between requests"),
    ADD_STAT(avgGap, statistics::units::Rate<
                statistics::units::Tick, statistics::units::Count>::get(),
             "Average gap between requests"),
    ADD_STAT(requestorReadBytes, statistics::units::Byte::get(),
             "Per-requestor bytes read from memory"),
    ADD_STAT(requestorWriteBytes, statistics::units::Byte::get(),
             "Per-requestor bytes write to memory"),
    ADD_STAT(requestorReadRate, statistics::units::Rate<
                statistics::units::Byte, statistics::units::Second>::get(),
             "Per-requestor bytes read from memory rate"),
    ADD_STAT(requestorWriteRate, statistics::units::Rate<
                statistics::units::Byte, statistics::units::Second>::get(),
             "Per-requestor bytes write to memory rate"),
    ADD_STAT(requestorReadAccesses, statistics::units::Count::get(),
             "Per-requestor read serviced memory accesses"),
    ADD_STAT(requestorWriteAccesses, statistics::units::Count::get(),
             "Per-requestor write serviced memory accesses"),
    ADD_STAT(requestorReadTotalLat, statistics::units::Tick::get(),
             "Per-requestor read total memory access latency"),
    ADD_STAT(requestorWriteTotalLat, statistics::units::Tick::get(),
             "Per-requestor write total memory access latency"),
    ADD_STAT(requestorReadAvgLat, statistics::units::Rate<
                statistics::units::Tick, statistics::units::Count>::get(),
             "Per-requestor read average memory access latency"),
    ADD_STAT(requestorWriteAvgLat, statistics::units::Rate<
                statistics::units::Tick, statistics::units::Count>::get(),
             "Per-requestor write average memory access latency")
    using namespace statistics;

    assert(ctrl.system());
    const auto max_requestors = ctrl.system()->maxRequestors();

    avgRdQLen.precision(2);
    avgWrQLen.precision(2);

    readPktSize.init(ceilLog2(ctrl.system()->cacheLineSize()) + 1);
    writePktSize.init(ceilLog2(ctrl.system()->cacheLineSize()) + 1);

    rdQLenPdf.init(ctrl.readBufferSize);
    wrQLenPdf.init(ctrl.writeBufferSize);

        .init(ctrl.readBufferSize)

        .init(ctrl.writeBufferSize)

    avgRdBWSys.precision(8);
    avgWrBWSys.precision(8);
    avgGap.precision(2);

        .init(max_requestors)

        .init(max_requestors)

    requestorReadAccesses
        .init(max_requestors)

    requestorWriteAccesses
        .init(max_requestors)

    requestorReadTotalLat
        .init(max_requestors)

    requestorWriteTotalLat
        .init(max_requestors)

    requestorWriteAvgLat
    for (int i = 0; i < max_requestors; i++) {
        const std::string requestor = ctrl.system()->getRequestorName(i);
        requestorReadBytes.subname(i, requestor);
        requestorReadRate.subname(i, requestor);
        requestorWriteBytes.subname(i, requestor);
        requestorWriteRate.subname(i, requestor);
        requestorReadAccesses.subname(i, requestor);
        requestorWriteAccesses.subname(i, requestor);
        requestorReadTotalLat.subname(i, requestor);
        requestorReadAvgLat.subname(i, requestor);
        requestorWriteTotalLat.subname(i, requestor);
        requestorWriteAvgLat.subname(i, requestor);
    avgGap = totGap / (readReqs + writeReqs);

    requestorReadRate = requestorReadBytes / simSeconds;
    requestorWriteRate = requestorWriteBytes / simSeconds;
    requestorReadAvgLat = requestorReadTotalLat / requestorReadAccesses;
    requestorWriteAvgLat = requestorWriteTotalLat / requestorWriteAccesses;
    panic_if(!found, "Can't handle address range for packet %s\n",

             "Can't handle address range for backdoor %s.",

    if (if_name != "port") {

        DPRINTF(Drain, "Memory controller not drained, write: %d, read: %d,"

        DPRINTF(Drain, "Scheduling nextReqEvent from drain\n");

    return ctrl.getAddrRanges();

    if (!queue.trySatisfyFunctional(pkt)) {

        ctrl.recvFunctional(pkt);

    ctrl.recvMemBackdoorReq(req, backdoor);

    return ctrl.recvAtomic(pkt);

    return ctrl.recvAtomicBackdoor(pkt, backdoor);

    return ctrl.recvTimingReq(pkt);

    queue.disableSanityCheck();
const AddrRange & range() const
virtual std::string name() const
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
void pushLabel(const std::string &lbl)
Push label for PrintReq (safe to call unconditionally).
const std::string & cmdString() const
Return the string name of the cmd field (for debugging and tracing).
void print(std::ostream &o, int verbosity=0, const std::string &prefix="") const
bool needsResponse() const
uint32_t payloadDelay
The extra pipelining delay from seeing the packet until the end of payload is transmitted by the comp...
void makeResponse()
Take a request packet and modify it in place to be suitable for returning as a response to that reque...
RequestorID requestorId() const
uint32_t headerDelay
The extra delay from seeing the packet until the header is transmitted.
uint8_t qosValue() const
QoS Value getter Returns 0 if QoS value was never set (constructor default).
void popLabel()
Pop label for PrintReq (safe to call unconditionally).
bool cacheResponding() const
Ports are used to interface objects to each other.
bool isConnected() const
Is this port currently connected to a peer?
A queued port is a port that has an infinite queue for outgoing packets and thus decouples the module...
void schedTimingResp(PacketPtr pkt, Tick when)
Schedule the sending of a timing response.
void sendRangeChange() const
Called by the owner to send a range change.
void sendRetryReq()
Send a retry to the request port that previously attempted a sendTimingReq to this response port and ...
bool isTimingMode() const
Is the system in timing mode?
void access(PacketPtr pkt)
Perform an untimed memory access and update all the state (e.g.
void functionalAccess(PacketPtr pkt)
Perform an untimed memory read or write without changing anything but the memory itself.
void getBackdoor(MemBackdoorPtr &bd_ptr)
AddrRange getAddrRange() const
Get the address range.
A burst helper helps organize and manage a packet that is larger than the memory burst size.
unsigned int burstsServiced
Number of bursts serviced so far for a system packet.
const unsigned int burstCount
Number of bursts required for a system packet.
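
To make the burst accounting concrete, here is a minimal standalone sketch (plain C++, not gem5 code) of the splitting arithmetic that appears in the listing above; the burst size, start address, and packet size are made-up illustration values.

    // Sketch: split a system packet that is larger than the burst size into
    // burst-sized chunks, counting how many bursts it needs.
    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const uint64_t burst_size = 64;              // bytes per memory burst (assumed)
        const uint64_t pkt_size   = 200;             // system packet size in bytes (assumed)
        uint64_t addr             = 0x1010;          // packet start address (assumed)
        const uint64_t end        = addr + pkt_size;

        unsigned burst_count = 0;                    // plays the role of BurstHelper::burstCount
        while (addr < end) {
            // Bytes up to the next burst boundary or the end of the packet.
            uint64_t size = std::min((addr | (burst_size - 1)) + 1, end) - addr;
            std::printf("burst %u: addr %#llx size %llu\n", burst_count,
                        (unsigned long long)addr, (unsigned long long)size);
            addr = (addr | (burst_size - 1)) + 1;    // advance to the next burst boundary
            ++burst_count;
        }
        std::printf("bursts required: %u\n", burst_count);
        return 0;
    }

With these values the 200-byte packet lands in four bursts (48 + 64 + 64 + 24 bytes), which is what burstsServiced is compared against as the responses come back.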
void recvFunctional(PacketPtr pkt) override
Receive a functional request packet from the peer.
MemoryPort(const std::string &name, MemCtrl &_ctrl)
bool recvTimingReq(PacketPtr) override
Receive a timing request from the peer.
AddrRangeList getAddrRanges() const override
Get a list of the non-overlapping address ranges the owner is responsible for.
Tick recvAtomic(PacketPtr pkt) override
Receive an atomic request packet from the peer.
void disableSanityCheck()
Tick recvAtomicBackdoor(PacketPtr pkt, MemBackdoorPtr &backdoor) override
Receive an atomic request packet from the peer, and optionally provide a backdoor to the data being a...
void recvMemBackdoorReq(const MemBackdoorReq &req, MemBackdoorPtr &backdoor) override
Receive a request for a back door to a range of memory.
The memory controller is a single-channel memory controller capturing the most important timing const...
virtual void recvFunctional(PacketPtr pkt)
virtual void pruneBurstTick()
Remove commands that have already issued from burstTicks.
uint32_t writeLowThreshold
enums::MemSched memSchedPolicy
Memory controller configuration initialized based on parameter values.
bool recvFunctionalLogic(PacketPtr pkt, MemInterface *mem_intr)
bool inReadBusState(bool next_state, const MemInterface *mem_intr) const
Check the current direction of the memory channel.
bool retryRdReq
Remember if we have to retry a request when available.
void printQs() const
Used for debugging to observe the contents of the queues.
const uint32_t minReadsPerSwitch
virtual void startup() override
startup() is the final initialization call before simulation.
void addToWriteQueue(PacketPtr pkt, unsigned int pkt_count, MemInterface *mem_intr)
Decode the incoming pkt, create a mem_pkt and push to the back of the write queue.
Tick recvAtomicLogic(PacketPtr pkt, MemInterface *mem_intr)
std::deque< MemPacket * > respQueue
Response queue where read packets wait after we're done working with them, but it's not time to send ...
MemoryPort port
Our incoming port, for a multi-ported controller add a crossbar in front of it.
uint32_t writeHighThreshold
std::unique_ptr< Packet > pendingDelete
Upstream caches need this packet until true is returned, so hold it for deletion until a subsequent c...
std::vector< MemPacketQueue > writeQueue
virtual Tick verifyMultiCmd(Tick cmd_tick, Tick max_cmds_per_burst, Tick max_multi_cmd_split=0)
Check for command bus contention for multi-cycle (2 currently) command.
EventFunctionWrapper respondEvent
virtual MemPacketQueue::iterator chooseNext(MemPacketQueue &queue, Tick extra_col_delay, MemInterface *mem_intr)
The memory scheduler/arbiter - picks which request needs to go next, based on the specified policy suc...
virtual std::pair< MemPacketQueue::iterator, Tick > chooseNextFRFCFS(MemPacketQueue &queue, Tick extra_col_delay, MemInterface *mem_intr)
For FR-FCFS policy reorder the read/write queue depending on row buffer hits and earliest bursts avai...
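
As a rough illustration only, the sketch below caricatures FR-FCFS selection: prefer the oldest packet that hits an open row and can issue at the earliest allowed column time, otherwise fall back to the packet whose bank becomes ready first. The QPkt structure, the open-row table, and all numbers are invented for the example and do not reflect gem5's interface-specific implementation.

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    using Tick = uint64_t;

    struct QPkt { int bank; int row; Tick bankReadyAt; };   // hypothetical queued packet

    int main()
    {
        std::vector<int>  openRow = {7, 3};            // open row per bank (assumed)
        std::vector<QPkt> queue   = {{0, 5, 900},      // oldest first
                                     {1, 3, 400},      // row hit, bank ready early
                                     {0, 7, 900}};
        const Tick min_col_at = 500;                   // earliest allowed column command

        int best = -1;
        Tick best_col_at = ~Tick(0);
        for (std::size_t i = 0; i < queue.size(); ++i) {
            const QPkt& p = queue[i];
            bool row_hit = (openRow[p.bank] == p.row);
            Tick col_at = std::max(p.bankReadyAt, min_col_at);
            if (row_hit && col_at <= min_col_at) {     // seamless row hit: take the oldest
                best = static_cast<int>(i);
                break;
            }
            if (col_at < best_col_at) {                // otherwise remember the earliest-ready
                best_col_at = col_at;
                best = static_cast<int>(i);
            }
        }
        std::printf("selected packet index: %d\n", best);
        return 0;
    }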
std::vector< MemPacketQueue > & selQueue(bool is_read)
Select either the read or write queue.
bool readQueueFull(unsigned int pkt_count) const
Check if the read queue has room for more entries.
virtual Tick doBurstAccess(MemPacket *mem_pkt, MemInterface *mem_intr)
Actually do the burst based on media specific access function.
virtual void processNextReqEvent(MemInterface *mem_intr, MemPacketQueue &resp_queue, EventFunctionWrapper &resp_event, EventFunctionWrapper &next_req_event, bool &retry_wr_req)
A bunch of things is required to set up "events" in gem5. When the "respondEvent" event occurs, for example,...
bool addToReadQueue(PacketPtr pkt, unsigned int pkt_count, MemInterface *mem_intr)
When a new read comes in, first check if the write q has a pending request to the same address....
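
The containment test that lets a read be serviced straight from the write queue (visible in the loop over the write queue earlier in the listing) can be sketched in isolation; WritePkt and servicedByWrQ below are hypothetical stand-ins for the queued MemPacket and the check itself.

    #include <cstdint>
    #include <cassert>

    struct WritePkt { uint64_t addr; unsigned size; };   // stand-in for a queued write

    static bool servicedByWrQ(const WritePkt& p, uint64_t addr, unsigned size)
    {
        // The read burst must be fully contained in an already-queued write.
        return p.addr <= addr && (addr + size) <= (p.addr + p.size);
    }

    int main()
    {
        WritePkt w{0x1000, 64};
        assert(servicedByWrQ(w, 0x1010, 16));    // fully inside the queued write
        assert(!servicedByWrQ(w, 0x1030, 32));   // spills past the write's end
        return 0;
    }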
std::unordered_multiset< Tick > burstTicks
Holds count of commands issued in burst window starting at defined Tick.
virtual Addr burstAlign(Addr addr, MemInterface *mem_intr) const
Burst-align an address.
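
Assuming the burst size is a power of two, burst alignment is the usual mask arithmetic; a small self-contained sketch (not the gem5 implementation):

    #include <cstdint>
    #include <cassert>

    static uint64_t burst_align(uint64_t addr, uint64_t burst_size)
    {
        return addr & ~(burst_size - 1);   // round down to the burst boundary
    }

    int main()
    {
        assert(burst_align(0x1015, 64) == 0x1000);   // mid-burst address rounds down
        assert(burst_align(0x1040, 64) == 0x1040);   // already aligned
        return 0;
    }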
bool inWriteBusState(bool next_state, const MemInterface *mem_intr) const
Check the current direction of the memory channel.
const uint32_t minWritesPerSwitch
bool writeQueueFull(unsigned int pkt_count) const
Check if the write queue has room for more entries.
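
A standalone sketch of the write-buffer bookkeeping implied by the constructor above: the high/low thresholds are percentages of the buffer size, and a full check compares current occupancy plus the incoming burst count against the buffer size. The concrete numbers are illustrative, not gem5 defaults, and the switching policy is simplified.

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const uint32_t writeBufferSize = 64;        // entries (assumed)
        const double write_high_thresh_perc = 85;   // parameter values (assumed)
        const double write_low_thresh_perc  = 50;

        const uint32_t writeHighThreshold =
            writeBufferSize * write_high_thresh_perc / 100.0;
        const uint32_t writeLowThreshold  =
            writeBufferSize * write_low_thresh_perc / 100.0;

        uint32_t totalWriteQueueSize = 52;          // current occupancy (assumed)
        unsigned pkt_count = 3;                     // bursts the new packet needs

        bool write_queue_full = (totalWriteQueueSize + pkt_count) > writeBufferSize;
        bool start_draining   = totalWriteQueueSize > writeHighThreshold; // keep writing
        bool below_threshold  = totalWriteQueueSize < writeLowThreshold;  // switch back to reads

        std::printf("full=%d start_draining=%d below_low=%d (hi=%u lo=%u)\n",
                    write_queue_full, start_draining, below_threshold,
                    writeHighThreshold, writeLowThreshold);
        return 0;
    }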
virtual void init() override
init() is called after all C++ SimObjects have been created and all ports are connected.
const Tick backendLatency
Pipeline latency of the backend and PHY.
uint32_t readBufferSize
The following are basic design parameters of the memory controller, and are initialized based on para...
virtual void recvMemBackdoorReq(const MemBackdoorReq &req, MemBackdoorPtr &backdoor)
const Tick frontendLatency
Pipeline latency of the controller frontend.
virtual bool respQEmpty()
virtual bool allIntfDrained() const
Ensure that all interfaces have drained commands.
EventFunctionWrapper nextReqEvent
virtual bool packetReady(MemPacket *pkt, MemInterface *mem_intr)
Determine if there is a packet that can issue.
virtual Tick verifySingleCmd(Tick cmd_tick, Tick max_cmds_per_burst, bool row_cmd)
Check for command bus contention for single cycle command.
virtual bool nvmWriteBlock(MemInterface *mem_intr)
Will check if all writes are for nvm interface and nvm's write resp queue is full.
virtual void processRespondEvent(MemInterface *mem_intr, MemPacketQueue &queue, EventFunctionWrapper &resp_event, bool &retry_rd_req)
virtual void accessAndRespond(PacketPtr pkt, Tick static_latency, MemInterface *mem_intr)
When a packet reaches its "readyTime" in the response Q, use the "access()" method in AbstractMemory ...
virtual Tick minWriteToReadDataGap()
Calculate the minimum delay used when scheduling a write-to-read transition.
virtual bool recvTimingReq(PacketPtr pkt)
virtual Tick minReadToWriteDataGap()
Calculate the minimum delay used when scheduling a read-to-write transition.
DrainState drain() override
Draining is the process of clearing out the states of SimObjects. These are the SimObjects that are pa...
std::vector< MemPacketQueue > readQueue
The controller's main read and write queues, with support for QoS reordering.
bool isTimingMode
Remember if the memory system is in timing mode.
virtual Tick recvAtomic(PacketPtr pkt)
Port & getPort(const std::string &if_name, PortID idx=InvalidPortID) override
Get a port with a given name and index.
virtual bool memBusy(MemInterface *mem_intr)
Checks if the memory interface is already busy.
virtual AddrRangeList getAddrRanges()
MemCtrl(const MemCtrlParams &p)
virtual Tick recvAtomicBackdoor(PacketPtr pkt, MemBackdoorPtr &backdoor)
Tick getBurstWindow(Tick cmd_tick)
Calculate burst window aligned tick.
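
In the same spirit as getBurstWindow/burstTicks/verifySingleCmd, the sketch below bins command ticks into fixed windows and pushes a command into a later window once the current one already holds max_cmds_per_burst entries. It is a simplified standalone illustration, not the gem5 code path, and all numbers are assumed.

    #include <cstdint>
    #include <cstdio>
    #include <unordered_set>

    using Tick = uint64_t;

    int main()
    {
        const Tick commandWindow = 5000;            // ticks per command window (assumed)
        const unsigned max_cmds_per_burst = 2;      // commands allowed per window (assumed)
        std::unordered_multiset<Tick> burstTicks;   // one entry per issued command

        auto getBurstWindow = [&](Tick cmd_tick) {
            return cmd_tick - (cmd_tick % commandWindow);   // window-aligned tick
        };

        auto verifySingleCmd = [&](Tick cmd_tick) {
            Tick cmd_at = cmd_tick;
            Tick burst_tick = getBurstWindow(cmd_tick);
            // Slide to later windows while the current one is fully booked.
            while (burstTicks.count(burst_tick) >= max_cmds_per_burst) {
                burst_tick += commandWindow;
                cmd_at = burst_tick;
            }
            burstTicks.insert(burst_tick);
            return cmd_at;
        };

        for (Tick t : {1000, 1200, 1400, 6100})
            std::printf("requested %llu -> issued %llu\n",
                        (unsigned long long)t,
                        (unsigned long long)verifySingleCmd(t));
        return 0;
    }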
std::unordered_set< Addr > isInWriteQueue
To avoid iterating over the write queue to check for overlapping transactions, maintain a set of burs...
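
The shortcut described here can be sketched with a plain unordered_set of burst-aligned addresses: a new write whose burst address is already present is treated as merged with the existing entry. Simplified and illustrative only; the burst size and addresses are assumed.

    #include <cstdint>
    #include <cstdio>
    #include <unordered_set>

    int main()
    {
        const uint64_t burst_size = 64;
        std::unordered_set<uint64_t> isInWriteQueue;   // burst addresses with queued writes

        auto addWrite = [&](uint64_t addr) {
            uint64_t burst_addr = addr & ~(burst_size - 1);
            bool merged = !isInWriteQueue.insert(burst_addr).second;
            std::printf("write %#llx -> %s\n", (unsigned long long)addr,
                        merged ? "merged with existing entry" : "new queue entry");
        };

        addWrite(0x2000);
        addWrite(0x2010);   // same burst: merged
        addWrite(0x2040);   // next burst: new entry
        return 0;
    }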
virtual bool pktSizeCheck(MemPacket *mem_pkt, MemInterface *mem_intr) const
Check if mem pkt's size is sane.
virtual void drainResume() override
Resume execution after a successful drain.
const Tick commandWindow
Length of a command window, used to check command bandwidth.
virtual void nonDetermReads(MemInterface *mem_intr)
Will access memory interface and select non-deterministic reads to issue.
General interface to a memory device. Includes functions and parameters shared across media types.
MemCtrl::BusState busState
virtual void setupRank(const uint8_t rank, const bool is_read)=0
Setup the rank based on packet received.
virtual MemPacket * decodePacket(const PacketPtr pkt, Addr pkt_addr, unsigned int size, bool is_read, uint8_t pseudo_channel=0)
Address decoder to figure out physical mapping onto ranks, banks, and rows.
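
For illustration only, an address decoder of this kind slices a physical address into column, bank, rank, and row fields; the field widths and ordering below are invented and do not correspond to any particular gem5 address mapping.

    #include <cstdint>
    #include <cstdio>

    struct Decoded { uint64_t row, rank, bank, col; };

    static Decoded decode(uint64_t addr)
    {
        const unsigned burst_shift = 6;                       // 64-byte bursts (assumed)
        const unsigned col_bits = 7, bank_bits = 4, rank_bits = 1;  // assumed widths

        addr >>= burst_shift;                                 // drop the offset within a burst
        Decoded d;
        d.col  = addr & ((1u << col_bits) - 1);   addr >>= col_bits;
        d.bank = addr & ((1u << bank_bits) - 1);  addr >>= bank_bits;
        d.rank = addr & ((1u << rank_bits) - 1);  addr >>= rank_bits;
        d.row  = addr;                                        // remaining bits select the row
        return d;
    }

    int main()
    {
        Decoded d = decode(0x12345678);
        std::printf("row %llu rank %llu bank %llu col %llu\n",
                    (unsigned long long)d.row, (unsigned long long)d.rank,
                    (unsigned long long)d.bank, (unsigned long long)d.col);
        return 0;
    }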
virtual void suspend()
This function is DRAM specific.
virtual std::pair< Tick, Tick > doBurstAccess(MemPacket *mem_pkt, Tick next_burst_at, const std::vector< MemPacketQueue > &queue)=0
This function performs the burst and update stats.
virtual Tick commandOffset() const =0
uint8_t pseudoChannel
pseudo channel number used for HBM modeling
virtual bool burstReady(MemPacket *pkt) const =0
Check if a burst operation can be issued to the interface.
virtual void checkRefreshState(uint8_t rank)
This function is DRAM specific.
virtual bool isBusy(bool read_queue_empty, bool all_writes_nvm)=0
This function checks if ranks are busy.
uint32_t readsThisTime
Reads/writes performed by the controller for this interface before bus direction is switched.
uint32_t readQueueSize
Read/write packets in the read/write queue for this interface. qos/mem_ctrl.hh has similar counters,...
virtual void respondEvent(uint8_t rank)
This function is DRAM specific.
virtual Tick accessLatency() const =0
virtual bool writeRespQueueFull() const
This function is NVM specific.
virtual void chooseRead(MemPacketQueue &queue)
This function is NVM specific.
virtual std::pair< MemPacketQueue::iterator, Tick > chooseNextFRFCFS(MemPacketQueue &queue, Tick min_col_at) const =0
For FR-FCFS policy, find the first command that can issue. Function will be overridden by interface to sele...
MemCtrl::BusState busStateNext
bus state for next request event triggered
Tick nextBurstAt
The tick until which the controller must wait before issuing the next RD/WR burst.
Tick minWriteToReadDataGap() const
virtual bool readsWaitingToIssue() const
This function is NVM specific.
uint32_t numWritesQueued
NVM specific variable, but declaring it here allows treating different interfaces in a more general wa...
Tick minReadToWriteDataGap() const
virtual bool allRanksDrained() const =0
Check drain state of interface.
virtual void drainRanks()
This function is DRAM specific.
uint32_t bytesPerBurst() const
A memory packet stores packets along with the timestamp of when the packet entered the queue,...
Tick readyTime
When the request will leave the controller.
void qosValue(const uint8_t qv)
Set the packet QoS value (interface compatibility with Packet)
const uint8_t pseudoChannel
pseudo channel num
BurstHelper * burstHelper
A pointer to the BurstHelper if this MemPacket is a split packet If not a split packet (common case),...
unsigned int size
The size of this DRAM packet in bytes. It is always equal to or smaller than the burst size.
Addr addr
The starting address of the packet.
bool isRead() const
Return true if its a read packet (interface compatibility with Packet)
const Tick entryTime
When the request entered the controller.
const PacketPtr pkt
This comes from the outside world.
RequestorID requestorId() const
Get the packet RequestorID (interface compatibility with Packet)
const uint8_t rank
Will be populated by address decoder.
void logResponse(BusState dir, RequestorID id, uint8_t _qos, Addr addr, uint64_t entries, double delay)
Called upon receiving a response, updates statistics and updates queues status.
uint8_t qosSchedule(std::initializer_list< Queues * > queues_ptr, uint64_t queue_entry_size, const PacketPtr pkt)
Assign priority to a packet by executing the configured QoS policy.
uint64_t totalWriteQueueSize
Total write request packets queue length in #packets.
void recordTurnaroundStats(BusState busState, BusState busStateNext)
Record statistics on turnarounds based on busStateNext and busState values.
BusState selectNextBusState()
Returns next bus direction (READ or WRITE) based on configured policy.
const std::unique_ptr< TurnaroundPolicy > turnPolicy
QoS Bus Turnaround Policy: selects the bus direction (READ/WRITE)
uint8_t numPriorities() const
Gets the total number of priority levels in the QoS memory controller.
uint64_t totalReadQueueSize
Total read request packets queue length in #packets.
System * system() const
read the system pointer
BusState busStateNext
bus state for next request event triggered
uint8_t schedule(RequestorID id, uint64_t data)
void logRequest(BusState dir, RequestorID id, uint8_t _qos, Addr addr, uint64_t entries)
Called upon receiving a request; updates statistics and queue status.
void sample(const U &v, int n=1)
Add a value to the distribution n times.
size_type size() const
Return the number of elements, always 1 for a scalar.
DRAMInterface declaration.
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
bool contains(const Addr &a) const
Determine if the range contains an address.
Addr start() const
Get the start address of the range.
std::string to_string() const
Get a string representation of the range.
static constexpr int ceilLog2(const T &n)
static constexpr T divCeil(const T &a, const U &b)
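
divCeil and ceilLog2 are the usual integer helpers; for example, the number of bursts a packet needs is a ceiling division of its size by the burst size. A tiny sketch of the standard formulation (named div_ceil here to keep it separate from the gem5 helper):

    #include <cassert>

    template <typename T, typename U>
    constexpr T div_ceil(const T& a, const U& b) { return (a + b - 1) / b; }

    int main()
    {
        assert(div_ceil(200, 64) == 4);   // a 200-byte packet needs four 64-byte bursts
        assert(div_ceil(128, 64) == 2);
        return 0;
    }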
void signalDrainDone() const
Signal that an object is drained.
DrainState drainState() const
Return the current drain state of an object.
DrainState
Object drain/handover states.
@ Draining
Draining buffers pending serialization/handover.
@ Drained
Buffers drained, ready for serialization/handover.
bool scheduled() const
Determine if the current event is scheduled.
#define panic(...)
This implements a cprintf based panic() function.
#define fatal(...)
This implements a cprintf based fatal() function.
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
virtual void startup()
startup() is the final initialization call before simulation.
virtual Port & getPort(const std::string &if_name, PortID idx=InvalidPortID)
Get a port with a given name and index.
MemInterface declaration.
const FlagsType nonan
Don't print if this is NAN.
const FlagsType nozero
Don't print if this is zero.
Copyright (c) 2024 - Pranith Kumar Copyright (c) 2020 Inria All rights reserved.
Tick curTick()
The universal simulation clock.
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
int16_t PortID
Port index/ID type, and a symbolic name for an invalid port id.
uint64_t Tick
Tick count type.
statistics::Formula & simSeconds
NVMInterface declaration.
statistics::Scalar writeReqs
statistics::Scalar mergedWrBursts
statistics::Scalar readReqs
statistics::Scalar servicedByWrQ
statistics::Histogram rdPerTurnAround
statistics::Vector readPktSize
statistics::Scalar numWrRetry
statistics::Scalar totGap
statistics::Scalar numRdRetry
statistics::Scalar readBursts
void regStats() override
Callback to set stat parameters.
statistics::Vector requestorReadTotalLat
statistics::Vector requestorWriteTotalLat
statistics::Vector requestorWriteBytes
statistics::Scalar writeBursts
statistics::Vector writePktSize
statistics::Histogram wrPerTurnAround
statistics::Vector requestorWriteAccesses
statistics::Scalar bytesReadSys
statistics::Average avgRdQLen
statistics::Vector requestorReadAccesses
statistics::Scalar bytesWrittenSys
statistics::Average avgWrQLen
statistics::Vector wrQLenPdf
statistics::Scalar bytesReadWrQ
statistics::Vector requestorReadBytes
statistics::Vector rdQLenPdf
const std::string & name()