#include "debug/DRAM.hh"
#include "debug/Drain.hh"
#include "debug/MemCtrl.hh"
#include "debug/NVM.hh"
#include "debug/QOS.hh"
port(name() + ".port", *this), isTimingMode(false),
retryRdReq(false), retryWrReq(false),
respondEvent([this] {processRespondEvent(dram, respQueue,
                     respondEvent, retryRdReq); }, name()),
readBufferSize(dram->readBufferSize),
writeBufferSize(dram->writeBufferSize),
writeHighThreshold(writeBufferSize * p.write_high_thresh_perc / 100.0),
writeLowThreshold(writeBufferSize * p.write_low_thresh_perc / 100.0),
minWritesPerSwitch(p.min_writes_per_switch),
minReadsPerSwitch(p.min_reads_per_switch),
writesThisTime(0), readsThisTime(0),
memSchedPolicy(p.mem_sched_policy),
frontendLatency(p.static_frontend_latency),
backendLatency(p.static_backend_latency),
commandWindow(p.command_window),
DPRINTF(MemCtrl, "Setting up controller\n");

readQueue.resize(p.qos_priorities);
writeQueue.resize(p.qos_priorities);

dram->setCtrl(this, commandWindow);
if (p.write_low_thresh_perc >= p.write_high_thresh_perc)
    fatal("Write buffer low threshold %d must be smaller than the "
          "high threshold %d\n", p.write_low_thresh_perc,
          p.write_high_thresh_perc);
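The two thresholds bound a hysteresis band on the write queue: the controller starts draining writes once occupancy reaches writeHighThreshold and keeps draining until it drops below writeLowThreshold, which is why the low percentage must be strictly smaller. A minimal standalone sketch of the arithmetic, assuming example parameter values (64 entries, 85/50 percent) rather than anything taken from this file:

    // Hypothetical illustration of the hysteresis arithmetic above;
    // buffer size and percentages are assumed example values.
    #include <cassert>
    #include <cstdint>

    int main()
    {
        const uint32_t writeBufferSize = 64;         // assumed entries
        const uint32_t write_high_thresh_perc = 85;  // assumed parameter
        const uint32_t write_low_thresh_perc = 50;   // assumed parameter

        // Same arithmetic as the initializer list above.
        const uint32_t writeHighThreshold =
            writeBufferSize * write_high_thresh_perc / 100.0;
        const uint32_t writeLowThreshold =
            writeBufferSize * write_low_thresh_perc / 100.0;

        // Mirrors the fatal() check: low must stay below high.
        assert(writeLowThreshold < writeHighThreshold);
        // 64 * 0.85 = 54 entries to start draining; 64 * 0.50 = 32 to stop.
        return 0;
    }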
fatal("MemCtrl %s is unconnected!\n", name());
panic("Can't handle address range for packet %s\n", pkt->print());
167 "Read queue limit %d, current size %d, entries needed %d\n",
179 "Write queue limit %d, current size %d, entries needed %d\n",
194 assert(pkt_count != 0);
204 unsigned pktsServicedByWrQ = 0;
209 for (
int cnt = 0; cnt < pkt_count; ++cnt) {
unsigned size = std::min((addr | (burst_size - 1)) + 1,
                         base_addr + pkt->getSize()) - addr;
bool foundInWrQ = false;
for (const auto& p : vec) {

    if (p->addr <= addr &&
        ((addr + size) <= (p->addr + p->size))) {
234 "Read to addr %#x with size %d serviced by "
if (pkt_count > 1 && burst_helper == NULL) {
    DPRINTF(MemCtrl, "Read to addr %#x translates to %d "
            "memory requests\n", pkt->getAddr(), pkt_count);
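A packet larger than one burst is tracked by a BurstHelper: the original packet is answered only after every one of its bursts has been serviced. A minimal sketch of that bookkeeping, using the two counters documented in the cross-references below (burstCount, burstsServiced); burstDone() is an illustrative helper, not a gem5 method:

    // Hedged sketch of the split-packet bookkeeping.
    struct BurstHelperSketch
    {
        const unsigned int burstCount;    // bursts required for the packet
        unsigned int burstsServiced = 0;  // bursts completed so far

        explicit BurstHelperSketch(unsigned int count) : burstCount(count) {}

        // True once the final burst for the system packet is serviced.
        bool burstDone() { return ++burstsServiced == burstCount; }
    };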
addr = (addr | (burst_size - 1)) + 1;
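Since burst_size is a power of two, OR-ing in burst_size - 1 rounds the address up to the last byte of the current burst, and adding one lands on the start of the next burst. A standalone demonstration, assuming a 64-byte burst:

    // Illustration of the burst-advance idiom above; values are examples.
    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const uint64_t burst_size = 64;  // must be a power of two
        uint64_t addr = 0x107;           // arbitrary unaligned address

        // 0x107 | 0x3f = 0x13f (end of burst); + 1 = 0x140 (next burst).
        addr = (addr | (burst_size - 1)) + 1;
        std::printf("next burst starts at %#" PRIx64 "\n", addr);  // 0x140
        return 0;
    }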
if (pktsServicedByWrQ == pkt_count) {

if (burst_helper != NULL)
for (int cnt = 0; cnt < pkt_count; ++cnt) {
    unsigned size = std::min((addr | (burst_size - 1)) + 1,
                             base_addr + pkt->getSize()) - addr;
356 "Merging write burst with existing queue entry\n");
364 addr = (
addr | (burst_size - 1)) + 1;
381 for (
const auto& packet : queue) {
393 for (
const auto& packet : queue) {
411 "Should only see read and writes at memory controller\n");
420 "Can't handle address range for packet %s\n", pkt->
print());
426 unsigned size = pkt->
getSize();
489 "processRespondEvent(): Some req has reached its readyTime\n");
519 if (!queue.empty()) {
520 assert(queue.front()->readyTime >=
curTick());
522 schedule(resp_event, queue.front()->readyTime);
DPRINTF(Drain, "Controller done draining\n");

retry_rd_req = false;
MemPacketQueue::iterator

MemPacketQueue::iterator ret = queue.end();

if (!queue.empty()) {
    if (queue.size() == 1) {

for (auto i = queue.begin(); i != queue.end(); ++i) {
std::tie(ret, col_allowed_at)
        = chooseNextFRFCFS(queue, extra_col_delay, mem_intr);

panic("No scheduling policy chosen\n");
auto selected_pkt_it = queue.end();

const Tick min_col_at = std::max(mem_intr->nextBurstAt + extra_col_delay,
                                 curTick());

std::tie(selected_pkt_it, col_allowed_at) =
        mem_intr->chooseNextFRFCFS(queue, min_col_at);

if (selected_pkt_it == queue.end()) {

return std::make_pair(selected_pkt_it, col_allowed_at);
622 "Can't handle address range for packet %s\n", pkt->
print());
657 auto current_it = it++;
670 return (cmd_tick - burst_offset);
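The modulo subtraction above aligns a tick down to the start of its command window. For example, with an assumed 6000-tick window, tick 13500 falls 1500 ticks into its window and aligns to 12000:

    // Standalone sketch of the window alignment above.
    #include <cstdint>

    using Tick = uint64_t;

    Tick burstWindowOf(Tick cmd_tick, Tick commandWindow)
    {
        Tick burst_offset = cmd_tick % commandWindow;  // distance into window
        return cmd_tick - burst_offset;                // window-aligned tick
    }
    // burstWindowOf(13500, 6000) == 12000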
Tick cmd_at = cmd_tick;

while (burstTicks.count(burst_tick) >= max_cmds_per_burst) {
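burstTicks is a multiset of window-aligned ticks, so count() gives the number of commands already scheduled in a window; the loop above pushes the command out one window at a time until a slot is free. A simplified sketch of that search (findFreeWindow is an illustrative name, not the gem5 function):

    // Hedged sketch of the single-command slot search above.
    #include <cstddef>
    #include <cstdint>
    #include <unordered_set>

    using Tick = uint64_t;

    Tick findFreeWindow(std::unordered_multiset<Tick> &burstTicks,
                        Tick burst_tick, Tick commandWindow,
                        std::size_t max_cmds_per_burst)
    {
        // count() is per-window occupancy; slide one window at a time
        // until the command fits under the per-window limit.
        while (burstTicks.count(burst_tick) >= max_cmds_per_burst)
            burst_tick += commandWindow;
        burstTicks.insert(burst_tick);  // claim a slot in that window
        return burst_tick;
    }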
Tick max_multi_cmd_split)

Tick cmd_at = cmd_tick;

Tick burst_offset = 0;

while (max_multi_cmd_split > (first_cmd_offset + burst_offset)) {

Tick first_cmd_tick = burst_tick - std::min(burst_offset, burst_tick);

bool first_can_issue = false;
bool second_can_issue = false;

while (!first_can_issue || !second_can_issue) {
    bool same_burst = (burst_tick == first_cmd_tick);
    auto first_cmd_count = burstTicks.count(first_cmd_tick);
    auto second_cmd_count = same_burst ? first_cmd_count + 1 :
            burstTicks.count(burst_tick);

    first_can_issue = first_cmd_count < max_cmds_per_burst;
    second_can_issue = second_cmd_count < max_cmds_per_burst;
if (!second_can_issue) {
    DPRINTF(MemCtrl, "Contention (cmd2) found on command bus at %d\n",
            burst_tick);

bool gap_violated = !same_burst &&
        ((burst_tick - first_cmd_tick) > max_multi_cmd_split);

if (!first_can_issue || (!second_can_issue && gap_violated)) {
    DPRINTF(MemCtrl, "Contention (cmd1) found on command bus at %d\n",
            first_cmd_tick);
DPRINTF(MemCtrl,
        "Access to %#x, ready at %lld next burst at %lld.\n",
        mem_pkt->addr, mem_pkt->readyTime, mem_intr->nextBurstAt);

bool mem_busy = true;

mem_busy = mem_intr->isBusy(read_queue_empty, all_writes_nvm);
876 bool& retry_wr_req) {
890 switched_cmd_type?
"[turnaround triggered]":
"");
892 if (switched_cmd_type) {
895 "Switching to writes after %d reads with %d reads "
901 "Switching to reads after %d writes with %d writes "
bool switch_to_writes = false;

"Switching to writes due to read queue empty\n");
switch_to_writes = true;

DPRINTF(Drain, "MemCtrl controller done draining\n");
bool read_found = false;
MemPacketQueue::iterator to_read;

"Checking READ queue [%d] priority [%d elements]\n",
prio, queue->size());

to_read = chooseNext((*queue), switched_cmd_type ?
                     minWriteToReadDataGap() : 0, mem_intr);
if (to_read != queue->end()) {

auto mem_pkt = *to_read;

"Command for %#x, issued at %lld.\n", mem_pkt->addr, cmd_at);

assert(mem_pkt->readyTime >= curTick());

mem_pkt->qosValue(), mem_pkt->getAddr(), 1,
mem_pkt->readyTime - mem_pkt->entryTime);

if (resp_queue.empty()) {
    schedule(resp_event, mem_pkt->readyTime);

assert(resp_queue.back()->readyTime <= mem_pkt->readyTime);

resp_queue.push_back(mem_pkt);
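Because bursts complete in the order they are issued, responses are appended with non-decreasing readyTime; the assert above enforces that, and it means a single pending event (for the queue front) always suffices. A condensed sketch of the invariant, where eventScheduled/eventWhen stand in for gem5's schedule(resp_event, ...) machinery:

    // Hedged sketch of the in-order response queue above.
    #include <cassert>
    #include <cstdint>
    #include <deque>

    using Tick = uint64_t;
    struct RespPkt { Tick readyTime; };

    void enqueueResponse(std::deque<RespPkt *> &respQ, RespPkt *pkt,
                         bool &eventScheduled, Tick &eventWhen)
    {
        if (respQ.empty()) {
            eventScheduled = true;       // schedule(resp_event, readyTime)
            eventWhen = pkt->readyTime;
        } else {
            // In-order completion: readyTime never decreases along the queue.
            assert(respQ.back()->readyTime <= pkt->readyTime);
        }
        respQ.push_back(pkt);
    }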
switch_to_writes = true;

readQueue[mem_pkt->qosValue()].erase(to_read);

if (switch_to_writes) {

bool write_found = false;
MemPacketQueue::iterator to_write;

"Checking WRITE queue [%d] priority [%d elements]\n",
prio, queue->size());
if (to_write != queue->end()) {

auto mem_pkt = *to_write;

"Command for %#x, issued at %lld.\n", mem_pkt->addr, cmd_at);

mem_pkt->qosValue(), mem_pkt->getAddr(), 1,
mem_pkt->readyTime - mem_pkt->entryTime);

writeQueue[mem_pkt->qosValue()].erase(to_write);

bool below_threshold =
    totalWriteQueueSize + minWritesPerSwitch < writeLowThreshold;

retry_wr_req = false;
: statistics::Group(&_ctrl),
ADD_STAT(readReqs, statistics::units::Count::get(),
         "Number of read requests accepted"),
ADD_STAT(writeReqs, statistics::units::Count::get(),
         "Number of write requests accepted"),

ADD_STAT(readBursts, statistics::units::Count::get(),
         "Number of controller read bursts, including those serviced by "
         "the write queue"),
ADD_STAT(writeBursts, statistics::units::Count::get(),
         "Number of controller write bursts, including those merged in "
         "the write queue"),
ADD_STAT(servicedByWrQ, statistics::units::Count::get(),
         "Number of controller read bursts serviced by the write queue"),
ADD_STAT(mergedWrBursts, statistics::units::Count::get(),
         "Number of controller write bursts merged with an existing one"),

ADD_STAT(neitherReadNorWriteReqs, statistics::units::Count::get(),
         "Number of requests that are neither read nor write"),
ADD_STAT(avgRdQLen, statistics::units::Rate<
         statistics::units::Count, statistics::units::Tick>::get(),
         "Average read queue length when enqueuing"),
ADD_STAT(avgWrQLen, statistics::units::Rate<
         statistics::units::Count, statistics::units::Tick>::get(),
         "Average write queue length when enqueuing"),
ADD_STAT(numRdRetry, statistics::units::Count::get(),
         "Number of times read queue was full causing retry"),
ADD_STAT(numWrRetry, statistics::units::Count::get(),
         "Number of times write queue was full causing retry"),

ADD_STAT(readPktSize, statistics::units::Count::get(),
         "Read request sizes (log2)"),
ADD_STAT(writePktSize, statistics::units::Count::get(),
         "Write request sizes (log2)"),

ADD_STAT(rdQLenPdf, statistics::units::Count::get(),
         "What read queue length does an incoming req see"),
ADD_STAT(wrQLenPdf, statistics::units::Count::get(),
         "What write queue length does an incoming req see"),

ADD_STAT(rdPerTurnAround, statistics::units::Count::get(),
         "Reads before turning the bus around for writes"),
ADD_STAT(wrPerTurnAround, statistics::units::Count::get(),
         "Writes before turning the bus around for reads"),

ADD_STAT(bytesReadWrQ, statistics::units::Byte::get(),
         "Total number of bytes read from write queue"),
ADD_STAT(bytesReadSys, statistics::units::Byte::get(),
         "Total read bytes from the system interface side"),
ADD_STAT(bytesWrittenSys, statistics::units::Byte::get(),
         "Total written bytes from the system interface side"),

ADD_STAT(avgRdBWSys, statistics::units::Rate<
         statistics::units::Byte, statistics::units::Second>::get(),
         "Average system read bandwidth in Byte/s"),
ADD_STAT(avgWrBWSys, statistics::units::Rate<
         statistics::units::Byte, statistics::units::Second>::get(),
         "Average system write bandwidth in Byte/s"),
1227 "Total gap between requests"),
1228 ADD_STAT(avgGap, statistics::units::Rate<
1229 statistics::units::
Tick, statistics::units::Count>::get(),
1230 "Average gap between requests"),
ADD_STAT(requestorReadBytes, statistics::units::Byte::get(),
         "Per-requestor bytes read from memory"),
ADD_STAT(requestorWriteBytes, statistics::units::Byte::get(),
         "Per-requestor bytes write to memory"),
ADD_STAT(requestorReadRate, statistics::units::Rate<
         statistics::units::Byte, statistics::units::Second>::get(),
         "Per-requestor bytes read from memory rate"),
ADD_STAT(requestorWriteRate, statistics::units::Rate<
         statistics::units::Byte, statistics::units::Second>::get(),
         "Per-requestor bytes write to memory rate"),
ADD_STAT(requestorReadAccesses, statistics::units::Count::get(),
         "Per-requestor read serviced memory accesses"),
ADD_STAT(requestorWriteAccesses, statistics::units::Count::get(),
         "Per-requestor write serviced memory accesses"),
ADD_STAT(requestorReadTotalLat, statistics::units::Tick::get(),
         "Per-requestor read total memory access latency"),
ADD_STAT(requestorWriteTotalLat, statistics::units::Tick::get(),
         "Per-requestor write total memory access latency"),
ADD_STAT(requestorReadAvgLat, statistics::units::Rate<
         statistics::units::Tick, statistics::units::Count>::get(),
         "Per-requestor read average memory access latency"),
ADD_STAT(requestorWriteAvgLat, statistics::units::Rate<
         statistics::units::Tick, statistics::units::Count>::get(),
         "Per-requestor write average memory access latency")
using namespace statistics;

assert(ctrl.system());
const auto max_requestors = ctrl.system()->maxRequestors();

avgRdQLen.precision(2);
avgWrQLen.precision(2);
readPktSize.init(ceilLog2(ctrl.system()->cacheLineSize()) + 1);
writePktSize.init(ceilLog2(ctrl.system()->cacheLineSize()) + 1);

rdQLenPdf.init(ctrl.readBufferSize);
wrQLenPdf.init(ctrl.writeBufferSize);

rdPerTurnAround
    .init(ctrl.readBufferSize)
wrPerTurnAround
    .init(ctrl.writeBufferSize)

avgRdBWSys.precision(8);
avgWrBWSys.precision(8);
avgGap.precision(2);
requestorReadBytes
    .init(max_requestors)
requestorWriteBytes
    .init(max_requestors)

requestorReadAccesses
    .init(max_requestors)
requestorWriteAccesses
    .init(max_requestors)

requestorReadTotalLat
    .init(max_requestors)
requestorWriteTotalLat
    .init(max_requestors)
requestorWriteAvgLat
for (int i = 0; i < max_requestors; i++) {
    const std::string requestor = ctrl.system()->getRequestorName(i);
    requestorReadBytes.subname(i, requestor);
    requestorReadRate.subname(i, requestor);
    requestorWriteBytes.subname(i, requestor);
    requestorWriteRate.subname(i, requestor);
    requestorReadAccesses.subname(i, requestor);
    requestorWriteAccesses.subname(i, requestor);
    requestorReadTotalLat.subname(i, requestor);
    requestorReadAvgLat.subname(i, requestor);
    requestorWriteTotalLat.subname(i, requestor);
    requestorWriteAvgLat.subname(i, requestor);
avgGap = totGap / (readReqs + writeReqs);

requestorReadRate = requestorReadBytes / simSeconds;
requestorWriteRate = requestorWriteBytes / simSeconds;
requestorReadAvgLat = requestorReadTotalLat / requestorReadAccesses;
requestorWriteAvgLat = requestorWriteTotalLat / requestorWriteAccesses;
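These Formula stats are evaluated lazily at stats-dump time; the arithmetic itself is plain division. For instance, with assumed totals of 1,000,000 ticks of gap over 800 reads and 200 writes, avgGap comes out to 1000 ticks per request:

    // Plain-C++ analogue of the avgGap formula above; numbers are assumed.
    #include <cstdio>

    int main()
    {
        const double totGap = 1000000.0;  // total inter-request gap (ticks)
        const double readReqs = 800.0;
        const double writeReqs = 200.0;

        const double avgGap = totGap / (readReqs + writeReqs);
        std::printf("avgGap = %.2f ticks\n", avgGap);  // prints 1000.00
        return 0;
    }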
panic_if(!found, "Can't handle address range for packet %s\n",
         pkt->print());

if (if_name != "port") {
DPRINTF(Drain, "Memory controller not drained, write: %d, read: %d,"
        " resp: %d\n", totalWriteQueueSize, totalReadQueueSize,
        respQueue.size());
return ctrl.getAddrRanges();

if (!queue.trySatisfyFunctional(pkt)) {

ctrl.recvFunctional(pkt);

return ctrl.recvAtomic(pkt);

return ctrl.recvAtomicBackdoor(pkt, backdoor);

return ctrl.recvTimingReq(pkt);
Tick curTick()
The universal simulation clock.
#define fatal(...)
This implements a cprintf based fatal() function.
int16_t PortID
Port index/ID type, and a symbolic name for an invalid port id.
const std::string & cmdString() const
Return the string name of the cmd field (for debugging and tracing).
uint64_t totalReadQueueSize
Total read request packets queue length in #packets.
virtual Port & getPort(const std::string &if_name, PortID idx=InvalidPortID)
Get a port with a given name and index.
virtual void respondEvent(uint8_t rank)
This function is DRAM specific.
bool addToReadQueue(PacketPtr pkt, unsigned int pkt_count, MemInterface *mem_intr)
When a new read comes in, first check if the write queue has a pending request to the same address....
const Tick backendLatency
Pipeline latency of the backend and PHY.
uint32_t writeLowThreshold
statistics::Scalar bytesReadWrQ
EventFunctionWrapper nextReqEvent
virtual Tick verifySingleCmd(Tick cmd_tick, Tick max_cmds_per_burst, bool row_cmd)
Check for command bus contention for single cycle command.
void logResponse(BusState dir, RequestorID id, uint8_t _qos, Addr addr, uint64_t entries, double delay)
Called upon receiving a response, updates statistics and updates queues status.
bool recvTimingReq(PacketPtr) override
Receive a timing request from the peer.
BurstHelper * burstHelper
A pointer to the BurstHelper if this MemPacket is a split packet. If not a split packet (common case),...
virtual bool readsWaitingToIssue() const
This function is NVM specific.
void sendRetryReq()
Send a retry to the request port that previously attempted a sendTimingReq to this response port and ...
Tick readyTime
When will request leave the controller.
unsigned int size
The size of this dram packet in bytes It is always equal or smaller than the burst size.
uint32_t numWritesQueued
NVM specific variable, but declaring it here allows treating different interfaces in a more general way...
virtual void suspend()
This function is DRAM specific.
std::unordered_set< Addr > isInWriteQueue
To avoid iterating over the write queue to check for overlapping transactions, maintain a set of burst addresses...
DrainState drainState() const
Return the current drain state of an object.
virtual Tick recvAtomicBackdoor(PacketPtr pkt, MemBackdoorPtr &backdoor)
statistics::Vector requestorWriteAccesses
MemoryPort(const std::string &name, MemCtrl &_ctrl)
AddrRange getAddrRange() const
Get the address range.
bool contains(const Addr &a) const
Determine if the range contains an address.
const FlagsType nozero
Don't print if this is zero.
statistics::Vector requestorReadTotalLat
const PacketPtr pkt
This comes from the outside world.
uint8_t qosValue() const
QoS Value getter Returns 0 if QoS value was never set (constructor default).
virtual bool allRanksDrained() const =0
Check drain state of interface.
virtual Addr burstAlign(Addr addr, MemInterface *mem_intr) const
Burst-align an address.
The memory controller is a single-channel memory controller capturing the most important timing constraints...
bool cacheResponding() const
uint32_t writeHighThreshold
EventFunctionWrapper respondEvent
const FlagsType nonan
Don't print if this is NAN.
virtual void startup() override
startup() is the final initialization call before simulation.
statistics::Vector requestorWriteTotalLat
std::vector< MemPacketQueue > & selQueue(bool is_read)
Select either the read or write queue.
virtual Tick doBurstAccess(MemPacket *mem_pkt, MemInterface *mem_intr)
Actually do the burst based on media specific access function.
enums::MemSched memSchedPolicy
Memory controller configuration initialized based on parameter values.
statistics::Vector readPktSize
uint32_t headerDelay
The extra delay from seeing the packet until the header is transmitted.
MemCtrl(const MemCtrlParams &p)
BusState busStateNext
bus state for next request event triggered
const uint32_t minReadsPerSwitch
BusState busState
Bus state used to control the read/write switching and drive the scheduling of the next request.
uint8_t qosSchedule(std::initializer_list< Queues * > queues_ptr, uint64_t queue_entry_size, const PacketPtr pkt)
Assign priority to a packet by executing the configured QoS policy.
void sample(const U &v, int n=1)
Add a value to the distribution n times.
virtual void chooseRead(MemPacketQueue &queue)
This function is NVM specific.
uint32_t readBufferSize
The following are basic design parameters of the memory controller, and are initialized based on parameter values...
statistics::Scalar bytesReadSys
bool inReadBusState(bool next_state) const
Check the current direction of the memory channel.
bool retryRdReq
Remember if we have to retry a request when available.
statistics::Vector requestorReadAccesses
const Tick entryTime
When did request enter the controller.
Tick minWriteToReadDataGap() const
const std::unique_ptr< TurnaroundPolicy > turnPolicy
QoS Bus Turnaround Policy: selects the bus direction (READ/WRITE)
statistics::Average avgWrQLen
uint32_t payloadDelay
The extra pipelining delay from seeing the packet until the end of payload is transmitted by the component...
virtual Tick recvAtomic(PacketPtr pkt)
statistics::Vector writePktSize
std::vector< MemPacketQueue > writeQueue
virtual void drainRanks()
This function is DRAM specific.
DrainState
Object drain/handover states.
bool inWriteBusState(bool next_state) const
Check the current direction of the memory channel.
uint8_t schedule(RequestorID id, uint64_t data)
virtual std::pair< Tick, Tick > doBurstAccess(MemPacket *mem_pkt, Tick next_burst_at, const std::vector< MemPacketQueue > &queue)=0
This function performs the burst and update stats.
Addr addr
The starting address of the packet.
virtual void processRespondEvent(MemInterface *mem_intr, MemPacketQueue &queue, EventFunctionWrapper &resp_event, bool &retry_rd_req)
virtual Tick minWriteToReadDataGap()
Calculate the minimum delay used when scheduling a write-to-read transition.
MemoryPort port
Our incoming port, for a multi-ported controller add a crossbar in front of it.
General interface to memory device. Includes functions and parameters shared across media types.
virtual bool isBusy(bool read_queue_empty, bool all_writes_nvm)=0
This function checks if ranks are busy.
virtual void setupRank(const uint8_t rank, const bool is_read)=0
Setup the rank based on packet received.
statistics::Vector requestorReadBytes
void functionalAccess(PacketPtr pkt)
Perform an untimed memory read or write without changing anything but the memory itself.
statistics::Scalar numWrRetry
virtual std::string name() const
RequestorID requestorId() const
void print(std::ostream &o, int verbosity=0, const std::string &prefix="") const
BusState selectNextBusState()
Returns next bus direction (READ or WRITE) based on configured policy.
A queued port is a port that has an infinite queue for outgoing packets and thus decouples the module...
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
bool readQueueFull(unsigned int pkt_count) const
Check if the read queue has room for more entries.
uint64_t Tick
Tick count type.
virtual void recvFunctional(PacketPtr pkt)
virtual bool packetReady(MemPacket *pkt, MemInterface *mem_intr)
Determine if there is a packet that can issue.
virtual bool recvTimingReq(PacketPtr pkt)
A memory packet stores packets along with the timestamp of when the packet entered the queue,...
virtual Tick commandOffset() const =0
DrainState drain() override
Draining is the process of clearing out the states of SimObjects. These are the SimObjects that are pa...
virtual AddrRangeList getAddrRanges()
System * system() const
read the system pointer
virtual void init() override
init() is called after all C++ SimObjects have been created and all ports are connected.
const uint8_t pseudoChannel
pseudo channel num
virtual bool burstReady(MemPacket *pkt) const =0
Check if a burst operation can be issued to the interface.
bool recvFunctionalLogic(PacketPtr pkt, MemInterface *mem_intr)
void qosValue(const uint8_t qv)
Set the packet QoS value (interface compatibility with Packet)
virtual void pruneBurstTick()
Remove commands that have already issued from burstTicks.
std::unique_ptr< Packet > pendingDelete
Upstream caches need this packet until true is returned, so hold it for deletion until a subsequent call...
uint64_t totalWriteQueueSize
Total write request packets queue length in #packets.
virtual void checkRefreshState(uint8_t rank)
This function is DRAM specific.
virtual bool allIntfDrained() const
Ensure that all interfaces have drained commands.
statistics::Vector rdQLenPdf
bool isConnected() const
Is this port currently connected to a peer?
statistics::Scalar bytesWrittenSys
void recvFunctional(PacketPtr pkt) override
Receive a functional request packet from the peer.
statistics::Scalar writeReqs
unsigned int burstsServiced
Number of bursts serviced so far for a system packet.
void regStats() override
Callback to set stat parameters.
RequestorID requestorId() const
Get the packet RequestorID (interface compatibility with Packet)
@ Drained
Buffers drained, ready for serialization/handover.
bool isTimingMode() const
Is the system in timing mode?
Tick minReadToWriteDataGap() const
bool isTimingMode
Remember if the memory system is in timing mode.
void pushLabel(const std::string &lbl)
Push label for PrintReq (safe to call unconditionally).
void popLabel()
Pop label for PrintReq (safe to call unconditionally).
bool needsResponse() const
statistics::Scalar numRdRetry
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
std::deque< MemPacket * > respQueue
Response queue where read packets wait after we're done working with them, but it's not time to send ...
const Tick frontendLatency
Pipeline latency of the controller frontend.
statistics::Vector wrQLenPdf
Tick recvAtomicBackdoor(PacketPtr pkt, MemBackdoorPtr &backdoor) override
Receive an atomic request packet from the peer, and optionally provide a backdoor to the data being a...
virtual bool pktSizeCheck(MemPacket *mem_pkt, MemInterface *mem_intr) const
Check if mem pkt's size is sane.
const std::string & name()
statistics::Scalar servicedByWrQ
statistics::Scalar totGap
void sendRangeChange() const
Called by the owner to send a range change.
Tick nextBurstAt
Till when the controller must wait before issuing next RD/WR burst?
void getBackdoor(MemBackdoorPtr &bd_ptr)
virtual std::pair< MemPacketQueue::iterator, Tick > chooseNextFRFCFS(MemPacketQueue &queue, Tick extra_col_delay, MemInterface *mem_intr)
For FR-FCFS policy, reorder the read/write queue depending on row buffer hits and earliest bursts available...
std::vector< MemPacketQueue > readQueue
The controller's main read and write queues, with support for QoS reordering.
statistics::Scalar readBursts
static constexpr T divCeil(const T &a, const U &b)
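divCeil carries no brief here; it is gem5's integer ceiling-division helper, used for splitting packets into bursts. A minimal sketch of the usual formulation, assuming positive operands rather than quoting gem5's header:

    // Hedged sketch of an integer ceiling division.
    template <typename T, typename U>
    static constexpr T
    divCeilSketch(const T &a, const U &b)
    {
        return (a + b - 1) / b;  // e.g. divCeilSketch(10, 4) == 3
    }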
std::unordered_multiset< Tick > burstTicks
Holds count of commands issued in burst window starting at defined Tick.
virtual bool respQEmpty()
void signalDrainDone() const
Signal that an object is drained.
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
statistics::Scalar readReqs
virtual Tick minReadToWriteDataGap()
Calculate the minimum delay used when scheduling a read-to-write transition.
static constexpr int ceilLog2(const T &n)
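ceilLog2 sizes the (log2) packet-size histograms registered above: a 64-byte cache line gives ceilLog2(64) + 1 = 7 buckets, covering sizes 1 through 64. A hedged sketch of the function for positive inputs, not copied from gem5's header:

    // Hedged sketch of a ceiling log2 for n >= 1.
    #include <cstdint>

    constexpr int floorLog2Sketch(uint64_t n)
    {
        int p = 0;
        while (n >>= 1)
            ++p;
        return p;
    }

    constexpr int ceilLog2Sketch(uint64_t n)
    {
        // Smallest k such that 2^k >= n; ceilLog2Sketch(64) == 6.
        return (n <= 1) ? 0 : floorLog2Sketch(n - 1) + 1;
    }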
virtual void accessAndRespond(PacketPtr pkt, Tick static_latency, MemInterface *mem_intr)
When a packet reaches its "readyTime" in the response Q, use the "access()" method in AbstractMemory ...
AddrRangeList getAddrRanges() const override
Get a list of the non-overlapping address ranges the owner is responsible for.
virtual Tick verifyMultiCmd(Tick cmd_tick, Tick max_cmds_per_burst, Tick max_multi_cmd_split=0)
Check for command bus contention for multi-cycle (2 currently) command.
Port & getPort(const std::string &if_name, PortID idx=InvalidPortID) override
Get a port with a given name and index.
virtual bool memBusy(MemInterface *mem_intr)
Checks if the memory interface is already busy.
Tick recvAtomic(PacketPtr pkt) override
Receive an atomic request packet from the peer.
Ports are used to interface objects to each other.
const uint8_t rank
Will be populated by address decoder.
Tick recvAtomicLogic(PacketPtr pkt, MemInterface *mem_intr)
virtual void startup()
startup() is the final initialization call before simulation.
bool writeQueueFull(unsigned int pkt_count) const
Check if the write queue has room for more entries.
virtual std::pair< MemPacketQueue::iterator, Tick > chooseNextFRFCFS(MemPacketQueue &queue, Tick min_col_at) const =0
For FR-FCFS policy, find first command that can issue. Function will be overridden by interface to select...
uint8_t numPriorities() const
Gets the total number of priority levels in the QoS memory controller.
statistics::Formula & simSeconds
statistics::Scalar writeBursts
statistics::Histogram rdPerTurnAround
A burst helper helps organize and manage a packet that is larger than the memory burst size.
virtual void processNextReqEvent(MemInterface *mem_intr, MemPacketQueue &resp_queue, EventFunctionWrapper &resp_event, EventFunctionWrapper &next_req_event, bool &retry_wr_req)
Bunch of things required to set up "events" in gem5. When event "respondEvent" occurs for example,...
void recordTurnaroundStats()
Record statistics on turnarounds based on busStateNext and busState values.
Tick getBurstWindow(Tick cmd_tick)
Calculate burst window aligned tick.
void addToWriteQueue(PacketPtr pkt, unsigned int pkt_count, MemInterface *mem_intr)
Decode the incoming pkt, create a mem_pkt and push to the back of the write queue.
uint8_t pseudoChannel
pseudo channel number used for HBM modeling
const uint32_t minWritesPerSwitch
uint32_t bytesPerBurst() const
virtual void drainResume() override
Resume execution after a successful drain.
const Tick commandWindow
Length of a command window, used to check command bandwidth.
statistics::Scalar mergedWrBursts
void schedTimingResp(PacketPtr pkt, Tick when)
Schedule the sending of a timing response.
void printQs() const
Used for debugging to observe the contents of the queues.
void access(PacketPtr pkt)
Perform an untimed memory access and update all the state (e.g.
const unsigned int burstCount
Number of bursts required for a system packet.
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
virtual bool writeRespQueueFull() const
This function is NVM specific.
bool isRead() const
Return true if its a read packet (interface compatibility with Packet)
virtual MemPacketQueue::iterator chooseNext(MemPacketQueue &queue, Tick extra_col_delay, MemInterface *mem_intr)
The memory scheduler/arbiter - picks which request needs to go next, based on the specified policy such ...
virtual Tick accessLatency() const =0
statistics::Average avgRdQLen
void logRequest(BusState dir, RequestorID id, uint8_t _qos, Addr addr, uint64_t entries)
Called upon receiving a request or updates statistics and updates queues status.
statistics::Histogram wrPerTurnAround
virtual void nonDetermReads(MemInterface *mem_intr)
Will access memory interface and select non-deterministic reads to issue.
statistics::Vector requestorWriteBytes
@ Draining
Draining buffers pending serialization/handover.
virtual MemPacket * decodePacket(const PacketPtr pkt, Addr pkt_addr, unsigned int size, bool is_read, uint8_t pseudo_channel=0)
Address decoder to figure out physical mapping onto ranks, banks, and rows.
bool scheduled() const
Determine if the current event is scheduled.
#define panic(...)
This implements a cprintf based panic() function.
virtual bool nvmWriteBlock(MemInterface *mem_intr)
Will check if all writes are for nvm interface and nvm's write resp queue is full.