gem5  v22.1.0.0
hbm_ctrl.cc
/*
 * Copyright (c) 2022 The Regents of the University of California
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/hbm_ctrl.hh"

#include "base/trace.hh"
#include "debug/DRAM.hh"
#include "debug/Drain.hh"
#include "debug/MemCtrl.hh"
#include "debug/QOS.hh"
#include "mem/dram_interface.hh"
#include "mem/mem_interface.hh"
#include "sim/system.hh"

namespace gem5
{

namespace memory
{

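// HBMCtrl models a single HBM channel operating in pseudo-channel mode: the
// channel is split into two pseudo channels (PC0 and PC1), each backed by
// its own DRAM interface, which share this controller's queues and command
// bus bookkeeping.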
HBMCtrl::HBMCtrl(const HBMCtrlParams &p) :
    MemCtrl(p),
    retryRdReqPC1(false), retryWrReqPC1(false),
    nextReqEventPC1([this] {processNextReqEvent(pc1Int, respQueuePC1,
                         respondEventPC1, nextReqEventPC1, retryWrReqPC1);},
                         name()),
    respondEventPC1([this] {processRespondEvent(pc1Int, respQueuePC1,
                         respondEventPC1, retryRdReqPC1); }, name()),
    pc1Int(p.dram_2),
    partitionedQ(p.partitioned_q)
{
    DPRINTF(MemCtrl, "Setting up HBM controller\n");

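    // pc0Int reuses the interface configured as MemCtrl::dram; pc1Int is
    // the separately configured second pseudo channel interface (dram_2).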
    pc0Int = dynamic_cast<DRAMInterface*>(dram);

    assert(dynamic_cast<DRAMInterface*>(p.dram_2) != nullptr);

    readBufferSize = pc0Int->readBufferSize + pc1Int->readBufferSize;
    writeBufferSize = pc0Int->writeBufferSize + pc1Int->writeBufferSize;

    fatal_if(!pc0Int, "Memory controller must have pc0 interface");
    fatal_if(!pc1Int, "Memory controller must have pc1 interface");

    pc0Int->setCtrl(this, commandWindow, 0);
    pc1Int->setCtrl(this, commandWindow, 1);

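    // With partitioned queues each pseudo channel may only use half of the
    // combined write buffer, so the thresholds are computed from half of
    // the configured percentages.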
    if (partitionedQ) {
        writeHighThreshold = (writeBufferSize * (p.write_high_thresh_perc/2)
                             / 100.0);
        writeLowThreshold = (writeBufferSize * (p.write_low_thresh_perc/2)
                             / 100.0);
    } else {
        writeHighThreshold = (writeBufferSize * p.write_high_thresh_perc
                             / 100.0);
        writeLowThreshold = (writeBufferSize * p.write_low_thresh_perc
                             / 100.0);
    }
}

void
HBMCtrl::init()
{
    MemCtrl::init();
}

void
HBMCtrl::startup()
{
    MemCtrl::startup();

    isTimingMode = system()->isTimingMode();
    if (isTimingMode) {
        // shift the bus busy time sufficiently far ahead that we never
        // have to worry about negative values when computing the time for
        // the next request, this will add an insignificant bubble at the
        // start of simulation
        pc1Int->nextBurstAt = curTick() + pc1Int->commandOffset();
    }
}

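// Atomic and functional accesses are simply routed to whichever pseudo
// channel's address range contains the packet address.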
Tick
HBMCtrl::recvAtomic(PacketPtr pkt)
{
    Tick latency = 0;

    if (pc0Int->getAddrRange().contains(pkt->getAddr())) {
        latency = MemCtrl::recvAtomicLogic(pkt, pc0Int);
    } else if (pc1Int->getAddrRange().contains(pkt->getAddr())) {
        latency = MemCtrl::recvAtomicLogic(pkt, pc1Int);
    } else {
        panic("Can't handle address range for packet %s\n", pkt->print());
    }

    return latency;
}

void
HBMCtrl::recvFunctional(PacketPtr pkt)
{
    bool found = MemCtrl::recvFunctionalLogic(pkt, pc0Int);

    if (!found) {
        found = MemCtrl::recvFunctionalLogic(pkt, pc1Int);
    }

    if (!found) {
        panic("Can't handle address range for packet %s\n", pkt->print());
    }
}

Tick
HBMCtrl::recvAtomicBackdoor(PacketPtr pkt, MemBackdoorPtr &backdoor)
{
    Tick latency = recvAtomic(pkt);

    if (pc0Int && pc0Int->getAddrRange().contains(pkt->getAddr())) {
        pc0Int->getBackdoor(backdoor);
    } else if (pc1Int && pc1Int->getAddrRange().contains(pkt->getAddr())) {
        pc1Int->getBackdoor(backdoor);
    } else {
        panic("Can't handle address range for packet %s\n",
              pkt->print());
    }
    return latency;
}

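// The per-pseudo-channel occupancy checks below apply when the queues are
// partitioned: each pseudo channel may only fill half of the combined
// read/write buffer.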
bool
HBMCtrl::writeQueueFullPC0(unsigned int neededEntries) const
{
    DPRINTF(MemCtrl,
            "Write queue limit %d, PC0 size %d, entries needed %d\n",
            writeBufferSize, writeQueueSizePC0, neededEntries);

    unsigned int wrsize_new = (writeQueueSizePC0 + neededEntries);
    return wrsize_new > (writeBufferSize/2);
}

bool
HBMCtrl::writeQueueFullPC1(unsigned int neededEntries) const
{
    DPRINTF(MemCtrl,
            "Write queue limit %d, PC1 size %d, entries needed %d\n",
            writeBufferSize, writeQueueSizePC1, neededEntries);

    unsigned int wrsize_new = (writeQueueSizePC1 + neededEntries);
    return wrsize_new > (writeBufferSize/2);
}

bool
HBMCtrl::readQueueFullPC0(unsigned int neededEntries) const
{
    DPRINTF(MemCtrl,
            "Read queue limit %d, PC0 size %d, entries needed %d\n",
            readBufferSize, readQueueSizePC0 + respQueue.size(),
            neededEntries);

    unsigned int rdsize_new = readQueueSizePC0 + respQueue.size()
                                  + neededEntries;
    return rdsize_new > (readBufferSize/2);
}

bool
HBMCtrl::readQueueFullPC1(unsigned int neededEntries) const
{
    DPRINTF(MemCtrl,
            "Read queue limit %d, PC1 size %d, entries needed %d\n",
            readBufferSize, readQueueSizePC1 + respQueuePC1.size(),
            neededEntries);

    unsigned int rdsize_new = readQueueSizePC1 + respQueuePC1.size()
                                  + neededEntries;
    return rdsize_new > (readBufferSize/2);
}

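// In shared (non-partitioned) queue mode a request from either pseudo
// channel can occupy any entry, so the check below sums both response
// queues against the full read buffer.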
bool
HBMCtrl::readQueueFull(unsigned int neededEntries) const
{
    DPRINTF(MemCtrl,
            "HBMCtrl: Read queue limit %d, entries needed %d\n",
            readBufferSize, neededEntries);

    unsigned int rdsize_new = totalReadQueueSize + respQueue.size() +
                              respQueuePC1.size() + neededEntries;
    return rdsize_new > readBufferSize;
}

bool
HBMCtrl::recvTimingReq(PacketPtr pkt)
{
    // This is where we enter from the outside world
    DPRINTF(MemCtrl, "recvTimingReq: request %s addr %#x size %d\n",
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());

    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
             "is responding");

    panic_if(!(pkt->isRead() || pkt->isWrite()),
             "Should only see read and writes at memory controller\n");

    // Calc avg gap between requests
    if (prevArrival != 0) {
        stats.totGap += curTick() - prevArrival;
    }
    prevArrival = curTick();

    // What type of media does this packet access?
    bool is_pc0;

    // TODO: make the interleaving bit across pseudo channels a parameter
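    // Bit 6 of the address selects the pseudo channel, i.e. accesses are
    // interleaved between PC0 and PC1 at a 64-byte granularity.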
    if (bits(pkt->getAddr(), 6) == 0) {
        is_pc0 = true;
    } else {
        is_pc0 = false;
    }

    // Find out how many memory packets a pkt translates to
    // If the burst size is equal or larger than the pkt size, then a pkt
    // translates to only one memory packet. Otherwise, a pkt translates to
    // multiple memory packets
    unsigned size = pkt->getSize();
    uint32_t burst_size = pc0Int->bytesPerBurst();
    unsigned offset = pkt->getAddr() & (burst_size - 1);
    unsigned int pkt_count = divCeil(offset + size, burst_size);

    // run the QoS scheduler and assign a QoS priority value to the packet
    qosSchedule({&readQueue, &writeQueue}, burst_size, pkt);

    // check local buffers and do not accept if full
    if (pkt->isWrite()) {
        if (is_pc0) {
            if (partitionedQ ? writeQueueFullPC0(pkt_count) :
                               writeQueueFull(pkt_count))
            {
                DPRINTF(MemCtrl, "Write queue full, not accepting\n");
                // remember that we have to retry this port
                MemCtrl::retryWrReq = true;
                stats.numWrRetry++;
                return false;
            } else {
                addToWriteQueue(pkt, pkt_count, pc0Int);
                stats.writeReqs++;
                stats.bytesWrittenSys += size;
            }
        } else {
            if (partitionedQ ? writeQueueFullPC1(pkt_count) :
                               writeQueueFull(pkt_count))
            {
                DPRINTF(MemCtrl, "Write queue full, not accepting\n");
                // remember that we have to retry this port
                retryWrReqPC1 = true;
                stats.numWrRetry++;
                return false;
            } else {
                addToWriteQueue(pkt, pkt_count, pc1Int);
                stats.writeReqs++;
                stats.bytesWrittenSys += size;
            }
        }
    } else {

        assert(pkt->isRead());
        assert(size != 0);

        if (is_pc0) {
            if (partitionedQ ? readQueueFullPC0(pkt_count) :
                               HBMCtrl::readQueueFull(pkt_count)) {
                DPRINTF(MemCtrl, "Read queue full, not accepting\n");
                // remember that we have to retry this port
                retryRdReqPC1 = true;
                stats.numRdRetry++;
                return false;
            } else {
                if (!addToReadQueue(pkt, pkt_count, pc0Int)) {
                    if (!nextReqEvent.scheduled()) {
                        DPRINTF(MemCtrl, "Request scheduled immediately\n");
                        schedule(nextReqEvent, curTick());
                    }
                }

                stats.readReqs++;
                stats.bytesReadSys += size;
            }
        } else {
            if (partitionedQ ? readQueueFullPC1(pkt_count) :
                               HBMCtrl::readQueueFull(pkt_count)) {
                DPRINTF(MemCtrl, "Read queue full, not accepting\n");
                // remember that we have to retry this port
                retryRdReqPC1 = true;
                stats.numRdRetry++;
                return false;
            } else {
                if (!addToReadQueue(pkt, pkt_count, pc1Int)) {
                    if (!nextReqEventPC1.scheduled()) {
                        DPRINTF(MemCtrl, "Request scheduled immediately\n");
                        schedule(nextReqEventPC1, curTick());
                    }
                }
                stats.readReqs++;
                stats.bytesReadSys += size;
            }
        }
    }

    return true;
}

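// Burst ticks from command windows that have already passed are pruned so
// that rowBurstTicks and colBurstTicks do not grow without bound.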
void
HBMCtrl::pruneRowBurstTick()
{
    auto it = rowBurstTicks.begin();
    while (it != rowBurstTicks.end()) {
        auto current_it = it++;
        if (MemCtrl::getBurstWindow(curTick()) > *current_it) {
            DPRINTF(MemCtrl, "Removing burstTick for %d\n", *current_it);
            rowBurstTicks.erase(current_it);
        }
    }
}

void
HBMCtrl::pruneColBurstTick()
{
    auto it = colBurstTicks.begin();
    while (it != colBurstTicks.end()) {
        auto current_it = it++;
        if (MemCtrl::getBurstWindow(curTick()) > *current_it) {
            DPRINTF(MemCtrl, "Removing burstTick for %d\n", *current_it);
            colBurstTicks.erase(current_it);
        }
    }
}

void
HBMCtrl::pruneBurstTick()
{
    pruneRowBurstTick();
    pruneColBurstTick();
}

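// HBM splits the command/address interface into separate row and column
// command buses shared by the two pseudo channels, so command bus
// contention is tracked per bus type in rowBurstTicks and colBurstTicks.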
Tick
HBMCtrl::verifySingleCmd(Tick cmd_tick, Tick max_cmds_per_burst, bool row_cmd)
{
    // start with assumption that there is no contention on command bus
    Tick cmd_at = cmd_tick;

    // get tick aligned to burst window
    Tick burst_tick = MemCtrl::getBurstWindow(cmd_tick);

    // verify that we have command bandwidth to issue the command
    // if not, iterate over next window(s) until slot found

    if (row_cmd) {
        while (rowBurstTicks.count(burst_tick) >= max_cmds_per_burst) {
            DPRINTF(MemCtrl, "Contention found on row command bus at %d\n",
                    burst_tick);
            burst_tick += commandWindow;
            cmd_at = burst_tick;
        }
        DPRINTF(MemCtrl, "Now can send a row cmd_at %d\n",
                cmd_at);
        rowBurstTicks.insert(burst_tick);

    } else {
        while (colBurstTicks.count(burst_tick) >= max_cmds_per_burst) {
            DPRINTF(MemCtrl, "Contention found on col command bus at %d\n",
                    burst_tick);
            burst_tick += commandWindow;
            cmd_at = burst_tick;
        }
        DPRINTF(MemCtrl, "Now can send a col cmd_at %d\n",
                cmd_at);
        colBurstTicks.insert(burst_tick);
    }
    return cmd_at;
}

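// Some commands need two slots on the row command bus (a two-cycle activate
// in the HBM interface model); verifyMultiCmd finds slots for both halves
// while keeping them within max_multi_cmd_split of each other.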
Tick
HBMCtrl::verifyMultiCmd(Tick cmd_tick, Tick max_cmds_per_burst,
                        Tick max_multi_cmd_split)
{

    // start with assumption that there is no contention on command bus
    Tick cmd_at = cmd_tick;

    // get tick aligned to burst window
    Tick burst_tick = MemCtrl::getBurstWindow(cmd_tick);

    // Command timing requirements are from 2nd command
    // Start with assumption that 2nd command will issue at cmd_at and
    // find prior slot for 1st command to issue
    // Given a maximum latency of max_multi_cmd_split between the commands,
    // find the burst at the maximum latency prior to cmd_at
    Tick burst_offset = 0;
    Tick first_cmd_offset = cmd_tick % commandWindow;
    while (max_multi_cmd_split > (first_cmd_offset + burst_offset)) {
        burst_offset += commandWindow;
    }
    // get the earliest burst aligned address for first command
    // ensure that the time does not go negative
    Tick first_cmd_tick = burst_tick - std::min(burst_offset, burst_tick);

    // Can required commands issue?
    bool first_can_issue = false;
    bool second_can_issue = false;
    // verify that we have command bandwidth to issue the command(s)
    while (!first_can_issue || !second_can_issue) {
        bool same_burst = (burst_tick == first_cmd_tick);
        auto first_cmd_count = rowBurstTicks.count(first_cmd_tick);
        auto second_cmd_count = same_burst ?
            first_cmd_count + 1 : rowBurstTicks.count(burst_tick);

        first_can_issue = first_cmd_count < max_cmds_per_burst;
        second_can_issue = second_cmd_count < max_cmds_per_burst;

        if (!second_can_issue) {
            DPRINTF(MemCtrl, "Contention (cmd2) found on command bus at %d\n",
                    burst_tick);
            burst_tick += commandWindow;
            cmd_at = burst_tick;
        }

        // Verify max_multi_cmd_split isn't violated when command 2 is shifted
        // If commands initially were issued in same burst, they are
        // now in consecutive bursts and can still issue B2B
        bool gap_violated = !same_burst &&
            ((burst_tick - first_cmd_tick) > max_multi_cmd_split);

        if (!first_can_issue || (!second_can_issue && gap_violated)) {
            DPRINTF(MemCtrl, "Contention (cmd1) found on command bus at %d\n",
                    first_cmd_tick);
            first_cmd_tick += commandWindow;
        }
    }

    // Add command to burstTicks
    rowBurstTicks.insert(burst_tick);
    rowBurstTicks.insert(first_cmd_tick);

    return cmd_at;
}

void
HBMCtrl::drainResume()
{

    MemCtrl::drainResume();

    if (!isTimingMode && system()->isTimingMode()) {
        // if we switched to timing mode, kick things into action,
        // and behave as if we restored from a checkpoint
        startup();
        pc1Int->startup();
    } else if (isTimingMode && !system()->isTimingMode()) {
        // if we switch from timing mode, stop the refresh events to
        // not cause issues with KVM
        if (pc1Int) {
            pc1Int->drainRanks();
        }
    }

    // update the mode
    isTimingMode = system()->isTimingMode();
}

AddrRangeList
HBMCtrl::getAddrRanges()
{
    AddrRangeList ranges;
    ranges.push_back(pc0Int->getAddrRange());
    ranges.push_back(pc1Int->getAddrRange());
    return ranges;
}

} // namespace memory
} // namespace gem5