gem5 v24.0.0.0
hetero_mem_ctrl.cc
1/*
2 * Copyright (c) 2010-2020 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2013 Amin Farmahini-Farahani
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 */
40
41#include "mem/hetero_mem_ctrl.hh"
42
43#include "base/trace.hh"
44#include "debug/DRAM.hh"
45#include "debug/Drain.hh"
46#include "debug/MemCtrl.hh"
47#include "debug/NVM.hh"
48#include "debug/QOS.hh"
49#include "mem/dram_interface.hh"
50#include "mem/mem_interface.hh"
51#include "mem/nvm_interface.hh"
52#include "sim/system.hh"
53
54namespace gem5
55{
56
57namespace memory
58{
59
60HeteroMemCtrl::HeteroMemCtrl(const HeteroMemCtrlParams &p) :
61 MemCtrl(p),
62 nvm(p.nvm)
63{
64 DPRINTF(MemCtrl, "Setting up controller\n");
65 readQueue.resize(p.qos_priorities);
66 writeQueue.resize(p.qos_priorities);
67
68 fatal_if(dynamic_cast<DRAMInterface*>(dram) == nullptr,
69 "HeteroMemCtrl's dram interface must be of type DRAMInterface.\n");
70 fatal_if(dynamic_cast<NVMInterface*>(nvm) == nullptr,
71 "HeteroMemCtrl's nvm interface must be of type NVMInterface.\n");
72
73 // hook up interfaces to the controller
74 dram->setCtrl(this, commandWindow);
75 nvm->setCtrl(this, commandWindow);
76
77 readBufferSize = dram->readBufferSize + nvm->readBufferSize;
78 writeBufferSize = dram->writeBufferSize + nvm->writeBufferSize;
79
80 writeHighThreshold = writeBufferSize * p.write_high_thresh_perc / 100.0;
81 writeLowThreshold = writeBufferSize * p.write_low_thresh_perc / 100.0;
82
83 // perform a basic check of the write thresholds
84 if (p.write_low_thresh_perc >= p.write_high_thresh_perc)
85 fatal("Write buffer low threshold %d must be smaller than the "
86 "high threshold %d\n", p.write_low_thresh_perc,
87 p.write_high_thresh_perc);
88}
89
90Tick
91HeteroMemCtrl::recvAtomic(PacketPtr pkt)
92{
93 Tick latency = 0;
94
95 if (dram->getAddrRange().contains(pkt->getAddr())) {
96 latency = MemCtrl::recvAtomicLogic(pkt, dram);
97 } else if (nvm->getAddrRange().contains(pkt->getAddr())) {
98 latency = MemCtrl::recvAtomicLogic(pkt, nvm);
99 } else {
100 panic("Can't handle address range for packet %s\n", pkt->print());
101 }
102
103 return latency;
104}
105
106bool
107HeteroMemCtrl::recvTimingReq(PacketPtr pkt)
108{
109 // This is where we enter from the outside world
110 DPRINTF(MemCtrl, "recvTimingReq: request %s addr %#x size %d\n",
111 pkt->cmdString(), pkt->getAddr(), pkt->getSize());
112
113 panic_if(pkt->cacheResponding(), "Should not see packets where cache "
114 "is responding");
115
116 panic_if(!(pkt->isRead() || pkt->isWrite()),
117 "Should only see read and writes at memory controller\n");
118
119 // Calc avg gap between requests
120 if (prevArrival != 0) {
121 stats.totGap += curTick() - prevArrival;
122 }
123 prevArrival = curTick();
124
125 // What type of media does this packet access?
126 bool is_dram;
127 if (dram->getAddrRange().contains(pkt->getAddr())) {
128 is_dram = true;
129 } else if (nvm->getAddrRange().contains(pkt->getAddr())) {
130 is_dram = false;
131 } else {
132 panic("Can't handle address range for packet %s\n",
133 pkt->print());
134 }
135
136 // Find out how many memory packets a pkt translates to
137 // If the burst size is equal or larger than the pkt size, then a pkt
138 // translates to only one memory packet. Otherwise, a pkt translates to
139 // multiple memory packets
140 unsigned size = pkt->getSize();
141 uint32_t burst_size = is_dram ? dram->bytesPerBurst() :
142 nvm->bytesPerBurst();
143 unsigned offset = pkt->getAddr() & (burst_size - 1);
144 unsigned int pkt_count = divCeil(offset + size, burst_size);
145
146 // run the QoS scheduler and assign a QoS priority value to the packet
147 qosSchedule( { &readQueue, &writeQueue }, burst_size, pkt);
148
149 // check local buffers and do not accept if full
150 if (pkt->isWrite()) {
151 assert(size != 0);
152 if (writeQueueFull(pkt_count)) {
153 DPRINTF(MemCtrl, "Write queue full, not accepting\n");
154 // remember that we have to retry this port
155 retryWrReq = true;
156 stats.numWrRetry++;
157 return false;
158 } else {
159 addToWriteQueue(pkt, pkt_count, is_dram ? dram : nvm);
160 // If we are not already scheduled to get a request out of the
161 // queue, do so now
162 if (!nextReqEvent.scheduled()) {
163 DPRINTF(MemCtrl, "Request scheduled immediately\n");
164 schedule(nextReqEvent, curTick());
165 }
166 stats.writeReqs++;
167 stats.bytesWrittenSys += size;
168 }
169 } else {
170 assert(pkt->isRead());
171 assert(size != 0);
172 if (readQueueFull(pkt_count)) {
173 DPRINTF(MemCtrl, "Read queue full, not accepting\n");
174 // remember that we have to retry this port
175 retryRdReq = true;
176 stats.numRdRetry++;
177 return false;
178 } else {
179 if (!addToReadQueue(pkt, pkt_count, is_dram ? dram : nvm)) {
180 // If we are not already scheduled to get a request out of the
181 // queue, do so now
182 if (!nextReqEvent.scheduled()) {
183 DPRINTF(MemCtrl, "Request scheduled immediately\n");
184 schedule(nextReqEvent, curTick());
185 }
186 }
187 stats.readReqs++;
188 stats.bytesReadSys += size;
189 }
190 }
191
192 return true;
193}
194
195void
196HeteroMemCtrl::processRespondEvent(MemInterface* mem_intr,
197 MemPacketQueue& queue,
198 EventFunctionWrapper& resp_event,
199 bool& retry_rd_req)
200{
201 DPRINTF(MemCtrl,
202 "processRespondEvent(): Some req has reached its readyTime\n");
203
204 if (queue.front()->isDram()) {
205 MemCtrl::processRespondEvent(dram, queue, resp_event, retry_rd_req);
206 } else {
207 MemCtrl::processRespondEvent(nvm, queue, resp_event, retry_rd_req);
208 }
209}
210
211MemPacketQueue::iterator
212HeteroMemCtrl::chooseNext(MemPacketQueue& queue, Tick extra_col_delay,
213 MemInterface* mem_int)
214{
215 // This method does the arbitration between requests.
216
217 MemPacketQueue::iterator ret = queue.end();
218
219 if (!queue.empty()) {
220 if (queue.size() == 1) {
221 // available rank corresponds to state refresh idle
222 MemPacket* mem_pkt = *(queue.begin());
223 if (packetReady(mem_pkt, mem_pkt->isDram()? dram : nvm)) {
224 ret = queue.begin();
225 DPRINTF(MemCtrl, "Single request, going to a free rank\n");
226 } else {
227 DPRINTF(MemCtrl, "Single request, going to a busy rank\n");
228 }
229 } else if (memSchedPolicy == enums::fcfs) {
230 // check if there is a packet going to a free rank
231 for (auto i = queue.begin(); i != queue.end(); ++i) {
232 MemPacket* mem_pkt = *i;
233 if (packetReady(mem_pkt, mem_pkt->isDram()? dram : nvm)) {
234 ret = i;
235 break;
236 }
237 }
238 } else if (memSchedPolicy == enums::frfcfs) {
239 Tick col_allowed_at;
240 std::tie(ret, col_allowed_at)
241 = chooseNextFRFCFS(queue, extra_col_delay, mem_int);
242 } else {
243 panic("No scheduling policy chosen\n");
244 }
245 }
246 return ret;
247}
248
249std::pair<MemPacketQueue::iterator, Tick>
250HeteroMemCtrl::chooseNextFRFCFS(MemPacketQueue& queue, Tick extra_col_delay,
251 MemInterface* mem_intr)
252{
253
254 auto selected_pkt_it = queue.end();
255 auto nvm_pkt_it = queue.end();
256 Tick col_allowed_at = MaxTick;
257 Tick nvm_col_allowed_at = MaxTick;
258
259 std::tie(selected_pkt_it, col_allowed_at) =
260 MemCtrl::chooseNextFRFCFS(queue, extra_col_delay, dram);
261
262 std::tie(nvm_pkt_it, nvm_col_allowed_at) =
263 MemCtrl::chooseNextFRFCFS(queue, extra_col_delay, nvm);
264
265
266 // Compare DRAM and NVM and select NVM if it can issue
267 // earlier than the DRAM packet
268 if (col_allowed_at > nvm_col_allowed_at) {
269 selected_pkt_it = nvm_pkt_it;
270 col_allowed_at = nvm_col_allowed_at;
271 }
272
273 return std::make_pair(selected_pkt_it, col_allowed_at);
274}
275
276
277Tick
278HeteroMemCtrl::doBurstAccess(MemPacket* mem_pkt, MemInterface* mem_intr)
279{
280 // mem_intr will be dram by default in HeteroMemCtrl
281
282 // When was command issued?
283 Tick cmd_at;
284
285 if (mem_pkt->isDram()) {
286 cmd_at = MemCtrl::doBurstAccess(mem_pkt, mem_intr);
287 // Update timing for NVM ranks if NVM is configured on this channel
288 nvm->addRankToRankDelay(cmd_at);
289 // Since nextBurstAt and nextReqAt are part of the interface, making
290 // sure that they are same for both nvm and dram interfaces
291 nvm->nextBurstAt = dram->nextBurstAt;
292 nvm->nextReqTime = dram->nextReqTime;
293
294 } else {
295 cmd_at = MemCtrl::doBurstAccess(mem_pkt, nvm);
296 // Update timing for NVM ranks if NVM is configured on this channel
297 dram->addRankToRankDelay(cmd_at);
298 dram->nextBurstAt = nvm->nextBurstAt;
299 dram->nextReqTime = nvm->nextReqTime;
300 }
301
302 return cmd_at;
303}
304
305bool
306HeteroMemCtrl::memBusy(MemInterface* mem_intr) {
307
308 // mem_intr in case of HeteroMemCtrl will always be dram
309
310 // check ranks for refresh/wakeup - uses busStateNext, so done after
311 // turnaround decisions
312 // Default to busy status and update based on interface specifics
313 bool dram_busy, nvm_busy = true;
314 // DRAM
315 dram_busy = mem_intr->isBusy(false, false);
316 // NVM
317 bool read_queue_empty = totalReadQueueSize == 0;
318 bool all_writes_nvm = nvm->numWritesQueued == totalWriteQueueSize;
319 nvm_busy = nvm->isBusy(read_queue_empty, all_writes_nvm);
320
321 // Default state of unused interface is 'true'
322 // Simply AND the busy signals to determine if system is busy
323 if (dram_busy && nvm_busy) {
324 // if all ranks are refreshing wait for them to finish
325 // and stall this state machine without taking any further
326 // action, and do not schedule a new nextReqEvent
327 return true;
328 } else {
329 return false;
330 }
331}
332
333void
334HeteroMemCtrl::nonDetermReads(MemInterface* mem_intr)
335{
336 // mem_intr by default points to dram in case
337 // of HeteroMemCtrl, therefore, calling nonDetermReads
338 // from MemCtrl using nvm interface
339 MemCtrl::nonDetermReads(nvm);
340}
341
342bool
343HeteroMemCtrl::nvmWriteBlock(MemInterface* mem_intr)
344{
345 // mem_intr by default points to dram in case
346 // of HeteroMemCtrl, therefore, calling nvmWriteBlock
347 // from MemCtrl using nvm interface
348 return MemCtrl::nvmWriteBlock(nvm);
349}
350
351Tick
352HeteroMemCtrl::minReadToWriteDataGap()
353{
354 return std::min(dram->minReadToWriteDataGap(),
355 nvm->minReadToWriteDataGap());
356}
357
358Tick
359HeteroMemCtrl::minWriteToReadDataGap()
360{
361 return std::min(dram->minWriteToReadDataGap(),
362 nvm->minWriteToReadDataGap());
363}
364
365Addr
366HeteroMemCtrl::burstAlign(Addr addr, MemInterface* mem_intr) const
367{
368 // mem_intr will point to dram interface in HeteroMemCtrl
369 if (mem_intr->getAddrRange().contains(addr)) {
370 return (addr & ~(Addr(mem_intr->bytesPerBurst() - 1)));
371 } else {
372 assert(nvm->getAddrRange().contains(addr));
373 return (addr & ~(Addr(nvm->bytesPerBurst() - 1)));
374 }
375}
376
377bool
378HeteroMemCtrl::pktSizeCheck(MemPacket* mem_pkt, MemInterface* mem_intr) const
379{
380 // mem_intr will point to dram interface in HeteroMemCtrl
381 if (mem_pkt->isDram()) {
382 return (mem_pkt->size <= mem_intr->bytesPerBurst());
383 } else {
384 return (mem_pkt->size <= nvm->bytesPerBurst());
385 }
386}
387
388void
389HeteroMemCtrl::recvFunctional(PacketPtr pkt)
390{
391 bool found;
392
393 found = MemCtrl::recvFunctionalLogic(pkt, dram);
394
395 if (!found) {
396 found = MemCtrl::recvFunctionalLogic(pkt, nvm);
397 }
398
399 if (!found) {
400 panic("Can't handle address range for packet %s\n", pkt->print());
401 }
402}
403
404bool
405HeteroMemCtrl::allIntfDrained() const
406{
407 // ensure dram is in power down and refresh IDLE states
408 bool dram_drained = dram->allRanksDrained();
409 // No outstanding NVM writes
410 // All other queues verified as needed with calling logic
411 bool nvm_drained = nvm->allRanksDrained();
412 return (dram_drained && nvm_drained);
413}
414
415DrainState
416HeteroMemCtrl::drain()
417{
418 // if there is anything in any of our internal queues, keep track
419 // of that as well
420 if (!(!totalWriteQueueSize && !totalReadQueueSize && respQueue.empty() &&
421 allIntfDrained())) {
422
423 DPRINTF(Drain, "Memory controller not drained, write: %d, read: %d,"
424 " resp: %d\n", totalWriteQueueSize, totalReadQueueSize,
425 respQueue.size());
426
427 // the only queue that is not drained automatically over time
428 // is the write queue, thus kick things into action if needed
429 if (totalWriteQueueSize && !nextReqEvent.scheduled()) {
430 schedule(nextReqEvent, curTick());
431 }
432
433 dram->drainRanks();
434
435 return DrainState::Draining;
436 } else {
437 return DrainState::Drained;
438 }
439}
440
441void
442HeteroMemCtrl::drainResume()
443{
444 if (!isTimingMode && system()->isTimingMode()) {
445 // if we switched to timing mode, kick things into action,
446 // and behave as if we restored from a checkpoint
447 startup();
448 dram->startup();
449 } else if (isTimingMode && !system()->isTimingMode()) {
450 // if we switch from timing mode, stop the refresh events to
451 // not cause issues with KVM
452 dram->suspend();
453 }
454
455 // update the mode
456 isTimingMode = system()->isTimingMode();
457}
458
459AddrRangeList
460HeteroMemCtrl::getAddrRanges()
461{
462 AddrRangeList ranges;
463 ranges.push_back(dram->getAddrRange());
464 ranges.push_back(nvm->getAddrRange());
465 return ranges;
466}
467
468} // namespace memory
469} // namespace gem5
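
As an aside from the listing: recvTimingReq() above makes two per-packet decisions, routing the packet to the DRAM or NVM interface by address range and splitting it into divCeil(offset + size, burst_size) burst-sized memory packets. The short standalone sketch below (plain C++, no gem5 types; the address ranges and burst sizes are made-up example values, not defaults taken from any gem5 configuration) illustrates that arithmetic.

// Illustrative sketch only -- not part of hetero_mem_ctrl.cc.
#include <cassert>
#include <cstdint>
#include <cstdio>

// Stand-in for AddrRange::contains(): does [base, base + len) cover addr?
static bool contains(uint64_t addr, uint64_t base, uint64_t len)
{
    return addr >= base && addr - base < len;
}

int main()
{
    // Hypothetical example layout: 2 GiB of DRAM followed by 2 GiB of NVM.
    const uint64_t dram_base = 0x00000000, dram_len = 0x80000000;
    const uint64_t nvm_base  = 0x80000000, nvm_len  = 0x80000000;
    const unsigned dram_burst = 64;   // example bytes per DRAM burst
    const unsigned nvm_burst  = 256;  // example bytes per NVM burst

    const uint64_t addr = 0x30;  // not burst-aligned
    const unsigned size = 128;   // larger than one DRAM burst

    // Media selection: pick the interface whose range contains the address.
    const bool is_dram = contains(addr, dram_base, dram_len);
    assert(is_dram || contains(addr, nvm_base, nvm_len));

    // Burst split: the same divCeil(offset + size, burst_size) arithmetic
    // used by recvTimingReq().
    const unsigned burst_size = is_dram ? dram_burst : nvm_burst;
    const unsigned offset = addr & (burst_size - 1);
    const unsigned pkt_count = (offset + size + burst_size - 1) / burst_size;

    // 0x30 + 128 bytes spans the 64-byte bursts [0, 64), [64, 128) and
    // [128, 192), so pkt_count is 3.
    std::printf("%s access -> %u burst(s)\n", is_dram ? "DRAM" : "NVM", pkt_count);
    return 0;
}

Running it prints "DRAM access -> 3 burst(s)", which is the pkt_count that recvTimingReq() would hand to addToReadQueue() or addToWriteQueue() for such a request.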
#define DPRINTF(x,...)
Definition trace.hh:210
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition packet.hh:295
bool isRead() const
Definition packet.hh:593
Addr getAddr() const
Definition packet.hh:807
const std::string & cmdString() const
Return the string name of the cmd field (for debugging and tracing).
Definition packet.hh:588
void print(std::ostream &o, int verbosity=0, const std::string &prefix="") const
Definition packet.cc:368
bool isWrite() const
Definition packet.hh:594
unsigned getSize() const
Definition packet.hh:817
bool cacheResponding() const
Definition packet.hh:659
bool isTimingMode() const
Is the system in timing mode?
Definition system.hh:270
AddrRange getAddrRange() const
Get the address range.
Interface to DRAM devices with media specific parameters, statistics, and functions.
virtual void processRespondEvent(MemInterface *mem_intr, MemPacketQueue &queue, EventFunctionWrapper &resp_event, bool &retry_rd_req) override
void drainResume() override
Resume execution after a successful drain.
virtual bool pktSizeCheck(MemPacket *mem_pkt, MemInterface *mem_intr) const override
Check if mem pkt's size is sane.
HeteroMemCtrl(const HeteroMemCtrlParams &p)
Tick doBurstAccess(MemPacket *mem_pkt, MemInterface *mem_int) override
Actually do the burst based on media specific access function.
virtual bool memBusy(MemInterface *mem_intr) override
Checks if the memory interface is already busy.
Tick recvAtomic(PacketPtr pkt) override
virtual Addr burstAlign(Addr addr, MemInterface *mem_intr) const override
Burst-align an address.
virtual std::pair< MemPacketQueue::iterator, Tick > chooseNextFRFCFS(MemPacketQueue &queue, Tick extra_col_delay, MemInterface *mem_intr) override
For FR-FCFS policy reorder the read/write queue depending on row buffer hits and earliest bursts avai...
DrainState drain() override
Draining is the process of clearing out the states of SimObjects. These are the SimObjects that are pa...
bool recvTimingReq(PacketPtr pkt) override
AddrRangeList getAddrRanges() override
MemPacketQueue::iterator chooseNext(MemPacketQueue &queue, Tick extra_col_delay, MemInterface *mem_int) override
The memory scheduler/arbiter - picks which request needs to go next, based on the specified policy suc...
Tick minReadToWriteDataGap() override
Calculate the minimum delay used when scheduling a read-to-write transition.
bool allIntfDrained() const override
Ensure that all interfaced have drained commands.
virtual void nonDetermReads(MemInterface *mem_intr) override
Will access nvm memory interface and select non-deterministic reads to issue.
Tick minWriteToReadDataGap() override
Calculate the minimum delay used when scheduling a write-to-read transition.
virtual bool nvmWriteBlock(MemInterface *mem_intr) override
Will check if all writes are for nvm interface and nvm's write resp queue is full.
NVMInterface * nvm
Create pointer to interface of the actual nvm media when connected.
void recvFunctional(PacketPtr pkt) override
The memory controller is a single-channel memory controller capturing the most important timing const...
Definition mem_ctrl.hh:247
uint32_t writeLowThreshold
Definition mem_ctrl.hh:517
enums::MemSched memSchedPolicy
Memory controller configuration initialized based on parameter values.
Definition mem_ctrl.hh:525
bool recvFunctionalLogic(PacketPtr pkt, MemInterface *mem_intr)
Definition mem_ctrl.cc:1395
bool retryRdReq
Remember if we have to retry a request when available.
Definition mem_ctrl.hh:293
virtual void startup() override
startup() is the final initialization call before simulation.
Definition mem_ctrl.cc:110
void addToWriteQueue(PacketPtr pkt, unsigned int pkt_count, MemInterface *mem_intr)
Decode the incoming pkt, create a mem_pkt and push to the back of the write queue.
Definition mem_ctrl.cc:304
Tick recvAtomicLogic(PacketPtr pkt, MemInterface *mem_intr)
Definition mem_ctrl.cc:136
std::deque< MemPacket * > respQueue
Response queue where read packets wait after we're done working with them, but it's not time to send ...
Definition mem_ctrl.hh:492
uint32_t writeHighThreshold
Definition mem_ctrl.hh:516
std::vector< MemPacketQueue > writeQueue
Definition mem_ctrl.hh:473
virtual std::pair< MemPacketQueue::iterator, Tick > chooseNextFRFCFS(MemPacketQueue &queue, Tick extra_col_delay, MemInterface *mem_intr)
For FR-FCFS policy reorder the read/write queue depending on row buffer hits and earliest bursts avai...
Definition mem_ctrl.cc:601
bool readQueueFull(unsigned int pkt_count) const
Check if the read queue has room for more entries.
Definition mem_ctrl.cc:166
virtual Tick doBurstAccess(MemPacket *mem_pkt, MemInterface *mem_intr)
Actually do the burst based on media specific access function.
Definition mem_ctrl.cc:796
MemInterface * dram
Definition mem_ctrl.hh:504
bool addToReadQueue(PacketPtr pkt, unsigned int pkt_count, MemInterface *mem_intr)
When a new read comes in, first check if the write q has a pending request to the same address....
Definition mem_ctrl.cc:189
bool writeQueueFull(unsigned int pkt_count) const
Check if the write queue has room for more entries.
Definition mem_ctrl.cc:178
uint32_t readBufferSize
The following are basic design parameters of the memory controller, and are initialized based on para...
Definition mem_ctrl.hh:514
EventFunctionWrapper nextReqEvent
Definition mem_ctrl.hh:307
virtual bool packetReady(MemPacket *pkt, MemInterface *mem_intr)
Determine if there is a packet that can issue.
Definition mem_ctrl.cc:1152
virtual bool nvmWriteBlock(MemInterface *mem_intr)
Will check if all writes are for nvm interface and nvm's write resp queue is full.
Definition mem_ctrl.cc:859
virtual void processRespondEvent(MemInterface *mem_intr, MemPacketQueue &queue, EventFunctionWrapper &resp_event, bool &retry_rd_req)
Definition mem_ctrl.cc:488
std::vector< MemPacketQueue > readQueue
The controller's main read and write queues, with support for QoS reordering.
Definition mem_ctrl.hh:472
bool isTimingMode
Remember if the memory system is in timing mode.
Definition mem_ctrl.hh:288
const Tick commandWindow
Length of a command window, used to check command bandwidth.
Definition mem_ctrl.hh:545
virtual void nonDetermReads(MemInterface *mem_intr)
Will access memory interface and select non-deterministic reads to issue.
Definition mem_ctrl.cc:866
General interface to memory device. Includes functions and parameters shared across media types.
virtual void suspend()
This function is DRAM specific.
virtual void addRankToRankDelay(Tick cmd_at)=0
Add rank to rank delay to bus timing to all banks in all ranks when access to an alternate interface ...
virtual bool isBusy(bool read_queue_empty, bool all_writes_nvm)=0
This function checks if ranks are busy.
void setCtrl(MemCtrl *_ctrl, unsigned int command_window, uint8_t pseudo_channel=0)
Set a pointer to the controller and initialize interface based on controller parameters.
const uint32_t readBufferSize
Buffer sizes for read and write queues in the controller These are passed to the controller on instan...
Tick nextBurstAt
Till when the controller must wait before issuing next RD/WR burst?
uint32_t numWritesQueued
NVM specific variable, but declaring it here allows treating different interfaces in a more general wa...
virtual bool allRanksDrained() const =0
Check drain state of interface.
virtual void drainRanks()
This function is DRAM specific.
uint32_t bytesPerBurst() const
A memory packet stores packets along with the timestamp of when the packet entered the queue,...
Definition mem_ctrl.hh:99
unsigned int size
The size of this dram packet in bytes It is always equal or smaller than the burst size.
Definition mem_ctrl.hh:146
bool isDram() const
Return true if its a DRAM access.
Definition mem_ctrl.hh:204
Interface to NVM devices with media specific parameters, statistics, and functions.
void addRankToRankDelay(Tick cmd_at) override
Add rank to rank delay to bus timing to all NVM banks in all ranks when access to an alternate inter...
bool isBusy(bool read_queue_empty, bool all_writes_nvm) override
This function checks if ranks are busy.
bool allRanksDrained() const override
Check drain state of NVM interface.
uint8_t qosSchedule(std::initializer_list< Queues * > queues_ptr, uint64_t queue_entry_size, const PacketPtr pkt)
Assign priority to a packet by executing the configured QoS policy.
Definition mem_ctrl.hh:495
uint64_t totalWriteQueueSize
Total write request packets queue length in #packets.
Definition mem_ctrl.hh:133
uint64_t totalReadQueueSize
Total read request packets queue length in #packets.
Definition mem_ctrl.hh:130
System * system() const
read the system pointer
Definition mem_ctrl.hh:370
uint8_t schedule(RequestorID id, uint64_t data)
Definition mem_ctrl.cc:217
STL deque class.
Definition stl.hh:44
STL pair class.
Definition stl.hh:58
DRAMInterface declaration.
bool contains(const Addr &a) const
Determine if the range contains an address.
static constexpr T divCeil(const T &a, const U &b)
Definition intmath.hh:110
DrainState
Object drain/handover states.
Definition drain.hh:75
@ Draining
Draining buffers pending serialization/handover.
@ Drained
Buffers drained, ready for serialization/handover.
bool scheduled() const
Determine if the current event is scheduled.
Definition eventq.hh:458
#define panic(...)
This implements a cprintf based panic() function.
Definition logging.hh:188
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condi...
Definition logging.hh:236
#define fatal(...)
This implements a cprintf based fatal() function.
Definition logging.hh:200
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
Definition logging.hh:214
virtual void startup()
startup() is the final initialization call before simulation.
Definition sim_object.cc:96
HeteroMemCtrl declaration.
MemInterface declaration.
Bitfield< 7 > i
Definition misc_types.hh:67
Bitfield< 23, 0 > offset
Definition types.hh:144
Bitfield< 0 > p
Bitfield< 3 > addr
Definition types.hh:84
Copyright (c) 2024 - Pranith Kumar Copyright (c) 2020 Inria All rights reserved.
Definition binary32.hh:36
Tick curTick()
The universal simulation clock.
Definition cur_tick.hh:46
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition types.hh:147
uint64_t Tick
Tick count type.
Definition types.hh:58
const Tick MaxTick
Definition types.hh:60
NVMInterface declaration.
statistics::Scalar writeReqs
Definition mem_ctrl.hh:572
statistics::Scalar readReqs
Definition mem_ctrl.hh:571
statistics::Scalar numWrRetry
Definition mem_ctrl.hh:583
statistics::Scalar numRdRetry
Definition mem_ctrl.hh:582
statistics::Scalar bytesReadSys
Definition mem_ctrl.hh:592
statistics::Scalar bytesWrittenSys
Definition mem_ctrl.hh:593

Generated on Tue Jun 18 2024 16:24:05 for gem5 by doxygen 1.11.0