gem5 v24.0.0.0
nvm_interface.cc
1/*
2 * Copyright (c) 2010-2020 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2013 Amin Farmahini-Farahani
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 */
40
41#include "mem/nvm_interface.hh"
42
43#include "base/bitfield.hh"
44#include "base/cprintf.hh"
45#include "base/trace.hh"
46#include "debug/NVM.hh"
47#include "sim/system.hh"
48
49namespace gem5
50{
51
52namespace memory
53{
54
55NVMInterface::NVMInterface(const NVMInterfaceParams &_p)
56 : MemInterface(_p),
57 maxPendingWrites(_p.max_pending_writes),
58 maxPendingReads(_p.max_pending_reads),
59 twoCycleRdWr(_p.two_cycle_rdwr),
60 tREAD(_p.tREAD), tWRITE(_p.tWRITE), tSEND(_p.tSEND),
61 stats(*this),
62 writeRespondEvent([this]{ processWriteRespondEvent(); }, name()),
63 readReadyEvent([this]{ processReadReadyEvent(); }, name()),
64 nextReadAt(0), numPendingReads(0), numReadDataReady(0),
65 numReadsToIssue(0)
66{
67 DPRINTF(NVM, "Setting up NVM Interface\n");
68
69 fatal_if(!isPowerOf2(burstSize), "NVM burst size %d is not allowed, "
70 "must be a power of two\n", burstSize);
71
72 // sanity check the ranks since we rely on bit slicing for the
73 // address decoding
74 fatal_if(!isPowerOf2(ranksPerChannel), "NVM rank count of %d is "
75 "not allowed, must be a power of two\n", ranksPerChannel);
76
77 for (int i = 0; i < ranksPerChannel; i++) {
78 // Add NVM ranks to the system
79 DPRINTF(NVM, "Creating NVM rank %d \n", i);
80 Rank* rank = new Rank(_p, i, *this);
81 ranks.push_back(rank);
82 }
83
84 uint64_t capacity = 1ULL << ceilLog2(AbstractMemory::size());
85
86 DPRINTF(NVM, "NVM capacity %lld (%lld) bytes\n", capacity,
87 AbstractMemory::size());
88
89 rowsPerBank = capacity / (rowBufferSize *
90 banksPerRank * ranksPerChannel);
91}
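As an illustrative aside (not part of this file): a minimal standalone sketch of the rowsPerBank computation above, using hypothetical geometry values (16 GiB device, 256 B row buffer, 16 banks per rank, one rank per channel) rather than any shipped configuration.

#include <cassert>
#include <cstdint>

int main()
{
    // hypothetical NVM geometry, chosen only to make the arithmetic concrete
    const uint64_t capacity        = 1ULL << 34; // 16 GiB, already a power of two
    const uint64_t rowBufferSize   = 256;        // bytes per row buffer
    const uint64_t banksPerRank    = 16;
    const uint64_t ranksPerChannel = 1;

    // same expression as in the constructor above
    const uint64_t rowsPerBank =
        capacity / (rowBufferSize * banksPerRank * ranksPerChannel);

    assert(rowsPerBank == (1ULL << 22)); // 2^34 / 2^12 = 4 Mi rows per bank
    return 0;
}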
92
93NVMInterface::Rank::Rank(const NVMInterfaceParams &_p,
94 int _rank, NVMInterface& _nvm)
95 : EventManager(&_nvm), rank(_rank), banks(_p.banks_per_rank)
96{
97 for (int b = 0; b < _p.banks_per_rank; b++) {
98 banks[b].bank = b;
99 // No bank groups; simply assign to bank number
100 banks[b].bankgr = b;
101 }
102}
103
104void
105NVMInterface::init()
106{
107 AbstractMemory::init();
108}
109
110void NVMInterface::setupRank(const uint8_t rank, const bool is_read)
111{
112 if (is_read) {
113 // increment count to trigger read and track number of reads in Q
114 numReadsToIssue++;
115 } else {
116 // increment count to track number of writes in Q
117 numWritesQueued++;
118 }
119}
120
121MemPacket*
122NVMInterface::decodePacket(const PacketPtr pkt, Addr pkt_addr,
123 unsigned size, bool is_read, uint8_t pseudo_channel)
124{
125 // decode the address based on the address mapping scheme, with
126 // Ro, Ra, Co, Ba and Ch denoting row, rank, column, bank and
127 // channel, respectively
128 uint8_t rank;
129 uint8_t bank;
130 // use a 64-bit unsigned during the computations as the row is
131 // always the top bits, and check before creating the packet
132 uint64_t row;
133
134 // Get packed address, starting at 0
135 Addr addr = getCtrlAddr(pkt_addr);
136
137 // truncate the address to a memory burst, which makes it unique to
138 // a specific buffer, row, bank, rank and channel
139 addr = addr / burstSize;
140
141 // we have removed the lowest order address bits that denote the
142 // position within the column
143 if (addrMapping == enums::RoRaBaChCo || addrMapping == enums::RoRaBaCoCh) {
144 // the lowest order bits denote the column to ensure that
145 // sequential cache lines occupy the same row
146 addr = addr / burstsPerRowBuffer;
147
148 // after the channel bits, get the bank bits to interleave
149 // over the banks
150 bank = addr % banksPerRank;
151 addr = addr / banksPerRank;
152
153 // after the bank, we get the rank bits which thus interleaves
154 // over the ranks
155 rank = addr % ranksPerChannel;
156 addr = addr / ranksPerChannel;
157
158 // lastly, get the row bits, no need to remove them from addr
159 row = addr % rowsPerBank;
160 } else if (addrMapping == enums::RoCoRaBaCh) {
161 // with emerging technologies, could have small page size with
162 // interleaving granularity greater than row buffer
163 if (burstsPerStripe > burstsPerRowBuffer) {
164 // remove column bits which are a subset of burstsPerStripe
165 addr = addr / burstsPerRowBuffer;
166 } else {
167 // remove lower column bits below channel bits
168 addr = addr / burstsPerStripe;
169 }
170
171 // start with the bank bits, as this provides the maximum
172 // opportunity for parallelism between requests
173 bank = addr % banksPerRank;
174 addr = addr / banksPerRank;
175
176 // next get the rank bits
177 rank = addr % ranksPerChannel;
178 addr = addr / ranksPerChannel;
179
180 // next, the higher-order column bits
181 if (burstsPerStripe < burstsPerRowBuffer) {
182 addr = addr / (burstsPerRowBuffer / burstsPerStripe);
183 }
184
185 // lastly, get the row bits, no need to remove them from addr
186 row = addr % rowsPerBank;
187 } else
188 panic("Unknown address mapping policy chosen!");
189
190 assert(rank < ranksPerChannel);
191 assert(bank < banksPerRank);
192 assert(row < rowsPerBank);
193 assert(row < Bank::NO_ROW);
194
195 DPRINTF(NVM, "Address: %#x Rank %d Bank %d Row %d\n",
196 pkt_addr, rank, bank, row);
197
198 // create the corresponding memory packet with the entry time and
199 // ready time set to the current tick, the latter will be updated
200 // later
201 uint16_t bank_id = banksPerRank * rank + bank;
202
203 return new MemPacket(pkt, is_read, false, pseudo_channel, rank, bank, row,
204 bank_id, pkt_addr, size);
205}
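As an illustrative aside (not part of this file): a self-contained sketch of the divide-and-modulo decode performed above, with hypothetical geometry values.

#include <cstdint>
#include <cstdio>

int main()
{
    // hypothetical geometry: 64 B bursts, 8 bursts per row buffer,
    // 16 banks per rank, 2 ranks per channel, 1 Mi rows per bank
    const uint64_t burstSize = 64, burstsPerRowBuffer = 8;
    const uint64_t banksPerRank = 16, ranksPerChannel = 2;
    const uint64_t rowsPerBank = 1ULL << 20;

    uint64_t addr = 0x12345678;      // example controller-relative address
    addr /= burstSize;               // drop the offset within the burst
    addr /= burstsPerRowBuffer;      // drop the low-order column bits
    const uint64_t bank = addr % banksPerRank;
    addr /= banksPerRank;            // peel off the bank bits
    const uint64_t rank = addr % ranksPerChannel;
    addr /= ranksPerChannel;         // peel off the rank bits
    const uint64_t row = addr % rowsPerBank; // the row is what remains

    std::printf("rank %llu bank %llu row %llu\n",
                (unsigned long long)rank, (unsigned long long)bank,
                (unsigned long long)row);
    return 0;
}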
206
207std::pair<MemPacketQueue::iterator, Tick>
208NVMInterface::chooseNextFRFCFS(MemPacketQueue& queue, Tick min_col_at) const
209{
210 // remember if we found a hit, but one that cannot issue seamlessly
211 bool found_prepped_pkt = false;
212
213 auto selected_pkt_it = queue.end();
214 Tick selected_col_at = MaxTick;
215
216 for (auto i = queue.begin(); i != queue.end() ; ++i) {
217 MemPacket* pkt = *i;
218
219 // select optimal NVM packet in Q
220 if (!pkt->isDram()) {
221 const Bank& bank = ranks[pkt->rank]->banks[pkt->bank];
222 const Tick col_allowed_at = pkt->isRead() ? bank.rdAllowedAt :
223 bank.wrAllowedAt;
224
225 // check if rank is not doing a refresh and thus is available,
226 // if not, jump to the next packet
227 if (burstReady(pkt)) {
228 DPRINTF(NVM, "%s bank %d - Rank %d available\n", __func__,
229 pkt->bank, pkt->rank);
230
231 // no additional rank-to-rank or media delays
232 if (col_allowed_at <= min_col_at) {
233 // FCFS within entries that can issue without
234 // additional delay, such as same rank accesses
235 // or media delay requirements
236 selected_pkt_it = i;
237 selected_col_at = col_allowed_at;
238 // no need to look through the remaining queue entries
239 DPRINTF(NVM, "%s Seamless buffer hit\n", __func__);
240 break;
241 } else if (!found_prepped_pkt) {
242 // packet is to prepped region but cannot issue
243 // seamlessly; remember this one and continue
244 selected_pkt_it = i;
245 selected_col_at = col_allowed_at;
246 DPRINTF(NVM, "%s Prepped packet found \n", __func__);
247 found_prepped_pkt = true;
248 }
249 } else {
250 DPRINTF(NVM, "%s bank %d - Rank %d not available\n", __func__,
251 pkt->bank, pkt->rank);
252 }
253 }
254 }
255
256 if (selected_pkt_it == queue.end()) {
257 DPRINTF(NVM, "%s no available NVM ranks found\n", __func__);
258 }
259
260 return std::make_pair(selected_pkt_it, selected_col_at);
261}
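As an illustrative aside (not part of this file): the selection rule above reduced to a standalone function, preferring the first entry that can issue at or before min_col_at and otherwise falling back to the first entry whose rank is available. The types and containers are simplified stand-ins, not the gem5 ones.

#include <cstddef>
#include <cstdint>
#include <vector>

using Tick = uint64_t;

// Returns the index of the chosen entry, or -1 if no rank is available.
// allowed_at[i] is when entry i may use the data bus, ready[i] says whether
// its rank can accept a burst right now.
int chooseNextFRFCFS(const std::vector<Tick>& allowed_at,
                     const std::vector<bool>& ready, Tick min_col_at)
{
    int selected = -1;
    bool found_prepped = false;
    for (std::size_t i = 0; i < allowed_at.size(); ++i) {
        if (!ready[i])
            continue;                   // rank busy, look at the next entry
        if (allowed_at[i] <= min_col_at)
            return static_cast<int>(i); // seamless hit: FCFS among these
        if (!found_prepped) {
            selected = static_cast<int>(i); // first non-seamless candidate
            found_prepped = true;
        }
    }
    return selected;
}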
262
263void
264NVMInterface::chooseRead(MemPacketQueue& queue)
265{
266 Tick cmd_at = std::max(curTick(), nextReadAt);
267
268 // This method does the arbitration between non-deterministic read
269 // requests to NVM. The chosen packet is not removed from the queue
270 // at this time. Removal from the queue will occur when the data is
271 // ready and a separate SEND command is issued to retrieve it via the
272 // chooseNext function in the top-level controller.
273 assert(!queue.empty());
274
275 assert(numReadsToIssue > 0);
276 numReadsToIssue--;
277 // For simplicity, issue non-deterministic reads in order (fcfs)
278 for (auto i = queue.begin(); i != queue.end() ; ++i) {
279 MemPacket* pkt = *i;
280
281 // Find 1st NVM read packet that hasn't issued read command
282 if (pkt->readyTime == MaxTick && !pkt->isDram() && pkt->isRead()) {
283 // get the bank
284 Bank& bank_ref = ranks[pkt->rank]->banks[pkt->bank];
285
286 // issuing a read, inc counter and verify we haven't overrun
287 // the read limit
288 numPendingReads++;
289
290 // increment the bytes accessed and the accesses per row
291 bank_ref.bytesAccessed += burstSize;
292
293 // Verify command bandwidth to issue
294 // Host can issue read immediately with buffering closer
295 // to the NVM. The actual execution at the NVM may be delayed
296 // due to busy resources
297 if (twoCycleRdWr) {
298 cmd_at = ctrl->verifyMultiCmd(cmd_at,
299 maxCommandsPerWindow, tCK);
300 } else {
301 cmd_at = ctrl->verifySingleCmd(cmd_at,
302 maxCommandsPerWindow, false);
303 }
304
305 // Update delay to next read
306 // Ensures single read command issued per cycle
307 nextReadAt = cmd_at + tCK;
308
309 // If accessing a new location in this bank, update timing
310 // and stats
311 if (bank_ref.openRow != pkt->row) {
312 // update the open bank, re-using row field
313 bank_ref.openRow = pkt->row;
314
315 // sample the bytes accessed to a buffer in this bank
316 // here when we are re-buffering the data
317 stats.bytesPerBank.sample(bank_ref.bytesAccessed);
318 // start counting anew
319 bank_ref.bytesAccessed = 0;
320
321 // holdoff next command to this bank until the read completes
322 // and the data has been successfully buffered
323 // can pipeline accesses to the same bank, sending them
324 // across the interface B2B, but will incur full access
325 // delay between data ready responses to different buffers
326 // in a bank
327 bank_ref.actAllowedAt = std::max(cmd_at,
328 bank_ref.actAllowedAt) + tREAD;
329 }
330 // update per packet readyTime to holdoff burst read operation
331 // overloading readyTime, which will be updated again when the
332 // burst is issued
333 pkt->readyTime = std::max(cmd_at, bank_ref.actAllowedAt);
334
335 DPRINTF(NVM, "Issuing NVM Read to bank %d at tick %d. "
336 "Data ready at %d\n",
337 bank_ref.bank, cmd_at, pkt->readyTime);
338
339 // Insert into read ready queue. It will be handled after
340 // the media delay has been met
341 if (readReadyQueue.empty()) {
342 assert(!readReadyEvent.scheduled());
343 schedule(readReadyEvent, pkt->readyTime);
344 } else if (readReadyEvent.when() > pkt->readyTime) {
345 // move it sooner in time, to the first read with data
346 reschedule(readReadyEvent, pkt->readyTime);
347 } else {
348 assert(readReadyEvent.scheduled());
349 }
350 readReadyQueue.push_back(pkt->readyTime);
351
352 // found an NVM read to issue - break out
353 break;
354 }
355 }
356}
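As an illustrative aside (not part of this file): the schedule/reschedule bookkeeping above keeps a single readReadyEvent aimed at the earliest pending readyTime; below is a minimal stand-in for that pattern, with plain data members in place of gem5's event queue.

#include <cstdint>
#include <deque>

using Tick = uint64_t;

// Minimal stand-in for the event bookkeeping used above: keep exactly one
// pending "ready" event, always aimed at the earliest readyTime in the queue.
struct ReadyEventTracker
{
    std::deque<Tick> readyQueue; // readyTime of each outstanding read
    bool scheduled = false;      // is the single event currently armed?
    Tick when = 0;               // tick the event is armed for

    void addReadyTime(Tick ready_at)
    {
        if (readyQueue.empty()) {
            // nothing pending, arm the event for this read
            scheduled = true;
            when = ready_at;
        } else if (scheduled && when > ready_at) {
            // new read completes earlier, pull the event forward
            when = ready_at;
        }
        readyQueue.push_back(ready_at);
    }
};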
357
358void
359NVMInterface::processReadReadyEvent()
360{
361 // signal that there is read data ready to be transmitted
362 numReadDataReady++;
363
364 DPRINTF(NVM,
365 "processReadReadyEvent(): Data for an NVM read is ready. "
366 "numReadDataReady is %d\t numPendingReads is %d\n",
367 numReadDataReady, numPendingReads);
368
369 // Find lowest ready time and verify it is equal to curTick
370 // also find the next lowest to schedule next event
371 // Done with this response, erase entry
372 auto ready_it = readReadyQueue.begin();
373 Tick next_ready_at = MaxTick;
374 for (auto i = readReadyQueue.begin(); i != readReadyQueue.end() ; ++i) {
375 if (*ready_it > *i) {
376 next_ready_at = *ready_it;
377 ready_it = i;
378 } else if ((next_ready_at > *i) && (i != ready_it)) {
379 next_ready_at = *i;
380 }
381 }
382
383 // Verify we found the time of this event and remove it
384 assert(*ready_it == curTick());
385 readReadyQueue.erase(ready_it);
386
387 if (!readReadyQueue.empty()) {
388 assert(readReadyQueue.front() >= curTick());
389 assert(!readReadyEvent.scheduled());
390 schedule(readReadyEvent, next_ready_at);
391 }
392
393 // It is possible that a new command kicks things back into
394 // action before reaching this point but need to ensure that we
395 // continue to process new commands as read data becomes ready
396 // This will also trigger a drain if needed
397 if (!ctrl->requestEventScheduled()) {
398 DPRINTF(NVM, "Restart controller scheduler immediately\n");
399 ctrl->restartScheduler(curTick());
400 }
401}
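As an illustrative aside (not part of this file): the loop above finds, in one pass, both the entry firing now and the next-earliest ready time; a simplified standalone version of the same scan is shown below, with a hypothetical helper name.

#include <cassert>
#include <cstdint>
#include <deque>
#include <limits>

using Tick = uint64_t;

// Remove the earliest entry (which must equal 'now') and return the
// next-earliest ready time, or MaxTick if the queue becomes empty.
Tick popReadyAndFindNext(std::deque<Tick>& readyQueue, Tick now)
{
    const Tick MaxTick = std::numeric_limits<Tick>::max();
    auto ready_it = readyQueue.begin();
    Tick next_ready_at = MaxTick;
    for (auto i = readyQueue.begin(); i != readyQueue.end(); ++i) {
        if (*ready_it > *i) {
            next_ready_at = *ready_it; // old minimum becomes the runner-up
            ready_it = i;
        } else if (next_ready_at > *i && i != ready_it) {
            next_ready_at = *i;
        }
    }
    assert(*ready_it == now);
    readyQueue.erase(ready_it);
    return next_ready_at;
}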
402
403bool
404NVMInterface::burstReady(MemPacket* pkt) const {
405 bool read_rdy = pkt->isRead() && (ctrl->inReadBusState(true, this)) &&
406 (pkt->readyTime <= curTick()) && (numReadDataReady > 0);
407 bool write_rdy = !pkt->isRead() && !ctrl->inReadBusState(true, this) &&
408 !writeRespQueueFull();
409 return (read_rdy || write_rdy);
410}
411
414 const std::vector<MemPacketQueue>& queue)
415{
416 DPRINTF(NVM, "NVM Timing access to addr %#x, rank/bank/row %d %d %d\n",
417 pkt->addr, pkt->rank, pkt->bank, pkt->row);
418
419 // get the bank
420 Bank& bank_ref = ranks[pkt->rank]->banks[pkt->bank];
421
422 // respect any constraints on the command
423 const Tick bst_allowed_at = pkt->isRead() ?
424 bank_ref.rdAllowedAt : bank_ref.wrAllowedAt;
425
426 // we need to wait until the bus is available before we can issue
427 // the command; need minimum of tBURST between commands
428 Tick cmd_at = std::max(bst_allowed_at, curTick());
429
430 // we need to wait until the bus is available before we can issue
431 // the command; need minimum of tBURST between commands
432 cmd_at = std::max(cmd_at, next_burst_at);
433
434 // Verify there is command bandwidth to issue
435 // Read burst (send command) is a simple data access and only requires
436 // one command cycle
437 // Write command may require multiple cycles to enable larger address space
438 if (pkt->isRead() || !twoCycleRdWr) {
439 cmd_at = ctrl->verifySingleCmd(cmd_at, maxCommandsPerWindow, false);
440 } else {
441 cmd_at = ctrl->verifyMultiCmd(cmd_at, maxCommandsPerWindow, tCK);
442 }
443 // update the packet ready time to reflect when data will be transferred
444 // Use the same bus delays defined for NVM
445 pkt->readyTime = cmd_at + tSEND + tBURST;
446
447 Tick dly_to_rd_cmd;
448 Tick dly_to_wr_cmd;
449 for (auto n : ranks) {
450 for (int i = 0; i < banksPerRank; i++) {
451 // base delay is a function of tBURST and bus turnaround
452 dly_to_rd_cmd = pkt->isRead() ? tBURST : writeToReadDelay();
453 dly_to_wr_cmd = pkt->isRead() ? readToWriteDelay() : tBURST;
454
455 if (pkt->rank != n->rank) {
456 // adjust timing for different ranks
457 // Need to account for rank-to-rank switching with tCS
458 dly_to_wr_cmd = rankToRankDelay();
459 dly_to_rd_cmd = rankToRankDelay();
460 }
461 n->banks[i].rdAllowedAt = std::max(cmd_at + dly_to_rd_cmd,
462 n->banks[i].rdAllowedAt);
463
464 n->banks[i].wrAllowedAt = std::max(cmd_at + dly_to_wr_cmd,
465 n->banks[i].wrAllowedAt);
466 }
467 }
468
469 DPRINTF(NVM, "NVM Access to %#x, ready at %lld.\n",
470 pkt->addr, pkt->readyTime);
471
472 if (pkt->isRead()) {
473 // completed the read, decrement counters
474 assert(numPendingReads != 0);
475 assert(numReadDataReady != 0);
476
477 numPendingReads--;
478 numReadDataReady--;
479 } else {
480 // Adjust number of NVM writes in Q
481 assert(numWritesQueued > 0);
482 numWritesQueued--;
483
484 // increment the bytes accessed and the accesses per row
485 // only increment for writes as the reads are handled when
486 // the non-deterministic read is issued, before the data transfer
487 bank_ref.bytesAccessed += burstSize;
488
489 // Commands will be issued serially when accessing the same bank
490 // Commands can issue in parallel to different banks
491 if ((bank_ref.bank == pkt->bank) &&
492 (bank_ref.openRow != pkt->row)) {
493 // update the open buffer, re-using row field
494 bank_ref.openRow = pkt->row;
495
496 // sample the bytes accessed to a buffer in this bank
497 // here when we are re-buffering the data
498 stats.bytesPerBank.sample(bank_ref.bytesAccessed);
499 // start counting anew
500 bank_ref.bytesAccessed = 0;
501 }
502
503 // Determine when write will actually complete, assuming it is
504 // scheduled to push to NVM immediately
505 // update actAllowedAt to serialize next command completion that
506 // accesses this bank; must wait until this write completes
507 // Data accesses to the same buffer in this bank
508 // can issue immediately after actAllowedAt expires, without
509 // waiting additional delay of tWRITE. Can revisit this
510 // assumption/simplification in the future.
511 bank_ref.actAllowedAt = std::max(pkt->readyTime,
512 bank_ref.actAllowedAt) + tWRITE;
513
514 // Need to track number of outstanding writes to
515 // ensure 'buffer' on media controller does not overflow
516 assert(!writeRespQueueFull());
517
518 // Insert into write done queue. It will be handled after
519 // the media delay has been met
520 if (writeRespQueueEmpty()) {
521 assert(!writeRespondEvent.scheduled());
522 schedule(writeRespondEvent, bank_ref.actAllowedAt);
523 } else {
524 assert(writeRespondEvent.scheduled());
525 }
526 writeRespQueue.push_back(bank_ref.actAllowedAt);
527 writeRespQueue.sort();
528 if (writeRespondEvent.when() > bank_ref.actAllowedAt) {
529 DPRINTF(NVM, "Rescheduled respond event from %lld to %11d\n",
531 DPRINTF(NVM, "Front of response queue is %11d\n",
532 writeRespQueue.front());
534 }
535
536 }
537
538 // Update the stats
539 if (pkt->isRead()) {
540 stats.readBursts++;
541 stats.nvmBytesRead += burstSize;
542 stats.perBankRdBursts[pkt->bankId]++;
543 stats.pendingReads.sample(numPendingReads);
544
545 // Update latency stats
546 stats.totMemAccLat += pkt->readyTime - pkt->entryTime;
548 stats.totQLat += cmd_at - pkt->entryTime;
549 } else {
550 stats.writeBursts++;
551 stats.nvmBytesWritten += burstSize;
552 stats.perBankWrBursts[pkt->bankId]++;
553 }
554
555 return std::make_pair(cmd_at, cmd_at + tBURST);
556}
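As an illustrative aside (not part of this file): a condensed sketch of the bank-timing update loop above, with hypothetical fixed delays standing in for writeToReadDelay(), readToWriteDelay() and rankToRankDelay().

#include <algorithm>
#include <cstdint>
#include <vector>

using Tick = uint64_t;

struct Bank { Tick rdAllowedAt = 0; Tick wrAllowedAt = 0; };
struct Rank { int rank; std::vector<Bank> banks; };

// Push out the earliest allowed read/write times of every bank after a
// burst issued at cmd_at to 'my_rank' (delay values are hypothetical).
void updateBankTiming(std::vector<Rank>& ranks, int my_rank, bool is_read,
                      Tick cmd_at)
{
    const Tick tBURST = 3200, tWTR = 5000, tRTW = 4000, tCS = 1000;
    for (auto& r : ranks) {
        Tick dly_to_rd = is_read ? tBURST : tWTR;  // write-to-read turnaround
        Tick dly_to_wr = is_read ? tRTW : tBURST;  // read-to-write turnaround
        if (r.rank != my_rank)
            dly_to_rd = dly_to_wr = tBURST + tCS;  // rank-to-rank switching
        for (auto& b : r.banks) {
            b.rdAllowedAt = std::max(cmd_at + dly_to_rd, b.rdAllowedAt);
            b.wrAllowedAt = std::max(cmd_at + dly_to_wr, b.wrAllowedAt);
        }
    }
}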
557
558void
559NVMInterface::processWriteRespondEvent()
560{
561 DPRINTF(NVM,
562 "processWriteRespondEvent(): A NVM write reached its readyTime. "
563 "%d remaining pending NVM writes\n", writeRespQueue.size());
564
565 // Update stat to track histogram of pending writes
566 stats.pendingWrites.sample(writeRespQueue.size());
567
568 // Done with this response, pop entry
569 writeRespQueue.pop_front();
570
571 if (!writeRespQueue.empty()) {
572 assert(writeRespQueue.front() >= curTick());
573 assert(!writeRespondEvent.scheduled());
574 schedule(writeRespondEvent, writeRespQueue.front());
575 }
576
577 // It is possible that a new command kicks things back into
578 // action before reaching this point but need to ensure that we
579 // continue to process new commands as writes complete at the media and
580 // credits become available. This will also trigger a drain if needed
581 if (!ctrl->requestEventScheduled()) {
582 DPRINTF(NVM, "Restart controller scheduler immediately\n");
583 ctrl->restartScheduler(curTick());
584 }
585}
586
587void
588NVMInterface::addRankToRankDelay(Tick cmd_at)
589{
590 // update timing for NVM ranks due to bursts issued
591 // to ranks for other media interfaces
592 for (auto n : ranks) {
593 for (int i = 0; i < banksPerRank; i++) {
594 // different rank by default
595 // Need to only account for rank-to-rank switching
596 n->banks[i].rdAllowedAt = std::max(cmd_at + rankToRankDelay(),
597 n->banks[i].rdAllowedAt);
598 n->banks[i].wrAllowedAt = std::max(cmd_at + rankToRankDelay(),
599 n->banks[i].wrAllowedAt);
600 }
601 }
602}
603
604bool
605NVMInterface::isBusy(bool read_queue_empty, bool all_writes_nvm)
606{
607 DPRINTF(NVM,"isBusy: numReadDataReady = %d\n", numReadDataReady);
608 // Determine NVM is busy and cannot issue a burst
609 // A read burst cannot issue when data is not ready from the NVM
610 // Also check that we have reads queued to ensure we can change
611 // bus direction to service potential write commands.
612 // A write cannot issue once we've reached MAX pending writes
613 // Only assert busy for the write case when there are also
614 // no reads in Q and the write queue only contains NVM commands
615 // This allows the bus state to switch and service reads
616 return (ctrl->inReadBusState(true, this) ?
617 (numReadDataReady == 0) && !read_queue_empty :
618 writeRespQueueFull() && read_queue_empty &&
619 all_writes_nvm);
620}
621
622NVMInterface::NVMStats::NVMStats(NVMInterface &_nvm)
623 : statistics::Group(&_nvm),
624 nvm(_nvm),
625
626 ADD_STAT(readBursts, statistics::units::Count::get(),
627 "Number of NVM read bursts"),
628 ADD_STAT(writeBursts, statistics::units::Count::get(),
629 "Number of NVM write bursts"),
630
631 ADD_STAT(perBankRdBursts, statistics::units::Count::get(),
632 "Per bank write bursts"),
633 ADD_STAT(perBankWrBursts, statistics::units::Count::get(),
634 "Per bank write bursts"),
635
636 ADD_STAT(totQLat, statistics::units::Tick::get(),
637 "Total ticks spent queuing"),
638 ADD_STAT(totBusLat, statistics::units::Tick::get(),
639 "Total ticks spent in databus transfers"),
640 ADD_STAT(totMemAccLat, statistics::units::Tick::get(),
641 "Total ticks spent from burst creation until serviced "
642 "by the NVM"),
643 ADD_STAT(avgQLat, statistics::units::Rate<
644 statistics::units::Tick, statistics::units::Count>::get(),
645 "Average queueing delay per NVM burst"),
646 ADD_STAT(avgBusLat, statistics::units::Rate<
647 statistics::units::Tick, statistics::units::Count>::get(),
648 "Average bus latency per NVM burst"),
649 ADD_STAT(avgMemAccLat, statistics::units::Rate<
650 statistics::units::Tick, statistics::units::Count>::get(),
651 "Average memory access latency per NVM burst"),
652
653 ADD_STAT(nvmBytesRead, statistics::units::Byte::get(),
654 "Total bytes read"),
655 ADD_STAT(nvmBytesWritten, statistics::units::Byte::get(),
656 "Total bytes written"),
657
658 ADD_STAT(avgRdBW, statistics::units::Rate<
659 statistics::units::Byte, statistics::units::Second>::get(),
660 "Average DRAM read bandwidth in MiBytes/s"),
661 ADD_STAT(avgWrBW, statistics::units::Rate<
662 statistics::units::Byte, statistics::units::Second>::get(),
663 "Average DRAM write bandwidth in MiBytes/s"),
664 ADD_STAT(peakBW, statistics::units::Rate<
665 statistics::units::Byte, statistics::units::Second>::get(),
666 "Theoretical peak bandwidth in MiByte/s"),
667 ADD_STAT(busUtil, statistics::units::Ratio::get(),
668 "NVM Data bus utilization in percentage"),
669 ADD_STAT(busUtilRead, statistics::units::Ratio::get(),
670 "NVM Data bus read utilization in percentage"),
671 ADD_STAT(busUtilWrite, statistics::units::Ratio::get(),
672 "NVM Data bus write utilization in percentage"),
673
674 ADD_STAT(pendingReads, statistics::units::Count::get(),
675 "Reads issued to NVM for which data has not been transferred"),
676 ADD_STAT(pendingWrites, statistics::units::Count::get(),
677 "Number of outstanding writes to NVM"),
678 ADD_STAT(bytesPerBank, statistics::units::Byte::get(),
679 "Bytes read within a bank before loading new bank")
680
681{
682}
683
684void
685NVMInterface::NVMStats::regStats()
686{
687 using namespace statistics;
688
689 perBankRdBursts.init(nvm.ranksPerChannel == 0 ? 1 :
690 nvm.banksPerRank * nvm.ranksPerChannel);
691
692 perBankWrBursts.init(nvm.ranksPerChannel == 0 ? 1 :
693 nvm.banksPerRank * nvm.ranksPerChannel);
694
695 avgQLat.precision(2);
696 avgBusLat.precision(2);
697 avgMemAccLat.precision(2);
698
699 avgRdBW.precision(2);
700 avgWrBW.precision(2);
701 peakBW.precision(2);
702
703 busUtil.precision(2);
704 busUtilRead.precision(2);
705 busUtilWrite.precision(2);
706
707 pendingReads
708 .init(nvm.maxPendingReads)
709 .flags(nozero);
710
711 pendingWrites
712 .init(nvm.maxPendingWrites)
713 .flags(nozero);
714
715 bytesPerBank
716 .init(nvm.rowBufferSize)
717 .flags(nozero);
718
719 avgQLat = totQLat / readBursts;
720 avgBusLat = totBusLat / readBursts;
721 avgMemAccLat = totMemAccLat / readBursts;
722
723 avgRdBW = (nvmBytesRead / 1000000) / simSeconds;
724 avgWrBW = (nvmBytesWritten / 1000000) / simSeconds;
725 peakBW = (sim_clock::Frequency / nvm.tBURST) *
726 nvm.burstSize / 1000000;
727
728 busUtil = (avgRdBW + avgWrBW) / peakBW * 100;
729 busUtilRead = avgRdBW / peakBW * 100;
730 busUtilWrite = avgWrBW / peakBW * 100;
731}
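As an illustrative aside (not part of this file): a small standalone check of the peakBW expression above, assuming gem5's default of one tick per picosecond and hypothetical timing values (a 3.2 ns tBURST and 64 B bursts).

#include <cassert>
#include <cstdint>

int main()
{
    // hypothetical values; gem5 ticks are picoseconds by default, so the
    // simulated frequency is 1e12 ticks per second
    const uint64_t frequency = 1000000000000ULL; // ticks per second
    const uint64_t tBURST    = 3200;             // ticks per burst (3.2 ns)
    const uint64_t burstSize = 64;               // bytes per burst

    // same expression as peakBW in regStats() above (note: dividing by 1e6
    // yields MB/s rather than true MiB/s)
    const uint64_t peakBW = (frequency / tBURST) * burstSize / 1000000;
    assert(peakBW == 20000); // 20 GB/s peak for this hypothetical device
    return 0;
}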
732
733} // namespace memory
734} // namespace gem5
#define DPRINTF(x,...)
Definition trace.hh:210
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition packet.hh:295
uint64_t size() const
Get the memory size.
bool inReadBusState(bool next_state, const MemInterface *mem_intr) const
Check the current direction of the memory channel.
Definition mem_ctrl.cc:770
virtual Tick verifyMultiCmd(Tick cmd_tick, Tick max_cmds_per_burst, Tick max_multi_cmd_split=0)
Check for command bus contention for multi-cycle (2 currently) command.
Definition mem_ctrl.cc:706
virtual bool requestEventScheduled(uint8_t pseudo_channel=0) const
Is there a read/write burst Event scheduled?
Definition mem_ctrl.hh:738
virtual void restartScheduler(Tick tick, uint8_t pseudo_channel=0)
restart the controller. This can be used by interfaces to restart the scheduler after maintenance com...
Definition mem_ctrl.hh:753
virtual Tick verifySingleCmd(Tick cmd_tick, Tick max_cmds_per_burst, bool row_cmd)
Check for command bus contention for single cycle command.
Definition mem_ctrl.cc:683
A basic class to track the bank state, i.e.
General interface to memory device Includes functions and parameters shared across media types.
enums::AddrMap addrMapping
Memory controller configuration initialized based on parameter values.
virtual Tick writeToReadDelay() const
MemCtrl * ctrl
A pointer to the parent memory controller instance.
Addr getCtrlAddr(Addr addr)
Get an address in a dense range which starts from 0.
const uint32_t burstSize
General device and channel characteristics The rowsPerBank is determined based on the capacity,...
unsigned int maxCommandsPerWindow
Number of commands that can issue in the defined controller command window, used to verify command ba...
const uint32_t burstsPerRowBuffer
GEM5_CLASS_VAR_USED const Tick tCK
General timing requirements.
uint32_t numWritesQueued
NVM specific variable, but declaring it here allows treating different interfaces in a more general wa...
A memory packet stores packets along with the timestamp of when the packet entered the queue,...
Definition mem_ctrl.hh:99
Tick readyTime
When will request leave the controller.
Definition mem_ctrl.hh:106
const uint16_t bankId
Bank id is calculated considering banks in all the ranks eg: 2 ranks each with 8 banks,...
Definition mem_ctrl.hh:132
const uint32_t row
Definition mem_ctrl.hh:125
Addr addr
The starting address of the packet.
Definition mem_ctrl.hh:140
bool isDram() const
Return true if its a DRAM access.
Definition mem_ctrl.hh:204
bool isRead() const
Return true if its a read packet (interface compatibility with Packet)
Definition mem_ctrl.hh:193
const Tick entryTime
When did request enter the controller.
Definition mem_ctrl.hh:103
const uint8_t rank
Will be populated by address decoder.
Definition mem_ctrl.hh:123
Rank(const NVMInterfaceParams &_p, int _rank, NVMInterface &_nvm)
std::vector< Bank > banks
Vector of NVM banks.
Interface to NVM devices with media specific parameters, statistics, and functions.
std::pair< MemPacketQueue::iterator, Tick > chooseNextFRFCFS(MemPacketQueue &queue, Tick min_col_at) const override
For FR-FCFS policy, find first NVM command that can issue default to first command to prepped region.
std::pair< Tick, Tick > doBurstAccess(MemPacket *pkt, Tick next_burst_at, const std::vector< MemPacketQueue > &queue) override
Actually do the burst and update stats.
void init() override
Initialize the NVM interface and verify parameters.
void setupRank(const uint8_t rank, const bool is_read) override
Setup the rank based on packet received.
void chooseRead(MemPacketQueue &queue) override
Select read command to issue asynchronously.
MemPacket * decodePacket(const PacketPtr pkt, Addr pkt_addr, unsigned int size, bool is_read, uint8_t pseudo_channel=0) override
Address decoder to figure out physical mapping onto ranks, banks, and rows.
Tick nextReadAt
Till when must we wait before issuing next read command?
NVMInterface(const NVMInterfaceParams &_p)
std::vector< Rank * > ranks
Vector of nvm ranks.
void addRankToRankDelay(Tick cmd_at) override
Add rank to rank delay to bus timing to all NVM banks in all ranks when access to an alternate inter...
bool isBusy(bool read_queue_empty, bool all_writes_nvm) override
This function checks if ranks are busy.
std::list< Tick > writeRespQueue
Holding queue for non-deterministic write commands, which maintains writes that have been issued but ...
bool writeRespQueueFull() const override
Check if the write response queue has reached defined threshold.
std::deque< Tick > readReadyQueue
bool burstReady(MemPacket *pkt) const override
Check if a burst operation can be issued to the NVM.
const Tick tREAD
NVM specific timing requirements.
EventFunctionWrapper writeRespondEvent
bool writeRespQueueEmpty() const
Check if the write response queue is empty.
EventFunctionWrapper readReadyEvent
void sample(const U &v, int n=1)
Add a value to the distribution n times.
Statistics container.
Definition group.hh:93
STL deque class.
Definition stl.hh:44
STL pair class.
Definition stl.hh:58
STL vector class.
Definition stl.hh:37
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Definition group.hh:75
static constexpr int ceilLog2(const T &n)
Definition intmath.hh:84
static constexpr bool isPowerOf2(const T &n)
Definition intmath.hh:98
bool scheduled() const
Determine if the current event is scheduled.
Definition eventq.hh:458
void schedule(Event &event, Tick when)
Definition eventq.hh:1012
void reschedule(Event &event, Tick when, bool always=false)
Definition eventq.hh:1030
Tick when() const
Get the time that the event is scheduled.
Definition eventq.hh:501
#define panic(...)
This implements a cprintf based panic() function.
Definition logging.hh:188
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condi...
Definition logging.hh:236
virtual void init()
init() is called after all C++ SimObjects have been created and all ports are connected.
Definition sim_object.cc:73
Bitfield< 31 > n
Bitfield< 7 > b
Bitfield< 7 > i
Definition misc_types.hh:67
Bitfield< 3 > addr
Definition types.hh:84
Tick Frequency
The simulated frequency of curTick(). (In ticks per second)
Definition core.cc:47
const FlagsType nozero
Don't print if this is zero.
Definition info.hh:67
Copyright (c) 2024 - Pranith Kumar Copyright (c) 2020 Inria All rights reserved.
Definition binary32.hh:36
Tick curTick()
The universal simulation clock.
Definition cur_tick.hh:46
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition types.hh:147
uint64_t Tick
Tick count type.
Definition types.hh:58
const Tick MaxTick
Definition types.hh:60
statistics::Formula & simSeconds
Definition stats.cc:45
NVMInterface declaration.
void regStats() override
Callback to set stat parameters.
statistics::Histogram pendingReads
NVM stats.
statistics::Scalar readBursts
NVM stats.
Definition mem.h:38
const std::string & name()
Definition trace.cc:48
