spatter_gen.cc (gem5 v24.0.0.0)
/*
 * Copyright (c) 2024 The Regents of The University of California
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "cpu/testers/spatter_gen/spatter_gen.hh"

#include "base/cprintf.hh"
#include "debug/SpatterGen.hh"
#include "debug/SpatterKernel.hh"
#include "enums/SpatterKernelType.hh"
#include "enums/SpatterProcessingMode.hh"
#include "mem/packet.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

namespace gem5
{

using enums::SpatterKernelTypeStrings;
using enums::SpatterProcessingMode;

SpatterGen::SpatterGen(const Params& params):
    ClockedObject(params),
    state(SpatterGenState::RUNNING),
    requestorId(params.system->getRequestorId(this)),
    numPendingMemRequests(0),
    stats(this),
    mode(params.processing_mode),
    port(this, name() + ".port"),
    intRegFileSize(params.int_regfile_size), intRegUsed(0),
    fpRegFileSize(params.fp_regfile_size), fpRegUsed(0),
    requestGenLatency(params.request_gen_latency),
    requestGenRate(params.request_gen_rate),
    firstGeneratorAvailableTime(0),
    nextGenEvent([this](){ processNextGenEvent(); }, name() + ".GenEvent"),
    requestBufferEntries(params.request_buffer_entries),
    requestBuffer(clockPeriod()),
    sendRate(params.send_rate),
    firstPortAvailableTime(0),
    nextSendEvent([this](){ processNextSendEvent(); }, name() + ".SendEvent"),
    receiveBuffer(clockPeriod())
{
    fatal_if(fpRegFileSize < requestBufferEntries,
        "fp_regfile_size should be >= request_buffer_entries. "
        "If request_buffer_entries is bigger than fp_regfile_size, "
        "it may result in inaccuracies in your simulation. "
        "Ideally: fp_regfile_size >> request_buffer_entries."
    );
    generatorBusyUntil.resize(requestGenRate, 0);
    portBusyUntil.resize(sendRate, 0);
}

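// Editor's note (worked example; the parameter values here are
// hypothetical, not defaults): with request_gen_rate = 4 and
// request_gen_latency = 2, up to four AGUs may each start one access per
// cycle, and an AGU that issues stays busy until clockEdge(Cycles(2));
// with send_rate = 2, at most two packets leave through the port per
// cycle. The fatal_if above exists because every in-flight value access
// holds an fp register until its response returns (see recvTimingResp),
// so a request buffer deeper than the fp register file could stall in
// ways this model does not simulate accurately.
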
Port&
SpatterGen::getPort(const std::string& if_name, PortID idx)
{
    if (if_name == "port") {
        return port;
    } else {
        return ClockedObject::getPort(if_name, idx);
    }
}

void
SpatterGen::startup()
{
    // Kick off request generation with the first GenEvent.
    scheduleNextGenEvent(curTick());
}

void
SpatterGen::SpatterGenPort::sendPacket(PacketPtr pkt)
{
    panic_if(blocked(), "Should never try to send if port is blocked.");
    if (!sendTimingReq(pkt)) {
        blockedPacket = pkt;
        DPRINTF(
            SpatterGen,
            "%s: Port blocked when sending %s.\n",
            __func__, pkt->print()
        );
    }
}

void
SpatterGen::SpatterGenPort::recvReqRetry()
{
    DPRINTF(SpatterGen, "%s: Port received a ReqRetry.\n", __func__);
    panic_if(
        blockedPacket == nullptr,
        "Received reqRetry with no blocked packet."
    );
    if (!sendTimingReq(blockedPacket)) {
        DPRINTF(
            SpatterGen,
            "%s: Port blocked when sending %s.\n",
            __func__, blockedPacket->print()
        );
    } else {
        blockedPacket = nullptr;
        owner->recvReqRetry();
    }
}

void
SpatterGen::recvReqRetry()
{
    // The port has unblocked; wake the send machinery (which went to
    // sleep when the port blocked) and retry from the next cycle.
    nextSendEvent.wake();
    scheduleNextSendEvent(nextCycle());
}

bool
SpatterGen::SpatterGenPort::recvTimingResp(PacketPtr pkt)
{
    return owner->recvTimingResp(pkt);
}

bool
SpatterGen::recvTimingResp(PacketPtr pkt)
{
    DPRINTF(SpatterGen, "%s: Received pkt: %s.\n", __func__, pkt->print());
    assert(pkt->isResponse());

    // record trip time.
    SpatterAccess* spatter_access = pkt->findNextSenderState<SpatterAccess>();
    Tick trip_time = (curTick() - requestDepartureTime[pkt->req]);
    requestDepartureTime.erase(pkt->req);
    spatter_access->recordTripTime(trip_time);

    int trips_left = spatter_access->tripsLeft();
    assert(trips_left >= 0);
    if (trips_left > 0) {
        stats.numIndexReads++;
        stats.indexBytesRead += pkt->getSize();
        stats.totalIndexReadLatency += trip_time;
        stats.indexAccessLatency.sample(trip_time);
        receiveBuffer.push(spatter_access, curTick());
    } else {
        stats.valueAccessLatency.sample(trip_time);
        stats.totalIndirectAccessLatency.sample(
            spatter_access->tripTimeSoFar()
        );
        if (spatter_access->type() == SpatterKernelType::gather) {
            stats.numValueReads++;
            stats.valueBytesRead += pkt->getSize();
            stats.totalValueReadLatency += trip_time;
        } else if (spatter_access->type() == SpatterKernelType::scatter) {
            stats.numValueWrites++;
            stats.valueBytesWritten += pkt->getSize();
            stats.totalValueWriteLatency += trip_time;
        } else {
            panic("Unknown kernel type.");
        }
        // CAUTION: We decrement fpRegUsed here, which could cause
        // inaccuracies if processNextGenEvent is called after
        // recvTimingResp on the same tick, i.e. we might release a
        // register on the same cycle that we allocate it. This is
        // unlikely to ever be an issue in practice, since fpRegFileSize
        // is expected to be >> requestBufferEntries: the chance of
        // running out of fp registers is low because we do not simulate
        // the parts of the pipeline that back things up into fp
        // registers, e.g. the functional units of the ALU.
        fpRegUsed--;
        delete spatter_access;
    }

    // delete the pkt since we don't need it anymore.
    delete pkt;

    if (!nextGenEvent.pending()) {
        scheduleNextGenEvent(nextCycle());
    }

    numPendingMemRequests--;
    checkForSimExit();
    return true;
}

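// Editor's sketch of the response flow above for a gather with a single
// level of indirection (tick values are hypothetical):
//
//     t=1000: index read departs; requestDepartureTime[req] = 1000
//     t=1040: index response arrives; trip_time = 40, tripsLeft() > 0,
//             so it counts toward the index stats and the SpatterAccess
//             is recycled through receiveBuffer
//     t=1041: value read is generated from the returned index
//     t=1120: value response arrives; tripsLeft() == 0, so it counts
//             toward the value stats, fpRegUsed--, and the access retires
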
void
SpatterGen::addKernel(
    uint32_t id, uint32_t delta, uint32_t count,
    SpatterKernelType type,
    size_t index_size, Addr base_index_addr,
    size_t value_size, Addr base_value_addr,
    const std::vector<uint32_t>& indices
)
{
    DPRINTF(
        SpatterKernel,
        "%s: Adding kernel with id: %d, delta: %d, count: %d, type: %s.\n",
        __func__, id, delta, count, SpatterKernelTypeStrings[type]
    );
    SpatterKernel new_kernel(
        requestorId,
        id, delta, count, type,
        index_size, base_index_addr,
        value_size, base_value_addr
    );
    new_kernel.setIndices(indices);
    kernels.push(new_kernel);
}

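// Editor's sketch (not part of the original file): how a caller might
// register a simple strided gather kernel. The SpatterGen pointer `gen`
// and all constants below are hypothetical:
//
//     std::vector<uint32_t> indices = {0, 1, 2, 3, 4, 5, 6, 7};
//     gen->addKernel(/* id */ 0, /* delta */ 8, /* count */ 4,
//                    SpatterKernelType::gather,
//                    /* index_size */ 4, /* base_index_addr */ 0x100000,
//                    /* value_size */ 8, /* base_value_addr */ 0x200000,
//                    indices);
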
void
SpatterGen::proceedPastSyncPoint()
{
    assert(mode == SpatterProcessingMode::synchronous);
    assert(state == SpatterGenState::WAITING);
    state = SpatterGenState::RUNNING;
    scheduleNextGenEvent(nextCycle());
}

void
SpatterGen::checkForSimExit()
{
    bool no_pending = numPendingMemRequests == 0;
    bool no_queued = requestBuffer.empty();
    int avail_int_regs = intRegFileSize - intRegUsed;
    int avail_fp_regs = fpRegFileSize - fpRegUsed;
    bool can_do_init = initAccessOk(avail_int_regs, avail_fp_regs, curTick());
    bool can_do_mid = interAccessOk(avail_int_regs, avail_fp_regs, curTick());
    bool can_do_ult = ultAccessOk(avail_int_regs, avail_fp_regs, curTick());
    if (!can_do_init && !can_do_mid && !can_do_ult && no_pending && no_queued)
    {
        assert((
            (mode == SpatterProcessingMode::synchronous) &&
            (state == SpatterGenState::DRAINING)
            ) ||
            mode == SpatterProcessingMode::asynchronous
        );
        state = SpatterGenState::WAITING;
        exitSimLoop(
            csprintf("%s received all expected responses.", name()),
            0,
            nextCycle()
        );
    }
}

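// Editor's note: in synchronous mode the exitSimLoop above acts as a
// sync point rather than a final exit. The state walks RUNNING ->
// DRAINING (when a kernel's last initial access issues, see
// processNextGenEvent) -> WAITING (here, once all responses are back);
// the simulation script is then expected to call proceedPastSyncPoint()
// on every generator, which returns the state to RUNNING and restarts
// generation. In asynchronous mode the exit is simply taken once no
// work remains.
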
bool
SpatterGen::initAccessOk(int int_regs, int fp_regs, Tick when) const
{
    bool have_int_reg = int_regs > 0;
    // For mode == SpatterProcessingMode::asynchronous, state will always
    // be SpatterGenState::RUNNING, so no mode check is needed. For
    // mode == SpatterProcessingMode::synchronous, if state is
    // SpatterGenState::DRAINING or SpatterGenState::WAITING,
    // we can't initiate any new indirect accesses.
    bool have_kernel = !kernels.empty() && (state == SpatterGenState::RUNNING);
    return have_kernel && have_int_reg;
}

bool
SpatterGen::interAccessOk(int int_regs, int fp_regs, Tick when) const
{
    bool have_int_reg = int_regs > 0;
    bool have_index = receiveBuffer.hasReady(when);
    bool mid_idx = have_index && (receiveBuffer.front()->tripsLeft() > 1);
    return mid_idx && have_int_reg;
}

bool
SpatterGen::ultAccessOk(int int_regs, int fp_regs, Tick when) const
{
    bool have_fp_reg = fp_regs > 0;
    bool have_index = receiveBuffer.hasReady(when);
    bool val_idx = have_index && (receiveBuffer.front()->tripsLeft() == 1);
    return val_idx && have_fp_reg;
}

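// Editor's worked example of the three predicates above: for a plain
// gather (two memory trips: an index read, then a value read),
// initAccessOk gates the index read of a fresh access from the front
// kernel (needs an int register), ultAccessOk gates the final
// (ultimate) value access of a returned index with tripsLeft() == 1
// (needs an fp register, held until the response returns), and
// interAccessOk only fires for deeper indirection chains, where a
// returned index with tripsLeft() > 1 must fetch yet another index
// (needs an int register).
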
void
SpatterGen::scheduleNextGenEvent(Tick when)
{
    int avail_int_regs = intRegFileSize - intRegUsed;
    int avail_fp_regs = fpRegFileSize - fpRegUsed;
    bool have_work = initAccessOk(avail_int_regs, avail_fp_regs, curTick()) ||
                     interAccessOk(avail_int_regs, avail_fp_regs, curTick()) ||
                     ultAccessOk(avail_int_regs, avail_fp_regs, curTick());
    Tick schedule_tick = std::max(when, firstGeneratorAvailableTime);
    if (have_work && (!nextGenEvent.scheduled())) {
        schedule(nextGenEvent, schedule_tick);
        firstGeneratorAvailableTime = MaxTick;
    }
}

void
SpatterGen::processNextGenEvent()
{
    assert(!nextGenEvent.pending());
    int req_buf_before = requestBuffer.size();
    // Track changes to intRegUsed in this variable and apply it at the
    // end of the for loop. This way, if we free a register in the for
    // loop, other iterations of the for loop won't observe this change.
    // This matches what happens in real h/w.
    int int_used_now = 0;
    // Track this independently to prevent different iterations inside
    // the for loop from observing changes to h/w resources, i.e. we
    // can't rely on intRegFileSize - intRegUsed to see if we have
    // registers to allocate, since they don't change until after the
    // for loop.
    int int_regs_now = intRegFileSize - intRegUsed;
    // same explanation as int_used_now
    int fp_used_now = 0;
    // same explanation as int_regs_now
    int fp_regs_now = fpRegFileSize - fpRegUsed;
    for (int i = 0; i < requestGenRate; i++) {
        if (generatorBusyUntil[i] > curTick()) {
            DPRINTF(
                SpatterGen,
                "%s: AGU[%d] is busy this cycle.\n", __func__, i
            );
            continue;
        }
        if (!(requestBuffer.size() < requestBufferEntries)) {
            // If no space is left in the requestBuffer, sleep;
            // whoever pops from requestBuffer wakes us up.
            nextGenEvent.sleep();
            break;
        }
        // Now we know that AGU[i] is available and there is room
        // in the requestBuffer to put the packet.
        if (ultAccessOk(int_regs_now, fp_regs_now, curTick())) {
            // occupy one fp register
            fp_regs_now--;
            fp_used_now++;
            // make AGU busy for the next requestGenLatency cycles.
            generatorBusyUntil[i] = clockEdge(requestGenLatency);

            // create a new packet to access
            SpatterAccess* spatter_access = receiveBuffer.front();
            PacketPtr pkt = spatter_access->nextPacket();
            pkt->pushSenderState(spatter_access);

            // push to requestBuffer
            requestBuffer.push(pkt, curTick());
            DPRINTF(
                SpatterGen,
                "%s: Pushed pkt: %s to requestBuffer.\n",
                __func__, pkt->print()
            );

            // now deallocate resources for reading the index
            int_used_now--;
            receiveBuffer.pop();
        } else if (interAccessOk(int_regs_now, fp_regs_now, curTick())) {
            // occupy one int register
            int_regs_now--;
            int_used_now++;
            // make AGU busy for the next requestGenLatency cycles.
            generatorBusyUntil[i] = clockEdge(requestGenLatency);

            // create a new packet to access
            SpatterAccess* spatter_access = receiveBuffer.front();
            PacketPtr pkt = spatter_access->nextPacket();
            pkt->pushSenderState(spatter_access);

            // push to requestBuffer
            requestBuffer.push(pkt, curTick());
            DPRINTF(
                SpatterGen,
                "%s: Pushed pkt: %s to requestBuffer.\n",
                __func__, pkt->print()
            );

            // now deallocate resources for reading the index
            int_used_now--;
            receiveBuffer.pop();
        } else if (initAccessOk(int_regs_now, fp_regs_now, curTick())) {
            // occupy one int register
            int_regs_now--;
            int_used_now++;
            generatorBusyUntil[i] = clockEdge(requestGenLatency);

            SpatterKernel& front = kernels.front();
            SpatterAccess* spatter_access = front.nextSpatterAccess();
            PacketPtr pkt = spatter_access->nextPacket();
            pkt->pushSenderState(spatter_access);

            requestBuffer.push(pkt, curTick());
            DPRINTF(
                SpatterGen,
                "%s: Pushed pkt: %s to requestBuffer.\n",
                __func__, pkt->print()
            );

            if (front.done()) {
                DPRINTF(
                    SpatterKernel,
                    "%s: Done with kernel %d type: %s.\n",
                    __func__, front.id(),
                    SpatterKernelTypeStrings[front.type()]
                );
                kernels.pop();
                // If we're processing synchronously, we now have to stop
                // making initial accesses and wait for everyone to
                // receive all expected responses.
                if (mode == SpatterProcessingMode::synchronous) {
                    state = SpatterGenState::DRAINING;
                }
            }
        } else {
            // No access could be initiated this cycle.
            DPRINTF(
                SpatterGen,
                "%s: Nothing more could be done this cycle.\n", __func__
            );
            DPRINTF(SpatterGen, "%s: Here is h/w status report: "
                "{KERNELS_REMAIN: %d, INDEXES_REMAIN: %d, INT_REG_USED: %d, "
                "FP_REG_USED: %d, REQ_BUFF_SIZE: %d}.\n",
                __func__, kernels.size(), receiveBuffer.size(),
                intRegUsed, fpRegUsed, requestBuffer.size());
            break;
        }
    }
438
439 // update firstGeneratorAvailableTime after making all changes.
440 for (int i = 0; i < requestGenRate; i++) {
445 );
446 }
447
448 // now that we have simulated all the work of this cycle, we can
449 // apply the deltas to the h/w resources.
450 intRegUsed += int_used_now;
451 fpRegUsed += fp_used_now;
452
453 bool did_work = (requestBuffer.size() - req_buf_before) > 0;
454 if (did_work && (!nextSendEvent.pending())) {
456 }
457
458 if (!nextGenEvent.pending()) {
460 }
461}
462
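// Editor's worked example of the shadow bookkeeping above (sizes
// hypothetical): with intRegFileSize = 2 and intRegUsed = 1, the loop
// starts with int_regs_now = 1. If AGU[0] issues an ultimate access, it
// frees the index's int register (int_used_now becomes -1) and takes an
// fp register (fp_used_now becomes +1), yet AGU[1] still sees
// int_regs_now = 1: the freed register only becomes visible once the
// deltas are applied to intRegUsed/fpRegUsed after the loop, mimicking
// h/w issue logic that does not bypass same-cycle register release.
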
void
SpatterGen::scheduleNextSendEvent(Tick when)
{
    bool have_work = !requestBuffer.empty();
    Tick schedule_tick = std::max(when, firstPortAvailableTime);
    if (have_work && (!nextSendEvent.scheduled())) {
        schedule(nextSendEvent, schedule_tick);
        firstPortAvailableTime = MaxTick;
    }
}

void
SpatterGen::processNextSendEvent()
{
    int req_buf_before = requestBuffer.size();
    for (int i = 0; i < sendRate; i++) {
        if (portBusyUntil[i] > curTick()) {
            DPRINTF(
                SpatterGen,
                "%s: Port[%d] is busy this cycle.\n", __func__, i
            );
            continue;
        }
        if (requestBuffer.empty()) {
            DPRINTF(
                SpatterGen,
                "%s: No packets to send this cycle.\n", __func__
            );
            break;
        }
        if (!requestBuffer.hasReady(curTick())) {
            DPRINTF(
                SpatterGen,
                "%s: Packet at front of requestBuffer not ready this cycle.\n",
                __func__
            );
            break;
        }
        PacketPtr pkt = requestBuffer.front();
        DPRINTF(
            SpatterGen,
            "%s: Sending pkt: %s to port[%d].\n",
            __func__, pkt->print(), i
        );
        // NOTE: We assume the port will be busy for 1 cycle.
        portBusyUntil[i] = clockEdge(Cycles(1));
        port.sendPacket(pkt);
        requestBuffer.pop();
        // increase numPendingMemRequests
        numPendingMemRequests++;
        // record packet departure time
        requestDepartureTime[pkt->req] = curTick();
        // If the port is now blocked, there is no point in continuing
        // the loop, and no point in scheduling nextSendEvent.
        if (port.blocked()) {
            nextSendEvent.sleep();
            break;
        }
    }
    // update firstPortAvailableTime after making all changes.
    for (int i = 0; i < sendRate; i++) {
        // if the port was not used this cycle, it's busy until nextCycle().
        portBusyUntil[i] = std::max(portBusyUntil[i], nextCycle());
        firstPortAvailableTime = std::min(
            firstPortAvailableTime,
            portBusyUntil[i]
        );
    }

    bool did_work = (req_buf_before - requestBuffer.size()) > 0;
    if (did_work && nextGenEvent.pending()) {
        // Since this event might open up space for the output of
        // nextGenEvent, it should wake nextGenEvent if it is asleep.
        nextGenEvent.wake();
        scheduleNextGenEvent(nextCycle());
    }

    if (!nextSendEvent.pending()) {
        scheduleNextSendEvent(nextCycle());
    }
}

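// Editor's note on the sleep/wake pairing visible above: nextGenEvent
// sleeps when requestBuffer fills (processNextGenEvent) and is woken
// here once sends drain the buffer, while nextSendEvent sleeps when the
// port blocks and is woken via recvReqRetry once the peer accepts the
// blocked packet. pending() appears to report this slept state, which
// is why the schedule*Event helpers and the event handlers check it
// before rescheduling.
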
SpatterGen::SpatterGenStats::SpatterGenStats(SpatterGen* spatter_gen):
    statistics::Group(spatter_gen), spatterGen(spatter_gen),
    ADD_STAT(numIndexReads, statistics::units::Count::get(),
             "Number of reads from the indexer array."),
    ADD_STAT(indexBytesRead, statistics::units::Byte::get(),
             "Number of bytes read from the indexer array."),
    ADD_STAT(totalIndexReadLatency, statistics::units::Tick::get(),
             "Total latency for reading from the indexer array."),
    ADD_STAT(numValueReads, statistics::units::Count::get(),
             "Number of reads from the values array."),
    ADD_STAT(numValueWrites, statistics::units::Count::get(),
             "Number of writes to the values array."),
    ADD_STAT(valueBytesRead, statistics::units::Byte::get(),
             "Number of bytes read from the values array."),
    ADD_STAT(valueBytesWritten, statistics::units::Byte::get(),
             "Number of bytes written to the values array."),
    ADD_STAT(totalValueReadLatency, statistics::units::Tick::get(),
             "Total latency for reading from the values array."),
    ADD_STAT(totalValueWriteLatency, statistics::units::Tick::get(),
             "Total latency for writing to the values array."),
    ADD_STAT(indexAccessLatency, statistics::units::Tick::get(),
             "Distribution of latency for accessing the indexer array."),
    ADD_STAT(valueAccessLatency, statistics::units::Tick::get(),
             "Distribution of latency for accessing the values array."),
    ADD_STAT(totalIndirectAccessLatency, statistics::units::Tick::get(),
             "Distribution of total latency for indirect accesses.")
{}

void
SpatterGen::SpatterGenStats::regStats()
{
    using namespace statistics;
    indexAccessLatency.init(8);
    valueAccessLatency.init(16);
    totalIndirectAccessLatency.init(16);
}

} // namespace gem5