gem5 v24.1.0.1
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
spatter_gen.cc
Go to the documentation of this file.
/*
 * Copyright (c) 2024 The Regents of The University of California
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
28
30
31#include "base/cprintf.hh"
32#include "debug/SpatterGen.hh"
33#include "debug/SpatterKernel.hh"
34#include "enums/SpatterKernelType.hh"
35#include "enums/SpatterProcessingMode.hh"
36#include "mem/packet.hh"
37#include "sim/sim_exit.hh"
38#include "sim/system.hh"
39
40namespace gem5
41{
42
43using enums::SpatterKernelTypeStrings;
44using enums::SpatterProcessingMode;
45
47 ClockedObject(params),
48 state(SpatterGenState::RUNNING),
49 requestorId(params.system->getRequestorId(this)),
50 numPendingMemRequests(0),
51 stats(this),
52 mode(params.processing_mode),
53 port(this, name() + ".port"),
54 intRegFileSize(params.int_regfile_size), intRegUsed(0),
55 fpRegFileSize(params.fp_regfile_size), fpRegUsed(0),
56 requestGenLatency(params.request_gen_latency),
57 requestGenRate(params.request_gen_rate),
58 firstGeneratorAvailableTime(0),
59 nextGenEvent([this](){ processNextGenEvent(); }, name() + ".GenEvent"),
60 requestBufferEntries(params.request_buffer_entries),
61 requestBuffer(clockPeriod()),
62 sendRate(params.send_rate),
63 firstPortAvailableTime(0),
64 nextSendEvent([this](){ processNextSendEvent(); }, name() + ".SendEvent"),
65 receiveBuffer(clockPeriod())
66{
67 fatal_if(fpRegFileSize < requestBufferEntries,
68 "fp_regfile_size should be >= request_buffer_entries."
69 "if request_buffer_entries is bigger than fp_regfile_size,"
70 "it may result in inaccuracies in your simulation."
71 "Ideally: fp_regfile_size >> request_buffer_entries."
72 );
73 generatorBusyUntil.resize(requestGenRate, 0);
74 portBusyUntil.resize(sendRate, 0);
75}
76
77Port&
78SpatterGen::getPort(const std::string& if_name, PortID idx)
79{
80 if (if_name == "port") {
81 return port;
82 } else {
83 return ClockedObject::getPort(if_name, idx);
84 }
85}
86
87void
92
93void
95{
96 panic_if(blocked(), "Should never try to send if port is blocked.");
97 if (!sendTimingReq(pkt)) {
98 blockedPacket = pkt;
99 DPRINTF(
101 "%s: Port blocked when sending %s.\n",
102 __func__, pkt->print()
103 );
104 }
105}
106
107void
109{
110 DPRINTF(SpatterGen, "%s: Port received a ReqRetry.\n", __func__);
111 panic_if(
112 blockedPacket == nullptr,
113 "Received reqRetry with no blocked packet."
114 );
115 if (!sendTimingReq(blockedPacket)) {
116 DPRINTF(
118 "%s: Port blocked when sending %s.\n",
119 __func__, blockedPacket->print()
120 );
121 } else {
122 blockedPacket = nullptr;
123 owner->recvReqRetry();
124 }
125}
126
127void
135
136bool
138 return owner->recvTimingResp(pkt);
139}
140
141bool
143{
144 DPRINTF(SpatterGen, "%s: Received pkt: %s.\n", __func__, pkt->print());
145 assert(pkt->isResponse());
146
147 // record trip time.
148 SpatterAccess* spatter_access = pkt->findNextSenderState<SpatterAccess>();
149 Tick trip_time = (curTick() - requestDepartureTime[pkt->req]);
150 requestDepartureTime.erase(pkt->req);
151 spatter_access->recordTripTime(trip_time);
152
153 int trips_left = spatter_access->tripsLeft();
154 assert(trips_left >= 0);
155 if (trips_left > 0) {
157 stats.indexBytesRead += pkt->getSize();
158 stats.totalIndexReadLatency += trip_time;
159
161 receiveBuffer.push(spatter_access, curTick());
162 } else {
165 spatter_access->tripTimeSoFar()
166 );
167 if (spatter_access->type() == SpatterKernelType::gather) {
169 stats.valueBytesRead += pkt->getSize();
170 stats.totalValueReadLatency += trip_time;
171 } else if (spatter_access->type() == SpatterKernelType::scatter) {
174 stats.totalValueWriteLatency += trip_time;
175 } else {
176 panic("Unknown kernel type.");
177 }
178 // CAUTION: We're going to decrement fpRegUsed here,
179 // it could cause inaccuracies if processNextGenEvent
180 // is called after recvTimingResp on the same tick.
181 // i.e. we might end up releasing a register on the same
182 // cycle that we are allocating it.
183 // it's probably not going to ever be an issue since
184 // fpRegFileSize is probably >> requestBufferEntries
185 // i.e. the chances of running out of fp registers is low because
186 // we do not simulate parts of the pipeline that back things up into
187 // fp registers, e.g. functional units of ALU.
188 fpRegUsed--;
189 delete spatter_access;
190 }
191
192 // delete the pkt since we don't need it anymore.
193 delete pkt;
194
195 if (!nextGenEvent.pending()) {
197 }
198
201 return true;
202}
203
204void
206 uint32_t id, uint32_t delta, uint32_t count,
208 uint32_t base_index, uint32_t indices_per_stride, uint32_t stride,
209 size_t index_size, Addr base_index_addr,
210 size_t value_size, Addr base_value_addr,
211 const std::vector<uint32_t>& indices
212)
213{
214 DPRINTF(
216 "%s: Adding kernel with id: %d, delta: %d, count: %d, type: %s.\n",
217 __func__, id, delta, count, SpatterKernelTypeStrings[type]
218 );
219 SpatterKernel new_kernel(
221 id, delta, count, type,
222 base_index, indices_per_stride, stride,
223 index_size, base_index_addr,
224 value_size, base_value_addr
225 );
226 new_kernel.setIndices(indices);
227 kernels.push(new_kernel);
228}
229
230void
232{
233 assert(mode == SpatterProcessingMode::synchronous);
237}
238
239void
241{
242 bool no_pending = numPendingMemRequests == 0;
243 bool no_queued = requestBuffer.empty();
244 int avail_int_regs = intRegFileSize - intRegUsed;
245 int avail_fp_regs = fpRegFileSize - fpRegUsed;
246 bool can_do_init = initAccessOk(avail_int_regs, avail_fp_regs, curTick());
247 bool can_do_mid = interAccessOk(avail_int_regs, avail_fp_regs, curTick());
248 bool can_do_ult = ultAccessOk(avail_int_regs, avail_fp_regs, curTick());
249 if (!can_do_init && !can_do_mid && !can_do_ult && no_pending && no_queued)
250 {
251 assert((
252 (mode == SpatterProcessingMode::synchronous) &&
254 ) ||
255 mode == SpatterProcessingMode::asynchronous
256 );
259 csprintf("%s received all expected responses.", name()),
260 0,
261 nextCycle()
262 );
263 }
264}
265
266bool
267SpatterGen::initAccessOk(int int_regs, int fp_regs, Tick when) const
268{
269 bool have_int_reg = int_regs > 0;
270 // for mode == SpatterProcessingMode::asynchronous state will always be
271 // SpatterGenState::RUNNING. we don't have to do checks for mode.
272 // for mode == SpatterProcessingMode::synchronous, if state is
273 // SpatterGenState::DRAINING or SpatterGenState::WAITING
274 // we can't initiate any new indirect accesses.
275 bool have_kernel = !kernels.empty() && (state == SpatterGenState::RUNNING);
276 return have_kernel && have_int_reg;
277}
278
279bool
280SpatterGen::interAccessOk(int int_regs, int fp_regs, Tick when) const
281{
282 bool have_int_reg = int_regs > 0;
283 bool have_index = receiveBuffer.hasReady(when);
284 bool mid_idx = have_index && (receiveBuffer.front()->tripsLeft() > 1);
285 return mid_idx && have_int_reg;
286}
287
288bool
289SpatterGen::ultAccessOk(int int_regs, int fp_regs, Tick when) const
290{
291 bool have_fp_reg = fp_regs > 0;
292 bool have_index = receiveBuffer.hasReady(when);
293 bool val_idx = have_index && (receiveBuffer.front()->tripsLeft() == 1);
294 return val_idx && have_fp_reg;
295}
296
297void
299{
300 int avail_int_regs = intRegFileSize - intRegUsed;
301 int avail_fp_regs = fpRegFileSize - fpRegUsed;
302 bool have_work = initAccessOk(avail_int_regs, avail_fp_regs, curTick()) ||
303 interAccessOk(avail_int_regs, avail_fp_regs, curTick()) ||
304 ultAccessOk(avail_int_regs, avail_fp_regs, curTick());
305 Tick schedule_tick = std::max(when, firstGeneratorAvailableTime);
306 if (have_work && (!nextGenEvent.scheduled())) {
307 schedule(nextGenEvent, schedule_tick);
309 }
310}
311
312void
314{
315 assert(!nextGenEvent.pending());
316 int req_buf_before = requestBuffer.size();
317 // track changes to intRegUsed in this variable and apply it
318 // at the end of the for loop. This way if we free a register
319 // in the for loop, other iterations of the for loop won't
320 // observe this change. This matches what happens in real h/w.
321 int int_used_now = 0;
322 // track this independently to prevent different iterations inside
323 // for loop observing change to h/w resources, i.e we can't rely
324 // intRegFileSize - intRegUsed to see if we have registers to allocate
325 // since they don't change until after the for loop
326 int int_regs_now = intRegFileSize - intRegUsed;
327 // same explanation as int_used_now
328 int fp_used_now = 0;
329 // same explanation as int_regs_now
330 int fp_regs_now = fpRegFileSize - fpRegUsed;
331 for (int i = 0; i < requestGenRate; i++) {
332 if (generatorBusyUntil[i] > curTick()) {
333 DPRINTF(
335 "%s: AGU[%d] is busy this cycle.\n", __func__, i
336 );
337 continue;
338 }
339 if (!(requestBuffer.size() < requestBufferEntries)) {
340 // if no space left in the requestBuffer sleep
341 // whoever pops from requestBuffer wakes us up.
343 break;
344 }
345 // Now we know that AGU[i] is available and there is room
346 // in the requestBuffer to put the packet.
347 if (ultAccessOk(int_regs_now, fp_regs_now, curTick())) {
348 // occupy one fp register
349 fp_regs_now--;
350 fp_used_now++;
351 // make AGU busy for the next requestGenLatency cycles.
353
354 // create a new packet to access
355 SpatterAccess* spatter_access = receiveBuffer.front();
356 PacketPtr pkt = spatter_access->nextPacket();
357 pkt->pushSenderState(spatter_access);
358
359 // push to requestBuffer
360 requestBuffer.push(pkt, curTick());
361 DPRINTF(
363 "%s: Pushed pkt: %s to requestBuffer.\n",
364 __func__, pkt->print()
365 );
366
367 // now deallocate resources for reading the index
368 int_used_now--;
369 receiveBuffer.pop();
370 } else if (interAccessOk(int_regs_now, fp_regs_now, curTick())) {
371 // occupy one int register
372 int_regs_now--;
373 int_used_now++;
374 // make AGU busy for the next requestGenLatency cycles.
376
377 // create a new packet to access
378 SpatterAccess* spatter_access = receiveBuffer.front();
379 PacketPtr pkt = spatter_access->nextPacket();
380 pkt->pushSenderState(spatter_access);
381
382 // push to requestBuffer
383 requestBuffer.push(pkt, curTick());
384 DPRINTF(
386 "%s: Pushed pkt: %s to requestBuffer.\n",
387 __func__, pkt->print()
388 );
389
390 // now deallocate resources for reading the index
391 int_used_now--;
392 receiveBuffer.pop();
393 } else if (initAccessOk(int_regs_now, fp_regs_now, curTick())) {
394 // occupy one int register
395 int_regs_now--;
396 int_used_now++;
398
399 SpatterKernel& front = kernels.front();
400 SpatterAccess* spatter_access = front.nextSpatterAccess();
401 PacketPtr pkt = spatter_access->nextPacket();
402 pkt->pushSenderState(spatter_access);
403
404 requestBuffer.push(pkt, curTick());
405 DPRINTF(
407 "%s: Pushed pkt: %s to requestBuffer.\n",
408 __func__, pkt->print()
409 );
410
411 if (front.done()) {
412 DPRINTF(
414 "%s: Done with kernel %d type: %s.\n",
415 __func__, front.id(),
416 SpatterKernelTypeStrings[front.type()]
417 );
418 kernels.pop();
419 // If we're processing synchronously we now have to stop
420 // making intial accesses and wait everyone to receive
421 // all expected responses.
422 if (mode == SpatterProcessingMode::synchronous) {
424 }
425 }
426 } else {
427 //
428 DPRINTF(
430 "%s: Nothing more could be done this cycle.\n", __func__
431 );
432 DPRINTF(SpatterGen, "%s: Here is h/w status report: "
433 "{KERNELS_REMAIN: %d, INDEXES_REMAIN: %d, INT_REG_USED: %d, "
434 "FP_REG_USED: %d, REQ_BUFF_SIZE: %d}.\n",
435 __func__, kernels.size(), receiveBuffer.size(),
437 break;
438 }
439 }
440
441 // update firstGeneratorAvailableTime after making all changes.
442 for (int i = 0; i < requestGenRate; i++) {
447 );
448 }
449
450 // now that we have simulated all the work of this cycle, we can
451 // apply the deltas to the h/w resources.
452 intRegUsed += int_used_now;
453 fpRegUsed += fp_used_now;
454
455 bool did_work = (requestBuffer.size() - req_buf_before) > 0;
456 if (did_work && (!nextSendEvent.pending())) {
458 }
459
460 if (!nextGenEvent.pending()) {
462 }
463}
464
465void
467{
468 bool have_work = !requestBuffer.empty();
469 Tick schedule_tick = std::max(when, firstPortAvailableTime);
470 if (have_work && (!nextSendEvent.scheduled())) {
471 schedule(nextSendEvent, schedule_tick);
473 }
474}
475
476void
478{
479 int req_buf_before = requestBuffer.size();
480 for (int i = 0; i < sendRate; i++) {
481 if (portBusyUntil[i] > curTick()) {
482 DPRINTF(
484 "%s: Port[%d] is busy this cycle.\n", __func__, i
485 );
486 continue;
487 }
488 if (requestBuffer.empty()) {
489 DPRINTF(
491 "%s: No packets to send this cycle.\n", __func__
492 );
493 break;
494 }
495 if (!requestBuffer.hasReady(curTick())) {
496 DPRINTF(
498 "%s: Packet at front of requestBuffer not ready this cycle.\n",
499 __func__
500 );
501 break;
502 }
503 PacketPtr pkt = requestBuffer.front();
504 DPRINTF(
506 "%s: Sending pkt: %s to port[%d].\n",
507 __func__, pkt->print(), i
508 );
509 // NOTE: We assume the port will be busy for 1 cycle.
511 port.sendPacket(pkt);
512 requestBuffer.pop();
513 // increase numPendingMemRequests
515 // record packet departure time
517 // Now if we put the port in blocked state no point in continuing
518 // the loop. also no point in scheduling nextSendEvent.
519 if (port.blocked()) {
521 break;
522 }
523 }
524 // update firstPortAvailableTime after making all changes.
525 for (int i = 0; i < sendRate; i++) {
526 // if the port was not used this cycle, it's busy until nextCycle().
527 portBusyUntil[i] = std::max(portBusyUntil[i], nextCycle());
528 firstPortAvailableTime = std::min(
531 );
532 }
533
534 bool did_work = (req_buf_before - requestBuffer.size()) > 0;
535 if (did_work && nextGenEvent.pending()) {
536 // since this event might open up space for output of nextGenEvent,
537 // it should wake it up if nextGenEvent is asleep.
540 }
541
542 if (!nextSendEvent.pending()) {
544 }
545}
546
548 statistics::Group(spatter_gen), spatterGen(spatter_gen),
549 ADD_STAT(numIndexReads, statistics::units::Count::get(),
550 "Number of reads from the indexer array."),
551 ADD_STAT(indexBytesRead, statistics::units::Byte::get(),
552 "Number of bytes read from the indexer array."),
553 ADD_STAT(totalIndexReadLatency, statistics::units::Tick::get(),
554 "Total latency for reading from the indexer array."),
555 ADD_STAT(numValueReads, statistics::units::Count::get(),
556 "Number of reads from the values array."),
557 ADD_STAT(numValueWrites, statistics::units::Count::get(),
558 "Number of writes to the values array."),
559 ADD_STAT(valueBytesRead, statistics::units::Byte::get(),
560 "Number of bytes read from the values array."),
561 ADD_STAT(valueBytesWritten, statistics::units::Byte::get(),
562 "Number of bytes written to the values array."),
563 ADD_STAT(totalValueReadLatency, statistics::units::Tick::get(),
564 "Total latency for reading from the values array."),
565 ADD_STAT(totalValueWriteLatency, statistics::units::Tick::get(),
566 "Total latency for writing to the values array."),
567 ADD_STAT(indexAccessLatency, statistics::units::Tick::get(),
568 "Distribution of latency for accessing the indexer array."),
569 ADD_STAT(valueAccessLatency, statistics::units::Tick::get(),
570 "Distribution of latency for accessing the values array."),
571 ADD_STAT(totalIndirectAccessLatency, statistics::units::Tick::get(),
572 "Distribution of total latency for indirect accesses.")
573{}
574
575void
577{
578 using namespace statistics;
579 indexAccessLatency.init(8);
580 valueAccessLatency.init(16);
581 totalIndirectAccessLatency.init(16);
582}
583
584} // namespace gem5
#define DPRINTF(x,...)
Definition trace.hh:209
The ClockedObject class extends the SimObject with a clock and accessor functions to relate ticks to ...
ClockedObjectParams Params
Parameters of ClockedObject.
Tick clockEdge(Cycles cycles=Cycles(0)) const
Determine the tick when a cycle begins, by default the current one, but the argument also enables the...
Tick nextCycle() const
Based on the clock of the object, determine the start tick of the first cycle that is at least one cy...
Cycles is a wrapper class for representing cycle counts, i.e.
Definition types.hh:79
virtual std::string name() const
Definition named.hh:47
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition packet.hh:295
bool isResponse() const
Definition packet.hh:598
void print(std::ostream &o, int verbosity=0, const std::string &prefix="") const
Definition packet.cc:368
T * findNextSenderState() const
Go through the sender state stack and return the first instance that is of type T (as determined by a...
Definition packet.hh:575
void pushSenderState(SenderState *sender_state)
Push a new sender state to the packet and make the current sender state the predecessor of the new on...
Definition packet.cc:334
RequestPtr req
A pointer to the original request.
Definition packet.hh:377
unsigned getSize() const
Definition packet.hh:817
bool sendTimingReq(PacketPtr pkt)
Attempt to send a timing request to the responder port by calling its corresponding receive function.
Definition port.hh:603
virtual void recvReqRetry() override
Called by the peer if sendTimingReq was called on this peer (causing recvTimingReq to be called on th...
void sendPacket(PacketPtr pkt)
virtual bool recvTimingResp(PacketPtr pkt) override
Receive a timing response from the peer.
Spatter Kernel Player.
Port & getPort(const std::string &if_name, PortID idx=InvalidPortID) override
Get a port with a given name and index.
bool initAccessOk(int int_regs, int fp_regs, Tick when) const
bool recvTimingResp(PacketPtr pkt)
TimedQueue< SpatterAccess * > receiveBuffer
void processNextGenEvent()
SpatterProcessingMode mode
std::vector< Tick > portBusyUntil
bool ultAccessOk(int int_regs, int fp_regs, Tick when) const
SpatterGenEvent nextSendEvent
void processNextSendEvent()
SpatterGen(const Params &params)
enums::SpatterKernelType SpatterKernelType
Tick firstGeneratorAvailableTime
void addKernel(uint32_t id, uint32_t delta, uint32_t count, SpatterKernelType type, uint32_t base_index, uint32_t indices_per_stride, uint32_t stride, size_t index_size, Addr base_index_addr, size_t value_size, Addr base_value_addr, const std::vector< uint32_t > &indices)
virtual void startup() override
startup() is the final initialization call before simulation.
void proceedPastSyncPoint()
SpatterGenPort port
RequestorID requestorId
SpatterGenState state
bool interAccessOk(int int_regs, int fp_regs, Tick when) const
SpatterGenEvent nextGenEvent
std::queue< SpatterKernel > kernels
std::unordered_map< RequestPtr, Tick > requestDepartureTime
TimedQueue< PacketPtr > requestBuffer
std::vector< Tick > generatorBusyUntil
void scheduleNextSendEvent(Tick when)
void scheduleNextGenEvent(Tick when)
SpatterGenStats stats
void setIndices(const std::vector< uint32_t > &pattern)
uint32_t id() const
SpatterAccess * nextSpatterAccess()
SpatterKernelType type() const
void sample(const U &v, int n=1)
Add a value to the distribtion n times.
Statistics container.
Definition group.hh:93
STL vector class.
Definition stl.hh:37
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Definition group.hh:75
bool scheduled() const
Determine if the current event is scheduled.
Definition eventq.hh:458
void schedule(Event &event, Tick when)
Definition eventq.hh:1012
#define panic(...)
This implements a cprintf based panic() function.
Definition logging.hh:188
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condi...
Definition logging.hh:236
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
Definition logging.hh:214
virtual Port & getPort(const std::string &if_name, PortID idx=InvalidPortID)
Get a port with a given name and index.
atomic_var_t state
Definition helpers.cc:211
Bitfield< 4, 0 > mode
Definition misc_types.hh:74
Bitfield< 7 > i
Definition misc_types.hh:67
Bitfield< 21, 20 > stride
Bitfield< 15 > system
Definition misc.hh:1032
Copyright (c) 2024 Arm Limited All rights reserved.
Definition binary32.hh:36
Tick curTick()
The universal simulation clock.
Definition cur_tick.hh:46
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition types.hh:147
int16_t PortID
Port index/ID type, and a symbolic name for an invalid port id.
Definition types.hh:245
uint64_t Tick
Tick count type.
Definition types.hh:58
void exitSimLoop(const std::string &message, int exit_code, Tick when, Tick repeat, bool serialize)
Schedule an event to exit the simulation loop (returning to Python) at the end of the current cycle (...
Definition sim_events.cc:88
const Tick MaxTick
Definition types.hh:60
std::string csprintf(const char *format, const Args &...args)
Definition cprintf.hh:161
Declaration of the Packet class.
SpatterKernelType type() const
void recordTripTime(Tick trip_time)
statistics::Scalar totalValueWriteLatency
virtual void regStats() override
Callback to set stat parameters.
statistics::Histogram totalIndirectAccessLatency
statistics::Histogram valueAccessLatency
statistics::Scalar totalValueReadLatency
statistics::Histogram indexAccessLatency
statistics::Scalar valueBytesWritten
statistics::Scalar totalIndexReadLatency
SpatterGenStats(SpatterGen *spatter_gen)
const std::string & name()
Definition trace.cc:48

Generated on Mon Jan 13 2025 04:28:32 for gem5 by doxygen 1.9.8