gem5 v24.1.0.1
MessageBuffer.cc
/*
 * Copyright (c) 2019-2021 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/network/MessageBuffer.hh"

#include <cassert>

#include "base/cprintf.hh"
#include "base/logging.hh"
#include "base/random.hh"
#include "base/stl_helpers.hh"
#include "debug/RubyQueue.hh"

namespace gem5
{

namespace ruby
{

using stl_helpers::operator<<;

MessageBuffer::MessageBuffer(const Params &p)
    : SimObject(p), m_stall_map_size(0), m_max_size(p.buffer_size),
    m_max_dequeue_rate(p.max_dequeue_rate), m_dequeues_this_cy(0),
    m_time_last_time_size_checked(0),
    m_time_last_time_enqueue(0), m_time_last_time_pop(0),
    m_last_arrival_time(0), m_last_message_strict_fifo_bypassed(false),
    m_strict_fifo(p.ordered),
    m_randomization(p.randomization),
    m_allow_zero_latency(p.allow_zero_latency),
    m_routing_priority(p.routing_priority),
    ADD_STAT(m_not_avail_count, statistics::units::Count::get(),
             "Number of times this buffer did not have N slots available"),
    ADD_STAT(m_msg_count, statistics::units::Count::get(),
             "Number of messages passed the buffer"),
    ADD_STAT(m_buf_msgs, statistics::units::Rate<
                statistics::units::Count, statistics::units::Tick>::get(),
             "Average number of messages in buffer"),
    ADD_STAT(m_stall_time, statistics::units::Tick::get(),
             "Total number of ticks messages were stalled in this buffer"),
    ADD_STAT(m_stall_count, statistics::units::Count::get(),
             "Number of times messages were stalled"),
    ADD_STAT(m_avg_stall_time, statistics::units::Rate<
                statistics::units::Tick, statistics::units::Count>::get(),
             "Average stall ticks per message"),
    ADD_STAT(m_occupancy, statistics::units::Rate<
                statistics::units::Ratio, statistics::units::Tick>::get(),
             "Average occupancy of buffer capacity")
{
    m_msg_counter = 0;
    m_consumer = NULL;
    m_size_last_time_size_checked = 0;
    m_size_at_cycle_start = 0;
    m_stalled_at_cycle_start = 0;
    m_msgs_this_cycle = 0;
    m_priority_rank = 0;

    m_stall_msg_map.clear();
    m_input_link_id = 0;
    m_vnet_id = 0;

    m_buf_msgs = 0;
    m_stall_time = 0;

    m_dequeue_callback = nullptr;

    // stats
    m_not_avail_count
        .flags(statistics::nozero);

    m_msg_count
        .flags(statistics::nozero);

    m_buf_msgs
        .flags(statistics::nozero);

    m_stall_time
        .flags(statistics::nozero);

    m_stall_count
        .flags(statistics::nozero);

    m_avg_stall_time
        .flags(statistics::nozero | statistics::nonan);

    m_occupancy
        .flags(statistics::nozero);

    if (m_max_size > 0) {
        m_occupancy = m_buf_msgs / m_max_size;
    } else {
        m_occupancy = 0;
    }

    m_avg_stall_time = m_stall_time / m_msg_count;
}
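
// The formula stats configured above evaluate, at stats-dump time, as:
//   m_occupancy      = m_buf_msgs / m_max_size    (average fraction of the
//                      fixed capacity holding messages)
//   m_avg_stall_time = m_stall_time / m_msg_count (ticks spent stalled per
//                      message that eventually left the buffer)
// With buffer_size == 0 the buffer is unbounded, so m_occupancy is pinned to
// 0: there is no finite capacity to measure against.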

unsigned int
MessageBuffer::getSize(Tick curTime)
{
    if (m_time_last_time_size_checked != curTime) {
        m_time_last_time_size_checked = curTime;
        m_size_last_time_size_checked = m_prio_heap.size();
    }

    return m_size_last_time_size_checked;
}

bool
MessageBuffer::areNSlotsAvailable(unsigned int n, Tick current_time)
{

    // fast path when message buffers have infinite size
    if (m_max_size == 0) {
        return true;
    }

    // determine the correct size for the current cycle
    // pop operations shouldn't affect the network's visible size
    // until the scheduled cycle, but enqueue operations affect the
    // visible size immediately
    unsigned int current_size = 0;
    unsigned int current_stall_size = 0;

    if (m_time_last_time_pop < current_time) {
        // no pops this cycle - heap and stall queue size is correct
        current_size = m_prio_heap.size();
        current_stall_size = m_stall_map_size;
    } else {
        if (m_time_last_time_enqueue < current_time) {
            // no enqueues this cycle - m_size_at_cycle_start is correct
            current_size = m_size_at_cycle_start;
        } else {
            // both pops and enqueues occurred this cycle - add new
            // enqueued msgs to m_size_at_cycle_start
            current_size = m_size_at_cycle_start + m_msgs_this_cycle;
        }

        // Stall queue size at start is considered
        current_stall_size = m_stalled_at_cycle_start;
    }

    // now compare the new size with our max size
    if (current_size + current_stall_size + n <= m_max_size) {
        return true;
    } else {
        DPRINTF(RubyQueue, "n: %d, current_size: %d, heap size: %d, "
                "m_max_size: %d\n",
                n, current_size + current_stall_size,
                m_prio_heap.size(), m_max_size);
        m_not_avail_count++;
        return false;
    }
}

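// Worked example of the capacity check above: with buffer_size == 4, two
// messages already in m_prio_heap and one message parked in the stall map,
// areNSlotsAvailable(1, t) succeeds (2 + 1 + 1 <= 4) while
// areNSlotsAvailable(2, t) fails (2 + 1 + 2 > 4) and bumps m_not_avail_count.
// Messages popped earlier in the same tick still count against the limit, so
// space freed by a pop only becomes visible on a later tick.
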
const Message*
MessageBuffer::peek() const
{
    DPRINTF(RubyQueue, "Peeking at head of queue.\n");
    const Message* msg_ptr = m_prio_heap.front().get();
    assert(msg_ptr);

    DPRINTF(RubyQueue, "Message: %s\n", (*msg_ptr));
    return msg_ptr;
}

// FIXME - move me somewhere else
Tick
random_time()
{
    static Random::RandomPtr rng = Random::genRandom();
    Tick time = 1;
    time += rng->random(0, 3);  // [0...3]
    if (rng->random(0, 7) == 0) {  // 1 in 8 chance
        time += 100 + rng->random(1, 15); // 100 + [1...15]
    }
    return time;
}
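
// Rough expected value of the randomized delay above: 1 + E[U(0,3)] = 2.5
// ticks in the common case, plus a 1-in-8 chance of an extra
// 100 + E[U(1,15)] = 108 ticks, i.e. about 2.5 + 108/8 = 16 ticks on
// average, with a long tail.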

void
MessageBuffer::enqueue(MsgPtr message, Tick current_time, Tick delta,
                       bool ruby_is_random, bool ruby_warmup,
                       bool bypassStrictFIFO)
{
    // record current time in case we have a pop that also adjusts my size
    if (m_time_last_time_enqueue < current_time) {
        m_msgs_this_cycle = 0;  // first msg this cycle
        m_time_last_time_enqueue = current_time;
    }

    m_msg_counter++;
    m_msgs_this_cycle++;

    // Calculate the arrival time of the message, that is, the first
    // cycle the message can be dequeued.
    panic_if((delta == 0) && !m_allow_zero_latency,
           "Delta equals zero and allow_zero_latency is false during enqueue");
    Tick arrival_time = 0;

    // random delays are inserted if the RubySystem level randomization flag
    // is turned on and this buffer allows it
    if ((m_randomization == MessageRandomization::disabled) ||
        ((m_randomization == MessageRandomization::ruby_system) &&
         !ruby_is_random)) {
        // No randomization
        arrival_time = current_time + delta;
    } else {
        // Randomization - ignore delta
        if (m_strict_fifo) {
            if (m_last_arrival_time < current_time) {
                m_last_arrival_time = current_time;
            }
            arrival_time = m_last_arrival_time + random_time();
        } else {
            arrival_time = current_time + random_time();
        }
    }

    // Check the arrival time
    assert(arrival_time >= current_time);
    if (m_strict_fifo &&
        !(bypassStrictFIFO || m_last_message_strict_fifo_bypassed)) {
        if (arrival_time < m_last_arrival_time) {
            panic("FIFO ordering violated: %s name: %s current time: %d "
                  "delta: %d arrival_time: %d last arrival_time: %d\n",
                  *this, name(), current_time, delta, arrival_time,
                  m_last_arrival_time);
        }
    }

    // If running a cache trace, don't worry about the last arrival checks
    if (!ruby_warmup) {
        m_last_arrival_time = arrival_time;
    }

    m_last_message_strict_fifo_bypassed = bypassStrictFIFO;

    // compute the delay cycles and set enqueue time
    Message* msg_ptr = message.get();
    assert(msg_ptr != NULL);

    assert(current_time >= msg_ptr->getLastEnqueueTime() &&
           "ensure we aren't dequeued early");

    msg_ptr->updateDelayedTicks(current_time);
    msg_ptr->setLastEnqueueTime(arrival_time);
    msg_ptr->setMsgCounter(m_msg_counter);

    // Insert the message into the priority heap
    m_prio_heap.push_back(message);
    push_heap(m_prio_heap.begin(), m_prio_heap.end(), std::greater<MsgPtr>());
    // Increment the number of messages statistic
    m_buf_msgs++;

    assert((m_max_size == 0) ||
           ((m_prio_heap.size() + m_stall_map_size) <= m_max_size));

    DPRINTF(RubyQueue, "Enqueue arrival_time: %lld, Message: %s\n",
            arrival_time, *(message.get()));

    // Schedule the wakeup
    assert(m_consumer != NULL);
    m_consumer->scheduleEventAbsolute(arrival_time);
    m_consumer->storeEventInfo(m_vnet_id);
}
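
// Illustrative caller-side sketch (assumed names, not verbatim controller
// code): code feeding a finite buffer guards the enqueue with the capacity
// check, e.g.
//
//     if (buffer->areNSlotsAvailable(1, clockEdge())) {
//         buffer->enqueue(msg, clockEdge(), cyclesToTicks(Cycles(1)),
//                         randomization, warmup);
//     } else {
//         // retry once freed space becomes visible on a later tick
//     }
//
// clockEdge()/cyclesToTicks() come from the caller's ClockedObject base;
// SLICC-generated controllers perform the equivalent resource check before
// firing a transition.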

Tick
MessageBuffer::dequeue(Tick current_time, bool decrement_messages)
{
    DPRINTF(RubyQueue, "Popping\n");
    assert(isReady(current_time));

    // get MsgPtr of the message about to be dequeued
    MsgPtr message = m_prio_heap.front();

    // get the delay cycles
    message->updateDelayedTicks(current_time);
    Tick delay = message->getDelayedTicks();

    // record previous size and time so the current buffer size isn't
    // adjusted until the scheduled cycle
    if (m_time_last_time_pop < current_time) {
        m_size_at_cycle_start = m_prio_heap.size();
        m_stalled_at_cycle_start = m_stall_map_size;
        m_time_last_time_pop = current_time;
        m_dequeues_this_cy = 0;
    }
    ++m_dequeues_this_cy;

    pop_heap(m_prio_heap.begin(), m_prio_heap.end(), std::greater<MsgPtr>());
    m_prio_heap.pop_back();
    if (decrement_messages) {
        // Record how much time has passed since the message was enqueued
        m_stall_time += curTick() - message->getLastEnqueueTime();
        m_msg_count++;

        // If the message will be removed from the queue, decrement the
        // number of messages in the queue.
        m_buf_msgs--;
    }

    // if a dequeue callback was requested, call it now
    if (m_dequeue_callback) {
        m_dequeue_callback();
    }

    return delay;
}
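
// Typical wakeup-side usage (illustrative, with assumed caller names): the
// registered Consumer drains every message that is ready this tick, e.g.
//
//     while (buffer->isReady(clockEdge())) {
//         const Message *msg = buffer->peek();
//         // ... act on msg ...
//         buffer->dequeue(clockEdge());
//     }
//
// Passing decrement_messages = false, as stallMessage() below does, pops the
// head without updating m_msg_count, m_stall_time or m_buf_msgs, since the
// message is only being moved aside rather than leaving the buffer.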

void
MessageBuffer::registerDequeueCallback(std::function<void()> callback)
{
    m_dequeue_callback = callback;
}

void
MessageBuffer::unregisterDequeueCallback()
{
    m_dequeue_callback = nullptr;
}

void
MessageBuffer::clear()
{
    m_prio_heap.clear();

    m_msg_counter = 0;
    m_time_last_time_enqueue = 0;
    m_time_last_time_pop = 0;
    m_size_at_cycle_start = 0;
    m_stalled_at_cycle_start = 0;
    m_msgs_this_cycle = 0;
}

void
MessageBuffer::recycle(Tick current_time, Tick recycle_latency)
{
    DPRINTF(RubyQueue, "Recycling.\n");
    assert(isReady(current_time));
    MsgPtr node = m_prio_heap.front();
    pop_heap(m_prio_heap.begin(), m_prio_heap.end(), std::greater<MsgPtr>());

    Tick future_time = current_time + recycle_latency;
    node->setLastEnqueueTime(future_time);

    m_prio_heap.back() = node;
    push_heap(m_prio_heap.begin(), m_prio_heap.end(), std::greater<MsgPtr>());
    m_consumer->scheduleEventAbsolute(future_time);
}

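// A recycled message is not removed: its enqueue time is simply pushed
// recycle_latency ticks into the future and the heap is re-ordered, so the
// head reappears to the consumer after that delay.  Protocols use this
// (e.g. via the SLICC in_port recycle() call) to retry a message the
// controller cannot process yet; note that the message keeps occupying a
// slot of a finite buffer in the meantime.
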
void
MessageBuffer::reanalyzeList(std::list<MsgPtr> &lt, Tick schdTick)
{
    while (!lt.empty()) {
        MsgPtr m = lt.front();
        assert(m->getLastEnqueueTime() <= schdTick);

        m_prio_heap.push_back(m);
        push_heap(m_prio_heap.begin(), m_prio_heap.end(),
                  std::greater<MsgPtr>());

        m_consumer->scheduleEventAbsolute(schdTick);

        DPRINTF(RubyQueue, "Requeue arrival_time: %lld, Message: %s\n",
                schdTick, *(m.get()));

        lt.pop_front();
    }
}

void
MessageBuffer::reanalyzeMessages(Addr addr, Tick current_time)
{
    DPRINTF(RubyQueue, "ReanalyzeMessages %#x\n", addr);
    assert(m_stall_msg_map.count(addr) > 0);

    //
    // Put all stalled messages associated with this address back on the
    // prio heap. The reanalyzeList call will make sure the consumer is
    // scheduled for the current cycle so that the previously stalled messages
    // will be observed before any younger messages that may arrive this cycle
    //
    m_stall_map_size -= m_stall_msg_map[addr].size();
    assert(m_stall_map_size >= 0);
    reanalyzeList(m_stall_msg_map[addr], current_time);
    m_stall_msg_map.erase(addr);
}

void
MessageBuffer::reanalyzeAllMessages(Tick current_time)
{
    DPRINTF(RubyQueue, "ReanalyzeAllMessages\n");

    //
    // Put all stalled messages back on the prio heap. The reanalyzeList
    // call will make sure the consumer is scheduled for the current cycle
    // so that the previously stalled messages will be observed before any
    // younger messages that may arrive this cycle.
    //
    for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
         map_iter != m_stall_msg_map.end(); ++map_iter) {
        m_stall_map_size -= map_iter->second.size();
        assert(m_stall_map_size >= 0);
        reanalyzeList(map_iter->second, current_time);
    }
    m_stall_msg_map.clear();
}

void
MessageBuffer::stallMessage(Addr addr, Tick current_time)
{
    DPRINTF(RubyQueue, "Stalling due to %#x\n", addr);
    assert(isReady(current_time));
    MsgPtr message = m_prio_heap.front();

    // Since the message will just be moved to the stall map, indicate that
    // the buffer should not decrement the m_buf_msgs statistic
    dequeue(current_time, false);

    //
    // Note: no event is scheduled to analyze the map at a later time.
    // Instead the controller is responsible for calling reanalyzeMessages
    // when these addresses change state.
    //
    (m_stall_msg_map[addr]).push_back(message);
    m_stall_map_size++;
    m_stall_count++;
}

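// Illustrative protocol-side flow (assumed names): when a request hits a
// line in a transient state the controller parks it here, and later releases
// it once the line settles, e.g.
//
//     requestQueue->stallMessage(line_addr, clockEdge());      // park head
//     // ... line changes state ...
//     requestQueue->reanalyzeMessages(line_addr, clockEdge()); // replay
//
// Stalled messages still count against a finite buffer_size (see
// areNSlotsAvailable above), so a protocol that stalls without ever
// reanalyzing can deadlock on back-pressure.
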
bool
MessageBuffer::hasStalledMsg(Addr addr) const
{
    return (m_stall_msg_map.count(addr) != 0);
}

void
MessageBuffer::deferEnqueueingMessage(Addr addr, MsgPtr message)
{
    DPRINTF(RubyQueue, "Deferring enqueueing message: %s, Address %#x\n",
            *(message.get()), addr);
    (m_deferred_msg_map[addr]).push_back(message);
}

void
MessageBuffer::enqueueDeferredMessages(Addr addr, Tick curTime, Tick delay,
                                       bool ruby_is_random, bool ruby_warmup)
{
    assert(!isDeferredMsgMapEmpty(addr));
    std::vector<MsgPtr>& msg_vec = m_deferred_msg_map[addr];
    assert(msg_vec.size() > 0);

    // enqueue all deferred messages associated with this address
    for (MsgPtr m : msg_vec) {
        enqueue(m, curTime, delay, ruby_is_random, ruby_warmup);
    }

    msg_vec.clear();
    m_deferred_msg_map.erase(addr);
}

bool
MessageBuffer::isDeferredMsgMapEmpty(Addr addr) const
{
    return m_deferred_msg_map.count(addr) == 0;
}

void
MessageBuffer::print(std::ostream& out) const
{
    ccprintf(out, "[MessageBuffer: ");
    if (m_consumer != NULL) {
        ccprintf(out, " consumer-yes ");
    }

    std::vector<MsgPtr> copy(m_prio_heap);
    std::sort_heap(copy.begin(), copy.end(), std::greater<MsgPtr>());
    ccprintf(out, "%s] %s", copy, name());
}

bool
MessageBuffer::isReady(Tick current_time) const
{
    assert(m_time_last_time_pop <= current_time);
    bool can_dequeue = (m_max_dequeue_rate == 0) ||
                       (m_time_last_time_pop < current_time) ||
                       (m_dequeues_this_cy < m_max_dequeue_rate);
    bool is_ready = (m_prio_heap.size() > 0) &&
        (m_prio_heap.front()->getLastEnqueueTime() <= current_time);
    if (!can_dequeue && is_ready) {
        // Make sure the Consumer executes next cycle to dequeue the ready msg
        m_consumer->scheduleEvent(Cycles(1));
    }
    return can_dequeue && is_ready;
}

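// Example of the rate limit above: with max_dequeue_rate = 2, once two
// messages have been dequeued at the same tick any further isReady() call at
// that tick returns false even if a message is ready, and the consumer is
// rescheduled one cycle later to pick it up.  With max_dequeue_rate == 0 the
// buffer never throttles dequeues.
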
Tick
MessageBuffer::readyTime() const
{
    if (m_prio_heap.empty())
        return MaxTick;
    else
        return m_prio_heap.front()->getLastEnqueueTime();
}

uint32_t
MessageBuffer::functionalAccess(Packet *pkt, bool is_read, WriteMask *mask)
{
    DPRINTF(RubyQueue, "functional %s for %#x\n",
            is_read ? "read" : "write", pkt->getAddr());

    uint32_t num_functional_accesses = 0;

    // Check the priority heap and write any messages that may
    // correspond to the address in the packet.
    for (unsigned int i = 0; i < m_prio_heap.size(); ++i) {
        Message *msg = m_prio_heap[i].get();
        if (is_read && !mask && msg->functionalRead(pkt))
            return 1;
        else if (is_read && mask && msg->functionalRead(pkt, *mask))
            num_functional_accesses++;
        else if (!is_read && msg->functionalWrite(pkt))
            num_functional_accesses++;
    }

    // Check the stall queue and write any messages that may
    // correspond to the address in the packet.
    for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
         map_iter != m_stall_msg_map.end();
         ++map_iter) {

        for (std::list<MsgPtr>::iterator it = (map_iter->second).begin();
             it != (map_iter->second).end(); ++it) {

            Message *msg = (*it).get();
            if (is_read && !mask && msg->functionalRead(pkt))
                return 1;
            else if (is_read && mask && msg->functionalRead(pkt, *mask))
                num_functional_accesses++;
            else if (!is_read && msg->functionalWrite(pkt))
                num_functional_accesses++;
        }
    }

    return num_functional_accesses;
}

} // namespace ruby
} // namespace gem5