gem5  v22.1.0.0
MessageBuffer.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2019-2021 ARM Limited
3  * All rights reserved.
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
15  * All rights reserved.
16  *
17  * Redistribution and use in source and binary forms, with or without
18  * modification, are permitted provided that the following conditions are
19  * met: redistributions of source code must retain the above copyright
20  * notice, this list of conditions and the following disclaimer;
21  * redistributions in binary form must reproduce the above copyright
22  * notice, this list of conditions and the following disclaimer in the
23  * documentation and/or other materials provided with the distribution;
24  * neither the name of the copyright holders nor the names of its
25  * contributors may be used to endorse or promote products derived from
26  * this software without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39  */
40 
42 
#include "mem/ruby/network/MessageBuffer.hh"

#include <algorithm>
#include <cassert>
#include <functional>
#include <utility>

#include "base/cprintf.hh"
#include "base/logging.hh"
#include "base/random.hh"
#include "base/stl_helpers.hh"
#include "debug/RubyQueue.hh"
#include "mem/ruby/system/RubySystem.hh"
51 
52 namespace gem5
53 {
54 
55 namespace ruby
56 {
57 
58 using stl_helpers::operator<<;
59 
61  : SimObject(p), m_stall_map_size(0), m_max_size(p.buffer_size),
62  m_max_dequeue_rate(p.max_dequeue_rate), m_dequeues_this_cy(0),
63  m_time_last_time_size_checked(0),
64  m_time_last_time_enqueue(0), m_time_last_time_pop(0),
65  m_last_arrival_time(0), m_strict_fifo(p.ordered),
66  m_randomization(p.randomization),
67  m_allow_zero_latency(p.allow_zero_latency),
68  m_routing_priority(p.routing_priority),
69  ADD_STAT(m_not_avail_count, statistics::units::Count::get(),
70  "Number of times this buffer did not have N slots available"),
71  ADD_STAT(m_msg_count, statistics::units::Count::get(),
72  "Number of messages passed the buffer"),
73  ADD_STAT(m_buf_msgs, statistics::units::Rate<
74  statistics::units::Count, statistics::units::Tick>::get(),
75  "Average number of messages in buffer"),
76  ADD_STAT(m_stall_time, statistics::units::Tick::get(),
77  "Total number of ticks messages were stalled in this buffer"),
78  ADD_STAT(m_stall_count, statistics::units::Count::get(),
79  "Number of times messages were stalled"),
80  ADD_STAT(m_avg_stall_time, statistics::units::Rate<
81  statistics::units::Tick, statistics::units::Count>::get(),
82  "Average stall ticks per message"),
83  ADD_STAT(m_occupancy, statistics::units::Rate<
84  statistics::units::Ratio, statistics::units::Tick>::get(),
85  "Average occupancy of buffer capacity")
86 {
87  m_msg_counter = 0;
88  m_consumer = NULL;
93  m_priority_rank = 0;
94 
95  m_stall_msg_map.clear();
96  m_input_link_id = 0;
97  m_vnet_id = 0;
98 
99  m_buf_msgs = 0;
100  m_stall_time = 0;
101 
102  m_dequeue_callback = nullptr;
103 
104  // stats
107 
110 
111  m_buf_msgs
113 
116 
119 
122 
125 
126  if (m_max_size > 0) {
128  } else {
129  m_occupancy = 0;
130  }
131 
133 }
134 
135 unsigned int
137 {
138  if (m_time_last_time_size_checked != curTime) {
141  }
142 
144 }
145 
146 bool
147 MessageBuffer::areNSlotsAvailable(unsigned int n, Tick current_time)
148 {
149 
150  // fast path when message buffers have infinite size
151  if (m_max_size == 0) {
152  return true;
153  }
154 
155  // determine the correct size for the current cycle
156  // pop operations shouldn't effect the network's visible size
157  // until schd cycle, but enqueue operations effect the visible
158  // size immediately
159  unsigned int current_size = 0;
160  unsigned int current_stall_size = 0;
161 
162  if (m_time_last_time_pop < current_time) {
163  // no pops this cycle - heap and stall queue size is correct
164  current_size = m_prio_heap.size();
165  current_stall_size = m_stall_map_size;
166  } else {
167  if (m_time_last_time_enqueue < current_time) {
168  // no enqueues this cycle - m_size_at_cycle_start is correct
169  current_size = m_size_at_cycle_start;
170  } else {
171  // both pops and enqueues occured this cycle - add new
172  // enqueued msgs to m_size_at_cycle_start
173  current_size = m_size_at_cycle_start + m_msgs_this_cycle;
174  }
175 
176  // Stall queue size at start is considered
177  current_stall_size = m_stalled_at_cycle_start;
178  }
179 
180  // now compare the new size with our max size
181  if (current_size + current_stall_size + n <= m_max_size) {
182  return true;
183  } else {
184  DPRINTF(RubyQueue, "n: %d, current_size: %d, heap size: %d, "
185  "m_max_size: %d\n",
186  n, current_size + current_stall_size,
187  m_prio_heap.size(), m_max_size);
189  return false;
190  }
191 }
192 
193 const Message*
195 {
196  DPRINTF(RubyQueue, "Peeking at head of queue.\n");
197  const Message* msg_ptr = m_prio_heap.front().get();
198  assert(msg_ptr);
199 
200  DPRINTF(RubyQueue, "Message: %s\n", (*msg_ptr));
201  return msg_ptr;
202 }
203 
204 // FIXME - move me somewhere else
205 Tick
207 {
208  Tick time = 1;
209  time += random_mt.random(0, 3); // [0...3]
210  if (random_mt.random(0, 7) == 0) { // 1 in 8 chance
211  time += 100 + random_mt.random(1, 15); // 100 + [1...15]
212  }
213  return time;
214 }
215 
216 void
217 MessageBuffer::enqueue(MsgPtr message, Tick current_time, Tick delta)
218 {
219  // record current time incase we have a pop that also adjusts my size
220  if (m_time_last_time_enqueue < current_time) {
221  m_msgs_this_cycle = 0; // first msg this cycle
222  m_time_last_time_enqueue = current_time;
223  }
224 
225  m_msg_counter++;
227 
228  // Calculate the arrival time of the message, that is, the first
229  // cycle the message can be dequeued.
230  panic_if((delta == 0) && !m_allow_zero_latency,
231  "Delta equals zero and allow_zero_latency is false during enqueue");
232  Tick arrival_time = 0;
233 
234  // random delays are inserted if the RubySystem level randomization flag
235  // is turned on and this buffer allows it
236  if ((m_randomization == MessageRandomization::disabled) ||
237  ((m_randomization == MessageRandomization::ruby_system) &&
239  // No randomization
240  arrival_time = current_time + delta;
241  } else {
242  // Randomization - ignore delta
243  if (m_strict_fifo) {
244  if (m_last_arrival_time < current_time) {
245  m_last_arrival_time = current_time;
246  }
247  arrival_time = m_last_arrival_time + random_time();
248  } else {
249  arrival_time = current_time + random_time();
250  }
251  }
252 
253  // Check the arrival time
254  assert(arrival_time >= current_time);
255  if (m_strict_fifo) {
256  if (arrival_time < m_last_arrival_time) {
257  panic("FIFO ordering violated: %s name: %s current time: %d "
258  "delta: %d arrival_time: %d last arrival_time: %d\n",
259  *this, name(), current_time, delta, arrival_time,
261  }
262  }
263 
264  // If running a cache trace, don't worry about the last arrival checks
266  m_last_arrival_time = arrival_time;
267  }
268 
269  // compute the delay cycles and set enqueue time
270  Message* msg_ptr = message.get();
271  assert(msg_ptr != NULL);
272 
273  assert(current_time >= msg_ptr->getLastEnqueueTime() &&
274  "ensure we aren't dequeued early");
275 
276  msg_ptr->updateDelayedTicks(current_time);
277  msg_ptr->setLastEnqueueTime(arrival_time);
278  msg_ptr->setMsgCounter(m_msg_counter);
279 
280  // Insert the message into the priority heap
281  m_prio_heap.push_back(message);
282  push_heap(m_prio_heap.begin(), m_prio_heap.end(), std::greater<MsgPtr>());
283  // Increment the number of messages statistic
284  m_buf_msgs++;
285 
286  assert((m_max_size == 0) ||
287  ((m_prio_heap.size() + m_stall_map_size) <= m_max_size));
288 
289  DPRINTF(RubyQueue, "Enqueue arrival_time: %lld, Message: %s\n",
290  arrival_time, *(message.get()));
291 
292  // Schedule the wakeup
293  assert(m_consumer != NULL);
294  m_consumer->scheduleEventAbsolute(arrival_time);
296 }
297 
298 Tick
299 MessageBuffer::dequeue(Tick current_time, bool decrement_messages)
300 {
301  DPRINTF(RubyQueue, "Popping\n");
302  assert(isReady(current_time));
303 
304  // get MsgPtr of the message about to be dequeued
305  MsgPtr message = m_prio_heap.front();
306 
307  // get the delay cycles
308  message->updateDelayedTicks(current_time);
309  Tick delay = message->getDelayedTicks();
310 
311  // record previous size and time so the current buffer size isn't
312  // adjusted until schd cycle
313  if (m_time_last_time_pop < current_time) {
316  m_time_last_time_pop = current_time;
317  m_dequeues_this_cy = 0;
318  }
320 
321  pop_heap(m_prio_heap.begin(), m_prio_heap.end(), std::greater<MsgPtr>());
322  m_prio_heap.pop_back();
323  if (decrement_messages) {
324  // Record how much time is passed since the message was enqueued
325  m_stall_time += curTick() - message->getLastEnqueueTime();
326  m_msg_count++;
327 
328  // If the message will be removed from the queue, decrement the
329  // number of message in the queue.
330  m_buf_msgs--;
331  }
332 
333  // if a dequeue callback was requested, call it now
334  if (m_dequeue_callback) {
336  }
337 
338  return delay;
339 }
340 
341 void
342 MessageBuffer::registerDequeueCallback(std::function<void()> callback)
343 {
344  m_dequeue_callback = callback;
345 }
346 
347 void
349 {
350  m_dequeue_callback = nullptr;
351 }
352 
353 void
355 {
356  m_prio_heap.clear();
357 
358  m_msg_counter = 0;
363  m_msgs_this_cycle = 0;
364 }
365 
366 void
367 MessageBuffer::recycle(Tick current_time, Tick recycle_latency)
368 {
369  DPRINTF(RubyQueue, "Recycling.\n");
370  assert(isReady(current_time));
371  MsgPtr node = m_prio_heap.front();
372  pop_heap(m_prio_heap.begin(), m_prio_heap.end(), std::greater<MsgPtr>());
373 
374  Tick future_time = current_time + recycle_latency;
375  node->setLastEnqueueTime(future_time);
376 
377  m_prio_heap.back() = node;
378  push_heap(m_prio_heap.begin(), m_prio_heap.end(), std::greater<MsgPtr>());
379  m_consumer->scheduleEventAbsolute(future_time);
380 }
381 
382 void
384 {
385  while (!lt.empty()) {
386  MsgPtr m = lt.front();
387  assert(m->getLastEnqueueTime() <= schdTick);
388 
389  m_prio_heap.push_back(m);
390  push_heap(m_prio_heap.begin(), m_prio_heap.end(),
391  std::greater<MsgPtr>());
392 
394 
395  DPRINTF(RubyQueue, "Requeue arrival_time: %lld, Message: %s\n",
396  schdTick, *(m.get()));
397 
398  lt.pop_front();
399  }
400 }
401 
402 void
404 {
405  DPRINTF(RubyQueue, "ReanalyzeMessages %#x\n", addr);
406  assert(m_stall_msg_map.count(addr) > 0);
407 
408  //
409  // Put all stalled messages associated with this address back on the
410  // prio heap. The reanalyzeList call will make sure the consumer is
411  // scheduled for the current cycle so that the previously stalled messages
412  // will be observed before any younger messages that may arrive this cycle
413  //
415  assert(m_stall_map_size >= 0);
416  reanalyzeList(m_stall_msg_map[addr], current_time);
417  m_stall_msg_map.erase(addr);
418 }
419 
420 void
422 {
423  DPRINTF(RubyQueue, "ReanalyzeAllMessages\n");
424 
425  //
426  // Put all stalled messages associated with this address back on the
427  // prio heap. The reanalyzeList call will make sure the consumer is
428  // scheduled for the current cycle so that the previously stalled messages
429  // will be observed before any younger messages that may arrive this cycle.
430  //
431  for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
432  map_iter != m_stall_msg_map.end(); ++map_iter) {
433  m_stall_map_size -= map_iter->second.size();
434  assert(m_stall_map_size >= 0);
435  reanalyzeList(map_iter->second, current_time);
436  }
437  m_stall_msg_map.clear();
438 }
439 
440 void
442 {
443  DPRINTF(RubyQueue, "Stalling due to %#x\n", addr);
444  assert(isReady(current_time));
445  assert(getOffset(addr) == 0);
446  MsgPtr message = m_prio_heap.front();
447 
448  // Since the message will just be moved to stall map, indicate that the
449  // buffer should not decrement the m_buf_msgs statistic
450  dequeue(current_time, false);
451 
452  //
453  // Note: no event is scheduled to analyze the map at a later time.
454  // Instead the controller is responsible to call reanalyzeMessages when
455  // these addresses change state.
456  //
457  (m_stall_msg_map[addr]).push_back(message);
459  m_stall_count++;
460 }
461 
462 bool
464 {
465  return (m_stall_msg_map.count(addr) != 0);
466 }
467 
468 void
470 {
471  DPRINTF(RubyQueue, "Deferring enqueueing message: %s, Address %#x\n",
472  *(message.get()), addr);
473  (m_deferred_msg_map[addr]).push_back(message);
474 }
475 
476 void
478 {
479  assert(!isDeferredMsgMapEmpty(addr));
481  assert(msg_vec.size() > 0);
482 
483  // enqueue all deferred messages associated with this address
484  for (MsgPtr m : msg_vec) {
485  enqueue(m, curTime, delay);
486  }
487 
488  msg_vec.clear();
489  m_deferred_msg_map.erase(addr);
490 }
491 
492 bool
494 {
495  return m_deferred_msg_map.count(addr) == 0;
496 }
497 
498 void
499 MessageBuffer::print(std::ostream& out) const
500 {
501  ccprintf(out, "[MessageBuffer: ");
502  if (m_consumer != NULL) {
503  ccprintf(out, " consumer-yes ");
504  }
505 
507  std::sort_heap(copy.begin(), copy.end(), std::greater<MsgPtr>());
508  ccprintf(out, "%s] %s", copy, name());
509 }
510 
511 bool
512 MessageBuffer::isReady(Tick current_time) const
513 {
514  assert(m_time_last_time_pop <= current_time);
515  bool can_dequeue = (m_max_dequeue_rate == 0) ||
516  (m_time_last_time_pop < current_time) ||
518  bool is_ready = (m_prio_heap.size() > 0) &&
519  (m_prio_heap.front()->getLastEnqueueTime() <= current_time);
520  if (!can_dequeue && is_ready) {
521  // Make sure the Consumer executes next cycle to dequeue the ready msg
523  }
524  return can_dequeue && is_ready;
525 }
526 
527 Tick
529 {
530  if (m_prio_heap.empty())
531  return MaxTick;
532  else
533  return m_prio_heap.front()->getLastEnqueueTime();
534 }
535 
536 uint32_t
538 {
539  DPRINTF(RubyQueue, "functional %s for %#x\n",
540  is_read ? "read" : "write", pkt->getAddr());
541 
542  uint32_t num_functional_accesses = 0;
543 
544  // Check the priority heap and write any messages that may
545  // correspond to the address in the packet.
546  for (unsigned int i = 0; i < m_prio_heap.size(); ++i) {
547  Message *msg = m_prio_heap[i].get();
548  if (is_read && !mask && msg->functionalRead(pkt))
549  return 1;
550  else if (is_read && mask && msg->functionalRead(pkt, *mask))
551  num_functional_accesses++;
552  else if (!is_read && msg->functionalWrite(pkt))
553  num_functional_accesses++;
554  }
555 
556  // Check the stall queue and write any messages that may
557  // correspond to the address in the packet.
558  for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
559  map_iter != m_stall_msg_map.end();
560  ++map_iter) {
561 
562  for (std::list<MsgPtr>::iterator it = (map_iter->second).begin();
563  it != (map_iter->second).end(); ++it) {
564 
565  Message *msg = (*it).get();
566  if (is_read && !mask && msg->functionalRead(pkt))
567  return 1;
568  else if (is_read && mask && msg->functionalRead(pkt, *mask))
569  num_functional_accesses++;
570  else if (!is_read && msg->functionalWrite(pkt))
571  num_functional_accesses++;
572  }
573  }
574 
575  return num_functional_accesses;
576 }
577 
578 } // namespace ruby
579 } // namespace gem5
#define DPRINTF(x,...)
Definition: trace.hh:186
Cycles is a wrapper class for representing cycle counts, i.e.
Definition: types.hh:79
virtual std::string name() const
Definition: named.hh:47
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:294
Addr getAddr() const
Definition: packet.hh:805
Abstract superclass for simulation objects.
Definition: sim_object.hh:148
void scheduleEventAbsolute(Tick timeAbs)
Definition: Consumer.cc:63
void scheduleEvent(Cycles timeDelta)
Definition: Consumer.cc:56
virtual void storeEventInfo(int info)
Definition: Consumer.hh:73
statistics::Scalar m_stall_count
bool isDeferredMsgMapEmpty(Addr addr) const
void recycle(Tick current_time, Tick recycle_latency)
MessageBuffer(const Params &p)
bool areNSlotsAvailable(unsigned int n, Tick curTime)
bool isReady(Tick current_time) const
const Message * peek() const
Function for extracting the message at the head of the message queue.
unsigned int m_size_last_time_size_checked
void enqueueDeferredMessages(Addr addr, Tick curTime, Tick delay)
const unsigned int m_max_dequeue_rate
When != 0, isReady returns false once m_max_dequeue_rate messages have been dequeued in the same cycl...
MessageBufferParams Params
const unsigned int m_max_size
The maximum capacity.
void reanalyzeAllMessages(Tick current_time)
unsigned int m_stalled_at_cycle_start
statistics::Formula m_avg_stall_time
Consumer * m_consumer
Consumer to signal a wakeup(), can be NULL.
Tick dequeue(Tick current_time, bool decrement_messages=true)
Updates the delay cycles of the message at the head of the queue, removes it from the queue and retur...
statistics::Average m_buf_msgs
statistics::Scalar m_stall_time
bool hasStalledMsg(Addr addr) const
std::vector< MsgPtr > m_prio_heap
void enqueue(MsgPtr message, Tick curTime, Tick delta)
void deferEnqueueingMessage(Addr addr, MsgPtr message)
void registerDequeueCallback(std::function< void()> callback)
StallMsgMapType m_stall_msg_map
A map from line addresses to lists of stalled messages for that line.
void stallMessage(Addr addr, Tick current_time)
statistics::Scalar m_msg_count
unsigned int m_size_at_cycle_start
statistics::Formula m_occupancy
void print(std::ostream &out) const
void reanalyzeMessages(Addr addr, Tick current_time)
const MessageRandomization m_randomization
void reanalyzeList(std::list< MsgPtr > &, Tick)
unsigned int getSize(Tick curTime)
int m_stall_map_size
Current size of the stall map.
statistics::Scalar m_not_avail_count
std::function< void()> m_dequeue_callback
uint32_t functionalAccess(Packet *pkt, bool is_read, WriteMask *mask)
DeferredMsgMapType m_deferred_msg_map
virtual bool functionalRead(Packet *pkt)
The two functions below are used for reading / writing the message functionally.
Definition: Message.hh:90
Tick getLastEnqueueTime() const
Definition: Message.hh:107
virtual bool functionalWrite(Packet *pkt)
Definition: Message.hh:94
void updateDelayedTicks(Tick curTime)
Update the delay this message has experienced so far.
Definition: Message.hh:98
void setMsgCounter(uint64_t c)
Definition: Message.hh:110
void setLastEnqueueTime(const Tick &time)
Definition: Message.hh:106
static bool getWarmupEnabled()
Definition: RubySystem.hh:75
static int getRandomization()
Definition: RubySystem.hh:71
Derived & flags(Flags _flags)
Set the flags and marks this stat to print at the end of simulation.
Definition: statistics.hh:358
STL list class.
Definition: stl.hh:51
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Definition: group.hh:75
Random random_mt
Definition: random.cc:99
std::enable_if_t< std::is_integral_v< T >, T > random()
Use the SFINAE idiom to choose an implementation based on whether the type is integral or floating po...
Definition: random.hh:90
constexpr uint64_t mask(unsigned nbits)
Generate a 64-bit mask of 'nbits' 1s, right justified.
Definition: bitfield.hh:63
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:178
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
Definition: logging.hh:204
Bitfield< 31 > n
Definition: misc_types.hh:462
Bitfield< 7 > i
Definition: misc_types.hh:67
Bitfield< 31 > lt
Definition: misc.hh:55
Bitfield< 54 > p
Definition: pagetable.hh:70
Bitfield< 3 > addr
Definition: types.hh:84
std::shared_ptr< Message > MsgPtr
Definition: Message.hh:59
Tick random_time()
Addr getOffset(Addr addr)
Definition: Address.cc:54
const FlagsType nonan
Don't print if this is NAN.
Definition: info.hh:70
const FlagsType nozero
Don't print if this is zero.
Definition: info.hh:68
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
Tick curTick()
The universal simulation clock.
Definition: cur_tick.hh:46
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:147
uint64_t Tick
Tick count type.
Definition: types.hh:58
const Tick MaxTick
Definition: types.hh:60
void ccprintf(cp::Print &print)
Definition: cprintf.hh:130

Generated on Wed Dec 21 2022 10:22:38 for gem5 by doxygen 1.9.1