gem5  v22.1.0.0
lsq.cc
1 /*
2  * Copyright (c) 2013-2014,2017-2018,2020-2021 Arm Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions are
16  * met: redistributions of source code must retain the above copyright
17  * notice, this list of conditions and the following disclaimer;
18  * redistributions in binary form must reproduce the above copyright
19  * notice, this list of conditions and the following disclaimer in the
20  * documentation and/or other materials provided with the distribution;
21  * neither the name of the copyright holders nor the names of its
22  * contributors may be used to endorse or promote products derived from
23  * this software without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 #include "cpu/minor/lsq.hh"
39 
40 #include <iomanip>
41 #include <sstream>
42 
43 #include "base/compiler.hh"
44 #include "base/logging.hh"
45 #include "base/trace.hh"
46 #include "cpu/minor/exec_context.hh"
47 #include "cpu/minor/execute.hh"
48 #include "cpu/minor/pipeline.hh"
49 #include "cpu/utils.hh"
50 #include "debug/Activity.hh"
51 #include "debug/MinorMem.hh"
52 
53 namespace gem5
54 {
55 
56 GEM5_DEPRECATED_NAMESPACE(Minor, minor);
57 namespace minor
58 {
59 
60 LSQ::LSQRequest::LSQRequest(LSQ &port_, MinorDynInstPtr inst_, bool isLoad_,
61  PacketDataPtr data_, uint64_t *res_) :
62  SenderState(),
63  port(port_),
64  inst(inst_),
65  isLoad(isLoad_),
66  data(data_),
67  packet(NULL),
68  request(),
69  res(res_),
70  skipped(false),
71  issuedToMemory(false),
72  isTranslationDelayed(false),
73  state(NotIssued)
74 {
75  request = std::make_shared<Request>();
76 }
77 
78 void
79 LSQ::LSQRequest::tryToSuppressFault()
80 {
81  SimpleThread &thread = *port.cpu.threads[inst->id.threadId];
82  std::unique_ptr<PCStateBase> old_pc(thread.pcState().clone());
83  ExecContext context(port.cpu, thread, port.execute, inst);
84  [[maybe_unused]] Fault fault = inst->translationFault;
85 
86  // Give the instruction a chance to suppress a translation fault
87  inst->translationFault = inst->staticInst->initiateAcc(&context, nullptr);
88  if (inst->translationFault == NoFault) {
89  DPRINTFS(MinorMem, (&port),
90  "Translation fault suppressed for inst:%s\n", *inst);
91  } else {
92  assert(inst->translationFault == fault);
93  }
94  thread.pcState(*old_pc);
95 }
96 
97 void
98 LSQ::LSQRequest::completeDisabledMemAccess()
99 {
100  DPRINTFS(MinorMem, (&port), "Complete disabled mem access for inst:%s\n",
101  *inst);
102 
103  SimpleThread &thread = *port.cpu.threads[inst->id.threadId];
104  std::unique_ptr<PCStateBase> old_pc(thread.pcState().clone());
105 
106  ExecContext context(port.cpu, thread, port.execute, inst);
107 
108  context.setMemAccPredicate(false);
109  inst->staticInst->completeAcc(nullptr, &context, inst->traceData);
110 
111  thread.pcState(*old_pc);
112 }
113 
114 void
115 LSQ::LSQRequest::disableMemAccess()
116 {
117  port.cpu.threads[inst->id.threadId]->setMemAccPredicate(false);
118  DPRINTFS(MinorMem, (&port), "Disable mem access for inst:%s\n", *inst);
119 }
120 
121 LSQ::AddrRangeCoverage
122 LSQ::LSQRequest::containsAddrRangeOf(
123  Addr req1_addr, unsigned int req1_size,
124  Addr req2_addr, unsigned int req2_size)
125 {
126  /* 'end' here means the address of the byte just past the request
127  * blocks */
128  Addr req2_end_addr = req2_addr + req2_size;
129  Addr req1_end_addr = req1_addr + req1_size;
130 
131  AddrRangeCoverage ret;
132 
133  if (req1_addr >= req2_end_addr || req1_end_addr <= req2_addr)
134  ret = NoAddrRangeCoverage;
135  else if (req1_addr <= req2_addr && req1_end_addr >= req2_end_addr)
136  ret = FullAddrRangeCoverage;
137  else
138  ret = PartialAddrRangeCoverage;
139 
140  return ret;
141 }
142 
143 LSQ::AddrRangeCoverage
144 LSQ::LSQRequest::containsAddrRangeOf(LSQRequestPtr other_request)
145 {
146  AddrRangeCoverage ret = containsAddrRangeOf(
147  request->getPaddr(), request->getSize(),
148  other_request->request->getPaddr(), other_request->request->getSize());
149  /* If there is a strobe mask then store data forwarding might not be
150  * correct. Instead of checking enablement of every byte we just fall back
151  * to PartialAddrRangeCoverage to prohibit store data forwarding */
152  if (ret == FullAddrRangeCoverage && request->isMasked())
153  ret = PartialAddrRangeCoverage;
154  return ret;
155 }
156 
157 
158 bool
159 LSQ::LSQRequest::isBarrier()
160 {
161  return inst->isInst() && inst->staticInst->isFullMemBarrier();
162 }
163 
164 bool
165 LSQ::LSQRequest::needsToBeSentToStoreBuffer()
166 {
167  return state == StoreToStoreBuffer;
168 }
169 
170 void
171 LSQ::LSQRequest::setState(LSQRequestState new_state)
172 {
173  DPRINTFS(MinorMem, (&port), "Setting state from %d to %d for request:"
174  " %s\n", state, new_state, *inst);
175  state = new_state;
176 }
177 
178 bool
179 LSQ::LSQRequest::isComplete() const
180 {
181  /* @todo, There is currently only one 'completed' state. This
182  * may not be a good choice */
183  return state == Complete;
184 }
185 
186 void
187 LSQ::LSQRequest::reportData(std::ostream &os) const
188 {
189  os << (isLoad ? 'R' : 'W') << ';';
190  inst->reportData(os);
191  os << ';' << state;
192 }
193 
194 std::ostream &
195 operator <<(std::ostream &os, LSQ::AddrRangeCoverage coverage)
196 {
197  switch (coverage) {
198  case LSQ::PartialAddrRangeCoverage:
199  os << "PartialAddrRangeCoverage";
200  break;
201  case LSQ::FullAddrRangeCoverage:
202  os << "FullAddrRangeCoverage";
203  break;
204  case LSQ::NoAddrRangeCoverage:
205  os << "NoAddrRangeCoverage";
206  break;
207  default:
208  os << "AddrRangeCoverage-" << static_cast<int>(coverage);
209  break;
210  }
211  return os;
212 }
213 
214 std::ostream &
215 operator <<(std::ostream &os, LSQ::LSQRequest::LSQRequestState state)
216 {
217  switch (state) {
218  case LSQ::LSQRequest::NotIssued:
219  os << "NotIssued";
220  break;
221  case LSQ::LSQRequest::InTranslation:
222  os << "InTranslation";
223  break;
224  case LSQ::LSQRequest::Translated:
225  os << "Translated";
226  break;
227  case LSQ::LSQRequest::Failed:
228  os << "Failed";
229  break;
230  case LSQ::LSQRequest::RequestIssuing:
231  os << "RequestIssuing";
232  break;
233  case LSQ::LSQRequest::StoreToStoreBuffer:
234  os << "StoreToStoreBuffer";
235  break;
236  case LSQ::LSQRequest::StoreInStoreBuffer:
237  os << "StoreInStoreBuffer";
238  break;
239  case LSQ::LSQRequest::StoreBufferIssuing:
240  os << "StoreBufferIssuing";
241  break;
242  case LSQ::LSQRequest::RequestNeedsRetry:
243  os << "RequestNeedsRetry";
244  break;
245  case LSQ::LSQRequest::StoreBufferNeedsRetry:
246  os << "StoreBufferNeedsRetry";
247  break;
248  case LSQ::LSQRequest::Complete:
249  os << "Complete";
250  break;
251  default:
252  os << "LSQRequestState-" << static_cast<int>(state);
253  break;
254  }
255  return os;
256 }
257 
258 void
259 LSQ::clearMemBarrier(MinorDynInstPtr &inst)
260 {
261  bool is_last_barrier =
262  inst->id.execSeqNum >= lastMemBarrier[inst->id.threadId];
263 
264  DPRINTF(MinorMem, "Moving %s barrier out of store buffer inst: %s\n",
265  (is_last_barrier ? "last" : "a"), *inst);
266 
267  if (is_last_barrier)
268  lastMemBarrier[inst->id.threadId] = 0;
269 }
270 
271 void
272 LSQ::SingleDataRequest::finish(const Fault &fault_, const RequestPtr &request_,
273  ThreadContext *tc, BaseMMU::Mode mode)
274 {
275  port.numAccessesInDTLB--;
276 
277  DPRINTFS(MinorMem, (&port), "Received translation response for"
278  " request: %s delayed:%d %s\n", *inst, isTranslationDelayed,
279  fault_ != NoFault ? fault_->name() : "");
280 
281  if (fault_ != NoFault) {
282  inst->translationFault = fault_;
283  if (isTranslationDelayed) {
284  tryToSuppressFault();
285  if (inst->translationFault == NoFault) {
286  completeDisabledMemAccess();
287  setState(Complete);
288  }
289  }
290  setState(Translated);
291  } else {
292  setState(Translated);
293  makePacket();
294  }
295  port.tryToSendToTransfers(this);
296 
297  /* Let's try and wake up the processor for the next cycle */
298  port.cpu.wakeupOnEvent(Pipeline::ExecuteStageId);
299 }
300 
301 void
302 LSQ::SingleDataRequest::startAddrTranslation()
303 {
304  ThreadContext *thread = port.cpu.getContext(
305  inst->id.threadId);
306 
307  const auto &byte_enable = request->getByteEnable();
308  if (isAnyActiveElement(byte_enable.cbegin(), byte_enable.cend())) {
309  port.numAccessesInDTLB++;
310 
311  setState(LSQ::LSQRequest::InTranslation);
312 
313  DPRINTFS(MinorMem, (&port), "Submitting DTLB request\n");
314  /* Submit the translation request. The response will come through
315  * finish/markDelayed on the LSQRequest as it bears the Translation
316  * interface */
317  thread->getMMUPtr()->translateTiming(
318  request, thread, this, (isLoad ? BaseMMU::Read : BaseMMU::Write));
319  } else {
320  disableMemAccess();
321  setState(LSQ::LSQRequest::Complete);
322  }
323 }
324 
325 void
326 LSQ::SingleDataRequest::retireResponse(PacketPtr packet_)
327 {
328  DPRINTFS(MinorMem, (&port), "Retiring packet\n");
329  packet = packet_;
330  packetInFlight = false;
331  setState(Complete);
332 }
333 
334 void
335 LSQ::SplitDataRequest::finish(const Fault &fault_, const RequestPtr &request_,
336  ThreadContext *tc, BaseMMU::Mode mode)
337 {
338  port.numAccessesInDTLB--;
339 
340  [[maybe_unused]] unsigned int expected_fragment_index =
341  numTranslatedFragments;
342 
343  numInTranslationFragments--;
344  numTranslatedFragments++;
345 
346  DPRINTFS(MinorMem, (&port), "Received translation response for fragment"
347  " %d of request: %s delayed:%d %s\n", expected_fragment_index,
348  *inst, isTranslationDelayed,
349  fault_ != NoFault ? fault_->name() : "");
350 
351  assert(request_ == fragmentRequests[expected_fragment_index]);
352 
353  /* Wake up next cycle to get things going again in case the
354  * tryToSendToTransfers does take */
355  port.cpu.wakeupOnEvent(Pipeline::ExecuteStageId);
356 
357  if (fault_ != NoFault) {
358  /* tryToSendToTransfers will handle the fault */
359  inst->translationFault = fault_;
360 
361  DPRINTFS(MinorMem, (&port), "Faulting translation for fragment:"
362  " %d of request: %s\n",
363  expected_fragment_index, *inst);
364 
365  if (expected_fragment_index > 0 || isTranslationDelayed)
366  tryToSuppressFault();
367  if (expected_fragment_index == 0) {
368  if (isTranslationDelayed && inst->translationFault == NoFault) {
369  completeDisabledMemAccess();
370  setState(Complete);
371  } else {
372  setState(Translated);
373  }
374  } else if (inst->translationFault == NoFault) {
375  setState(Translated);
376  numTranslatedFragments--;
377  makeFragmentPackets();
378  } else {
379  setState(Translated);
380  }
381  port.tryToSendToTransfers(this);
382  } else if (numTranslatedFragments == numFragments) {
383  makeFragmentPackets();
384  setState(Translated);
385  port.tryToSendToTransfers(this);
386  } else {
387  /* Avoid calling translateTiming from within ::finish */
388  assert(!translationEvent.scheduled());
389  port.cpu.schedule(translationEvent, curTick());
390  }
391 }
392 
393 LSQ::SplitDataRequest::SplitDataRequest(LSQ &port_, MinorDynInstPtr inst_,
394  bool isLoad_, PacketDataPtr data_, uint64_t *res_) :
395  LSQRequest(port_, inst_, isLoad_, data_, res_),
396  translationEvent([this]{ sendNextFragmentToTranslation(); },
397  "translationEvent"),
398  numFragments(0),
399  numInTranslationFragments(0),
400  numTranslatedFragments(0),
401  numIssuedFragments(0),
402  numRetiredFragments(0),
403  fragmentRequests(),
404  fragmentPackets()
405 {
406  /* Don't know how many elements are needed until the request is
407  * populated by the caller. */
408 }
409 
410 LSQ::SplitDataRequest::~SplitDataRequest()
411 {
412  for (auto i = fragmentPackets.begin();
413  i != fragmentPackets.end(); i++)
414  {
415  delete *i;
416  }
417 }
418 
419 void
420 LSQ::SplitDataRequest::makeFragmentRequests()
421 {
422  Addr base_addr = request->getVaddr();
423  unsigned int whole_size = request->getSize();
424  unsigned int line_width = port.lineWidth;
425 
426  unsigned int fragment_size;
427  Addr fragment_addr;
428 
429  std::vector<bool> fragment_write_byte_en;
430 
431  /* Assume that this transfer is across potentially many block snap
432  * boundaries:
433  *
434  * | _|________|________|________|___ |
435  * | |0| 1 | 2 | 3 | 4 | |
436  * | |_|________|________|________|___| |
437  * | | | | | |
438  *
439  * The first transfer (0) can be up to lineWidth in size.
440  * All the middle transfers (1-3) are lineWidth in size
441  * The last transfer (4) can be from zero to lineWidth - 1 in size
442  */
443  unsigned int first_fragment_offset =
444  addrBlockOffset(base_addr, line_width);
445  unsigned int last_fragment_size =
446  addrBlockOffset(base_addr + whole_size, line_width);
447  unsigned int first_fragment_size =
448  line_width - first_fragment_offset;
449 
450  unsigned int middle_fragments_total_size =
451  whole_size - (first_fragment_size + last_fragment_size);
452 
453  assert(addrBlockOffset(middle_fragments_total_size, line_width) == 0);
454 
455  unsigned int middle_fragment_count =
456  middle_fragments_total_size / line_width;
457 
458  numFragments = 1 /* first */ + middle_fragment_count +
459  (last_fragment_size == 0 ? 0 : 1);
460 
461  DPRINTFS(MinorMem, (&port), "Dividing transfer into %d fragmentRequests."
462  " First fragment size: %d Last fragment size: %d\n",
463  numFragments, first_fragment_size,
464  (last_fragment_size == 0 ? line_width : last_fragment_size));
465 
466  assert(((middle_fragment_count * line_width) +
467  first_fragment_size + last_fragment_size) == whole_size);
468 
469  fragment_addr = base_addr;
470  fragment_size = first_fragment_size;
471 
472  /* Just past the last address in the request */
473  Addr end_addr = base_addr + whole_size;
474 
475  auto& byte_enable = request->getByteEnable();
476  unsigned int num_disabled_fragments = 0;
477 
478  for (unsigned int fragment_index = 0; fragment_index < numFragments;
479  fragment_index++)
480  {
481  [[maybe_unused]] bool is_last_fragment = false;
482 
483  if (fragment_addr == base_addr) {
484  /* First fragment */
485  fragment_size = first_fragment_size;
486  } else {
487  if ((fragment_addr + line_width) > end_addr) {
488  /* Adjust size of last fragment */
489  fragment_size = end_addr - fragment_addr;
490  is_last_fragment = true;
491  } else {
492  /* Middle fragments */
493  fragment_size = line_width;
494  }
495  }
496 
497  RequestPtr fragment = std::make_shared<Request>();
498  bool disabled_fragment = false;
499 
500  fragment->setContext(request->contextId());
501  // Set up byte-enable mask for the current fragment
502  auto it_start = byte_enable.begin() +
503  (fragment_addr - base_addr);
504  auto it_end = byte_enable.begin() +
505  (fragment_addr - base_addr) + fragment_size;
506  if (isAnyActiveElement(it_start, it_end)) {
507  fragment->setVirt(
508  fragment_addr, fragment_size, request->getFlags(),
509  request->requestorId(),
510  request->getPC());
511  fragment->setByteEnable(std::vector<bool>(it_start, it_end));
512  } else {
513  disabled_fragment = true;
514  }
515 
516  if (!disabled_fragment) {
517  DPRINTFS(MinorMem, (&port), "Generating fragment addr: 0x%x"
518  " size: %d (whole request addr: 0x%x size: %d) %s\n",
519  fragment_addr, fragment_size, base_addr, whole_size,
520  (is_last_fragment ? "last fragment" : ""));
521 
522  fragmentRequests.push_back(fragment);
523  } else {
524  num_disabled_fragments++;
525  }
526 
527  fragment_addr += fragment_size;
528  }
529  assert(numFragments >= num_disabled_fragments);
530  numFragments -= num_disabled_fragments;
531 }
532 
533 void
534 LSQ::SplitDataRequest::makeFragmentPackets()
535 {
536  assert(numTranslatedFragments > 0);
537  Addr base_addr = request->getVaddr();
538 
539  DPRINTFS(MinorMem, (&port), "Making packets for request: %s\n", *inst);
540 
541  for (unsigned int fragment_index = 0;
542  fragment_index < numTranslatedFragments;
543  fragment_index++)
544  {
545  RequestPtr fragment = fragmentRequests[fragment_index];
546 
547  DPRINTFS(MinorMem, (&port), "Making packet %d for request: %s"
548  " (%d, 0x%x)\n",
549  fragment_index, *inst,
550  (fragment->hasPaddr() ? "has paddr" : "no paddr"),
551  (fragment->hasPaddr() ? fragment->getPaddr() : 0));
552 
553  Addr fragment_addr = fragment->getVaddr();
554  unsigned int fragment_size = fragment->getSize();
555 
556  uint8_t *request_data = NULL;
557 
558  if (!isLoad) {
559  /* Split data for Packets. Will become the property of the
560  * outgoing Packets */
561  request_data = new uint8_t[fragment_size];
562  std::memcpy(request_data, data + (fragment_addr - base_addr),
563  fragment_size);
564  }
565 
566  assert(fragment->hasPaddr());
567 
568  PacketPtr fragment_packet =
569  makePacketForRequest(fragment, isLoad, this, request_data);
570 
571  fragmentPackets.push_back(fragment_packet);
572  /* Accumulate flags in parent request */
573  request->setFlags(fragment->getFlags());
574  }
575 
576  /* Might as well make the overall/response packet here */
577  /* Get the physical address for the whole request/packet from the first
578  * fragment */
579  request->setPaddr(fragmentRequests[0]->getPaddr());
580  makePacket();
581 }
582 
583 void
584 LSQ::SplitDataRequest::startAddrTranslation()
585 {
586  makeFragmentRequests();
587 
588  if (numFragments > 0) {
589  setState(LSQ::LSQRequest::InTranslation);
590  numInTranslationFragments = 0;
591  numTranslatedFragments = 0;
592 
593  /* @todo, just do these in sequence for now with
594  * a loop of:
595  * do {
596  * sendNextFragmentToTranslation ; translateTiming ; finish
597  * } while (numTranslatedFragments != numFragments);
598  */
599 
600  /* Do first translation */
601  sendNextFragmentToTranslation();
602  } else {
603  disableMemAccess();
604  setState(LSQ::LSQRequest::Complete);
605  }
606 }
607 
608 PacketPtr
609 LSQ::SplitDataRequest::getHeadPacket()
610 {
611  assert(numIssuedFragments < numTranslatedFragments);
612 
613  return fragmentPackets[numIssuedFragments];
614 }
615 
616 void
617 LSQ::SplitDataRequest::stepToNextPacket()
618 {
619  assert(numIssuedFragments < numTranslatedFragments);
620 
621  numIssuedFragments++;
622 }
623 
624 void
625 LSQ::SplitDataRequest::retireResponse(PacketPtr response)
626 {
627  assert(inst->translationFault == NoFault);
628  assert(numRetiredFragments < numTranslatedFragments);
629 
630  DPRINTFS(MinorMem, (&port), "Retiring fragment addr: 0x%x size: %d"
631  " offset: 0x%x (retired fragment num: %d)\n",
632  response->req->getVaddr(), response->req->getSize(),
633  request->getVaddr() - response->req->getVaddr(),
634  numRetiredFragments);
635 
636  numRetiredFragments++;
637 
638  if (skipped) {
639  /* Skip because we already knew the request had faulted or been
640  * skipped */
641  DPRINTFS(MinorMem, (&port), "Skipping this fragment\n");
642  } else if (response->isError()) {
643  /* Mark up the error and leave to execute to handle it */
644  DPRINTFS(MinorMem, (&port), "Fragment has an error, skipping\n");
645  setSkipped();
646  packet->copyError(response);
647  } else {
648  if (isLoad) {
649  if (!data) {
650  /* For a split transfer, a Packet must be constructed
651  * to contain all returning data. This is that packet's
652  * data */
653  data = new uint8_t[request->getSize()];
654  }
655 
656  /* Populate the portion of the overall response data represented
657  * by the response fragment */
658  std::memcpy(
659  data + (response->req->getVaddr() - request->getVaddr()),
660  response->getConstPtr<uint8_t>(),
661  response->req->getSize());
662  }
663  }
664 
665  /* Complete early if we're skipping and there are no more in-flight accesses */
666  if (skipped && !hasPacketsInMemSystem()) {
667  DPRINTFS(MinorMem, (&port), "Completed skipped burst\n");
668  setState(Complete);
669  if (packet->needsResponse())
670  packet->makeResponse();
671  }
672 
673  if (numRetiredFragments == numTranslatedFragments)
674  setState(Complete);
675 
676  if (!skipped && isComplete()) {
677  DPRINTFS(MinorMem, (&port), "Completed burst %d\n", packet != NULL);
678 
679  DPRINTFS(MinorMem, (&port), "Retired packet isRead: %d isWrite: %d"
680  " needsResponse: %d packetSize: %s requestSize: %s responseSize:"
681  " %s\n", packet->isRead(), packet->isWrite(),
682  packet->needsResponse(), packet->getSize(), request->getSize(),
683  response->getSize());
684 
685  /* A request can become complete by several paths, this is a sanity
686  * check to make sure the packet's data is created */
687  if (!data) {
688  data = new uint8_t[request->getSize()];
689  }
690 
691  if (isLoad) {
692  DPRINTFS(MinorMem, (&port), "Copying read data\n");
693  std::memcpy(packet->getPtr<uint8_t>(), data, request->getSize());
694  }
695  packet->makeResponse();
696  }
697 
698  /* Packets are all deallocated together in ~SplitLSQRequest */
699 }
700 
701 void
702 LSQ::SplitDataRequest::sendNextFragmentToTranslation()
703 {
704  unsigned int fragment_index = numTranslatedFragments;
705 
706  ThreadContext *thread = port.cpu.getContext(
707  inst->id.threadId);
708 
709  DPRINTFS(MinorMem, (&port), "Submitting DTLB request for fragment: %d\n",
710  fragment_index);
711 
712  port.numAccessesInDTLB++;
713  numInTranslationFragments++;
714 
715  thread->getMMUPtr()->translateTiming(
716  fragmentRequests[fragment_index], thread, this, (isLoad ?
717  BaseMMU::Read : BaseMMU::Write));
718 }
719 
720 bool
721 LSQ::StoreBuffer::canInsert() const
722 {
723  /* @todo, support store amalgamation */
724  return slots.size() < numSlots;
725 }
726 
727 void
728 LSQ::StoreBuffer::deleteRequest(LSQRequestPtr request)
729 {
730  auto found = std::find(slots.begin(), slots.end(), request);
731 
732  if (found != slots.end()) {
733  DPRINTF(MinorMem, "Deleting request: %s %s %s from StoreBuffer\n",
734  request, *found, *(request->inst));
735  slots.erase(found);
736 
737  delete request;
738  }
739 }
740 
741 void
742 LSQ::StoreBuffer::insert(LSQRequestPtr request)
743 {
744  if (!canInsert()) {
745  warn("%s: store buffer insertion without space to insert from"
746  " inst: %s\n", name(), *(request->inst));
747  }
748 
749  DPRINTF(MinorMem, "Pushing store: %s into store buffer\n", request);
750 
751  numUnissuedAccesses++;
752 
753  if (request->state != LSQRequest::Complete)
754  request->setState(LSQRequest::StoreInStoreBuffer);
755 
756  slots.push_back(request);
757 
758  /* Let's try and wake up the processor for the next cycle to step
759  * the store buffer */
760  lsq.cpu.wakeupOnEvent(Pipeline::ExecuteStageId);
761 }
762 
763 LSQ::AddrRangeCoverage
764 LSQ::StoreBuffer::canForwardDataToLoad(LSQRequestPtr request,
765  unsigned int &found_slot)
766 {
767  unsigned int slot_index = slots.size() - 1;
768  auto i = slots.rbegin();
769  AddrRangeCoverage ret = NoAddrRangeCoverage;
770 
771  /* Traverse the store buffer in reverse order (most to least recent)
772  * and try to find a slot whose address range overlaps this request */
773  while (ret == NoAddrRangeCoverage && i != slots.rend()) {
774  LSQRequestPtr slot = *i;
775 
776  /* Cache maintenance instructions go down via the store path but
777  * they carry no data and they shouldn't be considered
778  * for forwarding */
779  if (slot->packet &&
780  slot->inst->id.threadId == request->inst->id.threadId &&
781  !slot->packet->req->isCacheMaintenance()) {
782  AddrRangeCoverage coverage = slot->containsAddrRangeOf(request);
783 
784  if (coverage != NoAddrRangeCoverage) {
785  DPRINTF(MinorMem, "Forwarding: slot: %d result: %s thisAddr:"
786  " 0x%x thisSize: %d slotAddr: 0x%x slotSize: %d\n",
787  slot_index, coverage,
788  request->request->getPaddr(), request->request->getSize(),
789  slot->request->getPaddr(), slot->request->getSize());
790 
791  found_slot = slot_index;
792  ret = coverage;
793  }
794  }
795 
796  i++;
797  slot_index--;
798  }
799 
800  return ret;
801 }
802 
804 void
805 LSQ::StoreBuffer::forwardStoreData(LSQRequestPtr load,
806  unsigned int slot_number)
807 {
808  assert(slot_number < slots.size());
809  assert(load->packet);
810  assert(load->isLoad);
811 
812  LSQRequestPtr store = slots[slot_number];
813 
814  assert(store->packet);
815  assert(store->containsAddrRangeOf(load) == FullAddrRangeCoverage);
816 
817  Addr load_addr = load->request->getPaddr();
818  Addr store_addr = store->request->getPaddr();
819  Addr addr_offset = load_addr - store_addr;
820 
821  unsigned int load_size = load->request->getSize();
822 
823  DPRINTF(MinorMem, "Forwarding %d bytes for addr: 0x%x from store buffer"
824  " slot: %d addr: 0x%x addressOffset: 0x%x\n",
825  load_size, load_addr, slot_number,
826  store_addr, addr_offset);
827 
828  void *load_packet_data = load->packet->getPtr<void>();
829  void *store_packet_data = store->packet->getPtr<uint8_t>() + addr_offset;
830 
831  std::memcpy(load_packet_data, store_packet_data, load_size);
832 }
833 
834 void
835 LSQ::StoreBuffer::countIssuedStore(LSQRequestPtr request)
836 {
837  /* Barriers are accounted for as they are cleared from
838  * the queue, not after their transfers are complete */
839  if (!request->isBarrier())
840  numUnissuedAccesses--;
841 }
842 
843 void
844 LSQ::StoreBuffer::step()
845 {
846  DPRINTF(MinorMem, "StoreBuffer step numUnissuedAccesses: %d\n",
847  numUnissuedAccesses);
848 
849  if (numUnissuedAccesses != 0 && lsq.state == LSQ::MemoryRunning) {
850  /* Clear all the leading barriers */
851  while (!slots.empty() &&
852  slots.front()->isComplete() && slots.front()->isBarrier())
853  {
854  LSQRequestPtr barrier = slots.front();
855 
856  DPRINTF(MinorMem, "Clearing barrier for inst: %s\n",
857  *(barrier->inst));
858 
859  numUnissuedAccesses--;
860  lsq.clearMemBarrier(barrier->inst);
861  slots.pop_front();
862 
863  delete barrier;
864  }
865 
866  auto i = slots.begin();
867  bool issued = true;
868  unsigned int issue_count = 0;
869 
870  /* Skip trying if the memory system is busy */
871  if (lsq.state == LSQ::MemoryNeedsRetry)
872  issued = false;
873 
874  /* Try to issue all stores in order starting from the head
875  * of the queue. Responses are allowed to be retired
876  * out of order */
877  while (issued &&
878  issue_count < storeLimitPerCycle &&
879  lsq.canSendToMemorySystem() &&
880  i != slots.end())
881  {
882  LSQRequestPtr request = *i;
883 
884  DPRINTF(MinorMem, "Considering request: %s, sentAllPackets: %d"
885  " state: %s\n",
886  *(request->inst), request->sentAllPackets(),
887  request->state);
888 
889  if (request->isBarrier() && request->isComplete()) {
890  /* Give up at barriers */
891  issued = false;
892  } else if (!(request->state == LSQRequest::StoreBufferIssuing &&
893  request->sentAllPackets()))
894  {
895  DPRINTF(MinorMem, "Trying to send request: %s to memory"
896  " system\n", *(request->inst));
897 
898  if (lsq.tryToSend(request)) {
899  countIssuedStore(request);
900  issue_count++;
901  } else {
902  /* Don't step on to the next store buffer entry if this
903  * one hasn't issued all its packets as the store
904  * buffer must still enforce ordering */
905  issued = false;
906  }
907  }
908  i++;
909  }
910  }
911 }
912 
913 void
914 LSQ::completeMemBarrierInst(MinorDynInstPtr inst,
915  bool committed)
916 {
917  if (committed) {
918  /* Not already sent to the store buffer as a store request? */
919  if (!inst->inStoreBuffer) {
920  /* Insert an entry into the store buffer to tick off barriers
921  * until there are none in flight */
922  storeBuffer.insert(new BarrierDataRequest(*this, inst));
923  }
924  } else {
925  /* Clear the barrier anyway if it wasn't actually committed */
926  clearMemBarrier(inst);
927  }
928 }
929 
930 void
931 LSQ::StoreBuffer::minorTrace() const
932 {
933  unsigned int size = slots.size();
934  unsigned int i = 0;
935  std::ostringstream os;
936 
937  while (i < size) {
938  LSQRequestPtr request = slots[i];
939 
940  request->reportData(os);
941 
942  i++;
943  if (i < numSlots)
944  os << ',';
945  }
946 
947  while (i < numSlots) {
948  os << '-';
949 
950  i++;
951  if (i < numSlots)
952  os << ',';
953  }
954 
955  minor::minorTrace("addr=%s num_unissued_stores=%d\n", os.str(),
956  numUnissuedAccesses);
957 }
958 
959 void
960 LSQ::tryToSendToTransfers(LSQRequestPtr request)
961 {
962  if (state == MemoryNeedsRetry) {
963  DPRINTF(MinorMem, "Request needs retry, not issuing to"
964  " memory until retry arrives\n");
965  return;
966  }
967 
968  if (request->state == LSQRequest::InTranslation) {
969  DPRINTF(MinorMem, "Request still in translation, not issuing to"
970  " memory\n");
971  return;
972  }
973 
974  assert(request->state == LSQRequest::Translated ||
975  request->state == LSQRequest::RequestIssuing ||
976  request->state == LSQRequest::Failed ||
977  request->state == LSQRequest::Complete);
978 
979  if (requests.empty() || requests.front() != request) {
980  DPRINTF(MinorMem, "Request not at front of requests queue, can't"
981  " issue to memory\n");
982  return;
983  }
984 
985  if (transfers.unreservedRemainingSpace() == 0) {
986  DPRINTF(MinorMem, "No space to insert request into transfers"
987  " queue\n");
988  return;
989  }
990 
991  if (request->isComplete() || request->state == LSQRequest::Failed) {
992  DPRINTF(MinorMem, "Passing a %s transfer on to transfers"
993  " queue\n", (request->isComplete() ? "completed" : "failed"));
994  request->setState(LSQRequest::Complete);
995  request->setSkipped();
996  moveFromRequestsToTransfers(request);
997  return;
998  }
999 
1000  if (!execute.instIsRightStream(request->inst)) {
1001  /* Wrong stream, try to abort the transfer but only do so if
1002  * there are no packets in flight */
1003  if (request->hasPacketsInMemSystem()) {
1004  DPRINTF(MinorMem, "Request's inst. is from the wrong stream,"
1005  " waiting for responses before aborting request\n");
1006  } else {
1007  DPRINTF(MinorMem, "Request's inst. is from the wrong stream,"
1008  " aborting request\n");
1009  request->setState(LSQRequest::Complete);
1010  request->setSkipped();
1011  moveFromRequestsToTransfers(request);
1012  }
1013  return;
1014  }
1015 
1016  if (request->inst->translationFault != NoFault) {
1017  if (request->inst->staticInst->isPrefetch()) {
1018  DPRINTF(MinorMem, "Not signalling fault for faulting prefetch\n");
1019  }
1020  DPRINTF(MinorMem, "Moving faulting request into the transfers"
1021  " queue\n");
1022  request->setState(LSQRequest::Complete);
1023  request->setSkipped();
1024  moveFromRequestsToTransfers(request);
1025  return;
1026  }
1027 
1028  bool is_load = request->isLoad;
1029  bool is_llsc = request->request->isLLSC();
1030  bool is_release = request->request->isRelease();
1031  bool is_swap = request->request->isSwap();
1032  bool is_atomic = request->request->isAtomic();
1033  bool bufferable = !(request->request->isStrictlyOrdered() ||
1034  is_llsc || is_swap || is_atomic || is_release);
1035 
1036  if (is_load) {
1037  if (numStoresInTransfers != 0) {
1038  DPRINTF(MinorMem, "Load request with stores still in transfers"
1039  " queue, stalling\n");
1040  return;
1041  }
1042  } else {
1043  /* Store. Can it be sent to the store buffer? */
1044  if (bufferable && !request->request->isLocalAccess()) {
1045  request->setState(LSQRequest::StoreToStoreBuffer);
1046  moveFromRequestsToTransfers(request);
1047  DPRINTF(MinorMem, "Moving store into transfers queue\n");
1048  return;
1049  }
1050  }
1051 
1052  // Process store conditionals or store release after all previous
1053  // stores are completed
1054  if (((!is_load && is_llsc) || is_release) &&
1055  !storeBuffer.isDrained()) {
1056  DPRINTF(MinorMem, "Memory access needs to wait for store buffer"
1057  " to drain\n");
1058  return;
1059  }
1060 
1061  /* Check if this is the head instruction (and so must be executable as
1062  * its stream sequence number was checked above) for loads which must
1063  * not be speculatively issued and stores which must be issued here */
1064  if (!bufferable) {
1065  if (!execute.instIsHeadInst(request->inst)) {
1066  DPRINTF(MinorMem, "Memory access not the head inst., can't be"
1067  " sure it can be performed, not issuing\n");
1068  return;
1069  }
1070 
1071  unsigned int forwarding_slot = 0;
1072 
1073  if (storeBuffer.canForwardDataToLoad(request, forwarding_slot) !=
1074  NoAddrRangeCoverage)
1075  {
1076  // There's at least another request that targets the same
1077  // address and is staying in the storeBuffer. Since our
1078  // request is non-bufferable (e.g., strictly ordered or atomic),
1079  // we must wait for the other request in the storeBuffer to
1080  // complete before we can issue this non-bufferable request.
1081  // This is to make sure that the order they access the cache is
1082  // correct.
1083  DPRINTF(MinorMem, "Memory access can receive forwarded data"
1084  " from the store buffer, but need to wait for store buffer"
1085  " to drain\n");
1086  return;
1087  }
1088  }
1089 
1090  /* True: submit this packet to the transfers queue to be sent to the
1091  * memory system.
1092  * False: skip the memory and push a packet for this request onto
1093  * requests */
1094  bool do_access = true;
1095 
1096  if (!is_llsc) {
1097  /* Check for match in the store buffer */
1098  if (is_load) {
1099  unsigned int forwarding_slot = 0;
1100  AddrRangeCoverage forwarding_result =
1101  storeBuffer.canForwardDataToLoad(request,
1102  forwarding_slot);
1103 
1104  switch (forwarding_result) {
1105  case FullAddrRangeCoverage:
1106  /* Forward data from the store buffer into this request and
1107  * repurpose this request's packet into a response packet */
1108  storeBuffer.forwardStoreData(request, forwarding_slot);
1109  request->packet->makeResponse();
1110 
1111  /* Just move between queues, no access */
1112  do_access = false;
1113  break;
1114  case PartialAddrRangeCoverage:
1115  DPRINTF(MinorMem, "Load partly satisfied by store buffer"
1116  " data. Must wait for the store to complete\n");
1117  return;
1118  break;
1119  case NoAddrRangeCoverage:
1120  DPRINTF(MinorMem, "No forwardable data from store buffer\n");
1121  /* Fall through to try access */
1122  break;
1123  }
1124  }
1125  } else {
1126  if (!canSendToMemorySystem()) {
1127  DPRINTF(MinorMem, "Can't send request to memory system yet\n");
1128  return;
1129  }
1130 
1131  SimpleThread &thread = *cpu.threads[request->inst->id.threadId];
1132 
1133  std::unique_ptr<PCStateBase> old_pc(thread.pcState().clone());
1134  ExecContext context(cpu, thread, execute, request->inst);
1135 
1136  /* Handle LLSC requests and tests */
1137  if (is_load) {
1138  thread.getIsaPtr()->handleLockedRead(&context, request->request);
1139  } else {
1140  do_access = thread.getIsaPtr()->handleLockedWrite(&context,
1141  request->request, cacheBlockMask);
1142 
1143  if (!do_access) {
1144  DPRINTF(MinorMem, "Not performing a memory "
1145  "access for store conditional\n");
1146  }
1147  }
1148  thread.pcState(*old_pc);
1149  }
1150 
1151  /* See the do_access comment above */
1152  if (do_access) {
1153  if (!canSendToMemorySystem()) {
1154  DPRINTF(MinorMem, "Can't send request to memory system yet\n");
1155  return;
1156  }
1157 
1158  /* Remember if this is an access which can't be idly
1159  * discarded by an interrupt */
1160  if (!bufferable && !request->issuedToMemory) {
1161  numAccessesIssuedToMemory++;
1162  request->issuedToMemory = true;
1163  }
1164 
1165  if (tryToSend(request)) {
1166  moveFromRequestsToTransfers(request);
1167  }
1168  } else {
1169  request->setState(LSQRequest::Complete);
1170  moveFromRequestsToTransfers(request);
1171  }
1172 }
1173 
1174 bool
1175 LSQ::tryToSend(LSQRequestPtr request)
1176 {
1177  bool ret = false;
1178 
1179  if (!canSendToMemorySystem()) {
1180  DPRINTF(MinorMem, "Can't send request: %s yet, no space in memory\n",
1181  *(request->inst));
1182  } else {
1183  PacketPtr packet = request->getHeadPacket();
1184 
1185  DPRINTF(MinorMem, "Trying to send request: %s addr: 0x%x\n",
1186  *(request->inst), packet->req->getVaddr());
1187 
1188  /* The sender state of the packet *must* be an LSQRequest
1189  * so the response can be correctly handled */
1190  assert(packet->findNextSenderState<LSQRequest>());
1191 
1192  if (request->request->isLocalAccess()) {
1193  ThreadContext *thread =
1194  cpu.getContext(cpu.contextToThread(
1195  request->request->contextId()));
1196 
1197  if (request->isLoad)
1198  DPRINTF(MinorMem, "IPR read inst: %s\n", *(request->inst));
1199  else
1200  DPRINTF(MinorMem, "IPR write inst: %s\n", *(request->inst));
1201 
1202  request->request->localAccessor(thread, packet);
1203 
1204  request->stepToNextPacket();
1205  ret = request->sentAllPackets();
1206 
1207  if (!ret) {
1208  DPRINTF(MinorMem, "IPR access has another packet: %s\n",
1209  *(request->inst));
1210  }
1211 
1212  if (ret)
1213  request->setState(LSQRequest::Complete);
1214  else
1215  request->setState(LSQRequest::RequestIssuing);
1216  } else if (dcachePort.sendTimingReq(packet)) {
1217  DPRINTF(MinorMem, "Sent data memory request\n");
1218 
1219  numAccessesInMemorySystem++;
1220 
1221  request->stepToNextPacket();
1222 
1223  ret = request->sentAllPackets();
1224 
1225  switch (request->state) {
1226  case LSQRequest::Translated:
1227  case LSQRequest::RequestIssuing:
1228  /* Fully or partially issued a request in the transfers
1229  * queue */
1230  request->setState(LSQRequest::RequestIssuing);
1231  break;
1232  case LSQRequest::StoreInStoreBuffer:
1233  case LSQRequest::StoreBufferIssuing:
1234  /* Fully or partially issued a request in the store
1235  * buffer */
1236  request->setState(LSQRequest::StoreBufferIssuing);
1237  break;
1238  default:
1239  panic("Unrecognized LSQ request state %d.", request->state);
1240  }
1241 
1242  state = MemoryRunning;
1243  } else {
1244  DPRINTF(MinorMem,
1245  "Sending data memory request - needs retry\n");
1246 
1247  /* Needs to be resent, wait for that */
1248  state = MemoryNeedsRetry;
1249  retryRequest = request;
1250 
1251  switch (request->state) {
1252  case LSQRequest::Translated:
1253  case LSQRequest::RequestIssuing:
1254  request->setState(LSQRequest::RequestNeedsRetry);
1255  break;
1256  case LSQRequest::StoreInStoreBuffer:
1257  case LSQRequest::StoreBufferIssuing:
1258  request->setState(LSQRequest::StoreBufferNeedsRetry);
1259  break;
1260  default:
1261  panic("Unrecognized LSQ request state %d.", request->state);
1262  }
1263  }
1264  }
1265 
1266  if (ret)
1267  threadSnoop(request);
1268 
1269  return ret;
1270 }
1271 
1272 void
1273 LSQ::moveFromRequestsToTransfers(LSQRequestPtr request)
1274 {
1275  assert(!requests.empty() && requests.front() == request);
1276  assert(transfers.unreservedRemainingSpace() != 0);
1277 
1278  /* Need to count the number of stores in the transfers
1279  * queue so that loads know when their store buffer forwarding
1280  * results will be correct (only when all those stores
1281  * have reached the store buffer) */
1282  if (!request->isLoad)
1283  numStoresInTransfers++;
1284 
1285  requests.pop();
1286  transfers.push(request);
1287 }
1288 
1289 bool
1290 LSQ::canSendToMemorySystem()
1291 {
1292  return state == MemoryRunning &&
1293  numAccessesInMemorySystem < inMemorySystemLimit;
1294 }
1295 
1296 bool
1297 LSQ::recvTimingResp(PacketPtr response)
1298 {
1299  LSQRequestPtr request =
1300  safe_cast<LSQRequestPtr>(response->popSenderState());
1301 
1302  DPRINTF(MinorMem, "Received response packet inst: %s"
1303  " addr: 0x%x cmd: %s\n",
1304  *(request->inst), response->getAddr(),
1305  response->cmd.toString());
1306 
1307  numAccessesInMemorySystem--;
1308 
1309  if (response->isError()) {
1310  DPRINTF(MinorMem, "Received error response packet: %s\n",
1311  *request->inst);
1312  }
1313 
1314  switch (request->state) {
1315  case LSQRequest::RequestIssuing:
1316  case LSQRequest::RequestNeedsRetry:
1317  /* Response to a request from the transfers queue */
1318  request->retireResponse(response);
1319 
1320  DPRINTF(MinorMem, "Has outstanding packets?: %d %d\n",
1321  request->hasPacketsInMemSystem(), request->isComplete());
1322 
1323  break;
1324  case LSQRequest::StoreBufferIssuing:
1325  case LSQRequest::StoreBufferNeedsRetry:
1326  /* Response to a request from the store buffer */
1327  request->retireResponse(response);
1328 
1329  /* Remove completed requests unless they are barriers (which will
1330  * need to be removed in order) */
1331  if (request->isComplete()) {
1332  if (!request->isBarrier()) {
1333  storeBuffer.deleteRequest(request);
1334  } else {
1335  DPRINTF(MinorMem, "Completed transfer for barrier: %s"
1336  " leaving the request as it is also a barrier\n",
1337  *(request->inst));
1338  }
1339  }
1340  break;
1341  default:
1342  panic("Shouldn't be allowed to receive a response from another state");
1343  }
1344 
1345  /* We go to idle even if there are more things in the requests queue
1346  * as it's the job of step to actually step us on to the next
1347  * transaction */
1348 
1349  /* Let's try and wake up the processor for the next cycle */
1350  cpu.wakeupOnEvent(Pipeline::ExecuteStageId);
1351 
1352  /* Never busy */
1353  return true;
1354 }
1355 
1356 void
1357 LSQ::recvReqRetry()
1358 {
1359  DPRINTF(MinorMem, "Received retry request\n");
1360 
1361  assert(state == MemoryNeedsRetry);
1362 
1363  switch (retryRequest->state) {
1364  case LSQRequest::RequestNeedsRetry:
1365  /* Retry in the requests queue */
1366  retryRequest->setState(LSQRequest::Translated);
1367  break;
1368  case LSQRequest::StoreBufferNeedsRetry:
1369  /* Retry in the store buffer */
1370  retryRequest->setState(LSQRequest::StoreInStoreBuffer);
1371  break;
1372  default:
1373  panic("Unrecognized retry request state %d.", retryRequest->state);
1374  }
1375 
1376  /* Set state back to MemoryRunning so that the following
1377  * tryToSend can actually send. Note that this won't
1378  * allow another transfer in as tryToSend should
1379  * issue a memory request and either succeed for this
1380  * request or return the LSQ back to MemoryNeedsRetry */
1381  state = MemoryRunning;
1382 
1383  /* Try to resend the request */
1384  if (tryToSend(retryRequest)) {
1385  /* Successfully sent, need to move the request */
1386  switch (retryRequest->state) {
1387  case LSQRequest::RequestIssuing:
1388  /* In the requests queue */
1389  moveFromRequestsToTransfers(retryRequest);
1390  break;
1391  case LSQRequest::StoreBufferIssuing:
1392  /* In the store buffer */
1393  storeBuffer.countIssuedStore(retryRequest);
1394  break;
1395  default:
1396  panic("Unrecognized retry request state %d.", retryRequest->state);
1397  }
1398 
1399  retryRequest = NULL;
1400  }
1401 }
1402 
1403 LSQ::LSQ(std::string name_, std::string dcache_port_name_,
1404  MinorCPU &cpu_, Execute &execute_,
1405  unsigned int in_memory_system_limit, unsigned int line_width,
1406  unsigned int requests_queue_size, unsigned int transfers_queue_size,
1407  unsigned int store_buffer_size,
1408  unsigned int store_buffer_cycle_store_limit) :
1409  Named(name_),
1410  cpu(cpu_),
1411  execute(execute_),
1412  dcachePort(dcache_port_name_, *this, cpu_),
1413  lastMemBarrier(cpu.numThreads, 0),
1414  state(MemoryRunning),
1415  inMemorySystemLimit(in_memory_system_limit),
1416  lineWidth((line_width == 0 ? cpu.cacheLineSize() : line_width)),
1417  requests(name_ + ".requests", "addr", requests_queue_size),
1418  transfers(name_ + ".transfers", "addr", transfers_queue_size),
1419  storeBuffer(name_ + ".storeBuffer",
1420  *this, store_buffer_size, store_buffer_cycle_store_limit),
1421  numAccessesInMemorySystem(0),
1422  numAccessesInDTLB(0),
1423  numStoresInTransfers(0),
1424  numAccessesIssuedToMemory(0),
1425  retryRequest(NULL),
1426  cacheBlockMask(~(cpu_.cacheLineSize() - 1))
1427 {
1428  if (in_memory_system_limit < 1) {
1429  fatal("%s: executeMaxAccessesInMemory must be >= 1 (%d)\n", name_,
1430  in_memory_system_limit);
1431  }
1432 
1433  if (store_buffer_cycle_store_limit < 1) {
1434  fatal("%s: executeLSQMaxStoreBufferStoresPerCycle must be"
1435  " >= 1 (%d)\n", name_, store_buffer_cycle_store_limit);
1436  }
1437 
1438  if (requests_queue_size < 1) {
1439  fatal("%s: executeLSQRequestsQueueSize must be"
1440  " >= 1 (%d)\n", name_, requests_queue_size);
1441  }
1442 
1443  if (transfers_queue_size < 1) {
1444  fatal("%s: executeLSQTransfersQueueSize must be"
1445  " >= 1 (%d)\n", name_, transfers_queue_size);
1446  }
1447 
1448  if (store_buffer_size < 1) {
1449  fatal("%s: executeLSQStoreBufferSize must be"
1450  " >= 1 (%d)\n", name_, store_buffer_size);
1451  }
1452 
1453  if ((lineWidth & (lineWidth - 1)) != 0) {
1454  fatal("%s: lineWidth: %d must be a power of 2\n", name(), lineWidth);
1455  }
1456 }
1457 
1458 LSQ::~LSQ()
1459 { }
1460 
1461 LSQ::LSQRequest::~LSQRequest()
1462 {
1463  if (packet)
1464  delete packet;
1465  if (data)
1466  delete [] data;
1467 }
1468 
1475 void
1476 LSQ::step()
1477 {
1478  /* Try to move address-translated requests between queues and issue
1479  * them */
1480  if (!requests.empty())
1481  tryToSendToTransfers(requests.front());
1482 
1483  storeBuffer.step();
1484 }
1485 
1486 LSQ::LSQRequestPtr
1487 LSQ::findResponse(MinorDynInstPtr inst)
1488 {
1489  LSQ::LSQRequestPtr ret = NULL;
1490 
1491  if (!transfers.empty()) {
1492  LSQRequestPtr request = transfers.front();
1493 
1494  /* Same instruction and complete access or a store that's
1495  * capable of being moved to the store buffer */
1496  if (request->inst->id == inst->id) {
1497  bool complete = request->isComplete();
1498  bool can_store = storeBuffer.canInsert();
1499  bool to_store_buffer = request->state ==
1500  LSQRequest::StoreToStoreBuffer;
1501 
1502  if ((complete && !(request->isBarrier() && !can_store)) ||
1503  (to_store_buffer && can_store))
1504  {
1505  ret = request;
1506  }
1507  }
1508  }
1509 
1510  if (ret) {
1511  DPRINTF(MinorMem, "Found matching memory response for inst: %s\n",
1512  *inst);
1513  } else {
1514  DPRINTF(MinorMem, "No matching memory response for inst: %s\n",
1515  *inst);
1516  }
1517 
1518  return ret;
1519 }
1520 
1521 void
1522 LSQ::popResponse(LSQ::LSQRequestPtr response)
1523 {
1524  assert(!transfers.empty() && transfers.front() == response);
1525 
1526  transfers.pop();
1527 
1528  if (!response->isLoad)
1529  numStoresInTransfers--;
1530 
1531  if (response->issuedToMemory)
1532  numAccessesIssuedToMemory--;
1533 
1534  if (response->state != LSQRequest::StoreInStoreBuffer) {
1535  DPRINTF(MinorMem, "Deleting %s request: %s\n",
1536  (response->isLoad ? "load" : "store"),
1537  *(response->inst));
1538 
1539  delete response;
1540  }
1541 }
1542 
1543 void
1544 LSQ::sendStoreToStoreBuffer(LSQRequestPtr request)
1545 {
1546  assert(request->state == LSQRequest::StoreToStoreBuffer);
1547 
1548  DPRINTF(MinorMem, "Sending store: %s to store buffer\n",
1549  *(request->inst));
1550 
1551  request->inst->inStoreBuffer = true;
1552 
1553  storeBuffer.insert(request);
1554 }
1555 
1556 bool
1557 LSQ::isDrained()
1558 {
1559  return requests.empty() && transfers.empty() &&
1560  storeBuffer.isDrained();
1561 }
1562 
1563 bool
1564 LSQ::needsToTick()
1565 {
1566  bool ret = false;
1567 
1568  if (canSendToMemorySystem()) {
1569  bool have_translated_requests = !requests.empty() &&
1570  requests.front()->state != LSQRequest::InTranslation &&
1571  transfers.unreservedRemainingSpace() != 0;
1572 
1573  ret = have_translated_requests ||
1574  storeBuffer.numUnissuedStores() != 0;
1575  }
1576 
1577  if (ret)
1578  DPRINTF(Activity, "Need to tick\n");
1579 
1580  return ret;
1581 }
1582 
1583 Fault
1584 LSQ::pushRequest(MinorDynInstPtr inst, bool isLoad, uint8_t *data,
1585  unsigned int size, Addr addr, Request::Flags flags,
1586  uint64_t *res, AtomicOpFunctorPtr amo_op,
1587  const std::vector<bool>& byte_enable)
1588 {
1589  assert(inst->translationFault == NoFault || inst->inLSQ);
1590 
1591  if (inst->inLSQ) {
1592  return inst->translationFault;
1593  }
1594 
1595  bool needs_burst = transferNeedsBurst(addr, size, lineWidth);
1596 
1597  if (needs_burst && inst->staticInst->isAtomic()) {
1598  // AMO requests that access across a cache line boundary are not
1599  // allowed since the cache does not guarantee AMO ops to be executed
1600  // atomically in two cache lines
1601  // For ISAs such as x86 that requires AMO operations to work on
1602  // accesses that cross cache-line boundaries, the cache needs to be
1603  // modified to support locking both cache lines to guarantee the
1604  // atomicity.
1605  panic("Do not expect cross-cache-line atomic memory request\n");
1606  }
1607 
1608  LSQRequestPtr request;
1609 
1610  /* Copy given data into the request. The request will pass this to the
1611  * packet and then it will own the data */
1612  uint8_t *request_data = NULL;
1613 
1614  DPRINTF(MinorMem, "Pushing request (%s) addr: 0x%x size: %d flags:"
1615  " 0x%x%s lineWidth : 0x%x\n",
1616  (isLoad ? "load" : "store/atomic"), addr, size, flags,
1617  (needs_burst ? " (needs burst)" : ""), lineWidth);
1618 
1619  if (!isLoad) {
1620  /* Request_data becomes the property of a ...DataRequest (see below)
1621  * and destroyed by its destructor */
1622  request_data = new uint8_t[size];
1623  if (inst->staticInst->isAtomic() ||
1624  (flags & Request::STORE_NO_DATA)) {
1625  /* For atomic or store-no-data, just use zeroed data */
1626  std::memset(request_data, 0, size);
1627  } else {
1628  std::memcpy(request_data, data, size);
1629  }
1630  }
1631 
1632  if (needs_burst) {
1633  request = new SplitDataRequest(
1634  *this, inst, isLoad, request_data, res);
1635  } else {
1636  request = new SingleDataRequest(
1637  *this, inst, isLoad, request_data, res);
1638  }
1639 
1640  if (inst->traceData)
1641  inst->traceData->setMem(addr, size, flags);
1642 
1643  int cid = cpu.threads[inst->id.threadId]->getTC()->contextId();
1644  request->request->setContext(cid);
1645  request->request->setVirt(
1646  addr, size, flags, cpu.dataRequestorId(),
1647  /* I've no idea why we need the PC, but give it */
1648  inst->pc->instAddr(), std::move(amo_op));
1649  request->request->setByteEnable(byte_enable);
1650 
1651  /* If the request is marked as NO_ACCESS, setup a local access
1652  * doing nothing */
1653  if (flags.isSet(Request::NO_ACCESS)) {
1654  assert(!request->request->isLocalAccess());
1655  request->request->setLocalAccessor(
1656  [] (ThreadContext *tc, PacketPtr pkt) { return Cycles(1); });
1657  }
1658 
1659  requests.push(request);
1660  inst->inLSQ = true;
1661  request->startAddrTranslation();
1662 
1663  return inst->translationFault;
1664 }
1665 
1666 void
1667 LSQ::pushFailedRequest(MinorDynInstPtr inst)
1668 {
1669  LSQRequestPtr request = new FailedDataRequest(*this, inst);
1670  requests.push(request);
1671 }
1672 
1673 void
1674 LSQ::minorTrace() const
1675 {
1676  minor::minorTrace("state=%s in_tlb_mem=%d/%d stores_in_transfers=%d"
1677  " lastMemBarrier=%d\n",
1678  state, numAccessesInDTLB, numAccessesInMemorySystem,
1679  numStoresInTransfers, lastMemBarrier[0]);
1680  requests.minorTrace();
1681  transfers.minorTrace();
1682  storeBuffer.minorTrace();
1683 }
1684 
1685 LSQ::StoreBuffer::StoreBuffer(std::string name_, LSQ &lsq_,
1686  unsigned int store_buffer_size,
1687  unsigned int store_limit_per_cycle) :
1688  Named(name_), lsq(lsq_),
1689  numSlots(store_buffer_size),
1690  storeLimitPerCycle(store_limit_per_cycle),
1691  slots(),
1692  numUnissuedAccesses(0)
1693 {
1694 }
1695 
1696 PacketPtr
1697 makePacketForRequest(const RequestPtr &request, bool isLoad,
1698  Packet::SenderState *sender_state, PacketDataPtr data)
1699 {
1700  PacketPtr ret = isLoad ? Packet::createRead(request)
1701  : Packet::createWrite(request);
1702 
1703  if (sender_state)
1704  ret->pushSenderState(sender_state);
1705 
1706  if (isLoad) {
1707  ret->allocate();
1708  } else if (!request->isCacheMaintenance()) {
1709  // CMOs are treated as stores but they don't have data. All
1710  // stores otherwise need to allocate for data.
1711  ret->dataDynamic(data);
1712  }
1713 
1714  return ret;
1715 }
1716 
1717 void
1718 LSQ::issuedMemBarrierInst(MinorDynInstPtr inst)
1719 {
1720  assert(inst->isInst() && inst->staticInst->isFullMemBarrier());
1721  assert(inst->id.execSeqNum > lastMemBarrier[inst->id.threadId]);
1722 
1723  /* Remember the barrier. We only have a notion of one
1724  * barrier so this may result in some mem refs being
1725  * delayed if they are between barriers */
1726  lastMemBarrier[inst->id.threadId] = inst->id.execSeqNum;
1727 }
1728 
1729 void
1730 LSQ::LSQRequest::makePacket()
1731 {
1732  assert(inst->translationFault == NoFault);
1733 
1734  /* Make the function idempotent */
1735  if (packet)
1736  return;
1737 
1738  packet = makePacketForRequest(request, isLoad, this, data);
1739  /* Null the data pointer so we know not to deallocate it when this
1740  * request is destroyed. The data now belongs to the packet and
1741  * the packet is responsible for its destruction */
1742  data = NULL;
1743 }
1744 
1745 std::ostream &
1746 operator <<(std::ostream &os, LSQ::MemoryState state)
1747 {
1748  switch (state) {
1749  case LSQ::MemoryRunning:
1750  os << "MemoryRunning";
1751  break;
1752  case LSQ::MemoryNeedsRetry:
1753  os << "MemoryNeedsRetry";
1754  break;
1755  default:
1756  os << "MemoryState-" << static_cast<int>(state);
1757  break;
1758  }
1759  return os;
1760 }
1761 
1762 void
1763 LSQ::recvTimingSnoopReq(PacketPtr pkt)
1764 {
1765  /* LLSC operations in Minor can't be speculative and are executed from
1766  * the head of the requests queue. We shouldn't need to do more than
1767  * this action on snoops. */
1768  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1769  if (cpu.getCpuAddrMonitor(tid)->doMonitor(pkt)) {
1770  cpu.wakeup(tid);
1771  }
1772  }
1773 
1774  if (pkt->isInvalidate() || pkt->isWrite()) {
1775  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1776  cpu.threads[tid]->getIsaPtr()->handleLockedSnoop(
1777  pkt, cacheBlockMask);
1778  }
1779  }
1780 }
1781 
1782 void
1783 LSQ::threadSnoop(LSQRequestPtr request)
1784 {
1785  /* LLSC operations in Minor can't be speculative and are executed from
1786  * the head of the requests queue. We shouldn't need to do more than
1787  * this action on snoops. */
1788  ThreadID req_tid = request->inst->id.threadId;
1789  PacketPtr pkt = request->packet;
1790 
1791  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1792  if (tid != req_tid) {
1793  if (cpu.getCpuAddrMonitor(tid)->doMonitor(pkt)) {
1794  cpu.wakeup(tid);
1795  }
1796 
1797  if (pkt->isInvalidate() || pkt->isWrite()) {
1798  cpu.threads[tid]->getIsaPtr()->handleLockedSnoop(pkt,
1799  cacheBlockMask);
1800  }
1801  }
1802  }
1803 }
1804 
1805 } // namespace minor
1806 } // namespace gem5
Definition: trace.hh:193
#define DPRINTF(x,...)
Definition: trace.hh:186
const char data[]
RequestorID dataRequestorId() const
Reads this CPU's unique data requestor ID.
Definition: base.hh:189
AddressMonitor * getCpuAddrMonitor(ThreadID tid)
Definition: base.hh:633
virtual ThreadContext * getContext(int tn)
Given a thread num get tho thread context for it.
Definition: base.hh:284
ThreadID numThreads
Number of threads we're actually simulating (<= SMT_MAX_THREADS).
Definition: base.hh:367
ThreadID contextToThread(ContextID cid)
Convert ContextID to threadID.
Definition: base.hh:295
virtual void handleLockedRead(const RequestPtr &req)
Definition: isa.hh:89
virtual bool handleLockedWrite(const RequestPtr &req, Addr cacheBlockMask)
Definition: isa.hh:96
virtual void handleLockedSnoop(PacketPtr pkt, Addr cacheBlockMask)
Definition: isa.hh:107
virtual void translateTiming(const RequestPtr &req, ThreadContext *tc, Translation *translation, Mode mode)
Definition: mmu.cc:111
Cycles is a wrapper class for representing cycle counts, i.e.
Definition: types.hh:79
const std::string & toString() const
Return the string to a cmd given by idx.
Definition: packet.hh:275
MinorCPU is an in-order CPU model with four fixed pipeline stages:
Definition: cpu.hh:86
void wakeup(ThreadID tid) override
Definition: cpu.cc:143
void wakeupOnEvent(unsigned int stage_id)
Interface for stages to signal that they have become active after a callback or eventq event where th...
Definition: cpu.cc:291
std::vector< minor::MinorThread * > threads
These are thread state-representing objects for this CPU.
Definition: cpu.hh:101
Interface for things with names.
Definition: named.hh:39
virtual std::string name() const
Definition: named.hh:47
virtual PCStateBase * clone() const =0
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:294
T * getPtr()
get a pointer to the data ptr.
Definition: packet.hh:1212
Addr getAddr() const
Definition: packet.hh:805
bool isError() const
Definition: packet.hh:621
static PacketPtr createWrite(const RequestPtr &req)
Definition: packet.hh:1041
void makeResponse()
Take a request packet and modify it in place to be suitable for returning as a response to that reque...
Definition: packet.hh:1059
T * findNextSenderState() const
Go through the sender state stack and return the first instance that is of type T (as determined by a...
Definition: packet.hh:574
void pushSenderState(SenderState *sender_state)
Push a new sender state to the packet and make the current sender state the predecessor of the new on...
Definition: packet.cc:334
SenderState * popSenderState()
Pop the top of the state stack and return a pointer to it.
Definition: packet.cc:342
bool isWrite() const
Definition: packet.hh:593
static PacketPtr createRead(const RequestPtr &req)
Constructor-like methods that return Packets based on Request objects.
Definition: packet.hh:1035
RequestPtr req
A pointer to the original request.
Definition: packet.hh:376
unsigned getSize() const
Definition: packet.hh:815
void dataDynamic(T *p)
Set the data pointer to a value that should have delete [] called on it.
Definition: packet.hh:1200
const T * getConstPtr() const
Definition: packet.hh:1221
MemCmd cmd
The command field of the packet.
Definition: packet.hh:371
bool isInvalidate() const
Definition: packet.hh:608
void allocate()
Allocate memory for the packet.
Definition: packet.hh:1354
bool sendTimingReq(PacketPtr pkt)
Attempt to send a timing request to the responder port by calling its corresponding receive function.
Definition: port.hh:495
@ NO_ACCESS
The request should not cause a memory access.
Definition: request.hh:146
static const FlagsType STORE_NO_DATA
Definition: request.hh:260
The SimpleThread object provides a combination of the ThreadState object and the ThreadContext interf...
const PCStateBase & pcState() const override
BaseISA * getIsaPtr() const override
ThreadContext is the external interface to all thread state for anything outside of the CPU.
virtual BaseISA * getIsaPtr() const =0
virtual BaseMMU * getMMUPtr()=0
ExecContext bears the exec_context interface for Minor.
Definition: exec_context.hh:74
void setMemAccPredicate(bool val) override
Execute stage.
Definition: execute.hh:69
bool instIsRightStream(MinorDynInstPtr inst)
Does the given instruction have the right stream sequence number to be committed?
Definition: execute.cc:1911
bool instIsHeadInst(MinorDynInstPtr inst)
Returns true if the given instruction is at the head of the inFlightInsts instruction queue.
Definition: execute.cc:1917
Request for doing barrier accounting in the store buffer.
Definition: lsq.hh:335
FailedDataRequest represents requests from instructions that failed their predicates but need to ride...
Definition: lsq.hh:325
Derived SenderState to carry data access info.
Definition: lsq.hh:130
static AddrRangeCoverage containsAddrRangeOf(Addr req1_addr, unsigned int req1_size, Addr req2_addr, unsigned int req2_size)
Does address range req1 (req1_addr to req1_addr + req1_size - 1) fully cover, partially cover or not ...
Definition: lsq.cc:122
void tryToSuppressFault()
Instructions may want to suppress translation faults (e.g.
Definition: lsq.cc:79
bool needsToBeSentToStoreBuffer()
This request, once processed by the requests/transfers queues, will need to go to the store buffer.
Definition: lsq.cc:165
RequestPtr request
The underlying request of this LSQRequest.
Definition: lsq.hh:153
void makePacket()
Make a packet to use with the memory transaction.
Definition: lsq.cc:1730
bool isLoad
Load/store indication used for building packet.
Definition: lsq.hh:140
void setState(LSQRequestState new_state)
Set state and output trace output.
Definition: lsq.cc:171
bool issuedToMemory
This in an access other than a normal cacheable load that's visited the memory system.
Definition: lsq.hh:165
void setSkipped()
Set this request as having been skipped before a memory transfer was attempt.
Definition: lsq.hh:220
virtual bool sentAllPackets()=0
Have all packets been sent?
virtual PacketPtr getHeadPacket()=0
Get the next packet to issue for this request.
PacketDataPtr data
Dynamically allocated and populated data carried for building write packets.
Definition: lsq.hh:144
void reportData(std::ostream &os) const
MinorTrace report interface.
Definition: lsq.cc:187
virtual bool hasPacketsInMemSystem()=0
True if this request has any issued packets in the memory system and so can't be interrupted until it...
virtual void startAddrTranslation()=0
Start the address translation process for this request.
LSQRequestState state
Definition: lsq.hh:192
LSQRequest(LSQ &port_, MinorDynInstPtr inst_, bool isLoad_, PacketDataPtr data_=NULL, uint64_t *res_=NULL)
Definition: lsq.cc:60
virtual void stepToNextPacket()=0
Step to the next packet for the next call to getHeadPacket.
MinorDynInstPtr inst
Instruction which made this request.
Definition: lsq.hh:136
virtual bool isBarrier()
Is this request a barrier?
Definition: lsq.cc:159
void completeDisabledMemAccess()
Definition: lsq.cc:98
bool isComplete() const
Has this request been completed?
Definition: lsq.cc:179
virtual void retireResponse(PacketPtr packet_)=0
Retire a response packet into the LSQRequest packet possibly completing this transfer.
SingleDataRequest is used for requests that don't fragment.
Definition: lsq.hh:347
void startAddrTranslation()
Send single translation request.
Definition: lsq.cc:302
void finish(const Fault &fault_, const RequestPtr &request_, ThreadContext *tc, BaseMMU::Mode mode)
TLB interface.
Definition: lsq.cc:272
void retireResponse(PacketPtr packet_)
Keep the given packet as the response packet LSQRequest::packet.
Definition: lsq.cc:326
void stepToNextPacket()
Step on numIssuedFragments.
Definition: lsq.cc:617
void finish(const Fault &fault_, const RequestPtr &request_, ThreadContext *tc, BaseMMU::Mode mode)
TLB response interface.
Definition: lsq.cc:335
void sendNextFragmentToTranslation()
Part of the address translation loop; see startAddrTranslation.
Definition: lsq.cc:702
void retireResponse(PacketPtr packet_)
For loads, paste the response data into the main response packet.
Definition: lsq.cc:625
void makeFragmentRequests()
Make all the Requests for this transfer's fragments so that those requests can be sent for address translation.
Definition: lsq.cc:420
PacketPtr getHeadPacket()
Get the head packet as counted by numIssuedFragments.
Definition: lsq.cc:609
void makeFragmentPackets()
Make the packets to go with the requests so they can be sent to the memory system.
Definition: lsq.cc:534
SplitDataRequest(LSQ &port_, MinorDynInstPtr inst_, bool isLoad_, PacketDataPtr data_=NULL, uint64_t *res_=NULL)
Definition: lsq.cc:393
void startAddrTranslation()
Start a loop of do { sendNextFragmentToTranslation ; translateTiming ; finish } while (numTranslatedFragments != numFragments).
Definition: lsq.cc:584
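Because the TLB answers asynchronously, that do/while is spread across callbacks: each finish() either sends the next fragment for translation or, once all fragments are done, lets packet building proceed. A self-contained toy model of the control flow (illustrative names; the direct finish() call stands in for the TLB's asynchronous callback):

    #include <cstdio>

    struct SplitRequestModel
    {
        unsigned numFragments;
        unsigned numTranslatedFragments = 0;

        void startAddrTranslation() { sendNextFragmentToTranslation(); }

        void sendNextFragmentToTranslation()
        {
            // In gem5 this hands fragment N to the MMU for timing
            // translation; here the "TLB" answers immediately.
            std::printf("translating fragment %u\n", numTranslatedFragments);
            finish();
        }

        void finish()
        {
            if (++numTranslatedFragments != numFragments)
                sendNextFragmentToTranslation();
            // else: all fragments translated, packets can be built
        }
    };

    int main()
    {
        SplitRequestModel req{3};
        req.startAddrTranslation();
    }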
bool isDrained() const
Drained if there is absolutely nothing left in the buffer.
Definition: lsq.hh:529
void step()
Try to issue more stores to memory.
Definition: lsq.cc:844
AddrRangeCoverage canForwardDataToLoad(LSQRequestPtr request, unsigned int &found_slot)
Look for a store which satisfies the given load.
Definition: lsq.cc:764
void forwardStoreData(LSQRequestPtr load, unsigned int slot_number)
Fill the given packet with the appropriate data from slot slot_number.
Definition: lsq.cc:805
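The forwarding step itself is a bounded copy once canForwardDataToLoad has found a covering slot: the load's bytes begin (load_addr - store_addr) into the store's data. A hedged sketch of just that copy (standalone types; the real method works on the slot's packet and handles partial coverage separately):

    #include <cstdint>
    #include <cstring>

    // Copy load_size forwarded bytes out of a buffered store that
    // fully covers the load (FullAddrRangeCoverage assumed).
    void
    forwardStoreData(const uint8_t *store_data, uint64_t store_addr,
                     uint8_t *load_data, uint64_t load_addr,
                     unsigned load_size)
    {
        std::memcpy(load_data,
                    store_data + (load_addr - store_addr),
                    load_size);
    }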
void minorTrace() const
Report queue contents for MinorTrace.
Definition: lsq.cc:931
StoreBuffer(std::string name_, LSQ &lsq_, unsigned int store_buffer_size, unsigned int store_limit_per_cycle)
Definition: lsq.cc:1685
void countIssuedStore(LSQRequestPtr request)
Count a store being issued to memory by decrementing numUnissuedAccesses.
Definition: lsq.cc:835
void insert(LSQRequestPtr request)
Insert a request at the back of the queue.
Definition: lsq.cc:742
unsigned int numUnissuedStores()
Number of stores in the store buffer which have not been completely issued to the memory system.
Definition: lsq.hh:521
bool canInsert() const
Can a new request be inserted into the queue?
Definition: lsq.cc:721
void deleteRequest(LSQRequestPtr request)
Delete the given request and free the slot it occupied.
Definition: lsq.cc:728
void tryToSendToTransfers(LSQRequestPtr request)
Try to issue a memory access for a translated request at the head of the requests queue.
Definition: lsq.cc:960
bool needsToTick()
May need to be ticked next cycle as one of the queues contains an actionable transfer or address translation.
Definition: lsq.cc:1564
void recvTimingSnoopReq(PacketPtr pkt)
Definition: lsq.cc:1763
void issuedMemBarrierInst(MinorDynInstPtr inst)
A memory barrier instruction has been issued; remember its execSeqNum so that later memory transfers can be held back until the barrier completes.
Definition: lsq.cc:1718
const unsigned int lineWidth
Memory system access width (and snap) in bytes.
Definition: lsq.hh:552
bool tryToSend(LSQRequestPtr request)
Try to send (or resend) a memory request's next/only packet to the memory system.
Definition: lsq.cc:1175
void minorTrace() const
Definition: lsq.cc:1674
bool canSendToMemorySystem()
Can a request be sent to the memory system?
Definition: lsq.cc:1290
MemoryState
State of memory access for head access.
Definition: lsq.hh:78
@ MemoryRunning
Definition: lsq.hh:79
@ MemoryNeedsRetry
Definition: lsq.hh:80
std::vector< InstSeqNum > lastMemBarrier
Most recent execSeqNum of a memory barrier instruction or 0 if there are no in-flight barriers.
Definition: lsq.hh:542
StoreBuffer storeBuffer
Definition: lsq.hh:595
unsigned int numAccessesInDTLB
Number of requests in the DTLB in the requests queue.
Definition: lsq.hh:607
void sendStoreToStoreBuffer(LSQRequestPtr request)
A store has been committed, please move it to the store buffer.
Definition: lsq.cc:1544
void pushFailedRequest(MinorDynInstPtr inst)
Push a request representing a failed predicate into the queues, just to maintain commit order.
Definition: lsq.cc:1667
LSQRequestPtr retryRequest
The request (from either requests or the store buffer) which is currently waiting to have its memory access retried.
Definition: lsq.hh:620
friend std::ostream & operator<<(std::ostream &os, MemoryState state)
Print MemoryState values as shown in the enum definition.
Definition: lsq.cc:1746
void threadSnoop(LSQRequestPtr request)
Snoop other threads' monitors on memory system accesses.
Definition: lsq.cc:1783
void recvReqRetry()
Definition: lsq.cc:1357
AddrRangeCoverage
Coverage of one address range with another.
Definition: lsq.hh:89
@ PartialAddrRangeCoverage
Definition: lsq.hh:90
@ NoAddrRangeCoverage
Definition: lsq.hh:92
@ FullAddrRangeCoverage
Definition: lsq.hh:91
virtual ~LSQ()
Definition: lsq.cc:1458
DcachePort dcachePort
Definition: lsq.hh:121
Fault pushRequest(MinorDynInstPtr inst, bool isLoad, uint8_t *data, unsigned int size, Addr addr, Request::Flags flags, uint64_t *res, AtomicOpFunctorPtr amo_op, const std::vector< bool > &byte_enable=std::vector< bool >())
Single interface for readMem/writeMem/amoMem to issue requests into the LSQ.
Definition: lsq.cc:1584
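For orientation, a simple 8-byte load might enter the LSQ along these lines (a hypothetical call site; only the signature above is real, the operands are made up):

    // Somewhere with access to the LSQ, e.g. Minor's ExecContext:
    Fault fault = lsq.pushRequest(
        inst,              // the issuing MinorDynInst
        true,              // isLoad
        nullptr,           // data: only populated for stores
        8,                 // access size in bytes
        vaddr,             // virtual address to load from
        Request::Flags(0), // no special request flags
        nullptr,           // res: store-conditional result, unused here
        nullptr);          // amo_op: atomic-op functor, unused here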
unsigned int numAccessesInMemorySystem
Count of the number of memory accesses currently in the memory system.
Definition: lsq.hh:604
unsigned int numAccessesIssuedToMemory
The number of accesses which have been issued to the memory system but have not been committed/discarded.
Definition: lsq.hh:616
LSQ(std::string name_, std::string dcache_port_name_, MinorCPU &cpu_, Execute &execute_, unsigned int max_accesses_in_memory_system, unsigned int line_width, unsigned int requests_queue_size, unsigned int transfers_queue_size, unsigned int store_buffer_size, unsigned int store_buffer_cycle_store_limit)
Definition: lsq.cc:1403
Execute & execute
Definition: lsq.hh:73
const unsigned int inMemorySystemLimit
Maximum number of in-flight accesses issued to the memory system.
Definition: lsq.hh:549
LSQQueue requests
requests contains LSQRequests which have been issued to the TLB by calling ExecContext::readMem/writeMem.
Definition: lsq.hh:575
unsigned int numStoresInTransfers
The number of stores in the transfers queue.
Definition: lsq.hh:611
void step()
Step checks the queues to see if there are issuable transfers which were not otherwise picked up by tests at the end of other events.
Definition: lsq.cc:1476
MemoryState state
Retry state of last issued memory transfer.
Definition: lsq.hh:546
LSQQueue transfers
Once issued to memory (or, for stores, just had their state changed to StoreToStoreBuffer), LSQRequests sit in the transfers queue until their responses can be retired.
Definition: lsq.hh:584
void completeMemBarrierInst(MinorDynInstPtr inst, bool committed)
Complete a barrier instruction.
Definition: lsq.cc:914
Addr cacheBlockMask
Address Mask for a cache block (e.g. ~(cache_block_size - 1)).
Definition: lsq.hh:623
void popResponse(LSQRequestPtr response)
Sanity check and pop the head response.
Definition: lsq.cc:1522
MinorCPU & cpu
My owner(s)
Definition: lsq.hh:72
void moveFromRequestsToTransfers(LSQRequestPtr request)
Move a request between queues.
Definition: lsq.cc:1273
LSQRequestPtr findResponse(MinorDynInstPtr inst)
Returns a response if it's at the head of the transfers queue and it's either complete or can be sent on to the store buffer.
Definition: lsq.cc:1487
bool isDrained()
Is there nothing left in the LSQ?
Definition: lsq.cc:1557
bool recvTimingResp(PacketPtr pkt)
Memory interface.
Definition: lsq.cc:1297
void clearMemBarrier(MinorDynInstPtr inst)
Clear a barrier (if it's the last one marked up in lastMemBarrier)
Definition: lsq.cc:259
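Since lastMemBarrier tracks at most one outstanding barrier per thread, clearing reduces to a guarded reset, roughly (a sketch assuming the per-thread layout documented at lastMemBarrier above):

    // If this instruction is still the newest barrier recorded for
    // its thread, forget it so later memory ops can proceed.
    if (inst->id.execSeqNum == lastMemBarrier[inst->id.threadId])
        lastMemBarrier[inst->id.threadId] = 0;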
bool empty() const
Is the queue empty?
Definition: buffers.hh:509
void minorTrace() const
Definition: buffers.hh:512
void pop()
Pop the head item.
Definition: buffers.hh:506
unsigned int unreservedRemainingSpace() const
Like remainingSpace but does not count reserved spaces.
Definition: buffers.hh:493
ElemType & front()
Head value.
Definition: buffers.hh:501
void push(ElemType &data)
Push an element into the buffer if it isn't a bubble.
Definition: buffers.hh:433
All the fun of executing instructions from Decode and sending branch/new instruction stream info.
std::unique_ptr< AtomicOpFunctor > AtomicOpFunctorPtr
Definition: amo.hh:242
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:178
#define fatal(...)
This implements a cprintf based fatal() function.
Definition: logging.hh:190
atomic_var_t state
Definition: helpers.cc:188
uint8_t flags
Definition: helpers.cc:66
#define warn(...)
Definition: logging.hh:246
ExecContext bears the exec_context interface for Minor.
A load/store queue that allows outstanding reads and writes.
Bitfield< 4, 0 > mode
Definition: misc_types.hh:74
Bitfield< 7 > i
Definition: misc_types.hh:67
Bitfield< 11, 7 > fragment
Definition: pagetable.hh:58
Bitfield< 17 > os
Definition: misc.hh:810
Bitfield< 3 > addr
Definition: types.hh:84
PacketPtr makePacketForRequest(const RequestPtr &request, bool isLoad, Packet::SenderState *sender_state, PacketDataPtr data)
Make a suitable packet for the given request.
Definition: lsq.cc:1697
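Its job follows from the Packet calls cross-referenced above: reads need a buffer allocated for the response, writes hand their dynamically allocated data over to the packet. A sketch of that shape (not necessarily the file's exact code):

    PacketPtr
    makePacketForRequest(const RequestPtr &request, bool isLoad,
                         Packet::SenderState *sender_state,
                         PacketDataPtr data)
    {
        PacketPtr pkt = isLoad ? Packet::createRead(request)
                               : Packet::createWrite(request);

        if (sender_state)
            pkt->pushSenderState(sender_state); // carry the LSQRequest along

        if (isLoad)
            pkt->allocate();        // space for the response data
        else
            pkt->dataDynamic(data); // packet now owns (and delete[]s) data

        return pkt;
    }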
void minorTrace(const char *fmt, Args ...args)
DPRINTFN for MinorTrace reporting.
Definition: trace.hh:67
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
std::shared_ptr< FaultBase > Fault
Definition: types.hh:248
int16_t ThreadID
Thread index/ID type.
Definition: types.hh:235
std::shared_ptr< Request > RequestPtr
Definition: request.hh:92
Tick curTick()
The universal simulation clock.
Definition: cur_tick.hh:46
uint64_t Addr
Address type. This will probably be moved somewhere else in the near future.
Definition: types.hh:147
Addr addrBlockOffset(Addr addr, Addr block_size)
Calculates the offset of a given address with respect to aligned fixed-size blocks.
Definition: utils.hh:53
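For power-of-two block sizes this is a single mask, for example:

    #include <cstdint>

    // Offset of addr within its aligned block; block_size must be a
    // power of two.
    uint64_t addrBlockOffset(uint64_t addr, uint64_t block_size)
    {
        return addr & (block_size - 1);
    }

    // addrBlockOffset(0x2fe, 64) == 0x3e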
@ Complete
Definition: misc.hh:57
bool transferNeedsBurst(Addr addr, unsigned int size, unsigned int block_size)
Returns true if the given memory access (address, size) needs to be fragmented across aligned fixed-size blocks.
Definition: utils.hh:80
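Combined with addrBlockOffset above, the burst test asks whether the access runs past the end of its first block:

    // True when [addr, addr + size) spills across an aligned block
    // boundary and so must be fragmented.
    bool transferNeedsBurst(uint64_t addr, unsigned size,
                            unsigned block_size)
    {
        return (addrBlockOffset(addr, block_size) + size) > block_size;
    }

    // e.g. an 8-byte access at 0x3c with 64-byte blocks:
    // 0x3c + 8 = 0x44 > 0x40, so the LSQ builds a SplitDataRequest.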
uint8_t * PacketDataPtr
Definition: packet.hh:71
constexpr decltype(nullptr) NoFault
Definition: types.hh:253
GEM5_DEPRECATED_NAMESPACE(GuestABI, guest_abi)
bool isAnyActiveElement(const std::vector< bool >::const_iterator &it_start, const std::vector< bool >::const_iterator &it_end)
Test if there is any active element in an enablement range.
Definition: utils.hh:89
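For the byte-enable vectors used above, this reduces to a linear scan; a sketch:

    #include <algorithm>
    #include <vector>

    // True if any byte-enable flag in [it_start, it_end) is set.
    bool
    isAnyActiveElement(std::vector<bool>::const_iterator it_start,
                       std::vector<bool>::const_iterator it_end)
    {
        return std::find(it_start, it_end, true) != it_end;
    }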
Minor contains all the definitions within the MinorCPU apart from the CPU class itself.
The constructed pipeline.
bool doMonitor(PacketPtr pkt)
Definition: base.cc:682
A virtual base opaque structure used to hold state associated with the packet (e.g., an MSHR), specific to a SimObject that sees the packet.
Definition: packet.hh:468

Generated on Wed Dec 21 2022 10:22:30 for gem5 by doxygen 1.9.1