gem5  v21.2.1.1
lsq.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2013-2014,2017-2018,2020 ARM Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions are
16  * met: redistributions of source code must retain the above copyright
17  * notice, this list of conditions and the following disclaimer;
18  * redistributions in binary form must reproduce the above copyright
19  * notice, this list of conditions and the following disclaimer in the
20  * documentation and/or other materials provided with the distribution;
21  * neither the name of the copyright holders nor the names of its
22  * contributors may be used to endorse or promote products derived from
23  * this software without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 #include "cpu/minor/lsq.hh"
39 
40 #include <iomanip>
41 #include <sstream>
42 
43 #include "base/compiler.hh"
44 #include "base/logging.hh"
45 #include "base/trace.hh"
47 #include "cpu/minor/execute.hh"
48 #include "cpu/minor/pipeline.hh"
49 #include "cpu/utils.hh"
50 #include "debug/Activity.hh"
51 #include "debug/MinorMem.hh"
52 
53 namespace gem5
54 {
55 
57 namespace minor
58 {
59 
/* Construct an LSQ request bound to 'port_' for instruction 'inst_'.
 * 'data_' is store data (may be NULL for loads) and 'res_' is where an
 * extra result (e.g. a store-conditional outcome) is written.  The
 * underlying Request object is allocated here and the request starts
 * life in the NotIssued state with no packet yet built. */
60 LSQ::LSQRequest::LSQRequest(LSQ &port_, MinorDynInstPtr inst_, bool isLoad_,
61  RegIndex zero_reg, PacketDataPtr data_, uint64_t *res_) :
62  SenderState(),
63  port(port_),
64  zeroReg(zero_reg),
65  inst(inst_),
66  isLoad(isLoad_),
67  data(data_),
68  packet(NULL),
69  request(),
70  res(res_),
71  skipped(false),
72  issuedToMemory(false),
73  isTranslationDelayed(false),
74  state(NotIssued)
75 {
76  request = std::make_shared<Request>();
77 }
78 
/* NOTE(review): the hyperlinked declarator line (listing line 80) was lost
 * in extraction; from the body this is LSQ::LSQRequest::tryToSuppressFault()
 * — confirm against the original lsq.cc.
 *
 * Re-runs initiateAcc under a throw-away ExecContext so the instruction can
 * suppress (predicate away) a translation fault.  The thread PC is saved and
 * restored around the call so initiateAcc cannot perturb architectural PC
 * state. */
79 void
81 {
82  SimpleThread &thread = *port.cpu.threads[inst->id.threadId];
83  std::unique_ptr<PCStateBase> old_pc(thread.pcState().clone());
84  ExecContext context(port.cpu, thread, port.execute, inst, zeroReg);
85  [[maybe_unused]] Fault fault = inst->translationFault;
86 
87  // Give the instruction a chance to suppress a translation fault
88  inst->translationFault = inst->staticInst->initiateAcc(&context, nullptr);
89  if (inst->translationFault == NoFault) {
// NoFault here means initiateAcc swallowed the fault (suppression succeeded)
90  DPRINTFS(MinorMem, (&port),
91  "Translation fault suppressed for inst:%s\n", *inst);
92  } else {
// If not suppressed, initiateAcc must reproduce the same fault it saw before
93  assert(inst->translationFault == fault);
94  }
95  thread.pcState(*old_pc);
96 }
97 
/* NOTE(review): declarator line (listing 99) lost in extraction; from the
 * DPRINTF text this is LSQ::LSQRequest::completeDisabledMemAccess() —
 * confirm against the original lsq.cc.
 *
 * Completes an access whose memory side was predicated off: runs
 * completeAcc with a false mem-access predicate and a null packet, with the
 * thread PC saved/restored around the temporary ExecContext. */
98 void
100 {
101  DPRINTFS(MinorMem, (&port), "Complete disabled mem access for inst:%s\n",
102  *inst);
103 
104  SimpleThread &thread = *port.cpu.threads[inst->id.threadId];
105  std::unique_ptr<PCStateBase> old_pc(thread.pcState().clone());
106 
107  ExecContext context(port.cpu, thread, port.execute, inst, zeroReg);
108 
109  context.setMemAccPredicate(false);
110  inst->staticInst->completeAcc(nullptr, &context, inst->traceData);
111 
112  thread.pcState(*old_pc);
113 }
114 
/* NOTE(review): declarator line (listing 116) lost in extraction; from the
 * DPRINTF text this is LSQ::LSQRequest::disableMemAccess() — confirm.
 * Clears the owning thread's mem-access predicate so the access is
 * skipped entirely. */
115 void
117 {
118  port.cpu.threads[inst->id.threadId]->setMemAccPredicate(false);
119  DPRINTFS(MinorMem, (&port), "Disable mem access for inst:%s\n", *inst);
120 }
121 
124  Addr req1_addr, unsigned int req1_size,
125  Addr req2_addr, unsigned int req2_size)
126 {
127  /* 'end' here means the address of the byte just past the request
128  * blocks */
129  Addr req2_end_addr = req2_addr + req2_size;
130  Addr req1_end_addr = req1_addr + req1_size;
131 
132  AddrRangeCoverage ret;
133 
134  if (req1_addr >= req2_end_addr || req1_end_addr <= req2_addr)
135  ret = NoAddrRangeCoverage;
136  else if (req1_addr <= req2_addr && req1_end_addr >= req2_end_addr)
137  ret = FullAddrRangeCoverage;
138  else
140 
141  return ret;
142 }
143 
146 {
147  AddrRangeCoverage ret = containsAddrRangeOf(
148  request->getPaddr(), request->getSize(),
149  other_request->request->getPaddr(), other_request->request->getSize());
150  /* If there is a strobe mask then store data forwarding might not be
151  * correct. Instead of checking enablemant of every byte we just fall back
152  * to PartialAddrRangeCoverage to prohibit store data forwarding */
153  if (ret == FullAddrRangeCoverage && request->isMasked())
155  return ret;
156 }
157 
158 
/* NOTE(review): declarator line (listing 160) lost in extraction; body
 * matches LSQ::LSQRequest::isBarrier().  True when the request wraps a
 * real instruction that is a full memory barrier. */
159 bool
161 {
162  return inst->isInst() && inst->staticInst->isFullMemBarrier();
163 }
164 
/* NOTE(review): declarator line (listing 166) lost in extraction; body
 * matches LSQ::LSQRequest::needsToBeSentToStoreBuffer() — true while the
 * request is waiting to be handed to the store buffer. */
165 bool
167 {
168  return state == StoreToStoreBuffer;
169 }
170 
/* NOTE(review): declarator line (listing 172) lost in extraction; body
 * matches LSQ::LSQRequest::setState(LSQRequestState new_state).
 * Single choke-point for state transitions so they can be traced. */
171 void
173 {
174  DPRINTFS(MinorMem, (&port), "Setting state from %d to %d for request:"
175  " %s\n", state, new_state, *inst);
176  state = new_state;
177 }
178 
/* NOTE(review): declarator line (listing 180) lost in extraction; body
 * matches LSQ::LSQRequest::isComplete(). */
179 bool
181 {
182  /* @todo, There is currently only one 'completed' state. This
183  * may not be a good choice */
184  return state == Complete;
185 }
186 
/* Emit a compact trace token for this request: direction (R for load,
 * W for store), the instruction's own report data, and the request's
 * current state, separated by ';'.  Used by the store-buffer minorTrace
 * output below. */
187 void
188 LSQ::LSQRequest::reportData(std::ostream &os) const
189 {
190  os << (isLoad ? 'R' : 'W') << ';';
191  inst->reportData(os);
192  os << ';' << state;
193 }
194 
195 std::ostream &
196 operator <<(std::ostream &os, LSQ::AddrRangeCoverage coverage)
197 {
198  switch (coverage) {
200  os << "PartialAddrRangeCoverage";
201  break;
203  os << "FullAddrRangeCoverage";
204  break;
206  os << "NoAddrRangeCoverage";
207  break;
208  default:
209  os << "AddrRangeCoverage-" << static_cast<int>(coverage);
210  break;
211  }
212  return os;
213 }
214 
215 std::ostream &
217 {
218  switch (state) {
220  os << "NotIssued";
221  break;
223  os << "InTranslation";
224  break;
226  os << "Translated";
227  break;
229  os << "Failed";
230  break;
232  os << "RequestIssuing";
233  break;
235  os << "StoreToStoreBuffer";
236  break;
238  os << "StoreInStoreBuffer";
239  break;
241  os << "StoreBufferIssuing";
242  break;
244  os << "RequestNeedsRetry";
245  break;
247  os << "StoreBufferNeedsRetry";
248  break;
250  os << "Complete";
251  break;
252  default:
253  os << "LSQRequestState-" << static_cast<int>(state);
254  break;
255  }
256  return os;
257 }
258 
/* NOTE(review): declarator line (listing 260) lost in extraction; from the
 * body this is LSQ::clearMemBarrier(MinorDynInstPtr &inst) — confirm.
 *
 * Retires a barrier from the in-flight set for its thread.  If this
 * barrier is the most recent one recorded (by execSeqNum) the per-thread
 * lastMemBarrier marker is reset to 0 (no barrier outstanding). */
259 void
261 {
262  bool is_last_barrier =
263  inst->id.execSeqNum >= lastMemBarrier[inst->id.threadId];
264 
265  DPRINTF(MinorMem, "Moving %s barrier out of store buffer inst: %s\n",
266  (is_last_barrier ? "last" : "a"), *inst);
267 
268  if (is_last_barrier)
269  lastMemBarrier[inst->id.threadId] = 0;
270 }
271 
/* Translation-complete callback (BaseMMU::Translation interface) for a
 * single (non-split) request.  NOTE(review): the second declarator line
 * (listing 274, presumably "ThreadContext *tc, BaseMMU::Mode mode)") was
 * lost in extraction — confirm against the original lsq.cc. */
272 void
273 LSQ::SingleDataRequest::finish(const Fault &fault_, const RequestPtr &request_,
275 {
276  port.numAccessesInDTLB--;
277 
278  DPRINTFS(MinorMem, (&port), "Received translation response for"
279  " request: %s delayed:%d %s\n", *inst, isTranslationDelayed,
280  fault_ != NoFault ? fault_->name() : "");
281 
282  if (fault_ != NoFault) {
283  inst->translationFault = fault_;
/* A delayed translation gets one chance to predicate the fault away;
 * on success the (disabled) access is completed immediately. */
284  if (isTranslationDelayed) {
285  tryToSuppressFault();
286  if (inst->translationFault == NoFault) {
287  completeDisabledMemAccess();
288  setState(Complete);
289  }
290  }
/* NOTE(review): this overwrites the Complete state set just above in
 * the suppressed case — verify intent against upstream gem5. */
291  setState(Translated);
292  } else {
293  setState(Translated);
294  makePacket();
295  }
296  port.tryToSendToTransfers(this);
297 
298  /* Let's try and wake up the processor for the next cycle */
299  port.cpu.wakeupOnEvent(Pipeline::ExecuteStageId);
300 }
301 
302 void
304 {
305  ThreadContext *thread = port.cpu.getContext(
306  inst->id.threadId);
307 
308  const auto &byte_enable = request->getByteEnable();
309  if (isAnyActiveElement(byte_enable.cbegin(), byte_enable.cend())) {
310  port.numAccessesInDTLB++;
311 
313 
314  DPRINTFS(MinorMem, (&port), "Submitting DTLB request\n");
315  /* Submit the translation request. The response will come through
316  * finish/markDelayed on the LSQRequest as it bears the Translation
317  * interface */
318  thread->getMMUPtr()->translateTiming(
319  request, thread, this, (isLoad ? BaseMMU::Read : BaseMMU::Write));
320  } else {
321  disableMemAccess();
322  setState(LSQ::LSQRequest::Complete);
323  }
324 }
325 
/* NOTE(review): declarator line (listing 327) lost in extraction; body
 * matches LSQ::SingleDataRequest::retireResponse(PacketPtr packet_).
 * Adopts the response packet, marks no packet in flight, and completes
 * the request. */
326 void
328 {
329  DPRINTFS(MinorMem, (&port), "Retiring packet\n");
330  packet = packet_;
331  packetInFlight = false;
332  setState(Complete);
333 }
334 
/* Translation-complete callback for one fragment of a split request.
 * NOTE(review): the second declarator line (listing 337) was lost in
 * extraction — confirm against the original lsq.cc.
 * Fragments are translated strictly in order; further fragments are
 * scheduled via translationEvent rather than recursing into
 * translateTiming from inside this callback. */
335 void
336 LSQ::SplitDataRequest::finish(const Fault &fault_, const RequestPtr &request_,
338 {
339  port.numAccessesInDTLB--;
340 
341  [[maybe_unused]] unsigned int expected_fragment_index =
342  numTranslatedFragments;
343 
344  numInTranslationFragments--;
345  numTranslatedFragments++;
346 
347  DPRINTFS(MinorMem, (&port), "Received translation response for fragment"
348  " %d of request: %s delayed:%d %s\n", expected_fragment_index,
349  *inst, isTranslationDelayed,
350  fault_ != NoFault ? fault_->name() : "");
351 
// In-order translation invariant: this response is for the next fragment
352  assert(request_ == fragmentRequests[expected_fragment_index]);
353 
354  /* Wake up next cycle to get things going again in case the
355  * tryToSendToTransfers does take */
356  port.cpu.wakeupOnEvent(Pipeline::ExecuteStageId);
357 
358  if (fault_ != NoFault) {
359  /* tryToSendToTransfers will handle the fault */
360  inst->translationFault = fault_;
361 
362  DPRINTFS(MinorMem, (&port), "Faulting translation for fragment:"
363  " %d of request: %s\n",
364  expected_fragment_index, *inst);
365 
/* Faults on later fragments (or delayed translations) may be
 * predicated away by the instruction */
366  if (expected_fragment_index > 0 || isTranslationDelayed)
367  tryToSuppressFault();
368  if (expected_fragment_index == 0) {
369  if (isTranslationDelayed && inst->translationFault == NoFault) {
370  completeDisabledMemAccess();
371  setState(Complete);
372  } else {
373  setState(Translated);
374  }
375  } else if (inst->translationFault == NoFault) {
/* Fault suppressed mid-burst: drop the faulting fragment and
 * build packets for the fragments translated so far */
376  setState(Translated);
377  numTranslatedFragments--;
378  makeFragmentPackets();
379  } else {
380  setState(Translated);
381  }
382  port.tryToSendToTransfers(this);
383  } else if (numTranslatedFragments == numFragments) {
// All fragments translated: build the packets and hand over
384  makeFragmentPackets();
385  setState(Translated);
386  port.tryToSendToTransfers(this);
387  } else {
388  /* Avoid calling translateTiming from within ::finish */
389  assert(!translationEvent.scheduled());
390  port.cpu.schedule(translationEvent, curTick());
391  }
392 }
393 
/* SplitDataRequest constructor.  NOTE(review): the first declarator line
 * (listing 394, "LSQ::SplitDataRequest::SplitDataRequest(LSQ &port_,
 * MinorDynInstPtr inst_,") was lost in extraction — confirm.
 * All fragment counters start at zero; fragment vectors are sized later
 * by makeFragmentRequests(). */
395  bool isLoad_, PacketDataPtr data_, uint64_t *res_) :
396  LSQRequest(port_, inst_, isLoad_, port_.zeroReg, data_, res_),
397  translationEvent([this]{ sendNextFragmentToTranslation(); },
398  "translationEvent"),
399  numFragments(0),
400  numInTranslationFragments(0),
401  numTranslatedFragments(0),
402  numIssuedFragments(0),
403  numRetiredFragments(0),
404  fragmentRequests(),
405  fragmentPackets()
406 {
407  /* Don't know how many elements are needed until the request is
408  * populated by the caller. */
409 }
410 
/* Destructor body (NOTE(review): declarator line, listing 411, lost in
 * extraction).  Frees all fragment packets; the shared RequestPtrs clean
 * themselves up. */
412 {
413  for (auto i = fragmentPackets.begin();
414  i != fragmentPackets.end(); i++)
415  {
416  delete *i;
417  }
418 }
419 
/* NOTE(review): declarator line (listing 421) lost in extraction; body
 * matches LSQ::SplitDataRequest::makeFragmentRequests().
 *
 * Chop the whole virtual request into line-width-aligned fragment
 * Requests.  Fragments whose byte-enable mask is entirely inactive are
 * not emitted and numFragments is reduced accordingly. */
420 void
422 {
423  Addr base_addr = request->getVaddr();
424  unsigned int whole_size = request->getSize();
425  unsigned int line_width = port.lineWidth;
426 
427  unsigned int fragment_size;
428  Addr fragment_addr;
429 
430  std::vector<bool> fragment_write_byte_en;
431 
432  /* Assume that this transfer is across potentially many block snap
433  * boundaries:
434  *
435  * | _|________|________|________|___ |
436  * | |0| 1 | 2 | 3 | 4 | |
437  * | |_|________|________|________|___| |
438  * | | | | | |
439  *
440  * The first transfer (0) can be up to lineWidth in size.
441  * All the middle transfers (1-3) are lineWidth in size
442  * The last transfer (4) can be from zero to lineWidth - 1 in size
443  */
444  unsigned int first_fragment_offset =
445  addrBlockOffset(base_addr, line_width);
446  unsigned int last_fragment_size =
447  addrBlockOffset(base_addr + whole_size, line_width);
448  unsigned int first_fragment_size =
449  line_width - first_fragment_offset;
450 
451  unsigned int middle_fragments_total_size =
452  whole_size - (first_fragment_size + last_fragment_size);
453 
// Middle portion must be a whole number of lines by construction
454  assert(addrBlockOffset(middle_fragments_total_size, line_width) == 0);
455 
456  unsigned int middle_fragment_count =
457  middle_fragments_total_size / line_width;
458 
459  numFragments = 1 /* first */ + middle_fragment_count +
460  (last_fragment_size == 0 ? 0 : 1);
461 
462  DPRINTFS(MinorMem, (&port), "Dividing transfer into %d fragmentRequests."
463  " First fragment size: %d Last fragment size: %d\n",
464  numFragments, first_fragment_size,
465  (last_fragment_size == 0 ? line_width : last_fragment_size));
466 
467  assert(((middle_fragment_count * line_width) +
468  first_fragment_size + last_fragment_size) == whole_size);
469 
470  fragment_addr = base_addr;
471  fragment_size = first_fragment_size;
472 
473  /* Just past the last address in the request */
474  Addr end_addr = base_addr + whole_size;
475 
476  auto& byte_enable = request->getByteEnable();
477  unsigned int num_disabled_fragments = 0;
478 
479  for (unsigned int fragment_index = 0; fragment_index < numFragments;
480  fragment_index++)
481  {
482  [[maybe_unused]] bool is_last_fragment = false;
483 
484  if (fragment_addr == base_addr) {
485  /* First fragment */
486  fragment_size = first_fragment_size;
487  } else {
488  if ((fragment_addr + line_width) > end_addr) {
489  /* Adjust size of last fragment */
490  fragment_size = end_addr - fragment_addr;
491  is_last_fragment = true;
492  } else {
493  /* Middle fragments */
494  fragment_size = line_width;
495  }
496  }
497 
498  RequestPtr fragment = std::make_shared<Request>();
499  bool disabled_fragment = false;
500 
501  fragment->setContext(request->contextId());
502  // Set up byte-enable mask for the current fragment
503  auto it_start = byte_enable.begin() +
504  (fragment_addr - base_addr);
505  auto it_end = byte_enable.begin() +
506  (fragment_addr - base_addr) + fragment_size;
507  if (isAnyActiveElement(it_start, it_end)) {
508  fragment->setVirt(
509  fragment_addr, fragment_size, request->getFlags(),
510  request->requestorId(),
511  request->getPC());
512  fragment->setByteEnable(std::vector<bool>(it_start, it_end));
513  } else {
// No active bytes: the fragment is dropped entirely below
514  disabled_fragment = true;
515  }
516 
517  if (!disabled_fragment) {
518  DPRINTFS(MinorMem, (&port), "Generating fragment addr: 0x%x"
519  " size: %d (whole request addr: 0x%x size: %d) %s\n",
520  fragment_addr, fragment_size, base_addr, whole_size,
521  (is_last_fragment ? "last fragment" : ""));
522 
523  fragmentRequests.push_back(fragment);
524  } else {
525  num_disabled_fragments++;
526  }
527 
528  fragment_addr += fragment_size;
529  }
530  assert(numFragments >= num_disabled_fragments);
531  numFragments -= num_disabled_fragments;
532 }
533 
/* NOTE(review): declarator line (listing 535) lost in extraction; body
 * matches LSQ::SplitDataRequest::makeFragmentPackets().
 *
 * Build one Packet per translated fragment Request.  For stores, each
 * packet gets its own heap copy of the relevant slice of 'data' (the
 * packet takes ownership).  Flags accumulate back into the parent
 * request, whose overall packet is also built here. */
534 void
536 {
537  assert(numTranslatedFragments > 0);
538  Addr base_addr = request->getVaddr();
539 
540  DPRINTFS(MinorMem, (&port), "Making packets for request: %s\n", *inst);
541 
542  for (unsigned int fragment_index = 0;
543  fragment_index < numTranslatedFragments;
544  fragment_index++)
545  {
546  RequestPtr fragment = fragmentRequests[fragment_index];
547 
548  DPRINTFS(MinorMem, (&port), "Making packet %d for request: %s"
549  " (%d, 0x%x)\n",
550  fragment_index, *inst,
551  (fragment->hasPaddr() ? "has paddr" : "no paddr"),
552  (fragment->hasPaddr() ? fragment->getPaddr() : 0));
553 
554  Addr fragment_addr = fragment->getVaddr();
555  unsigned int fragment_size = fragment->getSize();
556 
557  uint8_t *request_data = NULL;
558 
559  if (!isLoad) {
560  /* Split data for Packets. Will become the property of the
561  * outgoing Packets */
562  request_data = new uint8_t[fragment_size];
563  std::memcpy(request_data, data + (fragment_addr - base_addr),
564  fragment_size);
565  }
566 
567  assert(fragment->hasPaddr());
568 
569  PacketPtr fragment_packet =
570  makePacketForRequest(fragment, isLoad, this, request_data);
571 
572  fragmentPackets.push_back(fragment_packet);
573  /* Accumulate flags in parent request */
574  request->setFlags(fragment->getFlags());
575  }
576 
577  /* Might as well make the overall/response packet here */
578  /* Get the physical address for the whole request/packet from the first
579  * fragment */
580  request->setPaddr(fragmentRequests[0]->getPaddr());
581  makePacket();
582 }
583 
584 void
586 {
587  makeFragmentRequests();
588 
589  if (numFragments > 0) {
591  numInTranslationFragments = 0;
592  numTranslatedFragments = 0;
593 
594  /* @todo, just do these in sequence for now with
595  * a loop of:
596  * do {
597  * sendNextFragmentToTranslation ; translateTiming ; finish
598  * } while (numTranslatedFragments != numFragments);
599  */
600 
601  /* Do first translation */
602  sendNextFragmentToTranslation();
603  } else {
604  disableMemAccess();
605  setState(LSQ::LSQRequest::Complete);
606  }
607 }
608 
/* NOTE(review): declarator line (listing 610) lost in extraction; body
 * matches LSQ::SplitDataRequest::getHeadPacket() — the next fragment
 * packet to issue to memory. */
609 PacketPtr
611 {
612  assert(numIssuedFragments < numTranslatedFragments);
613 
614  return fragmentPackets[numIssuedFragments];
615 }
616 
/* NOTE(review): declarator line (listing 618) lost in extraction; body
 * matches LSQ::SplitDataRequest::stepToNextPacket() — advance the issue
 * cursor after the head packet has been sent. */
617 void
619 {
620  assert(numIssuedFragments < numTranslatedFragments);
621 
622  numIssuedFragments++;
623 }
624 
/* NOTE(review): declarator line (listing 626) lost in extraction; body
 * matches LSQ::SplitDataRequest::retireResponse(PacketPtr response).
 *
 * Absorb one fragment response into the overall request.  Load data is
 * accumulated into 'data'; errors mark the whole request skipped.  The
 * request becomes Complete when every translated fragment has retired
 * (or early, if skipped with nothing left in flight), at which point the
 * overall packet is turned into a response. */
625 void
627 {
628  assert(inst->translationFault == NoFault);
629  assert(numRetiredFragments < numTranslatedFragments);
630 
631  DPRINTFS(MinorMem, (&port), "Retiring fragment addr: 0x%x size: %d"
632  " offset: 0x%x (retired fragment num: %d)\n",
633  response->req->getVaddr(), response->req->getSize(),
634  request->getVaddr() - response->req->getVaddr(),
635  numRetiredFragments);
636 
637  numRetiredFragments++;
638 
639  if (skipped) {
640  /* Skip because we already knew the request had faulted or been
641  * skipped */
642  DPRINTFS(MinorMem, (&port), "Skipping this fragment\n");
643  } else if (response->isError()) {
644  /* Mark up the error and leave to execute to handle it */
645  DPRINTFS(MinorMem, (&port), "Fragment has an error, skipping\n");
646  setSkipped();
647  packet->copyError(response);
648  } else {
649  if (isLoad) {
650  if (!data) {
651  /* For a split transfer, a Packet must be constructed
652  * to contain all returning data. This is that packet's
653  * data */
654  data = new uint8_t[request->getSize()];
655  }
656 
657  /* Populate the portion of the overall response data represented
658  * by the response fragment */
659  std::memcpy(
660  data + (response->req->getVaddr() - request->getVaddr()),
661  response->getConstPtr<uint8_t>(),
662  response->req->getSize());
663  }
664  }
665 
666  /* Complete early if we're skipping are no more in-flight accesses */
667  if (skipped && !hasPacketsInMemSystem()) {
668  DPRINTFS(MinorMem, (&port), "Completed skipped burst\n");
669  setState(Complete);
670  if (packet->needsResponse())
671  packet->makeResponse();
672  }
673 
674  if (numRetiredFragments == numTranslatedFragments)
675  setState(Complete);
676 
677  if (!skipped && isComplete()) {
678  DPRINTFS(MinorMem, (&port), "Completed burst %d\n", packet != NULL);
679 
680  DPRINTFS(MinorMem, (&port), "Retired packet isRead: %d isWrite: %d"
681  " needsResponse: %d packetSize: %s requestSize: %s responseSize:"
682  " %s\n", packet->isRead(), packet->isWrite(),
683  packet->needsResponse(), packet->getSize(), request->getSize(),
684  response->getSize());
685 
686  /* A request can become complete by several paths, this is a sanity
687  * check to make sure the packet's data is created */
688  if (!data) {
689  data = new uint8_t[request->getSize()];
690  }
691 
692  if (isLoad) {
693  DPRINTFS(MinorMem, (&port), "Copying read data\n");
694  std::memcpy(packet->getPtr<uint8_t>(), data, request->getSize());
695  }
696  packet->makeResponse();
697  }
698 
699  /* Packets are all deallocated together in ~SplitLSQRequest */
700 }
701 
702 void
704 {
705  unsigned int fragment_index = numTranslatedFragments;
706 
707  ThreadContext *thread = port.cpu.getContext(
708  inst->id.threadId);
709 
710  DPRINTFS(MinorMem, (&port), "Submitting DTLB request for fragment: %d\n",
711  fragment_index);
712 
713  port.numAccessesInDTLB++;
714  numInTranslationFragments++;
715 
716  thread->getMMUPtr()->translateTiming(
717  fragmentRequests[fragment_index], thread, this, (isLoad ?
719 }
720 
/* NOTE(review): declarator line (listing 722) lost in extraction; body
 * matches LSQ::StoreBuffer::canInsert() — room for one more slot? */
721 bool
723 {
724  /* @todo, support store amalgamation */
725  return slots.size() < numSlots;
726 }
727 
/* NOTE(review): declarator line (listing 729) lost in extraction; body
 * matches LSQ::StoreBuffer::deleteRequest(LSQRequestPtr request).
 * Removes the request's slot (if present) and frees the request — the
 * store buffer owns requests once inserted. */
728 void
730 {
731  auto found = std::find(slots.begin(), slots.end(), request);
732 
733  if (found != slots.end()) {
734  DPRINTF(MinorMem, "Deleting request: %s %s %s from StoreBuffer\n",
735  request, *found, *(request->inst));
736  slots.erase(found);
737 
738  delete request;
739  }
740 }
741 
742 void
744 {
745  if (!canInsert()) {
746  warn("%s: store buffer insertion without space to insert from"
747  " inst: %s\n", name(), *(request->inst));
748  }
749 
750  DPRINTF(MinorMem, "Pushing store: %s into store buffer\n", request);
751 
752  numUnissuedAccesses++;
753 
754  if (request->state != LSQRequest::Complete)
756 
757  slots.push_back(request);
758 
759  /* Let's try and wake up the processor for the next cycle to step
760  * the store buffer */
761  lsq.cpu.wakeupOnEvent(Pipeline::ExecuteStageId);
762 }
763 
766  unsigned int &found_slot)
767 {
768  unsigned int slot_index = slots.size() - 1;
769  auto i = slots.rbegin();
771 
772  /* Traverse the store buffer in reverse order (most to least recent)
773  * and try to find a slot whose address range overlaps this request */
774  while (ret == NoAddrRangeCoverage && i != slots.rend()) {
775  LSQRequestPtr slot = *i;
776 
777  /* Cache maintenance instructions go down via the store path but
778  * they carry no data and they shouldn't be considered
779  * for forwarding */
780  if (slot->packet &&
781  slot->inst->id.threadId == request->inst->id.threadId &&
782  !slot->packet->req->isCacheMaintenance()) {
783  AddrRangeCoverage coverage = slot->containsAddrRangeOf(request);
784 
785  if (coverage != NoAddrRangeCoverage) {
786  DPRINTF(MinorMem, "Forwarding: slot: %d result: %s thisAddr:"
787  " 0x%x thisSize: %d slotAddr: 0x%x slotSize: %d\n",
788  slot_index, coverage,
789  request->request->getPaddr(), request->request->getSize(),
790  slot->request->getPaddr(), slot->request->getSize());
791 
792  found_slot = slot_index;
793  ret = coverage;
794  }
795  }
796 
797  i++;
798  slot_index--;
799  }
800 
801  return ret;
802 }
803 
/* Copy store data out of store-buffer slot 'slot_number' into the
 * load's packet.  Only legal when the slot fully covers the load's
 * address range (asserted below).  NOTE(review): the first declarator
 * line (listing 806, "LSQ::StoreBuffer::forwardStoreData(LSQRequestPtr
 * load,") was lost in extraction — confirm. */
805 void
807  unsigned int slot_number)
808 {
809  assert(slot_number < slots.size());
810  assert(load->packet);
811  assert(load->isLoad);
812 
813  LSQRequestPtr store = slots[slot_number];
814 
815  assert(store->packet);
816  assert(store->containsAddrRangeOf(load) == FullAddrRangeCoverage);
817 
818  Addr load_addr = load->request->getPaddr();
819  Addr store_addr = store->request->getPaddr();
820  Addr addr_offset = load_addr - store_addr;
821 
822  unsigned int load_size = load->request->getSize();
823 
824  DPRINTF(MinorMem, "Forwarding %d bytes for addr: 0x%x from store buffer"
825  " slot: %d addr: 0x%x addressOffset: 0x%x\n",
826  load_size, load_addr, slot_number,
827  store_addr, addr_offset);
828 
829  void *load_packet_data = load->packet->getPtr<void>();
830  void *store_packet_data = store->packet->getPtr<uint8_t>() + addr_offset;
831 
832  std::memcpy(load_packet_data, store_packet_data, load_size);
833 }
834 
/* NOTE(review): declarator line (listing 836) lost in extraction; body
 * matches LSQ::StoreBuffer::countIssuedStore(LSQRequestPtr request).
 * Decrement the unissued counter for a real store (barriers are counted
 * down when cleared, not here). */
835 void
837 {
838  /* Barriers are accounted for as they are cleared from
839  * the queue, not after their transfers are complete */
840  if (!request->isBarrier())
841  numUnissuedAccesses--;
842 }
843 
/* NOTE(review): declarator line (listing 845) lost in extraction; body
 * matches LSQ::StoreBuffer::step() — the per-cycle store buffer tick.
 *
 * First pops completed barriers off the head, then issues stores in
 * FIFO order (up to storeLimitPerCycle) while the memory system will
 * accept them.  Issue stops at the first un-sendable store so ordering
 * is preserved. */
844 void
846 {
847  DPRINTF(MinorMem, "StoreBuffer step numUnissuedAccesses: %d\n",
848  numUnissuedAccesses);
849 
850  if (numUnissuedAccesses != 0 && lsq.state == LSQ::MemoryRunning) {
851  /* Clear all the leading barriers */
852  while (!slots.empty() &&
853  slots.front()->isComplete() && slots.front()->isBarrier())
854  {
855  LSQRequestPtr barrier = slots.front();
856 
857  DPRINTF(MinorMem, "Clearing barrier for inst: %s\n",
858  *(barrier->inst));
859 
860  numUnissuedAccesses--;
861  lsq.clearMemBarrier(barrier->inst);
862  slots.pop_front();
863 
864  delete barrier;
865  }
866 
867  auto i = slots.begin();
868  bool issued = true;
869  unsigned int issue_count = 0;
870 
871  /* Skip trying if the memory system is busy */
872  if (lsq.state == LSQ::MemoryNeedsRetry)
873  issued = false;
874 
875  /* Try to issue all stores in order starting from the head
876  * of the queue. Responses are allowed to be retired
877  * out of order */
878  while (issued &&
879  issue_count < storeLimitPerCycle &&
880  lsq.canSendToMemorySystem() &&
881  i != slots.end())
882  {
883  LSQRequestPtr request = *i;
884 
885  DPRINTF(MinorMem, "Considering request: %s, sentAllPackets: %d"
886  " state: %s\n",
887  *(request->inst), request->sentAllPackets(),
888  request->state);
889 
890  if (request->isBarrier() && request->isComplete()) {
891  /* Give up at barriers */
892  issued = false;
893  } else if (!(request->state == LSQRequest::StoreBufferIssuing &&
894  request->sentAllPackets()))
895  {
896  DPRINTF(MinorMem, "Trying to send request: %s to memory"
897  " system\n", *(request->inst));
898 
899  if (lsq.tryToSend(request)) {
900  countIssuedStore(request);
901  issue_count++;
902  } else {
903  /* Don't step on to the next store buffer entry if this
904  * one hasn't issued all its packets as the store
905  * buffer must still enforce ordering */
906  issued = false;
907  }
908  }
909  i++;
910  }
911  }
912 }
913 
/* NOTE(review): the first declarator line (listing 915, presumably
 * "LSQ::completeMemBarrierInst(MinorDynInstPtr inst,") was lost in
 * extraction — confirm against the original lsq.cc.
 *
 * A committed barrier gets a BarrierDataRequest slot in the store buffer
 * (so it drains with the stores ahead of it); an uncommitted barrier is
 * simply cleared from the in-flight barrier tracking. */
914 void
916  bool committed)
917 {
918  if (committed) {
919  /* Not already sent to the store buffer as a store request? */
920  if (!inst->inStoreBuffer) {
921  /* Insert an entry into the store buffer to tick off barriers
922  * until there are none in flight */
923  storeBuffer.insert(new BarrierDataRequest(*this, inst));
924  }
925  } else {
926  /* Clear the barrier anyway if it wasn't actually committed */
927  clearMemBarrier(inst);
928  }
929 }
930 
/* NOTE(review): declarator line (listing 932) lost in extraction; body
 * matches LSQ::StoreBuffer::minorTrace().
 *
 * Emit one comma-separated token per store buffer slot (via each
 * request's reportData) followed by '-' for empty slots, plus the
 * unissued store count, into the MinorTrace stream. */
931 void
933 {
934  unsigned int size = slots.size();
935  unsigned int i = 0;
936  std::ostringstream os;
937 
938  while (i < size) {
939  LSQRequestPtr request = slots[i];
940 
941  request->reportData(os);
942 
943  i++;
944  if (i < numSlots)
945  os << ',';
946  }
947 
948  while (i < numSlots) {
949  os << '-';
950 
951  i++;
952  if (i < numSlots)
953  os << ',';
954  }
955 
956  minor::minorTrace("addr=%s num_unissued_stores=%d\n", os.str(),
957  numUnissuedAccesses);
958 }
959 
960 void
962 {
963  if (state == MemoryNeedsRetry) {
964  DPRINTF(MinorMem, "Request needs retry, not issuing to"
965  " memory until retry arrives\n");
966  return;
967  }
968 
969  if (request->state == LSQRequest::InTranslation) {
970  DPRINTF(MinorMem, "Request still in translation, not issuing to"
971  " memory\n");
972  return;
973  }
974 
975  assert(request->state == LSQRequest::Translated ||
976  request->state == LSQRequest::RequestIssuing ||
977  request->state == LSQRequest::Failed ||
978  request->state == LSQRequest::Complete);
979 
980  if (requests.empty() || requests.front() != request) {
981  DPRINTF(MinorMem, "Request not at front of requests queue, can't"
982  " issue to memory\n");
983  return;
984  }
985 
986  if (transfers.unreservedRemainingSpace() == 0) {
987  DPRINTF(MinorMem, "No space to insert request into transfers"
988  " queue\n");
989  return;
990  }
991 
992  if (request->isComplete() || request->state == LSQRequest::Failed) {
993  DPRINTF(MinorMem, "Passing a %s transfer on to transfers"
994  " queue\n", (request->isComplete() ? "completed" : "failed"));
995  request->setState(LSQRequest::Complete);
996  request->setSkipped();
998  return;
999  }
1000 
1001  if (!execute.instIsRightStream(request->inst)) {
1002  /* Wrong stream, try to abort the transfer but only do so if
1003  * there are no packets in flight */
1004  if (request->hasPacketsInMemSystem()) {
1005  DPRINTF(MinorMem, "Request's inst. is from the wrong stream,"
1006  " waiting for responses before aborting request\n");
1007  } else {
1008  DPRINTF(MinorMem, "Request's inst. is from the wrong stream,"
1009  " aborting request\n");
1010  request->setState(LSQRequest::Complete);
1011  request->setSkipped();
1012  moveFromRequestsToTransfers(request);
1013  }
1014  return;
1015  }
1016 
1017  if (request->inst->translationFault != NoFault) {
1018  if (request->inst->staticInst->isPrefetch()) {
1019  DPRINTF(MinorMem, "Not signalling fault for faulting prefetch\n");
1020  }
1021  DPRINTF(MinorMem, "Moving faulting request into the transfers"
1022  " queue\n");
1023  request->setState(LSQRequest::Complete);
1024  request->setSkipped();
1025  moveFromRequestsToTransfers(request);
1026  return;
1027  }
1028 
1029  bool is_load = request->isLoad;
1030  bool is_llsc = request->request->isLLSC();
1031  bool is_release = request->request->isRelease();
1032  bool is_swap = request->request->isSwap();
1033  bool is_atomic = request->request->isAtomic();
1034  bool bufferable = !(request->request->isStrictlyOrdered() ||
1035  is_llsc || is_swap || is_atomic || is_release);
1036 
1037  if (is_load) {
1038  if (numStoresInTransfers != 0) {
1039  DPRINTF(MinorMem, "Load request with stores still in transfers"
1040  " queue, stalling\n");
1041  return;
1042  }
1043  } else {
1044  /* Store. Can it be sent to the store buffer? */
1045  if (bufferable && !request->request->isLocalAccess()) {
1047  moveFromRequestsToTransfers(request);
1048  DPRINTF(MinorMem, "Moving store into transfers queue\n");
1049  return;
1050  }
1051  }
1052 
1053  // Process store conditionals or store release after all previous
1054  // stores are completed
1055  if (((!is_load && is_llsc) || is_release) &&
1056  !storeBuffer.isDrained()) {
1057  DPRINTF(MinorMem, "Memory access needs to wait for store buffer"
1058  " to drain\n");
1059  return;
1060  }
1061 
1062  /* Check if this is the head instruction (and so must be executable as
1063  * its stream sequence number was checked above) for loads which must
1064  * not be speculatively issued and stores which must be issued here */
1065  if (!bufferable) {
1066  if (!execute.instIsHeadInst(request->inst)) {
1067  DPRINTF(MinorMem, "Memory access not the head inst., can't be"
1068  " sure it can be performed, not issuing\n");
1069  return;
1070  }
1071 
1072  unsigned int forwarding_slot = 0;
1073 
1074  if (storeBuffer.canForwardDataToLoad(request, forwarding_slot) !=
1076  {
1077  // There's at least another request that targets the same
1078  // address and is staying in the storeBuffer. Since our
1079  // request is non-bufferable (e.g., strictly ordered or atomic),
1080  // we must wait for the other request in the storeBuffer to
1081  // complete before we can issue this non-bufferable request.
1082  // This is to make sure that the order they access the cache is
1083  // correct.
1084  DPRINTF(MinorMem, "Memory access can receive forwarded data"
1085  " from the store buffer, but need to wait for store buffer"
1086  " to drain\n");
1087  return;
1088  }
1089  }
1090 
1091  /* True: submit this packet to the transfers queue to be sent to the
1092  * memory system.
1093  * False: skip the memory and push a packet for this request onto
1094  * requests */
1095  bool do_access = true;
1096 
1097  if (!is_llsc) {
1098  /* Check for match in the store buffer */
1099  if (is_load) {
1100  unsigned int forwarding_slot = 0;
1101  AddrRangeCoverage forwarding_result =
1103  forwarding_slot);
1104 
1105  switch (forwarding_result) {
1106  case FullAddrRangeCoverage:
1107  /* Forward data from the store buffer into this request and
1108  * repurpose this request's packet into a response packet */
1109  storeBuffer.forwardStoreData(request, forwarding_slot);
1110  request->packet->makeResponse();
1111 
1112  /* Just move between queues, no access */
1113  do_access = false;
1114  break;
1116  DPRINTF(MinorMem, "Load partly satisfied by store buffer"
1117  " data. Must wait for the store to complete\n");
1118  return;
1119  break;
1120  case NoAddrRangeCoverage:
1121  DPRINTF(MinorMem, "No forwardable data from store buffer\n");
1122  /* Fall through to try access */
1123  break;
1124  }
1125  }
1126  } else {
1127  if (!canSendToMemorySystem()) {
1128  DPRINTF(MinorMem, "Can't send request to memory system yet\n");
1129  return;
1130  }
1131 
1132  SimpleThread &thread = *cpu.threads[request->inst->id.threadId];
1133 
1134  std::unique_ptr<PCStateBase> old_pc(thread.pcState().clone());
1135  ExecContext context(cpu, thread, execute, request->inst, zeroReg);
1136 
1137  /* Handle LLSC requests and tests */
1138  if (is_load) {
1139  thread.getIsaPtr()->handleLockedRead(&context, request->request);
1140  } else {
1141  do_access = thread.getIsaPtr()->handleLockedWrite(&context,
1142  request->request, cacheBlockMask);
1143 
1144  if (!do_access) {
1145  DPRINTF(MinorMem, "Not perfoming a memory "
1146  "access for store conditional\n");
1147  }
1148  }
1149  thread.pcState(*old_pc);
1150  }
1151 
1152  /* See the do_access comment above */
1153  if (do_access) {
1154  if (!canSendToMemorySystem()) {
1155  DPRINTF(MinorMem, "Can't send request to memory system yet\n");
1156  return;
1157  }
1158 
1159  /* Remember if this is an access which can't be idly
1160  * discarded by an interrupt */
1161  if (!bufferable && !request->issuedToMemory) {
1163  request->issuedToMemory = true;
1164  }
1165 
1166  if (tryToSend(request)) {
1167  moveFromRequestsToTransfers(request);
1168  }
1169  } else {
1170  request->setState(LSQRequest::Complete);
1171  moveFromRequestsToTransfers(request);
1172  }
1173 }
1174 
1175 bool
1177 {
1178  bool ret = false;
1179 
1180  if (!canSendToMemorySystem()) {
1181  DPRINTF(MinorMem, "Can't send request: %s yet, no space in memory\n",
1182  *(request->inst));
1183  } else {
1184  PacketPtr packet = request->getHeadPacket();
1185 
1186  DPRINTF(MinorMem, "Trying to send request: %s addr: 0x%x\n",
1187  *(request->inst), packet->req->getVaddr());
1188 
1189  /* The sender state of the packet *must* be an LSQRequest
1190  * so the response can be correctly handled */
1191  assert(packet->findNextSenderState<LSQRequest>());
1192 
1193  if (request->request->isLocalAccess()) {
1194  ThreadContext *thread =
1195  cpu.getContext(cpu.contextToThread(
1196  request->request->contextId()));
1197 
1198  if (request->isLoad)
1199  DPRINTF(MinorMem, "IPR read inst: %s\n", *(request->inst));
1200  else
1201  DPRINTF(MinorMem, "IPR write inst: %s\n", *(request->inst));
1202 
1203  request->request->localAccessor(thread, packet);
1204 
1205  request->stepToNextPacket();
1206  ret = request->sentAllPackets();
1207 
1208  if (!ret) {
1209  DPRINTF(MinorMem, "IPR access has another packet: %s\n",
1210  *(request->inst));
1211  }
1212 
1213  if (ret)
1214  request->setState(LSQRequest::Complete);
1215  else
1217  } else if (dcachePort.sendTimingReq(packet)) {
1218  DPRINTF(MinorMem, "Sent data memory request\n");
1219 
1221 
1222  request->stepToNextPacket();
1223 
1224  ret = request->sentAllPackets();
1225 
1226  switch (request->state) {
1229  /* Fully or partially issued a request in the transfers
1230  * queue */
1232  break;
1235  /* Fully or partially issued a request in the store
1236  * buffer */
1238  break;
1239  default:
1240  panic("Unrecognized LSQ request state %d.", request->state);
1241  }
1242 
1243  state = MemoryRunning;
1244  } else {
1245  DPRINTF(MinorMem,
1246  "Sending data memory request - needs retry\n");
1247 
1248  /* Needs to be resent, wait for that */
1250  retryRequest = request;
1251 
1252  switch (request->state) {
1256  break;
1260  break;
1261  default:
1262  panic("Unrecognized LSQ request state %d.", request->state);
1263  }
1264  }
1265  }
1266 
1267  if (ret)
1268  threadSnoop(request);
1269 
1270  return ret;
1271 }
1272 
1273 void
1275 {
1276  assert(!requests.empty() && requests.front() == request);
1277  assert(transfers.unreservedRemainingSpace() != 0);
1278 
1279  /* Need to count the number of stores in the transfers
1280  * queue so that loads know when their store buffer forwarding
1281  * results will be correct (only when all those stores
1282  * have reached the store buffer) */
1283  if (!request->isLoad)
1285 
1286  requests.pop();
1287  transfers.push(request);
1288 }
1289 
1290 bool
1292 {
1293  return state == MemoryRunning &&
1295 }
1296 
1297 bool
1299 {
1300  LSQRequestPtr request =
1301  safe_cast<LSQRequestPtr>(response->popSenderState());
1302 
1303  DPRINTF(MinorMem, "Received response packet inst: %s"
1304  " addr: 0x%x cmd: %s\n",
1305  *(request->inst), response->getAddr(),
1306  response->cmd.toString());
1307 
1309 
1310  if (response->isError()) {
1311  DPRINTF(MinorMem, "Received error response packet: %s\n",
1312  *request->inst);
1313  }
1314 
1315  switch (request->state) {
1318  /* Response to a request from the transfers queue */
1319  request->retireResponse(response);
1320 
1321  DPRINTF(MinorMem, "Has outstanding packets?: %d %d\n",
1322  request->hasPacketsInMemSystem(), request->isComplete());
1323 
1324  break;
1327  /* Response to a request from the store buffer */
1328  request->retireResponse(response);
1329 
1330  /* Remove completed requests unless they are barriers (which will
1331  * need to be removed in order */
1332  if (request->isComplete()) {
1333  if (!request->isBarrier()) {
1334  storeBuffer.deleteRequest(request);
1335  } else {
1336  DPRINTF(MinorMem, "Completed transfer for barrier: %s"
1337  " leaving the request as it is also a barrier\n",
1338  *(request->inst));
1339  }
1340  }
1341  break;
1342  default:
1343  panic("Shouldn't be allowed to receive a response from another state");
1344  }
1345 
1346  /* We go to idle even if there are more things in the requests queue
1347  * as it's the job of step to actually step us on to the next
1348  * transaction */
1349 
1350  /* Let's try and wake up the processor for the next cycle */
1352 
1353  /* Never busy */
1354  return true;
1355 }
1356 
1357 void
1359 {
1360  DPRINTF(MinorMem, "Received retry request\n");
1361 
1362  assert(state == MemoryNeedsRetry);
1363 
1364  switch (retryRequest->state) {
1366  /* Retry in the requests queue */
1368  break;
1370  /* Retry in the store buffer */
1372  break;
1373  default:
1374  panic("Unrecognized retry request state %d.", retryRequest->state);
1375  }
1376 
1377  /* Set state back to MemoryRunning so that the following
1378  * tryToSend can actually send. Note that this won't
1379  * allow another transfer in as tryToSend should
1380  * issue a memory request and either succeed for this
1381  * request or return the LSQ back to MemoryNeedsRetry */
1382  state = MemoryRunning;
1383 
1384  /* Try to resend the request */
1385  if (tryToSend(retryRequest)) {
1386  /* Successfully sent, need to move the request */
1387  switch (retryRequest->state) {
1389  /* In the requests queue */
1391  break;
1393  /* In the store buffer */
1395  break;
1396  default:
1397  panic("Unrecognized retry request state %d.", retryRequest->state);
1398  }
1399 
1400  retryRequest = NULL;
1401  }
1402 }
1403 
/* LSQ constructor.
 *
 * name_ names this object for tracing; dcache_port_name_ names the data
 * cache port. in_memory_system_limit bounds accesses outstanding in the
 * memory system; line_width is the access snap/burst width in bytes
 * (0 selects the CPU's cache line size, see the lineWidth initializer).
 * The remaining parameters size the requests queue, transfers queue and
 * store buffer. All of these are validated in the body below: each must
 * be >= 1 and lineWidth must be a power of 2, otherwise fatal() is
 * raised. */
1404 LSQ::LSQ(std::string name_, std::string dcache_port_name_,
1405  MinorCPU &cpu_, Execute &execute_,
1406  unsigned int in_memory_system_limit, unsigned int line_width,
1407  unsigned int requests_queue_size, unsigned int transfers_queue_size,
1408  unsigned int store_buffer_size,
1409  unsigned int store_buffer_cycle_store_limit,
1410  RegIndex zero_reg) :
1411  Named(name_),
1412  cpu(cpu_),
1413  execute(execute_),
1414  zeroReg(zero_reg),
1415  dcachePort(dcache_port_name_, *this, cpu_),
 /* One barrier execSeqNum per thread, 0 meaning "no in-flight barrier" */
1416  lastMemBarrier(cpu.numThreads, 0),
 /* NOTE(review): the doxygen line numbering jumps here (1417 and
  * 1424/1426-1427 are absent), so some member initializers from the
  * original source are not visible in this listing. */
1418  inMemorySystemLimit(in_memory_system_limit),
 /* A line_width of 0 defers to the CPU's configured cache line size */
1419  lineWidth((line_width == 0 ? cpu.cacheLineSize() : line_width)),
1420  requests(name_ + ".requests", "addr", requests_queue_size),
1421  transfers(name_ + ".transfers", "addr", transfers_queue_size),
1422  storeBuffer(name_ + ".storeBuffer",
1423  *this, store_buffer_size, store_buffer_cycle_store_limit),
1425  numAccessesInDTLB(0),
1428  retryRequest(NULL),
 /* Mask that clears the intra-cache-line offset bits of an address */
1429  cacheBlockMask(~(cpu_.cacheLineSize() - 1))
1430 {
 /* Fail fast on configuration values that would make the LSQ unusable */
1431  if (in_memory_system_limit < 1) {
1432  fatal("%s: executeMaxAccessesInMemory must be >= 1 (%d)\n", name_,
1433  in_memory_system_limit);
1434  }
1435 
1436  if (store_buffer_cycle_store_limit < 1) {
1437  fatal("%s: executeLSQMaxStoreBufferStoresPerCycle must be"
1438  " >= 1 (%d)\n", name_, store_buffer_cycle_store_limit);
1439  }
1440 
1441  if (requests_queue_size < 1) {
1442  fatal("%s: executeLSQRequestsQueueSize must be"
1443  " >= 1 (%d)\n", name_, requests_queue_size);
1444  }
1445 
1446  if (transfers_queue_size < 1) {
1447  fatal("%s: executeLSQTransfersQueueSize must be"
1448  " >= 1 (%d)\n", name_, transfers_queue_size);
1449  }
1450 
1451  if (store_buffer_size < 1) {
1452  fatal("%s: executeLSQStoreBufferSize must be"
1453  " >= 1 (%d)\n", name_, store_buffer_size);
1454  }
1455 
 /* The snap width must be a power of 2 for the address masking used
  * elsewhere (e.g. cacheBlockMask-style arithmetic) to be valid */
1456  if ((lineWidth & (lineWidth - 1)) != 0) {
1457  fatal("%s: lineWidth: %d must be a power of 2\n", name(), lineWidth);
1458  }
1459 }
1460 
1462 { }
1463 
1465 {
1466  if (packet)
1467  delete packet;
1468  if (data)
1469  delete [] data;
1470 }
1471 
1478 void
1480 {
1481  /* Try to move address-translated requests between queues and issue
1482  * them */
1483  if (!requests.empty())
1485 
1486  storeBuffer.step();
1487 }
1488 
1491 {
1492  LSQ::LSQRequestPtr ret = NULL;
1493 
1494  if (!transfers.empty()) {
1495  LSQRequestPtr request = transfers.front();
1496 
1497  /* Same instruction and complete access or a store that's
1498  * capable of being moved to the store buffer */
1499  if (request->inst->id == inst->id) {
1500  bool complete = request->isComplete();
1501  bool can_store = storeBuffer.canInsert();
1502  bool to_store_buffer = request->state ==
1504 
1505  if ((complete && !(request->isBarrier() && !can_store)) ||
1506  (to_store_buffer && can_store))
1507  {
1508  ret = request;
1509  }
1510  }
1511  }
1512 
1513  if (ret) {
1514  DPRINTF(MinorMem, "Found matching memory response for inst: %s\n",
1515  *inst);
1516  } else {
1517  DPRINTF(MinorMem, "No matching memory response for inst: %s\n",
1518  *inst);
1519  }
1520 
1521  return ret;
1522 }
1523 
1524 void
1526 {
1527  assert(!transfers.empty() && transfers.front() == response);
1528 
1529  transfers.pop();
1530 
1531  if (!response->isLoad)
1533 
1534  if (response->issuedToMemory)
1536 
1537  if (response->state != LSQRequest::StoreInStoreBuffer) {
1538  DPRINTF(MinorMem, "Deleting %s request: %s\n",
1539  (response->isLoad ? "load" : "store"),
1540  *(response->inst));
1541 
1542  delete response;
1543  }
1544 }
1545 
1546 void
1548 {
1549  assert(request->state == LSQRequest::StoreToStoreBuffer);
1550 
1551  DPRINTF(MinorMem, "Sending store: %s to store buffer\n",
1552  *(request->inst));
1553 
1554  request->inst->inStoreBuffer = true;
1555 
1556  storeBuffer.insert(request);
1557 }
1558 
1559 bool
1561 {
1562  return requests.empty() && transfers.empty() &&
1564 }
1565 
1566 bool
1568 {
1569  bool ret = false;
1570 
1571  if (canSendToMemorySystem()) {
1572  bool have_translated_requests = !requests.empty() &&
1575 
1576  ret = have_translated_requests ||
1578  }
1579 
1580  if (ret)
1581  DPRINTF(Activity, "Need to tick\n");
1582 
1583  return ret;
1584 }
1585 
/* Create and enqueue an LSQRequest for a load/store/atomic access made
 * by inst.
 *
 * isLoad selects a read vs. a write request. For stores, data is copied
 * into a freshly allocated buffer which the new request then owns. res
 * and amo_op are passed through to the request (res presumably receives
 * a store-conditional/atomic result — not visible from this function).
 * byte_enable masks the bytes of the access. Accesses that straddle a
 * lineWidth boundary (per transferNeedsBurst) become SplitDataRequests;
 * all others SingleDataRequests. Returns inst->translationFault, read
 * AFTER startAddrTranslation() so a synchronously reported translation
 * fault is visible to the caller. */
1586 Fault
1587 LSQ::pushRequest(MinorDynInstPtr inst, bool isLoad, uint8_t *data,
1588  unsigned int size, Addr addr, Request::Flags flags,
1589  uint64_t *res, AtomicOpFunctorPtr amo_op,
1590  const std::vector<bool>& byte_enable)
1591 {
 /* A pre-existing fault is only legal if the inst was already pushed;
  * the re-issue case is handled immediately below */
1592  assert(inst->translationFault == NoFault || inst->inLSQ);
1593 
 /* Idempotence: a second push for the same inst just reports the fault
  * recorded by the first push's translation */
1594  if (inst->inLSQ) {
1595  return inst->translationFault;
1596  }
1597 
1598  bool needs_burst = transferNeedsBurst(addr, size, lineWidth);
1599 
1600  if (needs_burst && inst->staticInst->isAtomic()) {
1601  // AMO requests that access across a cache line boundary are not
1602  // allowed since the cache does not guarantee AMO ops to be executed
1603  // atomically in two cache lines
1604  // For ISAs such as x86 that requires AMO operations to work on
1605  // accesses that cross cache-line boundaries, the cache needs to be
1606  // modified to support locking both cache lines to guarantee the
1607  // atomicity.
1608  panic("Do not expect cross-cache-line atomic memory request\n");
1609  }
1610 
1611  LSQRequestPtr request;
1612 
1613  /* Copy given data into the request. The request will pass this to the
1614  * packet and then it will own the data */
1615  uint8_t *request_data = NULL;
1616 
1617  DPRINTF(MinorMem, "Pushing request (%s) addr: 0x%x size: %d flags:"
1618  " 0x%x%s lineWidth : 0x%x\n",
1619  (isLoad ? "load" : "store/atomic"), addr, size, flags,
1620  (needs_burst ? " (needs burst)" : ""), lineWidth);
1621 
1622  if (!isLoad) {
1623  /* Request_data becomes the property of a ...DataRequest (see below)
1624  * and destroyed by its destructor */
1625  request_data = new uint8_t[size];
1626  if (inst->staticInst->isAtomic() ||
1627  (flags & Request::STORE_NO_DATA)) {
1628  /* For atomic or store-no-data, just use zeroed data */
1629  std::memset(request_data, 0, size);
1630  } else {
1631  std::memcpy(request_data, data, size);
1632  }
1633  }
1634 
 /* Cross-line accesses need fragmenting into multiple packets; the
  * request object (not this function) manages that */
1635  if (needs_burst) {
1636  request = new SplitDataRequest(
1637  *this, inst, isLoad, request_data, res);
1638  } else {
1639  request = new SingleDataRequest(
1640  *this, inst, isLoad, request_data, res);
1641  }
1642 
1643  if (inst->traceData)
1644  inst->traceData->setMem(addr, size, flags);
1645 
 /* Fill in the underlying Request: context, virtual address range,
  * requestor id, PC and the atomic op (ownership moved in) */
1646  int cid = cpu.threads[inst->id.threadId]->getTC()->contextId();
1647  request->request->setContext(cid);
1648  request->request->setVirt(
1649  addr, size, flags, cpu.dataRequestorId(),
1650  /* I've no idea why we need the PC, but give it */
1651  inst->pc->instAddr(), std::move(amo_op));
1652  request->request->setByteEnable(byte_enable);
1653 
 /* Order matters: enqueue and mark in-LSQ before starting translation,
  * then return whatever fault (if any) translation has set */
1654  requests.push(request);
1655  inst->inLSQ = true;
1656  request->startAddrTranslation();
1657 
1658  return inst->translationFault;
1659 }
1660 
1661 void
1663 {
1664  LSQRequestPtr request = new FailedDataRequest(*this, inst);
1665  requests.push(request);
1666 }
1667 
1668 void
1670 {
1671  minor::minorTrace("state=%s in_tlb_mem=%d/%d stores_in_transfers=%d"
1672  " lastMemBarrier=%d\n",
1675  requests.minorTrace();
1678 }
1679 
/* StoreBuffer constructor.
 *
 * name_ names the buffer for tracing; lsq_ is the owning LSQ.
 * store_buffer_size fixes the number of slots and
 * store_limit_per_cycle caps how many stores may be issued to memory
 * each cycle. The buffer starts empty with no unissued accesses. */
1680 LSQ::StoreBuffer::StoreBuffer(std::string name_, LSQ &lsq_,
1681  unsigned int store_buffer_size,
1682  unsigned int store_limit_per_cycle) :
1683  Named(name_), lsq(lsq_),
1684  numSlots(store_buffer_size),
1685  storeLimitPerCycle(store_limit_per_cycle),
1686  slots(),
1687  numUnissuedAccesses(0)
1688 {
1689 }
1690 
1691 PacketPtr
1692 makePacketForRequest(const RequestPtr &request, bool isLoad,
1693  Packet::SenderState *sender_state, PacketDataPtr data)
1694 {
1695  PacketPtr ret = isLoad ? Packet::createRead(request)
1696  : Packet::createWrite(request);
1697 
1698  if (sender_state)
1699  ret->pushSenderState(sender_state);
1700 
1701  if (isLoad) {
1702  ret->allocate();
1703  } else if (!request->isCacheMaintenance()) {
1704  // CMOs are treated as stores but they don't have data. All
1705  // stores otherwise need to allocate for data.
1706  ret->dataDynamic(data);
1707  }
1708 
1709  return ret;
1710 }
1711 
1712 void
1714 {
1715  assert(inst->isInst() && inst->staticInst->isFullMemBarrier());
1716  assert(inst->id.execSeqNum > lastMemBarrier[inst->id.threadId]);
1717 
1718  /* Remember the barrier. We only have a notion of one
1719  * barrier so this may result in some mem refs being
1720  * delayed if they are between barriers */
1721  lastMemBarrier[inst->id.threadId] = inst->id.execSeqNum;
1722 }
1723 
1724 void
1726 {
1727  assert(inst->translationFault == NoFault);
1728 
1729  /* Make the function idempotent */
1730  if (packet)
1731  return;
1732 
1733  packet = makePacketForRequest(request, isLoad, this, data);
1734  /* Null the ret data so we know not to deallocate it when the
1735  * ret is destroyed. The data now belongs to the ret and
1736  * the ret is responsible for its destruction */
1737  data = NULL;
1738 }
1739 
1740 std::ostream &
1742 {
1743  switch (state) {
1744  case LSQ::MemoryRunning:
1745  os << "MemoryRunning";
1746  break;
1747  case LSQ::MemoryNeedsRetry:
1748  os << "MemoryNeedsRetry";
1749  break;
1750  default:
1751  os << "MemoryState-" << static_cast<int>(state);
1752  break;
1753  }
1754  return os;
1755 }
1756 
1757 void
1759 {
1760  /* LLSC operations in Minor can't be speculative and are executed from
1761  * the head of the requests queue. We shouldn't need to do more than
1762  * this action on snoops. */
1763  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1764  if (cpu.getCpuAddrMonitor(tid)->doMonitor(pkt)) {
1765  cpu.wakeup(tid);
1766  }
1767  }
1768 
1769  if (pkt->isInvalidate() || pkt->isWrite()) {
1770  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1771  cpu.getContext(tid)->getIsaPtr()->handleLockedSnoop(
1772  pkt, cacheBlockMask);
1773  }
1774  }
1775 }
1776 
1777 void
1779 {
1780  /* LLSC operations in Minor can't be speculative and are executed from
1781  * the head of the requests queue. We shouldn't need to do more than
1782  * this action on snoops. */
1783  ThreadID req_tid = request->inst->id.threadId;
1784  PacketPtr pkt = request->packet;
1785 
1786  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1787  if (tid != req_tid) {
1788  if (cpu.getCpuAddrMonitor(tid)->doMonitor(pkt)) {
1789  cpu.wakeup(tid);
1790  }
1791 
1792  if (pkt->isInvalidate() || pkt->isWrite()) {
1793  cpu.getContext(tid)->getIsaPtr()->handleLockedSnoop(pkt,
1794  cacheBlockMask);
1795  }
1796  }
1797  }
1798 }
1799 
1800 } // namespace minor
1801 } // namespace gem5
gem5::minor::LSQ
Definition: lsq.hh:68
gem5::minor::LSQ::LSQRequest::sentAllPackets
virtual bool sentAllPackets()=0
Have all packets been sent?
gem5::curTick
Tick curTick()
The universal simulation clock.
Definition: cur_tick.hh:46
fatal
#define fatal(...)
This implements a cprintf based fatal() function.
Definition: logging.hh:190
gem5::minor::LSQ::requests
LSQQueue requests
requests contains LSQRequests which have been issued to the TLB by calling ExecContext::readMem/write...
Definition: lsq.hh:580
gem5::minor::LSQ::LSQRequest::retireResponse
virtual void retireResponse(PacketPtr packet_)=0
Retire a response packet into the LSQRequest packet possibly completing this transfer.
gem5::minor::Queue::front
ElemType & front()
Head value.
Definition: buffers.hh:501
gem5::minor::LSQ::LSQRequest::tryToSuppressFault
void tryToSuppressFault()
Instructions may want to suppress translation faults (e.g.
Definition: lsq.cc:80
gem5::minor::LSQ::LSQRequest::StoreToStoreBuffer
@ StoreToStoreBuffer
Definition: lsq.hh:182
gem5::BaseMMU::Read
@ Read
Definition: mmu.hh:56
gem5::minor::LSQ::MemoryNeedsRetry
@ MemoryNeedsRetry
Definition: lsq.hh:82
gem5::BaseISA::handleLockedRead
virtual void handleLockedRead(const RequestPtr &req)
Definition: isa.hh:81
utils.hh
gem5::NoFault
constexpr decltype(nullptr) NoFault
Definition: types.hh:260
gem5::RequestPort::sendTimingReq
bool sendTimingReq(PacketPtr pkt)
Attempt to send a timing request to the responder port by calling its corresponding receive function.
Definition: port.hh:495
warn
#define warn(...)
Definition: logging.hh:246
gem5::minor::LSQ::StoreBuffer::StoreBuffer
StoreBuffer(std::string name_, LSQ &lsq_, unsigned int store_buffer_size, unsigned int store_limit_per_cycle)
Definition: lsq.cc:1680
gem5::minor::LSQ::LSQRequest::request
RequestPtr request
The underlying request of this LSQRequest.
Definition: lsq.hh:157
gem5::minor::LSQ::issuedMemBarrierInst
void issuedMemBarrierInst(MinorDynInstPtr inst)
A memory barrier instruction has been issued, remember its execSeqNum that we can avoid issuing memor...
Definition: lsq.cc:1713
data
const char data[]
Definition: circlebuf.test.cc:48
gem5::minor::LSQ::SplitDataRequest::retireResponse
void retireResponse(PacketPtr packet_)
For loads, paste the response data into the main response packet.
Definition: lsq.cc:626
gem5::minor::LSQ::LSQRequest::isLoad
bool isLoad
Load/store indication used for building packet.
Definition: lsq.hh:144
gem5::minor::LSQ::AddrRangeCoverage
AddrRangeCoverage
Coverage of one address range with another.
Definition: lsq.hh:90
gem5::minor::LSQ::LSQRequest::RequestIssuing
@ RequestIssuing
Definition: lsq.hh:180
gem5::minor::LSQ::LSQRequest::makePacket
void makePacket()
Make a packet to use with the memory transaction.
Definition: lsq.cc:1725
gem5::minor::LSQ::threadSnoop
void threadSnoop(LSQRequestPtr request)
Snoop other threads monitors on memory system accesses.
Definition: lsq.cc:1778
gem5::Packet::findNextSenderState
T * findNextSenderState() const
Go through the sender state stack and return the first instance that is of type T (as determined by a...
Definition: packet.hh:564
gem5::minor::LSQ::LSQRequest::LSQRequestState
LSQRequestState
Definition: lsq.hh:174
gem5::minor::LSQ::PartialAddrRangeCoverage
@ PartialAddrRangeCoverage
Definition: lsq.hh:92
gem5::minor::LSQ::LSQRequest::packet
PacketPtr packet
Definition: lsq.hh:154
gem5::minor::LSQ::state
MemoryState state
Retry state of last issued memory transfer.
Definition: lsq.hh:551
gem5::minor::LSQ::LSQRequest::setSkipped
void setSkipped()
Set this request as having been skipped before a memory transfer was attempt.
Definition: lsq.hh:225
gem5::Packet::pushSenderState
void pushSenderState(SenderState *sender_state)
Push a new sender state to the packet and make the current sender state the predecessor of the new on...
Definition: packet.cc:316
gem5::BaseMMU::Mode
Mode
Definition: mmu.hh:56
gem5::minor::LSQ::SplitDataRequest::sendNextFragmentToTranslation
void sendNextFragmentToTranslation()
Part of the address translation loop, see startAddTranslation.
Definition: lsq.cc:703
gem5::minor::LSQ::NoAddrRangeCoverage
@ NoAddrRangeCoverage
Definition: lsq.hh:94
gem5::Packet::req
RequestPtr req
A pointer to the original request.
Definition: packet.hh:366
gem5::BaseMMU::Write
@ Write
Definition: mmu.hh:56
gem5::ThreadContext::getMMUPtr
virtual BaseMMU * getMMUPtr()=0
gem5::minor::LSQ::StoreBuffer::forwardStoreData
void forwardStoreData(LSQRequestPtr load, unsigned int slot_number)
Fill the given packet with appropriate date from slot slot_number.
Definition: lsq.cc:806
gem5::minor::LSQ::StoreBuffer::numUnissuedStores
unsigned int numUnissuedStores()
Number of stores in the store buffer which have not been completely issued to the memory system.
Definition: lsq.hh:526
gem5::minor::LSQ::LSQRequest::issuedToMemory
bool issuedToMemory
This in an access other than a normal cacheable load that's visited the memory system.
Definition: lsq.hh:169
gem5::Complete
@ Complete
Definition: misc.hh:57
gem5::minor::LSQ::LSQRequest::needsToBeSentToStoreBuffer
bool needsToBeSentToStoreBuffer()
This request, once processed by the requests/transfers queues, will need to go to the store buffer.
Definition: lsq.cc:166
gem5::BaseISA::handleLockedWrite
virtual bool handleLockedWrite(const RequestPtr &req, Addr cacheBlockMask)
Definition: isa.hh:88
gem5::minor::LSQ::FailedDataRequest
FailedDataRequest represents requests from instructions that failed their predicates but need to ride...
Definition: lsq.hh:329
minor
gem5::MinorCPU
MinorCPU is an in-order CPU model with four fixed pipeline stages:
Definition: cpu.hh:85
gem5::minor::LSQ::MemoryState
MemoryState
State of memory access for head access.
Definition: lsq.hh:79
gem5::minor::LSQ::SplitDataRequest::finish
void finish(const Fault &fault_, const RequestPtr &request_, ThreadContext *tc, BaseMMU::Mode mode)
TLB response interface.
Definition: lsq.cc:336
gem5::minor::LSQ::cacheBlockMask
Addr cacheBlockMask
Address Mask for a cache block (e.g.
Definition: lsq.hh:628
gem5::Packet::isWrite
bool isWrite() const
Definition: packet.hh:583
gem5::minor::LSQ::numAccessesInDTLB
unsigned int numAccessesInDTLB
Number of requests in the DTLB in the requests queue.
Definition: lsq.hh:612
gem5::Packet::createWrite
static PacketPtr createWrite(const RequestPtr &req)
Definition: packet.hh:1013
gem5::minor::LSQ::step
void step()
Step checks the queues to see if their are issuable transfers which were not otherwise picked up by t...
Definition: lsq.cc:1479
gem5::addrBlockOffset
Addr addrBlockOffset(Addr addr, Addr block_size)
Calculates the offset of a given address wrt aligned fixed-size blocks.
Definition: utils.hh:53
std::vector< bool >
gem5::minor::ExecContext::setMemAccPredicate
void setMemAccPredicate(bool val) override
Definition: exec_context.hh:264
gem5::minor::LSQ::needsToTick
bool needsToTick()
May need to be ticked next cycle as one of the queues contains an actionable transfers or address tra...
Definition: lsq.cc:1567
gem5::minor::LSQ::lineWidth
const unsigned int lineWidth
Memory system access width (and snap) in bytes.
Definition: lsq.hh:557
gem5::minor::LSQ::pushFailedRequest
void pushFailedRequest(MinorDynInstPtr inst)
Push a predicate failed-representing request into the queues just to maintain commit order.
Definition: lsq.cc:1662
gem5::PacketDataPtr
uint8_t * PacketDataPtr
Definition: packet.hh:71
gem5::minor::LSQ::zeroReg
const RegIndex zeroReg
Definition: lsq.hh:75
gem5::minor::Queue::minorTrace
void minorTrace() const
Definition: buffers.hh:512
gem5::minor::LSQ::LSQRequest::Complete
@ Complete
Definition: lsq.hh:193
gem5::minor::LSQ::lastMemBarrier
std::vector< InstSeqNum > lastMemBarrier
Most recent execSeqNum of a memory barrier instruction or 0 if there are no in-flight barriers.
Definition: lsq.hh:547
gem5::minor::LSQ::BarrierDataRequest
Request for doing barrier accounting in the store buffer.
Definition: lsq.hh:339
gem5::ArmISA::i
Bitfield< 7 > i
Definition: misc_types.hh:67
gem5::minor::Queue::empty
bool empty() const
Is the queue empty?
Definition: buffers.hh:509
gem5::minor::LSQ::SplitDataRequest::makeFragmentRequests
void makeFragmentRequests()
Make all the Requests for this transfer's fragments so that those requests can be sent for address tr...
Definition: lsq.cc:421
gem5::SimpleThread
The SimpleThread object provides a combination of the ThreadState object and the ThreadContext interf...
Definition: simple_thread.hh:93
gem5::minor::LSQ::SingleDataRequest::finish
void finish(const Fault &fault_, const RequestPtr &request_, ThreadContext *tc, BaseMMU::Mode mode)
TLB interace.
Definition: lsq.cc:273
gem5::minor::LSQ::SingleDataRequest::startAddrTranslation
void startAddrTranslation()
Send single translation request.
Definition: lsq.cc:303
execute.hh
gem5::minor::LSQ::LSQRequest::Translated
@ Translated
Definition: lsq.hh:178
gem5::MinorCPU::wakeupOnEvent
void wakeupOnEvent(unsigned int stage_id)
Interface for stages to signal that they have become active after a callback or eventq event where th...
Definition: cpu.cc:291
gem5::RefCountingPtr< MinorDynInst >
gem5::minor::LSQ::LSQRequest::containsAddrRangeOf
static AddrRangeCoverage containsAddrRangeOf(Addr req1_addr, unsigned int req1_size, Addr req2_addr, unsigned int req2_size)
Does address range req1 (req1_addr to req1_addr + req1_size - 1) fully cover, partially cover or not ...
Definition: lsq.cc:123
gem5::MinorCPU::threads
std::vector< minor::MinorThread * > threads
These are thread state-representing objects for this CPU.
Definition: cpu.hh:101
gem5::minor::LSQ::LSQRequest::~LSQRequest
virtual ~LSQRequest()
Definition: lsq.cc:1464
gem5::RubyTester::SenderState
Definition: RubyTester.hh:89
gem5::minor::LSQ::LSQRequest
Derived SenderState to carry data access info.
Definition: lsq.hh:129
gem5::Named
Interface for things with names.
Definition: named.hh:38
gem5::minor::LSQ::isDrained
bool isDrained()
Is there nothing left in the LSQ.
Definition: lsq.cc:1560
gem5::minor::LSQ::LSQRequest::data
PacketDataPtr data
Dynamically allocated and populated data carried for building write packets.
Definition: lsq.hh:148
gem5::minor::LSQ::~LSQ
virtual ~LSQ()
Definition: lsq.cc:1461
gem5::minor::LSQ::StoreBuffer::canForwardDataToLoad
AddrRangeCoverage canForwardDataToLoad(LSQRequestPtr request, unsigned int &found_slot)
Look for a store which satisfies the given load.
Definition: lsq.cc:765
gem5::Flags< FlagsType >
gem5::minor::LSQ::execute
Execute & execute
Definition: lsq.hh:73
gem5::minor::Pipeline::ExecuteStageId
@ ExecuteStageId
Definition: pipeline.hh:104
gem5::minor::LSQ::minorTrace
void minorTrace() const
Definition: lsq.cc:1669
gem5::minor::LSQ::LSQ
LSQ(std::string name_, std::string dcache_port_name_, MinorCPU &cpu_, Execute &execute_, unsigned int max_accesses_in_memory_system, unsigned int line_width, unsigned int requests_queue_size, unsigned int transfers_queue_size, unsigned int store_buffer_size, unsigned int store_buffer_cycle_store_limit, RegIndex zero_reg)
Definition: lsq.cc:1404
gem5::minor::LSQ::clearMemBarrier
void clearMemBarrier(MinorDynInstPtr inst)
Clear a barrier (if it's the last one marked up in lastMemBarrier)
Definition: lsq.cc:260
gem5::minor::LSQ::StoreBuffer::insert
void insert(LSQRequestPtr request)
Insert a request at the back of the queue.
Definition: lsq.cc:743
gem5::minor::LSQ::LSQRequest::Failed
@ Failed
Definition: lsq.hh:179
exec_context.hh
gem5::ThreadContext
ThreadContext is the external interface to all thread state for anything outside of the CPU.
Definition: thread_context.hh:94
gem5::minor::LSQ::LSQRequest::disableMemAccess
void disableMemAccess()
Definition: lsq.cc:116
gem5::Named::name
virtual std::string name() const
Definition: named.hh:47
gem5::Fault
std::shared_ptr< FaultBase > Fault
Definition: types.hh:255
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:186
gem5::minor::LSQ::StoreBuffer::isDrained
bool isDrained() const
Drained if there is absolutely nothing left in the buffer.
Definition: lsq.hh:534
gem5::Packet
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:283
gem5::minor::LSQ::LSQRequest::inst
MinorDynInstPtr inst
Instruction which made this request.
Definition: lsq.hh:140
gem5::transferNeedsBurst
bool transferNeedsBurst(Addr addr, unsigned int size, unsigned int block_size)
Returns true if the given memory access (address, size) needs to be fragmented across aligned fixed-s...
Definition: utils.hh:80
gem5::minor::LSQ::LSQRequest::stepToNextPacket
virtual void stepToNextPacket()=0
Step to the next packet for the next call to getHeadPacket.
gem5::minor::LSQ::LSQRequest::StoreBufferIssuing
@ StoreBufferIssuing
Definition: lsq.hh:187
gem5::minor::LSQ::SplitDataRequest::getHeadPacket
PacketPtr getHeadPacket()
Get the head packet as counted by numIssuedFragments.
Definition: lsq.cc:610
gem5::minor::LSQ::StoreBuffer::step
void step()
Try to issue more stores to memory.
Definition: lsq.cc:845
pipeline.hh
gem5::RequestPtr
std::shared_ptr< Request > RequestPtr
Definition: request.hh:92
gem5::minor::LSQ::LSQRequest::setState
void setState(LSQRequestState new_state)
Set state and output trace output.
Definition: lsq.cc:172
gem5::minor::LSQ::recvReqRetry
void recvReqRetry()
Definition: lsq.cc:1358
gem5::minor::LSQ::storeBuffer
StoreBuffer storeBuffer
Definition: lsq.hh:600
gem5::Packet::getConstPtr
const T * getConstPtr() const
Definition: packet.hh:1193
gem5::minor::LSQ::StoreBuffer::countIssuedStore
void countIssuedStore(LSQRequestPtr request)
Count a store being issued to memory by decrementing numUnissuedAccesses.
Definition: lsq.cc:836
gem5::minor::LSQ::LSQRequest::getHeadPacket
virtual PacketPtr getHeadPacket()=0
Get the next packet to issue for this request.
gem5::minor::Queue::push
void push(ElemType &data)
Push an element into the buffer if it isn't a bubble.
Definition: buffers.hh:433
gem5::isAnyActiveElement
bool isAnyActiveElement(const std::vector< bool >::const_iterator &it_start, const std::vector< bool >::const_iterator &it_end)
Test if there is any active element in an enablement range.
Definition: utils.hh:89
gem5::minor::LSQ::completeMemBarrierInst
void completeMemBarrierInst(MinorDynInstPtr inst, bool committed)
Complete a barrier instruction.
Definition: lsq.cc:915
gem5::minor::LSQ::SplitDataRequest::SplitDataRequest
SplitDataRequest(LSQ &port_, MinorDynInstPtr inst_, bool isLoad_, PacketDataPtr data_=NULL, uint64_t *res_=NULL)
Definition: lsq.cc:394
gem5::SimpleThread::getIsaPtr
BaseISA * getIsaPtr() override
Definition: simple_thread.hh:213
gem5::minor::LSQ::tryToSend
bool tryToSend(LSQRequestPtr request)
Try to send (or resend) a memory request's next/only packet to the memory system.
Definition: lsq.cc:1176
gem5::minor::makePacketForRequest
PacketPtr makePacketForRequest(const RequestPtr &request, bool isLoad, Packet::SenderState *sender_state, PacketDataPtr data)
Make a suitable packet for the given request.
Definition: lsq.cc:1692
gem5::minor::LSQ::popResponse
void popResponse(LSQRequestPtr response)
Sanity check and pop the head response.
Definition: lsq.cc:1525
compiler.hh
gem5::minor::LSQ::LSQRequest::RequestNeedsRetry
@ RequestNeedsRetry
Definition: lsq.hh:184
gem5::MinorCPU::wakeup
void wakeup(ThreadID tid) override
Definition: cpu.cc:143
gem5::minor::Queue::unreservedRemainingSpace
unsigned int unreservedRemainingSpace() const
Like remainingSpace but does not count reserved spaces.
Definition: buffers.hh:493
gem5::Packet::SenderState
A virtual base opaque structure used to hold state associated with the packet (e.g....
Definition: packet.hh:457
gem5::minor::LSQ::tryToSendToTransfers
void tryToSendToTransfers(LSQRequestPtr request)
Try and issue a memory access for a translated request at the head of the requests queue.
Definition: lsq.cc:961
gem5::Packet::cmd
MemCmd cmd
The command field of the packet.
Definition: packet.hh:361
gem5::minor::LSQ::SplitDataRequest::makeFragmentPackets
void makeFragmentPackets()
Make the packets to go with the requests so they can be sent to the memory system.
Definition: lsq.cc:535
gem5::SimpleThread::pcState
const PCStateBase & pcState() const override
Definition: simple_thread.hh:422
gem5::Addr
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:147
gem5::minor::LSQ::moveFromRequestsToTransfers
void moveFromRequestsToTransfers(LSQRequestPtr request)
Move a request between queues.
Definition: lsq.cc:1274
gem5::Packet::isError
bool isError() const
Definition: packet.hh:610
DPRINTFS
#define DPRINTFS(x, s,...)
Definition: trace.hh:193
gem5::GEM5_DEPRECATED_NAMESPACE
GEM5_DEPRECATED_NAMESPACE(GuestABI, guest_abi)
gem5::minor::minorTrace
void minorTrace(const char *fmt, Args ...args)
DPRINTFN for MinorTrace reporting.
Definition: trace.hh:67
gem5::BaseMMU::translateTiming
virtual void translateTiming(const RequestPtr &req, ThreadContext *tc, Translation *translation, Mode mode)
Definition: mmu.cc:111
gem5::minor::LSQ::LSQRequest::InTranslation
@ InTranslation
Definition: lsq.hh:177
gem5::minor::LSQ::numAccessesInMemorySystem
unsigned int numAccessesInMemorySystem
Count of the number of mem.
Definition: lsq.hh:609
gem5::Packet::popSenderState
SenderState * popSenderState()
Pop the top of the state stack and return a pointer to it.
Definition: packet.cc:324
gem5::MemCmd::toString
const std::string & toString() const
Return the string to a cmd given by idx.
Definition: packet.hh:265
gem5::minor::LSQ::canSendToMemorySystem
bool canSendToMemorySystem()
Can a request be sent to the memory system.
Definition: lsq.cc:1291
gem5::minor::LSQ::LSQRequest::LSQRequest
LSQRequest(LSQ &port_, MinorDynInstPtr inst_, bool isLoad_, RegIndex zero_reg, PacketDataPtr data_=NULL, uint64_t *res_=NULL)
Definition: lsq.cc:60
gem5::minor::LSQ::SplitDataRequest::~SplitDataRequest
~SplitDataRequest()
Definition: lsq.cc:411
gem5::minor::LSQ::SplitDataRequest::stepToNextPacket
void stepToNextPacket()
Step on numIssuedFragments.
Definition: lsq.cc:618
gem5::minor::LSQ::LSQRequest::StoreBufferNeedsRetry
@ StoreBufferNeedsRetry
Definition: lsq.hh:189
gem5::minor::LSQ::inMemorySystemLimit
const unsigned int inMemorySystemLimit
Maximum number of in-flight accesses issued to the memory system.
Definition: lsq.hh:554
gem5::Request::STORE_NO_DATA
static const FlagsType STORE_NO_DATA
Definition: request.hh:246
gem5::minor::ExecContext
ExecContext bears the exec_context interface for Minor.
Definition: exec_context.hh:73
gem5::minor::LSQ::LSQRequest::startAddrTranslation
virtual void startAddrTranslation()=0
Start the address translation process for this request.
gem5::minor::LSQ::StoreBuffer::canInsert
bool canInsert() const
Can a new request be inserted into the queue?
Definition: lsq.cc:722
gem5::minor::LSQ::dcachePort
DcachePort dcachePort
Definition: lsq.hh:123
gem5::Packet::allocate
void allocate()
Allocate memory for the packet.
Definition: packet.hh:1326
gem5::minor::LSQ::LSQRequest::state
LSQRequestState state
Definition: lsq.hh:196
gem5::X86ISA::os
Bitfield< 17 > os
Definition: misc.hh:809
gem5::minor::LSQ::numAccessesIssuedToMemory
unsigned int numAccessesIssuedToMemory
The number of accesses which have been issued to the memory system but have not been committed/discar...
Definition: lsq.hh:621
gem5::minor::LSQ::LSQRequest::completeDisabledMemAccess
void completeDisabledMemAccess()
Definition: lsq.cc:99
gem5::minor::LSQ::recvTimingSnoopReq
void recvTimingSnoopReq(PacketPtr pkt)
Definition: lsq.cc:1758
gem5::minor::LSQ::operator<<
friend std::ostream & operator<<(std::ostream &os, MemoryState state)
Print MemoryState values as shown in the enum definition.
Definition: lsq.cc:1741
gem5::minor::Execute
Execute stage.
Definition: execute.hh:68
gem5::Packet::makeResponse
void makeResponse()
Take a request packet and modify it in place to be suitable for returning as a response to that reque...
Definition: packet.hh:1031
gem5::Packet::dataDynamic
void dataDynamic(T *p)
Set the data pointer to a value that should have delete [] called on it.
Definition: packet.hh:1172
gem5::minor::LSQ::StoreBuffer::minorTrace
void minorTrace() const
Report queue contents for MinorTrace.
Definition: lsq.cc:932
gem5::minor::LSQ::SingleDataRequest
SingleDataRequest is used for requests that don't fragment.
Definition: lsq.hh:351
gem5::minor::LSQ::MemoryRunning
@ MemoryRunning
Definition: lsq.hh:81
gem5::minor::LSQ::LSQRequest::isBarrier
virtual bool isBarrier()
Is this a request a barrier?
Definition: lsq.cc:160
logging.hh
gem5::minor::LSQ::transfers
LSQQueue transfers
Once issued to memory (or, for stores, just had their state changed to StoreToStoreBuffer) LSQRequest...
Definition: lsq.hh:589
gem5::minor::LSQ::LSQRequest::NotIssued
@ NotIssued
Definition: lsq.hh:176
gem5::minor::LSQ::SplitDataRequest
Definition: lsq.hh:395
gem5::minor::LSQ::retryRequest
LSQRequestPtr retryRequest
The request (from either requests or the store buffer) which is currently waiting have its memory acc...
Definition: lsq.hh:625
gem5::minor::LSQ::SplitDataRequest::startAddrTranslation
void startAddrTranslation()
Start a loop of do { sendNextFragmentToTranslation ; translateTiming ; finish } while (numTranslatedF...
Definition: lsq.cc:585
trace.hh
gem5::minor::LSQ::numStoresInTransfers
unsigned int numStoresInTransfers
The number of stores in the transfers queue.
Definition: lsq.hh:616
gem5::minor::LSQ::StoreBuffer::deleteRequest
void deleteRequest(LSQRequestPtr request)
Delete the given request and free the slot it occupied.
Definition: lsq.cc:729
gem5::minor::LSQ::SingleDataRequest::retireResponse
void retireResponse(PacketPtr packet_)
Keep the given packet as the response packet LSQRequest::packet.
Definition: lsq.cc:327
gem5::minor::LSQ::recvTimingResp
bool recvTimingResp(PacketPtr pkt)
Memory interface.
Definition: lsq.cc:1298
gem5::minor::LSQ::cpu
MinorCPU & cpu
My owner(s)
Definition: lsq.hh:72
gem5::RegIndex
uint16_t RegIndex
Definition: types.hh:176
gem5::minor::LSQ::pushRequest
Fault pushRequest(MinorDynInstPtr inst, bool isLoad, uint8_t *data, unsigned int size, Addr addr, Request::Flags flags, uint64_t *res, AtomicOpFunctorPtr amo_op, const std::vector< bool > &byte_enable=std::vector< bool >())
Single interface for readMem/writeMem/amoMem to issue requests into the LSQ.
Definition: lsq.cc:1587
gem5::Packet::getAddr
Addr getAddr() const
Definition: packet.hh:781
gem5::AtomicOpFunctorPtr
std::unique_ptr< AtomicOpFunctor > AtomicOpFunctorPtr
Definition: amo.hh:242
gem5::minor::Queue::pop
void pop()
Pop the head item.
Definition: buffers.hh:506
gem5
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
Definition: tlb.cc:60
gem5::Packet::createRead
static PacketPtr createRead(const RequestPtr &req)
Constructor-like methods that return Packets based on Request objects.
Definition: packet.hh:1007
gem5::minor::LSQ::LSQRequest::reportData
void reportData(std::ostream &os) const
MinorTrace report interface.
Definition: lsq.cc:188
gem5::minor::Execute::instIsHeadInst
bool instIsHeadInst(MinorDynInstPtr inst)
Returns true if the given instruction is at the head of the inFlightInsts instruction queue.
Definition: execute.cc:1891
gem5::minor::LSQ::LSQRequest::hasPacketsInMemSystem
virtual bool hasPacketsInMemSystem()=0
True if this request has any issued packets in the memory system and so can't be interrupted until it...
lsq.hh
gem5::minor::LSQ::LSQRequest::isComplete
bool isComplete() const
Has this request been completed.
Definition: lsq.cc:180
gem5::minor::LSQ::sendStoreToStoreBuffer
void sendStoreToStoreBuffer(LSQRequestPtr request)
A store has been committed, please move it to the store buffer.
Definition: lsq.cc:1547
gem5::minor::LSQ::findResponse
LSQRequestPtr findResponse(MinorDynInstPtr inst)
Returns a response if it's at the head of the transfers queue and it's either complete or can be sent...
Definition: lsq.cc:1490
gem5::minor::LSQ::FullAddrRangeCoverage
@ FullAddrRangeCoverage
Definition: lsq.hh:93
gem5::Packet::getSize
unsigned getSize() const
Definition: packet.hh:791
gem5::PCStateBase::clone
virtual PCStateBase * clone() const =0
gem5::minor::LSQ::LSQRequest::StoreInStoreBuffer
@ StoreInStoreBuffer
Definition: lsq.hh:185
gem5::ThreadID
int16_t ThreadID
Thread index/ID type.
Definition: types.hh:242
gem5::minor::Execute::instIsRightStream
bool instIsRightStream(MinorDynInstPtr inst)
Does the given instruction have the right stream sequence number to be committed?
Definition: execute.cc:1885
gem5::Packet::isInvalidate
bool isInvalidate() const
Definition: packet.hh:598
panic
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:178
gem5::ArmISA::mode
Bitfield< 4, 0 > mode
Definition: misc_types.hh:74
gem5::X86ISA::addr
Bitfield< 3 > addr
Definition: types.hh:84
gem5::Packet::getPtr
T * getPtr()
get a pointer to the data ptr.
Definition: packet.hh:1184

Generated on Wed May 4 2022 12:13:53 for gem5 by doxygen 1.8.17