gem5  v21.0.0.0
lsq.cc
1 /*
2  * Copyright (c) 2013-2014,2017-2018,2020 ARM Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions are
16  * met: redistributions of source code must retain the above copyright
17  * notice, this list of conditions and the following disclaimer;
18  * redistributions in binary form must reproduce the above copyright
19  * notice, this list of conditions and the following disclaimer in the
20  * documentation and/or other materials provided with the distribution;
21  * neither the name of the copyright holders nor the names of its
22  * contributors may be used to endorse or promote products derived from
23  * this software without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 #include "cpu/minor/lsq.hh"
39 
40 #include <iomanip>
41 #include <sstream>
42 
43 #include "arch/locked_mem.hh"
44 #include "base/logging.hh"
45 #include "base/trace.hh"
46 #include "cpu/minor/exec_context.hh"
47 #include "cpu/minor/execute.hh"
48 #include "cpu/minor/pipeline.hh"
49 #include "cpu/utils.hh"
50 #include "debug/Activity.hh"
51 #include "debug/MinorMem.hh"
52 
53 namespace Minor
54 {
55 
56 LSQ::LSQRequest::LSQRequest(LSQ &port_, MinorDynInstPtr inst_, bool isLoad_,
57  PacketDataPtr data_, uint64_t *res_) :
58  SenderState(),
59  port(port_),
60  inst(inst_),
61  isLoad(isLoad_),
62  data(data_),
63  packet(NULL),
64  request(),
65  res(res_),
66  skipped(false),
67  issuedToMemory(false),
68  isTranslationDelayed(false),
69  state(NotIssued)
70 {
71  request = std::make_shared<Request>();
72 }
73 
74 void
75 LSQ::LSQRequest::tryToSuppressFault()
76 {
77  SimpleThread &thread = *port.cpu.threads[inst->id.threadId];
78  TheISA::PCState old_pc = thread.pcState();
79  ExecContext context(port.cpu, thread, port.execute, inst);
80  M5_VAR_USED Fault fault = inst->translationFault;
81 
82  // Give the instruction a chance to suppress a translation fault
83  inst->translationFault = inst->staticInst->initiateAcc(&context, nullptr);
84  if (inst->translationFault == NoFault) {
85  DPRINTFS(MinorMem, (&port),
86  "Translation fault suppressed for inst:%s\n", *inst);
87  } else {
88  assert(inst->translationFault == fault);
89  }
90  thread.pcState(old_pc);
91 }
92 
93 void
94 LSQ::LSQRequest::completeDisabledMemAccess()
95 {
96  DPRINTFS(MinorMem, (&port), "Complete disabled mem access for inst:%s\n",
97  *inst);
98 
99  SimpleThread &thread = *port.cpu.threads[inst->id.threadId];
100  TheISA::PCState old_pc = thread.pcState();
101 
102  ExecContext context(port.cpu, thread, port.execute, inst);
103 
104  context.setMemAccPredicate(false);
105  inst->staticInst->completeAcc(nullptr, &context, inst->traceData);
106 
107  thread.pcState(old_pc);
108 }
109 
110 void
111 LSQ::LSQRequest::disableMemAccess()
112 {
113  port.cpu.threads[inst->id.threadId]->setMemAccPredicate(false);
114  DPRINTFS(MinorMem, (&port), "Disable mem access for inst:%s\n", *inst);
115 }
116 
117 LSQ::AddrRangeCoverage
118 LSQ::LSQRequest::containsAddrRangeOf(
119  Addr req1_addr, unsigned int req1_size,
120  Addr req2_addr, unsigned int req2_size)
121 {
122  /* 'end' here means the address of the byte just past the request
123  * blocks */
124  Addr req2_end_addr = req2_addr + req2_size;
125  Addr req1_end_addr = req1_addr + req1_size;
126 
127  AddrRangeCoverage ret;
128 
129  if (req1_addr >= req2_end_addr || req1_end_addr <= req2_addr)
130  ret = NoAddrRangeCoverage;
131  else if (req1_addr <= req2_addr && req1_end_addr >= req2_end_addr)
132  ret = FullAddrRangeCoverage;
133  else
134  ret = PartialAddrRangeCoverage;
135 
136  return ret;
137 }
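/* Editorial note, not part of the original source: as a worked example of
 * the classification above, with req1 covering [0x100, 0x110) and req2
 * covering [0x104, 0x108), req1 fully contains req2 and the result is
 * FullAddrRangeCoverage; if req2 instead covered [0x10c, 0x114) the two
 * ranges would only overlap in part, giving PartialAddrRangeCoverage;
 * disjoint ranges give NoAddrRangeCoverage. */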
138 
139 LSQ::AddrRangeCoverage
140 LSQ::LSQRequest::containsAddrRangeOf(LSQRequestPtr other_request)
141 {
142  AddrRangeCoverage ret = containsAddrRangeOf(
143  request->getPaddr(), request->getSize(),
144  other_request->request->getPaddr(), other_request->request->getSize());
145  /* If there is a strobe mask then store data forwarding might not be
146  * correct. Instead of checking enablement of every byte we just fall back
147  * to PartialAddrRangeCoverage to prohibit store data forwarding */
148  if (ret == FullAddrRangeCoverage && request->isMasked())
149  ret = PartialAddrRangeCoverage;
150  return ret;
151 }
152 
153 
154 bool
155 LSQ::LSQRequest::isBarrier()
156 {
157  return inst->isInst() && inst->staticInst->isFullMemBarrier();
158 }
159 
160 bool
161 LSQ::LSQRequest::needsToBeSentToStoreBuffer()
162 {
163  return state == StoreToStoreBuffer;
164 }
165 
166 void
167 LSQ::LSQRequest::setState(LSQRequestState new_state)
168 {
169  DPRINTFS(MinorMem, (&port), "Setting state from %d to %d for request:"
170  " %s\n", state, new_state, *inst);
171  state = new_state;
172 }
173 
174 bool
175 LSQ::LSQRequest::isComplete() const
176 {
177  /* @todo, There is currently only one 'completed' state. This
178  * may not be a good choice */
179  return state == Complete;
180 }
181 
182 void
183 LSQ::LSQRequest::reportData(std::ostream &os) const
184 {
185  os << (isLoad ? 'R' : 'W') << ';';
186  inst->reportData(os);
187  os << ';' << state;
188 }
189 
190 std::ostream &
191 operator <<(std::ostream &os, LSQ::AddrRangeCoverage coverage)
192 {
193  switch (coverage) {
194  case LSQ::PartialAddrRangeCoverage:
195  os << "PartialAddrRangeCoverage";
196  break;
197  case LSQ::FullAddrRangeCoverage:
198  os << "FullAddrRangeCoverage";
199  break;
200  case LSQ::NoAddrRangeCoverage:
201  os << "NoAddrRangeCoverage";
202  break;
203  default:
204  os << "AddrRangeCoverage-" << static_cast<int>(coverage);
205  break;
206  }
207  return os;
208 }
209 
210 std::ostream &
211 operator <<(std::ostream &os, LSQ::LSQRequest::LSQRequestState state)
212 {
213  switch (state) {
214  case LSQ::LSQRequest::NotIssued:
215  os << "NotIssued";
216  break;
217  case LSQ::LSQRequest::InTranslation:
218  os << "InTranslation";
219  break;
220  case LSQ::LSQRequest::Translated:
221  os << "Translated";
222  break;
223  case LSQ::LSQRequest::Failed:
224  os << "Failed";
225  break;
226  case LSQ::LSQRequest::RequestIssuing:
227  os << "RequestIssuing";
228  break;
229  case LSQ::LSQRequest::StoreToStoreBuffer:
230  os << "StoreToStoreBuffer";
231  break;
232  case LSQ::LSQRequest::StoreInStoreBuffer:
233  os << "StoreInStoreBuffer";
234  break;
235  case LSQ::LSQRequest::StoreBufferIssuing:
236  os << "StoreBufferIssuing";
237  break;
238  case LSQ::LSQRequest::RequestNeedsRetry:
239  os << "RequestNeedsRetry";
240  break;
241  case LSQ::LSQRequest::StoreBufferNeedsRetry:
242  os << "StoreBufferNeedsRetry";
243  break;
244  case LSQ::LSQRequest::Complete:
245  os << "Complete";
246  break;
247  default:
248  os << "LSQRequestState-" << static_cast<int>(state);
249  break;
250  }
251  return os;
252 }
253 
254 void
255 LSQ::clearMemBarrier(MinorDynInstPtr inst)
256 {
257  bool is_last_barrier =
258  inst->id.execSeqNum >= lastMemBarrier[inst->id.threadId];
259 
260  DPRINTF(MinorMem, "Moving %s barrier out of store buffer inst: %s\n",
261  (is_last_barrier ? "last" : "a"), *inst);
262 
263  if (is_last_barrier)
264  lastMemBarrier[inst->id.threadId] = 0;
265 }
266 
267 void
268 LSQ::SingleDataRequest::finish(const Fault &fault_, const RequestPtr &request_,
269  ThreadContext *tc, BaseTLB::Mode mode)
270 {
271  port.numAccessesInDTLB--;
272 
273  DPRINTFS(MinorMem, (&port), "Received translation response for"
274  " request: %s delayed:%d %s\n", *inst, isTranslationDelayed,
275  fault_ != NoFault ? fault_->name() : "");
276 
277  if (fault_ != NoFault) {
278  inst->translationFault = fault_;
279  if (isTranslationDelayed) {
280  tryToSuppressFault();
281  if (inst->translationFault == NoFault) {
282  completeDisabledMemAccess();
283  setState(Complete);
284  }
285  }
286  setState(Translated);
287  } else {
288  setState(Translated);
289  makePacket();
290  }
291  port.tryToSendToTransfers(this);
292 
293  /* Let's try and wake up the processor for the next cycle */
294  port.cpu.wakeupOnEvent(Pipeline::ExecuteStageId);
295 }
296 
297 void
298 LSQ::SingleDataRequest::startAddrTranslation()
299 {
300  ThreadContext *thread = port.cpu.getContext(
301  inst->id.threadId);
302 
303  const auto &byte_enable = request->getByteEnable();
304  if (isAnyActiveElement(byte_enable.cbegin(), byte_enable.cend())) {
305  port.numAccessesInDTLB++;
306 
308 
309  DPRINTFS(MinorMem, (&port), "Submitting DTLB request\n");
310  /* Submit the translation request. The response will come through
311  * finish/markDelayed on the LSQRequest as it bears the Translation
312  * interface */
313  thread->getMMUPtr()->translateTiming(
314  request, thread, this, (isLoad ? BaseTLB::Read : BaseTLB::Write));
315  } else {
316  disableMemAccess();
317  setState(LSQ::LSQRequest::Complete);
318  }
319 }
320 
321 void
322 LSQ::SingleDataRequest::retireResponse(PacketPtr packet_)
323 {
324  DPRINTFS(MinorMem, (&port), "Retiring packet\n");
325  packet = packet_;
326  packetInFlight = false;
327  setState(Complete);
328 }
329 
330 void
331 LSQ::SplitDataRequest::finish(const Fault &fault_, const RequestPtr &request_,
332  ThreadContext *tc, BaseTLB::Mode mode)
333 {
334  port.numAccessesInDTLB--;
335 
336  M5_VAR_USED unsigned int expected_fragment_index =
337  numTranslatedFragments;
338 
339  numInTranslationFragments--;
340  numTranslatedFragments++;
341 
342  DPRINTFS(MinorMem, (&port), "Received translation response for fragment"
343  " %d of request: %s delayed:%d %s\n", expected_fragment_index,
344  *inst, isTranslationDelayed,
345  fault_ != NoFault ? fault_->name() : "");
346 
347  assert(request_ == fragmentRequests[expected_fragment_index]);
348 
349  /* Wake up next cycle to get things going again in case the
350  * tryToSendToTransfers does take */
351  port.cpu.wakeupOnEvent(Pipeline::ExecuteStageId);
352 
353  if (fault_ != NoFault) {
354  /* tryToSendToTransfers will handle the fault */
355  inst->translationFault = fault_;
356 
357  DPRINTFS(MinorMem, (&port), "Faulting translation for fragment:"
358  " %d of request: %s\n",
359  expected_fragment_index, *inst);
360 
361  if (expected_fragment_index > 0 || isTranslationDelayed)
362  tryToSuppressFault();
363  if (expected_fragment_index == 0) {
364  if (isTranslationDelayed && inst->translationFault == NoFault) {
365  completeDisabledMemAccess();
366  setState(Complete);
367  } else {
368  setState(Translated);
369  }
370  } else if (inst->translationFault == NoFault) {
371  setState(Translated);
372  numTranslatedFragments--;
373  makeFragmentPackets();
374  } else {
375  setState(Translated);
376  }
377  port.tryToSendToTransfers(this);
378  } else if (numTranslatedFragments == numFragments) {
379  makeFragmentPackets();
380  setState(Translated);
381  port.tryToSendToTransfers(this);
382  } else {
383  /* Avoid calling translateTiming from within ::finish */
384  assert(!translationEvent.scheduled());
385  port.cpu.schedule(translationEvent, curTick());
386  }
387 }
388 
389 LSQ::SplitDataRequest::SplitDataRequest(LSQ &port_, MinorDynInstPtr inst_,
390  bool isLoad_, PacketDataPtr data_, uint64_t *res_) :
391  LSQRequest(port_, inst_, isLoad_, data_, res_),
392  translationEvent([this]{ sendNextFragmentToTranslation(); },
393  "translationEvent"),
394  numFragments(0),
395  numInTranslationFragments(0),
396  numTranslatedFragments(0),
397  numIssuedFragments(0),
398  numRetiredFragments(0),
399  fragmentRequests(),
400  fragmentPackets()
401 {
402  /* Don't know how many elements are needed until the request is
403  * populated by the caller. */
404 }
405 
406 LSQ::SplitDataRequest::~SplitDataRequest()
407 {
408  for (auto i = fragmentPackets.begin();
409  i != fragmentPackets.end(); i++)
410  {
411  delete *i;
412  }
413 }
414 
415 void
416 LSQ::SplitDataRequest::makeFragmentRequests()
417 {
418  Addr base_addr = request->getVaddr();
419  unsigned int whole_size = request->getSize();
420  unsigned int line_width = port.lineWidth;
421 
422  unsigned int fragment_size;
423  Addr fragment_addr;
424 
425  std::vector<bool> fragment_write_byte_en;
426 
427  /* Assume that this transfer is across potentially many block snap
428  * boundaries:
429  *
430  * | _|________|________|________|___ |
431  * | |0| 1 | 2 | 3 | 4 | |
432  * | |_|________|________|________|___| |
433  * | | | | | |
434  *
435  * The first transfer (0) can be up to lineWidth in size.
436  * All the middle transfers (1-3) are lineWidth in size
437  * The last transfer (4) can be from zero to lineWidth - 1 in size
438  */
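 /* Editorial example (values assumed, not from the original source): with
  * line_width = 64 bytes, base_addr = 0x7c and whole_size = 72:
  * first_fragment_offset = 0x7c % 64 = 60, so first_fragment_size = 4;
  * last_fragment_size = (0x7c + 72) % 64 = 4;
  * middle_fragments_total_size = 72 - (4 + 4) = 64, giving one middle
  * fragment and numFragments = 1 + 1 + 1 = 3 */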
439  unsigned int first_fragment_offset =
440  addrBlockOffset(base_addr, line_width);
441  unsigned int last_fragment_size =
442  addrBlockOffset(base_addr + whole_size, line_width);
443  unsigned int first_fragment_size =
444  line_width - first_fragment_offset;
445 
446  unsigned int middle_fragments_total_size =
447  whole_size - (first_fragment_size + last_fragment_size);
448 
449  assert(addrBlockOffset(middle_fragments_total_size, line_width) == 0);
450 
451  unsigned int middle_fragment_count =
452  middle_fragments_total_size / line_width;
453 
454  numFragments = 1 /* first */ + middle_fragment_count +
455  (last_fragment_size == 0 ? 0 : 1);
456 
457  DPRINTFS(MinorMem, (&port), "Dividing transfer into %d fragmentRequests."
458  " First fragment size: %d Last fragment size: %d\n",
459  numFragments, first_fragment_size,
460  (last_fragment_size == 0 ? line_width : last_fragment_size));
461 
462  assert(((middle_fragment_count * line_width) +
463  first_fragment_size + last_fragment_size) == whole_size);
464 
465  fragment_addr = base_addr;
466  fragment_size = first_fragment_size;
467 
468  /* Just past the last address in the request */
469  Addr end_addr = base_addr + whole_size;
470 
471  auto& byte_enable = request->getByteEnable();
472  unsigned int num_disabled_fragments = 0;
473 
474  for (unsigned int fragment_index = 0; fragment_index < numFragments;
475  fragment_index++)
476  {
477  M5_VAR_USED bool is_last_fragment = false;
478 
479  if (fragment_addr == base_addr) {
480  /* First fragment */
481  fragment_size = first_fragment_size;
482  } else {
483  if ((fragment_addr + line_width) > end_addr) {
484  /* Adjust size of last fragment */
485  fragment_size = end_addr - fragment_addr;
486  is_last_fragment = true;
487  } else {
488  /* Middle fragments */
489  fragment_size = line_width;
490  }
491  }
492 
493  RequestPtr fragment = std::make_shared<Request>();
494  bool disabled_fragment = false;
495 
496  fragment->setContext(request->contextId());
497  // Set up byte-enable mask for the current fragment
498  auto it_start = byte_enable.begin() +
499  (fragment_addr - base_addr);
500  auto it_end = byte_enable.begin() +
501  (fragment_addr - base_addr) + fragment_size;
502  if (isAnyActiveElement(it_start, it_end)) {
503  fragment->setVirt(
504  fragment_addr, fragment_size, request->getFlags(),
505  request->requestorId(),
506  request->getPC());
507  fragment->setByteEnable(std::vector<bool>(it_start, it_end));
508  } else {
509  disabled_fragment = true;
510  }
511 
512  if (!disabled_fragment) {
513  DPRINTFS(MinorMem, (&port), "Generating fragment addr: 0x%x"
514  " size: %d (whole request addr: 0x%x size: %d) %s\n",
515  fragment_addr, fragment_size, base_addr, whole_size,
516  (is_last_fragment ? "last fragment" : ""));
517 
518  fragmentRequests.push_back(fragment);
519  } else {
520  num_disabled_fragments++;
521  }
522 
523  fragment_addr += fragment_size;
524  }
525  assert(numFragments >= num_disabled_fragments);
526  numFragments -= num_disabled_fragments;
527 }
528 
529 void
530 LSQ::SplitDataRequest::makeFragmentPackets()
531 {
532  assert(numTranslatedFragments > 0);
533  Addr base_addr = request->getVaddr();
534 
535  DPRINTFS(MinorMem, (&port), "Making packets for request: %s\n", *inst);
536 
537  for (unsigned int fragment_index = 0;
538  fragment_index < numTranslatedFragments;
539  fragment_index++)
540  {
541  RequestPtr fragment = fragmentRequests[fragment_index];
542 
543  DPRINTFS(MinorMem, (&port), "Making packet %d for request: %s"
544  " (%s, 0x%x)\n",
545  fragment_index, *inst,
546  (fragment->hasPaddr() ? "has paddr" : "no paddr"),
547  (fragment->hasPaddr() ? fragment->getPaddr() : 0));
548 
549  Addr fragment_addr = fragment->getVaddr();
550  unsigned int fragment_size = fragment->getSize();
551 
552  uint8_t *request_data = NULL;
553 
554  if (!isLoad) {
555  /* Split data for Packets. Will become the property of the
556  * outgoing Packets */
557  request_data = new uint8_t[fragment_size];
558  std::memcpy(request_data, data + (fragment_addr - base_addr),
559  fragment_size);
560  }
561 
562  assert(fragment->hasPaddr());
563 
564  PacketPtr fragment_packet =
565  makePacketForRequest(fragment, isLoad, this, request_data);
566 
567  fragmentPackets.push_back(fragment_packet);
568  /* Accumulate flags in parent request */
569  request->setFlags(fragment->getFlags());
570  }
571 
572  /* Might as well make the overall/response packet here */
573  /* Get the physical address for the whole request/packet from the first
574  * fragment */
575  request->setPaddr(fragmentRequests[0]->getPaddr());
576  makePacket();
577 }
578 
579 void
580 LSQ::SplitDataRequest::startAddrTranslation()
581 {
582  makeFragmentRequests();
583 
584  if (numFragments > 0) {
585  setState(LSQ::LSQRequest::InTranslation);
586  numInTranslationFragments = 0;
587  numTranslatedFragments = 0;
588 
589  /* @todo, just do these in sequence for now with
590  * a loop of:
591  * do {
592  * sendNextFragmentToTranslation ; translateTiming ; finish
593  * } while (numTranslatedFragments != numFragments);
594  */
595 
596  /* Do first translation */
597  sendNextFragmentToTranslation();
598  } else {
599  disableMemAccess();
600  setState(LSQ::LSQRequest::Complete);
601  }
602 }
603 
604 PacketPtr
605 LSQ::SplitDataRequest::getHeadPacket()
606 {
607  assert(numIssuedFragments < numTranslatedFragments);
608 
609  return fragmentPackets[numIssuedFragments];
610 }
611 
612 void
613 LSQ::SplitDataRequest::stepToNextPacket()
614 {
615  assert(numIssuedFragments < numTranslatedFragments);
616 
617  numIssuedFragments++;
618 }
619 
620 void
621 LSQ::SplitDataRequest::retireResponse(PacketPtr response)
622 {
623  assert(inst->translationFault == NoFault);
624  assert(numRetiredFragments < numTranslatedFragments);
625 
626  DPRINTFS(MinorMem, (&port), "Retiring fragment addr: 0x%x size: %d"
627  " offset: 0x%x (retired fragment num: %d)\n",
628  response->req->getVaddr(), response->req->getSize(),
629  request->getVaddr() - response->req->getVaddr(),
630  numRetiredFragments);
631 
632  numRetiredFragments++;
633 
634  if (skipped) {
635  /* Skip because we already knew the request had faulted or been
636  * skipped */
637  DPRINTFS(MinorMem, (&port), "Skipping this fragment\n");
638  } else if (response->isError()) {
639  /* Mark up the error and leave to execute to handle it */
640  DPRINTFS(MinorMem, (&port), "Fragment has an error, skipping\n");
641  setSkipped();
642  packet->copyError(response);
643  } else {
644  if (isLoad) {
645  if (!data) {
646  /* For a split transfer, a Packet must be constructed
647  * to contain all returning data. This is that packet's
648  * data */
649  data = new uint8_t[request->getSize()];
650  }
651 
652  /* Populate the portion of the overall response data represented
653  * by the response fragment */
654  std::memcpy(
655  data + (response->req->getVaddr() - request->getVaddr()),
656  response->getConstPtr<uint8_t>(),
657  response->req->getSize());
658  }
659  }
660 
661  /* Complete early if we're skipping and there are no more in-flight accesses */
662  if (skipped && !hasPacketsInMemSystem()) {
663  DPRINTFS(MinorMem, (&port), "Completed skipped burst\n");
664  setState(Complete);
665  if (packet->needsResponse())
666  packet->makeResponse();
667  }
668 
669  if (numRetiredFragments == numTranslatedFragments)
670  setState(Complete);
671 
672  if (!skipped && isComplete()) {
673  DPRINTFS(MinorMem, (&port), "Completed burst %d\n", packet != NULL);
674 
675  DPRINTFS(MinorMem, (&port), "Retired packet isRead: %d isWrite: %d"
676  " needsResponse: %d packetSize: %s requestSize: %s responseSize:"
677  " %s\n", packet->isRead(), packet->isWrite(),
678  packet->needsResponse(), packet->getSize(), request->getSize(),
679  response->getSize());
680 
681  /* A request can become complete by several paths, this is a sanity
682  * check to make sure the packet's data is created */
683  if (!data) {
684  data = new uint8_t[request->getSize()];
685  }
686 
687  if (isLoad) {
688  DPRINTFS(MinorMem, (&port), "Copying read data\n");
689  std::memcpy(packet->getPtr<uint8_t>(), data, request->getSize());
690  }
691  packet->makeResponse();
692  }
693 
694  /* Packets are all deallocated together in ~SplitLSQRequest */
695 }
696 
697 void
698 LSQ::SplitDataRequest::sendNextFragmentToTranslation()
699 {
700  unsigned int fragment_index = numTranslatedFragments;
701 
702  ThreadContext *thread = port.cpu.getContext(
703  inst->id.threadId);
704 
705  DPRINTFS(MinorMem, (&port), "Submitting DTLB request for fragment: %d\n",
706  fragment_index);
707 
708  port.numAccessesInDTLB++;
709  numInTranslationFragments++;
710 
711  thread->getMMUPtr()->translateTiming(
712  fragmentRequests[fragment_index], thread, this, (isLoad ?
713  BaseTLB::Read : BaseTLB::Write));
714 }
715 
716 bool
717 LSQ::StoreBuffer::canInsert() const
718 {
719  /* @todo, support store amalgamation */
720  return slots.size() < numSlots;
721 }
722 
723 void
724 LSQ::StoreBuffer::deleteRequest(LSQRequestPtr request)
725 {
726  auto found = std::find(slots.begin(), slots.end(), request);
727 
728  if (found != slots.end()) {
729  DPRINTF(MinorMem, "Deleting request: %s %s %s from StoreBuffer\n",
730  request, *found, *(request->inst));
731  slots.erase(found);
732 
733  delete request;
734  }
735 }
736 
737 void
738 LSQ::StoreBuffer::insert(LSQRequestPtr request)
739 {
740  if (!canInsert()) {
741  warn("%s: store buffer insertion without space to insert from"
742  " inst: %s\n", name(), *(request->inst));
743  }
744 
745  DPRINTF(MinorMem, "Pushing store: %s into store buffer\n", request);
746 
747  numUnissuedAccesses++;
748 
749  if (request->state != LSQRequest::Complete)
750  request->setState(LSQRequest::StoreInStoreBuffer);
751 
752  slots.push_back(request);
753 
754  /* Let's try and wake up the processor for the next cycle to step
755  * the store buffer */
756  lsq.cpu.wakeupOnEvent(Pipeline::ExecuteStageId);
757 }
758 
759 LSQ::AddrRangeCoverage
760 LSQ::StoreBuffer::canForwardDataToLoad(LSQRequestPtr request,
761  unsigned int &found_slot)
762 {
763  unsigned int slot_index = slots.size() - 1;
764  auto i = slots.rbegin();
765  AddrRangeCoverage ret = NoAddrRangeCoverage;
766 
767  /* Traverse the store buffer in reverse order (most to least recent)
768  * and try to find a slot whose address range overlaps this request */
769  while (ret == NoAddrRangeCoverage && i != slots.rend()) {
770  LSQRequestPtr slot = *i;
771 
772  /* Cache maintenance instructions go down via the store path but
773  * they carry no data and they shouldn't be considered
774  * for forwarding */
775  if (slot->packet &&
776  slot->inst->id.threadId == request->inst->id.threadId &&
777  !slot->packet->req->isCacheMaintenance()) {
778  AddrRangeCoverage coverage = slot->containsAddrRangeOf(request);
779 
780  if (coverage != NoAddrRangeCoverage) {
781  DPRINTF(MinorMem, "Forwarding: slot: %d result: %s thisAddr:"
782  " 0x%x thisSize: %d slotAddr: 0x%x slotSize: %d\n",
783  slot_index, coverage,
784  request->request->getPaddr(), request->request->getSize(),
785  slot->request->getPaddr(), slot->request->getSize());
786 
787  found_slot = slot_index;
788  ret = coverage;
789  }
790  }
791 
792  i++;
793  slot_index--;
794  }
795 
796  return ret;
797 }
798 
800 void
801 LSQ::StoreBuffer::forwardStoreData(LSQRequestPtr load,
802  unsigned int slot_number)
803 {
804  assert(slot_number < slots.size());
805  assert(load->packet);
806  assert(load->isLoad);
807 
808  LSQRequestPtr store = slots[slot_number];
809 
810  assert(store->packet);
811  assert(store->containsAddrRangeOf(load) == FullAddrRangeCoverage);
812 
813  Addr load_addr = load->request->getPaddr();
814  Addr store_addr = store->request->getPaddr();
815  Addr addr_offset = load_addr - store_addr;
816 
817  unsigned int load_size = load->request->getSize();
818 
819  DPRINTF(MinorMem, "Forwarding %d bytes for addr: 0x%x from store buffer"
820  " slot: %d addr: 0x%x addressOffset: 0x%x\n",
821  load_size, load_addr, slot_number,
822  store_addr, addr_offset);
823 
824  void *load_packet_data = load->packet->getPtr<void>();
825  void *store_packet_data = store->packet->getPtr<uint8_t>() + addr_offset;
826 
827  std::memcpy(load_packet_data, store_packet_data, load_size);
828 }
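/* Editorial note on the arithmetic above (illustrative values only): a
 * buffered store of 16 bytes at paddr 0x1000 can fully satisfy a 4-byte
 * load at paddr 0x1008; addr_offset = 0x1008 - 0x1000 = 8, so the 4 bytes
 * are copied from offset 8 of the store packet's data into the load
 * packet. */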
829 
830 void
831 LSQ::StoreBuffer::countIssuedStore(LSQRequestPtr request)
832 {
833  /* Barriers are accounted for as they are cleared from
834  * the queue, not after their transfers are complete */
835  if (!request->isBarrier())
836  numUnissuedAccesses--;
837 }
838 
839 void
840 LSQ::StoreBuffer::step()
841 {
842  DPRINTF(MinorMem, "StoreBuffer step numUnissuedAccesses: %d\n",
843  numUnissuedAccesses);
844 
845  if (numUnissuedAccesses != 0 && lsq.state == LSQ::MemoryRunning) {
846  /* Clear all the leading barriers */
847  while (!slots.empty() &&
848  slots.front()->isComplete() && slots.front()->isBarrier())
849  {
850  LSQRequestPtr barrier = slots.front();
851 
852  DPRINTF(MinorMem, "Clearing barrier for inst: %s\n",
853  *(barrier->inst));
854 
855  numUnissuedAccesses--;
856  lsq.clearMemBarrier(barrier->inst);
857  slots.pop_front();
858 
859  delete barrier;
860  }
861 
862  auto i = slots.begin();
863  bool issued = true;
864  unsigned int issue_count = 0;
865 
866  /* Skip trying if the memory system is busy */
867  if (lsq.state == LSQ::MemoryNeedsRetry)
868  issued = false;
869 
870  /* Try to issue all stores in order starting from the head
871  * of the queue. Responses are allowed to be retired
872  * out of order */
873  while (issued &&
874  issue_count < storeLimitPerCycle &&
875  lsq.canSendToMemorySystem() &&
876  i != slots.end())
877  {
878  LSQRequestPtr request = *i;
879 
880  DPRINTF(MinorMem, "Considering request: %s, sentAllPackets: %d"
881  " state: %s\n",
882  *(request->inst), request->sentAllPackets(),
883  request->state);
884 
885  if (request->isBarrier() && request->isComplete()) {
886  /* Give up at barriers */
887  issued = false;
888  } else if (!(request->state == LSQRequest::StoreBufferIssuing &&
889  request->sentAllPackets()))
890  {
891  DPRINTF(MinorMem, "Trying to send request: %s to memory"
892  " system\n", *(request->inst));
893 
894  if (lsq.tryToSend(request)) {
895  countIssuedStore(request);
896  issue_count++;
897  } else {
898  /* Don't step on to the next store buffer entry if this
899  * one hasn't issued all its packets as the store
900  * buffer must still enforce ordering */
901  issued = false;
902  }
903  }
904  i++;
905  }
906  }
907 }
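/* Editorial summary of StoreBuffer::step above: completed barriers at the
 * head of the buffer are cleared first, then stores are issued strictly in
 * order from the front, at most storeLimitPerCycle per call, and issue
 * stops early at a completed barrier or when a store cannot send all of
 * its packets. */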
908 
909 void
910 LSQ::completeMemBarrierInst(MinorDynInstPtr inst,
911  bool committed)
912 {
913  if (committed) {
914  /* Not already sent to the store buffer as a store request? */
915  if (!inst->inStoreBuffer) {
916  /* Insert an entry into the store buffer to tick off barriers
917  * until there are none in flight */
918  storeBuffer.insert(new BarrierDataRequest(*this, inst));
919  }
920  } else {
921  /* Clear the barrier anyway if it wasn't actually committed */
922  clearMemBarrier(inst);
923  }
924 }
925 
926 void
927 LSQ::StoreBuffer::minorTrace() const
928 {
929  unsigned int size = slots.size();
930  unsigned int i = 0;
931  std::ostringstream os;
932 
933  while (i < size) {
934  LSQRequestPtr request = slots[i];
935 
936  request->reportData(os);
937 
938  i++;
939  if (i < numSlots)
940  os << ',';
941  }
942 
943  while (i < numSlots) {
944  os << '-';
945 
946  i++;
947  if (i < numSlots)
948  os << ',';
949  }
950 
951  MINORTRACE("addr=%s num_unissued_stores=%d\n", os.str(),
952  numUnissuedAccesses);
953 }
954 
955 void
956 LSQ::tryToSendToTransfers(LSQRequestPtr request)
957 {
958  if (state == MemoryNeedsRetry) {
959  DPRINTF(MinorMem, "Request needs retry, not issuing to"
960  " memory until retry arrives\n");
961  return;
962  }
963 
964  if (request->state == LSQRequest::InTranslation) {
965  DPRINTF(MinorMem, "Request still in translation, not issuing to"
966  " memory\n");
967  return;
968  }
969 
970  assert(request->state == LSQRequest::Translated ||
971  request->state == LSQRequest::RequestIssuing ||
972  request->state == LSQRequest::Failed ||
973  request->state == LSQRequest::Complete);
974 
975  if (requests.empty() || requests.front() != request) {
976  DPRINTF(MinorMem, "Request not at front of requests queue, can't"
977  " issue to memory\n");
978  return;
979  }
980 
981  if (transfers.unreservedRemainingSpace() == 0) {
982  DPRINTF(MinorMem, "No space to insert request into transfers"
983  " queue\n");
984  return;
985  }
986 
987  if (request->isComplete() || request->state == LSQRequest::Failed) {
988  DPRINTF(MinorMem, "Passing a %s transfer on to transfers"
989  " queue\n", (request->isComplete() ? "completed" : "failed"));
990  request->setState(LSQRequest::Complete);
991  request->setSkipped();
992  moveFromRequestsToTransfers(request);
993  return;
994  }
995 
996  if (!execute.instIsRightStream(request->inst)) {
997  /* Wrong stream, try to abort the transfer but only do so if
998  * there are no packets in flight */
999  if (request->hasPacketsInMemSystem()) {
1000  DPRINTF(MinorMem, "Request's inst. is from the wrong stream,"
1001  " waiting for responses before aborting request\n");
1002  } else {
1003  DPRINTF(MinorMem, "Request's inst. is from the wrong stream,"
1004  " aborting request\n");
1005  request->setState(LSQRequest::Complete);
1006  request->setSkipped();
1007  moveFromRequestsToTransfers(request);
1008  }
1009  return;
1010  }
1011 
1012  if (request->inst->translationFault != NoFault) {
1013  if (request->inst->staticInst->isPrefetch()) {
1014  DPRINTF(MinorMem, "Not signalling fault for faulting prefetch\n");
1015  }
1016  DPRINTF(MinorMem, "Moving faulting request into the transfers"
1017  " queue\n");
1018  request->setState(LSQRequest::Complete);
1019  request->setSkipped();
1020  moveFromRequestsToTransfers(request);
1021  return;
1022  }
1023 
1024  bool is_load = request->isLoad;
1025  bool is_llsc = request->request->isLLSC();
1026  bool is_release = request->request->isRelease();
1027  bool is_swap = request->request->isSwap();
1028  bool is_atomic = request->request->isAtomic();
1029  bool bufferable = !(request->request->isStrictlyOrdered() ||
1030  is_llsc || is_swap || is_atomic || is_release);
1031 
1032  if (is_load) {
1033  if (numStoresInTransfers != 0) {
1034  DPRINTF(MinorMem, "Load request with stores still in transfers"
1035  " queue, stalling\n");
1036  return;
1037  }
1038  } else {
1039  /* Store. Can it be sent to the store buffer? */
1040  if (bufferable && !request->request->isLocalAccess()) {
1041  request->setState(LSQRequest::StoreToStoreBuffer);
1042  moveFromRequestsToTransfers(request);
1043  DPRINTF(MinorMem, "Moving store into transfers queue\n");
1044  return;
1045  }
1046  }
1047 
1048  // Process store conditionals or store release after all previous
1049  // stores are completed
1050  if (((!is_load && is_llsc) || is_release) &&
1051  !storeBuffer.isDrained()) {
1052  DPRINTF(MinorMem, "Memory access needs to wait for store buffer"
1053  " to drain\n");
1054  return;
1055  }
1056 
1057  /* Check if this is the head instruction (and so must be executable as
1058  * its stream sequence number was checked above) for loads which must
1059  * not be speculatively issued and stores which must be issued here */
1060  if (!bufferable) {
1061  if (!execute.instIsHeadInst(request->inst)) {
1062  DPRINTF(MinorMem, "Memory access not the head inst., can't be"
1063  " sure it can be performed, not issuing\n");
1064  return;
1065  }
1066 
1067  unsigned int forwarding_slot = 0;
1068 
1069  if (storeBuffer.canForwardDataToLoad(request, forwarding_slot) !=
1070  NoAddrRangeCoverage)
1071  {
1072  // There's at least another request that targets the same
1073  // address and is staying in the storeBuffer. Since our
1074  // request is non-bufferable (e.g., strictly ordered or atomic),
1075  // we must wait for the other request in the storeBuffer to
1076  // complete before we can issue this non-bufferable request.
1077  // This is to make sure that the order they access the cache is
1078  // correct.
1079  DPRINTF(MinorMem, "Memory access can receive forwarded data"
1080  " from the store buffer, but need to wait for store buffer"
1081  " to drain\n");
1082  return;
1083  }
1084  }
1085 
1086  /* True: submit this packet to the transfers queue to be sent to the
1087  * memory system.
1088  * False: skip the memory and push a packet for this request onto
1089  * requests */
1090  bool do_access = true;
1091 
1092  if (!is_llsc) {
1093  /* Check for match in the store buffer */
1094  if (is_load) {
1095  unsigned int forwarding_slot = 0;
1096  AddrRangeCoverage forwarding_result =
1097  storeBuffer.canForwardDataToLoad(request,
1098  forwarding_slot);
1099 
1100  switch (forwarding_result) {
1101  case FullAddrRangeCoverage:
1102  /* Forward data from the store buffer into this request and
1103  * repurpose this request's packet into a response packet */
1104  storeBuffer.forwardStoreData(request, forwarding_slot);
1105  request->packet->makeResponse();
1106 
1107  /* Just move between queues, no access */
1108  do_access = false;
1109  break;
1110  case PartialAddrRangeCoverage:
1111  DPRINTF(MinorMem, "Load partly satisfied by store buffer"
1112  " data. Must wait for the store to complete\n");
1113  return;
1114  break;
1115  case NoAddrRangeCoverage:
1116  DPRINTF(MinorMem, "No forwardable data from store buffer\n");
1117  /* Fall through to try access */
1118  break;
1119  }
1120  }
1121  } else {
1122  if (!canSendToMemorySystem()) {
1123  DPRINTF(MinorMem, "Can't send request to memory system yet\n");
1124  return;
1125  }
1126 
1127  SimpleThread &thread = *cpu.threads[request->inst->id.threadId];
1128 
1129  TheISA::PCState old_pc = thread.pcState();
1130  ExecContext context(cpu, thread, execute, request->inst);
1131 
1132  /* Handle LLSC requests and tests */
1133  if (is_load) {
1134  TheISA::handleLockedRead(&context, request->request);
1135  } else {
1136  do_access = TheISA::handleLockedWrite(&context,
1137  request->request, cacheBlockMask);
1138 
1139  if (!do_access) {
1140  DPRINTF(MinorMem, "Not performing a memory "
1141  "access for store conditional\n");
1142  }
1143  }
1144  thread.pcState(old_pc);
1145  }
1146 
1147  /* See the do_access comment above */
1148  if (do_access) {
1149  if (!canSendToMemorySystem()) {
1150  DPRINTF(MinorMem, "Can't send request to memory system yet\n");
1151  return;
1152  }
1153 
1154  /* Remember if this is an access which can't be idly
1155  * discarded by an interrupt */
1156  if (!bufferable && !request->issuedToMemory) {
1157  numAccessesIssuedToMemory++;
1158  request->issuedToMemory = true;
1159  }
1160 
1161  if (tryToSend(request)) {
1162  moveFromRequestsToTransfers(request);
1163  }
1164  } else {
1165  request->setState(LSQRequest::Complete);
1166  moveFromRequestsToTransfers(request);
1167  }
1168 }
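/* Editorial summary of tryToSendToTransfers above: a request leaves the
 * requests queue only when it is at the head, fully translated, from the
 * right instruction stream and there is space in transfers; bufferable
 * stores are marked StoreToStoreBuffer and moved on towards the store
 * buffer, loads may instead be satisfied by full store-buffer forwarding,
 * and non-bufferable accesses (LLSC, atomics, strictly-ordered) must be
 * the head instruction, with store-conditionals and releases additionally
 * waiting for the store buffer to drain. */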
1169 
1170 bool
1171 LSQ::tryToSend(LSQRequestPtr request)
1172 {
1173  bool ret = false;
1174 
1175  if (!canSendToMemorySystem()) {
1176  DPRINTF(MinorMem, "Can't send request: %s yet, no space in memory\n",
1177  *(request->inst));
1178  } else {
1179  PacketPtr packet = request->getHeadPacket();
1180 
1181  DPRINTF(MinorMem, "Trying to send request: %s addr: 0x%x\n",
1182  *(request->inst), packet->req->getVaddr());
1183 
1184  /* The sender state of the packet *must* be an LSQRequest
1185  * so the response can be correctly handled */
1186  assert(packet->findNextSenderState<LSQRequest>());
1187 
1188  if (request->request->isLocalAccess()) {
1189  ThreadContext *thread =
1190  cpu.getContext(cpu.contextToThread(
1191  request->request->contextId()));
1192 
1193  if (request->isLoad)
1194  DPRINTF(MinorMem, "IPR read inst: %s\n", *(request->inst));
1195  else
1196  DPRINTF(MinorMem, "IPR write inst: %s\n", *(request->inst));
1197 
1198  request->request->localAccessor(thread, packet);
1199 
1200  request->stepToNextPacket();
1201  ret = request->sentAllPackets();
1202 
1203  if (!ret) {
1204  DPRINTF(MinorMem, "IPR access has another packet: %s\n",
1205  *(request->inst));
1206  }
1207 
1208  if (ret)
1209  request->setState(LSQRequest::Complete);
1210  else
1211  request->setState(LSQRequest::StoreInStoreBuffer);
1212  } else if (dcachePort.sendTimingReq(packet)) {
1213  DPRINTF(MinorMem, "Sent data memory request\n");
1214 
1215  numAccessesInMemorySystem++;
1216 
1217  request->stepToNextPacket();
1218 
1219  ret = request->sentAllPackets();
1220 
1221  switch (request->state) {
1222  case LSQRequest::Translated:
1223  case LSQRequest::RequestIssuing:
1224  /* Fully or partially issued a request in the transfers
1225  * queue */
1226  request->setState(LSQRequest::RequestIssuing);
1227  break;
1228  case LSQRequest::Complete:
1229  case LSQRequest::StoreBufferIssuing:
1230  /* Fully or partially issued a request in the store
1231  * buffer */
1232  request->setState(LSQRequest::StoreBufferIssuing);
1233  break;
1234  default:
1235  panic("Unrecognized LSQ request state %d.", request->state);
1236  }
1237 
1238  state = MemoryRunning;
1239  } else {
1240  DPRINTF(MinorMem,
1241  "Sending data memory request - needs retry\n");
1242 
1243  /* Needs to be resent, wait for that */
1244  state = MemoryNeedsRetry;
1245  retryRequest = request;
1246 
1247  switch (request->state) {
1248  case LSQRequest::Translated:
1249  case LSQRequest::RequestIssuing:
1250  request->setState(LSQRequest::RequestNeedsRetry);
1251  break;
1252  case LSQRequest::Complete:
1253  case LSQRequest::StoreBufferIssuing:
1254  request->setState(LSQRequest::StoreBufferNeedsRetry);
1255  break;
1256  default:
1257  panic("Unrecognized LSQ request state %d.", request->state);
1258  }
1259  }
1260  }
1261 
1262  if (ret)
1263  threadSnoop(request);
1264 
1265  return ret;
1266 }
1267 
1268 void
1269 LSQ::moveFromRequestsToTransfers(LSQRequestPtr request)
1270 {
1271  assert(!requests.empty() && requests.front() == request);
1272  assert(transfers.unreservedRemainingSpace() != 0);
1273 
1274  /* Need to count the number of stores in the transfers
1275  * queue so that loads know when their store buffer forwarding
1276  * results will be correct (only when all those stores
1277  * have reached the store buffer) */
1278  if (!request->isLoad)
1279  numStoresInTransfers++;
1280 
1281  requests.pop();
1282  transfers.push(request);
1283 }
1284 
1285 bool
1286 LSQ::canSendToMemorySystem()
1287 {
1288  return state == MemoryRunning &&
1289  numAccessesInMemorySystem < inMemorySystemLimit;
1290 }
1291 
1292 bool
1293 LSQ::recvTimingResp(PacketPtr response)
1294 {
1295  LSQRequestPtr request =
1296  safe_cast<LSQRequestPtr>(response->popSenderState());
1297 
1298  DPRINTF(MinorMem, "Received response packet inst: %s"
1299  " addr: 0x%x cmd: %s\n",
1300  *(request->inst), response->getAddr(),
1301  response->cmd.toString());
1302 
1304 
1305  if (response->isError()) {
1306  DPRINTF(MinorMem, "Received error response packet: %s\n",
1307  *request->inst);
1308  }
1309 
1310  switch (request->state) {
1311  case LSQRequest::RequestIssuing:
1312  case LSQRequest::RequestNeedsRetry:
1313  /* Response to a request from the transfers queue */
1314  request->retireResponse(response);
1315 
1316  DPRINTF(MinorMem, "Has outstanding packets?: %d %d\n",
1317  request->hasPacketsInMemSystem(), request->isComplete());
1318 
1319  break;
1320  case LSQRequest::StoreBufferIssuing:
1321  case LSQRequest::StoreBufferNeedsRetry:
1322  /* Response to a request from the store buffer */
1323  request->retireResponse(response);
1324 
1325  /* Remove completed requests unless they are barriers (which will
1326  * need to be removed in order) */
1327  if (request->isComplete()) {
1328  if (!request->isBarrier()) {
1329  storeBuffer.deleteRequest(request);
1330  } else {
1331  DPRINTF(MinorMem, "Completed transfer for barrier: %s"
1332  " leaving the request as it is also a barrier\n",
1333  *(request->inst));
1334  }
1335  }
1336  break;
1337  default:
1338  panic("Shouldn't be allowed to receive a response from another state");
1339  }
1340 
1341  /* We go to idle even if there are more things in the requests queue
1342  * as it's the job of step to actually step us on to the next
1343  * transaction */
1344 
1345  /* Let's try and wake up the processor for the next cycle */
1346  cpu.wakeupOnEvent(Pipeline::ExecuteStageId);
1347 
1348  /* Never busy */
1349  return true;
1350 }
1351 
1352 void
1353 LSQ::recvReqRetry()
1354 {
1355  DPRINTF(MinorMem, "Received retry request\n");
1356 
1357  assert(state == MemoryNeedsRetry);
1358 
1359  switch (retryRequest->state) {
1360  case LSQRequest::RequestNeedsRetry:
1361  /* Retry in the requests queue */
1362  retryRequest->setState(LSQRequest::Translated);
1363  break;
1364  case LSQRequest::StoreBufferNeedsRetry:
1365  /* Retry in the store buffer */
1366  retryRequest->setState(LSQRequest::StoreInStoreBuffer);
1367  break;
1368  default:
1369  panic("Unrecognized retry request state %d.", retryRequest->state);
1370  }
1371 
1372  /* Set state back to MemoryRunning so that the following
1373  * tryToSend can actually send. Note that this won't
1374  * allow another transfer in as tryToSend should
1375  * issue a memory request and either succeed for this
1376  * request or return the LSQ back to MemoryNeedsRetry */
1377  state = MemoryRunning;
1378 
1379  /* Try to resend the request */
1380  if (tryToSend(retryRequest)) {
1381  /* Successfully sent, need to move the request */
1382  switch (retryRequest->state) {
1383  case LSQRequest::RequestIssuing:
1384  /* In the requests queue */
1385  moveFromRequestsToTransfers(retryRequest);
1386  break;
1387  case LSQRequest::StoreBufferIssuing:
1388  /* In the store buffer */
1389  storeBuffer.countIssuedStore(retryRequest);
1390  break;
1391  default:
1392  panic("Unrecognized retry request state %d.", retryRequest->state);
1393  }
1394 
1395  retryRequest = NULL;
1396  }
1397 }
1398 
1399 LSQ::LSQ(std::string name_, std::string dcache_port_name_,
1400  MinorCPU &cpu_, Execute &execute_,
1401  unsigned int in_memory_system_limit, unsigned int line_width,
1402  unsigned int requests_queue_size, unsigned int transfers_queue_size,
1403  unsigned int store_buffer_size,
1404  unsigned int store_buffer_cycle_store_limit) :
1405  Named(name_),
1406  cpu(cpu_),
1407  execute(execute_),
1408  dcachePort(dcache_port_name_, *this, cpu_),
1409  lastMemBarrier(cpu.numThreads, 0),
1410  state(MemoryRunning),
1411  inMemorySystemLimit(in_memory_system_limit),
1412  lineWidth((line_width == 0 ? cpu.cacheLineSize() : line_width)),
1413  requests(name_ + ".requests", "addr", requests_queue_size),
1414  transfers(name_ + ".transfers", "addr", transfers_queue_size),
1415  storeBuffer(name_ + ".storeBuffer",
1416  *this, store_buffer_size, store_buffer_cycle_store_limit),
1417  numAccessesInMemorySystem(0),
1418  numAccessesInDTLB(0),
1419  numStoresInTransfers(0),
1420  numAccessesIssuedToMemory(0),
1421  retryRequest(NULL),
1422  cacheBlockMask(~(cpu_.cacheLineSize() - 1))
1423 {
1424  if (in_memory_system_limit < 1) {
1425  fatal("%s: executeMaxAccessesInMemory must be >= 1 (%d)\n", name_,
1426  in_memory_system_limit);
1427  }
1428 
1429  if (store_buffer_cycle_store_limit < 1) {
1430  fatal("%s: executeLSQMaxStoreBufferStoresPerCycle must be"
1431  " >= 1 (%d)\n", name_, store_buffer_cycle_store_limit);
1432  }
1433 
1434  if (requests_queue_size < 1) {
1435  fatal("%s: executeLSQRequestsQueueSize must be"
1436  " >= 1 (%d)\n", name_, requests_queue_size);
1437  }
1438 
1439  if (transfers_queue_size < 1) {
1440  fatal("%s: executeLSQTransfersQueueSize must be"
1441  " >= 1 (%d)\n", name_, transfers_queue_size);
1442  }
1443 
1444  if (store_buffer_size < 1) {
1445  fatal("%s: executeLSQStoreBufferSize must be"
1446  " >= 1 (%d)\n", name_, store_buffer_size);
1447  }
1448 
1449  if ((lineWidth & (lineWidth - 1)) != 0) {
1450  fatal("%s: lineWidth: %d must be a power of 2\n", name(), lineWidth);
1451  }
1452 }
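/* Editorial note: with the default 64-byte cache line size, the
 * cacheBlockMask initialised above is ~(64 - 1) = ~0x3f, the mask used by
 * the locked-access (LLSC) and snoop handling below to identify the cache
 * block an address belongs to. */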
1453 
1454 LSQ::~LSQ()
1455 { }
1456 
1457 LSQ::LSQRequest::~LSQRequest()
1458 {
1459  if (packet)
1460  delete packet;
1461  if (data)
1462  delete [] data;
1463 }
1464 
1471 void
1472 LSQ::step()
1473 {
1474  /* Try to move address-translated requests between queues and issue
1475  * them */
1476  if (!requests.empty())
1477  tryToSendToTransfers(requests.front());
1478 
1479  storeBuffer.step();
1480 }
1481 
1482 LSQ::LSQRequestPtr
1483 LSQ::findResponse(MinorDynInstPtr inst)
1484 {
1485  LSQ::LSQRequestPtr ret = NULL;
1486 
1487  if (!transfers.empty()) {
1488  LSQRequestPtr request = transfers.front();
1489 
1490  /* Same instruction and complete access or a store that's
1491  * capable of being moved to the store buffer */
1492  if (request->inst->id == inst->id) {
1493  bool complete = request->isComplete();
1494  bool can_store = storeBuffer.canInsert();
1495  bool to_store_buffer = request->state ==
1496  LSQRequest::StoreToStoreBuffer;
1497 
1498  if ((complete && !(request->isBarrier() && !can_store)) ||
1499  (to_store_buffer && can_store))
1500  {
1501  ret = request;
1502  }
1503  }
1504  }
1505 
1506  if (ret) {
1507  DPRINTF(MinorMem, "Found matching memory response for inst: %s\n",
1508  *inst);
1509  } else {
1510  DPRINTF(MinorMem, "No matching memory response for inst: %s\n",
1511  *inst);
1512  }
1513 
1514  return ret;
1515 }
1516 
1517 void
1518 LSQ::popResponse(LSQRequestPtr response)
1519 {
1520  assert(!transfers.empty() && transfers.front() == response);
1521 
1522  transfers.pop();
1523 
1524  if (!response->isLoad)
1525  numStoresInTransfers--;
1526 
1527  if (response->issuedToMemory)
1528  numAccessesIssuedToMemory--;
1529 
1530  if (response->state != LSQRequest::StoreInStoreBuffer) {
1531  DPRINTF(MinorMem, "Deleting %s request: %s\n",
1532  (response->isLoad ? "load" : "store"),
1533  *(response->inst));
1534 
1535  delete response;
1536  }
1537 }
1538 
1539 void
1540 LSQ::sendStoreToStoreBuffer(LSQRequestPtr request)
1541 {
1542  assert(request->state == LSQRequest::StoreToStoreBuffer);
1543 
1544  DPRINTF(MinorMem, "Sending store: %s to store buffer\n",
1545  *(request->inst));
1546 
1547  request->inst->inStoreBuffer = true;
1548 
1549  storeBuffer.insert(request);
1550 }
1551 
1552 bool
1553 LSQ::isDrained()
1554 {
1555  return requests.empty() && transfers.empty() &&
1556  storeBuffer.isDrained();
1557 }
1558 
1559 bool
1560 LSQ::needsToTick()
1561 {
1562  bool ret = false;
1563 
1564  if (canSendToMemorySystem()) {
1565  bool have_translated_requests = !requests.empty() &&
1566  requests.front()->state != LSQRequest::InTranslation &&
1567  transfers.unreservedRemainingSpace() != 0;
1568 
1569  ret = have_translated_requests ||
1570  storeBuffer.numUnissuedStores() != 0;
1571  }
1572 
1573  if (ret)
1574  DPRINTF(Activity, "Need to tick\n");
1575 
1576  return ret;
1577 }
1578 
1579 Fault
1580 LSQ::pushRequest(MinorDynInstPtr inst, bool isLoad, uint8_t *data,
1581  unsigned int size, Addr addr, Request::Flags flags,
1582  uint64_t *res, AtomicOpFunctorPtr amo_op,
1583  const std::vector<bool>& byte_enable)
1584 {
1585  assert(inst->translationFault == NoFault || inst->inLSQ);
1586 
1587  if (inst->inLSQ) {
1588  return inst->translationFault;
1589  }
1590 
1591  bool needs_burst = transferNeedsBurst(addr, size, lineWidth);
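 /* Editorial example (illustrative values): with a 64-byte lineWidth an
  * 8-byte access at addr 0x3c ends at 0x44 and so crosses a line boundary,
  * needing a burst (a SplitDataRequest below); the same access at 0x38
  * fits within one line and becomes a SingleDataRequest. */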
1592 
1593  if (needs_burst && inst->staticInst->isAtomic()) {
1594  // AMO requests that access across a cache line boundary are not
1595  // allowed since the cache does not guarantee AMO ops to be executed
1596  // atomically in two cache lines
1597  // For ISAs such as x86 that requires AMO operations to work on
1598  // accesses that cross cache-line boundaries, the cache needs to be
1599  // modified to support locking both cache lines to guarantee the
1600  // atomicity.
1601  panic("Do not expect cross-cache-line atomic memory request\n");
1602  }
1603 
1604  LSQRequestPtr request;
1605 
1606  /* Copy given data into the request. The request will pass this to the
1607  * packet and then it will own the data */
1608  uint8_t *request_data = NULL;
1609 
1610  DPRINTF(MinorMem, "Pushing request (%s) addr: 0x%x size: %d flags:"
1611  " 0x%x%s lineWidth : 0x%x\n",
1612  (isLoad ? "load" : "store/atomic"), addr, size, flags,
1613  (needs_burst ? " (needs burst)" : ""), lineWidth);
1614 
1615  if (!isLoad) {
1616  /* Request_data becomes the property of a ...DataRequest (see below)
1617  * and destroyed by its destructor */
1618  request_data = new uint8_t[size];
1619  if (inst->staticInst->isAtomic() ||
1620  (flags & Request::STORE_NO_DATA)) {
1621  /* For atomic or store-no-data, just use zeroed data */
1622  std::memset(request_data, 0, size);
1623  } else {
1624  std::memcpy(request_data, data, size);
1625  }
1626  }
1627 
1628  if (needs_burst) {
1629  request = new SplitDataRequest(
1630  *this, inst, isLoad, request_data, res);
1631  } else {
1632  request = new SingleDataRequest(
1633  *this, inst, isLoad, request_data, res);
1634  }
1635 
1636  if (inst->traceData)
1637  inst->traceData->setMem(addr, size, flags);
1638 
1639  int cid = cpu.threads[inst->id.threadId]->getTC()->contextId();
1640  request->request->setContext(cid);
1641  request->request->setVirt(
1642  addr, size, flags, cpu.dataRequestorId(),
1643  /* I've no idea why we need the PC, but give it */
1644  inst->pc.instAddr(), std::move(amo_op));
1645  request->request->setByteEnable(byte_enable);
1646 
1647  requests.push(request);
1648  inst->inLSQ = true;
1649  request->startAddrTranslation();
1650 
1651  return inst->translationFault;
1652 }
1653 
1654 void
1655 LSQ::pushFailedRequest(MinorDynInstPtr inst)
1656 {
1657  LSQRequestPtr request = new FailedDataRequest(*this, inst);
1658  requests.push(request);
1659 }
1660 
1661 void
1662 LSQ::minorTrace() const
1663 {
1664  MINORTRACE("state=%s in_tlb_mem=%d/%d stores_in_transfers=%d"
1665  " lastMemBarrier=%d\n",
1666  state, numAccessesInDTLB, numAccessesInMemorySystem,
1667  numStoresInTransfers, lastMemBarrier[0]);
1668  requests.minorTrace();
1669  transfers.minorTrace();
1670  storeBuffer.minorTrace();
1671 }
1672 
1673 LSQ::StoreBuffer::StoreBuffer(std::string name_, LSQ &lsq_,
1674  unsigned int store_buffer_size,
1675  unsigned int store_limit_per_cycle) :
1676  Named(name_), lsq(lsq_),
1677  numSlots(store_buffer_size),
1678  storeLimitPerCycle(store_limit_per_cycle),
1679  slots(),
1680  numUnissuedAccesses(0)
1681 {
1682 }
1683 
1684 PacketPtr
1685 makePacketForRequest(const RequestPtr &request, bool isLoad,
1686  Packet::SenderState *sender_state, PacketDataPtr data)
1687 {
1688  PacketPtr ret = isLoad ? Packet::createRead(request)
1689  : Packet::createWrite(request);
1690 
1691  if (sender_state)
1692  ret->pushSenderState(sender_state);
1693 
1694  if (isLoad) {
1695  ret->allocate();
1696  } else if (!request->isCacheMaintenance()) {
1697  // CMOs are treated as stores but they don't have data. All
1698  // stores otherwise need to allocate for data.
1699  ret->dataDynamic(data);
1700  }
1701 
1702  return ret;
1703 }
1704 
1705 void
1706 LSQ::issuedMemBarrierInst(MinorDynInstPtr inst)
1707 {
1708  assert(inst->isInst() && inst->staticInst->isFullMemBarrier());
1709  assert(inst->id.execSeqNum > lastMemBarrier[inst->id.threadId]);
1710 
1711  /* Remember the barrier. We only have a notion of one
1712  * barrier so this may result in some mem refs being
1713  * delayed if they are between barriers */
1714  lastMemBarrier[inst->id.threadId] = inst->id.execSeqNum;
1715 }
1716 
1717 void
1718 LSQ::LSQRequest::makePacket()
1719 {
1720  assert(inst->translationFault == NoFault);
1721 
1722  /* Make the function idempotent */
1723  if (packet)
1724  return;
1725 
1726  packet = makePacketForRequest(request, isLoad, this, data);
1727  /* Null the data pointer so we know not to deallocate it when this
1728  * request is destroyed. The data now belongs to the packet and
1729  * the packet is responsible for its destruction */
1730  data = NULL;
1731 }
1732 
1733 std::ostream &
1734 operator <<(std::ostream &os, LSQ::MemoryState state)
1735 {
1736  switch (state) {
1737  case LSQ::MemoryRunning:
1738  os << "MemoryRunning";
1739  break;
1740  case LSQ::MemoryNeedsRetry:
1741  os << "MemoryNeedsRetry";
1742  break;
1743  default:
1744  os << "MemoryState-" << static_cast<int>(state);
1745  break;
1746  }
1747  return os;
1748 }
1749 
1750 void
1751 LSQ::recvTimingSnoopReq(PacketPtr pkt)
1752 {
1753  /* LLSC operations in Minor can't be speculative and are executed from
1754  * the head of the requests queue. We shouldn't need to do more than
1755  * this action on snoops. */
1756  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1757  if (cpu.getCpuAddrMonitor(tid)->doMonitor(pkt)) {
1758  cpu.wakeup(tid);
1759  }
1760  }
1761 
1762  if (pkt->isInvalidate() || pkt->isWrite()) {
1763  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1764  TheISA::handleLockedSnoop(cpu.getContext(tid), pkt,
1765  cacheBlockMask);
1766  }
1767  }
1768 }
1769 
1770 void
1771 LSQ::threadSnoop(LSQRequestPtr request)
1772 {
1773  /* LLSC operations in Minor can't be speculative and are executed from
1774  * the head of the requests queue. We shouldn't need to do more than
1775  * this action on snoops. */
1776  ThreadID req_tid = request->inst->id.threadId;
1777  PacketPtr pkt = request->packet;
1778 
1779  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1780  if (tid != req_tid) {
1781  if (cpu.getCpuAddrMonitor(tid)->doMonitor(pkt)) {
1782  cpu.wakeup(tid);
1783  }
1784 
1785  if (pkt->isInvalidate() || pkt->isWrite()) {
1786  TheISA::handleLockedSnoop(cpu.getContext(tid), pkt,
1787  cacheBlockMask);
1788  }
1789  }
1790  }
1791 }
1792 
1793 }
fatal
#define fatal(...)
This implements a cprintf based fatal() function.
Definition: logging.hh:183
Packet::isError
bool isError() const
Definition: packet.hh:584
Minor::LSQ::LSQRequest::LSQRequestState
LSQRequestState
Definition: lsq.hh:165
AtomicOpFunctorPtr
std::unique_ptr< AtomicOpFunctor > AtomicOpFunctorPtr
Definition: amo.hh:239
Minor::LSQ::SingleDataRequest::retireResponse
void retireResponse(PacketPtr packet_)
Keep the given packet as the response packet LSQRequest::packet.
Definition: lsq.cc:322
Minor::LSQ::SplitDataRequest::SplitDataRequest
SplitDataRequest(LSQ &port_, MinorDynInstPtr inst_, bool isLoad_, PacketDataPtr data_=NULL, uint64_t *res_=NULL)
Definition: lsq.cc:389
MinorCPU::wakeup
void wakeup(ThreadID tid) override
Definition: cpu.cc:152
ArmISA::handleLockedWrite
bool handleLockedWrite(XC *xc, const RequestPtr &req, Addr cacheBlockMask)
Definition: locked_mem.hh:111
Minor::LSQ::popResponse
void popResponse(LSQRequestPtr response)
Sanity check and pop the head response.
Definition: lsq.cc:1518
Minor::LSQ::LSQRequest::RequestNeedsRetry
@ RequestNeedsRetry
Definition: lsq.hh:175
utils.hh
MinorCPU::wakeupOnEvent
void wakeupOnEvent(unsigned int stage_id)
Interface for stages to signal that they have become active after a callback or eventq event where th...
Definition: cpu.cc:300
Minor::LSQ::requests
LSQQueue requests
requests contains LSQRequests which have been issued to the TLB by calling ExecContext::readMem/write...
Definition: lsq.hh:570
Minor::LSQ::operator<<
friend std::ostream & operator<<(std::ostream &os, MemoryState state)
Print MemoryState values as shown in the enum definition.
Definition: lsq.cc:1734
X86ISA::os
Bitfield< 17 > os
Definition: misc.hh:803
warn
#define warn(...)
Definition: logging.hh:239
Minor::LSQ::LSQRequest::RequestIssuing
@ RequestIssuing
Definition: lsq.hh:171
SimpleThread::pcState
TheISA::PCState pcState() const override
Definition: simple_thread.hh:505
Minor::LSQ::LSQRequest::StoreBufferNeedsRetry
@ StoreBufferNeedsRetry
Definition: lsq.hh:180
Minor::LSQ::numAccessesIssuedToMemory
unsigned int numAccessesIssuedToMemory
The number of accesses which have been issued to the memory system but have not been committed/discar...
Definition: lsq.hh:611
BaseTLB::Read
@ Read
Definition: tlb.hh:57
data
const char data[]
Definition: circlebuf.test.cc:47
ArmISA::handleLockedSnoop
void handleLockedSnoop(XC *xc, PacketPtr pkt, Addr cacheBlockMask)
Definition: locked_mem.hh:62
Minor::LSQ::MemoryNeedsRetry
@ MemoryNeedsRetry
Definition: lsq.hh:75
Packet::getAddr
Addr getAddr() const
Definition: packet.hh:755
Minor::LSQ::recvReqRetry
void recvReqRetry()
Definition: lsq.cc:1353
Minor::LSQ::pushFailedRequest
void pushFailedRequest(MinorDynInstPtr inst)
Push a predicate failed-representing request into the queues just to maintain commit order.
Definition: lsq.cc:1655
Minor::LSQ::tryToSendToTransfers
void tryToSendToTransfers(LSQRequestPtr request)
Try and issue a memory access for a translated request at the head of the requests queue.
Definition: lsq.cc:956
ArmISA::i
Bitfield< 7 > i
Definition: miscregs_types.hh:63
Minor::LSQ::LSQRequest::Failed
@ Failed
Definition: lsq.hh:170
ThreadID
int16_t ThreadID
Thread index/ID type.
Definition: types.hh:233
Minor::LSQ::LSQRequest::LSQRequest
LSQRequest(LSQ &port_, MinorDynInstPtr inst_, bool isLoad_, PacketDataPtr data_=NULL, uint64_t *res_=NULL)
Definition: lsq.cc:56
Flags< FlagsType >
Minor::LSQ::transfers
LSQQueue transfers
Once issued to memory (or, for stores, just had their state changed to StoreToStoreBuffer) LSQRequest...
Definition: lsq.hh:579
Minor::LSQ::SplitDataRequest::finish
void finish(const Fault &fault_, const RequestPtr &request_, ThreadContext *tc, BaseTLB::Mode mode)
TLB response interface.
Definition: lsq.cc:331
ThreadContext::getMMUPtr
virtual BaseMMU * getMMUPtr()=0
AddressMonitor::doMonitor
bool doMonitor(PacketPtr pkt)
Definition: base.cc:684
Minor::Pipeline::ExecuteStageId
@ ExecuteStageId
Definition: pipeline.hh:100
Minor::LSQ::LSQRequest::reportData
void reportData(std::ostream &os) const
MinorTrace report interface.
Definition: lsq.cc:183
Minor::LSQ::numAccessesInDTLB
unsigned int numAccessesInDTLB
Number of requests in the DTLB in the requests queue.
Definition: lsq.hh:602
Minor::LSQ::SplitDataRequest::retireResponse
void retireResponse(PacketPtr packet_)
For loads, paste the response data into the main response packet.
Definition: lsq.cc:621
Minor::LSQ::numAccessesInMemorySystem
unsigned int numAccessesInMemorySystem
Count of the number of mem.
Definition: lsq.hh:599
BaseTLB::Mode
Mode
Definition: tlb.hh:57
Minor::makePacketForRequest
PacketPtr makePacketForRequest(const RequestPtr &request, bool isLoad, Packet::SenderState *sender_state, PacketDataPtr data)
Make a suitable packet for the given request.
Definition: lsq.cc:1685
Minor::Execute::instIsRightStream
bool instIsRightStream(MinorDynInstPtr inst)
Does the given instruction have the right stream sequence number to be committed?
Definition: execute.cc:1870
Minor::LSQ::LSQRequest::completeDisabledMemAccess
void completeDisabledMemAccess()
Definition: lsq.cc:94
Minor::LSQ::LSQRequest::makePacket
void makePacket()
Make a packet to use with the memory transaction.
Definition: lsq.cc:1718
Minor::LSQ::FullAddrRangeCoverage
@ FullAddrRangeCoverage
Definition: lsq.hh:86
PacketDataPtr
uint8_t * PacketDataPtr
Definition: packet.hh:68
Minor::LSQ::LSQRequest::isLoad
bool isLoad
Load/store indication used for building packet.
Definition: lsq.hh:135
Minor::LSQ::pushRequest
Fault pushRequest(MinorDynInstPtr inst, bool isLoad, uint8_t *data, unsigned int size, Addr addr, Request::Flags flags, uint64_t *res, AtomicOpFunctorPtr amo_op, const std::vector< bool > &byte_enable=std::vector< bool >())
Single interface for readMem/writeMem/amoMem to issue requests into the LSQ.
Definition: lsq.cc:1580
Minor::LSQ::step
void step()
Step checks the queues to see if their are issuable transfers which were not otherwise picked up by t...
Definition: lsq.cc:1472
Minor::Queue::minorTrace
void minorTrace() const
Definition: buffers.hh:507
Minor::LSQ::cpu
MinorCPU & cpu
My owner(s)
Definition: lsq.hh:67
Packet::isInvalidate
bool isInvalidate() const
Definition: packet.hh:572
RequestPtr
std::shared_ptr< Request > RequestPtr
Definition: request.hh:86
Packet::req
RequestPtr req
A pointer to the original request.
Definition: packet.hh:341
MINORTRACE
#define MINORTRACE(...)
DPRINTFN for MinorTrace reporting.
Definition: trace.hh:60
Minor::Queue::empty
bool empty() const
Is the queue empty?
Definition: buffers.hh:504
Packet::dataDynamic
void dataDynamic(T *p)
Set the data pointer to a value that should have delete [] called on it.
Definition: packet.hh:1146
std::vector< bool >
Packet::getSize
unsigned getSize() const
Definition: packet.hh:765
Minor::Queue::push
void push(ElemType &data)
Push an element into the buffer if it isn't a bubble.
Definition: buffers.hh:428
Minor::Queue::unreservedRemainingSpace
unsigned int unreservedRemainingSpace() const
Like remainingSpace but does not count reserved spaces.
Definition: buffers.hh:488
Minor::LSQ::LSQRequest::stepToNextPacket
virtual void stepToNextPacket()=0
Step to the next packet for the next call to getHeadPacket.
BaseCPU::getContext
virtual ThreadContext * getContext(int tn)
Given a thread num, get the thread context for it.
Definition: base.hh:300
Minor::LSQ::LSQRequest::hasPacketsInMemSystem
virtual bool hasPacketsInMemSystem()=0
True if this request has any issued packets in the memory system and so can't be interrupted until it...
Minor::LSQ::cacheBlockMask
Addr cacheBlockMask
Address Mask for a cache block (e.g.
Definition: lsq.hh:618
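Such a mask is conventionally derived from the (power-of-two) cache line size; a minimal standalone sketch of the computation, with an assumed line size:

    #include <cstdint>

    using Addr = uint64_t;

    // For a 64-byte line, ~(64 - 1) keeps only the line-aligned part of an address.
    Addr makeCacheBlockMask(Addr line_size_bytes)
    {
        return ~(line_size_bytes - 1);
    }

    // e.g. addr & makeCacheBlockMask(64) gives the start of addr's cache line.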
Minor::LSQ::SingleDataRequest::startAddrTranslation
void startAddrTranslation()
Send single translation request.
Definition: lsq.cc:298
Minor
Definition: activity.cc:44
execute.hh
SimpleThread
The SimpleThread object provides a combination of the ThreadState object and the ThreadContext interf...
Definition: simple_thread.hh:90
isAnyActiveElement
bool isAnyActiveElement(const std::vector< bool >::const_iterator &it_start, const std::vector< bool >::const_iterator &it_end)
Test if there is any active element in an enablement range.
Definition: utils.hh:86
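A predicate of this shape reduces to a linear scan of the byte-enable range; a minimal standalone sketch of the idea (not the gem5 source):

    #include <algorithm>
    #include <vector>

    // True if any element in [it_start, it_end) is enabled.
    bool anyActiveElement(std::vector<bool>::const_iterator it_start,
                          std::vector<bool>::const_iterator it_end)
    {
        return std::find(it_start, it_end, true) != it_end;
    }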
Minor::LSQ::SplitDataRequest::~SplitDataRequest
~SplitDataRequest()
Definition: lsq.cc:406
Minor::LSQ::execute
Execute & execute
Definition: lsq.hh:68
MinorCPU::threads
std::vector< Minor::MinorThread * > threads
These are thread state-representing objects for this CPU.
Definition: cpu.hh:93
BaseMMU::translateTiming
void translateTiming(const RequestPtr &req, ThreadContext *tc, BaseTLB::Translation *translation, BaseTLB::Mode mode)
Definition: mmu.hh:86
Minor::LSQ::LSQRequest::data
PacketDataPtr data
Dynamically allocated and populated data carried for building write packets.
Definition: lsq.hh:139
Minor::LSQ::completeMemBarrierInst
void completeMemBarrierInst(MinorDynInstPtr inst, bool committed)
Complete a barrier instruction.
Definition: lsq.cc:910
Minor::LSQ::LSQRequest::packet
PacketPtr packet
Definition: lsq.hh:145
Minor::LSQ::StoreBuffer::insert
void insert(LSQRequestPtr request)
Insert a request at the back of the queue.
Definition: lsq.cc:738
Minor::LSQ::MemoryRunning
@ MemoryRunning
Definition: lsq.hh:74
Minor::LSQ::LSQRequest::sentAllPackets
virtual bool sentAllPackets()=0
Have all packets been sent?
Minor::LSQ::SplitDataRequest::startAddrTranslation
void startAddrTranslation()
Start a loop of do { sendNextFragmentToTranslation ; translateTiming ; finish } while (numTranslatedF...
Definition: lsq.cc:580
ThreadContext
ThreadContext is the external interface to all thread state for anything outside of the CPU.
Definition: thread_context.hh:88
Minor::LSQ::StoreBuffer::forwardStoreData
void forwardStoreData(LSQRequestPtr load, unsigned int slot_number)
Fill the given packet with appropriate data from slot slot_number.
Definition: lsq.cc:801
Minor::LSQ::SingleDataRequest::finish
void finish(const Fault &fault_, const RequestPtr &request_, ThreadContext *tc, BaseTLB::Mode mode)
TLB interface.
Definition: lsq.cc:268
exec_context.hh
Minor::LSQ::LSQRequest::Translated
@ Translated
Definition: lsq.hh:169
Packet::SenderState
A virtual base opaque structure used to hold state associated with the packet (e.g....
Definition: packet.hh:432
RequestPort::sendTimingReq
bool sendTimingReq(PacketPtr pkt)
Attempt to send a timing request to the responder port by calling its corresponding receive function.
Definition: port.hh:492
MinorCPU
MinorCPU is an in-order CPU model with four fixed pipeline stages:
Definition: cpu.hh:77
Minor::LSQ::threadSnoop
void threadSnoop(LSQRequestPtr request)
Snoop other threads monitors on memory system accesses.
Definition: lsq.cc:1771
Minor::LSQ::SplitDataRequest::getHeadPacket
PacketPtr getHeadPacket()
Get the head packet as counted by numIssuedFragments.
Definition: lsq.cc:605
MemCmd::toString
const std::string & toString() const
Return the string to a cmd given by idx.
Definition: packet.hh:240
Minor::LSQ::LSQRequest::NotIssued
@ NotIssued
Definition: lsq.hh:167
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:237
Minor::LSQ::SplitDataRequest::makeFragmentRequests
void makeFragmentRequests()
Make all the Requests for this transfer's fragments so that those requests can be sent for address tr...
Definition: lsq.cc:416
Minor::LSQ::StoreBuffer::isDrained
bool isDrained() const
Drained if there is absolutely nothing left in the buffer.
Definition: lsq.hh:524
Minor::LSQ::SplitDataRequest::makeFragmentPackets
void makeFragmentPackets()
Make the packets to go with the requests so they can be sent to the memory system.
Definition: lsq.cc:530
Minor::LSQ::LSQRequest::needsToBeSentToStoreBuffer
bool needsToBeSentToStoreBuffer()
This request, once processed by the requests/transfers queues, will need to go to the store buffer.
Definition: lsq.cc:161
Fault
std::shared_ptr< FaultBase > Fault
Definition: types.hh:246
Packet::createRead
static PacketPtr createRead(const RequestPtr &req)
Constructor-like methods that return Packets based on Request objects.
Definition: packet.hh:981
Minor::LSQ::PartialAddrRangeCoverage
@ PartialAddrRangeCoverage
Definition: lsq.hh:85
Packet::findNextSenderState
T * findNextSenderState() const
Go through the sender state stack and return the first instance that is of type T (as determined by a...
Definition: packet.hh:539
Minor::LSQ::recvTimingSnoopReq
void recvTimingSnoopReq(PacketPtr pkt)
Definition: lsq.cc:1751
Minor::LSQ::minorTrace
void minorTrace() const
Definition: lsq.cc:1662
pipeline.hh
Minor::LSQ::NoAddrRangeCoverage
@ NoAddrRangeCoverage
Definition: lsq.hh:87
Minor::LSQ::StoreBuffer::numUnissuedStores
unsigned int numUnissuedStores()
Number of stores in the store buffer which have not been completely issued to the memory system.
Definition: lsq.hh:516
Minor::LSQ::needsToTick
bool needsToTick()
May need to be ticked next cycle as one of the queues contains an actionable transfer or address tra...
Definition: lsq.cc:1560
RubyTester::SenderState
Definition: RubyTester.hh:86
ArmISA::mode
Bitfield< 4, 0 > mode
Definition: miscregs_types.hh:70
Minor::LSQ::SplitDataRequest
Definition: lsq.hh:385
Minor::LSQ::dcachePort
DcachePort dcachePort
Definition: lsq.hh:116
Minor::LSQ::LSQRequest::disableMemAccess
void disableMemAccess()
Definition: lsq.cc:111
Minor::LSQ::SplitDataRequest::stepToNextPacket
void stepToNextPacket()
Step on numIssuedFragments.
Definition: lsq.cc:613
Minor::Queue::front
ElemType & front()
Head value.
Definition: buffers.hh:496
Minor::LSQ::StoreBuffer::minorTrace
void minorTrace() const
Report queue contents for MinorTrace.
Definition: lsq.cc:927
Minor::LSQ::LSQRequest::StoreInStoreBuffer
@ StoreInStoreBuffer
Definition: lsq.hh:176
Minor::LSQ::LSQRequest::Complete
@ Complete
Definition: lsq.hh:184
BaseCPU::getCpuAddrMonitor
AddressMonitor * getCpuAddrMonitor(ThreadID tid)
Definition: base.hh:618
Minor::Queue::pop
void pop()
Pop the head item.
Definition: buffers.hh:501
addrBlockOffset
Addr addrBlockOffset(Addr addr, Addr block_size)
Calculates the offset of a given address wrt aligned fixed-size blocks.
Definition: utils.hh:50
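For power-of-two block sizes the offset is a simple mask; a minimal standalone sketch of the calculation (assuming block_size is a power of two):

    #include <cstdint>

    using Addr = uint64_t;

    // Offset of addr inside its aligned block of block_size bytes.
    Addr blockOffsetOf(Addr addr, Addr block_size)
    {
        return addr & (block_size - 1);
    }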
Minor::LSQ::tryToSend
bool tryToSend(LSQRequestPtr request)
Try to send (or resend) a memory request's next/only packet to the memory system.
Definition: lsq.cc:1171
NoFault
constexpr decltype(nullptr) NoFault
Definition: types.hh:251
Minor::LSQ::StoreBuffer::canInsert
bool canInsert() const
Can a new request be inserted into the queue?
Definition: lsq.cc:717
Minor::LSQ::moveFromRequestsToTransfers
void moveFromRequestsToTransfers(LSQRequestPtr request)
Move a request between queues.
Definition: lsq.cc:1269
Addr
uint64_t Addr
Address type. This will probably be moved somewhere else in the near future.
Definition: types.hh:148
Minor::LSQ::LSQRequest::InTranslation
@ InTranslation
Definition: lsq.hh:168
Minor::LSQ::LSQRequest::containsAddrRangeOf
static AddrRangeCoverage containsAddrRangeOf(Addr req1_addr, unsigned int req1_size, Addr req2_addr, unsigned int req2_size)
Does address range req1 (req1_addr to req1_addr + req1_size - 1) fully cover, partially cover or not ...
Definition: lsq.cc:118
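The classification amounts to comparing two half-open byte ranges; a standalone sketch of that comparison (illustrative names, not the gem5 enum or source):

    #include <cstdint>

    using Addr = uint64_t;

    enum Coverage { NoCoverage, PartialCoverage, FullCoverage };

    // How does [req1_addr, req1_addr + req1_size) cover
    // [req2_addr, req2_addr + req2_size)?
    Coverage coverageOf(Addr req1_addr, unsigned req1_size,
                        Addr req2_addr, unsigned req2_size)
    {
        Addr req1_end = req1_addr + req1_size;
        Addr req2_end = req2_addr + req2_size;

        if (req1_end <= req2_addr || req2_end <= req1_addr)
            return NoCoverage;                  // disjoint ranges
        if (req1_addr <= req2_addr && req2_end <= req1_end)
            return FullCoverage;                // req1 encloses req2
        return PartialCoverage;                 // overlap without enclosure
    }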
Packet::makeResponse
void makeResponse()
Take a request packet and modify it in place to be suitable for returning as a response to that reque...
Definition: packet.hh:1005
transferNeedsBurst
bool transferNeedsBurst(Addr addr, unsigned int size, unsigned int block_size)
Returns true if the given memory access (address, size) needs to be fragmented across aligned fixed-s...
Definition: utils.hh:77
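An access must be fragmented exactly when its in-block offset plus its size spills past the end of the block; a minimal standalone sketch (again assuming a power-of-two block_size):

    #include <cstdint>

    using Addr = uint64_t;

    // True if [addr, addr + size) crosses an aligned block boundary.
    bool needsBurstSketch(Addr addr, unsigned size, unsigned block_size)
    {
        Addr offset = addr & (block_size - 1); // offset within the block
        return offset + size > block_size;
    }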
Minor::LSQ::LSQ
LSQ(std::string name_, std::string dcache_port_name_, MinorCPU &cpu_, Execute &execute_, unsigned int max_accesses_in_memory_system, unsigned int line_width, unsigned int requests_queue_size, unsigned int transfers_queue_size, unsigned int store_buffer_size, unsigned int store_buffer_cycle_store_limit)
Definition: lsq.cc:1399
Minor::LSQ::SplitDataRequest::sendNextFragmentToTranslation
void sendNextFragmentToTranslation()
Part of the address translation loop, see startAddrTranslation.
Definition: lsq.cc:698
Minor::LSQ::storeBuffer
StoreBuffer storeBuffer
Definition: lsq.hh:590
Minor::LSQ::LSQRequest::request
RequestPtr request
The underlying request of this LSQRequest.
Definition: lsq.hh:148
Minor::LSQ::StoreBuffer::countIssuedStore
void countIssuedStore(LSQRequestPtr request)
Count a store being issued to memory by decrementing numUnissuedAccesses.
Definition: lsq.cc:831
Minor::LSQ::LSQRequest::StoreBufferIssuing
@ StoreBufferIssuing
Definition: lsq.hh:178
Minor::LSQ::LSQRequest::tryToSuppressFault
void tryToSuppressFault()
Instructions may want to suppress translation faults (e.g.
Definition: lsq.cc:75
BaseCPU::contextToThread
ThreadID contextToThread(ContextID cid)
Convert ContextID to threadID.
Definition: base.hh:308
Minor::LSQ::LSQRequest::issuedToMemory
bool issuedToMemory
This is an access other than a normal cacheable load that's visited the memory system.
Definition: lsq.hh:160
Minor::LSQ::LSQRequest::getHeadPacket
virtual PacketPtr getHeadPacket()=0
Get the next packet to issue for this request.
Minor::LSQ::retryRequest
LSQRequestPtr retryRequest
The request (from either requests or the store buffer) which is currently waiting to have its memory acc...
Definition: lsq.hh:615
Named
Definition: trace.hh:150
Minor::LSQ::StoreBuffer::canForwardDataToLoad
AddrRangeCoverage canForwardDataToLoad(LSQRequestPtr request, unsigned int &found_slot)
Look for a store which satisfies the given load.
Definition: lsq.cc:760
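Conceptually, the store buffer is walked from the newest store backwards and each slot's byte range is tested against the load; a simplified standalone sketch with hypothetical slot records (it only reports full coverage and ignores the partial-overlap case that blocks forwarding):

    #include <cstdint>
    #include <vector>

    using Addr = uint64_t;

    struct StoreSlot { Addr addr; unsigned size; };  // hypothetical slot record

    // Index of the newest buffered store whose byte range fully covers
    // the load, or -1 if none does.
    int newestCoveringStore(const std::vector<StoreSlot> &slots,
                            Addr load_addr, unsigned load_size)
    {
        for (int i = int(slots.size()) - 1; i >= 0; i--) {
            Addr s_begin = slots[i].addr;
            Addr s_end = s_begin + slots[i].size;
            if (s_begin <= load_addr && load_addr + load_size <= s_end)
                return i;  // newest matching store wins
        }
        return -1;
    }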
X86ISA::addr
Bitfield< 3 > addr
Definition: types.hh:80
BaseTLB::Write
@ Write
Definition: tlb.hh:57
Packet::cmd
MemCmd cmd
The command field of the packet.
Definition: packet.hh:336
Minor::LSQ::numStoresInTransfers
unsigned int numStoresInTransfers
The number of stores in the transfers queue.
Definition: lsq.hh:606
Packet::createWrite
static PacketPtr createWrite(const RequestPtr &req)
Definition: packet.hh:987
ArmISA::handleLockedRead
void handleLockedRead(XC *xc, const RequestPtr &req)
Definition: locked_mem.hh:91
Minor::LSQ::LSQRequest::isComplete
bool isComplete() const
Has this request been completed.
Definition: lsq.cc:175
Minor::LSQ::FailedDataRequest
FailedDataRequest represents requests from instructions that failed their predicates but need to ride...
Definition: lsq.hh:319
Minor::Execute::instIsHeadInst
bool instIsHeadInst(MinorDynInstPtr inst)
Returns true if the given instruction is at the head of the inFlightInsts instruction queue.
Definition: execute.cc:1876
Packet::pushSenderState
void pushSenderState(SenderState *sender_state)
Push a new sender state to the packet and make the current sender state the predecessor of the new on...
Definition: packet.cc:332
Minor::LSQ::issuedMemBarrierInst
void issuedMemBarrierInst(MinorDynInstPtr inst)
A memory barrier instruction has been issued, remember its execSeqNum so that we can avoid issuing memor...
Definition: lsq.cc:1706
Minor::ExecContext::setMemAccPredicate
void setMemAccPredicate(bool val) override
Definition: exec_context.hh:338
Minor::ExecContext
ExecContext bears the exec_context interface for Minor.
Definition: exec_context.hh:69
MipsISA::PCState
GenericISA::DelaySlotPCState< MachInst > PCState
Definition: types.hh:41
Minor::LSQ::LSQRequest::inst
MinorDynInstPtr inst
Instruction which made this request.
Definition: lsq.hh:131
Minor::LSQ::LSQRequest::retireResponse
virtual void retireResponse(PacketPtr packet_)=0
Retire a response packet into the LSQRequest packet possibly completing this transfer.
Minor::LSQ::sendStoreToStoreBuffer
void sendStoreToStoreBuffer(LSQRequestPtr request)
A store has been committed, please move it to the store buffer.
Definition: lsq.cc:1540
Minor::LSQ::StoreBuffer::step
void step()
Try to issue more stores to memory.
Definition: lsq.cc:840
Packet
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:258
Minor::LSQ::LSQRequest::setSkipped
void setSkipped()
Set this request as having been skipped before a memory transfer was attempted.
Definition: lsq.hh:215
Packet::popSenderState
SenderState * popSenderState()
Pop the top of the state stack and return a pointer to it.
Definition: packet.cc:340
Minor::LSQ::SingleDataRequest
SingleDataRequest is used for requests that don't fragment.
Definition: lsq.hh:341
BaseCPU::numThreads
ThreadID numThreads
Number of threads we're actually simulating (<= SMT_MAX_THREADS).
Definition: base.hh:378
Minor::LSQ::lastMemBarrier
std::vector< InstSeqNum > lastMemBarrier
Most recent execSeqNum of a memory barrier instruction or 0 if there are no in-flight barriers.
Definition: lsq.hh:537
DPRINTFS
#define DPRINTFS(x,...)
Definition: trace.hh:238
logging.hh
BaseCPU::dataRequestorId
RequestorID dataRequestorId() const
Reads this CPU's unique data requestor ID.
Definition: base.hh:201
Minor::LSQ::BarrierDataRequest
Request for doing barrier accounting in the store buffer.
Definition: lsq.hh:329
Packet::isWrite
bool isWrite() const
Definition: packet.hh:558
Minor::LSQ::AddrRangeCoverage
AddrRangeCoverage
Coverage of one address range with another.
Definition: lsq.hh:83
Minor::LSQ::canSendToMemorySystem
bool canSendToMemorySystem()
Can a request be sent to the memory system.
Definition: lsq.cc:1286
Minor::LSQ::~LSQ
virtual ~LSQ()
Definition: lsq.cc:1454
Packet::getPtr
T * getPtr()
get a pointer to the data ptr.
Definition: packet.hh:1158
Minor::LSQ::LSQRequest::setState
void setState(LSQRequestState new_state)
Set state and output trace output.
Definition: lsq.cc:167
Minor::LSQ::LSQRequest::startAddrTranslation
virtual void startAddrTranslation()=0
Start the address translation process for this request.
RefCountingPtr< MinorDynInst >
Complete
@ Complete
Definition: misc.hh:55
Minor::LSQ
Definition: lsq.hh:63
curTick
Tick curTick()
The universal simulation clock.
Definition: cur_tick.hh:43
trace.hh
Minor::LSQ::StoreBuffer::deleteRequest
void deleteRequest(LSQRequestPtr request)
Delete the given request and free the slot it occupied.
Definition: lsq.cc:724
Minor::LSQ::LSQRequest::~LSQRequest
virtual ~LSQRequest()
Definition: lsq.cc:1457
Minor::LSQ::LSQRequest::isBarrier
virtual bool isBarrier()
Is this a request a barrier?
Definition: lsq.cc:155
Request::STORE_NO_DATA
static const FlagsType STORE_NO_DATA
Definition: request.hh:237
Named::name
const std::string & name() const
Definition: trace.hh:159
Minor::LSQ::MemoryState
MemoryState
State of memory access for head access.
Definition: lsq.hh:72
Packet::allocate
void allocate()
Allocate memory for the packet.
Definition: packet.hh:1300
Minor::LSQ::isDrained
bool isDrained()
Is there nothing left in the LSQ.
Definition: lsq.cc:1553
Minor::LSQ::state
MemoryState state
Retry state of last issued memory transfer.
Definition: lsq.hh:541
Minor::LSQ::LSQRequest::StoreToStoreBuffer
@ StoreToStoreBuffer
Definition: lsq.hh:173
lsq.hh
Packet::getConstPtr
const T * getConstPtr() const
Definition: packet.hh:1167
Minor::LSQ::LSQRequest::state
LSQRequestState state
Definition: lsq.hh:187
Minor::Execute
Execute stage.
Definition: execute.hh:63
Minor::LSQ::clearMemBarrier
void clearMemBarrier(MinorDynInstPtr inst)
Clear a barrier (if it's the last one marked up in lastMemBarrier)
Definition: lsq.cc:255
Minor::LSQ::findResponse
LSQRequestPtr findResponse(MinorDynInstPtr inst)
Returns a response if it's at the head of the transfers queue and it's either complete or can be sent...
Definition: lsq.cc:1483
Minor::LSQ::inMemorySystemLimit
const unsigned int inMemorySystemLimit
Maximum number of in-flight accesses issued to the memory system.
Definition: lsq.hh:544
Minor::LSQ::LSQRequest
Derived SenderState to carry data access info.
Definition: lsq.hh:122
Minor::LSQ::lineWidth
const unsigned int lineWidth
Memory system access width (and snap) in bytes.
Definition: lsq.hh:547
Minor::LSQ::recvTimingResp
bool recvTimingResp(PacketPtr pkt)
Memory interface.
Definition: lsq.cc:1293
Minor::LSQ::StoreBuffer::StoreBuffer
StoreBuffer(std::string name_, LSQ &lsq_, unsigned int store_buffer_size, unsigned int store_limit_per_cycle)
Definition: lsq.cc:1673
panic
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:171
