gem5  v21.2.1.1
lsq.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2011-2012, 2014, 2017-2019, 2021 ARM Limited
3  * Copyright (c) 2013 Advanced Micro Devices, Inc.
4  * All rights reserved
5  *
6  * The license below extends only to copyright in the software and shall
7  * not be construed as granting a license to any other intellectual
8  * property including but not limited to intellectual property relating
9  * to a hardware implementation of the functionality of the software
10  * licensed hereunder. You may use the software subject to the license
11  * terms below provided that you ensure that this notice is replicated
12  * unmodified and in its entirety in all distributions of the software,
13  * modified or unmodified, in source code or in binary form.
14  *
15  * Copyright (c) 2005-2006 The Regents of The University of Michigan
16  * All rights reserved.
17  *
18  * Redistribution and use in source and binary forms, with or without
19  * modification, are permitted provided that the following conditions are
20  * met: redistributions of source code must retain the above copyright
21  * notice, this list of conditions and the following disclaimer;
22  * redistributions in binary form must reproduce the above copyright
23  * notice, this list of conditions and the following disclaimer in the
24  * documentation and/or other materials provided with the distribution;
25  * neither the name of the copyright holders nor the names of its
26  * contributors may be used to endorse or promote products derived from
27  * this software without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40  */
41 
42 #include "cpu/o3/lsq.hh"
43 
44 #include <algorithm>
45 #include <list>
46 #include <string>
47 
48 #include "base/compiler.hh"
49 #include "base/logging.hh"
50 #include "cpu/o3/cpu.hh"
51 #include "cpu/o3/dyn_inst.hh"
52 #include "cpu/o3/iew.hh"
53 #include "cpu/o3/limits.hh"
54 #include "debug/Drain.hh"
55 #include "debug/Fetch.hh"
56 #include "debug/HtmCpu.hh"
57 #include "debug/LSQ.hh"
58 #include "debug/Writeback.hh"
59 #include "params/O3CPU.hh"
60 
61 namespace gem5
62 {
63 
64 namespace o3
65 {
66 
68  RequestPort(_cpu->name() + ".dcache_port", _cpu), lsq(_lsq), cpu(_cpu)
69 {}
70 
71 LSQ::LSQ(CPU *cpu_ptr, IEW *iew_ptr, const O3CPUParams &params)
72  : cpu(cpu_ptr), iewStage(iew_ptr),
73  _cacheBlocked(false),
76  lsqPolicy(params.smtLSQPolicy),
77  LQEntries(params.LQEntries),
78  SQEntries(params.SQEntries),
80  params.smtLSQThreshold)),
82  params.smtLSQThreshold)),
83  dcachePort(this, cpu_ptr),
84  numThreads(params.numThreads)
85 {
86  assert(numThreads > 0 && numThreads <= MaxThreads);
87 
88  //**********************************************
89  //************ Handle SMT Parameters ***********
90  //**********************************************
91 
92  /* Run SMT policy checks. */
93  if (lsqPolicy == SMTQueuePolicy::Dynamic) {
94  DPRINTF(LSQ, "LSQ sharing policy set to Dynamic\n");
95  } else if (lsqPolicy == SMTQueuePolicy::Partitioned) {
96  DPRINTF(Fetch, "LSQ sharing policy set to Partitioned: "
97  "%i entries per LQ | %i entries per SQ\n",
99  } else if (lsqPolicy == SMTQueuePolicy::Threshold) {
100 
101  assert(params.smtLSQThreshold > params.LQEntries);
102  assert(params.smtLSQThreshold > params.SQEntries);
103 
104  DPRINTF(LSQ, "LSQ sharing policy set to Threshold: "
105  "%i entries per LQ | %i entries per SQ\n",
107  } else {
108  panic("Invalid LSQ sharing policy. Options are: Dynamic, "
109  "Partitioned, Threshold");
110  }
111 
112  thread.reserve(numThreads);
113  for (ThreadID tid = 0; tid < numThreads; tid++) {
114  thread.emplace_back(maxLQEntries, maxSQEntries);
115  thread[tid].init(cpu, iew_ptr, params, this, tid);
116  thread[tid].setDcachePort(&dcachePort);
117  }
118 }
119 
120 
121 std::string
122 LSQ::name() const
123 {
124  return iewStage->name() + ".lsq";
125 }
126 
127 void
129 {
130  activeThreads = at_ptr;
131  assert(activeThreads != 0);
132 }
133 
134 void
136 {
137  assert(isDrained());
138 
139  for (ThreadID tid = 0; tid < numThreads; tid++)
140  thread[tid].drainSanityCheck();
141 }
142 
143 bool
145 {
146  bool drained(true);
147 
148  if (!lqEmpty()) {
149  DPRINTF(Drain, "Not drained, LQ not empty.\n");
150  drained = false;
151  }
152 
153  if (!sqEmpty()) {
154  DPRINTF(Drain, "Not drained, SQ not empty.\n");
155  drained = false;
156  }
157 
158  return drained;
159 }
160 
161 void
163 {
164  usedStorePorts = 0;
165  _cacheBlocked = false;
166 
167  for (ThreadID tid = 0; tid < numThreads; tid++) {
168  thread[tid].takeOverFrom();
169  }
170 }
171 
172 void
174 {
175  // Re-issue loads which got blocked on the per-cycle load ports limit.
178 
179  usedLoadPorts = 0;
180  usedStorePorts = 0;
181 }
182 
183 bool
185 {
186  return _cacheBlocked;
187 }
188 
189 void
191 {
192  _cacheBlocked = v;
193 }
194 
195 bool
196 LSQ::cachePortAvailable(bool is_load) const
197 {
198  bool ret;
199  if (is_load) {
201  } else {
203  }
204  return ret;
205 }
206 
207 void
208 LSQ::cachePortBusy(bool is_load)
209 {
210  assert(cachePortAvailable(is_load));
211  if (is_load) {
212  usedLoadPorts++;
213  } else {
214  usedStorePorts++;
215  }
216 }
217 
218 void
219 LSQ::insertLoad(const DynInstPtr &load_inst)
220 {
221  ThreadID tid = load_inst->threadNumber;
222 
223  thread[tid].insertLoad(load_inst);
224 }
225 
226 void
227 LSQ::insertStore(const DynInstPtr &store_inst)
228 {
229  ThreadID tid = store_inst->threadNumber;
230 
231  thread[tid].insertStore(store_inst);
232 }
233 
234 Fault
236 {
237  ThreadID tid = inst->threadNumber;
238 
239  return thread[tid].executeLoad(inst);
240 }
241 
242 Fault
244 {
245  ThreadID tid = inst->threadNumber;
246 
247  return thread[tid].executeStore(inst);
248 }
249 
250 void
252 {
253  thread.at(tid).commitLoads(youngest_inst);
254 }
255 
256 void
258 {
259  thread.at(tid).commitStores(youngest_inst);
260 }
261 
262 void
264 {
265  std::list<ThreadID>::iterator threads = activeThreads->begin();
267 
268  while (threads != end) {
269  ThreadID tid = *threads++;
270 
271  if (numStoresToWB(tid) > 0) {
272  DPRINTF(Writeback,"[tid:%i] Writing back stores. %i stores "
273  "available for Writeback.\n", tid, numStoresToWB(tid));
274  }
275 
276  thread[tid].writebackStores();
277  }
278 }
279 
280 void
281 LSQ::squash(const InstSeqNum &squashed_num, ThreadID tid)
282 {
283  thread.at(tid).squash(squashed_num);
284 }
285 
286 bool
288 {
289  /* Answers: Does Anybody Have a Violation?*/
290  std::list<ThreadID>::iterator threads = activeThreads->begin();
292 
293  while (threads != end) {
294  ThreadID tid = *threads++;
295 
296  if (thread[tid].violation())
297  return true;
298  }
299 
300  return false;
301 }
302 
303 bool LSQ::violation(ThreadID tid) { return thread.at(tid).violation(); }
304 
307 {
308  return thread.at(tid).getMemDepViolator();
309 }
310 
311 int
313 {
314  return thread.at(tid).getLoadHead();
315 }
316 
319 {
320  return thread.at(tid).getLoadHeadSeqNum();
321 }
322 
323 int
325 {
326  return thread.at(tid).getStoreHead();
327 }
328 
331 {
332  return thread.at(tid).getStoreHeadSeqNum();
333 }
334 
335 int LSQ::getCount(ThreadID tid) { return thread.at(tid).getCount(); }
336 
337 int LSQ::numLoads(ThreadID tid) { return thread.at(tid).numLoads(); }
338 
339 int LSQ::numStores(ThreadID tid) { return thread.at(tid).numStores(); }
340 
341 int
343 {
344  if (tid == InvalidThreadID)
345  return 0;
346  else
347  return thread[tid].numHtmStarts();
348 }
349 int
351 {
352  if (tid == InvalidThreadID)
353  return 0;
354  else
355  return thread[tid].numHtmStops();
356 }
357 
358 void
360 {
361  if (tid != InvalidThreadID)
362  thread[tid].resetHtmStartsStops();
363 }
364 
365 uint64_t
367 {
368  if (tid == InvalidThreadID)
369  return 0;
370  else
371  return thread[tid].getLatestHtmUid();
372 }
373 
374 void
375 LSQ::setLastRetiredHtmUid(ThreadID tid, uint64_t htmUid)
376 {
377  if (tid != InvalidThreadID)
378  thread[tid].setLastRetiredHtmUid(htmUid);
379 }
380 
381 void
383 {
385  cacheBlocked(false);
386 
387  for (ThreadID tid : *activeThreads) {
388  thread[tid].recvRetry();
389  }
390 }
391 
392 void
394 {
395  LSQRequest *request = dynamic_cast<LSQRequest*>(pkt->senderState);
396  thread[cpu->contextToThread(request->contextId())]
397  .completeDataAccess(pkt);
398 }
399 
400 bool
402 {
403  if (pkt->isError())
404  DPRINTF(LSQ, "Got error packet back for address: %#X\n",
405  pkt->getAddr());
406 
407  LSQRequest *request = dynamic_cast<LSQRequest*>(pkt->senderState);
408  panic_if(!request, "Got packet back with unknown sender state\n");
409 
410  thread[cpu->contextToThread(request->contextId())].recvTimingResp(pkt);
411 
412  if (pkt->isInvalidate()) {
413  // This response also contains an invalidate; e.g. this can be the case
414  // if cmd is ReadRespWithInvalidate.
415  //
416  // The calling order between completeDataAccess and checkSnoop matters.
417  // By calling checkSnoop after completeDataAccess, we ensure that the
418  // fault set by checkSnoop is not lost. Calling writeback (more
419  // specifically inst->completeAcc) in completeDataAccess overwrites
420  // fault, and in case this instruction requires squashing (as
421  // determined by checkSnoop), the ReExec fault set by checkSnoop would
422  // be lost otherwise.
423 
424  DPRINTF(LSQ, "received invalidation with response for addr:%#x\n",
425  pkt->getAddr());
426 
427  for (ThreadID tid = 0; tid < numThreads; tid++) {
428  thread[tid].checkSnoop(pkt);
429  }
430  }
431  // Update the LSQRequest state (this may delete the request)
432  request->packetReplied();
433 
434  return true;
435 }
436 
437 void
439 {
440  DPRINTF(LSQ, "received pkt for addr:%#x %s\n", pkt->getAddr(),
441  pkt->cmdString());
442 
443  // must be a snoop
444  if (pkt->isInvalidate()) {
445  DPRINTF(LSQ, "received invalidation for addr:%#x\n",
446  pkt->getAddr());
447  for (ThreadID tid = 0; tid < numThreads; tid++) {
448  thread[tid].checkSnoop(pkt);
449  }
450  }
451 }
452 
453 int
455 {
456  unsigned total = 0;
457 
458  std::list<ThreadID>::iterator threads = activeThreads->begin();
460 
461  while (threads != end) {
462  ThreadID tid = *threads++;
463 
464  total += getCount(tid);
465  }
466 
467  return total;
468 }
469 
470 int
472 {
473  unsigned total = 0;
474 
475  std::list<ThreadID>::iterator threads = activeThreads->begin();
477 
478  while (threads != end) {
479  ThreadID tid = *threads++;
480 
481  total += numLoads(tid);
482  }
483 
484  return total;
485 }
486 
487 int
489 {
490  unsigned total = 0;
491 
492  std::list<ThreadID>::iterator threads = activeThreads->begin();
494 
495  while (threads != end) {
496  ThreadID tid = *threads++;
497 
498  total += thread[tid].numStores();
499  }
500 
501  return total;
502 }
503 
504 unsigned
506 {
507  unsigned total = 0;
508 
509  std::list<ThreadID>::iterator threads = activeThreads->begin();
511 
512  while (threads != end) {
513  ThreadID tid = *threads++;
514 
515  total += thread[tid].numFreeLoadEntries();
516  }
517 
518  return total;
519 }
520 
521 unsigned
523 {
524  unsigned total = 0;
525 
526  std::list<ThreadID>::iterator threads = activeThreads->begin();
528 
529  while (threads != end) {
530  ThreadID tid = *threads++;
531 
532  total += thread[tid].numFreeStoreEntries();
533  }
534 
535  return total;
536 }
537 
538 unsigned
540 {
541  return thread[tid].numFreeLoadEntries();
542 }
543 
544 unsigned
546 {
547  return thread[tid].numFreeStoreEntries();
548 }
549 
550 bool
552 {
553  std::list<ThreadID>::iterator threads = activeThreads->begin();
555 
556  while (threads != end) {
557  ThreadID tid = *threads++;
558 
559  if (!(thread[tid].lqFull() || thread[tid].sqFull()))
560  return false;
561  }
562 
563  return true;
564 }
565 
566 bool
568 {
569  //@todo: Change to Calculate All Entries for
570  //Dynamic Policy
571  if (lsqPolicy == SMTQueuePolicy::Dynamic)
572  return isFull();
573  else
574  return thread[tid].lqFull() || thread[tid].sqFull();
575 }
576 
577 bool
579 {
580  return lqEmpty() && sqEmpty();
581 }
582 
583 bool
585 {
588 
589  while (threads != end) {
590  ThreadID tid = *threads++;
591 
592  if (!thread[tid].lqEmpty())
593  return false;
594  }
595 
596  return true;
597 }
598 
599 bool
601 {
604 
605  while (threads != end) {
606  ThreadID tid = *threads++;
607 
608  if (!thread[tid].sqEmpty())
609  return false;
610  }
611 
612  return true;
613 }
614 
615 bool
617 {
618  std::list<ThreadID>::iterator threads = activeThreads->begin();
620 
621  while (threads != end) {
622  ThreadID tid = *threads++;
623 
624  if (!thread[tid].lqFull())
625  return false;
626  }
627 
628  return true;
629 }
630 
631 bool
633 {
634  //@todo: Change to Calculate All Entries for
635  //Dynamic Policy
636  if (lsqPolicy == SMTQueuePolicy::Dynamic)
637  return lqFull();
638  else
639  return thread[tid].lqFull();
640 }
641 
642 bool
644 {
645  std::list<ThreadID>::iterator threads = activeThreads->begin();
647 
648  while (threads != end) {
649  ThreadID tid = *threads++;
650 
651  if (!sqFull(tid))
652  return false;
653  }
654 
655  return true;
656 }
657 
658 bool
660 {
661  //@todo: Change to Calculate All Entries for
662  //Dynamic Policy
663  if (lsqPolicy == SMTQueuePolicy::Dynamic)
664  return sqFull();
665  else
666  return thread[tid].sqFull();
667 }
668 
669 bool
671 {
672  std::list<ThreadID>::iterator threads = activeThreads->begin();
674 
675  while (threads != end) {
676  ThreadID tid = *threads++;
677 
678  if (!thread[tid].isStalled())
679  return false;
680  }
681 
682  return true;
683 }
684 
685 bool
687 {
688  if (lsqPolicy == SMTQueuePolicy::Dynamic)
689  return isStalled();
690  else
691  return thread[tid].isStalled();
692 }
693 
694 bool
696 {
697  std::list<ThreadID>::iterator threads = activeThreads->begin();
699 
700  while (threads != end) {
701  ThreadID tid = *threads++;
702 
703  if (hasStoresToWB(tid))
704  return true;
705  }
706 
707  return false;
708 }
709 
710 bool
712 {
713  return thread.at(tid).hasStoresToWB();
714 }
715 
716 int
718 {
719  return thread.at(tid).numStoresToWB();
720 }
721 
722 bool
724 {
725  std::list<ThreadID>::iterator threads = activeThreads->begin();
727 
728  while (threads != end) {
729  ThreadID tid = *threads++;
730 
731  if (willWB(tid))
732  return true;
733  }
734 
735  return false;
736 }
737 
738 bool
740 {
741  return thread.at(tid).willWB();
742 }
743 
744 void
746 {
749 
750  while (threads != end) {
751  ThreadID tid = *threads++;
752 
753  thread[tid].dumpInsts();
754  }
755 }
756 
757 void
759 {
760  thread.at(tid).dumpInsts();
761 }
762 
// Entry point for a new memory access (load, store, or atomic) from the
// issuing instruction. Builds (or re-uses) the LSQRequest, kicks off
// address translation, and — once translation is complete — hands the
// access to the per-thread LSQ unit via read()/write().
// Returns the instruction's current fault (NoFault on success).
763 Fault
764 LSQ::pushRequest(const DynInstPtr& inst, bool isLoad, uint8_t *data,
765  unsigned int size, Addr addr, Request::Flags flags, uint64_t *res,
766  AtomicOpFunctorPtr amo_op, const std::vector<bool>& byte_enable)
767 {
768  // This incoming request can be either load, store or atomic.
769  // Atomic request has a corresponding pointer to its atomic memory
770  // operation
771  [[maybe_unused]] bool isAtomic = !isLoad && amo_op;
772
773  ThreadID tid = cpu->contextToThread(inst->contextId());
774  auto cacheLineSize = cpu->cacheLineSize();
775  bool needs_burst = transferNeedsBurst(addr, size, cacheLineSize);
776  LSQRequest* request = nullptr;
777
778  // Atomic requests that access data across cache line boundary are
779  // currently not allowed since the cache does not guarantee corresponding
780  // atomic memory operations to be executed atomically across a cache line.
781  // For ISAs such as x86 that supports cross-cache-line atomic instructions,
782  // the cache needs to be modified to perform atomic update to both cache
783  // lines. For now, such cross-line update is not supported.
784  assert(!isAtomic || (isAtomic && !needs_burst))
785
786  const bool htm_cmd = isLoad && (flags & Request::HTM_CMD);
787
788  if (inst->translationStarted()) {
789  request = inst->savedRequest;
790  assert(request);
791  } else {
792  if (htm_cmd) {
793  assert(addr == 0x0lu);
794  assert(size == 8);
795  request = new HtmCmdRequest(&thread[tid], inst, flags);
796  } else if (needs_burst) {
797  request = new SplitDataRequest(&thread[tid], inst, isLoad, addr,
798  size, flags, data, res);
799  } else {
800  request = new SingleDataRequest(&thread[tid], inst, isLoad, addr,
801  size, flags, data, res, std::move(amo_op));
802  }
803  assert(request);
804  request->_byteEnable = byte_enable;
805  inst->setRequest();
806  request->taskId(cpu->taskId());
807
808  // There might be fault from a previous execution attempt if this is
809  // a strictly ordered load
810  inst->getFault() = NoFault;
811
812  request->initiateTranslation();
813  }
814
815  /* This is the place where instructions get the effAddr. */
816  if (request->isTranslationComplete()) {
817  if (request->isMemAccessRequired()) {
818  inst->effAddr = request->getVaddr();
819  inst->effSize = size;
820  inst->effAddrValid(true);
821
822  if (cpu->checker) {
823  inst->reqToVerify = std::make_shared<Request>(*request->req());
824  }
825  Fault fault;
826  if (isLoad)
827  fault = read(request, inst->lqIdx);
828  else
829  fault = write(request, data, inst->sqIdx);
830  // inst->getFault() may have the first-fault of a
831  // multi-access split request at this point.
832  // Overwrite that only if we got another type of fault
833  // (e.g. re-exec).
834  if (fault != NoFault)
835  inst->getFault() = fault;
836  } else if (isLoad) {
837  inst->setMemAccPredicate(false);
838  // Commit will have to clean up whatever happened. Set this
839  // instruction as executed.
840  inst->setExecuted();
841  }
842  }
843
844  if (inst->traceData)
845  inst->traceData->setMem(addr, size, flags);
846
847  return inst->getFault();
848 }
849 
850 void
851 LSQ::SingleDataRequest::finish(const Fault &fault, const RequestPtr &request,
853 {
854  _fault.push_back(fault);
857  /* If the instruction has been squashed, let the request know
858  * as it may have to self-destruct. */
859  if (_inst->isSquashed()) {
861  } else {
862  _inst->strictlyOrdered(request->isStrictlyOrdered());
863 
864  flags.set(Flag::TranslationFinished);
865  if (fault == NoFault) {
866  _inst->physEffAddr = request->getPaddr();
867  _inst->memReqFlags = request->getFlags();
868  if (request->isCondSwap()) {
869  assert(_res);
870  request->setExtraData(*_res);
871  }
873  } else {
875  }
876 
877  LSQRequest::_inst->fault = fault;
878  LSQRequest::_inst->translationCompleted(true);
879  }
880 }
881 
882 void
885 {
886  int i;
887  for (i = 0; i < _reqs.size() && _reqs[i] != req; i++);
888  assert(i < _reqs.size());
889  _fault[i] = fault;
890 
891  numInTranslationFragments--;
892  numTranslatedFragments++;
893 
894  if (fault == NoFault)
895  _mainReq->setFlags(req->getFlags());
896 
897  if (numTranslatedFragments == _reqs.size()) {
898  if (_inst->isSquashed()) {
899  squashTranslation();
900  } else {
901  _inst->strictlyOrdered(_mainReq->isStrictlyOrdered());
902  flags.set(Flag::TranslationFinished);
903  _inst->translationCompleted(true);
904 
905  for (i = 0; i < _fault.size() && _fault[i] == NoFault; i++);
906  if (i > 0) {
907  _inst->physEffAddr = LSQRequest::req()->getPaddr();
908  _inst->memReqFlags = _mainReq->getFlags();
909  if (_mainReq->isCondSwap()) {
910  assert (i == _fault.size());
911  assert(_res);
912  _mainReq->setExtraData(*_res);
913  }
914  if (i == _fault.size()) {
915  _inst->fault = NoFault;
916  setState(State::Request);
917  } else {
918  _inst->fault = _fault[i];
919  setState(State::PartialFault);
920  }
921  } else {
922  _inst->fault = _fault[0];
923  setState(State::Fault);
924  }
925  }
926 
927  }
928 }
929 
930 void
932 {
933  assert(_reqs.size() == 0);
934 
935  addReq(_addr, _size, _byteEnable);
936 
937  if (_reqs.size() > 0) {
938  _reqs.back()->setReqInstSeqNum(_inst->seqNum);
939  _reqs.back()->taskId(_taskId);
940  _inst->translationStarted(true);
941  setState(State::Translation);
942  flags.set(Flag::TranslationStarted);
943 
944  _inst->savedRequest = this;
945  sendFragmentToTranslation(0);
946  } else {
947  _inst->setMemAccPredicate(false);
948  }
949 }
950 
951 PacketPtr
953 {
954  return _mainPacket;
955 }
956 
959 {
960  return _mainReq;
961 }
962 
963 void
965 {
966  auto cacheLineSize = _port.cacheLineSize();
967  Addr base_addr = _addr;
968  Addr next_addr = addrBlockAlign(_addr + cacheLineSize, cacheLineSize);
969  Addr final_addr = addrBlockAlign(_addr + _size, cacheLineSize);
970  uint32_t size_so_far = 0;
971 
972  _mainReq = std::make_shared<Request>(base_addr,
973  _size, _flags, _inst->requestorId(),
974  _inst->pcState().instAddr(), _inst->contextId());
975  _mainReq->setByteEnable(_byteEnable);
976 
977  // Paddr is not used in _mainReq. However, we will accumulate the flags
978  // from the sub requests into _mainReq by calling setFlags() in finish().
979  // setFlags() assumes that paddr is set so flip the paddr valid bit here to
980  // avoid a potential assert in setFlags() when we call it from finish().
981  _mainReq->setPaddr(0);
982 
983  /* Get the pre-fix, possibly unaligned. */
984  auto it_start = _byteEnable.begin();
985  auto it_end = _byteEnable.begin() + (next_addr - base_addr);
986  addReq(base_addr, next_addr - base_addr,
987  std::vector<bool>(it_start, it_end));
988  size_so_far = next_addr - base_addr;
989 
990  /* We are block aligned now, reading whole blocks. */
991  base_addr = next_addr;
992  while (base_addr != final_addr) {
993  auto it_start = _byteEnable.begin() + size_so_far;
994  auto it_end = _byteEnable.begin() + size_so_far + cacheLineSize;
995  addReq(base_addr, cacheLineSize,
996  std::vector<bool>(it_start, it_end));
997  size_so_far += cacheLineSize;
998  base_addr += cacheLineSize;
999  }
1000 
1001  /* Deal with the tail. */
1002  if (size_so_far < _size) {
1003  auto it_start = _byteEnable.begin() + size_so_far;
1004  auto it_end = _byteEnable.end();
1005  addReq(base_addr, _size - size_so_far,
1006  std::vector<bool>(it_start, it_end));
1007  }
1008 
1009  if (_reqs.size() > 0) {
1010  /* Setup the requests and send them to translation. */
1011  for (auto& r: _reqs) {
1012  r->setReqInstSeqNum(_inst->seqNum);
1013  r->taskId(_taskId);
1014  }
1015 
1016  _inst->translationStarted(true);
1017  setState(State::Translation);
1018  flags.set(Flag::TranslationStarted);
1019  _inst->savedRequest = this;
1020  numInTranslationFragments = 0;
1021  numTranslatedFragments = 0;
1022  _fault.resize(_reqs.size());
1023 
1024  for (uint32_t i = 0; i < _reqs.size(); i++) {
1025  sendFragmentToTranslation(i);
1026  }
1027  } else {
1028  _inst->setMemAccPredicate(false);
1029  }
1030 }
1031 
1033  LSQUnit *port, const DynInstPtr& inst, bool isLoad) :
1034  _state(State::NotIssued),
1035  _port(*port), _inst(inst), _data(nullptr),
1036  _res(nullptr), _addr(0), _size(0), _flags(0),
1037  _numOutstandingPackets(0), _amo_op(nullptr)
1038 {
1039  flags.set(Flag::IsLoad, isLoad);
1040  flags.set(Flag::WriteBackToRegister,
1041  _inst->isStoreConditional() || _inst->isAtomic() ||
1042  _inst->isLoad());
1043  flags.set(Flag::IsAtomic, _inst->isAtomic());
1044  install();
1045 }
1046 
1048  LSQUnit *port, const DynInstPtr& inst, bool isLoad,
1049  const Addr& addr, const uint32_t& size, const Request::Flags& flags_,
1050  PacketDataPtr data, uint64_t* res, AtomicOpFunctorPtr amo_op)
1051  : _state(State::NotIssued),
1052  numTranslatedFragments(0),
1053  numInTranslationFragments(0),
1054  _port(*port), _inst(inst), _data(data),
1055  _res(res), _addr(addr), _size(size),
1056  _flags(flags_),
1057  _numOutstandingPackets(0),
1058  _amo_op(std::move(amo_op))
1059 {
1060  flags.set(Flag::IsLoad, isLoad);
1061  flags.set(Flag::WriteBackToRegister,
1062  _inst->isStoreConditional() || _inst->isAtomic() ||
1063  _inst->isLoad());
1064  flags.set(Flag::IsAtomic, _inst->isAtomic());
1065  install();
1066 }
1067 
1068 void
1070 {
1071  if (isLoad()) {
1072  _port.loadQueue[_inst->lqIdx].setRequest(this);
1073  } else {
1074  // Store, StoreConditional, and Atomic requests are pushed
1075  // to this storeQueue
1076  _port.storeQueue[_inst->sqIdx].setRequest(this);
1077  }
1078 }
1079 
1080 bool LSQ::LSQRequest::squashed() const { return _inst->isSquashed(); }
1081 
1082 void
1084  const std::vector<bool>& byte_enable)
1085 {
1086  if (isAnyActiveElement(byte_enable.begin(), byte_enable.end())) {
1087  auto req = std::make_shared<Request>(
1088  addr, size, _flags, _inst->requestorId(),
1089  _inst->pcState().instAddr(), _inst->contextId(),
1090  std::move(_amo_op));
1091  req->setByteEnable(byte_enable);
1092  _reqs.push_back(req);
1093  }
1094 }
1095 
1097 {
1098  assert(!isAnyOutstandingRequest());
1099  _inst->savedRequest = nullptr;
1100 
1101  for (auto r: _packets)
1102  delete r;
1103 };
1104 
1105 ContextID
1107 {
1108  return _inst->contextId();
1109 }
1110 
1111 void
1113 {
1114  numInTranslationFragments++;
1115  _port.getMMUPtr()->translateTiming(req(i), _inst->thread->getTC(),
1116  this, isLoad() ? BaseMMU::Read : BaseMMU::Write);
1117 }
1118 
1119 bool
1121 {
1122  assert(_numOutstandingPackets == 1);
1123  flags.set(Flag::Complete);
1124  assert(pkt == _packets.front());
1125  _port.completeDataAccess(pkt);
1126  return true;
1127 }
1128 
1129 bool
1131 {
1132  uint32_t pktIdx = 0;
1133  while (pktIdx < _packets.size() && pkt != _packets[pktIdx])
1134  pktIdx++;
1135  assert(pktIdx < _packets.size());
1136  numReceivedPackets++;
1137  if (numReceivedPackets == _packets.size()) {
1138  flags.set(Flag::Complete);
1139  /* Assemble packets. */
1140  PacketPtr resp = isLoad()
1141  ? Packet::createRead(_mainReq)
1142  : Packet::createWrite(_mainReq);
1143  if (isLoad())
1144  resp->dataStatic(_inst->memData);
1145  else
1146  resp->dataStatic(_data);
1147  resp->senderState = this;
1148  _port.completeDataAccess(resp);
1149  delete resp;
1150  }
1151  return true;
1152 }
1153 
1154 void
1156 {
1157  /* Retries do not create new packets. */
1158  if (_packets.size() == 0) {
1159  _packets.push_back(
1160  isLoad()
1161  ? Packet::createRead(req())
1162  : Packet::createWrite(req()));
1163  _packets.back()->dataStatic(_inst->memData);
1164  _packets.back()->senderState = this;
1165 
1166  // hardware transactional memory
1167  // If request originates in a transaction (not necessarily a HtmCmd),
1168  // then the packet should be marked as such.
1169  if (_inst->inHtmTransactionalState()) {
1170  _packets.back()->setHtmTransactional(
1171  _inst->getHtmTransactionUid());
1172 
1173  DPRINTF(HtmCpu,
1174  "HTM %s pc=0x%lx - vaddr=0x%lx - paddr=0x%lx - htmUid=%u\n",
1175  isLoad() ? "LD" : "ST",
1176  _inst->pcState().instAddr(),
1177  _packets.back()->req->hasVaddr() ?
1178  _packets.back()->req->getVaddr() : 0lu,
1179  _packets.back()->getAddr(),
1180  _inst->getHtmTransactionUid());
1181  }
1182  }
1183  assert(_packets.size() == 1);
1184 }
1185 
1186 void
1188 {
1189  /* Extra data?? */
1190  Addr base_address = _addr;
1191 
1192  if (_packets.size() == 0) {
1193  /* New stuff */
1194  if (isLoad()) {
1195  _mainPacket = Packet::createRead(_mainReq);
1196  _mainPacket->dataStatic(_inst->memData);
1197 
1198  // hardware transactional memory
1199  // If request originates in a transaction,
1200  // packet should be marked as such
1201  if (_inst->inHtmTransactionalState()) {
1202  _mainPacket->setHtmTransactional(
1203  _inst->getHtmTransactionUid());
1204  DPRINTF(HtmCpu,
1205  "HTM LD.0 pc=0x%lx-vaddr=0x%lx-paddr=0x%lx-htmUid=%u\n",
1206  _inst->pcState().instAddr(),
1207  _mainPacket->req->hasVaddr() ?
1208  _mainPacket->req->getVaddr() : 0lu,
1209  _mainPacket->getAddr(),
1210  _inst->getHtmTransactionUid());
1211  }
1212  }
1213  for (int i = 0; i < _reqs.size() && _fault[i] == NoFault; i++) {
1214  RequestPtr req = _reqs[i];
1215  PacketPtr pkt = isLoad() ? Packet::createRead(req)
1216  : Packet::createWrite(req);
1217  ptrdiff_t offset = req->getVaddr() - base_address;
1218  if (isLoad()) {
1219  pkt->dataStatic(_inst->memData + offset);
1220  } else {
1221  uint8_t* req_data = new uint8_t[req->getSize()];
1222  std::memcpy(req_data,
1223  _inst->memData + offset,
1224  req->getSize());
1225  pkt->dataDynamic(req_data);
1226  }
1227  pkt->senderState = this;
1228  _packets.push_back(pkt);
1229 
1230  // hardware transactional memory
1231  // If request originates in a transaction,
1232  // packet should be marked as such
1233  if (_inst->inHtmTransactionalState()) {
1234  _packets.back()->setHtmTransactional(
1235  _inst->getHtmTransactionUid());
1236  DPRINTF(HtmCpu,
1237  "HTM %s.%d pc=0x%lx-vaddr=0x%lx-paddr=0x%lx-htmUid=%u\n",
1238  isLoad() ? "LD" : "ST",
1239  i+1,
1240  _inst->pcState().instAddr(),
1241  _packets.back()->req->hasVaddr() ?
1242  _packets.back()->req->getVaddr() : 0lu,
1243  _packets.back()->getAddr(),
1244  _inst->getHtmTransactionUid());
1245  }
1246  }
1247  }
1248  assert(_packets.size() > 0);
1249 }
1250 
1251 void
1253 {
1254  assert(_numOutstandingPackets == 0);
1255  if (lsqUnit()->trySendPacket(isLoad(), _packets.at(0)))
1256  _numOutstandingPackets = 1;
1257 }
1258 
1259 void
1261 {
1262  /* Try to send the packets. */
1263  while (numReceivedPackets + _numOutstandingPackets < _packets.size() &&
1264  lsqUnit()->trySendPacket(isLoad(),
1265  _packets.at(numReceivedPackets + _numOutstandingPackets))) {
1266  _numOutstandingPackets++;
1267  }
1268 }
1269 
1270 Cycles
1273 {
1274  return pkt->req->localAccessor(thread, pkt);
1275 }
1276 
1277 Cycles
1280 {
1281  Cycles delay(0);
1282  unsigned offset = 0;
1283 
1284  for (auto r: _reqs) {
1285  PacketPtr pkt =
1286  new Packet(r, isLoad() ? MemCmd::ReadReq : MemCmd::WriteReq);
1287  pkt->dataStatic(mainPkt->getPtr<uint8_t>() + offset);
1288  Cycles d = r->localAccessor(thread, pkt);
1289  if (d > delay)
1290  delay = d;
1291  offset += r->getSize();
1292  delete pkt;
1293  }
1294  return delay;
1295 }
1296 
1297 bool
1299 {
1300  return ( (LSQRequest::_reqs[0]->getPaddr() & blockMask) == blockAddr);
1301 }
1302 
1318 bool
1320 {
1321  bool is_hit = false;
1322  for (auto &r: _reqs) {
1332  if (r->hasPaddr() && (r->getPaddr() & blockMask) == blockAddr) {
1333  is_hit = true;
1334  break;
1335  }
1336  }
1337  return is_hit;
1338 }
1339 
1340 bool
1342 {
1343  return lsq->recvTimingResp(pkt);
1344 }
1345 
1346 void
1348 {
1349  for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
1350  if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
1351  cpu->wakeup(tid);
1352  }
1353  }
1354  lsq->recvTimingSnoopReq(pkt);
1355 }
1356 
1357 void
1359 {
1360  lsq->recvReqRetry();
1361 }
1362 
1364  const Request::Flags& flags_) :
1365  SingleDataRequest(port, inst, true, 0x0lu, 8, flags_,
1366  nullptr, nullptr, nullptr)
1367 {
1368 }
1369 
1370 void
1372 {
1373  // Special commands are implemented as loads to avoid significant
1374  // changes to the cpu and memory interfaces
1375  // The virtual and physical address uses a dummy value of 0x00
1376  // Address translation does not really occur thus the code below
1377 
1378  assert(_reqs.size() == 0);
1379 
1380  addReq(_addr, _size, _byteEnable);
1381 
1382  if (_reqs.size() > 0) {
1383  _reqs.back()->setReqInstSeqNum(_inst->seqNum);
1384  _reqs.back()->taskId(_taskId);
1385  _reqs.back()->setPaddr(_addr);
1386  _reqs.back()->setInstCount(_inst->getCpuPtr()->totalInsts());
1387 
1388  _inst->strictlyOrdered(_reqs.back()->isStrictlyOrdered());
1389  _inst->fault = NoFault;
1390  _inst->physEffAddr = _reqs.back()->getPaddr();
1391  _inst->memReqFlags = _reqs.back()->getFlags();
1392  _inst->savedRequest = this;
1393 
1394  flags.set(Flag::TranslationStarted);
1395  flags.set(Flag::TranslationFinished);
1396 
1397  _inst->translationStarted(true);
1398  _inst->translationCompleted(true);
1399 
1400  setState(State::Request);
1401  } else {
1402  panic("unexpected behaviour in initiateTranslation()");
1403  }
1404 }
1405 
1406 void
1409 {
1410  panic("unexpected behaviour - finish()");
1411 }
1412 
1413 Fault
1414 LSQ::read(LSQRequest* request, int load_idx)
1415 {
1416  assert(request->req()->contextId() == request->contextId());
1417  ThreadID tid = cpu->contextToThread(request->req()->contextId());
1418 
1419  return thread.at(tid).read(request, load_idx);
1420 }
1421 
1422 Fault
1423 LSQ::write(LSQRequest* request, uint8_t *data, int store_idx)
1424 {
1425  ThreadID tid = cpu->contextToThread(request->req()->contextId());
1426 
1427  return thread.at(tid).write(request, data, store_idx);
1428 }
1429 
1430 } // namespace o3
1431 } // namespace gem5
gem5::o3::LSQ::LQEntries
unsigned LQEntries
Total Size of LQ Entries.
Definition: lsq.hh:918
gem5::Packet::cmdString
const std::string & cmdString() const
Return the string name of the cmd field (for debugging and tracing).
Definition: packet.hh:577
gem5::o3::LSQ::lsqPolicy
SMTQueuePolicy lsqPolicy
The LSQ policy for SMT mode.
Definition: lsq.hh:889
gem5::o3::LSQ::insertStore
void insertStore(const DynInstPtr &store_inst)
Inserts a store into the LSQ.
Definition: lsq.cc:227
gem5::BaseMMU::Read
@ Read
Definition: mmu.hh:56
gem5::o3::LSQ::LSQRequest::_res
uint64_t * _res
Definition: lsq.hh:253
gem5::o3::LSQ::LSQRequest
Memory operation metadata.
Definition: lsq.hh:189
gem5::o3::LSQ::SingleDataRequest::initiateTranslation
virtual void initiateTranslation()
Definition: lsq.cc:931
gem5::NoFault
constexpr decltype(nullptr) NoFault
Definition: types.hh:260
gem5::o3::LSQ::commitLoads
void commitLoads(InstSeqNum &youngest_inst, ThreadID tid)
Commits loads up until the given sequence number for a specific thread.
Definition: lsq.cc:251
gem5::o3::LSQ::numStoresToWB
int numStoresToWB(ThreadID tid)
Returns the number of stores a specific thread has to write back.
Definition: lsq.cc:717
gem5::o3::LSQ::cacheBlocked
bool cacheBlocked() const
Is D-cache blocked?
Definition: lsq.cc:184
gem5::o3::LSQ::LSQRequest::isTranslationComplete
bool isTranslationComplete()
Definition: lsq.hh:460
gem5::o3::LSQ::LSQRequest::_byteEnable
std::vector< bool > _byteEnable
Definition: lsq.hh:257
gem5::o3::LSQ::numThreads
ThreadID numThreads
Number of Threads.
Definition: lsq.hh:935
gem5::o3::LSQ::LSQRequest::taskId
void taskId(const uint32_t &v)
Definition: lsq.hh:347
data
const char data[]
Definition: circlebuf.test.cc:48
gem5::o3::LSQ::dumpInsts
void dumpInsts() const
Debugging function to print out all instructions.
Definition: lsq.cc:745
gem5::o3::LSQ::getCount
int getCount()
Returns the number of instructions in all of the queues.
Definition: lsq.cc:454
gem5::o3::LSQ::SplitDataRequest::buildPackets
virtual void buildPackets()
Definition: lsq.cc:1187
gem5::o3::LSQ::writebackStores
void writebackStores()
Attempts to write back stores until all cache ports are used or the interface becomes blocked.
Definition: lsq.cc:263
gem5::o3::LSQ::setActiveThreads
void setActiveThreads(std::list< ThreadID > *at_ptr)
Sets the pointer to the list of active threads.
Definition: lsq.cc:128
gem5::o3::LSQ::lqEmpty
bool lqEmpty() const
Returns if all of the LQs are empty.
Definition: lsq.cc:584
gem5::BaseMMU::Mode
Mode
Definition: mmu.hh:56
gem5::Flags::set
void set(Type mask)
Set all flag's bits matching the given mask.
Definition: flags.hh:116
gem5::Packet::req
RequestPtr req
A pointer to the original request.
Definition: packet.hh:366
gem5::o3::LSQ::_cacheBlocked
bool _cacheBlocked
D-cache is blocked.
Definition: lsq.hh:877
gem5::o3::LSQ::SingleDataRequest::buildPackets
virtual void buildPackets()
Definition: lsq.cc:1155
gem5::o3::LSQ::LSQRequest::install
void install()
Install the request in the LQ/SQ.
Definition: lsq.cc:1069
gem5::BaseMMU::Write
@ Write
Definition: mmu.hh:56
gem5::o3::LSQ::SplitDataRequest::initiateTranslation
virtual void initiateTranslation()
Definition: lsq.cc:964
gem5::o3::LSQ::LSQRequest::setState
void setState(const State &newState)
Definition: lsq.hh:236
gem5::o3::LSQ::SplitDataRequest::handleLocalAccess
virtual Cycles handleLocalAccess(gem5::ThreadContext *thread, PacketPtr pkt)
Memory mapped IPR accesses.
Definition: lsq.cc:1278
gem5::Complete
@ Complete
Definition: misc.hh:57
gem5::o3::LSQ::LSQRequest::isMemAccessRequired
bool isMemAccessRequired()
Definition: lsq.hh:487
gem5::o3::LSQ::getStoreHeadSeqNum
InstSeqNum getStoreHeadSeqNum(ThreadID tid)
Returns the sequence number of the head of the store queue.
Definition: lsq.cc:330
gem5::o3::LSQ::numLoads
int numLoads()
Returns the total number of loads in the load queue.
Definition: lsq.cc:471
gem5::o3::LSQ::SplitDataRequest::isCacheBlockHit
virtual bool isCacheBlockHit(Addr blockAddr, Addr cacheBlockMask)
Caches may probe into the load-store queue to enforce memory ordering guarantees.
Definition: lsq.cc:1319
gem5::o3::LSQ::cachePortAvailable
bool cachePortAvailable(bool is_load) const
Is any store port available to use?
Definition: lsq.cc:196
gem5::Request::HTM_CMD
static const FlagsType HTM_CMD
Definition: request.hh:249
gem5::o3::LSQ
Definition: lsq.hh:75
gem5::o3::LSQ::LSQRequest::squashed
bool squashed() const override
This function is used by the page table walker to determine if it should translate a pending requ...
Definition: lsq.cc:1080
gem5::o3::LSQ::recvTimingSnoopReq
void recvTimingSnoopReq(PacketPtr pkt)
Definition: lsq.cc:438
gem5::Packet::createWrite
static PacketPtr createWrite(const RequestPtr &req)
Definition: packet.hh:1013
std::vector< bool >
gem5::o3::LSQ::getStoreHead
int getStoreHead(ThreadID tid)
Returns the head index of the store queue.
Definition: lsq.cc:324
dyn_inst.hh
gem5::o3::Fetch
Fetch class handles both single threaded and SMT fetch.
Definition: fetch.hh:79
gem5::o3::LSQ::LSQRequest::getVaddr
Addr getVaddr(int idx=0) const
Definition: lsq.hh:359
gem5::PacketDataPtr
uint8_t * PacketDataPtr
Definition: packet.hh:71
gem5::o3::LSQ::SplitDataRequest::sendPacketToCache
virtual void sendPacketToCache()
Definition: lsq.cc:1260
gem5::ArmISA::i
Bitfield< 7 > i
Definition: misc_types.hh:67
iew.hh
gem5::o3::LSQ::name
std::string name() const
Returns the name of the LSQ.
Definition: lsq.cc:122
gem5::o3::LSQ::LSQRequest::~LSQRequest
virtual ~LSQRequest()
Destructor.
Definition: lsq.cc:1096
gem5::o3::LSQ::dcachePort
DcachePort dcachePort
Data port.
Definition: lsq.hh:929
gem5::o3::LSQ::DcachePort::recvTimingSnoopReq
virtual void recvTimingSnoopReq(PacketPtr pkt)
Receive a timing snoop request from the peer.
Definition: lsq.cc:1347
gem5::o3::LSQ::isEmpty
bool isEmpty() const
Returns if the LSQ is empty (both LQ and SQ are empty).
Definition: lsq.cc:578
gem5::o3::LSQ::DcachePort::DcachePort
DcachePort(LSQ *_lsq, CPU *_cpu)
Default constructor.
Definition: lsq.cc:67
gem5::o3::LSQ::activeThreads
std::list< ThreadID > * activeThreads
List of Active Threads in System.
Definition: lsq.hh:915
gem5::o3::LSQ::LSQRequest::contextId
ContextID contextId() const
Definition: lsq.cc:1106
gem5::o3::LSQ::SplitDataRequest::recvTimingResp
virtual bool recvTimingResp(PacketPtr pkt)
Definition: lsq.cc:1130
gem5::o3::LSQ::SingleDataRequest::sendPacketToCache
virtual void sendPacketToCache()
Definition: lsq.cc:1252
gem5::o3::LSQ::isStalled
bool isStalled()
Returns if the LSQ is stalled due to a memory operation that must be replayed.
Definition: lsq.cc:670
gem5::o3::LSQ::executeStore
Fault executeStore(const DynInstPtr &inst)
Executes a store.
Definition: lsq.cc:243
gem5::o3::LSQ::LSQRequest::State::Request
@ Request
gem5::o3::LSQ::cacheStorePorts
int cacheStorePorts
The number of cache ports available each cycle (stores only).
Definition: lsq.hh:879
gem5::RefCountingPtr< DynInst >
gem5::o3::LSQ::sqFull
bool sqFull()
Returns if any of the SQs are full.
Definition: lsq.cc:643
gem5::o3::LSQ::resetHtmStartsStops
void resetHtmStartsStops(ThreadID tid)
Definition: lsq.cc:359
gem5::o3::LSQ::LSQRequest::req
RequestPtr req(int idx=0)
Definition: lsq.hh:356
gem5::o3::LSQ::write
Fault write(LSQRequest *request, uint8_t *data, int store_idx)
Executes a store operation, using the store specified at the store index.
Definition: lsq.cc:1423
gem5::o3::CPU::checker
gem5::Checker< DynInstPtr > * checker
Pointer to the checker, which can dynamically verify instruction results at run time.
Definition: cpu.hh:578
gem5::RequestPort
A RequestPort is a specialisation of a Port, which implements the default protocol for the three diff...
Definition: port.hh:77
gem5::o3::LSQ::violation
bool violation()
Returns whether or not there was a memory ordering violation.
Definition: lsq.cc:287
gem5::Cycles
Cycles is a wrapper class for representing cycle counts, i.e.
Definition: types.hh:78
gem5::Packet::dataStatic
void dataStatic(T *p)
Set the data pointer to the following value that should not be freed.
Definition: packet.hh:1134
gem5::o3::LSQ::completeDataAccess
void completeDataAccess(PacketPtr pkt)
Definition: lsq.cc:393
gem5::o3::LSQ::numHtmStarts
int numHtmStarts(ThreadID tid) const
Definition: lsq.cc:342
gem5::o3::LSQ::numStores
int numStores()
Returns the total number of stores in the store queue.
Definition: lsq.cc:488
gem5::o3::LSQ::LSQRequest::packetReplied
void packetReplied()
Definition: lsq.hh:518
gem5::Flags< FlagsType >
gem5::o3::LSQUnit
Class that implements the actual LQ and SQ for each specific thread.
Definition: lsq_unit.hh:89
gem5::o3::LSQ::HtmCmdRequest::HtmCmdRequest
HtmCmdRequest(LSQUnit *port, const DynInstPtr &inst, const Request::Flags &flags_)
Definition: lsq.cc:1363
gem5::o3::LSQ::SQEntries
unsigned SQEntries
Total Size of SQ Entries.
Definition: lsq.hh:920
gem5::o3::LSQ::tick
void tick()
Ticks the LSQ.
Definition: lsq.cc:173
gem5::o3::CPU::wakeup
virtual void wakeup(ThreadID tid) override
Definition: cpu.cc:1534
gem5::o3::LSQ::LSQRequest::_inst
const DynInstPtr _inst
Definition: lsq.hh:247
gem5::o3::CPU
O3CPU class, has each of the stages (fetch through commit) within it, as well as all of the time buff...
Definition: cpu.hh:94
gem5::o3::LSQ::LSQRequest::squashTranslation
void squashTranslation()
Definition: lsq.hh:544
gem5::o3::LSQ::maxSQEntries
unsigned maxSQEntries
Max SQ Size - Used to Enforce Sharing Policies.
Definition: lsq.hh:926
gem5::ThreadContext
ThreadContext is the external interface to all thread state for anything outside of the CPU.
Definition: thread_context.hh:94
gem5::Fault
std::shared_ptr< FaultBase > Fault
Definition: types.hh:255
gem5::addrBlockAlign
Addr addrBlockAlign(Addr addr, Addr block_size)
Returns the address of the closest aligned fixed-size block to the given address.
Definition: utils.hh:66
gem5::o3::LSQ::SplitDataRequest
Definition: lsq.hh:601
gem5::o3::LSQ::isDrained
bool isDrained() const
Has the LSQ drained?
Definition: lsq.cc:144
gem5::o3::LSQ::iewStage
IEW * iewStage
The IEW stage pointer.
Definition: lsq.hh:862
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:186
gem5::Packet
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:283
gem5::ArmISA::d
Bitfield< 9 > d
Definition: misc_types.hh:64
gem5::o3::LSQ::LSQRequest::State::Fault
@ Fault
gem5::transferNeedsBurst
bool transferNeedsBurst(Addr addr, unsigned int size, unsigned int block_size)
Returns true if the given memory access (address, size) needs to be fragmented across aligned fixed-s...
Definition: utils.hh:80
gem5::o3::LSQ::LSQRequest::State
State
Definition: lsq.hh:227
gem5::probing::Packet
ProbePointArg< PacketInfo > Packet
Packet probe point.
Definition: mem.hh:109
gem5::o3::IEW
IEW handles both single threaded and SMT IEW (issue/execute/writeback).
Definition: iew.hh:87
gem5::ArmISA::v
Bitfield< 28 > v
Definition: misc_types.hh:54
gem5::o3::LSQ::LSQRequest::flags
FlagsType flags
Definition: lsq.hh:225
gem5::o3::LSQ::SplitDataRequest::mainReq
virtual RequestPtr mainReq()
Definition: lsq.cc:958
gem5::RequestPtr
std::shared_ptr< Request > RequestPtr
Definition: request.hh:92
gem5::MemCmd::ReadReq
@ ReadReq
Definition: packet.hh:86
gem5::o3::LSQ::getLatestHtmUid
uint64_t getLatestHtmUid(ThreadID tid) const
Definition: lsq.cc:366
gem5::o3::LSQ::willWB
bool willWB()
Returns if the LSQ will write back to memory this cycle.
Definition: lsq.cc:723
gem5::o3::LSQ::numFreeLoadEntries
unsigned numFreeLoadEntries()
Returns the number of free load entries.
Definition: lsq.cc:505
gem5::isAnyActiveElement
bool isAnyActiveElement(const std::vector< bool >::const_iterator &it_start, const std::vector< bool >::const_iterator &it_end)
Test if there is any active element in an enablement range.
Definition: utils.hh:89
gem5::o3::LSQ::LSQRequest::_reqs
std::vector< RequestPtr > _reqs
Definition: lsq.hh:251
gem5::ArmISA::offset
Bitfield< 23, 0 > offset
Definition: types.hh:144
gem5::InvalidThreadID
const ThreadID InvalidThreadID
Definition: types.hh:243
gem5::o3::LSQ::cachePortBusy
void cachePortBusy(bool is_load)
Another store port is in use.
Definition: lsq.cc:208
gem5::o3::LSQ::DcachePort::recvReqRetry
virtual void recvReqRetry()
Handles doing a retry of the previous send.
Definition: lsq.cc:1358
compiler.hh
gem5::o3::LSQ::SplitDataRequest::mainPacket
virtual PacketPtr mainPacket()
Definition: lsq.cc:952
gem5::o3::LSQ::setLastRetiredHtmUid
void setLastRetiredHtmUid(ThreadID tid, uint64_t htmUid)
Definition: lsq.cc:375
gem5::Addr
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:147
gem5::Packet::isError
bool isError() const
Definition: packet.hh:610
gem5::o3::LSQ::SingleDataRequest::isCacheBlockHit
virtual bool isCacheBlockHit(Addr blockAddr, Addr cacheBlockMask)
Test if the request accesses a particular cache line.
Definition: lsq.cc:1298
gem5::Packet::senderState
SenderState * senderState
This packet's sender state.
Definition: packet.hh:534
name
const std::string & name()
Definition: trace.cc:49
gem5::o3::LSQ::getMemDepViolator
DynInstPtr getMemDepViolator(ThreadID tid)
Gets the instruction that caused the memory ordering violation.
Definition: lsq.cc:306
gem5::o3::LSQ::squash
void squash(const InstSeqNum &squashed_num, ThreadID tid)
Squash instructions from a thread until the specified sequence number.
Definition: lsq.cc:281
gem5::o3::LSQ::sqEmpty
bool sqEmpty() const
Returns if all of the SQs are empty.
Definition: lsq.cc:600
gem5::o3::LSQ::drainSanityCheck
void drainSanityCheck() const
Perform sanity checks after a drain.
Definition: lsq.cc:135
gem5::o3::LSQ::maxLSQAllocation
static uint32_t maxLSQAllocation(SMTQueuePolicy pol, uint32_t entries, uint32_t numThreads, uint32_t SMTThreshold)
Auxiliary function to calculate per-thread max LSQ allocation limit.
Definition: lsq.hh:897
gem5::o3::LSQ::read
Fault read(LSQRequest *request, int load_idx)
Executes a read operation, using the load specified at the load index.
Definition: lsq.cc:1414
gem5::o3::LSQ::SingleDataRequest::recvTimingResp
virtual bool recvTimingResp(PacketPtr pkt)
Definition: lsq.cc:1120
gem5::o3::LSQ::recvReqRetry
void recvReqRetry()
Retry the previous send that failed.
Definition: lsq.cc:382
gem5::o3::LSQ::SingleDataRequest::handleLocalAccess
virtual Cycles handleLocalAccess(gem5::ThreadContext *thread, PacketPtr pkt)
Memory mapped IPR accesses.
Definition: lsq.cc:1271
gem5::o3::MaxThreads
static constexpr int MaxThreads
Definition: limits.hh:38
gem5::o3::LSQ::HtmCmdRequest::initiateTranslation
virtual void initiateTranslation()
Definition: lsq.cc:1371
panic_if
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
Definition: logging.hh:204
gem5::o3::LSQ::insertLoad
void insertLoad(const DynInstPtr &load_inst)
Inserts a load into the LSQ.
Definition: lsq.cc:219
gem5::o3::LSQ::LSQRequest::LSQRequest
LSQRequest(LSQUnit *port, const DynInstPtr &inst, bool isLoad)
Definition: lsq.cc:1032
gem5::o3::LSQ::cpu
CPU * cpu
The CPU pointer.
Definition: lsq.hh:859
gem5::o3::LSQ::hasStoresToWB
bool hasStoresToWB()
Returns whether or not there are any stores to write back to memory.
Definition: lsq.cc:695
std
Overload hash function for BasicBlockRange type.
Definition: types.hh:111
gem5::o3::LSQ::getLoadHead
int getLoadHead(ThreadID tid)
Returns the head index of the load queue for a specific thread.
Definition: lsq.cc:312
gem5::o3::LSQ::SplitDataRequest::finish
virtual void finish(const Fault &fault, const RequestPtr &req, gem5::ThreadContext *tc, BaseMMU::Mode mode)
Definition: lsq.cc:883
gem5::o3::LSQ::maxLQEntries
unsigned maxLQEntries
Max LQ Size - Used to Enforce Sharing Policies.
Definition: lsq.hh:923
gem5::o3::IEW::cacheUnblocked
void cacheUnblocked()
Notifies that the cache has become unblocked.
Definition: iew.cc:562
gem5::o3::LSQ::getLoadHeadSeqNum
InstSeqNum getLoadHeadSeqNum(ThreadID tid)
Returns the sequence number of the head of the load queue.
Definition: lsq.cc:318
gem5::o3::LSQ::HtmCmdRequest::finish
virtual void finish(const Fault &fault, const RequestPtr &req, gem5::ThreadContext *tc, BaseMMU::Mode mode)
Definition: lsq.cc:1407
gem5::Packet::dataDynamic
void dataDynamic(T *p)
Set the data pointer to a value that should have delete [] called on it.
Definition: packet.hh:1172
gem5::ContextID
int ContextID
Globally unique thread context ID.
Definition: types.hh:246
gem5::o3::LSQ::numFreeStoreEntries
unsigned numFreeStoreEntries()
Returns the number of free store entries.
Definition: lsq.cc:522
gem5::MemCmd::WriteReq
@ WriteReq
Definition: packet.hh:89
gem5::o3::LSQ::LSQRequest::initiateTranslation
virtual void initiateTranslation()=0
logging.hh
gem5::o3::LSQ::usedStorePorts
int usedStorePorts
The number of used cache ports in this cycle by stores.
Definition: lsq.hh:881
gem5::o3::LSQ::LSQ
LSQ(CPU *cpu_ptr, IEW *iew_ptr, const O3CPUParams &params)
Constructs an LSQ with the given parameters.
Definition: lsq.cc:71
gem5::InstSeqNum
uint64_t InstSeqNum
Definition: inst_seq.hh:40
gem5::MipsISA::r
r
Definition: pra_constants.hh:98
gem5::o3::LSQ::SingleDataRequest::finish
virtual void finish(const Fault &fault, const RequestPtr &req, gem5::ThreadContext *tc, BaseMMU::Mode mode)
Definition: lsq.cc:851
gem5::o3::LSQ::HtmCmdRequest
Definition: lsq.hh:589
gem5::o3::LSQ::pushRequest
Fault pushRequest(const DynInstPtr &inst, bool isLoad, uint8_t *data, unsigned int size, Addr addr, Request::Flags flags, uint64_t *res, AtomicOpFunctorPtr amo_op, const std::vector< bool > &byte_enable)
Definition: lsq.cc:764
gem5::o3::LSQ::LSQRequest::numTranslatedFragments
uint32_t numTranslatedFragments
Definition: lsq.hh:238
gem5::o3::LSQ::DcachePort::recvTimingResp
virtual bool recvTimingResp(PacketPtr pkt)
Timing version of receive.
Definition: lsq.cc:1341
gem5::o3::LSQ::LSQRequest::isLoad
bool isLoad() const
Definition: lsq.hh:270
gem5::o3::LSQ::takeOverFrom
void takeOverFrom()
Takes over execution from another CPU's thread.
Definition: lsq.cc:162
gem5::o3::LSQ::commitStores
void commitStores(InstSeqNum &youngest_inst, ThreadID tid)
Commits stores up until the given sequence number for a specific thread.
Definition: lsq.cc:257
cpu.hh
std::list< ThreadID >
gem5::Packet::getAddr
Addr getAddr() const
Definition: packet.hh:781
gem5::AtomicOpFunctorPtr
std::unique_ptr< AtomicOpFunctor > AtomicOpFunctorPtr
Definition: amo.hh:242
gem5::o3::LSQ::LSQRequest::addReq
void addReq(Addr addr, unsigned size, const std::vector< bool > &byte_enable)
Helper function used to add a (sub)request, given its address addr, size size and byte-enable mask by...
Definition: lsq.cc:1083
gem5
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
Definition: tlb.cc:60
gem5::Packet::createRead
static PacketPtr createRead(const RequestPtr &req)
Constructor-like methods that return Packets based on Request objects.
Definition: packet.hh:1007
lsq.hh
gem5::statistics::total
const FlagsType total
Print the total.
Definition: info.hh:60
limits.hh
gem5::o3::LSQ::lqFull
bool lqFull()
Returns if any of the LQs are full.
Definition: lsq.cc:616
gem5::o3::LSQ::numHtmStops
int numHtmStops(ThreadID tid) const
Definition: lsq.cc:350
gem5::o3::LSQ::LSQRequest::sendFragmentToTranslation
void sendFragmentToTranslation(int i)
Definition: lsq.cc:1112
gem5::o3::LSQ::LSQRequest::_fault
std::vector< Fault > _fault
Definition: lsq.hh:252
gem5::o3::LSQ::executeLoad
Fault executeLoad(const DynInstPtr &inst)
Executes a load.
Definition: lsq.cc:235
gem5::o3::LSQ::LSQRequest::numInTranslationFragments
uint32_t numInTranslationFragments
Definition: lsq.hh:239
gem5::o3::LSQ::usedLoadPorts
int usedLoadPorts
The number of used cache ports in this cycle by loads.
Definition: lsq.hh:885
gem5::o3::LSQ::thread
std::vector< LSQUnit > thread
The LSQ units for individual threads.
Definition: lsq.hh:932
gem5::o3::LSQ::isFull
bool isFull()
Returns if the LSQ is full (either LQ or SQ is full).
Definition: lsq.cc:551
gem5::ThreadID
int16_t ThreadID
Thread index/ID type.
Definition: types.hh:242
gem5::o3::LSQ::SingleDataRequest
Definition: lsq.hh:563
gem5::o3::LSQ::recvTimingResp
bool recvTimingResp(PacketPtr pkt)
Handles writing back and completing the load or store that has returned from memory.
Definition: lsq.cc:401
gem5::o3::IEW::name
std::string name() const
Returns the name of the IEW stage.
Definition: iew.cc:119
gem5::Packet::isInvalidate
bool isInvalidate() const
Definition: packet.hh:598
panic
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:178
gem5::ArmISA::mode
Bitfield< 4, 0 > mode
Definition: misc_types.hh:74
gem5::X86ISA::addr
Bitfield< 3 > addr
Definition: types.hh:84
gem5::Packet::getPtr
T * getPtr()
get a pointer to the data ptr.
Definition: packet.hh:1184
gem5::o3::LSQ::cacheLoadPorts
int cacheLoadPorts
The number of cache ports available each cycle (loads only).
Definition: lsq.hh:883

Generated on Wed May 4 2022 12:13:53 for gem5 by doxygen 1.8.17