gem5  v21.1.0.1
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
lsq.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2011-2012, 2014, 2017-2019 ARM Limited
3  * Copyright (c) 2013 Advanced Micro Devices, Inc.
4  * All rights reserved
5  *
6  * The license below extends only to copyright in the software and shall
7  * not be construed as granting a license to any other intellectual
8  * property including but not limited to intellectual property relating
9  * to a hardware implementation of the functionality of the software
10  * licensed hereunder. You may use the software subject to the license
11  * terms below provided that you ensure that this notice is replicated
12  * unmodified and in its entirety in all distributions of the software,
13  * modified or unmodified, in source code or in binary form.
14  *
15  * Copyright (c) 2005-2006 The Regents of The University of Michigan
16  * All rights reserved.
17  *
18  * Redistribution and use in source and binary forms, with or without
19  * modification, are permitted provided that the following conditions are
20  * met: redistributions of source code must retain the above copyright
21  * notice, this list of conditions and the following disclaimer;
22  * redistributions in binary form must reproduce the above copyright
23  * notice, this list of conditions and the following disclaimer in the
24  * documentation and/or other materials provided with the distribution;
25  * neither the name of the copyright holders nor the names of its
26  * contributors may be used to endorse or promote products derived from
27  * this software without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40  */
41 
42 #include "cpu/o3/lsq.hh"
43 
44 #include <algorithm>
45 #include <list>
46 #include <string>
47 
48 #include "base/compiler.hh"
49 #include "base/logging.hh"
50 #include "cpu/o3/cpu.hh"
51 #include "cpu/o3/dyn_inst.hh"
52 #include "cpu/o3/iew.hh"
53 #include "cpu/o3/limits.hh"
54 #include "debug/Drain.hh"
55 #include "debug/Fetch.hh"
56 #include "debug/HtmCpu.hh"
57 #include "debug/LSQ.hh"
58 #include "debug/Writeback.hh"
59 #include "params/O3CPU.hh"
60 
61 namespace gem5
62 {
63 
64 namespace o3
65 {
66 
68  _request(request), isLoad(is_load), needWB(is_load)
69 {}
70 
73 {
74  return inst->contextId();
75 }
76 
78  RequestPort(_cpu->name() + ".dcache_port", _cpu), lsq(_lsq), cpu(_cpu)
79 {}
80 
81 LSQ::LSQ(CPU *cpu_ptr, IEW *iew_ptr, const O3CPUParams &params)
82  : cpu(cpu_ptr), iewStage(iew_ptr),
83  _cacheBlocked(false),
86  lsqPolicy(params.smtLSQPolicy),
87  LQEntries(params.LQEntries),
88  SQEntries(params.SQEntries),
90  params.smtLSQThreshold)),
92  params.smtLSQThreshold)),
93  dcachePort(this, cpu_ptr),
94  numThreads(params.numThreads)
95 {
96  assert(numThreads > 0 && numThreads <= MaxThreads);
97 
98  //**********************************************
99  //************ Handle SMT Parameters ***********
100  //**********************************************
101 
102  /* Run SMT policy checks. */
103  if (lsqPolicy == SMTQueuePolicy::Dynamic) {
104  DPRINTF(LSQ, "LSQ sharing policy set to Dynamic\n");
105  } else if (lsqPolicy == SMTQueuePolicy::Partitioned) {
106  DPRINTF(Fetch, "LSQ sharing policy set to Partitioned: "
107  "%i entries per LQ | %i entries per SQ\n",
109  } else if (lsqPolicy == SMTQueuePolicy::Threshold) {
110 
111  assert(params.smtLSQThreshold > params.LQEntries);
112  assert(params.smtLSQThreshold > params.SQEntries);
113 
114  DPRINTF(LSQ, "LSQ sharing policy set to Threshold: "
115  "%i entries per LQ | %i entries per SQ\n",
117  } else {
118  panic("Invalid LSQ sharing policy. Options are: Dynamic, "
119  "Partitioned, Threshold");
120  }
121 
122  thread.reserve(numThreads);
123  for (ThreadID tid = 0; tid < numThreads; tid++) {
124  thread.emplace_back(maxLQEntries, maxSQEntries);
125  thread[tid].init(cpu, iew_ptr, params, this, tid);
126  thread[tid].setDcachePort(&dcachePort);
127  }
128 }
129 
130 
131 std::string
132 LSQ::name() const
133 {
134  return iewStage->name() + ".lsq";
135 }
136 
137 void
139 {
140  activeThreads = at_ptr;
141  assert(activeThreads != 0);
142 }
143 
144 void
146 {
147  assert(isDrained());
148 
149  for (ThreadID tid = 0; tid < numThreads; tid++)
150  thread[tid].drainSanityCheck();
151 }
152 
153 bool
155 {
156  bool drained(true);
157 
158  if (!lqEmpty()) {
159  DPRINTF(Drain, "Not drained, LQ not empty.\n");
160  drained = false;
161  }
162 
163  if (!sqEmpty()) {
164  DPRINTF(Drain, "Not drained, SQ not empty.\n");
165  drained = false;
166  }
167 
168  return drained;
169 }
170 
171 void
173 {
174  usedStorePorts = 0;
175  _cacheBlocked = false;
176 
177  for (ThreadID tid = 0; tid < numThreads; tid++) {
178  thread[tid].takeOverFrom();
179  }
180 }
181 
182 void
184 {
185  // Re-issue loads which got blocked on the per-cycle load ports limit.
188 
189  usedLoadPorts = 0;
190  usedStorePorts = 0;
191 }
192 
193 bool
195 {
196  return _cacheBlocked;
197 }
198 
199 void
201 {
202  _cacheBlocked = v;
203 }
204 
205 bool
206 LSQ::cachePortAvailable(bool is_load) const
207 {
208  bool ret;
209  if (is_load) {
211  } else {
213  }
214  return ret;
215 }
216 
217 void
218 LSQ::cachePortBusy(bool is_load)
219 {
220  assert(cachePortAvailable(is_load));
221  if (is_load) {
222  usedLoadPorts++;
223  } else {
224  usedStorePorts++;
225  }
226 }
227 
228 void
229 LSQ::insertLoad(const DynInstPtr &load_inst)
230 {
231  ThreadID tid = load_inst->threadNumber;
232 
233  thread[tid].insertLoad(load_inst);
234 }
235 
236 void
237 LSQ::insertStore(const DynInstPtr &store_inst)
238 {
239  ThreadID tid = store_inst->threadNumber;
240 
241  thread[tid].insertStore(store_inst);
242 }
243 
244 Fault
246 {
247  ThreadID tid = inst->threadNumber;
248 
249  return thread[tid].executeLoad(inst);
250 }
251 
252 Fault
254 {
255  ThreadID tid = inst->threadNumber;
256 
257  return thread[tid].executeStore(inst);
258 }
259 
260 void
262 {
263  thread.at(tid).commitLoads(youngest_inst);
264 }
265 
266 void
268 {
269  thread.at(tid).commitStores(youngest_inst);
270 }
271 
272 void
274 {
275  std::list<ThreadID>::iterator threads = activeThreads->begin();
277 
278  while (threads != end) {
279  ThreadID tid = *threads++;
280 
281  if (numStoresToWB(tid) > 0) {
282  DPRINTF(Writeback,"[tid:%i] Writing back stores. %i stores "
283  "available for Writeback.\n", tid, numStoresToWB(tid));
284  }
285 
286  thread[tid].writebackStores();
287  }
288 }
289 
290 void
291 LSQ::squash(const InstSeqNum &squashed_num, ThreadID tid)
292 {
293  thread.at(tid).squash(squashed_num);
294 }
295 
296 bool
298 {
299  /* Answers: Does Anybody Have a Violation?*/
300  std::list<ThreadID>::iterator threads = activeThreads->begin();
302 
303  while (threads != end) {
304  ThreadID tid = *threads++;
305 
306  if (thread[tid].violation())
307  return true;
308  }
309 
310  return false;
311 }
312 
313 bool LSQ::violation(ThreadID tid) { return thread.at(tid).violation(); }
314 
317 {
318  return thread.at(tid).getMemDepViolator();
319 }
320 
321 int
323 {
324  return thread.at(tid).getLoadHead();
325 }
326 
329 {
330  return thread.at(tid).getLoadHeadSeqNum();
331 }
332 
333 int
335 {
336  return thread.at(tid).getStoreHead();
337 }
338 
341 {
342  return thread.at(tid).getStoreHeadSeqNum();
343 }
344 
345 int LSQ::getCount(ThreadID tid) { return thread.at(tid).getCount(); }
346 
347 int LSQ::numLoads(ThreadID tid) { return thread.at(tid).numLoads(); }
348 
349 int LSQ::numStores(ThreadID tid) { return thread.at(tid).numStores(); }
350 
351 int
353 {
354  if (tid == InvalidThreadID)
355  return 0;
356  else
357  return thread[tid].numHtmStarts();
358 }
359 int
361 {
362  if (tid == InvalidThreadID)
363  return 0;
364  else
365  return thread[tid].numHtmStops();
366 }
367 
368 void
370 {
371  if (tid != InvalidThreadID)
372  thread[tid].resetHtmStartsStops();
373 }
374 
375 uint64_t
377 {
378  if (tid == InvalidThreadID)
379  return 0;
380  else
381  return thread[tid].getLatestHtmUid();
382 }
383 
384 void
385 LSQ::setLastRetiredHtmUid(ThreadID tid, uint64_t htmUid)
386 {
387  if (tid != InvalidThreadID)
388  thread[tid].setLastRetiredHtmUid(htmUid);
389 }
390 
391 void
393 {
395  cacheBlocked(false);
396 
397  for (ThreadID tid : *activeThreads) {
398  thread[tid].recvRetry();
399  }
400 }
401 
402 void
404 {
405  auto senderState = dynamic_cast<LSQSenderState*>(pkt->senderState);
406  thread[cpu->contextToThread(senderState->contextId())]
407  .completeDataAccess(pkt);
408 }
409 
410 bool
412 {
413  if (pkt->isError())
414  DPRINTF(LSQ, "Got error packet back for address: %#X\n",
415  pkt->getAddr());
416 
417  auto senderState = dynamic_cast<LSQSenderState*>(pkt->senderState);
418  panic_if(!senderState, "Got packet back with unknown sender state\n");
419 
420  thread[cpu->contextToThread(senderState->contextId())].recvTimingResp(pkt);
421 
422  if (pkt->isInvalidate()) {
423  // This response also contains an invalidate; e.g. this can be the case
424  // if cmd is ReadRespWithInvalidate.
425  //
426  // The calling order between completeDataAccess and checkSnoop matters.
427  // By calling checkSnoop after completeDataAccess, we ensure that the
428  // fault set by checkSnoop is not lost. Calling writeback (more
429  // specifically inst->completeAcc) in completeDataAccess overwrites
430  // fault, and in case this instruction requires squashing (as
431  // determined by checkSnoop), the ReExec fault set by checkSnoop would
432  // be lost otherwise.
433 
434  DPRINTF(LSQ, "received invalidation with response for addr:%#x\n",
435  pkt->getAddr());
436 
437  for (ThreadID tid = 0; tid < numThreads; tid++) {
438  thread[tid].checkSnoop(pkt);
439  }
440  }
441  // Update the LSQRequest state (this may delete the request)
442  senderState->request()->packetReplied();
443 
444  return true;
445 }
446 
447 void
449 {
450  DPRINTF(LSQ, "received pkt for addr:%#x %s\n", pkt->getAddr(),
451  pkt->cmdString());
452 
453  // must be a snoop
454  if (pkt->isInvalidate()) {
455  DPRINTF(LSQ, "received invalidation for addr:%#x\n",
456  pkt->getAddr());
457  for (ThreadID tid = 0; tid < numThreads; tid++) {
458  thread[tid].checkSnoop(pkt);
459  }
460  }
461 }
462 
463 int
465 {
466  unsigned total = 0;
467 
468  std::list<ThreadID>::iterator threads = activeThreads->begin();
470 
471  while (threads != end) {
472  ThreadID tid = *threads++;
473 
474  total += getCount(tid);
475  }
476 
477  return total;
478 }
479 
480 int
482 {
483  unsigned total = 0;
484 
485  std::list<ThreadID>::iterator threads = activeThreads->begin();
487 
488  while (threads != end) {
489  ThreadID tid = *threads++;
490 
491  total += numLoads(tid);
492  }
493 
494  return total;
495 }
496 
497 int
499 {
500  unsigned total = 0;
501 
502  std::list<ThreadID>::iterator threads = activeThreads->begin();
504 
505  while (threads != end) {
506  ThreadID tid = *threads++;
507 
508  total += thread[tid].numStores();
509  }
510 
511  return total;
512 }
513 
514 unsigned
516 {
517  unsigned total = 0;
518 
519  std::list<ThreadID>::iterator threads = activeThreads->begin();
521 
522  while (threads != end) {
523  ThreadID tid = *threads++;
524 
525  total += thread[tid].numFreeLoadEntries();
526  }
527 
528  return total;
529 }
530 
531 unsigned
533 {
534  unsigned total = 0;
535 
536  std::list<ThreadID>::iterator threads = activeThreads->begin();
538 
539  while (threads != end) {
540  ThreadID tid = *threads++;
541 
542  total += thread[tid].numFreeStoreEntries();
543  }
544 
545  return total;
546 }
547 
548 unsigned
550 {
551  return thread[tid].numFreeLoadEntries();
552 }
553 
554 unsigned
556 {
557  return thread[tid].numFreeStoreEntries();
558 }
559 
560 bool
562 {
563  std::list<ThreadID>::iterator threads = activeThreads->begin();
565 
566  while (threads != end) {
567  ThreadID tid = *threads++;
568 
569  if (!(thread[tid].lqFull() || thread[tid].sqFull()))
570  return false;
571  }
572 
573  return true;
574 }
575 
576 bool
578 {
579  //@todo: Change to Calculate All Entries for
580  //Dynamic Policy
581  if (lsqPolicy == SMTQueuePolicy::Dynamic)
582  return isFull();
583  else
584  return thread[tid].lqFull() || thread[tid].sqFull();
585 }
586 
587 bool
589 {
590  return lqEmpty() && sqEmpty();
591 }
592 
593 bool
595 {
598 
599  while (threads != end) {
600  ThreadID tid = *threads++;
601 
602  if (!thread[tid].lqEmpty())
603  return false;
604  }
605 
606  return true;
607 }
608 
609 bool
611 {
614 
615  while (threads != end) {
616  ThreadID tid = *threads++;
617 
618  if (!thread[tid].sqEmpty())
619  return false;
620  }
621 
622  return true;
623 }
624 
625 bool
627 {
628  std::list<ThreadID>::iterator threads = activeThreads->begin();
630 
631  while (threads != end) {
632  ThreadID tid = *threads++;
633 
634  if (!thread[tid].lqFull())
635  return false;
636  }
637 
638  return true;
639 }
640 
641 bool
643 {
644  //@todo: Change to Calculate All Entries for
645  //Dynamic Policy
646  if (lsqPolicy == SMTQueuePolicy::Dynamic)
647  return lqFull();
648  else
649  return thread[tid].lqFull();
650 }
651 
652 bool
654 {
655  std::list<ThreadID>::iterator threads = activeThreads->begin();
657 
658  while (threads != end) {
659  ThreadID tid = *threads++;
660 
661  if (!sqFull(tid))
662  return false;
663  }
664 
665  return true;
666 }
667 
668 bool
670 {
671  //@todo: Change to Calculate All Entries for
672  //Dynamic Policy
673  if (lsqPolicy == SMTQueuePolicy::Dynamic)
674  return sqFull();
675  else
676  return thread[tid].sqFull();
677 }
678 
679 bool
681 {
682  std::list<ThreadID>::iterator threads = activeThreads->begin();
684 
685  while (threads != end) {
686  ThreadID tid = *threads++;
687 
688  if (!thread[tid].isStalled())
689  return false;
690  }
691 
692  return true;
693 }
694 
695 bool
697 {
698  if (lsqPolicy == SMTQueuePolicy::Dynamic)
699  return isStalled();
700  else
701  return thread[tid].isStalled();
702 }
703 
704 bool
706 {
707  std::list<ThreadID>::iterator threads = activeThreads->begin();
709 
710  while (threads != end) {
711  ThreadID tid = *threads++;
712 
713  if (hasStoresToWB(tid))
714  return true;
715  }
716 
717  return false;
718 }
719 
720 bool
722 {
723  return thread.at(tid).hasStoresToWB();
724 }
725 
726 int
728 {
729  return thread.at(tid).numStoresToWB();
730 }
731 
732 bool
734 {
735  std::list<ThreadID>::iterator threads = activeThreads->begin();
737 
738  while (threads != end) {
739  ThreadID tid = *threads++;
740 
741  if (willWB(tid))
742  return true;
743  }
744 
745  return false;
746 }
747 
748 bool
750 {
751  return thread.at(tid).willWB();
752 }
753 
754 void
756 {
759 
760  while (threads != end) {
761  ThreadID tid = *threads++;
762 
763  thread[tid].dumpInsts();
764  }
765 }
766 
767 void
769 {
770  thread.at(tid).dumpInsts();
771 }
772 
773 Fault
774 LSQ::pushRequest(const DynInstPtr& inst, bool isLoad, uint8_t *data,
775  unsigned int size, Addr addr, Request::Flags flags, uint64_t *res,
776  AtomicOpFunctorPtr amo_op, const std::vector<bool>& byte_enable)
777 {
778  // This comming request can be either load, store or atomic.
779  // Atomic request has a corresponding pointer to its atomic memory
780  // operation
781  GEM5_VAR_USED bool isAtomic = !isLoad && amo_op;
782 
783  ThreadID tid = cpu->contextToThread(inst->contextId());
784  auto cacheLineSize = cpu->cacheLineSize();
785  bool needs_burst = transferNeedsBurst(addr, size, cacheLineSize);
786  LSQRequest* req = nullptr;
787 
788  // Atomic requests that access data across cache line boundary are
789  // currently not allowed since the cache does not guarantee corresponding
790  // atomic memory operations to be executed atomically across a cache line.
791  // For ISAs such as x86 that supports cross-cache-line atomic instructions,
792  // the cache needs to be modified to perform atomic update to both cache
793  // lines. For now, such cross-line update is not supported.
794  assert(!isAtomic || (isAtomic && !needs_burst));
795 
796  const bool htm_cmd = isLoad && (flags & Request::HTM_CMD);
797 
798  if (inst->translationStarted()) {
799  req = inst->savedReq;
800  assert(req);
801  } else {
802  if (htm_cmd) {
803  assert(addr == 0x0lu);
804  assert(size == 8);
805  req = new HtmCmdRequest(&thread[tid], inst, flags);
806  } else if (needs_burst) {
807  req = new SplitDataRequest(&thread[tid], inst, isLoad, addr,
808  size, flags, data, res);
809  } else {
810  req = new SingleDataRequest(&thread[tid], inst, isLoad, addr,
811  size, flags, data, res, std::move(amo_op));
812  }
813  assert(req);
814  req->_byteEnable = byte_enable;
815  inst->setRequest();
816  req->taskId(cpu->taskId());
817 
818  // There might be fault from a previous execution attempt if this is
819  // a strictly ordered load
820  inst->getFault() = NoFault;
821 
822  req->initiateTranslation();
823  }
824 
825  /* This is the place were instructions get the effAddr. */
826  if (req->isTranslationComplete()) {
827  if (req->isMemAccessRequired()) {
828  inst->effAddr = req->getVaddr();
829  inst->effSize = size;
830  inst->effAddrValid(true);
831 
832  if (cpu->checker) {
833  inst->reqToVerify = std::make_shared<Request>(*req->request());
834  }
835  Fault fault;
836  if (isLoad)
837  fault = cpu->read(req, inst->lqIdx);
838  else
839  fault = cpu->write(req, data, inst->sqIdx);
840  // inst->getFault() may have the first-fault of a
841  // multi-access split request at this point.
842  // Overwrite that only if we got another type of fault
843  // (e.g. re-exec).
844  if (fault != NoFault)
845  inst->getFault() = fault;
846  } else if (isLoad) {
847  inst->setMemAccPredicate(false);
848  // Commit will have to clean up whatever happened. Set this
849  // instruction as executed.
850  inst->setExecuted();
851  }
852  }
853 
854  if (inst->traceData)
855  inst->traceData->setMem(addr, size, flags);
856 
857  return inst->getFault();
858 }
859 
860 void
863 {
864  _fault.push_back(fault);
867  /* If the instruction has been squashed, let the request know
868  * as it may have to self-destruct. */
869  if (_inst->isSquashed()) {
871  } else {
872  _inst->strictlyOrdered(req->isStrictlyOrdered());
873 
874  flags.set(Flag::TranslationFinished);
875  if (fault == NoFault) {
876  _inst->physEffAddr = req->getPaddr();
877  _inst->memReqFlags = req->getFlags();
878  if (req->isCondSwap()) {
879  assert(_res);
880  req->setExtraData(*_res);
881  }
883  } else {
885  }
886 
887  LSQRequest::_inst->fault = fault;
888  LSQRequest::_inst->translationCompleted(true);
889  }
890 }
891 
892 void
895 {
896  int i;
897  for (i = 0; i < _requests.size() && _requests[i] != req; i++);
898  assert(i < _requests.size());
899  _fault[i] = fault;
900 
901  numInTranslationFragments--;
902  numTranslatedFragments++;
903 
904  if (fault == NoFault)
905  mainReq->setFlags(req->getFlags());
906 
907  if (numTranslatedFragments == _requests.size()) {
908  if (_inst->isSquashed()) {
909  squashTranslation();
910  } else {
911  _inst->strictlyOrdered(mainReq->isStrictlyOrdered());
912  flags.set(Flag::TranslationFinished);
913  _inst->translationCompleted(true);
914 
915  for (i = 0; i < _fault.size() && _fault[i] == NoFault; i++);
916  if (i > 0) {
917  _inst->physEffAddr = request(0)->getPaddr();
918  _inst->memReqFlags = mainReq->getFlags();
919  if (mainReq->isCondSwap()) {
920  assert (i == _fault.size());
921  assert(_res);
922  mainReq->setExtraData(*_res);
923  }
924  if (i == _fault.size()) {
925  _inst->fault = NoFault;
926  setState(State::Request);
927  } else {
928  _inst->fault = _fault[i];
929  setState(State::PartialFault);
930  }
931  } else {
932  _inst->fault = _fault[0];
933  setState(State::Fault);
934  }
935  }
936 
937  }
938 }
939 
940 void
942 {
943  assert(_requests.size() == 0);
944 
945  addRequest(_addr, _size, _byteEnable);
946 
947  if (_requests.size() > 0) {
948  _requests.back()->setReqInstSeqNum(_inst->seqNum);
949  _requests.back()->taskId(_taskId);
950  _inst->translationStarted(true);
951  setState(State::Translation);
952  flags.set(Flag::TranslationStarted);
953 
954  _inst->savedReq = this;
955  sendFragmentToTranslation(0);
956  } else {
957  _inst->setMemAccPredicate(false);
958  }
959 }
960 
961 PacketPtr
963 {
964  return _mainPacket;
965 }
966 
969 {
970  return mainReq;
971 }
972 
973 void
975 {
976  auto cacheLineSize = _port.cacheLineSize();
977  Addr base_addr = _addr;
978  Addr next_addr = addrBlockAlign(_addr + cacheLineSize, cacheLineSize);
979  Addr final_addr = addrBlockAlign(_addr + _size, cacheLineSize);
980  uint32_t size_so_far = 0;
981 
982  mainReq = std::make_shared<Request>(base_addr,
983  _size, _flags, _inst->requestorId(),
984  _inst->instAddr(), _inst->contextId());
985  mainReq->setByteEnable(_byteEnable);
986 
987  // Paddr is not used in mainReq. However, we will accumulate the flags
988  // from the sub requests into mainReq by calling setFlags() in finish().
989  // setFlags() assumes that paddr is set so flip the paddr valid bit here to
990  // avoid a potential assert in setFlags() when we call it from finish().
991  mainReq->setPaddr(0);
992 
993  /* Get the pre-fix, possibly unaligned. */
994  auto it_start = _byteEnable.begin();
995  auto it_end = _byteEnable.begin() + (next_addr - base_addr);
996  addRequest(base_addr, next_addr - base_addr,
997  std::vector<bool>(it_start, it_end));
998  size_so_far = next_addr - base_addr;
999 
1000  /* We are block aligned now, reading whole blocks. */
1001  base_addr = next_addr;
1002  while (base_addr != final_addr) {
1003  auto it_start = _byteEnable.begin() + size_so_far;
1004  auto it_end = _byteEnable.begin() + size_so_far + cacheLineSize;
1005  addRequest(base_addr, cacheLineSize,
1006  std::vector<bool>(it_start, it_end));
1007  size_so_far += cacheLineSize;
1008  base_addr += cacheLineSize;
1009  }
1010 
1011  /* Deal with the tail. */
1012  if (size_so_far < _size) {
1013  auto it_start = _byteEnable.begin() + size_so_far;
1014  auto it_end = _byteEnable.end();
1015  addRequest(base_addr, _size - size_so_far,
1016  std::vector<bool>(it_start, it_end));
1017  }
1018 
1019  if (_requests.size() > 0) {
1020  /* Setup the requests and send them to translation. */
1021  for (auto& r: _requests) {
1022  r->setReqInstSeqNum(_inst->seqNum);
1023  r->taskId(_taskId);
1024  }
1025 
1026  _inst->translationStarted(true);
1027  setState(State::Translation);
1028  flags.set(Flag::TranslationStarted);
1029  _inst->savedReq = this;
1030  numInTranslationFragments = 0;
1031  numTranslatedFragments = 0;
1032  _fault.resize(_requests.size());
1033 
1034  for (uint32_t i = 0; i < _requests.size(); i++) {
1035  sendFragmentToTranslation(i);
1036  }
1037  } else {
1038  _inst->setMemAccPredicate(false);
1039  }
1040 }
1041 
1043  LSQUnit *port, const DynInstPtr& inst, bool isLoad) :
1044  _state(State::NotIssued), _senderState(nullptr),
1045  _port(*port), _inst(inst), _data(nullptr),
1046  _res(nullptr), _addr(0), _size(0), _flags(0),
1047  _numOutstandingPackets(0), _amo_op(nullptr)
1048 {
1049  flags.set(Flag::IsLoad, isLoad);
1050  flags.set(Flag::WbStore,
1051  _inst->isStoreConditional() || _inst->isAtomic());
1052  flags.set(Flag::IsAtomic, _inst->isAtomic());
1053  install();
1054 }
1055 
1057  LSQUnit *port, const DynInstPtr& inst, bool isLoad,
1058  const Addr& addr, const uint32_t& size, const Request::Flags& flags_,
1059  PacketDataPtr data, uint64_t* res, AtomicOpFunctorPtr amo_op)
1060  : _state(State::NotIssued), _senderState(nullptr),
1061  numTranslatedFragments(0),
1062  numInTranslationFragments(0),
1063  _port(*port), _inst(inst), _data(data),
1064  _res(res), _addr(addr), _size(size),
1065  _flags(flags_),
1066  _numOutstandingPackets(0),
1067  _amo_op(std::move(amo_op))
1068 {
1069  flags.set(Flag::IsLoad, isLoad);
1070  flags.set(Flag::WbStore,
1071  _inst->isStoreConditional() || _inst->isAtomic());
1072  flags.set(Flag::IsAtomic, _inst->isAtomic());
1073  install();
1074 }
1075 
1076 void
1078 {
1079  if (isLoad()) {
1080  _port.loadQueue[_inst->lqIdx].setRequest(this);
1081  } else {
1082  // Store, StoreConditional, and Atomic requests are pushed
1083  // to this storeQueue
1084  _port.storeQueue[_inst->sqIdx].setRequest(this);
1085  }
1086 }
1087 
1088 bool LSQ::LSQRequest::squashed() const { return _inst->isSquashed(); }
1089 
1090 void
1092  const std::vector<bool>& byte_enable)
1093 {
1094  if (isAnyActiveElement(byte_enable.begin(), byte_enable.end())) {
1095  auto request = std::make_shared<Request>(
1096  addr, size, _flags, _inst->requestorId(),
1097  _inst->instAddr(), _inst->contextId(),
1098  std::move(_amo_op));
1099  request->setByteEnable(byte_enable);
1100  _requests.push_back(request);
1101  }
1102 }
1103 
1105 {
1106  assert(!isAnyOutstandingRequest());
1107  _inst->savedReq = nullptr;
1108  if (_senderState)
1109  delete _senderState;
1110 
1111  for (auto r: _packets)
1112  delete r;
1113 };
1114 
1115 void
1117 {
1118  numInTranslationFragments++;
1119  _port.getMMUPtr()->translateTiming(request(i), _inst->thread->getTC(),
1120  this, isLoad() ? BaseMMU::Read : BaseMMU::Write);
1121 }
1122 
1123 bool
1125 {
1126  assert(_numOutstandingPackets == 1);
1127  auto state = dynamic_cast<LSQSenderState*>(pkt->senderState);
1128  flags.set(Flag::Complete);
1129  state->outstanding--;
1130  assert(pkt == _packets.front());
1131  _port.completeDataAccess(pkt);
1132  return true;
1133 }
1134 
1135 bool
1137 {
1138  auto state = dynamic_cast<LSQSenderState*>(pkt->senderState);
1139  uint32_t pktIdx = 0;
1140  while (pktIdx < _packets.size() && pkt != _packets[pktIdx])
1141  pktIdx++;
1142  assert(pktIdx < _packets.size());
1143  numReceivedPackets++;
1144  state->outstanding--;
1145  if (numReceivedPackets == _packets.size()) {
1146  flags.set(Flag::Complete);
1147  /* Assemble packets. */
1148  PacketPtr resp = isLoad()
1149  ? Packet::createRead(mainReq)
1150  : Packet::createWrite(mainReq);
1151  if (isLoad())
1152  resp->dataStatic(_inst->memData);
1153  else
1154  resp->dataStatic(_data);
1155  resp->senderState = _senderState;
1156  _port.completeDataAccess(resp);
1157  delete resp;
1158  }
1159  return true;
1160 }
1161 
1162 void
1164 {
1165  assert(_senderState);
1166  /* Retries do not create new packets. */
1167  if (_packets.size() == 0) {
1168  _packets.push_back(
1169  isLoad()
1170  ? Packet::createRead(request())
1171  : Packet::createWrite(request()));
1172  _packets.back()->dataStatic(_inst->memData);
1173  _packets.back()->senderState = _senderState;
1174 
1175  // hardware transactional memory
1176  // If request originates in a transaction (not necessarily a HtmCmd),
1177  // then the packet should be marked as such.
1178  if (_inst->inHtmTransactionalState()) {
1179  _packets.back()->setHtmTransactional(
1180  _inst->getHtmTransactionUid());
1181 
1182  DPRINTF(HtmCpu,
1183  "HTM %s pc=0x%lx - vaddr=0x%lx - paddr=0x%lx - htmUid=%u\n",
1184  isLoad() ? "LD" : "ST",
1185  _inst->instAddr(),
1186  _packets.back()->req->hasVaddr() ?
1187  _packets.back()->req->getVaddr() : 0lu,
1188  _packets.back()->getAddr(),
1189  _inst->getHtmTransactionUid());
1190  }
1191  }
1192  assert(_packets.size() == 1);
1193 }
1194 
1195 void
1197 {
1198  /* Extra data?? */
1199  Addr base_address = _addr;
1200 
1201  if (_packets.size() == 0) {
1202  /* New stuff */
1203  if (isLoad()) {
1204  _mainPacket = Packet::createRead(mainReq);
1205  _mainPacket->dataStatic(_inst->memData);
1206 
1207  // hardware transactional memory
1208  // If request originates in a transaction,
1209  // packet should be marked as such
1210  if (_inst->inHtmTransactionalState()) {
1211  _mainPacket->setHtmTransactional(
1212  _inst->getHtmTransactionUid());
1213  DPRINTF(HtmCpu,
1214  "HTM LD.0 pc=0x%lx-vaddr=0x%lx-paddr=0x%lx-htmUid=%u\n",
1215  _inst->instAddr(),
1216  _mainPacket->req->hasVaddr() ?
1217  _mainPacket->req->getVaddr() : 0lu,
1218  _mainPacket->getAddr(),
1219  _inst->getHtmTransactionUid());
1220  }
1221  }
1222  for (int i = 0; i < _requests.size() && _fault[i] == NoFault; i++) {
1223  RequestPtr r = _requests[i];
1224  PacketPtr pkt = isLoad() ? Packet::createRead(r)
1226  ptrdiff_t offset = r->getVaddr() - base_address;
1227  if (isLoad()) {
1228  pkt->dataStatic(_inst->memData + offset);
1229  } else {
1230  uint8_t* req_data = new uint8_t[r->getSize()];
1231  std::memcpy(req_data,
1232  _inst->memData + offset,
1233  r->getSize());
1234  pkt->dataDynamic(req_data);
1235  }
1236  pkt->senderState = _senderState;
1237  _packets.push_back(pkt);
1238 
1239  // hardware transactional memory
1240  // If request originates in a transaction,
1241  // packet should be marked as such
1242  if (_inst->inHtmTransactionalState()) {
1243  _packets.back()->setHtmTransactional(
1244  _inst->getHtmTransactionUid());
1245  DPRINTF(HtmCpu,
1246  "HTM %s.%d pc=0x%lx-vaddr=0x%lx-paddr=0x%lx-htmUid=%u\n",
1247  isLoad() ? "LD" : "ST",
1248  i+1,
1249  _inst->instAddr(),
1250  _packets.back()->req->hasVaddr() ?
1251  _packets.back()->req->getVaddr() : 0lu,
1252  _packets.back()->getAddr(),
1253  _inst->getHtmTransactionUid());
1254  }
1255  }
1256  }
1257  assert(_packets.size() > 0);
1258 }
1259 
1260 void
1262 {
1263  assert(_numOutstandingPackets == 0);
1264  if (lsqUnit()->trySendPacket(isLoad(), _packets.at(0)))
1265  _numOutstandingPackets = 1;
1266 }
1267 
1268 void
1270 {
1271  /* Try to send the packets. */
1272  while (numReceivedPackets + _numOutstandingPackets < _packets.size() &&
1273  lsqUnit()->trySendPacket(isLoad(),
1274  _packets.at(numReceivedPackets + _numOutstandingPackets))) {
1275  _numOutstandingPackets++;
1276  }
1277 }
1278 
1279 Cycles
1282 {
1283  return pkt->req->localAccessor(thread, pkt);
1284 }
1285 
1286 Cycles
1289 {
1290  Cycles delay(0);
1291  unsigned offset = 0;
1292 
1293  for (auto r: _requests) {
1294  PacketPtr pkt =
1295  new Packet(r, isLoad() ? MemCmd::ReadReq : MemCmd::WriteReq);
1296  pkt->dataStatic(mainPkt->getPtr<uint8_t>() + offset);
1297  Cycles d = r->localAccessor(thread, pkt);
1298  if (d > delay)
1299  delay = d;
1300  offset += r->getSize();
1301  delete pkt;
1302  }
1303  return delay;
1304 }
1305 
1306 bool
1308 {
1309  return ( (LSQRequest::_requests[0]->getPaddr() & blockMask) == blockAddr);
1310 }
1311 
1327 bool
1329 {
1330  bool is_hit = false;
1331  for (auto &r: _requests) {
1341  if (r->hasPaddr() && (r->getPaddr() & blockMask) == blockAddr) {
1342  is_hit = true;
1343  break;
1344  }
1345  }
1346  return is_hit;
1347 }
1348 
1349 bool
1351 {
1352  return lsq->recvTimingResp(pkt);
1353 }
1354 
1355 void
1357 {
1358  for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
1359  if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
1360  cpu->wakeup(tid);
1361  }
1362  }
1363  lsq->recvTimingSnoopReq(pkt);
1364 }
1365 
1366 void
1368 {
1369  lsq->recvReqRetry();
1370 }
1371 
1373  const Request::Flags& flags_) :
1374  SingleDataRequest(port, inst, true, 0x0lu, 8, flags_,
1375  nullptr, nullptr, nullptr)
1376 {
1377  assert(_requests.size() == 0);
1378 
1380 
1381  if (_requests.size() > 0) {
1382  _requests.back()->setReqInstSeqNum(_inst->seqNum);
1383  _requests.back()->taskId(_taskId);
1384  _requests.back()->setPaddr(_addr);
1385  _requests.back()->setInstCount(_inst->getCpuPtr()->totalInsts());
1386 
1387  _inst->strictlyOrdered(_requests.back()->isStrictlyOrdered());
1388  _inst->fault = NoFault;
1389  _inst->physEffAddr = _requests.back()->getPaddr();
1390  _inst->memReqFlags = _requests.back()->getFlags();
1391  _inst->savedReq = this;
1392 
1394  } else {
1395  panic("unexpected behaviour");
1396  }
1397 }
1398 
1399 void
1401 {
1402  // Transaction commands are implemented as loads to avoid significant
1403  // changes to the cpu and memory interfaces
1404  // The virtual and physical address uses a dummy value of 0x00
1405  // Address translation does not really occur thus the code below
1406 
1407  flags.set(Flag::TranslationStarted);
1408  flags.set(Flag::TranslationFinished);
1409 
1410  _inst->translationStarted(true);
1411  _inst->translationCompleted(true);
1412 
1413  setState(State::Request);
1414 }
1415 
1416 void
1419 {
1420  panic("unexpected behaviour");
1421 }
1422 
1423 Fault
1424 LSQ::read(LSQRequest* req, int load_idx)
1425 {
1426  ThreadID tid = cpu->contextToThread(req->request()->contextId());
1427 
1428  return thread.at(tid).read(req, load_idx);
1429 }
1430 
1431 Fault
1432 LSQ::write(LSQRequest* req, uint8_t *data, int store_idx)
1433 {
1434  ThreadID tid = cpu->contextToThread(req->request()->contextId());
1435 
1436  return thread.at(tid).write(req, data, store_idx);
1437 }
1438 
1439 } // namespace o3
1440 } // namespace gem5
gem5::o3::LSQ::LSQRequest::State::Translation
@ Translation
gem5::o3::LSQ::LQEntries
unsigned LQEntries
Total Size of LQ Entries.
Definition: lsq.hh:1055
gem5::Packet::cmdString
const std::string & cmdString() const
Return the string name of the cmd field (for debugging and tracing).
Definition: packet.hh:577
gem5::o3::LSQ::lsqPolicy
SMTQueuePolicy lsqPolicy
The LSQ policy for SMT mode.
Definition: lsq.hh:1026
gem5::o3::LSQ::insertStore
void insertStore(const DynInstPtr &store_inst)
Inserts a store into the LSQ.
Definition: lsq.cc:237
gem5::BaseMMU::Read
@ Read
Definition: mmu.hh:53
gem5::o3::LSQ::LSQRequest::_res
uint64_t * _res
Definition: lsq.hh:295
gem5::o3::LSQ::LSQRequest
Memory operation metadata.
Definition: lsq.hh:231
gem5::o3::LSQ::SingleDataRequest::initiateTranslation
virtual void initiateTranslation()
Definition: lsq.cc:941
gem5::NoFault
constexpr decltype(nullptr) NoFault
Definition: types.hh:260
gem5::o3::LSQ::commitLoads
void commitLoads(InstSeqNum &youngest_inst, ThreadID tid)
Commits loads up until the given sequence number for a specific thread.
Definition: lsq.cc:261
gem5::o3::LSQ::LSQRequest::_addr
const Addr _addr
Definition: lsq.hh:296
gem5::o3::LSQ::numStoresToWB
int numStoresToWB(ThreadID tid)
Returns the number of stores a specific thread has to write back.
Definition: lsq.cc:727
gem5::o3::LSQ::cacheBlocked
bool cacheBlocked() const
Is D-cache blocked?
Definition: lsq.cc:194
gem5::o3::LSQ::LSQRequest::isTranslationComplete
bool isTranslationComplete()
Definition: lsq.hh:526
gem5::o3::LSQ::LSQRequest::_taskId
uint32_t _taskId
Definition: lsq.hh:290
gem5::o3::LSQ::LSQRequest::_byteEnable
std::vector< bool > _byteEnable
Definition: lsq.hh:299
gem5::o3::LSQ::numThreads
ThreadID numThreads
Number of Threads.
Definition: lsq.hh:1072
gem5::o3::LSQ::LSQRequest::taskId
void taskId(const uint32_t &v)
Definition: lsq.hh:400
data
const char data[]
Definition: circlebuf.test.cc:48
gem5::o3::LSQ::dumpInsts
void dumpInsts() const
Debugging function to print out all instructions.
Definition: lsq.cc:755
gem5::o3::LSQ::getCount
int getCount()
Returns the number of instructions in all of the queues.
Definition: lsq.cc:464
gem5::o3::LSQ::SplitDataRequest::buildPackets
virtual void buildPackets()
Definition: lsq.cc:1196
gem5::o3::LSQ::writebackStores
void writebackStores()
Attempts to write back stores until all cache ports are used or the interface becomes blocked.
Definition: lsq.cc:273
gem5::o3::LSQ::setActiveThreads
void setActiveThreads(std::list< ThreadID > *at_ptr)
Sets the pointer to the list of active threads.
Definition: lsq.cc:138
gem5::o3::LSQ::read
Fault read(LSQRequest *req, int load_idx)
Executes a read operation, using the load specified at the load index.
Definition: lsq.cc:1424
gem5::o3::LSQ::lqEmpty
bool lqEmpty() const
Returns if all of the LQs are empty.
Definition: lsq.cc:594
gem5::BaseMMU::Mode
Mode
Definition: mmu.hh:53
gem5::Flags::set
void set(Type mask)
Set all flag's bits matching the given mask.
Definition: flags.hh:116
gem5::Packet::req
RequestPtr req
A pointer to the original request.
Definition: packet.hh:366
gem5::o3::LSQ::_cacheBlocked
bool _cacheBlocked
D-cache is blocked.
Definition: lsq.hh:1014
gem5::o3::LSQ::SingleDataRequest::buildPackets
virtual void buildPackets()
Definition: lsq.cc:1163
gem5::o3::LSQ::LSQRequest::install
void install()
Install the request in the LQ/SQ.
Definition: lsq.cc:1077
gem5::BaseMMU::Write
@ Write
Definition: mmu.hh:53
gem5::o3::LSQ::SplitDataRequest::initiateTranslation
virtual void initiateTranslation()
Definition: lsq.cc:974
gem5::BaseCPU::cacheLineSize
unsigned int cacheLineSize() const
Get the cache line size of the system.
Definition: base.hh:381
gem5::o3::LSQ::LSQRequest::setState
void setState(const State &newState)
Definition: lsq.hh:276
gem5::AddressMonitor::doMonitor
bool doMonitor(PacketPtr pkt)
Definition: base.cc:688
gem5::o3::LSQ::SplitDataRequest::handleLocalAccess
virtual Cycles handleLocalAccess(gem5::ThreadContext *thread, PacketPtr pkt)
Memory mapped IPR accesses.
Definition: lsq.cc:1287
gem5::Complete
@ Complete
Definition: misc.hh:59
gem5::o3::LSQ::LSQRequest::isMemAccessRequired
bool isMemAccessRequired()
Definition: lsq.hh:553
gem5::o3::LSQ::getStoreHeadSeqNum
InstSeqNum getStoreHeadSeqNum(ThreadID tid)
Returns the sequence number of the head of the store queue.
Definition: lsq.cc:340
gem5::o3::LSQ::numLoads
int numLoads()
Returns the total number of loads in the load queue.
Definition: lsq.cc:481
gem5::o3::LSQ::SplitDataRequest::isCacheBlockHit
virtual bool isCacheBlockHit(Addr blockAddr, Addr cacheBlockMask)
Caches may probe into the load-store queue to enforce memory ordering guarantees.
Definition: lsq.cc:1328
gem5::o3::LSQ::cachePortAvailable
bool cachePortAvailable(bool is_load) const
Is any store port available to use?
Definition: lsq.cc:206
gem5::o3::LSQ::LSQSenderState::outstanding
uint8_t outstanding
Number of outstanding packets to complete.
Definition: lsq.hh:97
gem5::Request::HTM_CMD
static const FlagsType HTM_CMD
Definition: request.hh:247
gem5::o3::LSQ
Definition: lsq.hh:75
gem5::o3::LSQ::LSQRequest::squashed
bool squashed() const override
This function is used by the page table walker to determine if it should translate the a pending requ...
Definition: lsq.cc:1088
gem5::o3::LSQ::recvTimingSnoopReq
void recvTimingSnoopReq(PacketPtr pkt)
Definition: lsq.cc:448
gem5::Packet::createWrite
static PacketPtr createWrite(const RequestPtr &req)
Definition: packet.hh:1013
std::vector< bool >
gem5::o3::LSQ::getStoreHead
int getStoreHead(ThreadID tid)
Returns the head index of the store queue.
Definition: lsq.cc:334
dyn_inst.hh
gem5::o3::Fetch
Fetch class handles both single threaded and SMT fetch.
Definition: fetch.hh:79
gem5::o3::LSQ::LSQRequest::getVaddr
Addr getVaddr(int idx=0) const
Definition: lsq.hh:416
gem5::PacketDataPtr
uint8_t * PacketDataPtr
Definition: packet.hh:71
gem5::o3::LSQ::SplitDataRequest::sendPacketToCache
virtual void sendPacketToCache()
Definition: lsq.cc:1269
gem5::ArmISA::i
Bitfield< 7 > i
Definition: misc_types.hh:66
iew.hh
gem5::o3::LSQ::name
std::string name() const
Returns the name of the LSQ.
Definition: lsq.cc:132
gem5::o3::LSQ::LSQRequest::~LSQRequest
virtual ~LSQRequest()
Destructor.
Definition: lsq.cc:1104
gem5::o3::LSQ::dcachePort
DcachePort dcachePort
Data port.
Definition: lsq.hh:1066
gem5::o3::LSQ::DcachePort::recvTimingSnoopReq
virtual void recvTimingSnoopReq(PacketPtr pkt)
Receive a timing snoop request from the peer.
Definition: lsq.cc:1356
gem5::o3::LSQ::isEmpty
bool isEmpty() const
Returns if the LSQ is empty (both LQ and SQ are empty).
Definition: lsq.cc:588
gem5::o3::LSQ::DcachePort::DcachePort
DcachePort(LSQ *_lsq, CPU *_cpu)
Default constructor.
Definition: lsq.cc:77
gem5::o3::LSQ::activeThreads
std::list< ThreadID > * activeThreads
List of Active Threads in System.
Definition: lsq.hh:1052
gem5::o3::LSQ::SplitDataRequest::recvTimingResp
virtual bool recvTimingResp(PacketPtr pkt)
Definition: lsq.cc:1136
gem5::o3::LSQ::SingleDataRequest::sendPacketToCache
virtual void sendPacketToCache()
Definition: lsq.cc:1261
gem5::o3::LSQ::isStalled
bool isStalled()
Returns if the LSQ is stalled due to a memory operation that must be replayed.
Definition: lsq.cc:680
gem5::o3::LSQ::executeStore
Fault executeStore(const DynInstPtr &inst)
Executes a store.
Definition: lsq.cc:253
gem5::o3::LSQ::LSQRequest::State::Request
@ Request
gem5::o3::LSQ::LSQSenderState::LSQSenderState
LSQSenderState(LSQRequest *request, bool is_load)
Default constructor.
Definition: lsq.cc:67
gem5::o3::LSQ::cacheStorePorts
int cacheStorePorts
The number of cache ports available each cycle (stores only).
Definition: lsq.hh:1016
gem5::RefCountingPtr< DynInst >
gem5::o3::LSQ::sqFull
bool sqFull()
Returns if any of the SQs are full.
Definition: lsq.cc:653
gem5::o3::LSQ::resetHtmStartsStops
void resetHtmStartsStops(ThreadID tid)
Definition: lsq.cc:369
gem5::o3::CPU::checker
gem5::Checker< DynInstPtr > * checker
Pointer to the checker, which can dynamically verify instruction results at run time.
Definition: cpu.hh:602
gem5::RequestPort
A RequestPort is a specialisation of a Port, which implements the default protocol for the three diff...
Definition: port.hh:77
gem5::o3::LSQ::violation
bool violation()
Returns whether or not there was a memory ordering violation.
Definition: lsq.cc:297
gem5::Cycles
Cycles is a wrapper class for representing cycle counts, i.e.
Definition: types.hh:78
gem5::BaseCPU::numThreads
ThreadID numThreads
Number of threads we're actually simulating (<= SMT_MAX_THREADS).
Definition: base.hh:368
gem5::Packet::dataStatic
void dataStatic(T *p)
Set the data pointer to the following value that should not be freed.
Definition: packet.hh:1134
gem5::o3::LSQ::completeDataAccess
void completeDataAccess(PacketPtr pkt)
Definition: lsq.cc:403
gem5::o3::LSQ::numHtmStarts
int numHtmStarts(ThreadID tid) const
Definition: lsq.cc:352
gem5::o3::LSQ::LSQRequest::_size
const uint32_t _size
Definition: lsq.hh:297
gem5::o3::LSQ::numStores
int numStores()
Returns the total number of stores in the store queue.
Definition: lsq.cc:498
gem5::Flags< FlagsType >
gem5::o3::LSQUnit
Class that implements the actual LQ and SQ for each specific thread.
Definition: lsq_unit.hh:90
gem5::o3::LSQ::HtmCmdRequest::HtmCmdRequest
HtmCmdRequest(LSQUnit *port, const DynInstPtr &inst, const Request::Flags &flags_)
Definition: lsq.cc:1372
gem5::o3::LSQ::SQEntries
unsigned SQEntries
Total Size of SQ Entries.
Definition: lsq.hh:1057
gem5::BaseCPU::taskId
uint32_t taskId() const
Get cpu task id.
Definition: base.hh:212
gem5::o3::LSQ::tick
void tick()
Ticks the LSQ.
Definition: lsq.cc:183
gem5::o3::CPU::wakeup
virtual void wakeup(ThreadID tid) override
Definition: cpu.cc:1615
gem5::o3::LSQ::LSQRequest::_inst
const DynInstPtr _inst
Definition: lsq.hh:289
gem5::o3::CPU
O3CPU class, has each of the stages (fetch through commit) within it, as well as all of the time buff...
Definition: cpu.hh:95
gem5::o3::LSQ::write
Fault write(LSQRequest *req, uint8_t *data, int store_idx)
Executes a store operation, using the store specified at the store index.
Definition: lsq.cc:1432
gem5::o3::LSQ::LSQRequest::squashTranslation
void squashTranslation()
Definition: lsq.hh:610
gem5::o3::LSQ::maxSQEntries
unsigned maxSQEntries
Max SQ Size - Used to Enforce Sharing Policies.
Definition: lsq.hh:1063
gem5::ThreadContext
ThreadContext is the external interface to all thread state for anything outside of the CPU.
Definition: thread_context.hh:93
gem5::Fault
std::shared_ptr< FaultBase > Fault
Definition: types.hh:255
gem5::addrBlockAlign
Addr addrBlockAlign(Addr addr, Addr block_size)
Returns the address of the closest aligned fixed-size block to the given address.
Definition: utils.hh:66
gem5::o3::LSQ::SplitDataRequest
Definition: lsq.hh:709
gem5::o3::LSQ::isDrained
bool isDrained() const
Has the LSQ drained?
Definition: lsq.cc:154
gem5::o3::LSQ::iewStage
IEW * iewStage
The IEW stage pointer.
Definition: lsq.hh:999
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:186
gem5::Packet
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:283
gem5::ArmISA::d
Bitfield< 9 > d
Definition: misc_types.hh:63
gem5::o3::LSQ::LSQRequest::State::Fault
@ Fault
gem5::transferNeedsBurst
bool transferNeedsBurst(Addr addr, unsigned int size, unsigned int block_size)
Returns true if the given memory access (address, size) needs to be fragmented across aligned fixed-s...
Definition: utils.hh:80
gem5::o3::LSQ::LSQRequest::State
State
Definition: lsq.hh:266
gem5::probing::Packet
ProbePointArg< PacketInfo > Packet
Packet probe point.
Definition: mem.hh:109
gem5::o3::IEW
IEW handles both single threaded and SMT IEW (issue/execute/writeback).
Definition: iew.hh:87
gem5::ArmISA::v
Bitfield< 28 > v
Definition: misc_types.hh:54
gem5::o3::LSQ::LSQRequest::flags
FlagsType flags
Definition: lsq.hh:264
gem5::RequestPtr
std::shared_ptr< Request > RequestPtr
Definition: request.hh:92
gem5::MemCmd::ReadReq
@ ReadReq
Definition: packet.hh:86
gem5::o3::LSQ::getLatestHtmUid
uint64_t getLatestHtmUid(ThreadID tid) const
Definition: lsq.cc:376
gem5::o3::LSQ::willWB
bool willWB()
Returns if the LSQ will write back to memory this cycle.
Definition: lsq.cc:733
gem5::o3::LSQ::numFreeLoadEntries
unsigned numFreeLoadEntries()
Returns the number of free load entries.
Definition: lsq.cc:515
gem5::isAnyActiveElement
bool isAnyActiveElement(const std::vector< bool >::const_iterator &it_start, const std::vector< bool >::const_iterator &it_end)
Test if there is any active element in an enablement range.
Definition: utils.hh:89
gem5::ArmISA::offset
Bitfield< 23, 0 > offset
Definition: types.hh:144
gem5::InvalidThreadID
const ThreadID InvalidThreadID
Definition: types.hh:243
gem5::o3::LSQ::cachePortBusy
void cachePortBusy(bool is_load)
Another store port is in use.
Definition: lsq.cc:218
gem5::o3::LSQ::DcachePort::recvReqRetry
virtual void recvReqRetry()
Handles doing a retry of the previous send.
Definition: lsq.cc:1367
compiler.hh
gem5::BaseCPU::contextToThread
ThreadID contextToThread(ContextID cid)
Convert ContextID to threadID.
Definition: base.hh:298
gem5::o3::LSQ::LSQRequest::request
RequestPtr request(int idx=0)
Definition: lsq.hh:408
gem5::o3::LSQ::SplitDataRequest::mainPacket
virtual PacketPtr mainPacket()
Definition: lsq.cc:962
gem5::o3::LSQ::setLastRetiredHtmUid
void setLastRetiredHtmUid(ThreadID tid, uint64_t htmUid)
Definition: lsq.cc:385
gem5::BaseCPU::getCpuAddrMonitor
AddressMonitor * getCpuAddrMonitor(ThreadID tid)
Definition: base.hh:609
gem5::Addr
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:147
gem5::Packet::isError
bool isError() const
Definition: packet.hh:610
gem5::o3::LSQ::SingleDataRequest::isCacheBlockHit
virtual bool isCacheBlockHit(Addr blockAddr, Addr cacheBlockMask)
Test if the request accesses a particular cache line.
Definition: lsq.cc:1307
gem5::Packet::senderState
SenderState * senderState
This packet's sender state.
Definition: packet.hh:534
gem5::o3::LSQ::getMemDepViolator
DynInstPtr getMemDepViolator(ThreadID tid)
Gets the instruction that caused the memory ordering violation.
Definition: lsq.cc:316
gem5::o3::LSQ::LSQSenderState::contextId
ContextID contextId()
Definition: lsq.cc:72
gem5::o3::LSQ::squash
void squash(const InstSeqNum &squashed_num, ThreadID tid)
Squash instructions from a thread until the specified sequence number.
Definition: lsq.cc:291
gem5::o3::LSQ::sqEmpty
bool sqEmpty() const
Returns if all of the SQs are empty.
Definition: lsq.cc:610
gem5::o3::LSQ::drainSanityCheck
void drainSanityCheck() const
Perform sanity checks after a drain.
Definition: lsq.cc:145
gem5::o3::LSQ::maxLSQAllocation
static uint32_t maxLSQAllocation(SMTQueuePolicy pol, uint32_t entries, uint32_t numThreads, uint32_t SMTThreshold)
Auxiliary function to calculate per-thread max LSQ allocation limit.
Definition: lsq.hh:1034
gem5::o3::LSQ::SingleDataRequest::recvTimingResp
virtual bool recvTimingResp(PacketPtr pkt)
Definition: lsq.cc:1124
gem5::o3::LSQ::recvReqRetry
void recvReqRetry()
Retry the previous send that failed.
Definition: lsq.cc:392
gem5::o3::LSQ::SingleDataRequest::handleLocalAccess
virtual Cycles handleLocalAccess(gem5::ThreadContext *thread, PacketPtr pkt)
Memory mapped IPR accesses.
Definition: lsq.cc:1280
gem5::o3::MaxThreads
static constexpr int MaxThreads
Definition: limits.hh:38
gem5::o3::LSQ::HtmCmdRequest::initiateTranslation
virtual void initiateTranslation()
Definition: lsq.cc:1400
panic_if
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
Definition: logging.hh:203
gem5::o3::CPU::write
Fault write(LSQRequest *req, uint8_t *data, int store_idx)
CPU write function, forwards write to LSQ.
Definition: cpu.hh:644
gem5::o3::LSQ::insertLoad
void insertLoad(const DynInstPtr &load_inst)
Inserts a load into the LSQ.
Definition: lsq.cc:229
gem5::o3::LSQ::LSQSenderState
Derived class to hold any sender state the LSQ needs.
Definition: lsq.hh:80
gem5::o3::LSQ::LSQRequest::LSQRequest
LSQRequest(LSQUnit *port, const DynInstPtr &inst, bool isLoad)
Definition: lsq.cc:1042
gem5::o3::LSQ::cpu
CPU * cpu
The CPU pointer.
Definition: lsq.hh:996
gem5::o3::LSQ::hasStoresToWB
bool hasStoresToWB()
Returns whether or not there are any stores to write back to memory.
Definition: lsq.cc:705
std
Overload hash function for BasicBlockRange type.
Definition: types.hh:111
gem5::o3::LSQ::getLoadHead
int getLoadHead(ThreadID tid)
Returns the head index of the load queue for a specific thread.
Definition: lsq.cc:322
gem5::o3::LSQ::SplitDataRequest::finish
virtual void finish(const Fault &fault, const RequestPtr &req, gem5::ThreadContext *tc, BaseMMU::Mode mode)
Definition: lsq.cc:893
gem5::o3::LSQ::LSQRequest::_requests
std::vector< RequestPtr > _requests
Definition: lsq.hh:293
gem5::o3::LSQ::maxLQEntries
unsigned maxLQEntries
Max LQ Size - Used to Enforce Sharing Policies.
Definition: lsq.hh:1060
gem5::o3::LSQ::SplitDataRequest::mainRequest
virtual RequestPtr mainRequest()
Definition: lsq.cc:968
gem5::o3::IEW::cacheUnblocked
void cacheUnblocked()
Notifies that the cache has become unblocked.
Definition: iew.cc:563
gem5::o3::LSQ::getLoadHeadSeqNum
InstSeqNum getLoadHeadSeqNum(ThreadID tid)
Returns the sequence number of the head of the load queue.
Definition: lsq.cc:328
gem5::o3::LSQ::HtmCmdRequest::finish
virtual void finish(const Fault &fault, const RequestPtr &req, gem5::ThreadContext *tc, BaseMMU::Mode mode)
Definition: lsq.cc:1417
gem5::Packet::dataDynamic
void dataDynamic(T *p)
Set the data pointer to a value that should have delete [] called on it.
Definition: packet.hh:1172
gem5::ContextID
int ContextID
Globally unique thread context ID.
Definition: types.hh:246
gem5::o3::LSQ::numFreeStoreEntries
unsigned numFreeStoreEntries()
Returns the number of free store entries.
Definition: lsq.cc:532
gem5::MemCmd::WriteReq
@ WriteReq
Definition: packet.hh:89
gem5::o3::LSQ::LSQRequest::initiateTranslation
virtual void initiateTranslation()=0
logging.hh
gem5::o3::LSQ::usedStorePorts
int usedStorePorts
The number of used cache ports in this cycle by stores.
Definition: lsq.hh:1018
gem5::o3::LSQ::LSQ
LSQ(CPU *cpu_ptr, IEW *iew_ptr, const O3CPUParams &params)
Constructs an LSQ with the given parameters.
Definition: lsq.cc:81
gem5::InstSeqNum
uint64_t InstSeqNum
Definition: inst_seq.hh:40
gem5::MipsISA::r
r
Definition: pra_constants.hh:98
gem5::o3::LSQ::SingleDataRequest::finish
virtual void finish(const Fault &fault, const RequestPtr &req, gem5::ThreadContext *tc, BaseMMU::Mode mode)
Definition: lsq.cc:861
gem5::o3::LSQ::HtmCmdRequest
Definition: lsq.hh:684
gem5::o3::LSQ::pushRequest
Fault pushRequest(const DynInstPtr &inst, bool isLoad, uint8_t *data, unsigned int size, Addr addr, Request::Flags flags, uint64_t *res, AtomicOpFunctorPtr amo_op, const std::vector< bool > &byte_enable)
Definition: lsq.cc:774
gem5::o3::LSQ::LSQRequest::numTranslatedFragments
uint32_t numTranslatedFragments
Definition: lsq.hh:278
gem5::o3::LSQ::DcachePort::recvTimingResp
virtual bool recvTimingResp(PacketPtr pkt)
Timing version of receive.
Definition: lsq.cc:1350
gem5::o3::LSQ::LSQRequest::isLoad
bool isLoad() const
Definition: lsq.hh:311
gem5::o3::LSQ::takeOverFrom
void takeOverFrom()
Takes over execution from another CPU's thread.
Definition: lsq.cc:172
gem5::o3::LSQ::commitStores
void commitStores(InstSeqNum &youngest_inst, ThreadID tid)
Commits stores up until the given sequence number for a specific thread.
Definition: lsq.cc:267
cpu.hh
std::list< ThreadID >
gem5::Packet::getAddr
Addr getAddr() const
Definition: packet.hh:781
gem5::AtomicOpFunctorPtr
std::unique_ptr< AtomicOpFunctor > AtomicOpFunctorPtr
Definition: amo.hh:242
gem5
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
Definition: decoder.cc:40
gem5::Packet::createRead
static PacketPtr createRead(const RequestPtr &req)
Constructor-like methods that return Packets based on Request objects.
Definition: packet.hh:1007
lsq.hh
gem5::statistics::total
const FlagsType total
Print the total.
Definition: info.hh:60
limits.hh
gem5::o3::LSQ::lqFull
bool lqFull()
Returns if any of the LQs are full.
Definition: lsq.cc:626
gem5::o3::LSQ::numHtmStops
int numHtmStops(ThreadID tid) const
Definition: lsq.cc:360
gem5::o3::LSQ::LSQRequest::sendFragmentToTranslation
void sendFragmentToTranslation(int i)
Definition: lsq.cc:1116
gem5::o3::LSQ::LSQRequest::_fault
std::vector< Fault > _fault
Definition: lsq.hh:294
gem5::o3::LSQ::LSQRequest::addRequest
void addRequest(Addr addr, unsigned size, const std::vector< bool > &byte_enable)
Helper function used to add a (sub)request, given its address addr, size size and byte-enable mask by...
Definition: lsq.cc:1091
gem5::o3::LSQ::executeLoad
Fault executeLoad(const DynInstPtr &inst)
Executes a load.
Definition: lsq.cc:245
gem5::o3::LSQ::LSQRequest::numInTranslationFragments
uint32_t numInTranslationFragments
Definition: lsq.hh:279
gem5::o3::CPU::read
Fault read(LSQRequest *req, int load_idx)
CPU read function, forwards read to LSQ.
Definition: cpu.hh:638
gem5::o3::LSQ::usedLoadPorts
int usedLoadPorts
The number of used cache ports in this cycle by loads.
Definition: lsq.hh:1022
gem5::o3::LSQ::thread
std::vector< LSQUnit > thread
The LSQ units for individual threads.
Definition: lsq.hh:1069
gem5::o3::LSQ::isFull
bool isFull()
Returns if the LSQ is full (either LQ or SQ is full).
Definition: lsq.cc:561
gem5::ThreadID
int16_t ThreadID
Thread index/ID type.
Definition: types.hh:242
gem5::o3::LSQ::SingleDataRequest
Definition: lsq.hh:629
gem5::o3::LSQ::recvTimingResp
bool recvTimingResp(PacketPtr pkt)
Handles writing back and completing the load or store that has returned from memory.
Definition: lsq.cc:411
gem5::o3::IEW::name
std::string name() const
Returns the name of the IEW stage.
Definition: iew.cc:119
gem5::Packet::isInvalidate
bool isInvalidate() const
Definition: packet.hh:598
panic
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:177
gem5::ArmISA::mode
Bitfield< 4, 0 > mode
Definition: misc_types.hh:73
gem5::X86ISA::addr
Bitfield< 3 > addr
Definition: types.hh:84
gem5::Packet::getPtr
T * getPtr()
get a pointer to the data ptr.
Definition: packet.hh:1184
gem5::o3::LSQ::cacheLoadPorts
int cacheLoadPorts
The number of cache ports available each cycle (loads only).
Definition: lsq.hh:1020

Generated on Tue Sep 7 2021 14:53:44 for gem5 by doxygen 1.8.17