gem5  v19.0.0.0
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
fetch_impl.hh
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2010-2014 ARM Limited
3  * Copyright (c) 2012-2013 AMD
4  * All rights reserved.
5  *
6  * The license below extends only to copyright in the software and shall
7  * not be construed as granting a license to any other intellectual
8  * property including but not limited to intellectual property relating
9  * to a hardware implementation of the functionality of the software
10  * licensed hereunder. You may use the software subject to the license
11  * terms below provided that you ensure that this notice is replicated
12  * unmodified and in its entirety in all distributions of the software,
13  * modified or unmodified, in source code or in binary form.
14  *
15  * Copyright (c) 2004-2006 The Regents of The University of Michigan
16  * All rights reserved.
17  *
18  * Redistribution and use in source and binary forms, with or without
19  * modification, are permitted provided that the following conditions are
20  * met: redistributions of source code must retain the above copyright
21  * notice, this list of conditions and the following disclaimer;
22  * redistributions in binary form must reproduce the above copyright
23  * notice, this list of conditions and the following disclaimer in the
24  * documentation and/or other materials provided with the distribution;
25  * neither the name of the copyright holders nor the names of its
26  * contributors may be used to endorse or promote products derived from
27  * this software without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40  *
41  * Authors: Kevin Lim
42  * Korey Sewell
43  */
44 
45 #ifndef __CPU_O3_FETCH_IMPL_HH__
46 #define __CPU_O3_FETCH_IMPL_HH__
47 
48 #include <algorithm>
49 #include <cstring>
50 #include <list>
51 #include <map>
52 #include <queue>
53 
54 #include "arch/generic/tlb.hh"
55 #include "arch/isa_traits.hh"
56 #include "arch/utility.hh"
57 #include "arch/vtophys.hh"
58 #include "base/random.hh"
59 #include "base/types.hh"
60 #include "config/the_isa.hh"
61 #include "cpu/base.hh"
62 //#include "cpu/checker/cpu.hh"
63 #include "cpu/o3/cpu.hh"
64 #include "cpu/o3/fetch.hh"
65 #include "cpu/exetrace.hh"
66 #include "debug/Activity.hh"
67 #include "debug/Drain.hh"
68 #include "debug/Fetch.hh"
69 #include "debug/O3CPU.hh"
70 #include "debug/O3PipeView.hh"
71 #include "mem/packet.hh"
72 #include "params/DerivO3CPU.hh"
73 #include "sim/byteswap.hh"
74 #include "sim/core.hh"
75 #include "sim/eventq.hh"
76 #include "sim/full_system.hh"
77 #include "sim/system.hh"
78 #include "cpu/o3/isa_specific.hh"
79 
80 using namespace std;
81 
// DefaultFetch constructor: caches the relevant DerivO3CPUParams values
// (pipeline delays, widths, buffer/queue sizes, thread counts), then
// validates them against compile-time limits and initializes the
// per-thread fetch state arrays.
// NOTE(review): this text is a Doxygen listing extract. The `if`
// conditions that guard the two fatal() calls below (original source
// lines 112 and 115) were dropped by the extraction — consult the real
// fetch_impl.hh; do not treat this listing as compilable.
82 template<class Impl>
83 DefaultFetch<Impl>::DefaultFetch(O3CPU *_cpu, DerivO3CPUParams *params)
84  : fetchPolicy(params->smtFetchPolicy),
85  cpu(_cpu),
86  branchPred(nullptr),
87  decodeToFetchDelay(params->decodeToFetchDelay),
88  renameToFetchDelay(params->renameToFetchDelay),
89  iewToFetchDelay(params->iewToFetchDelay),
90  commitToFetchDelay(params->commitToFetchDelay),
91  fetchWidth(params->fetchWidth),
92  decodeWidth(params->decodeWidth),
93  retryPkt(NULL),
94  retryTid(InvalidThreadID),
95  cacheBlkSize(cpu->cacheLineSize()),
96  fetchBufferSize(params->fetchBufferSize),
97  fetchBufferMask(fetchBufferSize - 1),
98  fetchQueueSize(params->fetchQueueSize),
99  numThreads(params->numThreads),
100  numFetchingThreads(params->smtNumFetchingThreads),
101  icachePort(this, _cpu),
102  finishTranslationEvent(this)
103 {
// Sanity-check the configuration against the compile-time limits.
104  if (numThreads > Impl::MaxThreads)
105  fatal("numThreads (%d) is larger than compiled limit (%d),\n"
106  "\tincrease MaxThreads in src/cpu/o3/impl.hh\n",
107  numThreads, static_cast<int>(Impl::MaxThreads));
108  if (fetchWidth > Impl::MaxWidth)
109  fatal("fetchWidth (%d) is larger than compiled limit (%d),\n"
110  "\tincrease MaxWidth in src/cpu/o3/impl.hh\n",
111  fetchWidth, static_cast<int>(Impl::MaxWidth));
// NOTE(review): guard condition missing here in this extract (original
// line 112) — presumably a check that fetchBufferSize <= cacheBlkSize.
113  fatal("fetch buffer size (%u bytes) is greater than the cache "
114  "block size (%u bytes)\n", fetchBufferSize, cacheBlkSize);
// NOTE(review): guard condition missing here as well (original line 115)
// — presumably a divisibility check of cacheBlkSize by fetchBufferSize.
116  fatal("cache block (%u bytes) is not a multiple of the "
117  "fetch buffer (%u bytes)\n", cacheBlkSize, fetchBufferSize);
118 
119  // Figure out fetch policy
120  panic_if(fetchPolicy == FetchPolicy::SingleThread && numThreads > 1,
121  "Invalid Fetch Policy for a SMT workload.");
122 
123  // Get the size of an instruction.
124  instSize = sizeof(TheISA::MachInst);
125 
// Initialize every possible hardware-thread slot to a quiescent state.
126  for (int i = 0; i < Impl::MaxThreads; i++) {
127  fetchStatus[i] = Idle;
128  decoder[i] = nullptr;
129  pc[i] = 0;
130  fetchOffset[i] = 0;
131  macroop[i] = nullptr;
132  delayedCommit[i] = false;
133  memReq[i] = nullptr;
134  stalls[i] = {false, false};
135  fetchBuffer[i] = NULL;
136  fetchBufferPC[i] = 0;
137  fetchBufferValid[i] = false;
138  lastIcacheStall[i] = 0;
139  issuePipelinedIfetch[i] = false;
140  }
141 
142  branchPred = params->branchPred;
143 
// For each actually-configured thread: create a decoder and the fetch
// buffer storage.
144  for (ThreadID tid = 0; tid < numThreads; tid++) {
145  decoder[tid] = new TheISA::Decoder(params->isa[tid]);
146  // Create space to buffer the cache line data,
147  // which may not hold the entire cache line.
148  fetchBuffer[tid] = new uint8_t[fetchBufferSize];
149  }
150 }
151 
152 template <class Impl>
153 std::string
// NOTE(review): the defining line was dropped by this extract (original
// line 154); presumably DefaultFetch<Impl>::name() const.
// Returns the fetch stage's name, scoped under the owning CPU's name.
155 {
156  return cpu->name() + ".fetch";
157 }
158 
159 template <class Impl>
160 void
// NOTE(review): defining line dropped by this extract (original line
// 161); presumably DefaultFetch<Impl>::regProbePoints().
// Registers the "Fetch" and "FetchRequest" probe points on the CPU's
// probe manager so instrumentation can observe fetched instructions and
// outgoing fetch requests.
162 {
163  ppFetch = new ProbePointArg<DynInstPtr>(cpu->getProbeManager(), "Fetch");
164  ppFetchRequestSent = new ProbePointArg<RequestPtr>(cpu->getProbeManager(),
165  "FetchRequest");
166 
167 }
168 
169 template <class Impl>
170 void
172 {
174  .name(name() + ".icacheStallCycles")
175  .desc("Number of cycles fetch is stalled on an Icache miss")
177 
179  .name(name() + ".Insts")
180  .desc("Number of instructions fetch has processed")
182 
184  .name(name() + ".Branches")
185  .desc("Number of branches that fetch encountered")
187 
189  .name(name() + ".predictedBranches")
190  .desc("Number of branches that fetch has predicted taken")
192 
194  .name(name() + ".Cycles")
195  .desc("Number of cycles fetch has run and was not squashing or"
196  " blocked")
198 
200  .name(name() + ".SquashCycles")
201  .desc("Number of cycles fetch has spent squashing")
203 
205  .name(name() + ".TlbCycles")
206  .desc("Number of cycles fetch has spent waiting for tlb")
208 
210  .name(name() + ".IdleCycles")
211  .desc("Number of cycles fetch was idle")
213 
215  .name(name() + ".BlockedCycles")
216  .desc("Number of cycles fetch has spent blocked")
218 
220  .name(name() + ".CacheLines")
221  .desc("Number of cache lines fetched")
223 
225  .name(name() + ".MiscStallCycles")
226  .desc("Number of cycles fetch has spent waiting on interrupts, or "
227  "bad addresses, or out of MSHRs")
229 
231  .name(name() + ".PendingDrainCycles")
232  .desc("Number of cycles fetch has spent waiting on pipes to drain")
234 
236  .name(name() + ".NoActiveThreadStallCycles")
237  .desc("Number of stall cycles due to no active thread to fetch from")
239 
241  .name(name() + ".PendingTrapStallCycles")
242  .desc("Number of stall cycles due to pending traps")
244 
246  .name(name() + ".PendingQuiesceStallCycles")
247  .desc("Number of stall cycles due to pending quiesce instructions")
249 
251  .name(name() + ".IcacheWaitRetryStallCycles")
252  .desc("Number of stall cycles due to full MSHR")
254 
256  .name(name() + ".IcacheSquashes")
257  .desc("Number of outstanding Icache misses that were squashed")
259 
261  .name(name() + ".ItlbSquashes")
262  .desc("Number of outstanding ITLB misses that were squashed")
264 
266  .init(/* base value */ 0,
267  /* last value */ fetchWidth,
268  /* bucket size */ 1)
269  .name(name() + ".rateDist")
270  .desc("Number of instructions fetched each cycle (Total)")
271  .flags(Stats::pdf);
272 
273  idleRate
274  .name(name() + ".idleRate")
275  .desc("Percent of cycles fetch was idle")
276  .prereq(idleRate);
277  idleRate = fetchIdleCycles * 100 / cpu->numCycles;
278 
279  branchRate
280  .name(name() + ".branchRate")
281  .desc("Number of branch fetches per cycle")
283  branchRate = fetchedBranches / cpu->numCycles;
284 
285  fetchRate
286  .name(name() + ".rate")
287  .desc("Number of inst fetches per cycle")
289  fetchRate = fetchedInsts / cpu->numCycles;
290 }
291 
292 template<class Impl>
293 void
// NOTE(review): defining line dropped by this extract (original line
// 294); presumably DefaultFetch<Impl>::setTimeBuffer(...).
// Stores the backwards time buffer shared with later pipeline stages.
295 {
296  timeBuffer = time_buffer;
297 
298  // Create wires to get information from proper places in time buffer.
// NOTE(review): the statements creating the stage wires (original lines
// 299-302) are missing from this extract — confirm against the source.
303 }
304 
305 template<class Impl>
306 void
// NOTE(review): defining line dropped by this extract (original line
// 307); presumably DefaultFetch<Impl>::setActiveThreads(...).
// Stores the CPU's active-thread list for later fetch arbitration.
308 {
309  activeThreads = at_ptr;
310 }
311 
312 template<class Impl>
313 void
// NOTE(review): defining line dropped by this extract (original line
// 314); presumably DefaultFetch<Impl>::setFetchQueue(...).
// Grabs slot 0 of the fetch-to-decode time buffer as the write wire.
315 {
316  // Create wire to write information to proper place in fetch time buf.
317  toDecode = ftb_ptr->getWire(0);
318 }
319 
320 template<class Impl>
321 void
// NOTE(review): defining line dropped by this extract (original line
// 322); presumably DefaultFetch<Impl>::startupStage().
// Resets all per-thread state and marks the stage active so fetch can
// begin at the very first cycle.
323 {
324  assert(priorityList.empty());
325  resetStage();
326 
327  // Fetch needs to start fetching instructions at the very beginning,
328  // so it must start up in active state.
329  switchToActive();
330 }
331 
332 template<class Impl>
333 void
// NOTE(review): defining line dropped by this extract (original line
// 334); presumably DefaultFetch<Impl>::clearStates(ThreadID tid).
// Re-initializes one thread's fetch state: status, PC (resynced from
// the CPU), stalls, fetch buffer validity, and fetch queue.
335 {
336  fetchStatus[tid] = Running;
337  pc[tid] = cpu->pcState(tid);
338  fetchOffset[tid] = 0;
339  macroop[tid] = NULL;
340  delayedCommit[tid] = false;
341  memReq[tid] = NULL;
342  stalls[tid].decode = false;
343  stalls[tid].drain = false;
344  fetchBufferPC[tid] = 0;
345  fetchBufferValid[tid] = false;
346  fetchQueue[tid].clear();
347 
348  // TODO not sure what to do with priorityList for now
349  // priorityList.push_back(tid);
350 }
351 
352 template<class Impl>
353 void
// NOTE(review): defining line dropped by this extract (original line
// 354); presumably DefaultFetch<Impl>::resetStage().
// Fully re-initializes the stage: global flags, every thread's fetch
// state (PC resynced from the CPU), and the fetch priority list; the
// stage itself ends up Inactive.
355 {
356  numInst = 0;
357  interruptPending = false;
358  cacheBlocked = false;
359 
360  priorityList.clear();
361 
362  // Setup PC and nextPC with initial state.
363  for (ThreadID tid = 0; tid < numThreads; ++tid) {
364  fetchStatus[tid] = Running;
365  pc[tid] = cpu->pcState(tid);
366  fetchOffset[tid] = 0;
367  macroop[tid] = NULL;
368 
369  delayedCommit[tid] = false;
370  memReq[tid] = NULL;
371 
372  stalls[tid].decode = false;
373  stalls[tid].drain = false;
374 
375  fetchBufferPC[tid] = 0;
376  fetchBufferValid[tid] = false;
377 
378  fetchQueue[tid].clear();
379 
380  priorityList.push_back(tid);
381  }
382 
383  wroteToTimeBuffer = false;
384  _status = Inactive;
385 }
386 
387 template<class Impl>
388 void
// NOTE(review): defining line dropped by this extract (original line
// 389); presumably DefaultFetch<Impl>::processCacheCompletion(PacketPtr).
// Handles an icache response: if the thread is still waiting on this
// exact request, copies the data into its fetch buffer, wakes the CPU,
// and updates the thread's fetch status; otherwise drops the packet.
390 {
391  ThreadID tid = cpu->contextToThread(pkt->req->contextId());
392 
393  DPRINTF(Fetch, "[tid:%i] Waking up from cache miss.\n", tid);
394  assert(!cpu->switchedOut());
395 
396  // Only change the status if it's still waiting on the icache access
397  // to return.
398  if (fetchStatus[tid] != IcacheWaitResponse ||
399  pkt->req != memReq[tid]) {
// NOTE(review): one statement missing here (original line 400) — likely
// a squashed-miss counter increment; confirm against the source.
401  delete pkt;
402  return;
403  }
404 
405  memcpy(fetchBuffer[tid], pkt->getConstPtr<uint8_t>(), fetchBufferSize);
406  fetchBufferValid[tid] = true;
407 
408  // Wake up the CPU (if it went to sleep and was waiting on
409  // this completion event).
410  cpu->wakeCPU();
411 
412  DPRINTF(Activity, "[tid:%i] Activating fetch due to cache completion\n",
413  tid);
414 
415  switchToActive();
416 
417  // Only switch to IcacheAccessComplete if we're not stalled as well.
418  if (checkStall(tid)) {
419  fetchStatus[tid] = Blocked;
420  } else {
// NOTE(review): one statement missing here (original line 421) —
// presumably setting fetchStatus[tid] for the completed access, per the
// comment above.
422  }
423 
424  pkt->req->setAccessLatency();
425  cpu->ppInstAccessComplete->notify(pkt);
426  // Reset the mem req to NULL.
427  delete pkt;
428  memReq[tid] = NULL;
429 }
430 
431 template <class Impl>
432 void
// NOTE(review): defining line dropped by this extract (original line
// 433); presumably DefaultFetch<Impl>::drainResume().
// Clears all per-thread decode/drain stalls when the CPU resumes.
434 {
435  for (ThreadID i = 0; i < numThreads; ++i) {
436  stalls[i].decode = false;
437  stalls[i].drain = false;
438  }
439 }
440 
441 template <class Impl>
442 void
// NOTE(review): defining line dropped by this extract (original line
// 443); presumably DefaultFetch<Impl>::drainSanityCheck() const.
// Asserts the invariants that must hold once fetch is fully drained:
// no outstanding memory requests, no retry state, no pending interrupt,
// and every thread Idle or drain-stalled.
444 {
445  assert(isDrained());
446  assert(retryPkt == NULL);
447  assert(retryTid == InvalidThreadID);
448  assert(!cacheBlocked);
449  assert(!interruptPending);
450 
451  for (ThreadID i = 0; i < numThreads; ++i) {
452  assert(!memReq[i]);
453  assert(fetchStatus[i] == Idle || stalls[i].drain);
454  }
455 
// NOTE(review): one statement missing here (original line 456) — likely
// delegating a drain sanity check to a sub-component; confirm against
// the source.
457 }
458 
459 template <class Impl>
460 bool
// NOTE(review): defining line dropped by this extract (original line
// 461); presumably DefaultFetch<Impl>::isDrained() const.
// Returns whether fetch is drained: every thread's fetch queue empty
// and every thread Idle, or Blocked purely because of the drain stall.
462 {
463  /* Make sure that threads are either idle of that the commit stage
464  * has signaled that draining has completed by setting the drain
465  * stall flag. This effectively forces the pipeline to be disabled
466  * until the whole system is drained (simulation may continue to
467  * drain other components).
468  */
469  for (ThreadID i = 0; i < numThreads; ++i) {
470  // Verify fetch queues are drained
471  if (!fetchQueue[i].empty())
472  return false;
473 
474  // Return false if not idle or drain stalled
475  if (fetchStatus[i] != Idle) {
476  if (fetchStatus[i] == Blocked && stalls[i].drain)
477  continue;
478  else
479  return false;
480  }
481  }
482 
483  /* The pipeline might start up again in the middle of the drain
484  * cycle if the finish translation event is scheduled, so make
485  * sure that's not the case.
486  */
// NOTE(review): the final return statement is missing from this extract
// (original line 487) — per the comment above it presumably checks the
// finish-translation event's scheduled state; confirm against the
// source.
488 }
489 
490 template <class Impl>
491 void
// NOTE(review): defining line dropped by this extract (original line
// 492); presumably DefaultFetch<Impl>::takeOverFrom().
// Re-initializes the stage after a CPU switch; requires that the
// instruction port is already connected.
493 {
494  assert(cpu->getInstPort().isConnected());
495  resetStage();
496 
497 }
498 
499 template <class Impl>
500 void
// NOTE(review): defining line dropped by this extract (original line
// 501); presumably DefaultFetch<Impl>::drainStall(ThreadID tid).
// Marks one thread as drain-stalled; only legal while the CPU is
// draining and the stall is not already set.
502 {
503  assert(cpu->isDraining());
504  assert(!stalls[tid].drain);
505  DPRINTF(Drain, "%i: Thread drained.\n", tid);
506  stalls[tid].drain = true;
507 }
508 
509 template <class Impl>
510 void
// NOTE(review): defining line dropped by this extract (original line
// 511); presumably DefaultFetch<Impl>::wakeFromQuiesce().
// Wakes thread 0 from a quiesce; the TODO below notes other threads
// are not yet handled.
512 {
513  DPRINTF(Fetch, "Waking up from quiesce\n");
514  // Hopefully this is safe
515  // @todo: Allow other threads to wake from quiesce.
516  fetchStatus[0] = Running;
517 }
518 
519 template <class Impl>
520 inline void
// NOTE(review): defining line dropped by this extract (original line
// 521); presumably DefaultFetch<Impl>::switchToActive().
// Transitions the stage to Active and notifies the CPU; no-op if
// already active (idempotent).
522 {
523  if (_status == Inactive) {
524  DPRINTF(Activity, "Activating stage.\n");
525 
526  cpu->activateStage(O3CPU::FetchIdx);
527 
528  _status = Active;
529  }
530 }
531 
532 template <class Impl>
533 inline void
// NOTE(review): defining line dropped by this extract (original line
// 534); presumably DefaultFetch<Impl>::switchToInactive().
// Transitions the stage to Inactive and notifies the CPU; no-op if
// already inactive (idempotent).
535 {
536  if (_status == Active) {
537  DPRINTF(Activity, "Deactivating stage.\n");
538 
539  cpu->deactivateStage(O3CPU::FetchIdx);
540 
541  _status = Inactive;
542  }
543 }
544 
545 template <class Impl>
546 void
// NOTE(review): defining line dropped by this extract (original line
// 547); presumably DefaultFetch<Impl>::deactivateThread(ThreadID tid).
// Removes the thread from the fetch priority list, if it is present.
548 {
549  // Update priority list
550  auto thread_it = std::find(priorityList.begin(), priorityList.end(), tid);
551  if (thread_it != priorityList.end()) {
552  priorityList.erase(thread_it);
553  }
554 }
555 
556 template <class Impl>
557 bool
// NOTE(review): the first line of the signature was dropped by this
// extract (original line 558); presumably
// DefaultFetch<Impl>::lookupAndUpdateNextPC( with the parameters below.
// Runs branch prediction for one instruction: non-control instructions
// simply advance nextPC; control instructions consult the branch
// predictor, and the predicted target/taken-ness is recorded on the
// instruction. Returns whether the branch was predicted taken.
559  const DynInstPtr &inst, TheISA::PCState &nextPC)
560 {
561  // Do branch prediction check here.
562  // A bit of a misnomer...next_PC is actually the current PC until
563  // this function updates it.
564  bool predict_taken;
565 
566  if (!inst->isControl()) {
567  TheISA::advancePC(nextPC, inst->staticInst);
568  inst->setPredTarg(nextPC);
569  inst->setPredTaken(false);
570  return false;
571  }
572 
573  ThreadID tid = inst->threadNumber;
574  predict_taken = branchPred->predict(inst->staticInst, inst->seqNum,
575  nextPC, tid);
576 
577  if (predict_taken) {
578  DPRINTF(Fetch, "[tid:%i] [sn:%llu] Branch at PC %#x "
579  "predicted to be taken to %s\n",
580  tid, inst->seqNum, inst->pcState().instAddr(), nextPC);
581  } else {
582  DPRINTF(Fetch, "[tid:%i] [sn:%llu] Branch at PC %#x "
583  "predicted to be not taken\n",
584  tid, inst->seqNum, inst->pcState().instAddr());
585  }
586 
587  DPRINTF(Fetch, "[tid:%i] [sn:%llu] Branch at PC %#x "
588  "predicted to go to %s\n",
589  tid, inst->seqNum, inst->pcState().instAddr(), nextPC);
590  inst->setPredTarg(nextPC);
591  inst->setPredTaken(predict_taken);
592 
593  ++fetchedBranches;
594 
595  if (predict_taken) {
// NOTE(review): one statement missing here (original line 596) — likely
// a predicted-taken-branch counter increment; confirm against the
// source.
597  }
598 
599  return predict_taken;
600 }
601 
602 template <class Impl>
603 bool
// NOTE(review): defining line dropped by this extract (original line
// 604); presumably DefaultFetch<Impl>::fetchCacheLine(Addr vaddr,
// ThreadID tid, Addr pc).
// Kicks off a timing instruction fetch: bails out if the cache is
// blocked or an interrupt is pending, otherwise builds an INST_FETCH
// request for the aligned fetch-buffer segment and starts its ITLB
// translation. Returns whether a fetch was initiated.
605 {
606  Fault fault = NoFault;
607 
608  assert(!cpu->switchedOut());
609 
610  // @todo: not sure if these should block translation.
611  //AlphaDep
612  if (cacheBlocked) {
613  DPRINTF(Fetch, "[tid:%i] Can't fetch cache line, cache blocked\n",
614  tid);
615  return false;
616  } else if (checkInterrupt(pc) && !delayedCommit[tid]) {
617  // Hold off fetch from getting new instructions when:
618  // Cache is blocked, or
619  // while an interrupt is pending and we're not in PAL mode, or
620  // fetch is switched out.
621  DPRINTF(Fetch, "[tid:%i] Can't fetch cache line, interrupt pending\n",
622  tid);
623  return false;
624  }
625 
626  // Align the fetch address to the start of a fetch buffer segment.
627  Addr fetchBufferBlockPC = fetchBufferAlignPC(vaddr);
628 
629  DPRINTF(Fetch, "[tid:%i] Fetching cache line %#x for addr %#x\n",
630  tid, fetchBufferBlockPC, vaddr);
631 
632  // Setup the memReq to do a read of the first instruction's address.
633  // Set the appropriate read size and flags as well.
634  // Build request here.
635  RequestPtr mem_req = std::make_shared<Request>(
636  tid, fetchBufferBlockPC, fetchBufferSize,
637  Request::INST_FETCH, cpu->instMasterId(), pc,
638  cpu->thread[tid]->contextId());
639 
640  mem_req->taskId(cpu->taskId());
641 
642  memReq[tid] = mem_req;
643 
644  // Initiate translation of the icache block
645  fetchStatus[tid] = ItlbWait;
646  FetchTranslation *trans = new FetchTranslation(this);
647  cpu->itb->translateTiming(mem_req, cpu->thread[tid]->getTC(),
648  trans, BaseTLB::Execute);
649  return true;
650 }
651 
652 template <class Impl>
653 void
655  const RequestPtr &mem_req)
656 {
657  ThreadID tid = cpu->contextToThread(mem_req->contextId());
658  Addr fetchBufferBlockPC = mem_req->getVaddr();
659 
660  assert(!cpu->switchedOut());
661 
662  // Wake up CPU if it was idle
663  cpu->wakeCPU();
664 
665  if (fetchStatus[tid] != ItlbWait || mem_req != memReq[tid] ||
666  mem_req->getVaddr() != memReq[tid]->getVaddr()) {
667  DPRINTF(Fetch, "[tid:%i] Ignoring itlb completed after squash\n",
668  tid);
670  return;
671  }
672 
673 
674  // If translation was successful, attempt to read the icache block.
675  if (fault == NoFault) {
676  // Check that we're not going off into random memory
677  // If we have, just wait around for commit to squash something and put
678  // us on the right track
679  if (!cpu->system->isMemAddr(mem_req->getPaddr())) {
680  warn("Address %#x is outside of physical memory, stopping fetch\n",
681  mem_req->getPaddr());
682  fetchStatus[tid] = NoGoodAddr;
683  memReq[tid] = NULL;
684  return;
685  }
686 
687  // Build packet here.
688  PacketPtr data_pkt = new Packet(mem_req, MemCmd::ReadReq);
689  data_pkt->dataDynamic(new uint8_t[fetchBufferSize]);
690 
691  fetchBufferPC[tid] = fetchBufferBlockPC;
692  fetchBufferValid[tid] = false;
693  DPRINTF(Fetch, "Fetch: Doing instruction read.\n");
694 
696 
697  // Access the cache.
698  if (!icachePort.sendTimingReq(data_pkt)) {
699  assert(retryPkt == NULL);
700  assert(retryTid == InvalidThreadID);
701  DPRINTF(Fetch, "[tid:%i] Out of MSHRs!\n", tid);
702 
704  retryPkt = data_pkt;
705  retryTid = tid;
706  cacheBlocked = true;
707  } else {
708  DPRINTF(Fetch, "[tid:%i] Doing Icache access.\n", tid);
709  DPRINTF(Activity, "[tid:%i] Activity: Waiting on I-cache "
710  "response.\n", tid);
711  lastIcacheStall[tid] = curTick();
713  // Notify Fetch Request probe when a packet containing a fetch
714  // request is successfully sent
715  ppFetchRequestSent->notify(mem_req);
716  }
717  } else {
718  // Don't send an instruction to decode if we can't handle it.
719  if (!(numInst < fetchWidth) || !(fetchQueue[tid].size() < fetchQueueSize)) {
723  cpu->schedule(finishTranslationEvent,
724  cpu->clockEdge(Cycles(1)));
725  return;
726  }
727  DPRINTF(Fetch, "[tid:%i] Got back req with addr %#x but expected %#x\n",
728  tid, mem_req->getVaddr(), memReq[tid]->getVaddr());
729  // Translation faulted, icache request won't be sent.
730  memReq[tid] = NULL;
731 
732  // Send the fault to commit. This thread will not do anything
733  // until commit handles the fault. The only other way it can
734  // wake up is if a squash comes along and changes the PC.
735  TheISA::PCState fetchPC = pc[tid];
736 
737  DPRINTF(Fetch, "[tid:%i] Translation faulted, building noop.\n", tid);
738  // We will use a nop in ordier to carry the fault.
740  NULL, fetchPC, fetchPC, false);
741  instruction->setNotAnInst();
742 
743  instruction->setPredTarg(fetchPC);
744  instruction->fault = fault;
745  wroteToTimeBuffer = true;
746 
747  DPRINTF(Activity, "Activity this cycle.\n");
748  cpu->activityThisCycle();
749 
750  fetchStatus[tid] = TrapPending;
751 
752  DPRINTF(Fetch, "[tid:%i] Blocked, need to handle the trap.\n", tid);
753  DPRINTF(Fetch, "[tid:%i] fault (%s) detected @ PC %s.\n",
754  tid, fault->name(), pc[tid]);
755  }
757 }
758 
759 template <class Impl>
760 inline void
762  const DynInstPtr squashInst, ThreadID tid)
763 {
764  DPRINTF(Fetch, "[tid:%i] Squashing, setting PC to: %s.\n",
765  tid, newPC);
766 
767  pc[tid] = newPC;
768  fetchOffset[tid] = 0;
769  if (squashInst && squashInst->pcState().instAddr() == newPC.instAddr())
770  macroop[tid] = squashInst->macroop;
771  else
772  macroop[tid] = NULL;
773  decoder[tid]->reset();
774 
775  // Clear the icache miss if it's outstanding.
776  if (fetchStatus[tid] == IcacheWaitResponse) {
777  DPRINTF(Fetch, "[tid:%i] Squashing outstanding Icache miss.\n",
778  tid);
779  memReq[tid] = NULL;
780  } else if (fetchStatus[tid] == ItlbWait) {
781  DPRINTF(Fetch, "[tid:%i] Squashing outstanding ITLB miss.\n",
782  tid);
783  memReq[tid] = NULL;
784  }
785 
786  // Get rid of the retrying packet if it was from this thread.
787  if (retryTid == tid) {
788  assert(cacheBlocked);
789  if (retryPkt) {
790  delete retryPkt;
791  }
792  retryPkt = NULL;
794  }
795 
796  fetchStatus[tid] = Squashing;
797 
798  // Empty fetch queue
799  fetchQueue[tid].clear();
800 
801  // microops are being squashed, it is not known wheather the
802  // youngest non-squashed microop was marked delayed commit
803  // or not. Setting the flag to true ensures that the
804  // interrupts are not handled when they cannot be, though
805  // some opportunities to handle interrupts may be missed.
806  delayedCommit[tid] = true;
807 
809 }
810 
811 template<class Impl>
812 void
// NOTE(review): the first line of the signature was dropped by this
// extract (original line 813); presumably
// DefaultFetch<Impl>::squashFromDecode( with the parameters below.
// Squash originating at decode: performs the common doSquash() work,
// then removes in-flight instructions between fetch and decode.
814  const DynInstPtr squashInst,
815  const InstSeqNum seq_num, ThreadID tid)
816 {
817  DPRINTF(Fetch, "[tid:%i] Squashing from decode.\n", tid);
818 
819  doSquash(newPC, squashInst, tid);
820 
821  // Tell the CPU to remove any instructions that are in flight between
822  // fetch and decode.
823  cpu->removeInstsUntil(seq_num, tid);
824 }
825 
826 template<class Impl>
827 bool
// NOTE(review): defining line dropped by this extract (original line
// 828); presumably DefaultFetch<Impl>::checkStall(ThreadID tid) const.
// Returns whether this thread is stalled; in this version only the
// drain stall is checked (valid only while the CPU is draining).
829 {
830  bool ret_val = false;
831 
832  if (stalls[tid].drain) {
833  assert(cpu->isDraining());
834  DPRINTF(Fetch,"[tid:%i] Drain stall detected.\n",tid);
835  ret_val = true;
836  }
837 
838  return ret_val;
839 }
840 
841 template<class Impl>
844 {
845  //Check Running
846  list<ThreadID>::iterator threads = activeThreads->begin();
848 
849  while (threads != end) {
850  ThreadID tid = *threads++;
851 
852  if (fetchStatus[tid] == Running ||
853  fetchStatus[tid] == Squashing ||
855 
856  if (_status == Inactive) {
857  DPRINTF(Activity, "[tid:%i] Activating stage.\n",tid);
858 
859  if (fetchStatus[tid] == IcacheAccessComplete) {
860  DPRINTF(Activity, "[tid:%i] Activating fetch due to cache"
861  "completion\n",tid);
862  }
863 
864  cpu->activateStage(O3CPU::FetchIdx);
865  }
866 
867  return Active;
868  }
869  }
870 
871  // Stage is switching from active to inactive, notify CPU of it.
872  if (_status == Active) {
873  DPRINTF(Activity, "Deactivating stage.\n");
874 
875  cpu->deactivateStage(O3CPU::FetchIdx);
876  }
877 
878  return Inactive;
879 }
880 
881 template <class Impl>
882 void
// NOTE(review): the first line of the signature was dropped by this
// extract (original line 883); presumably DefaultFetch<Impl>::squash(
// with the parameters below.
// Squash ordered by commit: performs the common doSquash() work, then
// removes every instruction that is not in the ROB.
884  const InstSeqNum seq_num, DynInstPtr squashInst,
885  ThreadID tid)
886 {
887  DPRINTF(Fetch, "[tid:%i] Squash from commit.\n", tid);
888 
889  doSquash(newPC, squashInst, tid);
890 
891  // Tell the CPU to remove any instructions that are not in the ROB.
892  cpu->removeInstsNotInROB(tid);
893 }
894 
895 template <class Impl>
896 void
898 {
899  list<ThreadID>::iterator threads = activeThreads->begin();
901  bool status_change = false;
902 
903  wroteToTimeBuffer = false;
904 
905  for (ThreadID i = 0; i < numThreads; ++i) {
906  issuePipelinedIfetch[i] = false;
907  }
908 
909  while (threads != end) {
910  ThreadID tid = *threads++;
911 
912  // Check the signals for each thread to determine the proper status
913  // for each thread.
914  bool updated_status = checkSignalsAndUpdate(tid);
915  status_change = status_change || updated_status;
916  }
917 
918  DPRINTF(Fetch, "Running stage.\n");
919 
920  if (FullSystem) {
921  if (fromCommit->commitInfo[0].interruptPending) {
922  interruptPending = true;
923  }
924 
925  if (fromCommit->commitInfo[0].clearInterrupt) {
926  interruptPending = false;
927  }
928  }
929 
931  threadFetched++) {
932  // Fetch each of the actively fetching threads.
933  fetch(status_change);
934  }
935 
936  // Record number of instructions fetched this cycle for distribution.
938 
939  if (status_change) {
940  // Change the fetch stage status if there was a status change.
942  }
943 
944  // Issue the next I-cache request if possible.
945  for (ThreadID i = 0; i < numThreads; ++i) {
946  if (issuePipelinedIfetch[i]) {
948  }
949  }
950 
951  // Send instructions enqueued into the fetch queue to decode.
952  // Limit rate by fetchWidth. Stall if decode is stalled.
953  unsigned insts_to_decode = 0;
954  unsigned available_insts = 0;
955 
956  for (auto tid : *activeThreads) {
957  if (!stalls[tid].decode) {
958  available_insts += fetchQueue[tid].size();
959  }
960  }
961 
962  // Pick a random thread to start trying to grab instructions from
963  auto tid_itr = activeThreads->begin();
964  std::advance(tid_itr, random_mt.random<uint8_t>(0, activeThreads->size() - 1));
965 
966  while (available_insts != 0 && insts_to_decode < decodeWidth) {
967  ThreadID tid = *tid_itr;
968  if (!stalls[tid].decode && !fetchQueue[tid].empty()) {
969  const auto& inst = fetchQueue[tid].front();
970  toDecode->insts[toDecode->size++] = inst;
971  DPRINTF(Fetch, "[tid:%i] [sn:%llu] Sending instruction to decode "
972  "from fetch queue. Fetch queue size: %i.\n",
973  tid, inst->seqNum, fetchQueue[tid].size());
974 
975  wroteToTimeBuffer = true;
976  fetchQueue[tid].pop_front();
977  insts_to_decode++;
978  available_insts--;
979  }
980 
981  tid_itr++;
982  // Wrap around if at end of active threads list
983  if (tid_itr == activeThreads->end())
984  tid_itr = activeThreads->begin();
985  }
986 
987  // If there was activity this cycle, inform the CPU of it.
988  if (wroteToTimeBuffer) {
989  DPRINTF(Activity, "Activity this cycle.\n");
990  cpu->activityThisCycle();
991  }
992 
993  // Reset the number of the instruction we've fetched.
994  numInst = 0;
995 }
996 
997 template <class Impl>
998 bool
// NOTE(review): defining line dropped by this extract (original line
// 999); presumably DefaultFetch<Impl>::checkSignalsAndUpdate(ThreadID).
// Reads the backwards time-buffer signals for one thread and updates
// its fetch status: decode block/unblock, squashes from commit and
// decode (with the matching branch-predictor squash/update calls), and
// stall-driven Blocked/Running transitions. Returns true when the
// thread's status changed this cycle.
1000 {
1001  // Update the per thread stall statuses.
1002  if (fromDecode->decodeBlock[tid]) {
1003  stalls[tid].decode = true;
1004  }
1005 
1006  if (fromDecode->decodeUnblock[tid]) {
1007  assert(stalls[tid].decode);
1008  assert(!fromDecode->decodeBlock[tid]);
1009  stalls[tid].decode = false;
1010  }
1011 
1012  // Check squash signals from commit.
1013  if (fromCommit->commitInfo[tid].squash) {
1014 
1015  DPRINTF(Fetch, "[tid:%i] Squashing instructions due to squash "
1016  "from commit.\n",tid);
1017  // In any case, squash.
1018  squash(fromCommit->commitInfo[tid].pc,
1019  fromCommit->commitInfo[tid].doneSeqNum,
1020  fromCommit->commitInfo[tid].squashInst, tid);
1021 
1022  // If it was a branch mispredict on a control instruction, update the
1023  // branch predictor with that instruction, otherwise just kill the
1024  // invalid state we generated in after sequence number
1025  if (fromCommit->commitInfo[tid].mispredictInst &&
1026  fromCommit->commitInfo[tid].mispredictInst->isControl()) {
1027  branchPred->squash(fromCommit->commitInfo[tid].doneSeqNum,
1028  fromCommit->commitInfo[tid].pc,
1029  fromCommit->commitInfo[tid].branchTaken,
1030  tid);
1031  } else {
1032  branchPred->squash(fromCommit->commitInfo[tid].doneSeqNum,
1033  tid);
1034  }
1035 
1036  return true;
1037  } else if (fromCommit->commitInfo[tid].doneSeqNum) {
1038  // Update the branch predictor if it wasn't a squashed instruction
1039  // that was broadcasted.
1040  branchPred->update(fromCommit->commitInfo[tid].doneSeqNum, tid);
1041  }
1042 
1043  // Check squash signals from decode.
1044  if (fromDecode->decodeInfo[tid].squash) {
1045  DPRINTF(Fetch, "[tid:%i] Squashing instructions due to squash "
1046  "from decode.\n",tid);
1047 
1048  // Update the branch predictor.
1049  if (fromDecode->decodeInfo[tid].branchMispredict) {
1050  branchPred->squash(fromDecode->decodeInfo[tid].doneSeqNum,
1051  fromDecode->decodeInfo[tid].nextPC,
1052  fromDecode->decodeInfo[tid].branchTaken,
1053  tid);
1054  } else {
1055  branchPred->squash(fromDecode->decodeInfo[tid].doneSeqNum,
1056  tid);
1057  }
1058 
1059  if (fetchStatus[tid] != Squashing) {
1060 
1061  DPRINTF(Fetch, "Squashing from decode with PC = %s\n",
1062  fromDecode->decodeInfo[tid].nextPC);
1063  // Squash unless we're already squashing
1064  squashFromDecode(fromDecode->decodeInfo[tid].nextPC,
1065  fromDecode->decodeInfo[tid].squashInst,
1066  fromDecode->decodeInfo[tid].doneSeqNum,
1067  tid);
1068 
1069  return true;
1070  }
1071  }
1072 
// A stall only forces Blocked when no memory/translation/quiesce event
// is outstanding for the thread.
1073  if (checkStall(tid) &&
1074  fetchStatus[tid] != IcacheWaitResponse &&
1075  fetchStatus[tid] != IcacheWaitRetry &&
1076  fetchStatus[tid] != ItlbWait &&
1077  fetchStatus[tid] != QuiescePending) {
1078  DPRINTF(Fetch, "[tid:%i] Setting to blocked\n",tid);
1079 
1080  fetchStatus[tid] = Blocked;
1081 
1082  return true;
1083  }
1084 
1085  if (fetchStatus[tid] == Blocked ||
1086  fetchStatus[tid] == Squashing) {
1087  // Switch status to running if fetch isn't being told to block or
1088  // squash this cycle.
1089  DPRINTF(Fetch, "[tid:%i] Done squashing, switching to running.\n",
1090  tid);
1091 
1092  fetchStatus[tid] = Running;
1093 
1094  return true;
1095  }
1096 
1097  // If we've reached this point, we have not gotten any signals that
1098  // cause fetch to change its status. Fetch remains the same as before.
1099  return false;
1100 }
1101 
1102 template<class Impl>
1103 typename Impl::DynInstPtr
// NOTE(review): the defining line was dropped by this extract (original
// line 1104); presumably DefaultFetch<Impl>::buildInst(ThreadID tid,
// StaticInstPtr staticInst, followed by the parameters below.
// Builds a DynInst for a fetched instruction: assigns a sequence
// number, tags it with thread/ASID/thread-state, optionally attaches
// trace data, registers it with the CPU's instruction list, and pushes
// it onto the thread's fetch queue. Returns the new instruction.
1105  StaticInstPtr curMacroop, TheISA::PCState thisPC,
1106  TheISA::PCState nextPC, bool trace)
1107 {
1108  // Get a sequence number.
1109  InstSeqNum seq = cpu->getAndIncrementInstSeq();
1110 
1111  // Create a new DynInst from the instruction fetched.
1112  DynInstPtr instruction =
1113  new DynInst(staticInst, curMacroop, thisPC, nextPC, seq, cpu);
1114  instruction->setTid(tid);
1115 
1116  instruction->setASID(tid);
1117 
1118  instruction->setThreadState(cpu->thread[tid]);
1119 
1120  DPRINTF(Fetch, "[tid:%i] Instruction PC %#x (%d) created "
1121  "[sn:%lli].\n", tid, thisPC.instAddr(),
1122  thisPC.microPC(), seq);
1123 
1124  DPRINTF(Fetch, "[tid:%i] Instruction is: %s\n", tid,
1125  instruction->staticInst->
1126  disassemble(thisPC.instAddr()));
1127 
1128 #if TRACING_ON
1129  if (trace) {
1130  instruction->traceData =
1131  cpu->getTracer()->getInstRecord(curTick(), cpu->tcBase(tid),
1132  instruction->staticInst, thisPC, curMacroop);
1133  }
1134 #else
1135  instruction->traceData = NULL;
1136 #endif
1137 
1138  // Add instruction to the CPU's list of instructions.
1139  instruction->setInstListIt(cpu->addInst(instruction));
1140 
1141  // Write the instruction to the first slot in the queue
1142  // that heads to decode.
1143  assert(numInst < fetchWidth);
1144  fetchQueue[tid].push_back(instruction);
1145  assert(fetchQueue[tid].size() <= fetchQueueSize);
1146  DPRINTF(Fetch, "[tid:%i] Fetch queue entry created (%i/%i).\n",
1147  tid, fetchQueue[tid].size(), fetchQueueSize);
1148  //toDecode->insts[toDecode->size++] = instruction;
1149 
1150  // Keep track of if we can take an interrupt at this boundary
1151  delayedCommit[tid] = instruction->isDelayedCommit();
1152 
1153  return instruction;
1154 }
1155 
// Does the actual fetching of instructions for one thread this cycle and
// passes them on to decode (per this page's symbol index). Outline:
//  1. pick a thread via getFetchingThread(); bail out if none is fetchable;
//  2. based on fetchStatus: resume after a completed icache miss, start a
//     new fetch-buffer fill, stall on a posted interrupt, or do nothing
//     while Idle;
//  3. feed fetch-buffer bytes to the per-thread decoder and build DynInsts
//     until fetchWidth / fetchQueueSize is exhausted, a predicted-taken
//     branch ends the block, or a quiesce instruction halts fetch;
//  4. save end-of-cycle state and decide whether to pipeline the next
//     icache access.
//
// NOTE(review): doxygen listing lines 1160, 1162, 1169, 1211, 1215 and 1221
// were dropped by the HTML extraction (they contained hyperlinked
// identifiers); the gaps are flagged inline below. Confirm against the real
// fetch_impl.hh before relying on this listing.
1156 template<class Impl>
1157 void
1158 DefaultFetch<Impl>::fetch(bool &status_change)
1159 {
1161  // Start actual fetch
1163  ThreadID tid = getFetchingThread();
1164 
1165  assert(!cpu->switchedOut());
1166 
1167  if (tid == InvalidThreadID) {
1168  // Breaks looping condition in tick()
// NOTE(review): listing line 1169 — the statement the comment above refers
// to — is missing from this extraction; TODO confirm upstream.
1170 
1171  if (numThreads == 1) { // @todo Per-thread stats
1172  profileStall(0);
1173  }
1174 
1175  return;
1176  }
1177 
1178  DPRINTF(Fetch, "Attempting to fetch from [tid:%i]\n", tid);
1179 
1180  // The current PC.
1181  TheISA::PCState thisPC = pc[tid];
1182 
1183  Addr pcOffset = fetchOffset[tid];
1184  Addr fetchAddr = (thisPC.instAddr() + pcOffset) & BaseCPU::PCMask;
1185 
1186  bool inRom = isRomMicroPC(thisPC.microPC());
1187 
1188  // If returning from the delay of a cache miss, then update the status
1189  // to running, otherwise do the cache access. Possibly move this up
1190  // to tick() function.
1191  if (fetchStatus[tid] == IcacheAccessComplete) {
1192  DPRINTF(Fetch, "[tid:%i] Icache miss is complete.\n", tid);
1193 
1194  fetchStatus[tid] = Running;
1195  status_change = true;
1196  } else if (fetchStatus[tid] == Running) {
1197  // Align the fetch PC so it's at the start of a fetch buffer segment.
1198  Addr fetchBufferBlockPC = fetchBufferAlignPC(fetchAddr);
1199 
1200  // If buffer is no longer valid or fetchAddr has moved to point
1201  // to the next cache block, AND we have no remaining ucode
1202  // from a macro-op, then start fetch from icache.
1203  if (!(fetchBufferValid[tid] && fetchBufferBlockPC == fetchBufferPC[tid])
1204  && !inRom && !macroop[tid]) {
1205  DPRINTF(Fetch, "[tid:%i] Attempting to translate and read "
1206  "instruction, starting at PC %s.\n", tid, thisPC);
1207 
1208  fetchCacheLine(fetchAddr, tid, thisPC.instAddr());
1209 
// NOTE(review): the bodies of the first and last arms of this
// if/else-if/else are missing — listing lines 1211 (after the
// IcacheWaitResponse test) and 1215 (after the bare else) were dropped by
// the extraction; presumably they bump the corresponding stall-cycle stats
// (cf. icacheStallCycles / fetchMiscStallCycles in this page's symbol
// index) — TODO confirm upstream.
1210  if (fetchStatus[tid] == IcacheWaitResponse)
1212  else if (fetchStatus[tid] == ItlbWait)
1213  ++fetchTlbCycles;
1214  else
1216  return;
1217  } else if ((checkInterrupt(thisPC.instAddr()) && !delayedCommit[tid])) {
1218  // Stall CPU if an interrupt is posted and we're not issuing
1219  // a delayed commit micro-op currently (delayed commit instructions
1220  // are not interruptable by interrupts, only faults)
// NOTE(review): listing line 1221 is missing here (likely a stall-stat
// increment) — TODO confirm upstream.
1222  DPRINTF(Fetch, "[tid:%i] Fetch is stalled!\n", tid);
1223  return;
1224  }
1225  } else {
1226  if (fetchStatus[tid] == Idle) {
1227  ++fetchIdleCycles;
1228  DPRINTF(Fetch, "[tid:%i] Fetch is idle!\n", tid);
1229  }
1230 
1231  // Status is Idle, so fetch should do nothing.
1232  return;
1233  }
1234 
1235  ++fetchCycles;
1236 
1237  TheISA::PCState nextPC = thisPC;
1238 
1239  StaticInstPtr staticInst = NULL;
1240  StaticInstPtr curMacroop = macroop[tid];
1241 
1242  // If the read of the first instruction was successful, then grab the
1243  // instructions from the rest of the cache line and put them into the
1244  // queue heading to decode.
1245 
1246  DPRINTF(Fetch, "[tid:%i] Adding instructions to queue to "
1247  "decode.\n", tid);
1248 
1249  // Need to keep track of whether or not a predicted branch
1250  // ended this fetch block.
1251  bool predictedBranch = false;
1252 
1253  // Need to halt fetch if quiesce instruction detected
1254  bool quiesce = false;
1255 
1256  TheISA::MachInst *cacheInsts =
1257  reinterpret_cast<TheISA::MachInst *>(fetchBuffer[tid]);
1258 
1259  const unsigned numInsts = fetchBufferSize / instSize;
1260  unsigned blkOffset = (fetchAddr - fetchBufferPC[tid]) / instSize;
1261 
1262  // Loop through instruction memory from the cache.
1263  // Keep issuing while fetchWidth is available and branch is not
1264  // predicted taken
1265  while (numInst < fetchWidth && fetchQueue[tid].size() < fetchQueueSize
1266  && !predictedBranch && !quiesce) {
1267  // We need to process more memory if we aren't going to get a
1268  // StaticInst from the rom, the current macroop, or what's already
1269  // in the decoder.
1270  bool needMem = !inRom && !curMacroop &&
1271  !decoder[tid]->instReady();
1272  fetchAddr = (thisPC.instAddr() + pcOffset) & BaseCPU::PCMask;
1273  Addr fetchBufferBlockPC = fetchBufferAlignPC(fetchAddr);
1274 
1275  if (needMem) {
1276  // If buffer is no longer valid or fetchAddr has moved to point
1277  // to the next cache block then start fetch from icache.
1278  if (!fetchBufferValid[tid] ||
1279  fetchBufferBlockPC != fetchBufferPC[tid])
1280  break;
1281 
1282  if (blkOffset >= numInsts) {
1283  // We need to process more memory, but we've run out of the
1284  // current block.
1285  break;
1286  }
1287 
1288  decoder[tid]->moreBytes(thisPC, fetchAddr, cacheInsts[blkOffset]);
1289 
1290  if (decoder[tid]->needMoreBytes()) {
1291  blkOffset++;
1292  fetchAddr += instSize;
1293  pcOffset += instSize;
1294  }
1295  }
1296 
1297  // Extract as many instructions and/or microops as we can from
1298  // the memory we've processed so far.
1299  do {
1300  if (!(curMacroop || inRom)) {
1301  if (decoder[tid]->instReady()) {
1302  staticInst = decoder[tid]->decode(thisPC);
1303 
1304  // Increment stat of fetched instructions.
1305  ++fetchedInsts;
1306 
1307  if (staticInst->isMacroop()) {
1308  curMacroop = staticInst;
1309  } else {
1310  pcOffset = 0;
1311  }
1312  } else {
1313  // We need more bytes for this instruction so blkOffset and
1314  // pcOffset will be updated
1315  break;
1316  }
1317  }
1318  // Whether we're moving to a new macroop because we're at the
1319  // end of the current one, or the branch predictor incorrectly
1320  // thinks we are...
1321  bool newMacro = false;
1322  if (curMacroop || inRom) {
1323  if (inRom) {
1324  staticInst = cpu->microcodeRom.fetchMicroop(
1325  thisPC.microPC(), curMacroop);
1326  } else {
1327  staticInst = curMacroop->fetchMicroop(thisPC.microPC());
1328  }
1329  newMacro |= staticInst->isLastMicroop();
1330  }
1331 
1332  DynInstPtr instruction =
1333  buildInst(tid, staticInst, curMacroop,
1334  thisPC, nextPC, true);
1335 
1336  ppFetch->notify(instruction);
1337  numInst++;
1338 
1339 #if TRACING_ON
1340  if (DTRACE(O3PipeView)) {
1341  instruction->fetchTick = curTick();
1342  }
1343 #endif
1344 
1345  nextPC = thisPC;
1346 
1347  // If we're branching after this instruction, quit fetching
1348  // from the same block.
1349  predictedBranch |= thisPC.branching();
1350  predictedBranch |=
1351  lookupAndUpdateNextPC(instruction, nextPC);
1352  if (predictedBranch) {
1353  DPRINTF(Fetch, "Branch detected with PC = %s\n", thisPC);
1354  }
1355 
1356  newMacro |= thisPC.instAddr() != nextPC.instAddr();
1357 
1358  // Move to the next instruction, unless we have a branch.
1359  thisPC = nextPC;
1360  inRom = isRomMicroPC(thisPC.microPC());
1361 
1362  if (newMacro) {
// Crossed into a new macro-op (or left one): realign the fetch address
// and buffer offset to the new instruction address.
1363  fetchAddr = thisPC.instAddr() & BaseCPU::PCMask;
1364  blkOffset = (fetchAddr - fetchBufferPC[tid]) / instSize;
1365  pcOffset = 0;
1366  curMacroop = NULL;
1367  }
1368 
1369  if (instruction->isQuiesce()) {
1370  DPRINTF(Fetch,
1371  "Quiesce instruction encountered, halting fetch!\n");
1372  fetchStatus[tid] = QuiescePending;
1373  status_change = true;
1374  quiesce = true;
1375  break;
1376  }
1377  } while ((curMacroop || decoder[tid]->instReady()) &&
1378  numInst < fetchWidth &&
1379  fetchQueue[tid].size() < fetchQueueSize);
1380 
1381  // Re-evaluate whether the next instruction to fetch is in micro-op ROM
1382  // or not.
1383  inRom = isRomMicroPC(thisPC.microPC());
1384  }
1385 
1386  if (predictedBranch) {
1387  DPRINTF(Fetch, "[tid:%i] Done fetching, predicted branch "
1388  "instruction encountered.\n", tid);
1389  } else if (numInst >= fetchWidth) {
1390  DPRINTF(Fetch, "[tid:%i] Done fetching, reached fetch bandwidth "
1391  "for this cycle.\n", tid);
1392  } else if (blkOffset >= fetchBufferSize) {
1393  DPRINTF(Fetch, "[tid:%i] Done fetching, reached the end of the"
1394  "fetch buffer.\n", tid);
1395  }
1396 
// Persist decode/fetch state that must survive to the next cycle.
1397  macroop[tid] = curMacroop;
1398  fetchOffset[tid] = pcOffset;
1399 
1400  if (numInst > 0) {
1401  wroteToTimeBuffer = true;
1402  }
1403 
1404  pc[tid] = thisPC;
1405 
1406  // pipeline a fetch if we're crossing a fetch buffer boundary and not in
1407  // a state that would preclude fetching
1408  fetchAddr = (thisPC.instAddr() + pcOffset) & BaseCPU::PCMask;
1409  Addr fetchBufferBlockPC = fetchBufferAlignPC(fetchAddr);
1410  issuePipelinedIfetch[tid] = fetchBufferBlockPC != fetchBufferPC[tid] &&
1411  fetchStatus[tid] != IcacheWaitResponse &&
1412  fetchStatus[tid] != ItlbWait &&
1413  fetchStatus[tid] != IcacheWaitRetry &&
1414  fetchStatus[tid] != QuiescePending &&
1415  !curMacroop;
1416 }
1417 
// Handles retrying a fetch access that the icache previously rejected (per
// this page's symbol index: "Handles retrying the fetch access", source line
// 1420 — the dropped signature line — is DefaultFetch<Impl>::recvReqRetry()).
//
// NOTE(review): listing lines 1427-1428, 1431 and 1433 are missing from this
// extraction. Given the surviving comment about notifying the Fetch Request
// probe before clearing retryPkt, they appear to have contained the actual
// resend via icachePort.sendTimingReq(retryPkt), the ppFetchRequestSent
// notify, and the retryTid reset — TODO confirm against upstream.
1418 template<class Impl>
1419 void
1421 {
1422  if (retryPkt != NULL) {
1423  assert(cacheBlocked);
1424  assert(retryTid != InvalidThreadID);
1425  assert(fetchStatus[retryTid] == IcacheWaitRetry);
1426 
1429  // Notify Fetch Request probe when a retryPkt is successfully sent.
1430  // Note that notify must be called before retryPkt is set to NULL.
1432  retryPkt = NULL;
1434  cacheBlocked = false;
1435  }
1436  } else {
1437  assert(retryTid == InvalidThreadID);
1438  // Access has been squashed since it was sent out. Just clear
1439  // the cache being blocked.
1440  cacheBlocked = false;
1441  }
1442 }
1443 
1445 // //
1446 // SMT FETCH POLICY MAINTAINED HERE //
1447 // //
// Returns the appropriate thread to fetch from, given the configured SMT
// fetch policy (per this page's symbol index; the dropped listing line 1451
// is the signature DefaultFetch<Impl>::getFetchingThread()). With a single
// thread, the sole active thread is returned directly when it is in a
// fetchable state.
1449 template<class Impl>
1450 ThreadID
1452 {
1453  if (numThreads > 1) {
// Dispatch to the per-policy selector; unknown policies yield no thread.
1454  switch (fetchPolicy) {
1455  case FetchPolicy::RoundRobin:
1456  return roundRobin();
1457  case FetchPolicy::IQCount:
1458  return iqCount();
1459  case FetchPolicy::LSQCount:
1460  return lsqCount();
1461  case FetchPolicy::Branch:
1462  return branchCount();
1463  default:
1464  return InvalidThreadID;
1465  }
1466  } else {
1467  list<ThreadID>::iterator thread = activeThreads->begin();
1468  if (thread == activeThreads->end()) {
1469  return InvalidThreadID;
1470  }
1471 
1472  ThreadID tid = *thread;
1473 
// NOTE(review): listing line 1475 is missing between these two conditions;
// by analogy with roundRobin/iqCount/lsqCount below it presumably tested
// fetchStatus[tid] == IcacheAccessComplete — TODO confirm upstream.
1474  if (fetchStatus[tid] == Running ||
1476  fetchStatus[tid] == Idle) {
1477  return tid;
1478  } else {
1479  return InvalidThreadID;
1480  }
1481  }
1482 }
1483 
1484 
// Returns the appropriate thread to fetch using a round-robin policy (per
// this page's symbol index; dropped listing line 1487 is the signature
// DefaultFetch<Impl>::roundRobin()). The first fetchable thread on the
// priority list is chosen and rotated to the back of the list.
//
// NOTE(review): listing line 1490/1491 is missing; the loop below compares
// against an `end` iterator with no visible declaration, so the dropped line
// presumably read `list<ThreadID>::iterator end = priorityList.end();` —
// TODO confirm upstream.
1485 template<class Impl>
1486 ThreadID
1488 {
1489  list<ThreadID>::iterator pri_iter = priorityList.begin();
1491 
1492  ThreadID high_pri;
1493 
1494  while (pri_iter != end) {
1495  high_pri = *pri_iter;
1496 
// NOTE(review): `<=` admits high_pri == numThreads, which looks off by one
// for a 0-based thread id — confirm the intended invariant.
1497  assert(high_pri <= numThreads);
1498 
1499  if (fetchStatus[high_pri] == Running ||
1500  fetchStatus[high_pri] == IcacheAccessComplete ||
1501  fetchStatus[high_pri] == Idle) {
1502 
// Rotate the chosen thread to the back so others get priority next time.
1503  priorityList.erase(pri_iter);
1504  priorityList.push_back(high_pri);
1505 
1506  return high_pri;
1507  }
1508 
1509  pri_iter++;
1510  }
1511 
1512  return InvalidThreadID;
1513 }
1514 
// Returns the appropriate thread to fetch using the IQ-count policy (per
// this page's symbol index; dropped listing line 1517 is the signature
// DefaultFetch<Impl>::iqCount()). Threads are ordered by their instruction
// queue occupancy (fewest entries first, via a min-heap) and the least
// loaded thread that is in a fetchable state wins.
//
// NOTE(review): listing line 1525/1526 is missing; the loop compares against
// an `end` iterator with no visible declaration — presumably
// `list<ThreadID>::iterator end = activeThreads->end();` — TODO confirm.
1515 template<class Impl>
1516 ThreadID
1518 {
1519  //sorted from lowest->highest
1520  std::priority_queue<unsigned,vector<unsigned>,
1521  std::greater<unsigned> > PQ;
1522  std::map<unsigned, ThreadID> threadMap;
1523 
1524  list<ThreadID>::iterator threads = activeThreads->begin();
1526 
1527  while (threads != end) {
1528  ThreadID tid = *threads++;
1529  unsigned iqCount = fromIEW->iewInfo[tid].iqCount;
1530 
1531  //we can potentially get tid collisions if two threads
1532  //have the same iqCount, but this should be rare.
1533  PQ.push(iqCount);
1534  threadMap[iqCount] = tid;
1535  }
1536 
1537  while (!PQ.empty()) {
1538  ThreadID high_pri = threadMap[PQ.top()];
1539 
1540  if (fetchStatus[high_pri] == Running ||
1541  fetchStatus[high_pri] == IcacheAccessComplete ||
1542  fetchStatus[high_pri] == Idle)
1543  return high_pri;
1544  else
1545  PQ.pop();
1546 
1547  }
1548 
1549  return InvalidThreadID;
1550 }
1551 
// Returns the appropriate thread to fetch using the LSQ-count policy (per
// this page's symbol index; dropped listing line 1554 is the signature
// DefaultFetch<Impl>::lsqCount()). Mirrors iqCount() but orders threads by
// load/store queue occupancy instead of IQ occupancy.
//
// NOTE(review): listing line 1562/1563 is missing; the loop compares against
// an `end` iterator with no visible declaration — presumably
// `list<ThreadID>::iterator end = activeThreads->end();` — TODO confirm.
1552 template<class Impl>
1553 ThreadID
1555 {
1556  //sorted from lowest->highest
1557  std::priority_queue<unsigned,vector<unsigned>,
1558  std::greater<unsigned> > PQ;
1559  std::map<unsigned, ThreadID> threadMap;
1560 
1561  list<ThreadID>::iterator threads = activeThreads->begin();
1563 
1564  while (threads != end) {
1565  ThreadID tid = *threads++;
1566  unsigned ldstqCount = fromIEW->iewInfo[tid].ldstqCount;
1567 
1568  //we can potentially get tid collisions if two threads
1569  //have the same ldstqCount, but this should be rare.
1570  PQ.push(ldstqCount);
1571  threadMap[ldstqCount] = tid;
1572  }
1573 
1574  while (!PQ.empty()) {
1575  ThreadID high_pri = threadMap[PQ.top()];
1576 
1577  if (fetchStatus[high_pri] == Running ||
1578  fetchStatus[high_pri] == IcacheAccessComplete ||
1579  fetchStatus[high_pri] == Idle)
1580  return high_pri;
1581  else
1582  PQ.pop();
1583  }
1584 
1585  return InvalidThreadID;
1586 }
1587 
// Branch-count fetch policy selector — intentionally unimplemented; always
// panics (per this page's symbol index; dropped listing line 1590 is the
// signature DefaultFetch<Impl>::branchCount()).
1588 template<class Impl>
1589 ThreadID
1591 {
1592  panic("Branch Count Fetch policy unimplemented\n");
// Unreachable after panic(); kept to satisfy the return type.
1593  return InvalidThreadID;
1594 }
1595 
// Pipelines the next I-cache access on top of the current one (per this
// page's symbol index; dropped listing line 1598 is the signature
// DefaultFetch<Impl>::pipelineIcacheAccesses(ThreadID tid)). Bails out if no
// pipelined fetch was scheduled this cycle, if the next micro-op comes from
// the microcode ROM, or if the fetch buffer already holds the target block.
1596 template<class Impl>
1597 void
1599 {
1600  if (!issuePipelinedIfetch[tid]) {
1601  return;
1602  }
1603 
1604  // The next PC to access.
1605  TheISA::PCState thisPC = pc[tid];
1606 
1607  if (isRomMicroPC(thisPC.microPC())) {
1608  return;
1609  }
1610 
1611  Addr pcOffset = fetchOffset[tid];
1612  Addr fetchAddr = (thisPC.instAddr() + pcOffset) & BaseCPU::PCMask;
1613 
1614  // Align the fetch PC so it's at the start of a fetch buffer segment.
1615  Addr fetchBufferBlockPC = fetchBufferAlignPC(fetchAddr);
1616 
1617  // Unless buffer already got the block, fetch it from icache.
1618  if (!(fetchBufferValid[tid] && fetchBufferBlockPC == fetchBufferPC[tid])) {
1619  DPRINTF(Fetch, "[tid:%i] Issuing a pipelined I-cache access, "
1620  "starting at PC %s.\n", tid, thisPC);
1621 
1622  fetchCacheLine(fetchAddr, tid, thisPC.instAddr());
1623  }
1624 }
1625 
// Profiles the reason fetch is stalled this cycle, emitting a DPRINTF per
// cause (per this page's symbol index; dropped listing line 1628 is the
// signature DefaultFetch<Impl>::profileStall(ThreadID tid)).
//
// NOTE(review): listing lines 1634, 1637, 1640, 1643, 1646, 1654, 1658 and
// 1662 are missing from this extraction. Given that the surviving ItlbWait
// arm bumps fetchTlbCycles, the dropped lines in the other arms presumably
// bumped the matching stall stats (fetchPendingDrainCycles,
// fetchNoActiveThreadStallCycles, fetchBlockedCycles, fetchSquashCycles,
// icacheStallCycles, fetchPendingTrapStallCycles,
// fetchPendingQuiesceStallCycles, fetchIcacheWaitRetryStallCycles — all
// listed in this page's symbol index) — TODO confirm upstream.
1626 template<class Impl>
1627 void
1629  DPRINTF(Fetch,"There are no more threads available to fetch from.\n");
1630 
1631  // @todo Per-thread stats
1632 
1633  if (stalls[tid].drain) {
1635  DPRINTF(Fetch, "Fetch is waiting for a drain!\n");
1636  } else if (activeThreads->empty()) {
1638  DPRINTF(Fetch, "Fetch has no active thread!\n");
1639  } else if (fetchStatus[tid] == Blocked) {
1641  DPRINTF(Fetch, "[tid:%i] Fetch is blocked!\n", tid);
1642  } else if (fetchStatus[tid] == Squashing) {
1644  DPRINTF(Fetch, "[tid:%i] Fetch is squashing!\n", tid);
1645  } else if (fetchStatus[tid] == IcacheWaitResponse) {
1647  DPRINTF(Fetch, "[tid:%i] Fetch is waiting cache response!\n",
1648  tid);
1649  } else if (fetchStatus[tid] == ItlbWait) {
1650  ++fetchTlbCycles;
1651  DPRINTF(Fetch, "[tid:%i] Fetch is waiting ITLB walk to "
1652  "finish!\n", tid);
1653  } else if (fetchStatus[tid] == TrapPending) {
1655  DPRINTF(Fetch, "[tid:%i] Fetch is waiting for a pending trap!\n",
1656  tid);
1657  } else if (fetchStatus[tid] == QuiescePending) {
1659  DPRINTF(Fetch, "[tid:%i] Fetch is waiting for a pending quiesce "
1660  "instruction!\n", tid);
1661  } else if (fetchStatus[tid] == IcacheWaitRetry) {
1663  DPRINTF(Fetch, "[tid:%i] Fetch is waiting for an I-cache retry!\n",
1664  tid);
1665  } else if (fetchStatus[tid] == NoGoodAddr) {
1666  DPRINTF(Fetch, "[tid:%i] Fetch predicted non-executable address\n",
1667  tid);
1668  } else {
1669  DPRINTF(Fetch, "[tid:%i] Unexpected fetch stall reason "
1670  "(Status: %i)\n",
1671  tid, fetchStatus[tid]);
1672  }
1673 }
1674 
// Timing-mode receive for the fetch unit's I-cache port: sanity-checks the
// packet and forwards it to DefaultFetch::processCacheCompletion(). Always
// accepts the response. (Per this page's symbol index, the dropped listing
// line 1677 is the signature
// DefaultFetch<Impl>::IcachePort::recvTimingResp(PacketPtr pkt).)
1675 template<class Impl>
1676 bool
1678 {
1679  DPRINTF(O3CPU, "Fetch unit received timing\n");
1680  // We shouldn't ever get a cacheable block in Modified state
1681  assert(pkt->req->isUncacheable() ||
1682  !(pkt->cacheResponding() && !pkt->hasSharers()));
1683  fetch->processCacheCompletion(pkt);
1684 
1685  return true;
1686 }
1687 
// Port-level retry hook: simply delegates to the fetch stage's
// recvReqRetry(). (Per this page's symbol index, the dropped listing line
// 1690 is the signature DefaultFetch<Impl>::IcachePort::recvReqRetry(),
// "Handles doing a retry of a failed fetch".)
1688 template<class Impl>
1689 void
1691 {
1692  fetch->recvReqRetry();
1693 }
1694 
1695 #endif//__CPU_O3_FETCH_IMPL_HH__
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:167
Impl::O3CPU O3CPU
Definition: fetch.hh:81
#define DPRINTF(x,...)
Definition: trace.hh:229
Stats::Formula fetchRate
Number of instruction fetched per cycle.
Definition: fetch.hh:602
const FlagsType pdf
Print the percent of the total that this entry represents.
Definition: info.hh:53
Impl::DynInst DynInst
Definition: fetch.hh:79
unsigned fetchWidth
The width of fetch in instructions.
Definition: fetch.hh:480
virtual void recvReqRetry()
Handles doing a retry of a failed fetch.
Definition: fetch_impl.hh:1690
ThreadID iqCount()
Returns the appropriate thread to fetch using the IQ count policy.
Definition: fetch_impl.hh:1517
void profileStall(ThreadID tid)
Profile the reasons of fetch stall.
Definition: fetch_impl.hh:1628
TheISA::Decoder * decoder[Impl::MaxThreads]
The decoder.
Definition: fetch.hh:381
decltype(nullptr) constexpr NoFault
Definition: types.hh:245
Cycles is a wrapper class for representing cycle counts, i.e.
Definition: types.hh:83
Stats::Formula idleRate
Rate of how often fetch was idle.
Definition: fetch.hh:598
#define fatal(...)
This implements a cprintf based fatal() function.
Definition: logging.hh:175
void tick()
Ticks the fetch stage, processing all inputs signals and fetching as many instructions as possible...
Definition: fetch_impl.hh:897
void resetStage()
Reset this pipeline stage.
Definition: fetch_impl.hh:354
Bitfield< 7 > i
void squashFromDecode(const TheISA::PCState &newPC, const DynInstPtr squashInst, const InstSeqNum seq_num, ThreadID tid)
Squashes a specific thread and resets the PC.
Definition: fetch_impl.hh:813
TimeBuffer< TimeStruct >::wire fromRename
Wire to get rename's information from backwards time buffer.
Definition: fetch.hh:423
void pipelineIcacheAccesses(ThreadID tid)
Pipeline the next I-cache access to the current one.
Definition: fetch_impl.hh:1598
Addr fetchBufferPC[Impl::MaxThreads]
The PC of the first instruction loaded into the fetch buffer.
Definition: fetch.hh:509
bool isMacroop() const
Definition: static_inst.hh:196
bool checkInterrupt(Addr pc)
Check if an interrupt is pending and that we need to handle.
Definition: fetch.hh:326
std::shared_ptr< Request > RequestPtr
Definition: request.hh:83
FetchStatus _status
Fetch status.
Definition: fetch.hh:205
void notify(const Arg &arg)
called at the ProbePoint call site, passes arg to each listener.
Definition: probe.hh:288
Impl::DynInstPtr DynInstPtr
Definition: fetch.hh:80
bool checkStall(ThreadID tid) const
Checks if a thread is stalled.
Definition: fetch_impl.hh:828
Stats::Scalar fetchPendingTrapStallCycles
Total number of stall cycles caused by pending traps.
Definition: fetch.hh:580
TimeBuffer< FetchStruct >::wire toDecode
Wire used to write any information heading to decode.
Definition: fetch.hh:433
int numInst
Tracks how many instructions has been fetched this cycle.
Definition: fetch.hh:456
bool cacheResponding() const
Definition: packet.hh:591
std::string name() const
Returns the name of fetch.
Definition: fetch_impl.hh:154
bool FullSystem
The FullSystem variable can be used to determine the current mode of simulation.
Definition: root.cc:136
void processCacheCompletion(PacketPtr pkt)
Processes cache completion event.
Definition: fetch_impl.hh:389
Stats::Scalar fetchMiscStallCycles
Total number of cycles spent in any other state.
Definition: fetch.hh:574
Stats::Scalar fetchIdleCycles
Stat for total number of cycles spent blocked due to other stages in the pipeline.
Definition: fetch.hh:570
Stats::Formula branchRate
Number of branch fetches per cycle.
Definition: fetch.hh:600
unsigned fetchQueueSize
The size of the fetch queue in micro-ops.
Definition: fetch.hh:512
void update(const InstSeqNum &done_sn, ThreadID tid)
Tells the branch predictor to commit any updates until the given sequence number. ...
Definition: bpred_unit.cc:345
Overload hash function for BasicBlockRange type.
Definition: vec_reg.hh:586
bool sendTimingReq(PacketPtr pkt)
Attempt to send a timing request to the slave port by calling its corresponding receive function...
Definition: port.hh:445
Stats::Scalar fetchTlbSquashes
Total number of outstanding tlb accesses that were dropped due to a squash.
Definition: fetch.hh:594
TimeBuffer< TimeStruct >::wire fromDecode
Wire to get decode's information from backwards time buffer.
Definition: fetch.hh:420
Stats::Scalar icacheStallCycles
Stat for total number of cycles stalled due to an icache miss.
Definition: fetch.hh:554
ThreadID getFetchingThread()
Returns the appropriate thread to fetch, given the fetch policy.
Definition: fetch_impl.hh:1451
Derived & flags(Flags _flags)
Set the flags and marks this stat to print at the end of simulation.
Definition: statistics.hh:336
bool issuePipelinedIfetch[Impl::MaxThreads]
Set to true if a pipelined I-cache request should be issued.
Definition: fetch.hh:547
ThreadID numThreads
Number of threads.
Definition: fetch.hh:530
std::enable_if< std::is_integral< T >::value, T >::type random()
Use the SFINAE idiom to choose an implementation based on whether the type is integral or floating po...
Definition: random.hh:83
Stats::Scalar fetchedInsts
Stat for total number of fetched instructions.
Definition: fetch.hh:556
Stats::Scalar fetchNoActiveThreadStallCycles
Total number of stall cycles caused by no active threads to run.
Definition: fetch.hh:578
uint32_t MachInst
Definition: types.hh:40
void drainSanityCheck() const
Perform sanity checks after a drain.
Definition: fetch_impl.hh:443
RequestPtr req
A pointer to the original request.
Definition: packet.hh:327
FetchStatus
Overall fetch status.
Definition: fetch.hh:182
Stats::Distribution fetchNisnDist
Distribution of number of instructions fetched each cycle.
Definition: fetch.hh:596
void deactivateThread(ThreadID tid)
For priority-based fetch policies, need to keep update priorityList.
Definition: fetch_impl.hh:547
Stats::Scalar fetchPendingDrainCycles
Total number of cycles spent in waiting for drains.
Definition: fetch.hh:576
PacketPtr retryPkt
The packet that is waiting to be retried.
Definition: fetch.hh:489
ThreadID numFetchingThreads
Number of threads that are actively fetching.
Definition: fetch.hh:533
Cycles iewToFetchDelay
IEW to fetch delay.
Definition: fetch.hh:474
void takeOverFrom()
Takes over from another CPU's thread.
Definition: fetch_impl.hh:492
void drainStall(ThreadID tid)
Stall the fetch stage after reaching a safe drain point.
Definition: fetch_impl.hh:501
Stats::Scalar fetchCycles
Stat for total number of cycles spent fetching.
Definition: fetch.hh:562
ThreadID threadFetched
Thread ID being fetched.
Definition: fetch.hh:536
Tick curTick()
The current simulated tick.
Definition: core.hh:47
void quiesce(ThreadContext *tc)
Definition: pseudo_inst.cc:102
void regStats()
Registers statistics.
Definition: fetch_impl.hh:171
void clearStates(ThreadID tid)
Clear all thread-specific states.
Definition: fetch_impl.hh:334
Addr fetchOffset[Impl::MaxThreads]
Definition: fetch.hh:440
#define DTRACE(x)
Definition: trace.hh:227
bool scheduled() const
Determine if the current event is scheduled.
Definition: eventq.hh:385
ThreadID roundRobin()
Returns the appropriate thread to fetch using a round robin policy.
Definition: fetch_impl.hh:1487
virtual StaticInstPtr fetchMicroop(MicroPC upc) const
Return the microop that goes with a particular micropc.
Definition: static_inst.cc:100
void switchToActive()
Changes the status of this stage to active, and indicates this to the CPU.
Definition: fetch_impl.hh:521
Derived & prereq(const Stat &prereq)
Set the prerequisite stat and marks this stat to print at the end of simulation.
Definition: statistics.hh:350
int instSize
Size of instructions.
Definition: fetch.hh:521
void finishTranslation(const Fault &fault, const RequestPtr &mem_req)
Definition: fetch_impl.hh:654
void fetch(bool &status_change)
Does the actual fetching of instructions and passing them on to the next stage.
Definition: fetch_impl.hh:1158
Stats::Scalar fetchTlbCycles
Stat for total number of cycles spent waiting for translation.
Definition: fetch.hh:566
RequestPtr memReq[Impl::MaxThreads]
Memory request used to access cache.
Definition: fetch.hh:448
IcachePort icachePort
Instruction port.
Definition: fetch.hh:544
bool fetchCacheLine(Addr vaddr, ThreadID tid, Addr pc)
Fetches the cache line that contains the fetch PC.
Definition: fetch_impl.hh:604
void recvReqRetry()
Handles retrying the fetch access.
Definition: fetch_impl.hh:1420
bool predict(const StaticInstPtr &inst, const InstSeqNum &seqNum, TheISA::PCState &pc, ThreadID tid)
Predicts whether or not the instruction is a taken branch, and the target of the branch if it is take...
Definition: bpred_unit.cc:172
DefaultFetch(O3CPU *_cpu, DerivO3CPUParams *params)
DefaultFetch constructor.
Definition: fetch_impl.hh:83
uint64_t InstSeqNum
Definition: inst_seq.hh:40
void setReq(const RequestPtr &_req)
Definition: fetch.hh:160
ThreadID retryTid
The thread that is waiting on the cache to tell fetch to retry.
Definition: fetch.hh:492
Distribution & init(Counter min, Counter max, Counter bkt)
Set the parameters of this distribution.
Definition: statistics.hh:2609
Stats::Scalar predictedBranches
Stat for total number of predicted branches.
Definition: fetch.hh:560
void setActiveThreads(std::list< ThreadID > *at_ptr)
Sets pointer to list of active threads.
Definition: fetch_impl.hh:307
std::list< ThreadID > * activeThreads
List of Active Threads.
Definition: fetch.hh:527
Defines global host-dependent types: Counter, Tick, and (indirectly) {int,uint}{8,16,32,64}_t.
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:142
void regProbePoints()
Registers probes.
Definition: fetch_impl.hh:161
void advancePC(PCState &pc, const StaticInstPtr &inst)
Definition: utility.hh:98
TimeBuffer< TimeStruct >::wire fromCommit
Wire to get commit&#39;s information from backwards time buffer.
Definition: fetch.hh:429
Stats::Scalar fetchIcacheWaitRetryStallCycles
Total number of stall cycles caused by I-cache wait retries.
Definition: fetch.hh:584
A Packet is used to encapsulate a transfer between two objects in the memory system (e...
Definition: packet.hh:255
uint8_t * fetchBuffer[Impl::MaxThreads]
The fetch data that is being fetched and buffered.
Definition: fetch.hh:506
static bool isRomMicroPC(MicroPC upc)
Definition: types.hh:161
std::deque< DynInstPtr > fetchQueue[Impl::MaxThreads]
Queue of fetched instructions.
Definition: fetch.hh:515
Stats::Scalar fetchIcacheSquashes
Total number of outstanding icache accesses that were dropped due to a squash.
Definition: fetch.hh:590
bool interruptPending
Checks if there is an interrupt pending.
Definition: fetch.hh:541
static StaticInstPtr nopStaticInstPtr
Pointer to a statically allocated generic "nop" instruction object.
Definition: static_inst.hh:226
ThreadID branchCount()
Returns the appropriate thread to fetch using the branch count policy.
Definition: fetch_impl.hh:1590
The request was an instruction fetch.
Definition: request.hh:105
const FlagsType total
Print the total.
Definition: info.hh:51
const ThreadID InvalidThreadID
Definition: types.hh:228
unsigned decodeWidth
The width of decode in instructions.
Definition: fetch.hh:483
TheISA::PCState pc[Impl::MaxThreads]
Definition: fetch.hh:438
static const Addr PCMask
Definition: base.hh:279
bool hasSharers() const
Definition: packet.hh:618
Counter lastIcacheStall[Impl::MaxThreads]
Icache stall statistics.
Definition: fetch.hh:524
FinishTranslationEvent finishTranslationEvent
Event used to delay fault generation of translation faults.
Definition: fetch.hh:550
Derived & name(const std::string &name)
Set the name and marks this stat to print at the end of simulation.
Definition: statistics.hh:279
Stats::Scalar fetchPendingQuiesceStallCycles
Total number of stall cycles caused by pending quiesce instructions.
Definition: fetch.hh:582
int16_t ThreadID
Thread index/ID type.
Definition: types.hh:227
void setTimeBuffer(TimeBuffer< TimeStruct > *time_buffer)
Sets the main backwards communication time buffer pointer.
Definition: fetch_impl.hh:294
void setFetchQueue(TimeBuffer< FetchStruct > *fq_ptr)
Sets pointer to time buffer used to communicate to the next stage.
Definition: fetch_impl.hh:314
Addr fetchBufferAlignPC(Addr addr)
Align a PC to the start of a fetch buffer block.
Definition: fetch.hh:375
ThreadStatus fetchStatus[Impl::MaxThreads]
Per-thread status.
Definition: fetch.hh:208
Declaration of the Packet class.
Stalls stalls[Impl::MaxThreads]
Tracks which stages are telling fetch to stall.
Definition: fetch.hh:465
O3CPU * cpu
Pointer to the O3CPU.
Definition: fetch.hh:414
void drainSanityCheck() const
Perform sanity checks after a drain.
Definition: bpred_unit.cc:163
bool cacheBlocked
Is the cache blocked? If so no threads can access it.
Definition: fetch.hh:486
GenericISA::SimplePCState< MachInst > PCState
Definition: types.hh:43
FetchStatus updateFetchStatus()
Updates overall fetch stage status; to be called at the end of each cycle.
Definition: fetch_impl.hh:843
Cycles decodeToFetchDelay
Decode to fetch delay.
Definition: fetch.hh:468
Cycles commitToFetchDelay
Commit to fetch delay.
Definition: fetch.hh:477
void switchToInactive()
Changes the status of this stage to inactive, and indicates this to the CPU.
Definition: fetch_impl.hh:534
void startupStage()
Initialize stage.
Definition: fetch_impl.hh:322
bool lookupAndUpdateNextPC(const DynInstPtr &inst, TheISA::PCState &pc)
Looks up in the branch predictor to see if the next PC should be either next PC+=MachInst or a branch...
Definition: fetch_impl.hh:558
Random random_mt
Definition: random.cc:100
ProbePointArg< RequestPtr > * ppFetchRequestSent
To probe when a fetch request is successfully sent.
Definition: fetch.hh:219
const T * getConstPtr() const
Definition: packet.hh:1099
ProbePointArg< DynInstPtr > * ppFetch
Probe points.
Definition: fetch.hh:217
void dataDynamic(T *p)
Set the data pointer to a value that should have delete [] called on it.
Definition: packet.hh:1078
unsigned size
Definition: timebuf.hh:45
wire getWire(int idx)
Definition: timebuf.hh:232
void squash(const InstSeqNum &squashed_sn, ThreadID tid)
Squashes all outstanding updates until a given sequence number.
Definition: bpred_unit.cc:368
void setFault(Fault _fault)
Definition: fetch.hh:155
Stats::Scalar fetchSquashCycles
Stat for total number of cycles spent squashing.
Definition: fetch.hh:564
void squash(const TheISA::PCState &newPC, const InstSeqNum seq_num, DynInstPtr squashInst, ThreadID tid)
Squashes a specific thread and resets the PC.
Definition: fetch_impl.hh:883
unsigned fetchBufferSize
The size of the fetch buffer in bytes.
Definition: fetch.hh:500
ThreadID lsqCount()
Returns the appropriate thread to fetch using the LSQ count policy.
Definition: fetch_impl.hh:1554
FetchPolicy fetchPolicy
Fetch policy.
Definition: fetch.hh:211
Derived & desc(const std::string &_desc)
Set the description and marks this stat to print at the end of simulation.
Definition: statistics.hh:312
Cycles renameToFetchDelay
Rename to fetch delay.
Definition: fetch.hh:471
virtual bool recvTimingResp(PacketPtr pkt)
Timing version of receive.
Definition: fetch_impl.hh:1677
bool isDrained() const
Has the stage drained?
Definition: fetch_impl.hh:461
void wakeFromQuiesce()
Tells fetch to wake up from a quiesce instruction.
Definition: fetch_impl.hh:511
#define warn(...)
Definition: logging.hh:212
Stats::Scalar fetchBlockedCycles
Total number of cycles spent blocked.
Definition: fetch.hh:572
Stats::Scalar fetchedCacheLines
Stat for total number of fetched cache lines.
Definition: fetch.hh:586
void drainResume()
Resume after a drain.
Definition: fetch_impl.hh:433
std::list< ThreadID > priorityList
List that has the threads organized by priority.
Definition: fetch.hh:214
bool checkSignalsAndUpdate(ThreadID tid)
Checks all input signals and updates the status as necessary.
Definition: fetch_impl.hh:999
StaticInstPtr macroop[Impl::MaxThreads]
Definition: fetch.hh:442
bool delayedCommit[Impl::MaxThreads]
Can the fetch stage redirect from an interrupt on this instruction?
Definition: fetch.hh:445
Stats::Scalar fetchedBranches
Total number of fetched branches.
Definition: fetch.hh:558
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
Definition: logging.hh:185
DynInstPtr buildInst(ThreadID tid, StaticInstPtr staticInst, StaticInstPtr curMacroop, TheISA::PCState thisPC, TheISA::PCState nextPC, bool trace)
Definition: fetch_impl.hh:1104
std::shared_ptr< FaultBase > Fault
Definition: types.hh:240
BPredUnit * branchPred
BPredUnit.
Definition: fetch.hh:436
TimeBuffer< TimeStruct > * timeBuffer
Time buffer interface.
Definition: fetch.hh:417
void sample(const U &v, int n=1)
Add a value to the distribtion n times.
Definition: statistics.hh:1899
bool wroteToTimeBuffer
Variable that tracks if fetch has written to the time buffer this cycle.
Definition: fetch.hh:453
void doSquash(const TheISA::PCState &newPC, const DynInstPtr squashInst, ThreadID tid)
Squashes a specific thread and resets the PC.
Definition: fetch_impl.hh:761
unsigned int cacheBlkSize
Cache block size.
Definition: fetch.hh:495
bool fetchBufferValid[Impl::MaxThreads]
Whether or not the fetch buffer data is valid.
Definition: fetch.hh:518
bool isLastMicroop() const
Definition: static_inst.hh:199
ProbePointArg< PacketInfo > Packet
Packet probe point.
Definition: mem.hh:104
TimeBuffer< TimeStruct >::wire fromIEW
Wire to get iew's information from backwards time buffer.
Definition: fetch.hh:426

Generated on Fri Feb 28 2020 16:26:59 for gem5 by doxygen 1.8.13