gem5 v20.0.0.2
fetch_impl.hh
1 /*
2  * Copyright (c) 2010-2014 ARM Limited
3  * Copyright (c) 2012-2013 AMD
4  * All rights reserved.
5  *
6  * The license below extends only to copyright in the software and shall
7  * not be construed as granting a license to any other intellectual
8  * property including but not limited to intellectual property relating
9  * to a hardware implementation of the functionality of the software
10  * licensed hereunder. You may use the software subject to the license
11  * terms below provided that you ensure that this notice is replicated
12  * unmodified and in its entirety in all distributions of the software,
13  * modified or unmodified, in source code or in binary form.
14  *
15  * Copyright (c) 2004-2006 The Regents of The University of Michigan
16  * All rights reserved.
17  *
18  * Redistribution and use in source and binary forms, with or without
19  * modification, are permitted provided that the following conditions are
20  * met: redistributions of source code must retain the above copyright
21  * notice, this list of conditions and the following disclaimer;
22  * redistributions in binary form must reproduce the above copyright
23  * notice, this list of conditions and the following disclaimer in the
24  * documentation and/or other materials provided with the distribution;
25  * neither the name of the copyright holders nor the names of its
26  * contributors may be used to endorse or promote products derived from
27  * this software without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40  */
41 
42 #ifndef __CPU_O3_FETCH_IMPL_HH__
43 #define __CPU_O3_FETCH_IMPL_HH__
44 
45 #include <algorithm>
46 #include <cstring>
47 #include <list>
48 #include <map>
49 #include <queue>
50 
51 #include "arch/generic/tlb.hh"
52 #include "arch/isa_traits.hh"
53 #include "arch/utility.hh"
54 #include "base/random.hh"
55 #include "base/types.hh"
56 #include "config/the_isa.hh"
57 #include "cpu/base.hh"
58 //#include "cpu/checker/cpu.hh"
59 #include "cpu/o3/cpu.hh"
60 #include "cpu/o3/fetch.hh"
61 #include "cpu/exetrace.hh"
62 #include "debug/Activity.hh"
63 #include "debug/Drain.hh"
64 #include "debug/Fetch.hh"
65 #include "debug/O3CPU.hh"
66 #include "debug/O3PipeView.hh"
67 #include "mem/packet.hh"
68 #include "params/DerivO3CPU.hh"
69 #include "sim/byteswap.hh"
70 #include "sim/core.hh"
71 #include "sim/eventq.hh"
72 #include "sim/full_system.hh"
73 #include "sim/system.hh"
74 #include "cpu/o3/isa_specific.hh"
75 
76 using namespace std;
77 
78 template<class Impl>
79 DefaultFetch<Impl>::DefaultFetch(O3CPU *_cpu, DerivO3CPUParams *params)
80  : fetchPolicy(params->smtFetchPolicy),
81  cpu(_cpu),
82  branchPred(nullptr),
83  decodeToFetchDelay(params->decodeToFetchDelay),
84  renameToFetchDelay(params->renameToFetchDelay),
85  iewToFetchDelay(params->iewToFetchDelay),
86  commitToFetchDelay(params->commitToFetchDelay),
87  fetchWidth(params->fetchWidth),
88  decodeWidth(params->decodeWidth),
89  retryPkt(NULL),
90  retryTid(InvalidThreadID),
91  cacheBlkSize(cpu->cacheLineSize()),
92  fetchBufferSize(params->fetchBufferSize),
93  fetchBufferMask(fetchBufferSize - 1),
94  fetchQueueSize(params->fetchQueueSize),
95  numThreads(params->numThreads),
96  numFetchingThreads(params->smtNumFetchingThreads),
97  icachePort(this, _cpu),
98  finishTranslationEvent(this)
99 {
100  if (numThreads > Impl::MaxThreads)
101  fatal("numThreads (%d) is larger than compiled limit (%d),\n"
102  "\tincrease MaxThreads in src/cpu/o3/impl.hh\n",
103  numThreads, static_cast<int>(Impl::MaxThreads));
104  if (fetchWidth > Impl::MaxWidth)
105  fatal("fetchWidth (%d) is larger than compiled limit (%d),\n"
106  "\tincrease MaxWidth in src/cpu/o3/impl.hh\n",
107  fetchWidth, static_cast<int>(Impl::MaxWidth));
108  if (fetchBufferSize > cacheBlkSize)
109  fatal("fetch buffer size (%u bytes) is greater than the cache "
110  "block size (%u bytes)\n", fetchBufferSize, cacheBlkSize);
111  if (cacheBlkSize % fetchBufferSize)
112  fatal("cache block (%u bytes) is not a multiple of the "
113  "fetch buffer (%u bytes)\n", cacheBlkSize, fetchBufferSize);
114 
115  // Figure out fetch policy
116  panic_if(fetchPolicy == FetchPolicy::SingleThread && numThreads > 1,
117  "Invalid Fetch Policy for a SMT workload.");
118 
119  // Get the size of an instruction.
120  instSize = sizeof(TheISA::MachInst);
121 
122  for (int i = 0; i < Impl::MaxThreads; i++) {
123  fetchStatus[i] = Idle;
124  decoder[i] = nullptr;
125  pc[i] = 0;
126  fetchOffset[i] = 0;
127  macroop[i] = nullptr;
128  delayedCommit[i] = false;
129  memReq[i] = nullptr;
130  stalls[i] = {false, false};
131  fetchBuffer[i] = NULL;
132  fetchBufferPC[i] = 0;
133  fetchBufferValid[i] = false;
134  lastIcacheStall[i] = 0;
135  issuePipelinedIfetch[i] = false;
136  }
137 
138  branchPred = params->branchPred;
139 
140  for (ThreadID tid = 0; tid < numThreads; tid++) {
141  decoder[tid] = new TheISA::Decoder(
142  dynamic_cast<TheISA::ISA *>(params->isa[tid]));
143  // Create space to buffer the cache line data,
144  // which may not hold the entire cache line.
145  fetchBuffer[tid] = new uint8_t[fetchBufferSize];
146  }
147 }
148 
149 template <class Impl>
150 std::string
151 DefaultFetch<Impl>::name() const
152 {
153  return cpu->name() + ".fetch";
154 }
155 
156 template <class Impl>
157 void
158 DefaultFetch<Impl>::regProbePoints()
159 {
160  ppFetch = new ProbePointArg<DynInstPtr>(cpu->getProbeManager(), "Fetch");
161  ppFetchRequestSent = new ProbePointArg<RequestPtr>(cpu->getProbeManager(),
162  "FetchRequest");
163 
164 }
165 
166 template <class Impl>
167 void
168 DefaultFetch<Impl>::regStats()
169 {
170  icacheStallCycles
171  .name(name() + ".icacheStallCycles")
172  .desc("Number of cycles fetch is stalled on an Icache miss")
173  .prereq(icacheStallCycles);
174 
175  fetchedInsts
176  .name(name() + ".Insts")
177  .desc("Number of instructions fetch has processed")
178  .prereq(fetchedInsts);
179 
180  fetchedBranches
181  .name(name() + ".Branches")
182  .desc("Number of branches that fetch encountered")
183  .prereq(fetchedBranches);
184 
185  predictedBranches
186  .name(name() + ".predictedBranches")
187  .desc("Number of branches that fetch has predicted taken")
188  .prereq(predictedBranches);
189 
190  fetchCycles
191  .name(name() + ".Cycles")
192  .desc("Number of cycles fetch has run and was not squashing or"
193  " blocked")
194  .prereq(fetchCycles);
195 
196  fetchSquashCycles
197  .name(name() + ".SquashCycles")
198  .desc("Number of cycles fetch has spent squashing")
199  .prereq(fetchSquashCycles);
200 
201  fetchTlbCycles
202  .name(name() + ".TlbCycles")
203  .desc("Number of cycles fetch has spent waiting for tlb")
204  .prereq(fetchTlbCycles);
205 
206  fetchIdleCycles
207  .name(name() + ".IdleCycles")
208  .desc("Number of cycles fetch was idle")
209  .prereq(fetchIdleCycles);
210 
211  fetchBlockedCycles
212  .name(name() + ".BlockedCycles")
213  .desc("Number of cycles fetch has spent blocked")
214  .prereq(fetchBlockedCycles);
215 
216  fetchedCacheLines
217  .name(name() + ".CacheLines")
218  .desc("Number of cache lines fetched")
219  .prereq(fetchedCacheLines);
220 
221  fetchMiscStallCycles
222  .name(name() + ".MiscStallCycles")
223  .desc("Number of cycles fetch has spent waiting on interrupts, or "
224  "bad addresses, or out of MSHRs")
225  .prereq(fetchMiscStallCycles);
226 
227  fetchPendingDrainCycles
228  .name(name() + ".PendingDrainCycles")
229  .desc("Number of cycles fetch has spent waiting on pipes to drain")
230  .prereq(fetchPendingDrainCycles);
231 
232  fetchNoActiveThreadStallCycles
233  .name(name() + ".NoActiveThreadStallCycles")
234  .desc("Number of stall cycles due to no active thread to fetch from")
235  .prereq(fetchNoActiveThreadStallCycles);
236 
237  fetchPendingTrapStallCycles
238  .name(name() + ".PendingTrapStallCycles")
239  .desc("Number of stall cycles due to pending traps")
240  .prereq(fetchPendingTrapStallCycles);
241 
242  fetchPendingQuiesceStallCycles
243  .name(name() + ".PendingQuiesceStallCycles")
244  .desc("Number of stall cycles due to pending quiesce instructions")
245  .prereq(fetchPendingQuiesceStallCycles);
246 
247  fetchIcacheWaitRetryStallCycles
248  .name(name() + ".IcacheWaitRetryStallCycles")
249  .desc("Number of stall cycles due to full MSHR")
250  .prereq(fetchIcacheWaitRetryStallCycles);
251 
252  fetchIcacheSquashes
253  .name(name() + ".IcacheSquashes")
254  .desc("Number of outstanding Icache misses that were squashed")
255  .prereq(fetchIcacheSquashes);
256 
257  fetchTlbSquashes
258  .name(name() + ".ItlbSquashes")
259  .desc("Number of outstanding ITLB misses that were squashed")
260  .prereq(fetchTlbSquashes);
261 
262  fetchNisnDist
263  .init(/* base value */ 0,
264  /* last value */ fetchWidth,
265  /* bucket size */ 1)
266  .name(name() + ".rateDist")
267  .desc("Number of instructions fetched each cycle (Total)")
268  .flags(Stats::pdf);
269 
270  idleRate
271  .name(name() + ".idleRate")
272  .desc("Percent of cycles fetch was idle")
273  .prereq(idleRate);
274  idleRate = fetchIdleCycles * 100 / cpu->numCycles;
275 
276  branchRate
277  .name(name() + ".branchRate")
278  .desc("Number of branch fetches per cycle")
279  .flags(Stats::total);
280  branchRate = fetchedBranches / cpu->numCycles;
281 
282  fetchRate
283  .name(name() + ".rate")
284  .desc("Number of inst fetches per cycle")
285  .flags(Stats::total);
286  fetchRate = fetchedInsts / cpu->numCycles;
287 }
288 
289 template<class Impl>
290 void
291 DefaultFetch<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *time_buffer)
292 {
293  timeBuffer = time_buffer;
294 
295  // Create wires to get information from proper places in time buffer.
296  fromDecode = timeBuffer->getWire(-decodeToFetchDelay);
297  fromRename = timeBuffer->getWire(-renameToFetchDelay);
298  fromIEW = timeBuffer->getWire(-iewToFetchDelay);
299  fromCommit = timeBuffer->getWire(-commitToFetchDelay);
300 }
301 
302 template<class Impl>
303 void
305 {
306  activeThreads = at_ptr;
307 }
308 
309 template<class Impl>
310 void
311 DefaultFetch<Impl>::setFetchQueue(TimeBuffer<FetchStruct> *ftb_ptr)
312 {
313  // Create wire to write information to proper place in fetch time buf.
314  toDecode = ftb_ptr->getWire(0);
315 }
316 
317 template<class Impl>
318 void
319 DefaultFetch<Impl>::startupStage()
320 {
321  assert(priorityList.empty());
322  resetStage();
323 
324  // Fetch needs to start fetching instructions at the very beginning,
325  // so it must start up in active state.
326  switchToActive();
327 }
328 
329 template<class Impl>
330 void
331 DefaultFetch<Impl>::clearStates(ThreadID tid)
332 {
333  fetchStatus[tid] = Running;
334  pc[tid] = cpu->pcState(tid);
335  fetchOffset[tid] = 0;
336  macroop[tid] = NULL;
337  delayedCommit[tid] = false;
338  memReq[tid] = NULL;
339  stalls[tid].decode = false;
340  stalls[tid].drain = false;
341  fetchBufferPC[tid] = 0;
342  fetchBufferValid[tid] = false;
343  fetchQueue[tid].clear();
344 
345  // TODO not sure what to do with priorityList for now
346  // priorityList.push_back(tid);
347 }
348 
349 template<class Impl>
350 void
351 DefaultFetch<Impl>::resetStage()
352 {
353  numInst = 0;
354  interruptPending = false;
355  cacheBlocked = false;
356 
357  priorityList.clear();
358 
359  // Setup PC and nextPC with initial state.
360  for (ThreadID tid = 0; tid < numThreads; ++tid) {
361  fetchStatus[tid] = Running;
362  pc[tid] = cpu->pcState(tid);
363  fetchOffset[tid] = 0;
364  macroop[tid] = NULL;
365 
366  delayedCommit[tid] = false;
367  memReq[tid] = NULL;
368 
369  stalls[tid].decode = false;
370  stalls[tid].drain = false;
371 
372  fetchBufferPC[tid] = 0;
373  fetchBufferValid[tid] = false;
374 
375  fetchQueue[tid].clear();
376 
377  priorityList.push_back(tid);
378  }
379 
380  wroteToTimeBuffer = false;
381  _status = Inactive;
382 }
383 
384 template<class Impl>
385 void
386 DefaultFetch<Impl>::processCacheCompletion(PacketPtr pkt)
387 {
388  ThreadID tid = cpu->contextToThread(pkt->req->contextId());
389 
390  DPRINTF(Fetch, "[tid:%i] Waking up from cache miss.\n", tid);
391  assert(!cpu->switchedOut());
392 
393  // Only change the status if it's still waiting on the icache access
394  // to return.
395  if (fetchStatus[tid] != IcacheWaitResponse ||
396  pkt->req != memReq[tid]) {
397  ++fetchIcacheSquashes;
398  delete pkt;
399  return;
400  }
401 
402  memcpy(fetchBuffer[tid], pkt->getConstPtr<uint8_t>(), fetchBufferSize);
403  fetchBufferValid[tid] = true;
404 
405  // Wake up the CPU (if it went to sleep and was waiting on
406  // this completion event).
407  cpu->wakeCPU();
408 
409  DPRINTF(Activity, "[tid:%i] Activating fetch due to cache completion\n",
410  tid);
411 
412  switchToActive();
413 
414  // Only switch to IcacheAccessComplete if we're not stalled as well.
415  if (checkStall(tid)) {
416  fetchStatus[tid] = Blocked;
417  } else {
418  fetchStatus[tid] = IcacheAccessComplete;
419  }
420 
421  pkt->req->setAccessLatency();
422  cpu->ppInstAccessComplete->notify(pkt);
423  // Reset the mem req to NULL.
424  delete pkt;
425  memReq[tid] = NULL;
426 }
427 
428 template <class Impl>
429 void
430 DefaultFetch<Impl>::drainResume()
431 {
432  for (ThreadID i = 0; i < numThreads; ++i) {
433  stalls[i].decode = false;
434  stalls[i].drain = false;
435  }
436 }
437 
438 template <class Impl>
439 void
440 DefaultFetch<Impl>::drainSanityCheck() const
441 {
442  assert(isDrained());
443  assert(retryPkt == NULL);
444  assert(retryTid == InvalidThreadID);
445  assert(!cacheBlocked);
446  assert(!interruptPending);
447 
448  for (ThreadID i = 0; i < numThreads; ++i) {
449  assert(!memReq[i]);
450  assert(fetchStatus[i] == Idle || stalls[i].drain);
451  }
452 
453  branchPred->drainSanityCheck();
454 }
455 
456 template <class Impl>
457 bool
458 DefaultFetch<Impl>::isDrained() const
459 {
460  /* Make sure that threads are either idle or that the commit stage
461  * has signaled that draining has completed by setting the drain
462  * stall flag. This effectively forces the pipeline to be disabled
463  * until the whole system is drained (simulation may continue to
464  * drain other components).
465  */
466  for (ThreadID i = 0; i < numThreads; ++i) {
467  // Verify fetch queues are drained
468  if (!fetchQueue[i].empty())
469  return false;
470 
471  // Return false if not idle or drain stalled
472  if (fetchStatus[i] != Idle) {
473  if (fetchStatus[i] == Blocked && stalls[i].drain)
474  continue;
475  else
476  return false;
477  }
478  }
479 
480  /* The pipeline might start up again in the middle of the drain
481  * cycle if the finish translation event is scheduled, so make
482  * sure that's not the case.
483  */
484  return !finishTranslationEvent.scheduled();
485 }
486 
487 template <class Impl>
488 void
489 DefaultFetch<Impl>::takeOverFrom()
490 {
491  assert(cpu->getInstPort().isConnected());
492  resetStage();
493 
494 }
495 
496 template <class Impl>
497 void
498 DefaultFetch<Impl>::drainStall(ThreadID tid)
499 {
500  assert(cpu->isDraining());
501  assert(!stalls[tid].drain);
502  DPRINTF(Drain, "%i: Thread drained.\n", tid);
503  stalls[tid].drain = true;
504 }
505 
506 template <class Impl>
507 void
508 DefaultFetch<Impl>::wakeFromQuiesce()
509 {
510  DPRINTF(Fetch, "Waking up from quiesce\n");
511  // Hopefully this is safe
512  // @todo: Allow other threads to wake from quiesce.
513  fetchStatus[0] = Running;
514 }
515 
516 template <class Impl>
517 inline void
518 DefaultFetch<Impl>::switchToActive()
519 {
520  if (_status == Inactive) {
521  DPRINTF(Activity, "Activating stage.\n");
522 
523  cpu->activateStage(O3CPU::FetchIdx);
524 
525  _status = Active;
526  }
527 }
528 
529 template <class Impl>
530 inline void
531 DefaultFetch<Impl>::switchToInactive()
532 {
533  if (_status == Active) {
534  DPRINTF(Activity, "Deactivating stage.\n");
535 
536  cpu->deactivateStage(O3CPU::FetchIdx);
537 
538  _status = Inactive;
539  }
540 }
541 
542 template <class Impl>
543 void
544 DefaultFetch<Impl>::deactivateThread(ThreadID tid)
545 {
546  // Update priority list
547  auto thread_it = std::find(priorityList.begin(), priorityList.end(), tid);
548  if (thread_it != priorityList.end()) {
549  priorityList.erase(thread_it);
550  }
551 }
552 
553 template <class Impl>
554 bool
555 DefaultFetch<Impl>::lookupAndUpdateNextPC(
556  const DynInstPtr &inst, TheISA::PCState &nextPC)
557 {
558  // Do branch prediction check here.
559  // A bit of a misnomer...next_PC is actually the current PC until
560  // this function updates it.
561  bool predict_taken;
562 
563  if (!inst->isControl()) {
564  TheISA::advancePC(nextPC, inst->staticInst);
565  inst->setPredTarg(nextPC);
566  inst->setPredTaken(false);
567  return false;
568  }
569 
570  ThreadID tid = inst->threadNumber;
571  predict_taken = branchPred->predict(inst->staticInst, inst->seqNum,
572  nextPC, tid);
573 
574  if (predict_taken) {
575  DPRINTF(Fetch, "[tid:%i] [sn:%llu] Branch at PC %#x "
576  "predicted to be taken to %s\n",
577  tid, inst->seqNum, inst->pcState().instAddr(), nextPC);
578  } else {
579  DPRINTF(Fetch, "[tid:%i] [sn:%llu] Branch at PC %#x "
580  "predicted to be not taken\n",
581  tid, inst->seqNum, inst->pcState().instAddr());
582  }
583 
584  DPRINTF(Fetch, "[tid:%i] [sn:%llu] Branch at PC %#x "
585  "predicted to go to %s\n",
586  tid, inst->seqNum, inst->pcState().instAddr(), nextPC);
587  inst->setPredTarg(nextPC);
588  inst->setPredTaken(predict_taken);
589 
590  ++fetchedBranches;
591 
592  if (predict_taken) {
593  ++predictedBranches;
594  }
595 
596  return predict_taken;
597 }
598 
599 template <class Impl>
600 bool
601 DefaultFetch<Impl>::fetchCacheLine(Addr vaddr, ThreadID tid, Addr pc)
602 {
603  Fault fault = NoFault;
604 
605  assert(!cpu->switchedOut());
606 
607  // @todo: not sure if these should block translation.
608  //AlphaDep
609  if (cacheBlocked) {
610  DPRINTF(Fetch, "[tid:%i] Can't fetch cache line, cache blocked\n",
611  tid);
612  return false;
613  } else if (checkInterrupt(pc) && !delayedCommit[tid]) {
614  // Hold off fetch from getting new instructions when:
615  // Cache is blocked, or
616  // while an interrupt is pending and we're not in PAL mode, or
617  // fetch is switched out.
618  DPRINTF(Fetch, "[tid:%i] Can't fetch cache line, interrupt pending\n",
619  tid);
620  return false;
621  }
622 
623  // Align the fetch address to the start of a fetch buffer segment.
624  Addr fetchBufferBlockPC = fetchBufferAlignPC(vaddr);
625 
626  DPRINTF(Fetch, "[tid:%i] Fetching cache line %#x for addr %#x\n",
627  tid, fetchBufferBlockPC, vaddr);
628 
629  // Setup the memReq to do a read of the first instruction's address.
630  // Set the appropriate read size and flags as well.
631  // Build request here.
632  RequestPtr mem_req = std::make_shared<Request>(
633  fetchBufferBlockPC, fetchBufferSize,
634  Request::INST_FETCH, cpu->instMasterId(), pc,
635  cpu->thread[tid]->contextId());
636 
637  mem_req->taskId(cpu->taskId());
638 
639  memReq[tid] = mem_req;
640 
641  // Initiate translation of the icache block
642  fetchStatus[tid] = ItlbWait;
643  FetchTranslation *trans = new FetchTranslation(this);
644  cpu->itb->translateTiming(mem_req, cpu->thread[tid]->getTC(),
645  trans, BaseTLB::Execute);
646  return true;
647 }
648 
649 template <class Impl>
650 void
651 DefaultFetch<Impl>::finishTranslation(const Fault &fault,
652  const RequestPtr &mem_req)
653 {
654  ThreadID tid = cpu->contextToThread(mem_req->contextId());
655  Addr fetchBufferBlockPC = mem_req->getVaddr();
656 
657  assert(!cpu->switchedOut());
658 
659  // Wake up CPU if it was idle
660  cpu->wakeCPU();
661 
662  if (fetchStatus[tid] != ItlbWait || mem_req != memReq[tid] ||
663  mem_req->getVaddr() != memReq[tid]->getVaddr()) {
664  DPRINTF(Fetch, "[tid:%i] Ignoring itlb completed after squash\n",
665  tid);
666  ++fetchTlbSquashes;
667  return;
668  }
669 
670 
671  // If translation was successful, attempt to read the icache block.
672  if (fault == NoFault) {
673  // Check that we're not going off into random memory
674  // If we have, just wait around for commit to squash something and put
675  // us on the right track
676  if (!cpu->system->isMemAddr(mem_req->getPaddr())) {
677  warn("Address %#x is outside of physical memory, stopping fetch\n",
678  mem_req->getPaddr());
679  fetchStatus[tid] = NoGoodAddr;
680  memReq[tid] = NULL;
681  return;
682  }
683 
684  // Build packet here.
685  PacketPtr data_pkt = new Packet(mem_req, MemCmd::ReadReq);
686  data_pkt->dataDynamic(new uint8_t[fetchBufferSize]);
687 
688  fetchBufferPC[tid] = fetchBufferBlockPC;
689  fetchBufferValid[tid] = false;
690  DPRINTF(Fetch, "Fetch: Doing instruction read.\n");
691 
692  fetchedCacheLines++;
693 
694  // Access the cache.
695  if (!icachePort.sendTimingReq(data_pkt)) {
696  assert(retryPkt == NULL);
697  assert(retryTid == InvalidThreadID);
698  DPRINTF(Fetch, "[tid:%i] Out of MSHRs!\n", tid);
699 
700  fetchStatus[tid] = IcacheWaitRetry;
701  retryPkt = data_pkt;
702  retryTid = tid;
703  cacheBlocked = true;
704  } else {
705  DPRINTF(Fetch, "[tid:%i] Doing Icache access.\n", tid);
706  DPRINTF(Activity, "[tid:%i] Activity: Waiting on I-cache "
707  "response.\n", tid);
708  lastIcacheStall[tid] = curTick();
709  fetchStatus[tid] = IcacheWaitResponse;
710  // Notify Fetch Request probe when a packet containing a fetch
711  // request is successfully sent
712  ppFetchRequestSent->notify(mem_req);
713  }
714  } else {
715  // Don't send an instruction to decode if we can't handle it.
716  if (!(numInst < fetchWidth) || !(fetchQueue[tid].size() < fetchQueueSize)) {
717  assert(!finishTranslationEvent.scheduled());
718  finishTranslationEvent.setFault(fault);
719  finishTranslationEvent.setReq(mem_req);
720  cpu->schedule(finishTranslationEvent,
721  cpu->clockEdge(Cycles(1)));
722  return;
723  }
724  DPRINTF(Fetch, "[tid:%i] Got back req with addr %#x but expected %#x\n",
725  tid, mem_req->getVaddr(), memReq[tid]->getVaddr());
726  // Translation faulted, icache request won't be sent.
727  memReq[tid] = NULL;
728 
729  // Send the fault to commit. This thread will not do anything
730  // until commit handles the fault. The only other way it can
731  // wake up is if a squash comes along and changes the PC.
732  TheISA::PCState fetchPC = pc[tid];
733 
734  DPRINTF(Fetch, "[tid:%i] Translation faulted, building noop.\n", tid);
735  // We will use a nop in order to carry the fault.
736  DynInstPtr instruction = buildInst(tid, StaticInst::nopStaticInstPtr,
737  NULL, fetchPC, fetchPC, false);
738  instruction->setNotAnInst();
739 
740  instruction->setPredTarg(fetchPC);
741  instruction->fault = fault;
742  wroteToTimeBuffer = true;
743 
744  DPRINTF(Activity, "Activity this cycle.\n");
745  cpu->activityThisCycle();
746 
747  fetchStatus[tid] = TrapPending;
748 
749  DPRINTF(Fetch, "[tid:%i] Blocked, need to handle the trap.\n", tid);
750  DPRINTF(Fetch, "[tid:%i] fault (%s) detected @ PC %s.\n",
751  tid, fault->name(), pc[tid]);
752  }
753  _status = updateFetchStatus();
754 }
755 
756 template <class Impl>
757 inline void
758 DefaultFetch<Impl>::doSquash(const TheISA::PCState &newPC,
759  const DynInstPtr squashInst, ThreadID tid)
760 {
761  DPRINTF(Fetch, "[tid:%i] Squashing, setting PC to: %s.\n",
762  tid, newPC);
763 
764  pc[tid] = newPC;
765  fetchOffset[tid] = 0;
766  if (squashInst && squashInst->pcState().instAddr() == newPC.instAddr())
767  macroop[tid] = squashInst->macroop;
768  else
769  macroop[tid] = NULL;
770  decoder[tid]->reset();
771 
772  // Clear the icache miss if it's outstanding.
773  if (fetchStatus[tid] == IcacheWaitResponse) {
774  DPRINTF(Fetch, "[tid:%i] Squashing outstanding Icache miss.\n",
775  tid);
776  memReq[tid] = NULL;
777  } else if (fetchStatus[tid] == ItlbWait) {
778  DPRINTF(Fetch, "[tid:%i] Squashing outstanding ITLB miss.\n",
779  tid);
780  memReq[tid] = NULL;
781  }
782 
783  // Get rid of the retrying packet if it was from this thread.
784  if (retryTid == tid) {
785  assert(cacheBlocked);
786  if (retryPkt) {
787  delete retryPkt;
788  }
789  retryPkt = NULL;
790  retryTid = InvalidThreadID;
791  }
792 
793  fetchStatus[tid] = Squashing;
794 
795  // Empty fetch queue
796  fetchQueue[tid].clear();
797 
798  // microops are being squashed, it is not known whether the
799  // youngest non-squashed microop was marked delayed commit
800  // or not. Setting the flag to true ensures that the
801  // interrupts are not handled when they cannot be, though
802  // some opportunities to handle interrupts may be missed.
803  delayedCommit[tid] = true;
804 
805  ++fetchSquashCycles;
806 }
807 
808 template<class Impl>
809 void
810 DefaultFetch<Impl>::squashFromDecode(const TheISA::PCState &newPC,
811  const DynInstPtr squashInst,
812  const InstSeqNum seq_num, ThreadID tid)
813 {
814  DPRINTF(Fetch, "[tid:%i] Squashing from decode.\n", tid);
815 
816  doSquash(newPC, squashInst, tid);
817 
818  // Tell the CPU to remove any instructions that are in flight between
819  // fetch and decode.
820  cpu->removeInstsUntil(seq_num, tid);
821 }
822 
823 template<class Impl>
824 bool
825 DefaultFetch<Impl>::checkStall(ThreadID tid) const
826 {
827  bool ret_val = false;
828 
829  if (stalls[tid].drain) {
830  assert(cpu->isDraining());
831  DPRINTF(Fetch,"[tid:%i] Drain stall detected.\n",tid);
832  ret_val = true;
833  }
834 
835  return ret_val;
836 }
837 
838 template<class Impl>
839 typename DefaultFetch<Impl>::FetchStatus
840 DefaultFetch<Impl>::updateFetchStatus()
841 {
842  //Check Running
843  list<ThreadID>::iterator threads = activeThreads->begin();
844  list<ThreadID>::iterator end = activeThreads->end();
845 
846  while (threads != end) {
847  ThreadID tid = *threads++;
848 
849  if (fetchStatus[tid] == Running ||
850  fetchStatus[tid] == Squashing ||
851  fetchStatus[tid] == IcacheAccessComplete) {
852 
853  if (_status == Inactive) {
854  DPRINTF(Activity, "[tid:%i] Activating stage.\n",tid);
855 
856  if (fetchStatus[tid] == IcacheAccessComplete) {
857  DPRINTF(Activity, "[tid:%i] Activating fetch due to cache"
858  "completion\n",tid);
859  }
860 
861  cpu->activateStage(O3CPU::FetchIdx);
862  }
863 
864  return Active;
865  }
866  }
867 
868  // Stage is switching from active to inactive, notify CPU of it.
869  if (_status == Active) {
870  DPRINTF(Activity, "Deactivating stage.\n");
871 
872  cpu->deactivateStage(O3CPU::FetchIdx);
873  }
874 
875  return Inactive;
876 }
877 
878 template <class Impl>
879 void
880 DefaultFetch<Impl>::squash(const TheISA::PCState &newPC,
881  const InstSeqNum seq_num, DynInstPtr squashInst,
882  ThreadID tid)
883 {
884  DPRINTF(Fetch, "[tid:%i] Squash from commit.\n", tid);
885 
886  doSquash(newPC, squashInst, tid);
887 
888  // Tell the CPU to remove any instructions that are not in the ROB.
889  cpu->removeInstsNotInROB(tid);
890 }
891 
892 template <class Impl>
893 void
894 DefaultFetch<Impl>::tick()
895 {
896  list<ThreadID>::iterator threads = activeThreads->begin();
897  list<ThreadID>::iterator end = activeThreads->end();
898  bool status_change = false;
899 
900  wroteToTimeBuffer = false;
901 
902  for (ThreadID i = 0; i < numThreads; ++i) {
903  issuePipelinedIfetch[i] = false;
904  }
905 
906  while (threads != end) {
907  ThreadID tid = *threads++;
908 
909  // Check the signals for each thread to determine the proper status
910  // for each thread.
911  bool updated_status = checkSignalsAndUpdate(tid);
912  status_change = status_change || updated_status;
913  }
914 
915  DPRINTF(Fetch, "Running stage.\n");
916 
917  if (FullSystem) {
918  if (fromCommit->commitInfo[0].interruptPending) {
919  interruptPending = true;
920  }
921 
922  if (fromCommit->commitInfo[0].clearInterrupt) {
923  interruptPending = false;
924  }
925  }
926 
927  for (threadFetched = 0; threadFetched < numFetchingThreads;
928  threadFetched++) {
929  // Fetch each of the actively fetching threads.
930  fetch(status_change);
931  }
932 
933  // Record number of instructions fetched this cycle for distribution.
934  fetchNisnDist.sample(numInst);
935 
936  if (status_change) {
937  // Change the fetch stage status if there was a status change.
938  _status = updateFetchStatus();
939  }
940 
941  // Issue the next I-cache request if possible.
942  for (ThreadID i = 0; i < numThreads; ++i) {
943  if (issuePipelinedIfetch[i]) {
944  pipelineIcacheAccesses(i);
945  }
946  }
947 
948  // Send instructions enqueued into the fetch queue to decode.
949  // Limit rate by fetchWidth. Stall if decode is stalled.
950  unsigned insts_to_decode = 0;
951  unsigned available_insts = 0;
952 
953  for (auto tid : *activeThreads) {
954  if (!stalls[tid].decode) {
955  available_insts += fetchQueue[tid].size();
956  }
957  }
958 
959  // Pick a random thread to start trying to grab instructions from
960  auto tid_itr = activeThreads->begin();
961  std::advance(tid_itr, random_mt.random<uint8_t>(0, activeThreads->size() - 1));
962 
963  while (available_insts != 0 && insts_to_decode < decodeWidth) {
964  ThreadID tid = *tid_itr;
965  if (!stalls[tid].decode && !fetchQueue[tid].empty()) {
966  const auto& inst = fetchQueue[tid].front();
967  toDecode->insts[toDecode->size++] = inst;
968  DPRINTF(Fetch, "[tid:%i] [sn:%llu] Sending instruction to decode "
969  "from fetch queue. Fetch queue size: %i.\n",
970  tid, inst->seqNum, fetchQueue[tid].size());
971 
972  wroteToTimeBuffer = true;
973  fetchQueue[tid].pop_front();
974  insts_to_decode++;
975  available_insts--;
976  }
977 
978  tid_itr++;
979  // Wrap around if at end of active threads list
980  if (tid_itr == activeThreads->end())
981  tid_itr = activeThreads->begin();
982  }
983 
984  // If there was activity this cycle, inform the CPU of it.
985  if (wroteToTimeBuffer) {
986  DPRINTF(Activity, "Activity this cycle.\n");
987  cpu->activityThisCycle();
988  }
989 
990  // Reset the number of the instruction we've fetched.
991  numInst = 0;
992 }
993 
994 template <class Impl>
995 bool
996 DefaultFetch<Impl>::checkSignalsAndUpdate(ThreadID tid)
997 {
998  // Update the per thread stall statuses.
999  if (fromDecode->decodeBlock[tid]) {
1000  stalls[tid].decode = true;
1001  }
1002 
1003  if (fromDecode->decodeUnblock[tid]) {
1004  assert(stalls[tid].decode);
1005  assert(!fromDecode->decodeBlock[tid]);
1006  stalls[tid].decode = false;
1007  }
1008 
1009  // Check squash signals from commit.
1010  if (fromCommit->commitInfo[tid].squash) {
1011 
1012  DPRINTF(Fetch, "[tid:%i] Squashing instructions due to squash "
1013  "from commit.\n",tid);
1014  // In any case, squash.
1015  squash(fromCommit->commitInfo[tid].pc,
1016  fromCommit->commitInfo[tid].doneSeqNum,
1017  fromCommit->commitInfo[tid].squashInst, tid);
1018 
1019  // If it was a branch mispredict on a control instruction, update the
1020  // branch predictor with that instruction, otherwise just kill the
1021  // invalid state we generated in after sequence number
1022  if (fromCommit->commitInfo[tid].mispredictInst &&
1023  fromCommit->commitInfo[tid].mispredictInst->isControl()) {
1024  branchPred->squash(fromCommit->commitInfo[tid].doneSeqNum,
1025  fromCommit->commitInfo[tid].pc,
1026  fromCommit->commitInfo[tid].branchTaken,
1027  tid);
1028  } else {
1029  branchPred->squash(fromCommit->commitInfo[tid].doneSeqNum,
1030  tid);
1031  }
1032 
1033  return true;
1034  } else if (fromCommit->commitInfo[tid].doneSeqNum) {
1035  // Update the branch predictor if it wasn't a squashed instruction
1036  // that was broadcasted.
1037  branchPred->update(fromCommit->commitInfo[tid].doneSeqNum, tid);
1038  }
1039 
1040  // Check squash signals from decode.
1041  if (fromDecode->decodeInfo[tid].squash) {
1042  DPRINTF(Fetch, "[tid:%i] Squashing instructions due to squash "
1043  "from decode.\n",tid);
1044 
1045  // Update the branch predictor.
1046  if (fromDecode->decodeInfo[tid].branchMispredict) {
1047  branchPred->squash(fromDecode->decodeInfo[tid].doneSeqNum,
1048  fromDecode->decodeInfo[tid].nextPC,
1049  fromDecode->decodeInfo[tid].branchTaken,
1050  tid);
1051  } else {
1052  branchPred->squash(fromDecode->decodeInfo[tid].doneSeqNum,
1053  tid);
1054  }
1055 
1056  if (fetchStatus[tid] != Squashing) {
1057 
1058  DPRINTF(Fetch, "Squashing from decode with PC = %s\n",
1059  fromDecode->decodeInfo[tid].nextPC);
1060  // Squash unless we're already squashing
1061  squashFromDecode(fromDecode->decodeInfo[tid].nextPC,
1062  fromDecode->decodeInfo[tid].squashInst,
1063  fromDecode->decodeInfo[tid].doneSeqNum,
1064  tid);
1065 
1066  return true;
1067  }
1068  }
1069 
1070  if (checkStall(tid) &&
1071  fetchStatus[tid] != IcacheWaitResponse &&
1072  fetchStatus[tid] != IcacheWaitRetry &&
1073  fetchStatus[tid] != ItlbWait &&
1074  fetchStatus[tid] != QuiescePending) {
1075  DPRINTF(Fetch, "[tid:%i] Setting to blocked\n",tid);
1076 
1077  fetchStatus[tid] = Blocked;
1078 
1079  return true;
1080  }
1081 
1082  if (fetchStatus[tid] == Blocked ||
1083  fetchStatus[tid] == Squashing) {
1084  // Switch status to running if fetch isn't being told to block or
1085  // squash this cycle.
1086  DPRINTF(Fetch, "[tid:%i] Done squashing, switching to running.\n",
1087  tid);
1088 
1089  fetchStatus[tid] = Running;
1090 
1091  return true;
1092  }
1093 
1094  // If we've reached this point, we have not gotten any signals that
1095  // cause fetch to change its status. Fetch remains the same as before.
1096  return false;
1097 }
1098 
1099 template<class Impl>
1100 typename Impl::DynInstPtr
1101 DefaultFetch<Impl>::buildInst(ThreadID tid, StaticInstPtr staticInst,
1102  StaticInstPtr curMacroop, TheISA::PCState thisPC,
1103  TheISA::PCState nextPC, bool trace)
1104 {
1105  // Get a sequence number.
1106  InstSeqNum seq = cpu->getAndIncrementInstSeq();
1107 
1108  // Create a new DynInst from the instruction fetched.
1109  DynInstPtr instruction =
1110  new DynInst(staticInst, curMacroop, thisPC, nextPC, seq, cpu);
1111  instruction->setTid(tid);
1112 
1113  instruction->setThreadState(cpu->thread[tid]);
1114 
1115  DPRINTF(Fetch, "[tid:%i] Instruction PC %#x (%d) created "
1116  "[sn:%lli].\n", tid, thisPC.instAddr(),
1117  thisPC.microPC(), seq);
1118 
1119  DPRINTF(Fetch, "[tid:%i] Instruction is: %s\n", tid,
1120  instruction->staticInst->
1121  disassemble(thisPC.instAddr()));
1122 
1123 #if TRACING_ON
1124  if (trace) {
1125  instruction->traceData =
1126  cpu->getTracer()->getInstRecord(curTick(), cpu->tcBase(tid),
1127  instruction->staticInst, thisPC, curMacroop);
1128  }
1129 #else
1130  instruction->traceData = NULL;
1131 #endif
1132 
1133  // Add instruction to the CPU's list of instructions.
1134  instruction->setInstListIt(cpu->addInst(instruction));
1135 
1136  // Write the instruction to the first slot in the queue
1137  // that heads to decode.
1138  assert(numInst < fetchWidth);
1139  fetchQueue[tid].push_back(instruction);
1140  assert(fetchQueue[tid].size() <= fetchQueueSize);
1141  DPRINTF(Fetch, "[tid:%i] Fetch queue entry created (%i/%i).\n",
1142  tid, fetchQueue[tid].size(), fetchQueueSize);
1143  //toDecode->insts[toDecode->size++] = instruction;
1144 
1145  // Keep track of if we can take an interrupt at this boundary
1146  delayedCommit[tid] = instruction->isDelayedCommit();
1147 
1148  return instruction;
1149 }
1150 
1151 template<class Impl>
1152 void
1153 DefaultFetch<Impl>::fetch(bool &status_change)
1154 {
1156  // Start actual fetch
1158  ThreadID tid = getFetchingThread();
1159 
1160  assert(!cpu->switchedOut());
1161 
1162  if (tid == InvalidThreadID) {
1163  // Breaks looping condition in tick()
1164  threadFetched = numFetchingThreads;
1165 
1166  if (numThreads == 1) { // @todo Per-thread stats
1167  profileStall(0);
1168  }
1169 
1170  return;
1171  }
1172 
1173  DPRINTF(Fetch, "Attempting to fetch from [tid:%i]\n", tid);
1174 
1175  // The current PC.
1176  TheISA::PCState thisPC = pc[tid];
1177 
1178  Addr pcOffset = fetchOffset[tid];
1179  Addr fetchAddr = (thisPC.instAddr() + pcOffset) & BaseCPU::PCMask;
1180 
1181  bool inRom = isRomMicroPC(thisPC.microPC());
1182 
1183  // If returning from the delay of a cache miss, then update the status
1184  // to running, otherwise do the cache access. Possibly move this up
1185  // to tick() function.
1186  if (fetchStatus[tid] == IcacheAccessComplete) {
1187  DPRINTF(Fetch, "[tid:%i] Icache miss is complete.\n", tid);
1188 
1189  fetchStatus[tid] = Running;
1190  status_change = true;
1191  } else if (fetchStatus[tid] == Running) {
1192  // Align the fetch PC so it's at the start of a fetch buffer segment.
1193  Addr fetchBufferBlockPC = fetchBufferAlignPC(fetchAddr);
1194 
1195  // If buffer is no longer valid or fetchAddr has moved to point
1196  // to the next cache block, AND we have no remaining ucode
1197  // from a macro-op, then start fetch from icache.
1198  if (!(fetchBufferValid[tid] && fetchBufferBlockPC == fetchBufferPC[tid])
1199  && !inRom && !macroop[tid]) {
1200  DPRINTF(Fetch, "[tid:%i] Attempting to translate and read "
1201  "instruction, starting at PC %s.\n", tid, thisPC);
1202 
1203  fetchCacheLine(fetchAddr, tid, thisPC.instAddr());
1204 
1205  if (fetchStatus[tid] == IcacheWaitResponse)
1206  ++icacheStallCycles;
1207  else if (fetchStatus[tid] == ItlbWait)
1208  ++fetchTlbCycles;
1209  else
1210  ++fetchMiscStallCycles;
1211  return;
1212  } else if ((checkInterrupt(thisPC.instAddr()) && !delayedCommit[tid])) {
1213  // Stall CPU if an interrupt is posted and we're not issuing
1214  // a delayed commit micro-op currently (delayed commit instructions
1215  // are not interruptible by interrupts, only faults)
1216  ++fetchMiscStallCycles;
1217  DPRINTF(Fetch, "[tid:%i] Fetch is stalled!\n", tid);
1218  return;
1219  }
1220  } else {
1221  if (fetchStatus[tid] == Idle) {
1222  ++fetchIdleCycles;
1223  DPRINTF(Fetch, "[tid:%i] Fetch is idle!\n", tid);
1224  }
1225 
1226  // Status is Idle, so fetch should do nothing.
1227  return;
1228  }
1229 
1230  ++fetchCycles;
1231 
1232  TheISA::PCState nextPC = thisPC;
1233 
1234  StaticInstPtr staticInst = NULL;
1235  StaticInstPtr curMacroop = macroop[tid];
1236 
1237  // If the read of the first instruction was successful, then grab the
1238  // instructions from the rest of the cache line and put them into the
1239  // queue heading to decode.
1240 
1241  DPRINTF(Fetch, "[tid:%i] Adding instructions to queue to "
1242  "decode.\n", tid);
1243 
1244  // Need to keep track of whether or not a predicted branch
1245  // ended this fetch block.
1246  bool predictedBranch = false;
1247 
1248  // Need to halt fetch if quiesce instruction detected
1249  bool quiesce = false;
1250 
1251  TheISA::MachInst *cacheInsts =
1252  reinterpret_cast<TheISA::MachInst *>(fetchBuffer[tid]);
1253 
1254  const unsigned numInsts = fetchBufferSize / instSize;
1255  unsigned blkOffset = (fetchAddr - fetchBufferPC[tid]) / instSize;
1256 
1257  // Loop through instruction memory from the cache.
1258  // Keep issuing while fetchWidth is available and branch is not
1259  // predicted taken
1260  while (numInst < fetchWidth && fetchQueue[tid].size() < fetchQueueSize
1261  && !predictedBranch && !quiesce) {
1262  // We need to process more memory if we aren't going to get a
1263  // StaticInst from the rom, the current macroop, or what's already
1264  // in the decoder.
1265  bool needMem = !inRom && !curMacroop &&
1266  !decoder[tid]->instReady();
1267  fetchAddr = (thisPC.instAddr() + pcOffset) & BaseCPU::PCMask;
1268  Addr fetchBufferBlockPC = fetchBufferAlignPC(fetchAddr);
1269 
1270  if (needMem) {
1271  // If buffer is no longer valid or fetchAddr has moved to point
1272  // to the next cache block then start fetch from icache.
1273  if (!fetchBufferValid[tid] ||
1274  fetchBufferBlockPC != fetchBufferPC[tid])
1275  break;
1276 
1277  if (blkOffset >= numInsts) {
1278  // We need to process more memory, but we've run out of the
1279  // current block.
1280  break;
1281  }
1282 
1283  decoder[tid]->moreBytes(thisPC, fetchAddr, cacheInsts[blkOffset]);
1284 
1285  if (decoder[tid]->needMoreBytes()) {
1286  blkOffset++;
1287  fetchAddr += instSize;
1288  pcOffset += instSize;
1289  }
1290  }
1291 
1292  // Extract as many instructions and/or microops as we can from
1293  // the memory we've processed so far.
1294  do {
1295  if (!(curMacroop || inRom)) {
1296  if (decoder[tid]->instReady()) {
1297  staticInst = decoder[tid]->decode(thisPC);
1298 
1299  // Increment stat of fetched instructions.
1300  ++fetchedInsts;
1301 
1302  if (staticInst->isMacroop()) {
1303  curMacroop = staticInst;
1304  } else {
1305  pcOffset = 0;
1306  }
1307  } else {
1308  // We need more bytes for this instruction so blkOffset and
1309  // pcOffset will be updated
1310  break;
1311  }
1312  }
1313  // Whether we're moving to a new macroop because we're at the
1314  // end of the current one, or the branch predictor incorrectly
1315  // thinks we are...
1316  bool newMacro = false;
1317  if (curMacroop || inRom) {
1318  if (inRom) {
1319  staticInst = cpu->microcodeRom.fetchMicroop(
1320  thisPC.microPC(), curMacroop);
1321  } else {
1322  staticInst = curMacroop->fetchMicroop(thisPC.microPC());
1323  }
1324  newMacro |= staticInst->isLastMicroop();
1325  }
1326 
1327  DynInstPtr instruction =
1328  buildInst(tid, staticInst, curMacroop,
1329  thisPC, nextPC, true);
1330 
1331  ppFetch->notify(instruction);
1332  numInst++;
1333 
1334 #if TRACING_ON
1335  if (DTRACE(O3PipeView)) {
1336  instruction->fetchTick = curTick();
1337  }
1338 #endif
1339 
1340  nextPC = thisPC;
1341 
1342  // If we're branching after this instruction, quit fetching
1343  // from the same block.
1344  predictedBranch |= thisPC.branching();
1345  predictedBranch |=
1346  lookupAndUpdateNextPC(instruction, nextPC);
1347  if (predictedBranch) {
1348  DPRINTF(Fetch, "Branch detected with PC = %s\n", thisPC);
1349  }
1350 
1351  newMacro |= thisPC.instAddr() != nextPC.instAddr();
1352 
1353  // Move to the next instruction, unless we have a branch.
1354  thisPC = nextPC;
1355  inRom = isRomMicroPC(thisPC.microPC());
1356 
1357  if (newMacro) {
1358  fetchAddr = thisPC.instAddr() & BaseCPU::PCMask;
1359  blkOffset = (fetchAddr - fetchBufferPC[tid]) / instSize;
1360  pcOffset = 0;
1361  curMacroop = NULL;
1362  }
1363 
1364  if (instruction->isQuiesce()) {
1365  DPRINTF(Fetch,
1366  "Quiesce instruction encountered, halting fetch!\n");
1367  fetchStatus[tid] = QuiescePending;
1368  status_change = true;
1369  quiesce = true;
1370  break;
1371  }
1372  } while ((curMacroop || decoder[tid]->instReady()) &&
1373  numInst < fetchWidth &&
1374  fetchQueue[tid].size() < fetchQueueSize);
1375 
1376  // Re-evaluate whether the next instruction to fetch is in micro-op ROM
1377  // or not.
1378  inRom = isRomMicroPC(thisPC.microPC());
1379  }
1380 
1381  if (predictedBranch) {
1382  DPRINTF(Fetch, "[tid:%i] Done fetching, predicted branch "
1383  "instruction encountered.\n", tid);
1384  } else if (numInst >= fetchWidth) {
1385  DPRINTF(Fetch, "[tid:%i] Done fetching, reached fetch bandwidth "
1386  "for this cycle.\n", tid);
1387  } else if (blkOffset >= fetchBufferSize) {
1388  DPRINTF(Fetch, "[tid:%i] Done fetching, reached the end of the "
1389  "fetch buffer.\n", tid);
1390  }
1391 
1392  macroop[tid] = curMacroop;
1393  fetchOffset[tid] = pcOffset;
1394 
1395  if (numInst > 0) {
1396  wroteToTimeBuffer = true;
1397  }
1398 
1399  pc[tid] = thisPC;
1400 
1401  // pipeline a fetch if we're crossing a fetch buffer boundary and not in
1402  // a state that would preclude fetching
1403  fetchAddr = (thisPC.instAddr() + pcOffset) & BaseCPU::PCMask;
1404  Addr fetchBufferBlockPC = fetchBufferAlignPC(fetchAddr);
1405  issuePipelinedIfetch[tid] = fetchBufferBlockPC != fetchBufferPC[tid] &&
1406  fetchStatus[tid] != IcacheWaitResponse &&
1407  fetchStatus[tid] != ItlbWait &&
1408  fetchStatus[tid] != IcacheWaitRetry &&
1409  fetchStatus[tid] != QuiescePending &&
1410  !curMacroop;
1411 }
1412 
1413 template<class Impl>
1414 void
1415 DefaultFetch<Impl>::recvReqRetry()
1416 {
1417  if (retryPkt != NULL) {
1418  assert(cacheBlocked);
1419  assert(retryTid != InvalidThreadID);
1420  assert(fetchStatus[retryTid] == IcacheWaitRetry);
1421 
1422  if (icachePort.sendTimingReq(retryPkt)) {
1423  fetchStatus[retryTid] = IcacheWaitResponse;
1424  // Notify Fetch Request probe when a retryPkt is successfully sent.
1425  // Note that notify must be called before retryPkt is set to NULL.
1426  ppFetchRequestSent->notify(retryPkt->req);
1427  retryPkt = NULL;
1428  retryTid = InvalidThreadID;
1429  cacheBlocked = false;
1430  }
1431  } else {
1432  assert(retryTid == InvalidThreadID);
1433  // Access has been squashed since it was sent out. Just clear
1434  // the cache being blocked.
1435  cacheBlocked = false;
1436  }
1437 }
1438 
1439 ////////////////////////////////////////
1440 //                                    //
1441 //  SMT FETCH POLICY MAINTAINED HERE  //
1442 //                                    //
1443 ////////////////////////////////////////
1444 template<class Impl>
1445 ThreadID
1446 DefaultFetch<Impl>::getFetchingThread()
1447 {
1448  if (numThreads > 1) {
1449  switch (fetchPolicy) {
1450  case FetchPolicy::RoundRobin:
1451  return roundRobin();
1452  case FetchPolicy::IQCount:
1453  return iqCount();
1454  case FetchPolicy::LSQCount:
1455  return lsqCount();
1456  case FetchPolicy::Branch:
1457  return branchCount();
1458  default:
1459  return InvalidThreadID;
1460  }
1461  } else {
1462  list<ThreadID>::iterator thread = activeThreads->begin();
1463  if (thread == activeThreads->end()) {
1464  return InvalidThreadID;
1465  }
1466 
1467  ThreadID tid = *thread;
1468 
1469  if (fetchStatus[tid] == Running ||
1470  fetchStatus[tid] == IcacheAccessComplete ||
1471  fetchStatus[tid] == Idle) {
1472  return tid;
1473  } else {
1474  return InvalidThreadID;
1475  }
1476  }
1477 }
1478 
1479 
1480 template<class Impl>
1481 ThreadID
1482 DefaultFetch<Impl>::roundRobin()
1483 {
1484  list<ThreadID>::iterator pri_iter = priorityList.begin();
1485  list<ThreadID>::iterator end = priorityList.end();
1486 
1487  ThreadID high_pri;
1488 
1489  while (pri_iter != end) {
1490  high_pri = *pri_iter;
1491 
1492  assert(high_pri <= numThreads);
1493 
1494  if (fetchStatus[high_pri] == Running ||
1495  fetchStatus[high_pri] == IcacheAccessComplete ||
1496  fetchStatus[high_pri] == Idle) {
1497 
1498  priorityList.erase(pri_iter);
1499  priorityList.push_back(high_pri);
1500 
1501  return high_pri;
1502  }
1503 
1504  pri_iter++;
1505  }
1506 
1507  return InvalidThreadID;
1508 }
1509 
1510 template<class Impl>
1511 ThreadID
1512 DefaultFetch<Impl>::iqCount()
1513 {
1514  //sorted from lowest->highest
1515  std::priority_queue<unsigned,vector<unsigned>,
1516  std::greater<unsigned> > PQ;
1517  std::map<unsigned, ThreadID> threadMap;
1518 
1519  list<ThreadID>::iterator threads = activeThreads->begin();
1520  list<ThreadID>::iterator end = activeThreads->end();
1521 
1522  while (threads != end) {
1523  ThreadID tid = *threads++;
1524  unsigned iqCount = fromIEW->iewInfo[tid].iqCount;
1525 
1526  //we can potentially get tid collisions if two threads
1527  //have the same iqCount, but this should be rare.
1528  PQ.push(iqCount);
1529  threadMap[iqCount] = tid;
1530  }
1531 
1532  while (!PQ.empty()) {
1533  ThreadID high_pri = threadMap[PQ.top()];
1534 
1535  if (fetchStatus[high_pri] == Running ||
1536  fetchStatus[high_pri] == IcacheAccessComplete ||
1537  fetchStatus[high_pri] == Idle)
1538  return high_pri;
1539  else
1540  PQ.pop();
1541 
1542  }
1543 
1544  return InvalidThreadID;
1545 }
1546 
1547 template<class Impl>
1548 ThreadID
1549 DefaultFetch<Impl>::lsqCount()
1550 {
1551  //sorted from lowest->highest
1552  std::priority_queue<unsigned,vector<unsigned>,
1553  std::greater<unsigned> > PQ;
1554  std::map<unsigned, ThreadID> threadMap;
1555 
1556  list<ThreadID>::iterator threads = activeThreads->begin();
1557  list<ThreadID>::iterator end = activeThreads->end();
1558 
1559  while (threads != end) {
1560  ThreadID tid = *threads++;
1561  unsigned ldstqCount = fromIEW->iewInfo[tid].ldstqCount;
1562 
1563  //we can potentially get tid collisions if two threads
1564  //have the same iqCount, but this should be rare.
1565  PQ.push(ldstqCount);
1566  threadMap[ldstqCount] = tid;
1567  }
1568 
1569  while (!PQ.empty()) {
1570  ThreadID high_pri = threadMap[PQ.top()];
1571 
1572  if (fetchStatus[high_pri] == Running ||
1573  fetchStatus[high_pri] == IcacheAccessComplete ||
1574  fetchStatus[high_pri] == Idle)
1575  return high_pri;
1576  else
1577  PQ.pop();
1578  }
1579 
1580  return InvalidThreadID;
1581 }
1582 
1583 template<class Impl>
1584 ThreadID
1585 DefaultFetch<Impl>::branchCount()
1586 {
1587  panic("Branch Count Fetch policy unimplemented\n");
1588  return InvalidThreadID;
1589 }
1590 
1591 template<class Impl>
1592 void
1593 DefaultFetch<Impl>::pipelineIcacheAccesses(ThreadID tid)
1594 {
1595  if (!issuePipelinedIfetch[tid]) {
1596  return;
1597  }
1598 
1599  // The next PC to access.
1600  TheISA::PCState thisPC = pc[tid];
1601 
1602  if (isRomMicroPC(thisPC.microPC())) {
1603  return;
1604  }
1605 
1606  Addr pcOffset = fetchOffset[tid];
1607  Addr fetchAddr = (thisPC.instAddr() + pcOffset) & BaseCPU::PCMask;
1608 
1609  // Align the fetch PC so it's at the start of a fetch buffer segment.
1610  Addr fetchBufferBlockPC = fetchBufferAlignPC(fetchAddr);
1611 
1612  // Unless buffer already got the block, fetch it from icache.
1613  if (!(fetchBufferValid[tid] && fetchBufferBlockPC == fetchBufferPC[tid])) {
1614  DPRINTF(Fetch, "[tid:%i] Issuing a pipelined I-cache access, "
1615  "starting at PC %s.\n", tid, thisPC);
1616 
1617  fetchCacheLine(fetchAddr, tid, thisPC.instAddr());
1618  }
1619 }
1620 
1621 template<class Impl>
1622 void
1623 DefaultFetch<Impl>::profileStall(ThreadID tid) {
1624  DPRINTF(Fetch,"There are no more threads available to fetch from.\n");
1625 
1626  // @todo Per-thread stats
1627 
1628  if (stalls[tid].drain) {
1629  ++fetchPendingDrainCycles;
1630  DPRINTF(Fetch, "Fetch is waiting for a drain!\n");
1631  } else if (activeThreads->empty()) {
1632  ++fetchNoActiveThreadStallCycles;
1633  DPRINTF(Fetch, "Fetch has no active thread!\n");
1634  } else if (fetchStatus[tid] == Blocked) {
1635  ++fetchBlockedCycles;
1636  DPRINTF(Fetch, "[tid:%i] Fetch is blocked!\n", tid);
1637  } else if (fetchStatus[tid] == Squashing) {
1638  ++fetchSquashCycles;
1639  DPRINTF(Fetch, "[tid:%i] Fetch is squashing!\n", tid);
1640  } else if (fetchStatus[tid] == IcacheWaitResponse) {
1641  ++icacheStallCycles;
1642  DPRINTF(Fetch, "[tid:%i] Fetch is waiting cache response!\n",
1643  tid);
1644  } else if (fetchStatus[tid] == ItlbWait) {
1645  ++fetchTlbCycles;
1646  DPRINTF(Fetch, "[tid:%i] Fetch is waiting ITLB walk to "
1647  "finish!\n", tid);
1648  } else if (fetchStatus[tid] == TrapPending) {
1649  ++fetchPendingTrapStallCycles;
1650  DPRINTF(Fetch, "[tid:%i] Fetch is waiting for a pending trap!\n",
1651  tid);
1652  } else if (fetchStatus[tid] == QuiescePending) {
1653  ++fetchPendingQuiesceStallCycles;
1654  DPRINTF(Fetch, "[tid:%i] Fetch is waiting for a pending quiesce "
1655  "instruction!\n", tid);
1656  } else if (fetchStatus[tid] == IcacheWaitRetry) {
1657  ++fetchIcacheWaitRetryStallCycles;
1658  DPRINTF(Fetch, "[tid:%i] Fetch is waiting for an I-cache retry!\n",
1659  tid);
1660  } else if (fetchStatus[tid] == NoGoodAddr) {
1661  DPRINTF(Fetch, "[tid:%i] Fetch predicted non-executable address\n",
1662  tid);
1663  } else {
1664  DPRINTF(Fetch, "[tid:%i] Unexpected fetch stall reason "
1665  "(Status: %i)\n",
1666  tid, fetchStatus[tid]);
1667  }
1668 }
1669 
1670 template<class Impl>
1671 bool
1672 DefaultFetch<Impl>::IcachePort::recvTimingResp(PacketPtr pkt)
1673 {
1674  DPRINTF(O3CPU, "Fetch unit received timing\n");
1675  // We shouldn't ever get a cacheable block in Modified state
1676  assert(pkt->req->isUncacheable() ||
1677  !(pkt->cacheResponding() && !pkt->hasSharers()));
1678  fetch->processCacheCompletion(pkt);
1679 
1680  return true;
1681 }
1682 
1683 template<class Impl>
1684 void
1685 DefaultFetch<Impl>::IcachePort::recvReqRetry()
1686 {
1687  fetch->recvReqRetry();
1688 }
1689 
1690 #endif//__CPU_O3_FETCH_IMPL_HH__
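
Note on the fetch buffer arithmetic used throughout this file: the fetch stage never reads a whole cache line at once. It works in fetch-buffer-sized segments, and fetchCacheLine(), fetch() and pipelineIcacheAccesses() all align the fetch address with fetchBufferAlignPC() (defined in fetch.hh) before comparing it against fetchBufferPC[tid]. The constructor checks that fetchBufferSize does not exceed and evenly divides cacheBlkSize, and the fetchBufferMask member (fetchBufferSize - 1) additionally assumes a power-of-two buffer size, so the alignment reduces to a single mask. The following is a minimal standalone sketch of that arithmetic; the names mirror the DefaultFetch members, but the snippet is illustrative only and is not part of the gem5 sources.

#include <cassert>
#include <cstdint>
#include <iostream>

using Addr = uint64_t;

int main()
{
    const unsigned cacheBlkSize    = 64;  // bytes per cache block
    const unsigned fetchBufferSize = 16;  // bytes fetched per icache request
    const Addr fetchBufferMask = fetchBufferSize - 1;  // assumes power of two

    // Mirrors the constructor's sanity checks: the fetch buffer may not be
    // larger than a cache block and must divide it evenly, so a buffer
    // segment never straddles two cache blocks.
    assert(fetchBufferSize <= cacheBlkSize);
    assert(cacheBlkSize % fetchBufferSize == 0);

    // fetchBufferAlignPC(addr): round a fetch address down to the start of
    // its fetch buffer segment, as fetchCacheLine() does before building
    // the instruction read request.
    auto fetchBufferAlignPC = [=](Addr addr) { return addr & ~fetchBufferMask; };

    const Addr vaddr = 0x1234;
    std::cout << std::hex << "fetch addr 0x" << vaddr
              << " maps to buffer segment 0x" << fetchBufferAlignPC(vaddr)
              << std::endl;  // prints 0x1230 for a 16-byte buffer
    return 0;
}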
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:163
Impl::O3CPU O3CPU
Definition: fetch.hh:78
uint32_t MachInst
Definition: types.hh:52
#define DPRINTF(x,...)
Definition: trace.hh:222
Stats::Formula fetchRate
Number of instruction fetched per cycle.
Definition: fetch.hh:599
const FlagsType pdf
Print the percent of the total that this entry represents.
Definition: info.hh:51
Impl::DynInst DynInst
Definition: fetch.hh:76
unsigned fetchWidth
The width of fetch in instructions.
Definition: fetch.hh:477
virtual void recvReqRetry()
Handles doing a retry of a failed fetch.
Definition: fetch_impl.hh:1685
ThreadID iqCount()
Returns the appropriate thread to fetch using the IQ count policy.
Definition: fetch_impl.hh:1512
void profileStall(ThreadID tid)
Profile the reasons of fetch stall.
Definition: fetch_impl.hh:1623
TheISA::Decoder * decoder[Impl::MaxThreads]
The decoder.
Definition: fetch.hh:378
decltype(nullptr) constexpr NoFault
Definition: types.hh:243
Cycles is a wrapper class for representing cycle counts, i.e.
Definition: types.hh:81
Stats::Formula idleRate
Rate of how often fetch was idle.
Definition: fetch.hh:595
#define fatal(...)
This implements a cprintf based fatal() function.
Definition: logging.hh:171
void tick()
Ticks the fetch stage, processing all inputs signals and fetching as many instructions as possible...
Definition: fetch_impl.hh:894
void resetStage()
Reset this pipeline stage.
Definition: fetch_impl.hh:351
Bitfield< 7 > i
void squashFromDecode(const TheISA::PCState &newPC, const DynInstPtr squashInst, const InstSeqNum seq_num, ThreadID tid)
Squashes a specific thread and resets the PC.
Definition: fetch_impl.hh:810
TimeBuffer< TimeStruct >::wire fromRename
Wire to get rename&#39;s information from backwards time buffer.
Definition: fetch.hh:420
void pipelineIcacheAccesses(ThreadID tid)
Pipeline the next I-cache access to the current one.
Definition: fetch_impl.hh:1593
Addr fetchBufferPC[Impl::MaxThreads]
The PC of the first instruction loaded into the fetch buffer.
Definition: fetch.hh:506
bool isMacroop() const
Definition: static_inst.hh:198
bool checkInterrupt(Addr pc)
Check if an interrupt is pending and that we need to handle.
Definition: fetch.hh:323
std::shared_ptr< Request > RequestPtr
Definition: request.hh:81
FetchStatus _status
Fetch status.
Definition: fetch.hh:202
void notify(const Arg &arg)
called at the ProbePoint call site, passes arg to each listener.
Definition: probe.hh:286
Impl::DynInstPtr DynInstPtr
Definition: fetch.hh:77
bool checkStall(ThreadID tid) const
Checks if a thread is stalled.
Definition: fetch_impl.hh:825
Stats::Scalar fetchPendingTrapStallCycles
Total number of stall cycles caused by pending traps.
Definition: fetch.hh:577
TimeBuffer< FetchStruct >::wire toDecode
Wire used to write any information heading to decode.
Definition: fetch.hh:430
int numInst
Tracks how many instructions has been fetched this cycle.
Definition: fetch.hh:453
bool cacheResponding() const
Definition: packet.hh:585
std::string name() const
Returns the name of fetch.
Definition: fetch_impl.hh:151
bool FullSystem
The FullSystem variable can be used to determine the current mode of simulation.
Definition: root.cc:132
void processCacheCompletion(PacketPtr pkt)
Processes cache completion event.
Definition: fetch_impl.hh:386
Stats::Scalar fetchMiscStallCycles
Total number of cycles spent in any other state.
Definition: fetch.hh:571
Stats::Scalar fetchIdleCycles
Stat for total number of cycles spent blocked due to other stages in the pipeline.
Definition: fetch.hh:567
Stats::Formula branchRate
Number of branch fetches per cycle.
Definition: fetch.hh:597
unsigned fetchQueueSize
The size of the fetch queue in micro-ops.
Definition: fetch.hh:509
void update(const InstSeqNum &done_sn, ThreadID tid)
Tells the branch predictor to commit any updates until the given sequence number. ...
Definition: bpred_unit.cc:345
Overload hash function for BasicBlockRange type.
Definition: vec_reg.hh:587
bool sendTimingReq(PacketPtr pkt)
Attempt to send a timing request to the slave port by calling its corresponding receive function...
Definition: port.hh:441
Stats::Scalar fetchTlbSquashes
Total number of outstanding tlb accesses that were dropped due to a squash.
Definition: fetch.hh:591
TimeBuffer< TimeStruct >::wire fromDecode
Wire to get decode&#39;s information from backwards time buffer.
Definition: fetch.hh:417
#define DTRACE(x)
Definition: debug.hh:143
Stats::Scalar icacheStallCycles
Stat for total number of cycles stalled due to an icache miss.
Definition: fetch.hh:551
ThreadID getFetchingThread()
Returns the appropriate thread to fetch, given the fetch policy.
Definition: fetch_impl.hh:1446
Derived & flags(Flags _flags)
Set the flags and marks this stat to print at the end of simulation.
Definition: statistics.hh:333
bool issuePipelinedIfetch[Impl::MaxThreads]
Set to true if a pipelined I-cache request should be issued.
Definition: fetch.hh:544
ThreadID numThreads
Number of threads.
Definition: fetch.hh:527
std::enable_if< std::is_integral< T >::value, T >::type random()
Use the SFINAE idiom to choose an implementation based on whether the type is integral or floating po...
Definition: random.hh:79
Stats::Scalar fetchedInsts
Stat for total number of fetched instructions.
Definition: fetch.hh:553
Stats::Scalar fetchNoActiveThreadStallCycles
Total number of stall cycles caused by no active threads to run.
Definition: fetch.hh:575
void drainSanityCheck() const
Perform sanity checks after a drain.
Definition: fetch_impl.hh:440
RequestPtr req
A pointer to the original request.
Definition: packet.hh:321
FetchStatus
Overall fetch status.
Definition: fetch.hh:179
Stats::Distribution fetchNisnDist
Distribution of number of instructions fetched each cycle.
Definition: fetch.hh:593
void deactivateThread(ThreadID tid)
For priority-based fetch policies, need to keep update priorityList.
Definition: fetch_impl.hh:544
Stats::Scalar fetchPendingDrainCycles
Total number of cycles spent in waiting for drains.
Definition: fetch.hh:573
PacketPtr retryPkt
The packet that is waiting to be retried.
Definition: fetch.hh:486
ThreadID numFetchingThreads
Number of threads that are actively fetching.
Definition: fetch.hh:530
Cycles iewToFetchDelay
IEW to fetch delay.
Definition: fetch.hh:471
void takeOverFrom()
Takes over from another CPU&#39;s thread.
Definition: fetch_impl.hh:489
void drainStall(ThreadID tid)
Stall the fetch stage after reaching a safe drain point.
Definition: fetch_impl.hh:498
Stats::Scalar fetchCycles
Stat for total number of cycles spent fetching.
Definition: fetch.hh:559
ThreadID threadFetched
Thread ID being fetched.
Definition: fetch.hh:533
Tick curTick()
The current simulated tick.
Definition: core.hh:44
void quiesce(ThreadContext *tc)
Definition: pseudo_inst.cc:124
void regStats()
Registers statistics.
Definition: fetch_impl.hh:168
void clearStates(ThreadID tid)
Clear all thread-specific states.
Definition: fetch_impl.hh:331
Addr fetchOffset[Impl::MaxThreads]
Definition: fetch.hh:437
ThreadID roundRobin()
Returns the appropriate thread to fetch using a round robin policy.
Definition: fetch_impl.hh:1482
virtual StaticInstPtr fetchMicroop(MicroPC upc) const
Return the microop that goes with a particular micropc.
Definition: static_inst.cc:98
void switchToActive()
Changes the status of this stage to active, and indicates this to the CPU.
Definition: fetch_impl.hh:518
Derived & prereq(const Stat &prereq)
Set the prerequisite stat and marks this stat to print at the end of simulation.
Definition: statistics.hh:347
int instSize
Size of instructions.
Definition: fetch.hh:518
void finishTranslation(const Fault &fault, const RequestPtr &mem_req)
Definition: fetch_impl.hh:651
void fetch(bool &status_change)
Does the actual fetching of instructions and passing them on to the next stage.
Definition: fetch_impl.hh:1153
Stats::Scalar fetchTlbCycles
Stat for total number of cycles spent waiting for translation.
Definition: fetch.hh:563
RequestPtr memReq[Impl::MaxThreads]
Memory request used to access cache.
Definition: fetch.hh:445
IcachePort icachePort
Instruction port.
Definition: fetch.hh:541
bool fetchCacheLine(Addr vaddr, ThreadID tid, Addr pc)
Fetches the cache line that contains the fetch PC.
Definition: fetch_impl.hh:601
void recvReqRetry()
Handles retrying the fetch access.
Definition: fetch_impl.hh:1415
bool predict(const StaticInstPtr &inst, const InstSeqNum &seqNum, TheISA::PCState &pc, ThreadID tid)
Predicts whether or not the instruction is a taken branch, and the target of the branch if it is take...
Definition: bpred_unit.cc:170
DefaultFetch(O3CPU *_cpu, DerivO3CPUParams *params)
DefaultFetch constructor.
Definition: fetch_impl.hh:79
The request was an instruction fetch.
Definition: request.hh:103
uint64_t InstSeqNum
Definition: inst_seq.hh:37
void setReq(const RequestPtr &_req)
Definition: fetch.hh:157
ThreadID retryTid
The thread that is waiting on the cache to tell fetch to retry.
Definition: fetch.hh:489
Distribution & init(Counter min, Counter max, Counter bkt)
Set the parameters of this distribution.
Definition: statistics.hh:2606
Stats::Scalar predictedBranches
Stat for total number of predicted branches.
Definition: fetch.hh:557
void advancePC(PCState &pc, const StaticInstPtr &inst)
Definition: utility.hh:393
void setActiveThreads(std::list< ThreadID > *at_ptr)
Sets pointer to list of active threads.
Definition: fetch_impl.hh:304
std::list< ThreadID > * activeThreads
List of Active Threads.
Definition: fetch.hh:524
Defines global host-dependent types: Counter, Tick, and (indirectly) {int,uint}{8,16,32,64}_t.
uint64_t Addr
Address type. This will probably be moved somewhere else in the near future.
Definition: types.hh:140
void regProbePoints()
Registers probes.
Definition: fetch_impl.hh:158
TimeBuffer< TimeStruct >::wire fromCommit
Wire to get commit&#39;s information from backwards time buffer.
Definition: fetch.hh:426
Stats::Scalar fetchIcacheWaitRetryStallCycles
Total number of stall cycles caused by I-cache wait retries.
Definition: fetch.hh:581
A Packet is used to encapsulate a transfer between two objects in the memory system (e.g., the L1 and L2 cache).
Definition: packet.hh:249
uint8_t * fetchBuffer[Impl::MaxThreads]
The fetch data that is being fetched and buffered.
Definition: fetch.hh:503
static bool isRomMicroPC(MicroPC upc)
Definition: types.hh:159
std::deque< DynInstPtr > fetchQueue[Impl::MaxThreads]
Queue of fetched instructions.
Definition: fetch.hh:512
Stats::Scalar fetchIcacheSquashes
Total number of outstanding icache accesses that were dropped due to a squash.
Definition: fetch.hh:587
bool interruptPending
Checks if there is an interrupt pending.
Definition: fetch.hh:538
static StaticInstPtr nopStaticInstPtr
Pointer to a statically allocated generic "nop" instruction object.
Definition: static_inst.hh:228
ThreadID branchCount()
Returns the appropriate thread to fetch using the branch count policy.
Definition: fetch_impl.hh:1585
const FlagsType total
Print the total.
Definition: info.hh:49
const ThreadID InvalidThreadID
Definition: types.hh:226
unsigned decodeWidth
The width of decode in instructions.
Definition: fetch.hh:480
TheISA::PCState pc[Impl::MaxThreads]
Definition: fetch.hh:435
static const Addr PCMask
Definition: base.hh:275
bool hasSharers() const
Definition: packet.hh:612
Counter lastIcacheStall[Impl::MaxThreads]
Icache stall statistics.
Definition: fetch.hh:521
bool scheduled() const
Determine if the current event is scheduled.
Definition: eventq.hh:460
FinishTranslationEvent finishTranslationEvent
Event used to delay fault generation of translation faults.
Definition: fetch.hh:547
Derived & name(const std::string &name)
Set the name and mark this stat to print at the end of simulation.
Definition: statistics.hh:276
Stats::Scalar fetchPendingQuiesceStallCycles
Total number of stall cycles caused by pending quiesce instructions.
Definition: fetch.hh:579
int16_t ThreadID
Thread index/ID type.
Definition: types.hh:225
void setTimeBuffer(TimeBuffer< TimeStruct > *time_buffer)
Sets the main backwards communication time buffer pointer.
Definition: fetch_impl.hh:291
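In practice setTimeBuffer also pulls the per-stage wires (fromIEW and fromCommit are indexed elsewhere on this page) out of the shared buffer, offset backwards by the corresponding stage-to-fetch delays. A partial, hedged sketch; the decode and rename wires would be set up the same way:

template <class Impl>
void
DefaultFetch<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *time_buffer)
{
    timeBuffer = time_buffer;

    // Read each back-end stage's signals from the appropriate number
    // of cycles in the past.
    fromIEW = timeBuffer->getWire(-iewToFetchDelay);
    fromCommit = timeBuffer->getWire(-commitToFetchDelay);
}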
void setFetchQueue(TimeBuffer< FetchStruct > *fq_ptr)
Sets pointer to time buffer used to communicate to the next stage.
Definition: fetch_impl.hh:311
Addr fetchBufferAlignPC(Addr addr)
Align a PC to the start of a fetch buffer block.
Definition: fetch.hh:372
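Assuming fetchBufferSize (indexed above) is a power of two, the alignment amounts to masking off the block-offset bits; a minimal sketch, not a quotation of fetch.hh:372:

Addr
fetchBufferAlignPC(Addr addr)
{
    // Clear the offset bits so the address points at the start of its
    // fetch buffer block.
    return addr & ~static_cast<Addr>(fetchBufferSize - 1);
}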
ThreadStatus fetchStatus[Impl::MaxThreads]
Per-thread status.
Definition: fetch.hh:205
Declaration of the Packet class.
Stalls stalls[Impl::MaxThreads]
Tracks which stages are telling fetch to stall.
Definition: fetch.hh:462
O3CPU * cpu
Pointer to the O3CPU.
Definition: fetch.hh:411
void drainSanityCheck() const
Perform sanity checks after a drain.
Definition: bpred_unit.cc:161
bool cacheBlocked
Is the cache blocked? If so no threads can access it.
Definition: fetch.hh:483
FetchStatus updateFetchStatus()
Updates overall fetch stage status; to be called at the end of each cycle.
Definition: fetch_impl.hh:840
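Conceptually this scans the active threads' per-thread statuses and reports the stage Active if any of them can still make progress; a hedged sketch (the status names are assumptions about the ThreadStatus and FetchStatus enums, and the real routine also notifies the CPU when the stage's activity changes):

template <class Impl>
typename DefaultFetch<Impl>::FetchStatus
DefaultFetch<Impl>::updateFetchStatus()
{
    // Any running, squashing, or cache-access-complete thread keeps
    // the whole stage active for another cycle.
    for (ThreadID tid : *activeThreads) {
        if (fetchStatus[tid] == Running ||
            fetchStatus[tid] == Squashing ||
            fetchStatus[tid] == IcacheAccessComplete) {
            return Active;
        }
    }
    return Inactive;
}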
Cycles decodeToFetchDelay
Decode to fetch delay.
Definition: fetch.hh:465
Cycles commitToFetchDelay
Commit to fetch delay.
Definition: fetch.hh:474
void switchToInactive()
Changes the status of this stage to inactive, and indicates this to the CPU.
Definition: fetch_impl.hh:531
void startupStage()
Initialize stage.
Definition: fetch_impl.hh:319
bool lookupAndUpdateNextPC(const DynInstPtr &inst, TheISA::PCState &pc)
Looks up in the branch predictor to see if the next PC should be either next PC+=MachInst or a branch target.
Definition: fetch_impl.hh:555
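The control flow ties the branch predictor's predict() (indexed above) to advancePC(); the sketch below is illustrative, and the DynInst accessors it uses (isControl, setPredTarg, setPredTaken, threadNumber, seqNum, staticInst) are assumptions about the dynamic-instruction interface rather than members documented on this page:

template <class Impl>
bool
DefaultFetch<Impl>::lookupAndUpdateNextPC(const DynInstPtr &inst,
                                          TheISA::PCState &nextPC)
{
    // Non-control instructions never consult the predictor: just step
    // the PC past the instruction and record a not-taken prediction.
    if (!inst->isControl()) {
        TheISA::advancePC(nextPC, inst->staticInst);
        inst->setPredTarg(nextPC);
        inst->setPredTaken(false);
        return false;
    }

    // Control instructions: ask the predictor for direction and target,
    // then record the prediction on the instruction.
    ThreadID tid = inst->threadNumber;
    bool taken = branchPred->predict(inst->staticInst, inst->seqNum,
                                     nextPC, tid);
    inst->setPredTarg(nextPC);
    inst->setPredTaken(taken);
    return taken;
}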
Random random_mt
Definition: random.cc:96
ProbePointArg< RequestPtr > * ppFetchRequestSent
To probe when a fetch request is successfully sent.
Definition: fetch.hh:216
const T * getConstPtr() const
Definition: packet.hh:1093
ProbePointArg< DynInstPtr > * ppFetch
Probe points.
Definition: fetch.hh:214
void dataDynamic(T *p)
Set the data pointer to a value that should have delete [] called on it.
Definition: packet.hh:1072
unsigned size
Definition: timebuf.hh:42
wire getWire(int idx)
Definition: timebuf.hh:229
void squash(const InstSeqNum &squashed_sn, ThreadID tid)
Squashes all outstanding updates until a given sequence number.
Definition: bpred_unit.cc:368
void setFault(Fault _fault)
Definition: fetch.hh:152
Stats::Scalar fetchSquashCycles
Stat for total number of cycles spent squashing.
Definition: fetch.hh:561
void squash(const TheISA::PCState &newPC, const InstSeqNum seq_num, DynInstPtr squashInst, ThreadID tid)
Squashes a specific thread and resets the PC.
Definition: fetch_impl.hh:880
unsigned fetchBufferSize
The size of the fetch buffer in bytes.
Definition: fetch.hh:497
ThreadID lsqCount()
Returns the appropriate thread to fetch using the LSQ count policy.
Definition: fetch_impl.hh:1549
FetchPolicy fetchPolicy
Fetch policy.
Definition: fetch.hh:208
Derived & desc(const std::string &_desc)
Set the description and mark this stat to print at the end of simulation.
Definition: statistics.hh:309
Cycles renameToFetchDelay
Rename to fetch delay.
Definition: fetch.hh:468
virtual bool recvTimingResp(PacketPtr pkt)
Timing version of receive.
Definition: fetch_impl.hh:1672
bool isDrained() const
Has the stage drained?
Definition: fetch_impl.hh:458
void wakeFromQuiesce()
Tells fetch to wake up from a quiesce instruction.
Definition: fetch_impl.hh:508
#define warn(...)
Definition: logging.hh:208
Stats::Scalar fetchBlockedCycles
Total number of cycles spent blocked.
Definition: fetch.hh:569
Stats::Scalar fetchedCacheLines
Stat for total number of fetched cache lines.
Definition: fetch.hh:583
void drainResume()
Resume after a drain.
Definition: fetch_impl.hh:430
std::list< ThreadID > priorityList
List that has the threads organized by priority.
Definition: fetch.hh:211
bool checkSignalsAndUpdate(ThreadID tid)
Checks all input signals and updates the status as necessary.
Definition: fetch_impl.hh:996
StaticInstPtr macroop[Impl::MaxThreads]
Definition: fetch.hh:439
bool delayedCommit[Impl::MaxThreads]
Can the fetch stage redirect from an interrupt on this instruction?
Definition: fetch.hh:442
Stats::Scalar fetchedBranches
Total number of fetched branches.
Definition: fetch.hh:555
GenericISA::DelaySlotPCState< MachInst > PCState
Definition: types.hh:41
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true, allowing the programmer to specify a diagnostic printout.
Definition: logging.hh:181
DynInstPtr buildInst(ThreadID tid, StaticInstPtr staticInst, StaticInstPtr curMacroop, TheISA::PCState thisPC, TheISA::PCState nextPC, bool trace)
Definition: fetch_impl.hh:1101
std::shared_ptr< FaultBase > Fault
Definition: types.hh:238
BPredUnit * branchPred
BPredUnit.
Definition: fetch.hh:433
TimeBuffer< TimeStruct > * timeBuffer
Time buffer interface.
Definition: fetch.hh:414
void sample(const U &v, int n=1)
Add a value to the distribution n times.
Definition: statistics.hh:1896
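The Distribution members indexed on this page compose in the usual gem5 stats chain: init() sets the bucket range, name() and desc() label the stat, and sample() records a value. An illustrative use for fetchNisnDist follows; fetchWidth and numInst are assumed names for the fetch width parameter and the per-cycle instruction count, not members documented here.

// In regStats(): configure the per-cycle fetch-count distribution.
fetchNisnDist
    .init(/* min */ 0, /* max */ fetchWidth, /* bucket size */ 1)
    .name(name() + ".rateDist")
    .desc("Number of instructions fetched each cycle (Total)");

// Once per cycle, at the end of fetch():
fetchNisnDist.sample(numInst);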
bool wroteToTimeBuffer
Variable that tracks if fetch has written to the time buffer this cycle.
Definition: fetch.hh:450
void doSquash(const TheISA::PCState &newPC, const DynInstPtr squashInst, ThreadID tid)
Squashes a specific thread and resets the PC.
Definition: fetch_impl.hh:758
unsigned int cacheBlkSize
Cache block size.
Definition: fetch.hh:492
bool fetchBufferValid[Impl::MaxThreads]
Whether or not the fetch buffer data is valid.
Definition: fetch.hh:515
bool isLastMicroop() const
Definition: static_inst.hh:201
ProbePointArg< PacketInfo > Packet
Packet probe point.
Definition: mem.hh:103
TimeBuffer< TimeStruct >::wire fromIEW
Wire to get iew's information from backwards time buffer.
Definition: fetch.hh:423
