gem5  v20.1.0.0
fetch_impl.hh
1 /*
2  * Copyright (c) 2010-2014 ARM Limited
3  * Copyright (c) 2012-2013 AMD
4  * All rights reserved.
5  *
6  * The license below extends only to copyright in the software and shall
7  * not be construed as granting a license to any other intellectual
8  * property including but not limited to intellectual property relating
9  * to a hardware implementation of the functionality of the software
10  * licensed hereunder. You may use the software subject to the license
11  * terms below provided that you ensure that this notice is replicated
12  * unmodified and in its entirety in all distributions of the software,
13  * modified or unmodified, in source code or in binary form.
14  *
15  * Copyright (c) 2004-2006 The Regents of The University of Michigan
16  * All rights reserved.
17  *
18  * Redistribution and use in source and binary forms, with or without
19  * modification, are permitted provided that the following conditions are
20  * met: redistributions of source code must retain the above copyright
21  * notice, this list of conditions and the following disclaimer;
22  * redistributions in binary form must reproduce the above copyright
23  * notice, this list of conditions and the following disclaimer in the
24  * documentation and/or other materials provided with the distribution;
25  * neither the name of the copyright holders nor the names of its
26  * contributors may be used to endorse or promote products derived from
27  * this software without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40  */
41 
42 #ifndef __CPU_O3_FETCH_IMPL_HH__
43 #define __CPU_O3_FETCH_IMPL_HH__
44 
45 #include <algorithm>
46 #include <cstring>
47 #include <list>
48 #include <map>
49 #include <queue>
50 
51 #include "arch/generic/tlb.hh"
52 #include "arch/utility.hh"
53 #include "base/random.hh"
54 #include "base/types.hh"
55 #include "config/the_isa.hh"
56 #include "cpu/base.hh"
57 //#include "cpu/checker/cpu.hh"
58 #include "cpu/o3/cpu.hh"
59 #include "cpu/o3/fetch.hh"
60 #include "cpu/exetrace.hh"
61 #include "debug/Activity.hh"
62 #include "debug/Drain.hh"
63 #include "debug/Fetch.hh"
64 #include "debug/O3CPU.hh"
65 #include "debug/O3PipeView.hh"
66 #include "mem/packet.hh"
67 #include "params/DerivO3CPU.hh"
68 #include "sim/byteswap.hh"
69 #include "sim/core.hh"
70 #include "sim/eventq.hh"
71 #include "sim/full_system.hh"
72 #include "sim/system.hh"
73 #include "cpu/o3/isa_specific.hh"
74 
75 using namespace std;
76 
77 template<class Impl>
78 DefaultFetch<Impl>::DefaultFetch(O3CPU *_cpu, DerivO3CPUParams *params)
79  : fetchPolicy(params->smtFetchPolicy),
80  cpu(_cpu),
81  branchPred(nullptr),
82  decodeToFetchDelay(params->decodeToFetchDelay),
83  renameToFetchDelay(params->renameToFetchDelay),
84  iewToFetchDelay(params->iewToFetchDelay),
85  commitToFetchDelay(params->commitToFetchDelay),
86  fetchWidth(params->fetchWidth),
87  decodeWidth(params->decodeWidth),
88  retryPkt(NULL),
89  retryTid(InvalidThreadID),
90  cacheBlkSize(cpu->cacheLineSize()),
91  fetchBufferSize(params->fetchBufferSize),
92  fetchBufferMask(fetchBufferSize - 1),
93  fetchQueueSize(params->fetchQueueSize),
94  numThreads(params->numThreads),
95  numFetchingThreads(params->smtNumFetchingThreads),
96  icachePort(this, _cpu),
97  finishTranslationEvent(this), fetchStats(_cpu, this)
98 {
99  if (numThreads > Impl::MaxThreads)
100  fatal("numThreads (%d) is larger than compiled limit (%d),\n"
101  "\tincrease MaxThreads in src/cpu/o3/impl.hh\n",
102  numThreads, static_cast<int>(Impl::MaxThreads));
103  if (fetchWidth > Impl::MaxWidth)
104  fatal("fetchWidth (%d) is larger than compiled limit (%d),\n"
105  "\tincrease MaxWidth in src/cpu/o3/impl.hh\n",
106  fetchWidth, static_cast<int>(Impl::MaxWidth));
107  if (fetchBufferSize > cacheBlkSize)
108  fatal("fetch buffer size (%u bytes) is greater than the cache "
109  "block size (%u bytes)\n", fetchBufferSize, cacheBlkSize);
110  if (cacheBlkSize % fetchBufferSize)
111  fatal("cache block (%u bytes) is not a multiple of the "
112  "fetch buffer (%u bytes)\n", cacheBlkSize, fetchBufferSize);
113 
114  // Figure out fetch policy
115  panic_if(fetchPolicy == FetchPolicy::SingleThread && numThreads > 1,
116  "Invalid Fetch Policy for a SMT workload.");
117 
118  // Get the size of an instruction.
119  instSize = sizeof(TheISA::MachInst);
120 
121  for (int i = 0; i < Impl::MaxThreads; i++) {
122  fetchStatus[i] = Idle;
123  decoder[i] = nullptr;
124  pc[i] = 0;
125  fetchOffset[i] = 0;
126  macroop[i] = nullptr;
127  delayedCommit[i] = false;
128  memReq[i] = nullptr;
129  stalls[i] = {false, false};
130  fetchBuffer[i] = NULL;
131  fetchBufferPC[i] = 0;
132  fetchBufferValid[i] = false;
133  lastIcacheStall[i] = 0;
134  issuePipelinedIfetch[i] = false;
135  }
136 
137  branchPred = params->branchPred;
138 
139  for (ThreadID tid = 0; tid < numThreads; tid++) {
140  decoder[tid] = new TheISA::Decoder(
141  dynamic_cast<TheISA::ISA *>(params->isa[tid]));
142  // Create space to buffer the cache line data,
143  // which may not hold the entire cache line.
144  fetchBuffer[tid] = new uint8_t[fetchBufferSize];
145  }
146 }
147 
148 template <class Impl>
149 std::string
150 DefaultFetch<Impl>::name() const
151 {
152  return cpu->name() + ".fetch";
153 }
154 
155 template <class Impl>
156 void
157 DefaultFetch<Impl>::regProbePoints()
158 {
159  ppFetch = new ProbePointArg<DynInstPtr>(cpu->getProbeManager(), "Fetch");
160  ppFetchRequestSent = new ProbePointArg<RequestPtr>(cpu->getProbeManager(),
161  "FetchRequest");
162 
163 }
164 
165 template <class Impl>
166 DefaultFetch<Impl>::FetchStatGroup::FetchStatGroup(O3CPU *cpu,
167  DefaultFetch *fetch)
168  : Stats::Group(cpu, "fetch"),
169  ADD_STAT(icacheStallCycles,
170  "Number of cycles fetch is stalled on an Icache miss"),
171  ADD_STAT(insts, "Number of instructions fetch has processed"),
172  ADD_STAT(branches, "Number of branches that fetch encountered"),
173  ADD_STAT(predictedBranches,
174  "Number of branches that fetch has predicted taken"),
175  ADD_STAT(cycles,
176  "Number of cycles fetch has run and was not squashing or blocked"),
177  ADD_STAT(squashCycles, "Number of cycles fetch has spent squashing"),
178  ADD_STAT(tlbCycles,
179  "Number of cycles fetch has spent waiting for tlb"),
180  ADD_STAT(idleCycles, "Number of cycles fetch was idle"),
181  ADD_STAT(blockedCycles, "Number of cycles fetch has spent blocked"),
182  ADD_STAT(miscStallCycles,
183  "Number of cycles fetch has spent waiting on interrupts,"
184  "or bad addresses, or out of MSHRs"),
185  ADD_STAT(pendingDrainCycles,
186  "Number of cycles fetch has spent waiting on pipes to drain"),
187  ADD_STAT(noActiveThreadStallCycles,
188  "Number of stall cycles due to no active thread to fetch from"),
189  ADD_STAT(pendingTrapStallCycles,
190  "Number of stall cycles due to pending traps"),
191  ADD_STAT(pendingQuiesceStallCycles,
192  "Number of stall cycles due to pending quiesce instructions"),
193  ADD_STAT(icacheWaitRetryStallCycles,
194  "Number of stall cycles due to full MSHR"),
195  ADD_STAT(cacheLines, "Number of cache lines fetched"),
196  ADD_STAT(icacheSquashes,
197  "Number of outstanding Icache misses that were squashed"),
198  ADD_STAT(tlbSquashes,
199  "Number of outstanding ITLB misses that were squashed"),
200  ADD_STAT(nisnDist,
201  "Number of instructions fetched each cycle (Total)"),
202  ADD_STAT(idleRate, "Percent of cycles fetch was idle",
203  idleCycles * 100 / cpu->numCycles),
204  ADD_STAT(branchRate, "Number of branch fetches per cycle",
205  branches / cpu->numCycles),
206  ADD_STAT(rate, "Number of inst fetches per cycle",
207  insts / cpu->numCycles)
208 {
209  icacheStallCycles
210  .prereq(icacheStallCycles);
211  insts
212  .prereq(insts);
213  branches
214  .prereq(branches);
215  predictedBranches
216  .prereq(predictedBranches);
217  cycles
218  .prereq(cycles);
219  squashCycles
220  .prereq(squashCycles);
221  tlbCycles
222  .prereq(tlbCycles);
223  idleCycles
224  .prereq(idleCycles);
225  blockedCycles
226  .prereq(blockedCycles);
227  cacheLines
228  .prereq(cacheLines);
229  miscStallCycles
230  .prereq(miscStallCycles);
231  pendingDrainCycles
232  .prereq(pendingDrainCycles);
233  noActiveThreadStallCycles
234  .prereq(noActiveThreadStallCycles);
235  pendingTrapStallCycles
236  .prereq(pendingTrapStallCycles);
237  pendingQuiesceStallCycles
238  .prereq(pendingQuiesceStallCycles);
239  icacheWaitRetryStallCycles
240  .prereq(icacheWaitRetryStallCycles);
241  icacheSquashes
242  .prereq(icacheSquashes);
243  tlbSquashes
244  .prereq(tlbSquashes);
245  nisnDist
246  .init(/* base value */ 0,
247  /* last value */ fetch->fetchWidth,
248  /* bucket size */ 1)
249  .flags(Stats::pdf);
250  idleRate
251  .prereq(idleRate);
252  branchRate
253  .flags(Stats::total);
254  rate
255  .flags(Stats::total);
256 }
257 template<class Impl>
258 void
259 DefaultFetch<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *time_buffer)
260 {
261  timeBuffer = time_buffer;
262 
263  // Create wires to get information from proper places in time buffer.
264  fromDecode = timeBuffer->getWire(-decodeToFetchDelay);
265  fromRename = timeBuffer->getWire(-renameToFetchDelay);
266  fromIEW = timeBuffer->getWire(-iewToFetchDelay);
267  fromCommit = timeBuffer->getWire(-commitToFetchDelay);
268 }
269 
270 template<class Impl>
271 void
272 DefaultFetch<Impl>::setActiveThreads(std::list<ThreadID> *at_ptr)
273 {
274  activeThreads = at_ptr;
275 }
276 
277 template<class Impl>
278 void
279 DefaultFetch<Impl>::setFetchQueue(TimeBuffer<FetchStruct> *ftb_ptr)
280 {
281  // Create wire to write information to proper place in fetch time buf.
282  toDecode = ftb_ptr->getWire(0);
283 }
284 
285 template<class Impl>
286 void
287 DefaultFetch<Impl>::startupStage()
288 {
289  assert(priorityList.empty());
290  resetStage();
291 
292  // Fetch needs to start fetching instructions at the very beginning,
293  // so it must start up in active state.
294  switchToActive();
295 }
296 
297 template<class Impl>
298 void
299 DefaultFetch<Impl>::clearStates(ThreadID tid)
300 {
301  fetchStatus[tid] = Running;
302  pc[tid] = cpu->pcState(tid);
303  fetchOffset[tid] = 0;
304  macroop[tid] = NULL;
305  delayedCommit[tid] = false;
306  memReq[tid] = NULL;
307  stalls[tid].decode = false;
308  stalls[tid].drain = false;
309  fetchBufferPC[tid] = 0;
310  fetchBufferValid[tid] = false;
311  fetchQueue[tid].clear();
312 
313  // TODO not sure what to do with priorityList for now
314  // priorityList.push_back(tid);
315 }
316 
317 template<class Impl>
318 void
319 DefaultFetch<Impl>::resetStage()
320 {
321  numInst = 0;
322  interruptPending = false;
323  cacheBlocked = false;
324 
325  priorityList.clear();
326 
327  // Setup PC and nextPC with initial state.
328  for (ThreadID tid = 0; tid < numThreads; ++tid) {
329  fetchStatus[tid] = Running;
330  pc[tid] = cpu->pcState(tid);
331  fetchOffset[tid] = 0;
332  macroop[tid] = NULL;
333 
334  delayedCommit[tid] = false;
335  memReq[tid] = NULL;
336 
337  stalls[tid].decode = false;
338  stalls[tid].drain = false;
339 
340  fetchBufferPC[tid] = 0;
341  fetchBufferValid[tid] = false;
342 
343  fetchQueue[tid].clear();
344 
345  priorityList.push_back(tid);
346  }
347 
348  wroteToTimeBuffer = false;
349  _status = Inactive;
350 }
351 
352 template<class Impl>
353 void
354 DefaultFetch<Impl>::processCacheCompletion(PacketPtr pkt)
355 {
356  ThreadID tid = cpu->contextToThread(pkt->req->contextId());
357 
358  DPRINTF(Fetch, "[tid:%i] Waking up from cache miss.\n", tid);
359  assert(!cpu->switchedOut());
360 
361  // Only change the status if it's still waiting on the icache access
362  // to return.
363  if (fetchStatus[tid] != IcacheWaitResponse ||
364  pkt->req != memReq[tid]) {
365  ++fetchStats.icacheSquashes;
366  delete pkt;
367  return;
368  }
369 
370  memcpy(fetchBuffer[tid], pkt->getConstPtr<uint8_t>(), fetchBufferSize);
371  fetchBufferValid[tid] = true;
372 
373  // Wake up the CPU (if it went to sleep and was waiting on
374  // this completion event).
375  cpu->wakeCPU();
376 
377  DPRINTF(Activity, "[tid:%i] Activating fetch due to cache completion\n",
378  tid);
379 
380  switchToActive();
381 
382  // Only switch to IcacheAccessComplete if we're not stalled as well.
383  if (checkStall(tid)) {
384  fetchStatus[tid] = Blocked;
385  } else {
386  fetchStatus[tid] = IcacheAccessComplete;
387  }
388 
389  pkt->req->setAccessLatency();
390  cpu->ppInstAccessComplete->notify(pkt);
391  // Reset the mem req to NULL.
392  delete pkt;
393  memReq[tid] = NULL;
394 }
395 
396 template <class Impl>
397 void
398 DefaultFetch<Impl>::drainResume()
399 {
400  for (ThreadID i = 0; i < numThreads; ++i) {
401  stalls[i].decode = false;
402  stalls[i].drain = false;
403  }
404 }
405 
406 template <class Impl>
407 void
408 DefaultFetch<Impl>::drainSanityCheck() const
409 {
410  assert(isDrained());
411  assert(retryPkt == NULL);
412  assert(retryTid == InvalidThreadID);
413  assert(!cacheBlocked);
414  assert(!interruptPending);
415 
416  for (ThreadID i = 0; i < numThreads; ++i) {
417  assert(!memReq[i]);
418  assert(fetchStatus[i] == Idle || stalls[i].drain);
419  }
420 
421  branchPred->drainSanityCheck();
422 }
423 
424 template <class Impl>
425 bool
426 DefaultFetch<Impl>::isDrained() const
427 {
428  /* Make sure that threads are either idle or that the commit stage
429  * has signaled that draining has completed by setting the drain
430  * stall flag. This effectively forces the pipeline to be disabled
431  * until the whole system is drained (simulation may continue to
432  * drain other components).
433  */
434  for (ThreadID i = 0; i < numThreads; ++i) {
435  // Verify fetch queues are drained
436  if (!fetchQueue[i].empty())
437  return false;
438 
439  // Return false if not idle or drain stalled
440  if (fetchStatus[i] != Idle) {
441  if (fetchStatus[i] == Blocked && stalls[i].drain)
442  continue;
443  else
444  return false;
445  }
446  }
447 
448  /* The pipeline might start up again in the middle of the drain
449  * cycle if the finish translation event is scheduled, so make
450  * sure that's not the case.
451  */
452  return !finishTranslationEvent.scheduled();
453 }
454 
455 template <class Impl>
456 void
457 DefaultFetch<Impl>::takeOverFrom()
458 {
459  assert(cpu->getInstPort().isConnected());
460  resetStage();
461 
462 }
463 
464 template <class Impl>
465 void
466 DefaultFetch<Impl>::drainStall(ThreadID tid)
467 {
468  assert(cpu->isDraining());
469  assert(!stalls[tid].drain);
470  DPRINTF(Drain, "%i: Thread drained.\n", tid);
471  stalls[tid].drain = true;
472 }
473 
474 template <class Impl>
475 void
476 DefaultFetch<Impl>::wakeFromQuiesce()
477 {
478  DPRINTF(Fetch, "Waking up from quiesce\n");
479  // Hopefully this is safe
480  // @todo: Allow other threads to wake from quiesce.
481  fetchStatus[0] = Running;
482 }
483 
484 template <class Impl>
485 inline void
486 DefaultFetch<Impl>::switchToActive()
487 {
488  if (_status == Inactive) {
489  DPRINTF(Activity, "Activating stage.\n");
490 
491  cpu->activateStage(O3CPU::FetchIdx);
492 
493  _status = Active;
494  }
495 }
496 
497 template <class Impl>
498 inline void
499 DefaultFetch<Impl>::switchToInactive()
500 {
501  if (_status == Active) {
502  DPRINTF(Activity, "Deactivating stage.\n");
503 
504  cpu->deactivateStage(O3CPU::FetchIdx);
505 
506  _status = Inactive;
507  }
508 }
509 
510 template <class Impl>
511 void
512 DefaultFetch<Impl>::deactivateThread(ThreadID tid)
513 {
514  // Update priority list
515  auto thread_it = std::find(priorityList.begin(), priorityList.end(), tid);
516  if (thread_it != priorityList.end()) {
517  priorityList.erase(thread_it);
518  }
519 }
520 
521 template <class Impl>
522 bool
523 DefaultFetch<Impl>::lookupAndUpdateNextPC(
524  const DynInstPtr &inst, TheISA::PCState &nextPC)
525 {
526  // Do branch prediction check here.
527  // A bit of a misnomer...next_PC is actually the current PC until
528  // this function updates it.
529  bool predict_taken;
530 
531  if (!inst->isControl()) {
532  TheISA::advancePC(nextPC, inst->staticInst);
533  inst->setPredTarg(nextPC);
534  inst->setPredTaken(false);
535  return false;
536  }
537 
538  ThreadID tid = inst->threadNumber;
539  predict_taken = branchPred->predict(inst->staticInst, inst->seqNum,
540  nextPC, tid);
541 
542  if (predict_taken) {
543  DPRINTF(Fetch, "[tid:%i] [sn:%llu] Branch at PC %#x "
544  "predicted to be taken to %s\n",
545  tid, inst->seqNum, inst->pcState().instAddr(), nextPC);
546  } else {
547  DPRINTF(Fetch, "[tid:%i] [sn:%llu] Branch at PC %#x "
548  "predicted to be not taken\n",
549  tid, inst->seqNum, inst->pcState().instAddr());
550  }
551 
552  DPRINTF(Fetch, "[tid:%i] [sn:%llu] Branch at PC %#x "
553  "predicted to go to %s\n",
554  tid, inst->seqNum, inst->pcState().instAddr(), nextPC);
555  inst->setPredTarg(nextPC);
556  inst->setPredTaken(predict_taken);
557 
558  ++fetchStats.branches;
559 
560  if (predict_taken) {
561  ++fetchStats.predictedBranches;
562  }
563 
564  return predict_taken;
565 }
566 
567 template <class Impl>
568 bool
569 DefaultFetch<Impl>::fetchCacheLine(Addr vaddr, ThreadID tid, Addr pc)
570 {
571  Fault fault = NoFault;
572 
573  assert(!cpu->switchedOut());
574 
575  // @todo: not sure if these should block translation.
576  //AlphaDep
577  if (cacheBlocked) {
578  DPRINTF(Fetch, "[tid:%i] Can't fetch cache line, cache blocked\n",
579  tid);
580  return false;
581  } else if (checkInterrupt(pc) && !delayedCommit[tid]) {
582  // Hold off fetch from getting new instructions when:
583  // Cache is blocked, or
584  // while an interrupt is pending and we're not in PAL mode, or
585  // fetch is switched out.
586  DPRINTF(Fetch, "[tid:%i] Can't fetch cache line, interrupt pending\n",
587  tid);
588  return false;
589  }
590 
591  // Align the fetch address to the start of a fetch buffer segment.
592  Addr fetchBufferBlockPC = fetchBufferAlignPC(vaddr);
593 
594  DPRINTF(Fetch, "[tid:%i] Fetching cache line %#x for addr %#x\n",
595  tid, fetchBufferBlockPC, vaddr);
596 
597  // Setup the memReq to do a read of the first instruction's address.
598  // Set the appropriate read size and flags as well.
599  // Build request here.
600  RequestPtr mem_req = std::make_shared<Request>(
601  fetchBufferBlockPC, fetchBufferSize,
602  Request::INST_FETCH, cpu->instRequestorId(), pc,
603  cpu->thread[tid]->contextId());
604 
605  mem_req->taskId(cpu->taskId());
606 
607  memReq[tid] = mem_req;
608 
609  // Initiate translation of the icache block
610  fetchStatus[tid] = ItlbWait;
611  FetchTranslation *trans = new FetchTranslation(this);
612  cpu->itb->translateTiming(mem_req, cpu->thread[tid]->getTC(),
613  trans, BaseTLB::Execute);
614  return true;
615 }
616 
617 template <class Impl>
618 void
619 DefaultFetch<Impl>::finishTranslation(const Fault &fault,
620  const RequestPtr &mem_req)
621 {
622  ThreadID tid = cpu->contextToThread(mem_req->contextId());
623  Addr fetchBufferBlockPC = mem_req->getVaddr();
624 
625  assert(!cpu->switchedOut());
626 
627  // Wake up CPU if it was idle
628  cpu->wakeCPU();
629 
630  if (fetchStatus[tid] != ItlbWait || mem_req != memReq[tid] ||
631  mem_req->getVaddr() != memReq[tid]->getVaddr()) {
632  DPRINTF(Fetch, "[tid:%i] Ignoring itlb completed after squash\n",
633  tid);
634  ++fetchStats.tlbSquashes;
635  return;
636  }
637 
638 
639  // If translation was successful, attempt to read the icache block.
640  if (fault == NoFault) {
641  // Check that we're not going off into random memory
642  // If we have, just wait around for commit to squash something and put
643  // us on the right track
644  if (!cpu->system->isMemAddr(mem_req->getPaddr())) {
645  warn("Address %#x is outside of physical memory, stopping fetch\n",
646  mem_req->getPaddr());
647  fetchStatus[tid] = NoGoodAddr;
648  memReq[tid] = NULL;
649  return;
650  }
651 
652  // Build packet here.
653  PacketPtr data_pkt = new Packet(mem_req, MemCmd::ReadReq);
654  data_pkt->dataDynamic(new uint8_t[fetchBufferSize]);
655 
656  fetchBufferPC[tid] = fetchBufferBlockPC;
657  fetchBufferValid[tid] = false;
658  DPRINTF(Fetch, "Fetch: Doing instruction read.\n");
659 
660  fetchStats.cacheLines++;
661 
662  // Access the cache.
663  if (!icachePort.sendTimingReq(data_pkt)) {
664  assert(retryPkt == NULL);
665  assert(retryTid == InvalidThreadID);
666  DPRINTF(Fetch, "[tid:%i] Out of MSHRs!\n", tid);
667 
668  fetchStatus[tid] = IcacheWaitRetry;
669  retryPkt = data_pkt;
670  retryTid = tid;
671  cacheBlocked = true;
672  } else {
673  DPRINTF(Fetch, "[tid:%i] Doing Icache access.\n", tid);
674  DPRINTF(Activity, "[tid:%i] Activity: Waiting on I-cache "
675  "response.\n", tid);
676  lastIcacheStall[tid] = curTick();
677  fetchStatus[tid] = IcacheWaitResponse;
678  // Notify Fetch Request probe when a packet containing a fetch
679  // request is successfully sent
680  ppFetchRequestSent->notify(mem_req);
681  }
682  } else {
683  // Don't send an instruction to decode if we can't handle it.
684  if (!(numInst < fetchWidth) || !(fetchQueue[tid].size() < fetchQueueSize)) {
685  assert(!finishTranslationEvent.scheduled());
686  finishTranslationEvent.setFault(fault);
687  finishTranslationEvent.setReq(mem_req);
688  cpu->schedule(finishTranslationEvent,
689  cpu->clockEdge(Cycles(1)));
690  return;
691  }
692  DPRINTF(Fetch, "[tid:%i] Got back req with addr %#x but expected %#x\n",
693  tid, mem_req->getVaddr(), memReq[tid]->getVaddr());
694  // Translation faulted, icache request won't be sent.
695  memReq[tid] = NULL;
696 
697  // Send the fault to commit. This thread will not do anything
698  // until commit handles the fault. The only other way it can
699  // wake up is if a squash comes along and changes the PC.
700  TheISA::PCState fetchPC = pc[tid];
701 
702  DPRINTF(Fetch, "[tid:%i] Translation faulted, building noop.\n", tid);
703  // We will use a nop in order to carry the fault.
704  DynInstPtr instruction = buildInst(tid, StaticInst::nopStaticInstPtr,
705  NULL, fetchPC, fetchPC, false);
706  instruction->setNotAnInst();
707 
708  instruction->setPredTarg(fetchPC);
709  instruction->fault = fault;
710  wroteToTimeBuffer = true;
711 
712  DPRINTF(Activity, "Activity this cycle.\n");
713  cpu->activityThisCycle();
714 
715  fetchStatus[tid] = TrapPending;
716 
717  DPRINTF(Fetch, "[tid:%i] Blocked, need to handle the trap.\n", tid);
718  DPRINTF(Fetch, "[tid:%i] fault (%s) detected @ PC %s.\n",
719  tid, fault->name(), pc[tid]);
720  }
721  _status = updateFetchStatus();
722 }
723 
724 template <class Impl>
725 inline void
726 DefaultFetch<Impl>::doSquash(const TheISA::PCState &newPC,
727  const DynInstPtr squashInst, ThreadID tid)
728 {
729  DPRINTF(Fetch, "[tid:%i] Squashing, setting PC to: %s.\n",
730  tid, newPC);
731 
732  pc[tid] = newPC;
733  fetchOffset[tid] = 0;
734  if (squashInst && squashInst->pcState().instAddr() == newPC.instAddr())
735  macroop[tid] = squashInst->macroop;
736  else
737  macroop[tid] = NULL;
738  decoder[tid]->reset();
739 
740  // Clear the icache miss if it's outstanding.
741  if (fetchStatus[tid] == IcacheWaitResponse) {
742  DPRINTF(Fetch, "[tid:%i] Squashing outstanding Icache miss.\n",
743  tid);
744  memReq[tid] = NULL;
745  } else if (fetchStatus[tid] == ItlbWait) {
746  DPRINTF(Fetch, "[tid:%i] Squashing outstanding ITLB miss.\n",
747  tid);
748  memReq[tid] = NULL;
749  }
750 
751  // Get rid of the retrying packet if it was from this thread.
752  if (retryTid == tid) {
753  assert(cacheBlocked);
754  if (retryPkt) {
755  delete retryPkt;
756  }
757  retryPkt = NULL;
758  retryTid = InvalidThreadID;
759  }
760 
761  fetchStatus[tid] = Squashing;
762 
763  // Empty fetch queue
764  fetchQueue[tid].clear();
765 
766  // microops are being squashed, it is not known whether the
767  // youngest non-squashed microop was marked delayed commit
768  // or not. Setting the flag to true ensures that the
769  // interrupts are not handled when they cannot be, though
770  // some opportunities to handle interrupts may be missed.
771  delayedCommit[tid] = true;
772 
773  ++fetchStats.squashCycles;
774 }
775 
776 template<class Impl>
777 void
778 DefaultFetch<Impl>::squashFromDecode(const TheISA::PCState &newPC,
779  const DynInstPtr squashInst,
780  const InstSeqNum seq_num, ThreadID tid)
781 {
782  DPRINTF(Fetch, "[tid:%i] Squashing from decode.\n", tid);
783 
784  doSquash(newPC, squashInst, tid);
785 
786  // Tell the CPU to remove any instructions that are in flight between
787  // fetch and decode.
788  cpu->removeInstsUntil(seq_num, tid);
789 }
790 
791 template<class Impl>
792 bool
793 DefaultFetch<Impl>::checkStall(ThreadID tid) const
794 {
795  bool ret_val = false;
796 
797  if (stalls[tid].drain) {
798  assert(cpu->isDraining());
799  DPRINTF(Fetch,"[tid:%i] Drain stall detected.\n",tid);
800  ret_val = true;
801  }
802 
803  return ret_val;
804 }
805 
806 template<class Impl>
807 typename DefaultFetch<Impl>::FetchStatus
808 DefaultFetch<Impl>::updateFetchStatus()
809 {
810  //Check Running
811  list<ThreadID>::iterator threads = activeThreads->begin();
812  list<ThreadID>::iterator end = activeThreads->end();
813 
814  while (threads != end) {
815  ThreadID tid = *threads++;
816 
817  if (fetchStatus[tid] == Running ||
818  fetchStatus[tid] == Squashing ||
819  fetchStatus[tid] == IcacheAccessComplete) {
820 
821  if (_status == Inactive) {
822  DPRINTF(Activity, "[tid:%i] Activating stage.\n",tid);
823 
824  if (fetchStatus[tid] == IcacheAccessComplete) {
825  DPRINTF(Activity, "[tid:%i] Activating fetch due to cache"
826  "completion\n",tid);
827  }
828 
829  cpu->activateStage(O3CPU::FetchIdx);
830  }
831 
832  return Active;
833  }
834  }
835 
836  // Stage is switching from active to inactive, notify CPU of it.
837  if (_status == Active) {
838  DPRINTF(Activity, "Deactivating stage.\n");
839 
840  cpu->deactivateStage(O3CPU::FetchIdx);
841  }
842 
843  return Inactive;
844 }
845 
846 template <class Impl>
847 void
848 DefaultFetch<Impl>::squash(const TheISA::PCState &newPC,
849  const InstSeqNum seq_num, DynInstPtr squashInst,
850  ThreadID tid)
851 {
852  DPRINTF(Fetch, "[tid:%i] Squash from commit.\n", tid);
853 
854  doSquash(newPC, squashInst, tid);
855 
856  // Tell the CPU to remove any instructions that are not in the ROB.
857  cpu->removeInstsNotInROB(tid);
858 }
859 
860 template <class Impl>
861 void
862 DefaultFetch<Impl>::tick()
863 {
864  list<ThreadID>::iterator threads = activeThreads->begin();
865  list<ThreadID>::iterator end = activeThreads->end();
866  bool status_change = false;
867 
868  wroteToTimeBuffer = false;
869 
870  for (ThreadID i = 0; i < numThreads; ++i) {
871  issuePipelinedIfetch[i] = false;
872  }
873 
874  while (threads != end) {
875  ThreadID tid = *threads++;
876 
877  // Check the signals for each thread to determine the proper status
878  // for each thread.
879  bool updated_status = checkSignalsAndUpdate(tid);
880  status_change = status_change || updated_status;
881  }
882 
883  DPRINTF(Fetch, "Running stage.\n");
884 
885  if (FullSystem) {
886  if (fromCommit->commitInfo[0].interruptPending) {
887  interruptPending = true;
888  }
889 
890  if (fromCommit->commitInfo[0].clearInterrupt) {
891  interruptPending = false;
892  }
893  }
894 
895  for (threadFetched = 0; threadFetched < numFetchingThreads;
896  threadFetched++) {
897  // Fetch each of the actively fetching threads.
898  fetch(status_change);
899  }
900 
901  // Record number of instructions fetched this cycle for distribution.
902  fetchStats.nisnDist.sample(numInst);
903 
904  if (status_change) {
905  // Change the fetch stage status if there was a status change.
906  _status = updateFetchStatus();
907  }
908 
909  // Issue the next I-cache request if possible.
910  for (ThreadID i = 0; i < numThreads; ++i) {
911  if (issuePipelinedIfetch[i]) {
912  pipelineIcacheAccesses(i);
913  }
914  }
915 
916  // Send instructions enqueued into the fetch queue to decode.
917  // Limit rate by fetchWidth. Stall if decode is stalled.
918  unsigned insts_to_decode = 0;
919  unsigned available_insts = 0;
920 
921  for (auto tid : *activeThreads) {
922  if (!stalls[tid].decode) {
923  available_insts += fetchQueue[tid].size();
924  }
925  }
926 
927  // Pick a random thread to start trying to grab instructions from
928  auto tid_itr = activeThreads->begin();
929  std::advance(tid_itr, random_mt.random<uint8_t>(0, activeThreads->size() - 1));
930 
931  while (available_insts != 0 && insts_to_decode < decodeWidth) {
932  ThreadID tid = *tid_itr;
933  if (!stalls[tid].decode && !fetchQueue[tid].empty()) {
934  const auto& inst = fetchQueue[tid].front();
935  toDecode->insts[toDecode->size++] = inst;
936  DPRINTF(Fetch, "[tid:%i] [sn:%llu] Sending instruction to decode "
937  "from fetch queue. Fetch queue size: %i.\n",
938  tid, inst->seqNum, fetchQueue[tid].size());
939 
940  wroteToTimeBuffer = true;
941  fetchQueue[tid].pop_front();
942  insts_to_decode++;
943  available_insts--;
944  }
945 
946  tid_itr++;
947  // Wrap around if at end of active threads list
948  if (tid_itr == activeThreads->end())
949  tid_itr = activeThreads->begin();
950  }
951 
952  // If there was activity this cycle, inform the CPU of it.
953  if (wroteToTimeBuffer) {
954  DPRINTF(Activity, "Activity this cycle.\n");
955  cpu->activityThisCycle();
956  }
957 
958  // Reset the number of the instruction we've fetched.
959  numInst = 0;
960 }
961 
962 template <class Impl>
963 bool
964 DefaultFetch<Impl>::checkSignalsAndUpdate(ThreadID tid)
965 {
966  // Update the per thread stall statuses.
967  if (fromDecode->decodeBlock[tid]) {
968  stalls[tid].decode = true;
969  }
970 
971  if (fromDecode->decodeUnblock[tid]) {
972  assert(stalls[tid].decode);
973  assert(!fromDecode->decodeBlock[tid]);
974  stalls[tid].decode = false;
975  }
976 
977  // Check squash signals from commit.
978  if (fromCommit->commitInfo[tid].squash) {
979 
980  DPRINTF(Fetch, "[tid:%i] Squashing instructions due to squash "
981  "from commit.\n",tid);
982  // In any case, squash.
983  squash(fromCommit->commitInfo[tid].pc,
984  fromCommit->commitInfo[tid].doneSeqNum,
985  fromCommit->commitInfo[tid].squashInst, tid);
986 
987  // If it was a branch mispredict on a control instruction, update the
988  // branch predictor with that instruction, otherwise just kill the
989  // invalid state we generated in after sequence number
990  if (fromCommit->commitInfo[tid].mispredictInst &&
991  fromCommit->commitInfo[tid].mispredictInst->isControl()) {
992  branchPred->squash(fromCommit->commitInfo[tid].doneSeqNum,
993  fromCommit->commitInfo[tid].pc,
994  fromCommit->commitInfo[tid].branchTaken,
995  tid);
996  } else {
997  branchPred->squash(fromCommit->commitInfo[tid].doneSeqNum,
998  tid);
999  }
1000 
1001  return true;
1002  } else if (fromCommit->commitInfo[tid].doneSeqNum) {
1003  // Update the branch predictor if it wasn't a squashed instruction
1004  // that was broadcasted.
1005  branchPred->update(fromCommit->commitInfo[tid].doneSeqNum, tid);
1006  }
1007 
1008  // Check squash signals from decode.
1009  if (fromDecode->decodeInfo[tid].squash) {
1010  DPRINTF(Fetch, "[tid:%i] Squashing instructions due to squash "
1011  "from decode.\n",tid);
1012 
1013  // Update the branch predictor.
1014  if (fromDecode->decodeInfo[tid].branchMispredict) {
1015  branchPred->squash(fromDecode->decodeInfo[tid].doneSeqNum,
1016  fromDecode->decodeInfo[tid].nextPC,
1017  fromDecode->decodeInfo[tid].branchTaken,
1018  tid);
1019  } else {
1020  branchPred->squash(fromDecode->decodeInfo[tid].doneSeqNum,
1021  tid);
1022  }
1023 
1024  if (fetchStatus[tid] != Squashing) {
1025 
1026  DPRINTF(Fetch, "Squashing from decode with PC = %s\n",
1027  fromDecode->decodeInfo[tid].nextPC);
1028  // Squash unless we're already squashing
1029  squashFromDecode(fromDecode->decodeInfo[tid].nextPC,
1030  fromDecode->decodeInfo[tid].squashInst,
1031  fromDecode->decodeInfo[tid].doneSeqNum,
1032  tid);
1033 
1034  return true;
1035  }
1036  }
1037 
1038  if (checkStall(tid) &&
1039  fetchStatus[tid] != IcacheWaitResponse &&
1040  fetchStatus[tid] != IcacheWaitRetry &&
1041  fetchStatus[tid] != ItlbWait &&
1042  fetchStatus[tid] != QuiescePending) {
1043  DPRINTF(Fetch, "[tid:%i] Setting to blocked\n",tid);
1044 
1045  fetchStatus[tid] = Blocked;
1046 
1047  return true;
1048  }
1049 
1050  if (fetchStatus[tid] == Blocked ||
1051  fetchStatus[tid] == Squashing) {
1052  // Switch status to running if fetch isn't being told to block or
1053  // squash this cycle.
1054  DPRINTF(Fetch, "[tid:%i] Done squashing, switching to running.\n",
1055  tid);
1056 
1057  fetchStatus[tid] = Running;
1058 
1059  return true;
1060  }
1061 
1062  // If we've reached this point, we have not gotten any signals that
1063  // cause fetch to change its status. Fetch remains the same as before.
1064  return false;
1065 }
1066 
1067 template<class Impl>
1068 typename Impl::DynInstPtr
1069 DefaultFetch<Impl>::buildInst(ThreadID tid, StaticInstPtr staticInst,
1070  StaticInstPtr curMacroop, TheISA::PCState thisPC,
1071  TheISA::PCState nextPC, bool trace)
1072 {
1073  // Get a sequence number.
1074  InstSeqNum seq = cpu->getAndIncrementInstSeq();
1075 
1076  // Create a new DynInst from the instruction fetched.
1077  DynInstPtr instruction =
1078  new DynInst(staticInst, curMacroop, thisPC, nextPC, seq, cpu);
1079  instruction->setTid(tid);
1080 
1081  instruction->setThreadState(cpu->thread[tid]);
1082 
1083  DPRINTF(Fetch, "[tid:%i] Instruction PC %#x (%d) created "
1084  "[sn:%lli].\n", tid, thisPC.instAddr(),
1085  thisPC.microPC(), seq);
1086 
1087  DPRINTF(Fetch, "[tid:%i] Instruction is: %s\n", tid,
1088  instruction->staticInst->
1089  disassemble(thisPC.instAddr()));
1090 
1091 #if TRACING_ON
1092  if (trace) {
1093  instruction->traceData =
1094  cpu->getTracer()->getInstRecord(curTick(), cpu->tcBase(tid),
1095  instruction->staticInst, thisPC, curMacroop);
1096  }
1097 #else
1098  instruction->traceData = NULL;
1099 #endif
1100 
1101  // Add instruction to the CPU's list of instructions.
1102  instruction->setInstListIt(cpu->addInst(instruction));
1103 
1104  // Write the instruction to the first slot in the queue
1105  // that heads to decode.
1106  assert(numInst < fetchWidth);
1107  fetchQueue[tid].push_back(instruction);
1108  assert(fetchQueue[tid].size() <= fetchQueueSize);
1109  DPRINTF(Fetch, "[tid:%i] Fetch queue entry created (%i/%i).\n",
1110  tid, fetchQueue[tid].size(), fetchQueueSize);
1111  //toDecode->insts[toDecode->size++] = instruction;
1112 
1113  // Keep track of if we can take an interrupt at this boundary
1114  delayedCommit[tid] = instruction->isDelayedCommit();
1115 
1116  return instruction;
1117 }
1118 
1119 template<class Impl>
1120 void
1121 DefaultFetch<Impl>::fetch(bool &status_change)
1122 {
1123  //////////////////////////////////////////
1124  // Start actual fetch
1125  //////////////////////////////////////////
1126  ThreadID tid = getFetchingThread();
1127 
1128  assert(!cpu->switchedOut());
1129 
1130  if (tid == InvalidThreadID) {
1131  // Breaks looping condition in tick()
1132  threadFetched = numFetchingThreads;
1133 
1134  if (numThreads == 1) { // @todo Per-thread stats
1135  profileStall(0);
1136  }
1137 
1138  return;
1139  }
1140 
1141  DPRINTF(Fetch, "Attempting to fetch from [tid:%i]\n", tid);
1142 
1143  // The current PC.
1144  TheISA::PCState thisPC = pc[tid];
1145 
1146  Addr pcOffset = fetchOffset[tid];
1147  Addr fetchAddr = (thisPC.instAddr() + pcOffset) & BaseCPU::PCMask;
1148 
1149  bool inRom = isRomMicroPC(thisPC.microPC());
1150 
1151  // If returning from the delay of a cache miss, then update the status
1152  // to running, otherwise do the cache access. Possibly move this up
1153  // to tick() function.
1154  if (fetchStatus[tid] == IcacheAccessComplete) {
1155  DPRINTF(Fetch, "[tid:%i] Icache miss is complete.\n", tid);
1156 
1157  fetchStatus[tid] = Running;
1158  status_change = true;
1159  } else if (fetchStatus[tid] == Running) {
1160  // Align the fetch PC so its at the start of a fetch buffer segment.
1161  Addr fetchBufferBlockPC = fetchBufferAlignPC(fetchAddr);
1162 
1163  // If buffer is no longer valid or fetchAddr has moved to point
1164  // to the next cache block, AND we have no remaining ucode
1165  // from a macro-op, then start fetch from icache.
1166  if (!(fetchBufferValid[tid] && fetchBufferBlockPC == fetchBufferPC[tid])
1167  && !inRom && !macroop[tid]) {
1168  DPRINTF(Fetch, "[tid:%i] Attempting to translate and read "
1169  "instruction, starting at PC %s.\n", tid, thisPC);
1170 
1171  fetchCacheLine(fetchAddr, tid, thisPC.instAddr());
1172 
1173  if (fetchStatus[tid] == IcacheWaitResponse)
1174  ++fetchStats.icacheStallCycles;
1175  else if (fetchStatus[tid] == ItlbWait)
1176  ++fetchStats.tlbCycles;
1177  else
1178  ++fetchStats.miscStallCycles;
1179  return;
1180  } else if ((checkInterrupt(thisPC.instAddr()) && !delayedCommit[tid])) {
1181  // Stall CPU if an interrupt is posted and we're not issuing
1182  // a delayed commit micro-op currently (delayed commit instructions
1183  // are not interruptable by interrupts, only faults)
1184  ++fetchStats.miscStallCycles;
1185  DPRINTF(Fetch, "[tid:%i] Fetch is stalled!\n", tid);
1186  return;
1187  }
1188  } else {
1189  if (fetchStatus[tid] == Idle) {
1190  ++fetchStats.idleCycles;
1191  DPRINTF(Fetch, "[tid:%i] Fetch is idle!\n", tid);
1192  }
1193 
1194  // Status is Idle, so fetch should do nothing.
1195  return;
1196  }
1197 
1198  ++fetchStats.cycles;
1199 
1200  TheISA::PCState nextPC = thisPC;
1201 
1202  StaticInstPtr staticInst = NULL;
1203  StaticInstPtr curMacroop = macroop[tid];
1204 
1205  // If the read of the first instruction was successful, then grab the
1206  // instructions from the rest of the cache line and put them into the
1207  // queue heading to decode.
1208 
1209  DPRINTF(Fetch, "[tid:%i] Adding instructions to queue to "
1210  "decode.\n", tid);
1211 
1212  // Need to keep track of whether or not a predicted branch
1213  // ended this fetch block.
1214  bool predictedBranch = false;
1215 
1216  // Need to halt fetch if quiesce instruction detected
1217  bool quiesce = false;
1218 
1219  TheISA::MachInst *cacheInsts =
1220  reinterpret_cast<TheISA::MachInst *>(fetchBuffer[tid]);
1221 
1222  const unsigned numInsts = fetchBufferSize / instSize;
1223  unsigned blkOffset = (fetchAddr - fetchBufferPC[tid]) / instSize;
1224 
1225  // Loop through instruction memory from the cache.
1226  // Keep issuing while fetchWidth is available and branch is not
1227  // predicted taken
1228  while (numInst < fetchWidth && fetchQueue[tid].size() < fetchQueueSize
1229  && !predictedBranch && !quiesce) {
1230  // We need to process more memory if we aren't going to get a
1231  // StaticInst from the rom, the current macroop, or what's already
1232  // in the decoder.
1233  bool needMem = !inRom && !curMacroop &&
1234  !decoder[tid]->instReady();
1235  fetchAddr = (thisPC.instAddr() + pcOffset) & BaseCPU::PCMask;
1236  Addr fetchBufferBlockPC = fetchBufferAlignPC(fetchAddr);
1237 
1238  if (needMem) {
1239  // If buffer is no longer valid or fetchAddr has moved to point
1240  // to the next cache block then start fetch from icache.
1241  if (!fetchBufferValid[tid] ||
1242  fetchBufferBlockPC != fetchBufferPC[tid])
1243  break;
1244 
1245  if (blkOffset >= numInsts) {
1246  // We need to process more memory, but we've run out of the
1247  // current block.
1248  break;
1249  }
1250 
1251  decoder[tid]->moreBytes(thisPC, fetchAddr, cacheInsts[blkOffset]);
1252 
1253  if (decoder[tid]->needMoreBytes()) {
1254  blkOffset++;
1255  fetchAddr += instSize;
1256  pcOffset += instSize;
1257  }
1258  }
1259 
1260  // Extract as many instructions and/or microops as we can from
1261  // the memory we've processed so far.
1262  do {
1263  if (!(curMacroop || inRom)) {
1264  if (decoder[tid]->instReady()) {
1265  staticInst = decoder[tid]->decode(thisPC);
1266 
1267  // Increment stat of fetched instructions.
1268  ++fetchStats.insts;
1269 
1270  if (staticInst->isMacroop()) {
1271  curMacroop = staticInst;
1272  } else {
1273  pcOffset = 0;
1274  }
1275  } else {
1276  // We need more bytes for this instruction so blkOffset and
1277  // pcOffset will be updated
1278  break;
1279  }
1280  }
1281  // Whether we're moving to a new macroop because we're at the
1282  // end of the current one, or the branch predictor incorrectly
1283  // thinks we are...
1284  bool newMacro = false;
1285  if (curMacroop || inRom) {
1286  if (inRom) {
1287  staticInst = decoder[tid]->fetchRomMicroop(
1288  thisPC.microPC(), curMacroop);
1289  } else {
1290  staticInst = curMacroop->fetchMicroop(thisPC.microPC());
1291  }
1292  newMacro |= staticInst->isLastMicroop();
1293  }
1294 
1295  DynInstPtr instruction =
1296  buildInst(tid, staticInst, curMacroop,
1297  thisPC, nextPC, true);
1298 
1299  ppFetch->notify(instruction);
1300  numInst++;
1301 
1302 #if TRACING_ON
1303  if (DTRACE(O3PipeView)) {
1304  instruction->fetchTick = curTick();
1305  }
1306 #endif
1307 
1308  nextPC = thisPC;
1309 
1310  // If we're branching after this instruction, quit fetching
1311  // from the same block.
1312  predictedBranch |= thisPC.branching();
1313  predictedBranch |=
1314  lookupAndUpdateNextPC(instruction, nextPC);
1315  if (predictedBranch) {
1316  DPRINTF(Fetch, "Branch detected with PC = %s\n", thisPC);
1317  }
1318 
1319  newMacro |= thisPC.instAddr() != nextPC.instAddr();
1320 
1321  // Move to the next instruction, unless we have a branch.
1322  thisPC = nextPC;
1323  inRom = isRomMicroPC(thisPC.microPC());
1324 
1325  if (newMacro) {
1326  fetchAddr = thisPC.instAddr() & BaseCPU::PCMask;
1327  blkOffset = (fetchAddr - fetchBufferPC[tid]) / instSize;
1328  pcOffset = 0;
1329  curMacroop = NULL;
1330  }
1331 
1332  if (instruction->isQuiesce()) {
1333  DPRINTF(Fetch,
1334  "Quiesce instruction encountered, halting fetch!\n");
1335  fetchStatus[tid] = QuiescePending;
1336  status_change = true;
1337  quiesce = true;
1338  break;
1339  }
1340  } while ((curMacroop || decoder[tid]->instReady()) &&
1341  numInst < fetchWidth &&
1342  fetchQueue[tid].size() < fetchQueueSize);
1343 
1344  // Re-evaluate whether the next instruction to fetch is in micro-op ROM
1345  // or not.
1346  inRom = isRomMicroPC(thisPC.microPC());
1347  }
1348 
1349  if (predictedBranch) {
1350  DPRINTF(Fetch, "[tid:%i] Done fetching, predicted branch "
1351  "instruction encountered.\n", tid);
1352  } else if (numInst >= fetchWidth) {
1353  DPRINTF(Fetch, "[tid:%i] Done fetching, reached fetch bandwidth "
1354  "for this cycle.\n", tid);
1355  } else if (blkOffset >= fetchBufferSize) {
1356  DPRINTF(Fetch, "[tid:%i] Done fetching, reached the end of the"
1357  "fetch buffer.\n", tid);
1358  }
1359 
1360  macroop[tid] = curMacroop;
1361  fetchOffset[tid] = pcOffset;
1362 
1363  if (numInst > 0) {
1364  wroteToTimeBuffer = true;
1365  }
1366 
1367  pc[tid] = thisPC;
1368 
1369  // pipeline a fetch if we're crossing a fetch buffer boundary and not in
1370  // a state that would preclude fetching
1371  fetchAddr = (thisPC.instAddr() + pcOffset) & BaseCPU::PCMask;
1372  Addr fetchBufferBlockPC = fetchBufferAlignPC(fetchAddr);
1373  issuePipelinedIfetch[tid] = fetchBufferBlockPC != fetchBufferPC[tid] &&
1374  fetchStatus[tid] != IcacheWaitResponse &&
1375  fetchStatus[tid] != ItlbWait &&
1376  fetchStatus[tid] != IcacheWaitRetry &&
1377  fetchStatus[tid] != QuiescePending &&
1378  !curMacroop;
1379 }
1380 
1381 template<class Impl>
1382 void
1383 DefaultFetch<Impl>::recvReqRetry()
1384 {
1385  if (retryPkt != NULL) {
1386  assert(cacheBlocked);
1387  assert(retryTid != InvalidThreadID);
1388  assert(fetchStatus[retryTid] == IcacheWaitRetry);
1389 
1390  if (icachePort.sendTimingReq(retryPkt)) {
1391  fetchStatus[retryTid] = IcacheWaitResponse;
1392  // Notify Fetch Request probe when a retryPkt is successfully sent.
1393  // Note that notify must be called before retryPkt is set to NULL.
1394  ppFetchRequestSent->notify(retryPkt->req);
1395  retryPkt = NULL;
1396  retryTid = InvalidThreadID;
1397  cacheBlocked = false;
1398  }
1399  } else {
1400  assert(retryTid == InvalidThreadID);
1401  // Access has been squashed since it was sent out. Just clear
1402  // the cache being blocked.
1403  cacheBlocked = false;
1404  }
1405 }
1406 
1408 // //
1409 // SMT FETCH POLICY MAINTAINED HERE //
1410 // //
1412 template<class Impl>
1413 ThreadID
1414 DefaultFetch<Impl>::getFetchingThread()
1415 {
1416  if (numThreads > 1) {
1417  switch (fetchPolicy) {
1418  case FetchPolicy::RoundRobin:
1419  return roundRobin();
1420  case FetchPolicy::IQCount:
1421  return iqCount();
1422  case FetchPolicy::LSQCount:
1423  return lsqCount();
1424  case FetchPolicy::Branch:
1425  return branchCount();
1426  default:
1427  return InvalidThreadID;
1428  }
1429  } else {
1430  list<ThreadID>::iterator thread = activeThreads->begin();
1431  if (thread == activeThreads->end()) {
1432  return InvalidThreadID;
1433  }
1434 
1435  ThreadID tid = *thread;
1436 
1437  if (fetchStatus[tid] == Running ||
1438  fetchStatus[tid] == IcacheAccessComplete ||
1439  fetchStatus[tid] == Idle) {
1440  return tid;
1441  } else {
1442  return InvalidThreadID;
1443  }
1444  }
1445 }
1446 
1447 
1448 template<class Impl>
1449 ThreadID
1450 DefaultFetch<Impl>::roundRobin()
1451 {
1452  list<ThreadID>::iterator pri_iter = priorityList.begin();
1453  list<ThreadID>::iterator end = priorityList.end();
1454 
1455  ThreadID high_pri;
1456 
1457  while (pri_iter != end) {
1458  high_pri = *pri_iter;
1459 
1460  assert(high_pri <= numThreads);
1461 
1462  if (fetchStatus[high_pri] == Running ||
1463  fetchStatus[high_pri] == IcacheAccessComplete ||
1464  fetchStatus[high_pri] == Idle) {
1465 
1466  priorityList.erase(pri_iter);
1467  priorityList.push_back(high_pri);
1468 
1469  return high_pri;
1470  }
1471 
1472  pri_iter++;
1473  }
1474 
1475  return InvalidThreadID;
1476 }
1477 
1478 template<class Impl>
1479 ThreadID
1480 DefaultFetch<Impl>::iqCount()
1481 {
1482  //sorted from lowest->highest
1483  std::priority_queue<unsigned,vector<unsigned>,
1484  std::greater<unsigned> > PQ;
1485  std::map<unsigned, ThreadID> threadMap;
1486 
1487  list<ThreadID>::iterator threads = activeThreads->begin();
1488  list<ThreadID>::iterator end = activeThreads->end();
1489 
1490  while (threads != end) {
1491  ThreadID tid = *threads++;
1492  unsigned iqCount = fromIEW->iewInfo[tid].iqCount;
1493 
1494  //we can potentially get tid collisions if two threads
1495  //have the same iqCount, but this should be rare.
1496  PQ.push(iqCount);
1497  threadMap[iqCount] = tid;
1498  }
1499 
1500  while (!PQ.empty()) {
1501  ThreadID high_pri = threadMap[PQ.top()];
1502 
1503  if (fetchStatus[high_pri] == Running ||
1504  fetchStatus[high_pri] == IcacheAccessComplete ||
1505  fetchStatus[high_pri] == Idle)
1506  return high_pri;
1507  else
1508  PQ.pop();
1509 
1510  }
1511 
1512  return InvalidThreadID;
1513 }
1514 
1515 template<class Impl>
1516 ThreadID
1517 DefaultFetch<Impl>::lsqCount()
1518 {
1519  //sorted from lowest->highest
1520  std::priority_queue<unsigned,vector<unsigned>,
1521  std::greater<unsigned> > PQ;
1522  std::map<unsigned, ThreadID> threadMap;
1523 
1524  list<ThreadID>::iterator threads = activeThreads->begin();
1525  list<ThreadID>::iterator end = activeThreads->end();
1526 
1527  while (threads != end) {
1528  ThreadID tid = *threads++;
1529  unsigned ldstqCount = fromIEW->iewInfo[tid].ldstqCount;
1530 
1531  //we can potentially get tid collisions if two threads
1532  //have the same iqCount, but this should be rare.
1533  PQ.push(ldstqCount);
1534  threadMap[ldstqCount] = tid;
1535  }
1536 
1537  while (!PQ.empty()) {
1538  ThreadID high_pri = threadMap[PQ.top()];
1539 
1540  if (fetchStatus[high_pri] == Running ||
1541  fetchStatus[high_pri] == IcacheAccessComplete ||
1542  fetchStatus[high_pri] == Idle)
1543  return high_pri;
1544  else
1545  PQ.pop();
1546  }
1547 
1548  return InvalidThreadID;
1549 }
1550 
1551 template<class Impl>
1552 ThreadID
1553 DefaultFetch<Impl>::branchCount()
1554 {
1555  panic("Branch Count Fetch policy unimplemented\n");
1556  return InvalidThreadID;
1557 }
1558 
1559 template<class Impl>
1560 void
1561 DefaultFetch<Impl>::pipelineIcacheAccesses(ThreadID tid)
1562 {
1563  if (!issuePipelinedIfetch[tid]) {
1564  return;
1565  }
1566 
1567  // The next PC to access.
1568  TheISA::PCState thisPC = pc[tid];
1569 
1570  if (isRomMicroPC(thisPC.microPC())) {
1571  return;
1572  }
1573 
1574  Addr pcOffset = fetchOffset[tid];
1575  Addr fetchAddr = (thisPC.instAddr() + pcOffset) & BaseCPU::PCMask;
1576 
1577  // Align the fetch PC so its at the start of a fetch buffer segment.
1578  Addr fetchBufferBlockPC = fetchBufferAlignPC(fetchAddr);
1579 
1580  // Unless buffer already got the block, fetch it from icache.
1581  if (!(fetchBufferValid[tid] && fetchBufferBlockPC == fetchBufferPC[tid])) {
1582  DPRINTF(Fetch, "[tid:%i] Issuing a pipelined I-cache access, "
1583  "starting at PC %s.\n", tid, thisPC);
1584 
1585  fetchCacheLine(fetchAddr, tid, thisPC.instAddr());
1586  }
1587 }
1588 
1589 template<class Impl>
1590 void
1591 DefaultFetch<Impl>::profileStall(ThreadID tid) {
1592  DPRINTF(Fetch,"There are no more threads available to fetch from.\n");
1593 
1594  // @todo Per-thread stats
1595 
1596  if (stalls[tid].drain) {
1597  ++fetchStats.pendingDrainCycles;
1598  DPRINTF(Fetch, "Fetch is waiting for a drain!\n");
1599  } else if (activeThreads->empty()) {
1600  ++fetchStats.noActiveThreadStallCycles;
1601  DPRINTF(Fetch, "Fetch has no active thread!\n");
1602  } else if (fetchStatus[tid] == Blocked) {
1603  ++fetchStats.blockedCycles;
1604  DPRINTF(Fetch, "[tid:%i] Fetch is blocked!\n", tid);
1605  } else if (fetchStatus[tid] == Squashing) {
1606  ++fetchStats.squashCycles;
1607  DPRINTF(Fetch, "[tid:%i] Fetch is squashing!\n", tid);
1608  } else if (fetchStatus[tid] == IcacheWaitResponse) {
1609  ++fetchStats.icacheStallCycles;
1610  DPRINTF(Fetch, "[tid:%i] Fetch is waiting cache response!\n",
1611  tid);
1612  } else if (fetchStatus[tid] == ItlbWait) {
1613  ++fetchStats.tlbCycles;
1614  DPRINTF(Fetch, "[tid:%i] Fetch is waiting ITLB walk to "
1615  "finish!\n", tid);
1616  } else if (fetchStatus[tid] == TrapPending) {
1617  ++fetchStats.pendingTrapStallCycles;
1618  DPRINTF(Fetch, "[tid:%i] Fetch is waiting for a pending trap!\n",
1619  tid);
1620  } else if (fetchStatus[tid] == QuiescePending) {
1621  ++fetchStats.pendingQuiesceStallCycles;
1622  DPRINTF(Fetch, "[tid:%i] Fetch is waiting for a pending quiesce "
1623  "instruction!\n", tid);
1624  } else if (fetchStatus[tid] == IcacheWaitRetry) {
1625  ++fetchStats.icacheWaitRetryStallCycles;
1626  DPRINTF(Fetch, "[tid:%i] Fetch is waiting for an I-cache retry!\n",
1627  tid);
1628  } else if (fetchStatus[tid] == NoGoodAddr) {
1629  DPRINTF(Fetch, "[tid:%i] Fetch predicted non-executable address\n",
1630  tid);
1631  } else {
1632  DPRINTF(Fetch, "[tid:%i] Unexpected fetch stall reason "
1633  "(Status: %i)\n",
1634  tid, fetchStatus[tid]);
1635  }
1636 }
1637 
1638 template<class Impl>
1639 bool
1640 DefaultFetch<Impl>::IcachePort::recvTimingResp(PacketPtr pkt)
1641 {
1642  DPRINTF(O3CPU, "Fetch unit received timing\n");
1643  // We shouldn't ever get a cacheable block in Modified state
1644  assert(pkt->req->isUncacheable() ||
1645  !(pkt->cacheResponding() && !pkt->hasSharers()));
1646  fetch->processCacheCompletion(pkt);
1647 
1648  return true;
1649 }
1650 
1651 template<class Impl>
1652 void
1653 DefaultFetch<Impl>::IcachePort::recvReqRetry()
1654 {
1655  fetch->recvReqRetry();
1656 }
1657 
1658 #endif//__CPU_O3_FETCH_IMPL_HH__
DefaultFetch::FetchStatGroup::cycles
Stats::Scalar cycles
Stat for total number of cycles spent fetching.
Definition: fetch.hh:563
InvalidThreadID
const ThreadID InvalidThreadID
Definition: types.hh:228
DefaultFetch::switchToActive
void switchToActive()
Changes the status of this stage to active, and indicates this to the CPU.
Definition: fetch_impl.hh:486
fatal
#define fatal(...)
This implements a cprintf based fatal() function.
Definition: logging.hh:183
Event::scheduled
bool scheduled() const
Determine if the current event is scheduled.
Definition: eventq.hh:460
DefaultFetch::FetchStatGroup::pendingQuiesceStallCycles
Stats::Scalar pendingQuiesceStallCycles
Total number of stall cycles caused by pending quiesce instructions.
Definition: fetch.hh:585
DefaultFetch::macroop
StaticInstPtr macroop[Impl::MaxThreads]
Definition: fetch.hh:438
DefaultFetch::FinishTranslationEvent::setReq
void setReq(const RequestPtr &_req)
Definition: fetch.hh:157
DefaultFetch::getFetchingThread
ThreadID getFetchingThread()
Returns the appropriate thread to fetch, given the fetch policy.
Definition: fetch_impl.hh:1414
warn
#define warn(...)
Definition: logging.hh:239
DefaultFetch::commitToFetchDelay
Cycles commitToFetchDelay
Commit to fetch delay.
Definition: fetch.hh:473
DefaultFetch::Stalls::decode
bool decode
Definition: fetch.hh:456
system.hh
Packet::cacheResponding
bool cacheResponding() const
Definition: packet.hh:619
DefaultFetch::fetchBufferAlignPC
Addr fetchBufferAlignPC(Addr addr)
Align a PC to the start of a fetch buffer block.
Definition: fetch.hh:371
DefaultFetch::FetchStatGroup::tlbSquashes
Stats::Scalar tlbSquashes
Total number of outstanding tlb accesses that were dropped due to a squash.
Definition: fetch.hh:597
Packet::hasSharers
bool hasSharers() const
Definition: packet.hh:646
DefaultFetch::switchToInactive
void switchToInactive()
Changes the status of this stage to inactive, and indicates this to the CPU.
Definition: fetch_impl.hh:499
DefaultFetch::IcacheAccessComplete
@ IcacheAccessComplete
Definition: fetch.hh:196
DefaultFetch::finishTranslationEvent
FinishTranslationEvent finishTranslationEvent
Event used to delay fault generation of translation faults.
Definition: fetch.hh:546
DefaultFetch::FetchStatGroup::predictedBranches
Stats::Scalar predictedBranches
Stat for total number of predicted branches.
Definition: fetch.hh:561
DefaultFetch::QuiescePending
@ QuiescePending
Definition: fetch.hh:192
ArmISA::i
Bitfield< 7 > i
Definition: miscregs_types.hh:63
ThreadID
int16_t ThreadID
Thread index/ID type.
Definition: types.hh:227
DefaultFetch::fetchOffset
Addr fetchOffset[Impl::MaxThreads]
Definition: fetch.hh:436
DefaultFetch::FetchStatus
FetchStatus
Overall fetch status.
Definition: fetch.hh:179
ArmISA::MachInst
uint32_t MachInst
Definition: types.hh:52
DefaultFetch::O3CPU
Impl::O3CPU O3CPU
Definition: fetch.hh:78
DefaultFetch::IcacheWaitResponse
@ IcacheWaitResponse
Definition: fetch.hh:194
DefaultFetch::takeOverFrom
void takeOverFrom()
Takes over from another CPU's thread.
Definition: fetch_impl.hh:457
DefaultFetch::FetchStatGroup::pendingTrapStallCycles
Stats::Scalar pendingTrapStallCycles
Total number of stall cycles caused by pending traps.
Definition: fetch.hh:582
DefaultFetch::regProbePoints
void regProbePoints()
Registers probes.
Definition: fetch_impl.hh:157
random.hh
tlb.hh
ProbePointArg< DynInstPtr >
MemCmd::ReadReq
@ ReadReq
Definition: packet.hh:82
DefaultFetch::Active
@ Active
Definition: fetch.hh:180
DefaultFetch::fetchStats
DefaultFetch::FetchStatGroup fetchStats
exetrace.hh
DefaultFetch::NoGoodAddr
@ NoGoodAddr
Definition: fetch.hh:197
DefaultFetch::Blocked
@ Blocked
Definition: fetch.hh:189
DefaultFetch::fetchPolicy
FetchPolicy fetchPolicy
Fetch policy.
Definition: fetch.hh:208
DefaultFetch::squashFromDecode
void squashFromDecode(const TheISA::PCState &newPC, const DynInstPtr squashInst, const InstSeqNum seq_num, ThreadID tid)
Squashes a specific thread and resets the PC.
Definition: fetch_impl.hh:778
DefaultFetch::decodeWidth
unsigned decodeWidth
The width of decode in instructions.
Definition: fetch.hh:479
RequestPtr
std::shared_ptr< Request > RequestPtr
Definition: request.hh:82
DefaultFetch::fetchBufferValid
bool fetchBufferValid[Impl::MaxThreads]
Whether or not the fetch buffer data is valid.
Definition: fetch.hh:514
Packet::req
RequestPtr req
A pointer to the original request.
Definition: packet.hh:340
BPredUnit::drainSanityCheck
void drainSanityCheck() const
Perform sanity checks after a drain.
Definition: bpred_unit.cc:107
DTRACE
#define DTRACE(x)
Definition: debug.hh:146
DefaultFetch::cacheBlkSize
unsigned int cacheBlkSize
Cache block size.
Definition: fetch.hh:491
Packet::dataDynamic
void dataDynamic(T *p)
Set the data pointer to a value that should have delete [] called on it.
Definition: packet.hh:1145
DefaultFetch::fetchBufferPC
Addr fetchBufferPC[Impl::MaxThreads]
The PC of the first instruction loaded into the fetch buffer.
Definition: fetch.hh:505
FullSystem
bool FullSystem
The FullSystem variable can be used to determine the current mode of simulation.
Definition: root.cc:132
DefaultFetch::FetchStatGroup::cacheLines
Stats::Scalar cacheLines
Stat for total number of fetched cache lines.
Definition: fetch.hh:589
DefaultFetch::FetchStatGroup::branchRate
Stats::Formula branchRate
Number of branch fetches per cycle.
Definition: fetch.hh:603
ProbePointArg::notify
void notify(const Arg &arg)
called at the ProbePoint call site, passes arg to each listener.
Definition: probe.hh:286
DefaultFetch::fromIEW
TimeBuffer< TimeStruct >::wire fromIEW
Wire to get iew's information from backwards time buffer.
Definition: fetch.hh:422
DefaultFetch::deactivateThread
void deactivateThread(ThreadID tid)
For priority-based fetch policies, need to keep update priorityList.
Definition: fetch_impl.hh:512
TimeBuffer::size
unsigned size
Definition: timebuf.hh:42
DefaultFetch::numFetchingThreads
ThreadID numFetchingThreads
Number of threads that are actively fetching.
Definition: fetch.hh:529
BPredUnit::update
void update(const InstSeqNum &done_sn, ThreadID tid)
Tells the branch predictor to commit any updates until the given sequence number.
Definition: bpred_unit.cc:291
DefaultFetch::FetchStatGroup::noActiveThreadStallCycles
Stats::Scalar noActiveThreadStallCycles
Total number of stall cycles caused by no active threads to run.
Definition: fetch.hh:580
ArmISA::advancePC
void advancePC(PCState &pc, const StaticInstPtr &inst)
Definition: utility.hh:405
DefaultFetch::tick
void tick()
Ticks the fetch stage, processing all inputs signals and fetching as many instructions as possible.
Definition: fetch_impl.hh:862
DefaultFetch::Running
@ Running
Definition: fetch.hh:186
TimeBuffer< TimeStruct >
DefaultFetch::decoder
TheISA::Decoder * decoder[Impl::MaxThreads]
The decoder.
Definition: fetch.hh:377
DefaultFetch::roundRobin
ThreadID roundRobin()
Returns the appropriate thread to fetch using a round robin policy.
Definition: fetch_impl.hh:1450
DefaultFetch::wroteToTimeBuffer
bool wroteToTimeBuffer
Variable that tracks if fetch has written to the time buffer this cycle.
Definition: fetch.hh:449
Request::INST_FETCH
@ INST_FETCH
The request was an instruction fetch.
Definition: request.hh:104
DefaultFetch::instSize
int instSize
Size of instructions.
Definition: fetch.hh:517
DefaultFetch::wakeFromQuiesce
void wakeFromQuiesce()
Tells fetch to wake up from a quiesce instruction.
Definition: fetch_impl.hh:476
DefaultFetch::pipelineIcacheAccesses
void pipelineIcacheAccesses(ThreadID tid)
Pipeline the next I-cache access to the current one.
Definition: fetch_impl.hh:1561
packet.hh
Stats::DataWrap::flags
Derived & flags(Flags _flags)
Set the flags and marks this stat to print at the end of simulation.
Definition: statistics.hh:331
DefaultFetch::resetStage
void resetStage()
Reset this pipeline stage.
Definition: fetch_impl.hh:319
DefaultFetch::memReq
RequestPtr memReq[Impl::MaxThreads]
Memory request used to access cache.
Definition: fetch.hh:444
DefaultFetch::FetchStatGroup::squashCycles
Stats::Scalar squashCycles
Stat for total number of cycles spent squashing.
Definition: fetch.hh:565
DefaultFetch::retryTid
ThreadID retryTid
The thread that is waiting on the cache to tell fetch to retry.
Definition: fetch.hh:488
DefaultFetch::doSquash
void doSquash(const TheISA::PCState &newPC, const DynInstPtr squashInst, ThreadID tid)
Squashes a specific thread and resets the PC.
Definition: fetch_impl.hh:726
DefaultFetch::lsqCount
ThreadID lsqCount()
Returns the appropriate thread to fetch using the LSQ count policy.
Definition: fetch_impl.hh:1517
random_mt
Random random_mt
Definition: random.cc:96
DefaultFetch::isDrained
bool isDrained() const
Has the stage drained?
Definition: fetch_impl.hh:426
DefaultFetch::icachePort
IcachePort icachePort
Instruction port.
Definition: fetch.hh:540
DefaultFetch::FetchTranslation
Definition: fetch.hh:112
DefaultFetch::fromDecode
TimeBuffer< TimeStruct >::wire fromDecode
Wire to get decode's information from backwards time buffer.
Definition: fetch.hh:416
DefaultFetch::FetchStatGroup::insts
Stats::Scalar insts
Stat for total number of fetched instructions.
Definition: fetch.hh:557
DefaultFetch::fetchCacheLine
bool fetchCacheLine(Addr vaddr, ThreadID tid, Addr pc)
Fetches the cache line that contains the fetch PC.
Definition: fetch_impl.hh:569
DefaultFetch::FetchStatGroup::miscStallCycles
Stats::Scalar miscStallCycles
Total number of cycles spent in any other state.
Definition: fetch.hh:576
DefaultFetch::setActiveThreads
void setActiveThreads(std::list< ThreadID > *at_ptr)
Sets pointer to list of active threads.
Definition: fetch_impl.hh:272
DefaultFetch::branchCount
ThreadID branchCount()
Returns the appropriate thread to fetch using the branch count policy.
Definition: fetch_impl.hh:1553
DefaultFetch::name
std::string name() const
Returns the name of fetch.
Definition: fetch_impl.hh:150
Stats::DataWrap::prereq
Derived & prereq(const Stat &prereq)
Sets the prerequisite stat and marks this stat to print at the end of simulation.
Definition: statistics.hh:345
isa_specific.hh
RequestPort::sendTimingReq
bool sendTimingReq(PacketPtr pkt)
Attempt to send a timing request to the responder port by calling its corresponding receive function.
Definition: port.hh:492
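A false return from sendTimingReq() means the responder cannot accept the packet this cycle; the sender has to hold on to the packet (see retryPkt and recvReqRetry elsewhere on this page) and resend it when the retry callback arrives. A standalone sketch of that handshake with placeholder Port/Packet types, not gem5's real classes:

#include <cstddef>

struct Packet {};                  // stand-in for gem5's Packet
using PacketPtr = Packet *;

struct Port {
    bool peerBusy = false;         // pretend state of the responder
    // Returns false when the responder cannot accept the request this cycle.
    bool sendTimingReq(PacketPtr /* pkt */) { return !peerBusy; }
};

struct FetchLikeStage {
    Port icachePort;
    PacketPtr retryPkt = nullptr;  // packet waiting to be retried
    bool cacheBlocked = false;

    void trySend(PacketPtr pkt) {
        if (!icachePort.sendTimingReq(pkt)) {
            // Responder was busy: stash the packet and stall further accesses.
            retryPkt = pkt;
            cacheBlocked = true;
        }
    }

    // Called back by the port when the responder can take requests again.
    void recvReqRetry() {
        if (retryPkt && icachePort.sendTimingReq(retryPkt)) {
            retryPkt = nullptr;
            cacheBlocked = false;
        }
    }
};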
DefaultFetch::issuePipelinedIfetch
bool issuePipelinedIfetch[Impl::MaxThreads]
Set to true if a pipelined I-cache request should be issued.
Definition: fetch.hh:543
DefaultFetch::recvReqRetry
void recvReqRetry()
Handles retrying the fetch access.
Definition: fetch_impl.hh:1383
DefaultFetch::decodeToFetchDelay
Cycles decodeToFetchDelay
Decode to fetch delay.
Definition: fetch.hh:464
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:234
ADD_STAT
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Definition: group.hh:67
Fault
std::shared_ptr< FaultBase > Fault
Definition: types.hh:240
MipsISA::vaddr
vaddr
Definition: pra_constants.hh:275
DefaultFetch::setFetchQueue
void setFetchQueue(TimeBuffer< FetchStruct > *fq_ptr)
Sets pointer to time buffer used to communicate to the next stage.
Definition: fetch_impl.hh:279
StaticInst::isMacroop
bool isMacroop() const
Definition: static_inst.hh:198
DefaultFetch::iewToFetchDelay
Cycles iewToFetchDelay
IEW to fetch delay.
Definition: fetch.hh:470
DefaultFetch::startupStage
void startupStage()
Initialize stage.
Definition: fetch_impl.hh:287
DefaultFetch::timeBuffer
TimeBuffer< TimeStruct > * timeBuffer
Time buffer interface.
Definition: fetch.hh:413
DefaultFetch::renameToFetchDelay
Cycles renameToFetchDelay
Rename to fetch delay.
Definition: fetch.hh:467
DefaultFetch::Stalls::drain
bool drain
Definition: fetch.hh:457
DefaultFetch::cacheBlocked
bool cacheBlocked
Is the cache blocked? If so, no threads can access it.
Definition: fetch.hh:482
DefaultFetch::checkSignalsAndUpdate
bool checkSignalsAndUpdate(ThreadID tid)
Checks all input signals and updates the status as necessary.
Definition: fetch_impl.hh:964
DefaultFetch::FetchStatGroup::branches
Stats::Scalar branches
Total number of fetched branches.
Definition: fetch.hh:559
DefaultFetch::drainSanityCheck
void drainSanityCheck() const
Perform sanity checks after a drain.
Definition: fetch_impl.hh:408
DefaultFetch::FetchStatGroup::blockedCycles
Stats::Scalar blockedCycles
Total number of cycles spent blocked.
Definition: fetch.hh:574
fetch.hh
DefaultFetch::FetchStatGroup::rate
Stats::Formula rate
Number of instructions fetched per cycle.
Definition: fetch.hh:605
InstSeqNum
uint64_t InstSeqNum
Definition: inst_seq.hh:37
NoFault
constexpr decltype(nullptr) NoFault
Definition: types.hh:245
DefaultFetch::buildInst
DynInstPtr buildInst(ThreadID tid, StaticInstPtr staticInst, StaticInstPtr curMacroop, TheISA::PCState thisPC, TheISA::PCState nextPC, bool trace)
Definition: fetch_impl.hh:1069
DefaultFetch::FetchStatGroup::nisnDist
Stats::Distribution nisnDist
Distribution of number of instructions fetched each cycle.
Definition: fetch.hh:599
core.hh
DefaultFetch::drainStall
void drainStall(ThreadID tid)
Stall the fetch stage after reaching a safe drain point.
Definition: fetch_impl.hh:466
ProbePoints::Packet
ProbePointArg< PacketInfo > Packet
Packet probe point.
Definition: mem.hh:103
Addr
uint64_t Addr
Address type. This will probably be moved somewhere else in the near future.
Definition: types.hh:142
DefaultFetch::updateFetchStatus
FetchStatus updateFetchStatus()
Updates overall fetch stage status; to be called at the end of each cycle.
Definition: fetch_impl.hh:808
DefaultFetch::ppFetchRequestSent
ProbePointArg< RequestPtr > * ppFetchRequestSent
To probe when a fetch request is successfully sent.
Definition: fetch.hh:216
DefaultFetch
DefaultFetch class handles both single threaded and SMT fetch.
Definition: fetch.hh:71
DefaultFetch::delayedCommit
bool delayedCommit[Impl::MaxThreads]
Can the fetch stage redirect from an interrupt on this instruction?
Definition: fetch.hh:441
isRomMicroPC
static bool isRomMicroPC(MicroPC upc)
Definition: types.hh:161
DefaultFetch::FetchStatGroup::pendingDrainCycles
Stats::Scalar pendingDrainCycles
Total number of cycles spent in waiting for drains.
Definition: fetch.hh:578
DefaultFetch::ppFetch
ProbePointArg< DynInstPtr > * ppFetch
Probe points.
Definition: fetch.hh:214
DefaultFetch::lookupAndUpdateNextPC
bool lookupAndUpdateNextPC(const DynInstPtr &inst, TheISA::PCState &pc)
Looks up in the branch predictor to see if the next PC should be either next PC+=MachInst or a branch...
Definition: fetch_impl.hh:523
DefaultFetch::toDecode
TimeBuffer< FetchStruct >::wire toDecode
Wire used to write any information heading to decode.
Definition: fetch.hh:429
DefaultFetch::fetchWidth
unsigned fetchWidth
The width of fetch in instructions.
Definition: fetch.hh:476
full_system.hh
DefaultFetch::Squashing
@ Squashing
Definition: fetch.hh:188
DefaultFetch::FinishTranslationEvent::setFault
void setFault(Fault _fault)
Definition: fetch.hh:152
DefaultFetch::FetchStatGroup::idleCycles
Stats::Scalar idleCycles
Stat for total number of cycles spent blocked due to other stages in the pipeline.
Definition: fetch.hh:572
DefaultFetch::FetchStatGroup::FetchStatGroup
FetchStatGroup(O3CPU *cpu, DefaultFetch *fetch)
Definition: fetch_impl.hh:167
DefaultFetch::priorityList
std::list< ThreadID > priorityList
List that has the threads organized by priority.
Definition: fetch.hh:211
DefaultFetch::interruptPending
bool interruptPending
True if an interrupt is pending.
Definition: fetch.hh:537
DefaultFetch::fetch
void fetch(bool &status_change)
Does the actual fetching of instructions and passing them on to the next stage.
Definition: fetch_impl.hh:1121
StaticInst::isLastMicroop
bool isLastMicroop() const
Definition: static_inst.hh:201
panic_if
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
Definition: logging.hh:197
DefaultFetch::IcachePort::recvReqRetry
virtual void recvReqRetry()
Handles retrying a failed fetch.
Definition: fetch_impl.hh:1653
PseudoInst::quiesce
void quiesce(ThreadContext *tc)
Definition: pseudo_inst.cc:122
DefaultFetch::profileStall
void profileStall(ThreadID tid)
Profile the reasons of fetch stall.
Definition: fetch_impl.hh:1591
StaticInst::fetchMicroop
virtual StaticInstPtr fetchMicroop(MicroPC upc) const
Return the microop that goes with a particular micropc.
Definition: static_inst.cc:98
DefaultFetch::numThreads
ThreadID numThreads
Number of threads.
Definition: fetch.hh:526
base.hh
DefaultFetch::clearStates
void clearStates(ThreadID tid)
Clear all thread-specific states.
Definition: fetch_impl.hh:299
std
Overload hash function for BasicBlockRange type.
Definition: vec_reg.hh:587
MipsISA::PCState
GenericISA::DelaySlotPCState< MachInst > PCState
Definition: types.hh:41
types.hh
DefaultFetch::drainResume
void drainResume()
Resume after a drain.
Definition: fetch_impl.hh:398
Stats::Distribution::init
Distribution & init(Counter min, Counter max, Counter bkt)
Set the parameters of this distribution.
Definition: statistics.hh:2634
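For distribution stats, init(min, max, bkt) sizes the buckets and sample(v, n) (listed further down this page) adds observations; the fetch stage sets up nisnDist with init(0, fetchWidth, 1) and samples the per-cycle instruction count. A minimal standalone histogram with the same interface shape, not gem5's Stats::Distribution, which also tracks underflows, overflows, and summary statistics:

#include <cstddef>
#include <vector>

class TinyDistribution {
  public:
    void init(int min, int max, int bkt) {
        min_ = min;
        bkt_ = bkt;
        buckets_.assign(static_cast<std::size_t>((max - min) / bkt) + 1, 0);
    }

    void sample(int v, int n = 1) {
        if (v < min_)
            return;                                   // dropped in this sketch
        std::size_t idx = static_cast<std::size_t>((v - min_) / bkt_);
        if (idx < buckets_.size())
            buckets_[idx] += static_cast<unsigned>(n);
    }

    const std::vector<unsigned> &buckets() const { return buckets_; }

  private:
    int min_ = 0, bkt_ = 1;
    std::vector<unsigned> buckets_;
};

// e.g. dist.init(0, fetchWidth, 1); then dist.sample(numInst) once per cycle.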
Stats::pdf
const FlagsType pdf
Print the percent of the total that this entry represents.
Definition: info.hh:51
DefaultFetch::finishTranslation
void finishTranslation(const Fault &fault, const RequestPtr &mem_req)
Definition: fetch_impl.hh:619
DefaultFetch::fetchQueueSize
unsigned fetchQueueSize
The size of the fetch queue in micro-ops.
Definition: fetch.hh:508
Packet
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:257
DefaultFetch::FetchStatGroup::tlbCycles
Stats::Scalar tlbCycles
Stat for total number of cycles spent waiting for translation.
Definition: fetch.hh:567
DefaultFetch::FetchStatGroup::icacheSquashes
Stats::Scalar icacheSquashes
Total number of outstanding icache accesses that were dropped due to a squash.
Definition: fetch.hh:593
DefaultFetch::TrapPending
@ TrapPending
Definition: fetch.hh:191
DefaultFetch::FetchStatGroup::idleRate
Stats::Formula idleRate
Rate of how often fetch was idle.
Definition: fetch.hh:601
DefaultFetch::fromRename
TimeBuffer< TimeStruct >::wire fromRename
Wire to get rename's information from backwards time buffer.
Definition: fetch.hh:419
DefaultFetch::Inactive
@ Inactive
Definition: fetch.hh:181
BaseCPU::PCMask
static const Addr PCMask
Definition: base.hh:264
BPredUnit::predict
bool predict(const StaticInstPtr &inst, const InstSeqNum &seqNum, TheISA::PCState &pc, ThreadID tid)
Predicts whether or not the instruction is a taken branch, and the target of the branch if it is take...
Definition: bpred_unit.cc:116
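predict(), update() (listed near the top of this page), and squash() form the usual life cycle of a prediction: fetch asks for a direction and target, commit confirms outcomes in program order, and a misprediction discards the younger speculative history. A standalone sketch of that contract using a toy 2-bit-counter table; it is not gem5's BPredUnit, whose calls also take a thread ID and update the PCState in place:

#include <cstddef>
#include <cstdint>
#include <deque>
#include <vector>

using InstSeqNum = uint64_t;

class TinyBPred {
  public:
    explicit TinyBPred(std::size_t entries) : counters(entries, 2) {}

    // Record a prediction for seq_num and return the predicted direction.
    bool predict(uint64_t pc, InstSeqNum seq_num) {
        std::size_t idx = index(pc);
        inflight.push_back({seq_num, idx});
        return counters[idx] >= 2;                     // predicted taken
    }

    // Commit the oldest outstanding prediction once its real outcome is
    // known, training the counter it used.
    void update(InstSeqNum done_sn, bool taken) {
        if (!inflight.empty() && inflight.front().sn == done_sn) {
            std::size_t idx = inflight.front().idx;
            if (taken && counters[idx] < 3) ++counters[idx];
            if (!taken && counters[idx] > 0) --counters[idx];
            inflight.pop_front();
        }
    }

    // Discard predictions younger than squashed_sn after a misprediction.
    void squash(InstSeqNum squashed_sn) {
        while (!inflight.empty() && inflight.back().sn > squashed_sn)
            inflight.pop_back();
    }

  private:
    struct Pred { InstSeqNum sn; std::size_t idx; };
    std::size_t index(uint64_t pc) const { return (pc >> 2) % counters.size(); }
    std::vector<uint8_t> counters;   // 2-bit saturating counters
    std::deque<Pred> inflight;       // outstanding predictions, oldest first
};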
Stats::DistBase::sample
void sample(const U &v, int n=1)
Add a value to the distribution n times.
Definition: statistics.hh:1924
Cycles
Cycles is a wrapper class for representing cycle counts, i.e.
Definition: types.hh:83
DefaultFetch::setTimeBuffer
void setTimeBuffer(TimeBuffer< TimeStruct > *time_buffer)
Sets the main backwards communication time buffer pointer.
Definition: fetch_impl.hh:259
DefaultFetch::IcacheWaitRetry
@ IcacheWaitRetry
Definition: fetch.hh:195
DefaultFetch::ItlbWait
@ ItlbWait
Definition: fetch.hh:193
Random::random
std::enable_if< std::is_integral< T >::value, T >::type random()
Use the SFINAE idiom to choose an implementation based on whether the type is integral or floating po...
Definition: random.hh:86
DefaultFetch::threadFetched
ThreadID threadFetched
Thread ID being fetched.
Definition: fetch.hh:532
DefaultFetch::FetchStatGroup::icacheWaitRetryStallCycles
Stats::Scalar icacheWaitRetryStallCycles
Total number of stall cycles caused by I-cache wait retries.
Definition: fetch.hh:587
Stats
Definition: statistics.cc:61
DefaultFetch::processCacheCompletion
void processCacheCompletion(PacketPtr pkt)
Processes cache completion event.
Definition: fetch_impl.hh:354
DefaultFetch::pc
TheISA::PCState pc[Impl::MaxThreads]
Definition: fetch.hh:434
RefCountingPtr< StaticInst >
DefaultFetch::Idle
@ Idle
Definition: fetch.hh:187
DefaultFetch::fetchQueue
std::deque< DynInstPtr > fetchQueue[Impl::MaxThreads]
Queue of fetched instructions.
Definition: fetch.hh:511
DefaultFetch::iqCount
ThreadID iqCount()
Returns the appropriate thread to fetch using the IQ count policy.
Definition: fetch_impl.hh:1480
DefaultFetch::DynInst
Impl::DynInst DynInst
Definition: fetch.hh:76
DefaultFetch::cpu
O3CPU * cpu
Pointer to the O3CPU.
Definition: fetch.hh:410
DefaultFetch::DefaultFetch
DefaultFetch(O3CPU *_cpu, DerivO3CPUParams *params)
DefaultFetch constructor.
Definition: fetch_impl.hh:78
DefaultFetch::branchPred
BPredUnit * branchPred
BPredUnit.
Definition: fetch.hh:432
DefaultFetch::numInst
int numInst
Tracks how many instructions have been fetched this cycle.
Definition: fetch.hh:452
cpu.hh
std::list< ThreadID >
DefaultFetch::fetchBuffer
uint8_t * fetchBuffer[Impl::MaxThreads]
The instruction data currently being fetched and buffered.
Definition: fetch.hh:502
DefaultFetch::_status
FetchStatus _status
Fetch status.
Definition: fetch.hh:202
DefaultFetch::IcachePort::recvTimingResp
virtual bool recvTimingResp(PacketPtr pkt)
Timing version of receive.
Definition: fetch_impl.hh:1640
StaticInst::nopStaticInstPtr
static StaticInstPtr nopStaticInstPtr
Pointer to a statically allocated generic "nop" instruction object.
Definition: static_inst.hh:240
DefaultFetch::stalls
Stalls stalls[Impl::MaxThreads]
Tracks which stages are telling fetch to stall.
Definition: fetch.hh:461
DefaultFetch::retryPkt
PacketPtr retryPkt
The packet that is waiting to be retried.
Definition: fetch.hh:485
Stats::total
const FlagsType total
Print the total.
Definition: info.hh:49
DefaultFetch::checkStall
bool checkStall(ThreadID tid) const
Checks if a thread is stalled.
Definition: fetch_impl.hh:793
BPredUnit::squash
void squash(const InstSeqNum &squashed_sn, ThreadID tid)
Squashes all outstanding updates until a given sequence number.
Definition: bpred_unit.cc:314
BaseTLB::Execute
@ Execute
Definition: tlb.hh:57
DefaultFetch::FetchStatGroup::icacheStallCycles
Stats::Scalar icacheStallCycles
Stat for total number of cycles stalled due to an icache miss.
Definition: fetch.hh:555
DefaultFetch::lastIcacheStall
Counter lastIcacheStall[Impl::MaxThreads]
Icache stall statistics.
Definition: fetch.hh:520
Packet::getConstPtr
const T * getConstPtr() const
Definition: packet.hh:1166
DefaultFetch::checkInterrupt
bool checkInterrupt(Addr pc)
Check if an interrupt is pending and whether we need to handle it.
Definition: fetch.hh:321
DefaultFetch::fetchStatus
ThreadStatus fetchStatus[Impl::MaxThreads]
Per-thread status.
Definition: fetch.hh:205
DefaultFetch::activeThreads
std::list< ThreadID > * activeThreads
List of Active Threads.
Definition: fetch.hh:523
byteswap.hh
DefaultFetch::squash
void squash(const TheISA::PCState &newPC, const InstSeqNum seq_num, DynInstPtr squashInst, ThreadID tid)
Squashes a specific thread and resets the PC.
Definition: fetch_impl.hh:848
panic
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:171
DefaultFetch::fetchBufferSize
unsigned fetchBufferSize
The size of the fetch buffer in bytes.
Definition: fetch.hh:496
DefaultFetch::fromCommit
TimeBuffer< TimeStruct >::wire fromCommit
Wire to get commit's information from backwards time buffer.
Definition: fetch.hh:425
curTick
Tick curTick()
The current simulated tick.
Definition: core.hh:45
DefaultFetch::DynInstPtr
Impl::DynInstPtr DynInstPtr
Definition: fetch.hh:77
TimeBuffer::getWire
wire getWire(int idx)
Definition: timebuf.hh:229
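Pipeline stages exchange signals through TimeBuffer objects; getWire(idx) hands back an accessor offset idx entries in time, which is how fetch reads decode's signals decodeToFetchDelay cycles after they were written (setTimeBuffer() takes getWire(-decodeToFetchDelay) and similar wires, while setFetchQueue() takes getWire(0) for writing). A minimal standalone ring-buffer sketch of the idea, not gem5's TimeBuffer:

#include <cassert>
#include <cstddef>
#include <vector>

// Fixed-depth ring buffer indexed relative to "now": get(0) is the entry
// being written this cycle, get(-d) is what was written d cycles ago.
template <class T>
class TinyTimeBuffer {
  public:
    explicit TinyTimeBuffer(int past) : data_(past + 1), base_(0) {}

    // Move to the next cycle and clear the new "current" slot.
    void advance() {
        base_ = (base_ + 1) % data_.size();
        data_[base_] = T();
    }

    T &get(int idx) {
        assert(idx <= 0 && static_cast<std::size_t>(-idx) < data_.size());
        std::size_t off = static_cast<std::size_t>(-idx);
        return data_[(base_ + data_.size() - off) % data_.size()];
    }

  private:
    std::vector<T> data_;
    std::size_t base_;
};

// A producer writes into buf.get(0) this cycle; a consumer reads
// buf.get(-delay) to see signals that are `delay` cycles old.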
eventq.hh

Generated on Wed Sep 30 2020 14:02:09 for gem5 by doxygen 1.8.17