gem5  v21.0.0.0
fetch_impl.hh
1 /*
2  * Copyright (c) 2010-2014 ARM Limited
3  * Copyright (c) 2012-2013 AMD
4  * All rights reserved.
5  *
6  * The license below extends only to copyright in the software and shall
7  * not be construed as granting a license to any other intellectual
8  * property including but not limited to intellectual property relating
9  * to a hardware implementation of the functionality of the software
10  * licensed hereunder. You may use the software subject to the license
11  * terms below provided that you ensure that this notice is replicated
12  * unmodified and in its entirety in all distributions of the software,
13  * modified or unmodified, in source code or in binary form.
14  *
15  * Copyright (c) 2004-2006 The Regents of The University of Michigan
16  * All rights reserved.
17  *
18  * Redistribution and use in source and binary forms, with or without
19  * modification, are permitted provided that the following conditions are
20  * met: redistributions of source code must retain the above copyright
21  * notice, this list of conditions and the following disclaimer;
22  * redistributions in binary form must reproduce the above copyright
23  * notice, this list of conditions and the following disclaimer in the
24  * documentation and/or other materials provided with the distribution;
25  * neither the name of the copyright holders nor the names of its
26  * contributors may be used to endorse or promote products derived from
27  * this software without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40  */
41 
42 #ifndef __CPU_O3_FETCH_IMPL_HH__
43 #define __CPU_O3_FETCH_IMPL_HH__
44 
45 #include <algorithm>
46 #include <cstring>
47 #include <list>
48 #include <map>
49 #include <queue>
50 
51 #include "arch/generic/tlb.hh"
52 #include "arch/utility.hh"
53 #include "base/random.hh"
54 #include "base/types.hh"
55 #include "config/the_isa.hh"
56 #include "cpu/base.hh"
57 #include "cpu/o3/cpu.hh"
58 #include "cpu/o3/fetch.hh"
59 #include "cpu/exetrace.hh"
60 #include "debug/Activity.hh"
61 #include "debug/Drain.hh"
62 #include "debug/Fetch.hh"
63 #include "debug/O3CPU.hh"
64 #include "debug/O3PipeView.hh"
65 #include "mem/packet.hh"
66 #include "params/DerivO3CPU.hh"
67 #include "sim/byteswap.hh"
68 #include "sim/core.hh"
69 #include "sim/eventq.hh"
70 #include "sim/full_system.hh"
71 #include "sim/system.hh"
72 #include "cpu/o3/isa_specific.hh"
73 
74 template<class Impl>
75 DefaultFetch<Impl>::DefaultFetch(O3CPU *_cpu, const DerivO3CPUParams &params)
76  : fetchPolicy(params.smtFetchPolicy),
77  cpu(_cpu),
78  branchPred(nullptr),
79  decodeToFetchDelay(params.decodeToFetchDelay),
80  renameToFetchDelay(params.renameToFetchDelay),
81  iewToFetchDelay(params.iewToFetchDelay),
82  commitToFetchDelay(params.commitToFetchDelay),
83  fetchWidth(params.fetchWidth),
84  decodeWidth(params.decodeWidth),
85  retryPkt(NULL),
86  retryTid(InvalidThreadID),
87  cacheBlkSize(cpu->cacheLineSize()),
88  fetchBufferSize(params.fetchBufferSize),
89  fetchBufferMask(fetchBufferSize - 1),
90  fetchQueueSize(params.fetchQueueSize),
91  numThreads(params.numThreads),
92  numFetchingThreads(params.smtNumFetchingThreads),
93  icachePort(this, _cpu),
94  finishTranslationEvent(this), fetchStats(_cpu, this)
95 {
96  if (numThreads > Impl::MaxThreads)
97  fatal("numThreads (%d) is larger than compiled limit (%d),\n"
98  "\tincrease MaxThreads in src/cpu/o3/impl.hh\n",
99  numThreads, static_cast<int>(Impl::MaxThreads));
100  if (fetchWidth > Impl::MaxWidth)
101  fatal("fetchWidth (%d) is larger than compiled limit (%d),\n"
102  "\tincrease MaxWidth in src/cpu/o3/impl.hh\n",
103  fetchWidth, static_cast<int>(Impl::MaxWidth));
 104  if (fetchBufferSize > cacheBlkSize)
 105  fatal("fetch buffer size (%u bytes) is greater than the cache "
 106  "block size (%u bytes)\n", fetchBufferSize, cacheBlkSize);
 107  if (cacheBlkSize % fetchBufferSize)
 108  fatal("cache block (%u bytes) is not a multiple of the "
 109  "fetch buffer (%u bytes)\n", cacheBlkSize, fetchBufferSize);
110 
111  // Get the size of an instruction.
112  instSize = sizeof(TheISA::MachInst);
113 
114  for (int i = 0; i < Impl::MaxThreads; i++) {
115  fetchStatus[i] = Idle;
116  decoder[i] = nullptr;
117  pc[i] = 0;
118  fetchOffset[i] = 0;
119  macroop[i] = nullptr;
120  delayedCommit[i] = false;
121  memReq[i] = nullptr;
122  stalls[i] = {false, false};
123  fetchBuffer[i] = NULL;
124  fetchBufferPC[i] = 0;
125  fetchBufferValid[i] = false;
126  lastIcacheStall[i] = 0;
127  issuePipelinedIfetch[i] = false;
128  }
129 
130  branchPred = params.branchPred;
131 
132  for (ThreadID tid = 0; tid < numThreads; tid++) {
133  decoder[tid] = new TheISA::Decoder(
134  dynamic_cast<TheISA::ISA *>(params.isa[tid]));
135  // Create space to buffer the cache line data,
136  // which may not hold the entire cache line.
137  fetchBuffer[tid] = new uint8_t[fetchBufferSize];
138  }
139 }
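// Note on the constructor above: fetchBufferMask is computed as
// fetchBufferSize - 1 and is used to align fetch addresses to fetch-buffer
// boundaries, so fetchBufferSize is additionally assumed to be a power of
// two; the fatal() checks only verify that it evenly divides (and does not
// exceed) the cache block size.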
140 
141 template <class Impl>
142 std::string
 143 DefaultFetch<Impl>::name() const
 144 {
145  return cpu->name() + ".fetch";
146 }
147 
148 template <class Impl>
149 void
 150 DefaultFetch<Impl>::regProbePoints()
 151 {
152  ppFetch = new ProbePointArg<DynInstPtr>(cpu->getProbeManager(), "Fetch");
153  ppFetchRequestSent = new ProbePointArg<RequestPtr>(cpu->getProbeManager(),
154  "FetchRequest");
155 
156 }
157 
158 template <class Impl>
 159 DefaultFetch<Impl>::FetchStatGroup::FetchStatGroup(O3CPU *cpu,
 160  DefaultFetch *fetch)
 161  : Stats::Group(cpu, "fetch"),
162  ADD_STAT(icacheStallCycles, UNIT_CYCLE,
163  "Number of cycles fetch is stalled on an Icache miss"),
164  ADD_STAT(insts, UNIT_COUNT, "Number of instructions fetch has processed"),
165  ADD_STAT(branches, UNIT_COUNT,
166  "Number of branches that fetch encountered"),
167  ADD_STAT(predictedBranches, UNIT_COUNT,
168  "Number of branches that fetch has predicted taken"),
169  ADD_STAT(cycles, UNIT_CYCLE,
170  "Number of cycles fetch has run and was not squashing or "
171  "blocked"),
172  ADD_STAT(squashCycles, UNIT_CYCLE,
173  "Number of cycles fetch has spent squashing"),
174  ADD_STAT(tlbCycles, UNIT_CYCLE,
175  "Number of cycles fetch has spent waiting for tlb"),
176  ADD_STAT(idleCycles, UNIT_CYCLE, "Number of cycles fetch was idle"),
177  ADD_STAT(blockedCycles, UNIT_CYCLE,
178  "Number of cycles fetch has spent blocked"),
179  ADD_STAT(miscStallCycles, UNIT_CYCLE,
180  "Number of cycles fetch has spent waiting on interrupts, or bad "
181  "addresses, or out of MSHRs"),
182  ADD_STAT(pendingDrainCycles, UNIT_CYCLE,
183  "Number of cycles fetch has spent waiting on pipes to drain"),
184  ADD_STAT(noActiveThreadStallCycles, UNIT_CYCLE,
185  "Number of stall cycles due to no active thread to fetch from"),
186  ADD_STAT(pendingTrapStallCycles, UNIT_CYCLE,
187  "Number of stall cycles due to pending traps"),
188  ADD_STAT(pendingQuiesceStallCycles, UNIT_CYCLE,
189  "Number of stall cycles due to pending quiesce instructions"),
190  ADD_STAT(icacheWaitRetryStallCycles, UNIT_CYCLE,
191  "Number of stall cycles due to full MSHR"),
192  ADD_STAT(cacheLines, UNIT_COUNT, "Number of cache lines fetched"),
193  ADD_STAT(icacheSquashes, UNIT_COUNT,
194  "Number of outstanding Icache misses that were squashed"),
195  ADD_STAT(tlbSquashes, UNIT_COUNT,
196  "Number of outstanding ITLB misses that were squashed"),
197  ADD_STAT(nisnDist, UNIT_COUNT,
198  "Number of instructions fetched each cycle (Total)"),
199  ADD_STAT(idleRate, UNIT_RATIO, "Ratio of cycles fetch was idle",
200  idleCycles / cpu->baseStats.numCycles),
201  ADD_STAT(branchRate, UNIT_RATIO, "Number of branch fetches per cycle",
202  branches / cpu->baseStats.numCycles),
203  ADD_STAT(rate, UNIT_RATE(Stats::Units::Count, Stats::Units::Cycle),
204  "Number of inst fetches per cycle",
205  insts / cpu->baseStats.numCycles)
 206 {
 207  icacheStallCycles
 208  .prereq(icacheStallCycles);
 209  insts
 210  .prereq(insts);
 211  branches
 212  .prereq(branches);
 213  predictedBranches
 214  .prereq(predictedBranches);
 215  cycles
 216  .prereq(cycles);
 217  squashCycles
 218  .prereq(squashCycles);
 219  tlbCycles
 220  .prereq(tlbCycles);
 221  idleCycles
 222  .prereq(idleCycles);
 223  blockedCycles
 224  .prereq(blockedCycles);
 225  cacheLines
 226  .prereq(cacheLines);
 227  miscStallCycles
 228  .prereq(miscStallCycles);
 229  pendingDrainCycles
 230  .prereq(pendingDrainCycles);
 231  noActiveThreadStallCycles
 232  .prereq(noActiveThreadStallCycles);
 233  pendingTrapStallCycles
 234  .prereq(pendingTrapStallCycles);
 235  pendingQuiesceStallCycles
 236  .prereq(pendingQuiesceStallCycles);
 237  icacheWaitRetryStallCycles
 238  .prereq(icacheWaitRetryStallCycles);
 239  icacheSquashes
 240  .prereq(icacheSquashes);
 241  tlbSquashes
 242  .prereq(tlbSquashes);
 243  nisnDist
 244  .init(/* base value */ 0,
 245  /* last value */ fetch->fetchWidth,
 246  /* bucket size */ 1)
 247  .flags(Stats::pdf);
 248  idleRate
 249  .prereq(idleRate);
 250  branchRate
 251  .flags(Stats::total);
 252  rate
 253  .flags(Stats::total);
 254 }
255 template<class Impl>
256 void
 257 DefaultFetch<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *time_buffer)
 258 {
 259  timeBuffer = time_buffer;
 260 
 261  // Create wires to get information from proper places in time buffer.
 262  fromDecode = timeBuffer->getWire(-decodeToFetchDelay);
 263  fromRename = timeBuffer->getWire(-renameToFetchDelay);
 264  fromIEW = timeBuffer->getWire(-iewToFetchDelay);
 265  fromCommit = timeBuffer->getWire(-commitToFetchDelay);
 266 }
267 
268 template<class Impl>
269 void
 270 DefaultFetch<Impl>::setActiveThreads(std::list<ThreadID> *at_ptr)
 271 {
272  activeThreads = at_ptr;
273 }
274 
275 template<class Impl>
276 void
 277 DefaultFetch<Impl>::setFetchQueue(TimeBuffer<FetchStruct> *ftb_ptr)
 278 {
279  // Create wire to write information to proper place in fetch time buf.
280  toDecode = ftb_ptr->getWire(0);
281 }
282 
283 template<class Impl>
284 void
 285 DefaultFetch<Impl>::startupStage()
 286 {
287  assert(priorityList.empty());
288  resetStage();
289 
290  // Fetch needs to start fetching instructions at the very beginning,
291  // so it must start up in active state.
292  switchToActive();
293 }
294 
295 template<class Impl>
296 void
 297 DefaultFetch<Impl>::clearStates(ThreadID tid)
 298 {
299  fetchStatus[tid] = Running;
300  pc[tid] = cpu->pcState(tid);
301  fetchOffset[tid] = 0;
302  macroop[tid] = NULL;
303  delayedCommit[tid] = false;
304  memReq[tid] = NULL;
305  stalls[tid].decode = false;
306  stalls[tid].drain = false;
307  fetchBufferPC[tid] = 0;
308  fetchBufferValid[tid] = false;
309  fetchQueue[tid].clear();
310 
311  // TODO not sure what to do with priorityList for now
312  // priorityList.push_back(tid);
313 }
314 
315 template<class Impl>
316 void
 317 DefaultFetch<Impl>::resetStage()
 318 {
319  numInst = 0;
320  interruptPending = false;
321  cacheBlocked = false;
322 
323  priorityList.clear();
324 
325  // Setup PC and nextPC with initial state.
326  for (ThreadID tid = 0; tid < numThreads; ++tid) {
327  fetchStatus[tid] = Running;
328  pc[tid] = cpu->pcState(tid);
329  fetchOffset[tid] = 0;
330  macroop[tid] = NULL;
331 
332  delayedCommit[tid] = false;
333  memReq[tid] = NULL;
334 
335  stalls[tid].decode = false;
336  stalls[tid].drain = false;
337 
338  fetchBufferPC[tid] = 0;
339  fetchBufferValid[tid] = false;
340 
341  fetchQueue[tid].clear();
342 
343  priorityList.push_back(tid);
344  }
345 
346  wroteToTimeBuffer = false;
347  _status = Inactive;
348 }
349 
350 template<class Impl>
351 void
 352 DefaultFetch<Impl>::processCacheCompletion(PacketPtr pkt)
 353 {
354  ThreadID tid = cpu->contextToThread(pkt->req->contextId());
355 
356  DPRINTF(Fetch, "[tid:%i] Waking up from cache miss.\n", tid);
357  assert(!cpu->switchedOut());
358 
359  // Only change the status if it's still waiting on the icache access
360  // to return.
361  if (fetchStatus[tid] != IcacheWaitResponse ||
362  pkt->req != memReq[tid]) {
 363  ++fetchStats.icacheSquashes;
 364  delete pkt;
365  return;
366  }
367 
368  memcpy(fetchBuffer[tid], pkt->getConstPtr<uint8_t>(), fetchBufferSize);
369  fetchBufferValid[tid] = true;
370 
371  // Wake up the CPU (if it went to sleep and was waiting on
372  // this completion event).
373  cpu->wakeCPU();
374 
375  DPRINTF(Activity, "[tid:%i] Activating fetch due to cache completion\n",
376  tid);
377 
378  switchToActive();
379 
380  // Only switch to IcacheAccessComplete if we're not stalled as well.
381  if (checkStall(tid)) {
382  fetchStatus[tid] = Blocked;
383  } else {
 384  fetchStatus[tid] = IcacheAccessComplete;
 385  }
386 
387  pkt->req->setAccessLatency();
388  cpu->ppInstAccessComplete->notify(pkt);
389  // Reset the mem req to NULL.
390  delete pkt;
391  memReq[tid] = NULL;
392 }
393 
394 template <class Impl>
395 void
 396 DefaultFetch<Impl>::drainResume()
 397 {
398  for (ThreadID i = 0; i < numThreads; ++i) {
399  stalls[i].decode = false;
400  stalls[i].drain = false;
401  }
402 }
403 
404 template <class Impl>
405 void
 406 DefaultFetch<Impl>::drainSanityCheck() const
 407 {
408  assert(isDrained());
409  assert(retryPkt == NULL);
410  assert(retryTid == InvalidThreadID);
411  assert(!cacheBlocked);
412  assert(!interruptPending);
413 
414  for (ThreadID i = 0; i < numThreads; ++i) {
415  assert(!memReq[i]);
416  assert(fetchStatus[i] == Idle || stalls[i].drain);
417  }
418 
 419  branchPred->drainSanityCheck();
 420 }
421 
422 template <class Impl>
423 bool
 424 DefaultFetch<Impl>::isDrained() const
 425 {
 426  /* Make sure that threads are either idle or that the commit stage
427  * has signaled that draining has completed by setting the drain
428  * stall flag. This effectively forces the pipeline to be disabled
429  * until the whole system is drained (simulation may continue to
430  * drain other components).
431  */
432  for (ThreadID i = 0; i < numThreads; ++i) {
433  // Verify fetch queues are drained
434  if (!fetchQueue[i].empty())
435  return false;
436 
437  // Return false if not idle or drain stalled
438  if (fetchStatus[i] != Idle) {
439  if (fetchStatus[i] == Blocked && stalls[i].drain)
440  continue;
441  else
442  return false;
443  }
444  }
445 
446  /* The pipeline might start up again in the middle of the drain
447  * cycle if the finish translation event is scheduled, so make
448  * sure that's not the case.
449  */
 450  return !finishTranslationEvent.scheduled();
 451 }
452 
453 template <class Impl>
454 void
 455 DefaultFetch<Impl>::takeOverFrom()
 456 {
457  assert(cpu->getInstPort().isConnected());
458  resetStage();
459 
460 }
461 
462 template <class Impl>
463 void
 464 DefaultFetch<Impl>::drainStall(ThreadID tid)
 465 {
466  assert(cpu->isDraining());
467  assert(!stalls[tid].drain);
468  DPRINTF(Drain, "%i: Thread drained.\n", tid);
469  stalls[tid].drain = true;
470 }
471 
472 template <class Impl>
473 void
 474 DefaultFetch<Impl>::wakeFromQuiesce()
 475 {
476  DPRINTF(Fetch, "Waking up from quiesce\n");
477  // Hopefully this is safe
478  // @todo: Allow other threads to wake from quiesce.
479  fetchStatus[0] = Running;
480 }
481 
482 template <class Impl>
483 inline void
 484 DefaultFetch<Impl>::switchToActive()
 485 {
486  if (_status == Inactive) {
487  DPRINTF(Activity, "Activating stage.\n");
488 
489  cpu->activateStage(O3CPU::FetchIdx);
490 
491  _status = Active;
492  }
493 }
494 
495 template <class Impl>
496 inline void
 497 DefaultFetch<Impl>::switchToInactive()
 498 {
499  if (_status == Active) {
500  DPRINTF(Activity, "Deactivating stage.\n");
501 
502  cpu->deactivateStage(O3CPU::FetchIdx);
503 
504  _status = Inactive;
505  }
506 }
507 
508 template <class Impl>
509 void
 510 DefaultFetch<Impl>::deactivateThread(ThreadID tid)
 511 {
512  // Update priority list
513  auto thread_it = std::find(priorityList.begin(), priorityList.end(), tid);
514  if (thread_it != priorityList.end()) {
515  priorityList.erase(thread_it);
516  }
517 }
518 
519 template <class Impl>
520 bool
 521 DefaultFetch<Impl>::lookupAndUpdateNextPC(
 522  const DynInstPtr &inst, TheISA::PCState &nextPC)
523 {
524  // Do branch prediction check here.
525  // A bit of a misnomer...next_PC is actually the current PC until
526  // this function updates it.
527  bool predict_taken;
528 
529  if (!inst->isControl()) {
530  TheISA::advancePC(nextPC, inst->staticInst);
531  inst->setPredTarg(nextPC);
532  inst->setPredTaken(false);
533  return false;
534  }
535 
536  ThreadID tid = inst->threadNumber;
537  predict_taken = branchPred->predict(inst->staticInst, inst->seqNum,
538  nextPC, tid);
539 
540  if (predict_taken) {
541  DPRINTF(Fetch, "[tid:%i] [sn:%llu] Branch at PC %#x "
542  "predicted to be taken to %s\n",
543  tid, inst->seqNum, inst->pcState().instAddr(), nextPC);
544  } else {
545  DPRINTF(Fetch, "[tid:%i] [sn:%llu] Branch at PC %#x "
546  "predicted to be not taken\n",
547  tid, inst->seqNum, inst->pcState().instAddr());
548  }
549 
550  DPRINTF(Fetch, "[tid:%i] [sn:%llu] Branch at PC %#x "
551  "predicted to go to %s\n",
552  tid, inst->seqNum, inst->pcState().instAddr(), nextPC);
553  inst->setPredTarg(nextPC);
554  inst->setPredTaken(predict_taken);
555 
 556  ++fetchStats.branches;
 557 
 558  if (predict_taken) {
 559  ++fetchStats.predictedBranches;
 560  }
561 
562  return predict_taken;
563 }
564 
565 template <class Impl>
566 bool
 567 DefaultFetch<Impl>::fetchCacheLine(Addr vaddr, ThreadID tid, Addr pc)
 568 {
569  Fault fault = NoFault;
570 
571  assert(!cpu->switchedOut());
572 
573  // @todo: not sure if these should block translation.
574  //AlphaDep
575  if (cacheBlocked) {
576  DPRINTF(Fetch, "[tid:%i] Can't fetch cache line, cache blocked\n",
577  tid);
578  return false;
579  } else if (checkInterrupt(pc) && !delayedCommit[tid]) {
580  // Hold off fetch from getting new instructions when:
581  // Cache is blocked, or
582  // while an interrupt is pending and we're not in PAL mode, or
583  // fetch is switched out.
584  DPRINTF(Fetch, "[tid:%i] Can't fetch cache line, interrupt pending\n",
585  tid);
586  return false;
587  }
588 
589  // Align the fetch address to the start of a fetch buffer segment.
590  Addr fetchBufferBlockPC = fetchBufferAlignPC(vaddr);
591 
592  DPRINTF(Fetch, "[tid:%i] Fetching cache line %#x for addr %#x\n",
593  tid, fetchBufferBlockPC, vaddr);
594 
595  // Setup the memReq to do a read of the first instruction's address.
596  // Set the appropriate read size and flags as well.
597  // Build request here.
598  RequestPtr mem_req = std::make_shared<Request>(
599  fetchBufferBlockPC, fetchBufferSize,
600  Request::INST_FETCH, cpu->instRequestorId(), pc,
601  cpu->thread[tid]->contextId());
602 
603  mem_req->taskId(cpu->taskId());
604 
605  memReq[tid] = mem_req;
606 
607  // Initiate translation of the icache block
608  fetchStatus[tid] = ItlbWait;
609  FetchTranslation *trans = new FetchTranslation(this);
610  cpu->mmu->translateTiming(mem_req, cpu->thread[tid]->getTC(),
611  trans, BaseTLB::Execute);
612  return true;
613 }
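// Despite its name, fetchCacheLine() above requests a single fetch-buffer
// segment (fetchBufferSize bytes, aligned with fetchBufferAlignPC()), which
// may be smaller than a full cache line. The access completes asynchronously:
// the translation result arrives via finishTranslation(), and the instruction
// bytes via processCacheCompletion() once the icache responds.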
614 
615 template <class Impl>
616 void
 617 DefaultFetch<Impl>::finishTranslation(const Fault &fault,
 618  const RequestPtr &mem_req)
619 {
620  ThreadID tid = cpu->contextToThread(mem_req->contextId());
621  Addr fetchBufferBlockPC = mem_req->getVaddr();
622 
623  assert(!cpu->switchedOut());
624 
625  // Wake up CPU if it was idle
626  cpu->wakeCPU();
627 
628  if (fetchStatus[tid] != ItlbWait || mem_req != memReq[tid] ||
629  mem_req->getVaddr() != memReq[tid]->getVaddr()) {
630  DPRINTF(Fetch, "[tid:%i] Ignoring itlb completed after squash\n",
631  tid);
 632  ++fetchStats.tlbSquashes;
 633  return;
634  }
635 
636 
637  // If translation was successful, attempt to read the icache block.
638  if (fault == NoFault) {
639  // Check that we're not going off into random memory
640  // If we have, just wait around for commit to squash something and put
641  // us on the right track
642  if (!cpu->system->isMemAddr(mem_req->getPaddr())) {
643  warn("Address %#x is outside of physical memory, stopping fetch\n",
644  mem_req->getPaddr());
645  fetchStatus[tid] = NoGoodAddr;
646  memReq[tid] = NULL;
647  return;
648  }
649 
650  // Build packet here.
651  PacketPtr data_pkt = new Packet(mem_req, MemCmd::ReadReq);
652  data_pkt->dataDynamic(new uint8_t[fetchBufferSize]);
653 
654  fetchBufferPC[tid] = fetchBufferBlockPC;
655  fetchBufferValid[tid] = false;
656  DPRINTF(Fetch, "Fetch: Doing instruction read.\n");
657 
 658  ++fetchStats.cacheLines;
 659 
660  // Access the cache.
661  if (!icachePort.sendTimingReq(data_pkt)) {
662  assert(retryPkt == NULL);
663  assert(retryTid == InvalidThreadID);
664  DPRINTF(Fetch, "[tid:%i] Out of MSHRs!\n", tid);
665 
 666  fetchStatus[tid] = IcacheWaitRetry;
 667  retryPkt = data_pkt;
668  retryTid = tid;
669  cacheBlocked = true;
670  } else {
671  DPRINTF(Fetch, "[tid:%i] Doing Icache access.\n", tid);
672  DPRINTF(Activity, "[tid:%i] Activity: Waiting on I-cache "
673  "response.\n", tid);
674  lastIcacheStall[tid] = curTick();
 675  fetchStatus[tid] = IcacheWaitResponse;
 676  // Notify Fetch Request probe when a packet containing a fetch
677  // request is successfully sent
678  ppFetchRequestSent->notify(mem_req);
679  }
680  } else {
681  // Don't send an instruction to decode if we can't handle it.
682  if (!(numInst < fetchWidth) || !(fetchQueue[tid].size() < fetchQueueSize)) {
 683  assert(!finishTranslationEvent.scheduled());
 684  finishTranslationEvent.setFault(fault);
 685  finishTranslationEvent.setReq(mem_req);
 686  cpu->schedule(finishTranslationEvent,
687  cpu->clockEdge(Cycles(1)));
688  return;
689  }
690  DPRINTF(Fetch, "[tid:%i] Got back req with addr %#x but expected %#x\n",
691  tid, mem_req->getVaddr(), memReq[tid]->getVaddr());
692  // Translation faulted, icache request won't be sent.
693  memReq[tid] = NULL;
694 
695  // Send the fault to commit. This thread will not do anything
696  // until commit handles the fault. The only other way it can
697  // wake up is if a squash comes along and changes the PC.
698  TheISA::PCState fetchPC = pc[tid];
699 
700  DPRINTF(Fetch, "[tid:%i] Translation faulted, building noop.\n", tid);
 701  // We will use a nop in order to carry the fault.
 702  DynInstPtr instruction = buildInst(tid, nopStaticInstPtr,
 703  NULL, fetchPC, fetchPC, false);
704  instruction->setNotAnInst();
705 
706  instruction->setPredTarg(fetchPC);
707  instruction->fault = fault;
708  wroteToTimeBuffer = true;
709 
710  DPRINTF(Activity, "Activity this cycle.\n");
711  cpu->activityThisCycle();
712 
713  fetchStatus[tid] = TrapPending;
714 
715  DPRINTF(Fetch, "[tid:%i] Blocked, need to handle the trap.\n", tid);
716  DPRINTF(Fetch, "[tid:%i] fault (%s) detected @ PC %s.\n",
717  tid, fault->name(), pc[tid]);
718  }
 719  _status = updateFetchStatus();
 720 }
721 
722 template <class Impl>
723 inline void
 724 DefaultFetch<Impl>::doSquash(const TheISA::PCState &newPC,
 725  const DynInstPtr squashInst, ThreadID tid)
726 {
727  DPRINTF(Fetch, "[tid:%i] Squashing, setting PC to: %s.\n",
728  tid, newPC);
729 
730  pc[tid] = newPC;
731  fetchOffset[tid] = 0;
732  if (squashInst && squashInst->pcState().instAddr() == newPC.instAddr())
733  macroop[tid] = squashInst->macroop;
734  else
735  macroop[tid] = NULL;
736  decoder[tid]->reset();
737 
738  // Clear the icache miss if it's outstanding.
739  if (fetchStatus[tid] == IcacheWaitResponse) {
740  DPRINTF(Fetch, "[tid:%i] Squashing outstanding Icache miss.\n",
741  tid);
742  memReq[tid] = NULL;
743  } else if (fetchStatus[tid] == ItlbWait) {
744  DPRINTF(Fetch, "[tid:%i] Squashing outstanding ITLB miss.\n",
745  tid);
746  memReq[tid] = NULL;
747  }
748 
749  // Get rid of the retrying packet if it was from this thread.
750  if (retryTid == tid) {
751  assert(cacheBlocked);
752  if (retryPkt) {
753  delete retryPkt;
754  }
755  retryPkt = NULL;
 756  retryTid = InvalidThreadID;
 757  }
758 
759  fetchStatus[tid] = Squashing;
760 
761  // Empty fetch queue
762  fetchQueue[tid].clear();
763 
 764  // microops are being squashed, it is not known whether the
765  // youngest non-squashed microop was marked delayed commit
766  // or not. Setting the flag to true ensures that the
767  // interrupts are not handled when they cannot be, though
768  // some opportunities to handle interrupts may be missed.
769  delayedCommit[tid] = true;
770 
 771  ++fetchStats.squashCycles;
 772 }
773 
774 template<class Impl>
775 void
 776 DefaultFetch<Impl>::squashFromDecode(const TheISA::PCState &newPC,
 777  const DynInstPtr squashInst,
778  const InstSeqNum seq_num, ThreadID tid)
779 {
780  DPRINTF(Fetch, "[tid:%i] Squashing from decode.\n", tid);
781 
782  doSquash(newPC, squashInst, tid);
783 
784  // Tell the CPU to remove any instructions that are in flight between
785  // fetch and decode.
786  cpu->removeInstsUntil(seq_num, tid);
787 }
788 
789 template<class Impl>
790 bool
 791 DefaultFetch<Impl>::checkStall(ThreadID tid) const
 792 {
793  bool ret_val = false;
794 
795  if (stalls[tid].drain) {
796  assert(cpu->isDraining());
797  DPRINTF(Fetch,"[tid:%i] Drain stall detected.\n",tid);
798  ret_val = true;
799  }
800 
801  return ret_val;
802 }
803 
804 template<class Impl>
 805 typename DefaultFetch<Impl>::FetchStatus
 806 DefaultFetch<Impl>::updateFetchStatus()
 807 {
808  //Check Running
809  std::list<ThreadID>::iterator threads = activeThreads->begin();
 810  std::list<ThreadID>::iterator end = activeThreads->end();
 811 
812  while (threads != end) {
813  ThreadID tid = *threads++;
814 
815  if (fetchStatus[tid] == Running ||
816  fetchStatus[tid] == Squashing ||
 817  fetchStatus[tid] == IcacheAccessComplete) {
 818 
819  if (_status == Inactive) {
820  DPRINTF(Activity, "[tid:%i] Activating stage.\n",tid);
821 
822  if (fetchStatus[tid] == IcacheAccessComplete) {
 823  DPRINTF(Activity, "[tid:%i] Activating fetch due to cache "
 824  "completion\n",tid);
825  }
826 
827  cpu->activateStage(O3CPU::FetchIdx);
828  }
829 
830  return Active;
831  }
832  }
833 
834  // Stage is switching from active to inactive, notify CPU of it.
835  if (_status == Active) {
836  DPRINTF(Activity, "Deactivating stage.\n");
837 
838  cpu->deactivateStage(O3CPU::FetchIdx);
839  }
840 
841  return Inactive;
842 }
843 
844 template <class Impl>
845 void
 846 DefaultFetch<Impl>::squash(const TheISA::PCState &newPC,
 847  const InstSeqNum seq_num, DynInstPtr squashInst,
848  ThreadID tid)
849 {
850  DPRINTF(Fetch, "[tid:%i] Squash from commit.\n", tid);
851 
852  doSquash(newPC, squashInst, tid);
853 
854  // Tell the CPU to remove any instructions that are not in the ROB.
855  cpu->removeInstsNotInROB(tid);
856 }
857 
858 template <class Impl>
859 void
 860 DefaultFetch<Impl>::tick()
 861 {
862  std::list<ThreadID>::iterator threads = activeThreads->begin();
 863  std::list<ThreadID>::iterator end = activeThreads->end();
 864  bool status_change = false;
865 
866  wroteToTimeBuffer = false;
867 
868  for (ThreadID i = 0; i < numThreads; ++i) {
869  issuePipelinedIfetch[i] = false;
870  }
871 
872  while (threads != end) {
873  ThreadID tid = *threads++;
874 
875  // Check the signals for each thread to determine the proper status
876  // for each thread.
877  bool updated_status = checkSignalsAndUpdate(tid);
878  status_change = status_change || updated_status;
879  }
880 
881  DPRINTF(Fetch, "Running stage.\n");
882 
883  if (FullSystem) {
884  if (fromCommit->commitInfo[0].interruptPending) {
885  interruptPending = true;
886  }
887 
888  if (fromCommit->commitInfo[0].clearInterrupt) {
889  interruptPending = false;
890  }
891  }
892 
 893  for (threadFetched = 0; threadFetched < numFetchingThreads;
 894  threadFetched++) {
895  // Fetch each of the actively fetching threads.
896  fetch(status_change);
897  }
898 
899  // Record number of instructions fetched this cycle for distribution.
 900  fetchStats.nisnDist.sample(numInst);
 901 
902  if (status_change) {
903  // Change the fetch stage status if there was a status change.
 904  _status = updateFetchStatus();
 905  }
906 
907  // Issue the next I-cache request if possible.
908  for (ThreadID i = 0; i < numThreads; ++i) {
909  if (issuePipelinedIfetch[i]) {
 910  pipelineIcacheAccesses(i);
 911  }
912  }
913 
914  // Send instructions enqueued into the fetch queue to decode.
915  // Limit rate by fetchWidth. Stall if decode is stalled.
916  unsigned insts_to_decode = 0;
917  unsigned available_insts = 0;
918 
919  for (auto tid : *activeThreads) {
920  if (!stalls[tid].decode) {
921  available_insts += fetchQueue[tid].size();
922  }
923  }
924 
925  // Pick a random thread to start trying to grab instructions from
926  auto tid_itr = activeThreads->begin();
927  std::advance(tid_itr, random_mt.random<uint8_t>(0, activeThreads->size() - 1));
928 
929  while (available_insts != 0 && insts_to_decode < decodeWidth) {
930  ThreadID tid = *tid_itr;
931  if (!stalls[tid].decode && !fetchQueue[tid].empty()) {
932  const auto& inst = fetchQueue[tid].front();
933  toDecode->insts[toDecode->size++] = inst;
934  DPRINTF(Fetch, "[tid:%i] [sn:%llu] Sending instruction to decode "
935  "from fetch queue. Fetch queue size: %i.\n",
936  tid, inst->seqNum, fetchQueue[tid].size());
937 
938  wroteToTimeBuffer = true;
939  fetchQueue[tid].pop_front();
940  insts_to_decode++;
941  available_insts--;
942  }
943 
944  tid_itr++;
945  // Wrap around if at end of active threads list
946  if (tid_itr == activeThreads->end())
947  tid_itr = activeThreads->begin();
948  }
949 
950  // If there was activity this cycle, inform the CPU of it.
951  if (wroteToTimeBuffer) {
952  DPRINTF(Activity, "Activity this cycle.\n");
953  cpu->activityThisCycle();
954  }
955 
956  // Reset the number of the instruction we've fetched.
957  numInst = 0;
958 }
959 
960 template <class Impl>
961 bool
 962 DefaultFetch<Impl>::checkSignalsAndUpdate(ThreadID tid)
 963 {
964  // Update the per thread stall statuses.
965  if (fromDecode->decodeBlock[tid]) {
966  stalls[tid].decode = true;
967  }
968 
969  if (fromDecode->decodeUnblock[tid]) {
970  assert(stalls[tid].decode);
971  assert(!fromDecode->decodeBlock[tid]);
972  stalls[tid].decode = false;
973  }
974 
975  // Check squash signals from commit.
976  if (fromCommit->commitInfo[tid].squash) {
977 
978  DPRINTF(Fetch, "[tid:%i] Squashing instructions due to squash "
979  "from commit.\n",tid);
980  // In any case, squash.
981  squash(fromCommit->commitInfo[tid].pc,
982  fromCommit->commitInfo[tid].doneSeqNum,
983  fromCommit->commitInfo[tid].squashInst, tid);
984 
985  // If it was a branch mispredict on a control instruction, update the
986  // branch predictor with that instruction, otherwise just kill the
 987  // invalid state we generated after the sequence number
988  if (fromCommit->commitInfo[tid].mispredictInst &&
989  fromCommit->commitInfo[tid].mispredictInst->isControl()) {
990  branchPred->squash(fromCommit->commitInfo[tid].doneSeqNum,
991  fromCommit->commitInfo[tid].pc,
992  fromCommit->commitInfo[tid].branchTaken,
993  tid);
994  } else {
995  branchPred->squash(fromCommit->commitInfo[tid].doneSeqNum,
996  tid);
997  }
998 
999  return true;
1000  } else if (fromCommit->commitInfo[tid].doneSeqNum) {
1001  // Update the branch predictor if it wasn't a squashed instruction
1002  // that was broadcasted.
1003  branchPred->update(fromCommit->commitInfo[tid].doneSeqNum, tid);
1004  }
1005 
1006  // Check squash signals from decode.
1007  if (fromDecode->decodeInfo[tid].squash) {
1008  DPRINTF(Fetch, "[tid:%i] Squashing instructions due to squash "
1009  "from decode.\n",tid);
1010 
1011  // Update the branch predictor.
1012  if (fromDecode->decodeInfo[tid].branchMispredict) {
1013  branchPred->squash(fromDecode->decodeInfo[tid].doneSeqNum,
1014  fromDecode->decodeInfo[tid].nextPC,
1015  fromDecode->decodeInfo[tid].branchTaken,
1016  tid);
1017  } else {
1018  branchPred->squash(fromDecode->decodeInfo[tid].doneSeqNum,
1019  tid);
1020  }
1021 
1022  if (fetchStatus[tid] != Squashing) {
1023 
1024  DPRINTF(Fetch, "Squashing from decode with PC = %s\n",
1025  fromDecode->decodeInfo[tid].nextPC);
1026  // Squash unless we're already squashing
1027  squashFromDecode(fromDecode->decodeInfo[tid].nextPC,
1028  fromDecode->decodeInfo[tid].squashInst,
1029  fromDecode->decodeInfo[tid].doneSeqNum,
1030  tid);
1031 
1032  return true;
1033  }
1034  }
1035 
1036  if (checkStall(tid) &&
1037  fetchStatus[tid] != IcacheWaitResponse &&
1038  fetchStatus[tid] != IcacheWaitRetry &&
1039  fetchStatus[tid] != ItlbWait &&
1040  fetchStatus[tid] != QuiescePending) {
1041  DPRINTF(Fetch, "[tid:%i] Setting to blocked\n",tid);
1042 
1043  fetchStatus[tid] = Blocked;
1044 
1045  return true;
1046  }
1047 
1048  if (fetchStatus[tid] == Blocked ||
1049  fetchStatus[tid] == Squashing) {
1050  // Switch status to running if fetch isn't being told to block or
1051  // squash this cycle.
1052  DPRINTF(Fetch, "[tid:%i] Done squashing, switching to running.\n",
1053  tid);
1054 
1055  fetchStatus[tid] = Running;
1056 
1057  return true;
1058  }
1059 
1060  // If we've reached this point, we have not gotten any signals that
1061  // cause fetch to change its status. Fetch remains the same as before.
1062  return false;
1063 }
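// Signal priority in checkSignalsAndUpdate() above: a squash from commit is
// handled first, then a squash from decode, then a drain stall check; only if
// none of these apply does a Blocked or Squashing thread transition back to
// Running.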
1064 
1065 template<class Impl>
1066 typename Impl::DynInstPtr
 1067 DefaultFetch<Impl>::buildInst(ThreadID tid, StaticInstPtr staticInst,
 1068  StaticInstPtr curMacroop, TheISA::PCState thisPC,
1069  TheISA::PCState nextPC, bool trace)
1070 {
1071  // Get a sequence number.
1072  InstSeqNum seq = cpu->getAndIncrementInstSeq();
1073 
1074  // Create a new DynInst from the instruction fetched.
1075  DynInstPtr instruction =
1076  new DynInst(staticInst, curMacroop, thisPC, nextPC, seq, cpu);
1077  instruction->setTid(tid);
1078 
1079  instruction->setThreadState(cpu->thread[tid]);
1080 
1081  DPRINTF(Fetch, "[tid:%i] Instruction PC %#x (%d) created "
1082  "[sn:%lli].\n", tid, thisPC.instAddr(),
1083  thisPC.microPC(), seq);
1084 
1085  DPRINTF(Fetch, "[tid:%i] Instruction is: %s\n", tid,
1086  instruction->staticInst->
1087  disassemble(thisPC.instAddr()));
1088 
1089 #if TRACING_ON
1090  if (trace) {
1091  instruction->traceData =
1092  cpu->getTracer()->getInstRecord(curTick(), cpu->tcBase(tid),
1093  instruction->staticInst, thisPC, curMacroop);
1094  }
1095 #else
1096  instruction->traceData = NULL;
1097 #endif
1098 
1099  // Add instruction to the CPU's list of instructions.
1100  instruction->setInstListIt(cpu->addInst(instruction));
1101 
1102  // Write the instruction to the first slot in the queue
1103  // that heads to decode.
1104  assert(numInst < fetchWidth);
1105  fetchQueue[tid].push_back(instruction);
1106  assert(fetchQueue[tid].size() <= fetchQueueSize);
1107  DPRINTF(Fetch, "[tid:%i] Fetch queue entry created (%i/%i).\n",
1108  tid, fetchQueue[tid].size(), fetchQueueSize);
1109  //toDecode->insts[toDecode->size++] = instruction;
1110 
1111  // Keep track of if we can take an interrupt at this boundary
1112  delayedCommit[tid] = instruction->isDelayedCommit();
1113 
1114  return instruction;
1115 }
1116 
1117 template<class Impl>
1118 void
1119 DefaultFetch<Impl>::fetch(bool &status_change)
1120 {
1122  // Start actual fetch
1124  ThreadID tid = getFetchingThread();
1125 
1126  assert(!cpu->switchedOut());
1127 
1128  if (tid == InvalidThreadID) {
1129  // Breaks looping condition in tick()
 1130  threadFetched = numFetchingThreads;
 1131 
1132  if (numThreads == 1) { // @todo Per-thread stats
1133  profileStall(0);
1134  }
1135 
1136  return;
1137  }
1138 
1139  DPRINTF(Fetch, "Attempting to fetch from [tid:%i]\n", tid);
1140 
1141  // The current PC.
1142  TheISA::PCState thisPC = pc[tid];
1143 
1144  Addr pcOffset = fetchOffset[tid];
1145  Addr fetchAddr = (thisPC.instAddr() + pcOffset) & BaseCPU::PCMask;
1146 
1147  bool inRom = isRomMicroPC(thisPC.microPC());
1148 
1149  // If returning from the delay of a cache miss, then update the status
1150  // to running, otherwise do the cache access. Possibly move this up
1151  // to tick() function.
1152  if (fetchStatus[tid] == IcacheAccessComplete) {
1153  DPRINTF(Fetch, "[tid:%i] Icache miss is complete.\n", tid);
1154 
1155  fetchStatus[tid] = Running;
1156  status_change = true;
1157  } else if (fetchStatus[tid] == Running) {
1158  // Align the fetch PC so its at the start of a fetch buffer segment.
1159  Addr fetchBufferBlockPC = fetchBufferAlignPC(fetchAddr);
1160 
1161  // If buffer is no longer valid or fetchAddr has moved to point
1162  // to the next cache block, AND we have no remaining ucode
1163  // from a macro-op, then start fetch from icache.
1164  if (!(fetchBufferValid[tid] && fetchBufferBlockPC == fetchBufferPC[tid])
1165  && !inRom && !macroop[tid]) {
1166  DPRINTF(Fetch, "[tid:%i] Attempting to translate and read "
1167  "instruction, starting at PC %s.\n", tid, thisPC);
1168 
1169  fetchCacheLine(fetchAddr, tid, thisPC.instAddr());
1170 
 1171  if (fetchStatus[tid] == IcacheWaitResponse)
 1172  ++fetchStats.icacheStallCycles;
 1173  else if (fetchStatus[tid] == ItlbWait)
 1174  ++fetchStats.tlbCycles;
 1175  else
 1176  ++fetchStats.miscStallCycles;
 1177  return;
1178  } else if ((checkInterrupt(thisPC.instAddr()) && !delayedCommit[tid])) {
1179  // Stall CPU if an interrupt is posted and we're not issuing
 1180  // a delayed commit micro-op currently (delayed commit instructions
 1181  // are not interruptable by interrupts, only faults)
 1182  ++fetchStats.miscStallCycles;
 1183  DPRINTF(Fetch, "[tid:%i] Fetch is stalled!\n", tid);
1184  return;
1185  }
1186  } else {
1187  if (fetchStatus[tid] == Idle) {
 1188  ++fetchStats.idleCycles;
 1189  DPRINTF(Fetch, "[tid:%i] Fetch is idle!\n", tid);
1190  }
1191 
1192  // Status is Idle, so fetch should do nothing.
1193  return;
1194  }
1195 
1196  ++fetchStats.cycles;
1197 
1198  TheISA::PCState nextPC = thisPC;
1199 
1200  StaticInstPtr staticInst = NULL;
1201  StaticInstPtr curMacroop = macroop[tid];
1202 
1203  // If the read of the first instruction was successful, then grab the
1204  // instructions from the rest of the cache line and put them into the
1205  // queue heading to decode.
1206 
1207  DPRINTF(Fetch, "[tid:%i] Adding instructions to queue to "
1208  "decode.\n", tid);
1209 
1210  // Need to keep track of whether or not a predicted branch
1211  // ended this fetch block.
1212  bool predictedBranch = false;
1213 
1214  // Need to halt fetch if quiesce instruction detected
1215  bool quiesce = false;
1216 
1217  TheISA::MachInst *cacheInsts =
1218  reinterpret_cast<TheISA::MachInst *>(fetchBuffer[tid]);
1219 
1220  const unsigned numInsts = fetchBufferSize / instSize;
1221  unsigned blkOffset = (fetchAddr - fetchBufferPC[tid]) / instSize;
1222 
1223  // Loop through instruction memory from the cache.
1224  // Keep issuing while fetchWidth is available and branch is not
1225  // predicted taken
1226  while (numInst < fetchWidth && fetchQueue[tid].size() < fetchQueueSize
1227  && !predictedBranch && !quiesce) {
1228  // We need to process more memory if we aren't going to get a
1229  // StaticInst from the rom, the current macroop, or what's already
1230  // in the decoder.
1231  bool needMem = !inRom && !curMacroop &&
1232  !decoder[tid]->instReady();
1233  fetchAddr = (thisPC.instAddr() + pcOffset) & BaseCPU::PCMask;
1234  Addr fetchBufferBlockPC = fetchBufferAlignPC(fetchAddr);
1235 
1236  if (needMem) {
1237  // If buffer is no longer valid or fetchAddr has moved to point
1238  // to the next cache block then start fetch from icache.
1239  if (!fetchBufferValid[tid] ||
1240  fetchBufferBlockPC != fetchBufferPC[tid])
1241  break;
1242 
1243  if (blkOffset >= numInsts) {
1244  // We need to process more memory, but we've run out of the
1245  // current block.
1246  break;
1247  }
1248 
1249  decoder[tid]->moreBytes(thisPC, fetchAddr, cacheInsts[blkOffset]);
1250 
1251  if (decoder[tid]->needMoreBytes()) {
1252  blkOffset++;
1253  fetchAddr += instSize;
1254  pcOffset += instSize;
1255  }
1256  }
1257 
1258  // Extract as many instructions and/or microops as we can from
1259  // the memory we've processed so far.
1260  do {
1261  if (!(curMacroop || inRom)) {
1262  if (decoder[tid]->instReady()) {
1263  staticInst = decoder[tid]->decode(thisPC);
1264 
1265  // Increment stat of fetched instructions.
1266  ++fetchStats.insts;
1267 
1268  if (staticInst->isMacroop()) {
1269  curMacroop = staticInst;
1270  } else {
1271  pcOffset = 0;
1272  }
1273  } else {
1274  // We need more bytes for this instruction so blkOffset and
1275  // pcOffset will be updated
1276  break;
1277  }
1278  }
1279  // Whether we're moving to a new macroop because we're at the
1280  // end of the current one, or the branch predictor incorrectly
1281  // thinks we are...
1282  bool newMacro = false;
1283  if (curMacroop || inRom) {
1284  if (inRom) {
1285  staticInst = decoder[tid]->fetchRomMicroop(
1286  thisPC.microPC(), curMacroop);
1287  } else {
1288  staticInst = curMacroop->fetchMicroop(thisPC.microPC());
1289  }
1290  newMacro |= staticInst->isLastMicroop();
1291  }
1292 
1293  DynInstPtr instruction =
1294  buildInst(tid, staticInst, curMacroop,
1295  thisPC, nextPC, true);
1296 
1297  ppFetch->notify(instruction);
1298  numInst++;
1299 
1300 #if TRACING_ON
1301  if (DTRACE(O3PipeView)) {
1302  instruction->fetchTick = curTick();
1303  }
1304 #endif
1305 
1306  nextPC = thisPC;
1307 
1308  // If we're branching after this instruction, quit fetching
1309  // from the same block.
1310  predictedBranch |= thisPC.branching();
1311  predictedBranch |=
1312  lookupAndUpdateNextPC(instruction, nextPC);
1313  if (predictedBranch) {
1314  DPRINTF(Fetch, "Branch detected with PC = %s\n", thisPC);
1315  }
1316 
1317  newMacro |= thisPC.instAddr() != nextPC.instAddr();
1318 
1319  // Move to the next instruction, unless we have a branch.
1320  thisPC = nextPC;
1321  inRom = isRomMicroPC(thisPC.microPC());
1322 
1323  if (newMacro) {
1324  fetchAddr = thisPC.instAddr() & BaseCPU::PCMask;
1325  blkOffset = (fetchAddr - fetchBufferPC[tid]) / instSize;
1326  pcOffset = 0;
1327  curMacroop = NULL;
1328  }
1329 
1330  if (instruction->isQuiesce()) {
1331  DPRINTF(Fetch,
1332  "Quiesce instruction encountered, halting fetch!\n");
1333  fetchStatus[tid] = QuiescePending;
1334  status_change = true;
1335  quiesce = true;
1336  break;
1337  }
1338  } while ((curMacroop || decoder[tid]->instReady()) &&
1339  numInst < fetchWidth &&
1340  fetchQueue[tid].size() < fetchQueueSize);
1341 
1342  // Re-evaluate whether the next instruction to fetch is in micro-op ROM
1343  // or not.
1344  inRom = isRomMicroPC(thisPC.microPC());
1345  }
1346 
1347  if (predictedBranch) {
1348  DPRINTF(Fetch, "[tid:%i] Done fetching, predicted branch "
1349  "instruction encountered.\n", tid);
1350  } else if (numInst >= fetchWidth) {
1351  DPRINTF(Fetch, "[tid:%i] Done fetching, reached fetch bandwidth "
1352  "for this cycle.\n", tid);
1353  } else if (blkOffset >= fetchBufferSize) {
 1354  DPRINTF(Fetch, "[tid:%i] Done fetching, reached the end of the "
 1355  "fetch buffer.\n", tid);
1356  }
1357 
1358  macroop[tid] = curMacroop;
1359  fetchOffset[tid] = pcOffset;
1360 
1361  if (numInst > 0) {
1362  wroteToTimeBuffer = true;
1363  }
1364 
1365  pc[tid] = thisPC;
1366 
1367  // pipeline a fetch if we're crossing a fetch buffer boundary and not in
1368  // a state that would preclude fetching
1369  fetchAddr = (thisPC.instAddr() + pcOffset) & BaseCPU::PCMask;
1370  Addr fetchBufferBlockPC = fetchBufferAlignPC(fetchAddr);
1371  issuePipelinedIfetch[tid] = fetchBufferBlockPC != fetchBufferPC[tid] &&
1372  fetchStatus[tid] != IcacheWaitResponse &&
1373  fetchStatus[tid] != ItlbWait &&
1374  fetchStatus[tid] != IcacheWaitRetry &&
1375  fetchStatus[tid] != QuiescePending &&
1376  !curMacroop;
1377 }
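// The fetch loop above stops for one of several reasons: a branch was
// predicted taken, the fetch width or fetch queue limit was reached, the
// decoder ran out of bytes in the current fetch buffer segment, or a quiesce
// instruction was encountered. Any remaining macro-op state is saved in
// macroop[tid] and fetchOffset[tid] so decoding can resume next cycle.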
1378 
1379 template<class Impl>
1380 void
 1381 DefaultFetch<Impl>::recvReqRetry()
 1382 {
1383  if (retryPkt != NULL) {
1384  assert(cacheBlocked);
1385  assert(retryTid != InvalidThreadID);
1386  assert(fetchStatus[retryTid] == IcacheWaitRetry);
 1387 
 1388  if (icachePort.sendTimingReq(retryPkt)) {
 1389  fetchStatus[retryTid] = IcacheWaitResponse;
 1390  // Notify Fetch Request probe when a retryPkt is successfully sent.
 1391  // Note that notify must be called before retryPkt is set to NULL.
 1392  ppFetchRequestSent->notify(retryPkt->req);
 1393  retryPkt = NULL;
 1394  retryTid = InvalidThreadID;
 1395  cacheBlocked = false;
1396  }
1397  } else {
1398  assert(retryTid == InvalidThreadID);
1399  // Access has been squashed since it was sent out. Just clear
1400  // the cache being blocked.
1401  cacheBlocked = false;
1402  }
1403 }
1404 
 1405 ///////////////////////////////////////
 1406 //                                    //
 1407 //  SMT FETCH POLICY MAINTAINED HERE  //
 1408 //                                    //
 1409 ///////////////////////////////////////
1410 template<class Impl>
1411 ThreadID
 1412 DefaultFetch<Impl>::getFetchingThread()
 1413 {
1414  if (numThreads > 1) {
1415  switch (fetchPolicy) {
1416  case SMTFetchPolicy::RoundRobin:
1417  return roundRobin();
1418  case SMTFetchPolicy::IQCount:
1419  return iqCount();
1420  case SMTFetchPolicy::LSQCount:
1421  return lsqCount();
1422  case SMTFetchPolicy::Branch:
1423  return branchCount();
1424  default:
1425  return InvalidThreadID;
1426  }
1427  } else {
1428  std::list<ThreadID>::iterator thread = activeThreads->begin();
1429  if (thread == activeThreads->end()) {
1430  return InvalidThreadID;
1431  }
1432 
1433  ThreadID tid = *thread;
1434 
1435  if (fetchStatus[tid] == Running ||
 1436  fetchStatus[tid] == IcacheAccessComplete ||
 1437  fetchStatus[tid] == Idle) {
1438  return tid;
1439  } else {
1440  return InvalidThreadID;
1441  }
1442  }
1443 }
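// With SMT enabled, the configured fetch policy (RoundRobin, IQCount,
// LSQCount or Branch) picks the next thread to fetch from; with a single
// thread, selection reduces to returning that thread whenever it is Running,
// Idle, or has a completed icache access, and InvalidThreadID otherwise.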
1444 
1445 
1446 template<class Impl>
1447 ThreadID
 1448 DefaultFetch<Impl>::roundRobin()
 1449 {
1450  std::list<ThreadID>::iterator pri_iter = priorityList.begin();
 1451  std::list<ThreadID>::iterator end = priorityList.end();
 1452 
1453  ThreadID high_pri;
1454 
1455  while (pri_iter != end) {
1456  high_pri = *pri_iter;
1457 
1458  assert(high_pri <= numThreads);
1459 
1460  if (fetchStatus[high_pri] == Running ||
1461  fetchStatus[high_pri] == IcacheAccessComplete ||
1462  fetchStatus[high_pri] == Idle) {
1463 
1464  priorityList.erase(pri_iter);
1465  priorityList.push_back(high_pri);
1466 
1467  return high_pri;
1468  }
1469 
1470  pri_iter++;
1471  }
1472 
1473  return InvalidThreadID;
1474 }
1475 
1476 template<class Impl>
1477 ThreadID
 1478 DefaultFetch<Impl>::iqCount()
 1479 {
1480  //sorted from lowest->highest
1481  std::priority_queue<unsigned, std::vector<unsigned>,
1482  std::greater<unsigned> > PQ;
1483  std::map<unsigned, ThreadID> threadMap;
1484 
1485  std::list<ThreadID>::iterator threads = activeThreads->begin();
 1486  std::list<ThreadID>::iterator end = activeThreads->end();
 1487 
1488  while (threads != end) {
1489  ThreadID tid = *threads++;
1490  unsigned iqCount = fromIEW->iewInfo[tid].iqCount;
1491 
1492  //we can potentially get tid collisions if two threads
1493  //have the same iqCount, but this should be rare.
1494  PQ.push(iqCount);
1495  threadMap[iqCount] = tid;
1496  }
1497 
1498  while (!PQ.empty()) {
1499  ThreadID high_pri = threadMap[PQ.top()];
1500 
1501  if (fetchStatus[high_pri] == Running ||
1502  fetchStatus[high_pri] == IcacheAccessComplete ||
1503  fetchStatus[high_pri] == Idle)
1504  return high_pri;
1505  else
1506  PQ.pop();
1507 
1508  }
1509 
1510  return InvalidThreadID;
1511 }
1512 
1513 template<class Impl>
1514 ThreadID
 1515 DefaultFetch<Impl>::lsqCount()
 1516 {
1517  //sorted from lowest->highest
1518  std::priority_queue<unsigned, std::vector<unsigned>,
1519  std::greater<unsigned> > PQ;
1520  std::map<unsigned, ThreadID> threadMap;
1521 
1522  std::list<ThreadID>::iterator threads = activeThreads->begin();
 1523  std::list<ThreadID>::iterator end = activeThreads->end();
 1524 
1525  while (threads != end) {
1526  ThreadID tid = *threads++;
1527  unsigned ldstqCount = fromIEW->iewInfo[tid].ldstqCount;
1528 
 1529  //we can potentially get tid collisions if two threads
 1530  //have the same ldstqCount, but this should be rare.
1531  PQ.push(ldstqCount);
1532  threadMap[ldstqCount] = tid;
1533  }
1534 
1535  while (!PQ.empty()) {
1536  ThreadID high_pri = threadMap[PQ.top()];
1537 
1538  if (fetchStatus[high_pri] == Running ||
1539  fetchStatus[high_pri] == IcacheAccessComplete ||
1540  fetchStatus[high_pri] == Idle)
1541  return high_pri;
1542  else
1543  PQ.pop();
1544  }
1545 
1546  return InvalidThreadID;
1547 }
1548 
1549 template<class Impl>
1550 ThreadID
 1551 DefaultFetch<Impl>::branchCount()
 1552 {
1553  panic("Branch Count Fetch policy unimplemented\n");
1554  return InvalidThreadID;
1555 }
1556 
1557 template<class Impl>
1558 void
 1559 DefaultFetch<Impl>::pipelineIcacheAccesses(ThreadID tid)
 1560 {
1561  if (!issuePipelinedIfetch[tid]) {
1562  return;
1563  }
1564 
1565  // The next PC to access.
1566  TheISA::PCState thisPC = pc[tid];
1567 
1568  if (isRomMicroPC(thisPC.microPC())) {
1569  return;
1570  }
1571 
1572  Addr pcOffset = fetchOffset[tid];
1573  Addr fetchAddr = (thisPC.instAddr() + pcOffset) & BaseCPU::PCMask;
1574 
1575  // Align the fetch PC so its at the start of a fetch buffer segment.
1576  Addr fetchBufferBlockPC = fetchBufferAlignPC(fetchAddr);
1577 
1578  // Unless buffer already got the block, fetch it from icache.
1579  if (!(fetchBufferValid[tid] && fetchBufferBlockPC == fetchBufferPC[tid])) {
1580  DPRINTF(Fetch, "[tid:%i] Issuing a pipelined I-cache access, "
1581  "starting at PC %s.\n", tid, thisPC);
1582 
1583  fetchCacheLine(fetchAddr, tid, thisPC.instAddr());
1584  }
1585 }
1586 
1587 template<class Impl>
1588 void
 1589 DefaultFetch<Impl>::profileStall(ThreadID tid) {
 1590  DPRINTF(Fetch,"There are no more threads available to fetch from.\n");
1591 
1592  // @todo Per-thread stats
1593 
 1594  if (stalls[tid].drain) {
 1595  ++fetchStats.pendingDrainCycles;
 1596  DPRINTF(Fetch, "Fetch is waiting for a drain!\n");
 1597  } else if (activeThreads->empty()) {
 1598  ++fetchStats.noActiveThreadStallCycles;
 1599  DPRINTF(Fetch, "Fetch has no active thread!\n");
 1600  } else if (fetchStatus[tid] == Blocked) {
 1601  ++fetchStats.blockedCycles;
 1602  DPRINTF(Fetch, "[tid:%i] Fetch is blocked!\n", tid);
 1603  } else if (fetchStatus[tid] == Squashing) {
 1604  ++fetchStats.squashCycles;
 1605  DPRINTF(Fetch, "[tid:%i] Fetch is squashing!\n", tid);
 1606  } else if (fetchStatus[tid] == IcacheWaitResponse) {
 1607  ++fetchStats.icacheStallCycles;
 1608  DPRINTF(Fetch, "[tid:%i] Fetch is waiting cache response!\n",
 1609  tid);
 1610  } else if (fetchStatus[tid] == ItlbWait) {
 1611  ++fetchStats.tlbCycles;
 1612  DPRINTF(Fetch, "[tid:%i] Fetch is waiting ITLB walk to "
 1613  "finish!\n", tid);
 1614  } else if (fetchStatus[tid] == TrapPending) {
 1615  ++fetchStats.pendingTrapStallCycles;
 1616  DPRINTF(Fetch, "[tid:%i] Fetch is waiting for a pending trap!\n",
 1617  tid);
 1618  } else if (fetchStatus[tid] == QuiescePending) {
 1619  ++fetchStats.pendingQuiesceStallCycles;
 1620  DPRINTF(Fetch, "[tid:%i] Fetch is waiting for a pending quiesce "
 1621  "instruction!\n", tid);
 1622  } else if (fetchStatus[tid] == IcacheWaitRetry) {
 1623  ++fetchStats.icacheWaitRetryStallCycles;
 1624  DPRINTF(Fetch, "[tid:%i] Fetch is waiting for an I-cache retry!\n",
 1625  tid);
1626  } else if (fetchStatus[tid] == NoGoodAddr) {
1627  DPRINTF(Fetch, "[tid:%i] Fetch predicted non-executable address\n",
1628  tid);
1629  } else {
1630  DPRINTF(Fetch, "[tid:%i] Unexpected fetch stall reason "
1631  "(Status: %i)\n",
1632  tid, fetchStatus[tid]);
1633  }
1634 }
1635 
1636 template<class Impl>
1637 bool
 1638 DefaultFetch<Impl>::IcachePort::recvTimingResp(PacketPtr pkt)
 1639 {
1640  DPRINTF(O3CPU, "Fetch unit received timing\n");
1641  // We shouldn't ever get a cacheable block in Modified state
1642  assert(pkt->req->isUncacheable() ||
1643  !(pkt->cacheResponding() && !pkt->hasSharers()));
1644  fetch->processCacheCompletion(pkt);
1645 
1646  return true;
1647 }
1648 
1649 template<class Impl>
1650 void
 1651 DefaultFetch<Impl>::IcachePort::recvReqRetry()
 1652 {
1653  fetch->recvReqRetry();
1654 }
1655 
1656 #endif//__CPU_O3_FETCH_IMPL_HH__
DefaultFetch::FetchStatGroup::cycles
Stats::Scalar cycles
Stat for total number of cycles spent fetching.
Definition: fetch.hh:560
InvalidThreadID
const ThreadID InvalidThreadID
Definition: types.hh:234
DefaultFetch::switchToActive
void switchToActive()
Changes the status of this stage to active, and indicates this to the CPU.
Definition: fetch_impl.hh:484
fatal
#define fatal(...)
This implements a cprintf based fatal() function.
Definition: logging.hh:183
Event::scheduled
bool scheduled() const
Determine if the current event is scheduled.
Definition: eventq.hh:462
DefaultFetch::FetchStatGroup::pendingQuiesceStallCycles
Stats::Scalar pendingQuiesceStallCycles
Total number of stall cycles caused by pending quiesce instructions.
Definition: fetch.hh:582
DefaultFetch::macroop
StaticInstPtr macroop[Impl::MaxThreads]
Definition: fetch.hh:435
DefaultFetch::FinishTranslationEvent::setReq
void setReq(const RequestPtr &_req)
Definition: fetch.hh:154
DefaultFetch::getFetchingThread
ThreadID getFetchingThread()
Returns the appropriate thread to fetch, given the fetch policy.
Definition: fetch_impl.hh:1412
warn
#define warn(...)
Definition: logging.hh:239
DefaultFetch::commitToFetchDelay
Cycles commitToFetchDelay
Commit to fetch delay.
Definition: fetch.hh:470
DefaultFetch::Stalls::decode
bool decode
Definition: fetch.hh:453
system.hh
Packet::cacheResponding
bool cacheResponding() const
Definition: packet.hh:620
DefaultFetch::fetchBufferAlignPC
Addr fetchBufferAlignPC(Addr addr)
Align a PC to the start of a fetch buffer block.
Definition: fetch.hh:368
DefaultFetch::DefaultFetch
DefaultFetch(O3CPU *_cpu, const DerivO3CPUParams &params)
DefaultFetch constructor.
Definition: fetch_impl.hh:75
DefaultFetch::FetchStatGroup::tlbSquashes
Stats::Scalar tlbSquashes
Total number of outstanding tlb accesses that were dropped due to a squash.
Definition: fetch.hh:594
Packet::hasSharers
bool hasSharers() const
Definition: packet.hh:647
DefaultFetch::switchToInactive
void switchToInactive()
Changes the status of this stage to inactive, and indicates this to the CPU.
Definition: fetch_impl.hh:497
DefaultFetch::IcacheAccessComplete
@ IcacheAccessComplete
Definition: fetch.hh:193
DefaultFetch::finishTranslationEvent
FinishTranslationEvent finishTranslationEvent
Event used to delay fault generation of translation faults.
Definition: fetch.hh:543
DefaultFetch::fetchPolicy
SMTFetchPolicy fetchPolicy
Fetch policy.
Definition: fetch.hh:205
DefaultFetch::FetchStatGroup::predictedBranches
Stats::Scalar predictedBranches
Stat for total number of predicted branches.
Definition: fetch.hh:558
DefaultFetch::QuiescePending
@ QuiescePending
Definition: fetch.hh:189
ArmISA::i
Bitfield< 7 > i
Definition: miscregs_types.hh:63
ThreadID
int16_t ThreadID
Thread index/ID type.
Definition: types.hh:233
DefaultFetch::fetchOffset
Addr fetchOffset[Impl::MaxThreads]
Definition: fetch.hh:433
DefaultFetch::FetchStatus
FetchStatus
Overall fetch status.
Definition: fetch.hh:176
ArmISA::MachInst
uint32_t MachInst
Definition: types.hh:52
DefaultFetch::O3CPU
Impl::O3CPU O3CPU
Definition: fetch.hh:78
DefaultFetch::IcacheWaitResponse
@ IcacheWaitResponse
Definition: fetch.hh:191
DefaultFetch::takeOverFrom
void takeOverFrom()
Takes over from another CPU's thread.
Definition: fetch_impl.hh:455
DefaultFetch::FetchStatGroup::pendingTrapStallCycles
Stats::Scalar pendingTrapStallCycles
Total number of stall cycles caused by pending traps.
Definition: fetch.hh:579
DefaultFetch::regProbePoints
void regProbePoints()
Registers probes.
Definition: fetch_impl.hh:150
random.hh
tlb.hh
ProbePointArg< DynInstPtr >
MemCmd::ReadReq
@ ReadReq
Definition: packet.hh:83
DefaultFetch::Active
@ Active
Definition: fetch.hh:177
DefaultFetch::fetchStats
DefaultFetch::FetchStatGroup fetchStats
exetrace.hh
DefaultFetch::NoGoodAddr
@ NoGoodAddr
Definition: fetch.hh:194
DefaultFetch::Blocked
@ Blocked
Definition: fetch.hh:186
DefaultFetch::squashFromDecode
void squashFromDecode(const TheISA::PCState &newPC, const DynInstPtr squashInst, const InstSeqNum seq_num, ThreadID tid)
Squashes a specific thread and resets the PC.
Definition: fetch_impl.hh:776
DefaultFetch::decodeWidth
unsigned decodeWidth
The width of decode in instructions.
Definition: fetch.hh:476
RequestPtr
std::shared_ptr< Request > RequestPtr
Definition: request.hh:86
DefaultFetch::fetchBufferValid
bool fetchBufferValid[Impl::MaxThreads]
Whether or not the fetch buffer data is valid.
Definition: fetch.hh:511
Packet::req
RequestPtr req
A pointer to the original request.
Definition: packet.hh:341
BPredUnit::drainSanityCheck
void drainSanityCheck() const
Perform sanity checks after a drain.
Definition: bpred_unit.cc:111
DTRACE
#define DTRACE(x)
Definition: debug.hh:156
DefaultFetch::cacheBlkSize
unsigned int cacheBlkSize
Cache block size.
Definition: fetch.hh:488
Packet::dataDynamic
void dataDynamic(T *p)
Set the data pointer to a value that should have delete [] called on it.
Definition: packet.hh:1146
DefaultFetch::fetchBufferPC
Addr fetchBufferPC[Impl::MaxThreads]
The PC of the first instruction loaded into the fetch buffer.
Definition: fetch.hh:502
FullSystem
bool FullSystem
The FullSystem variable can be used to determine the current mode of simulation.
Definition: root.cc:204
DefaultFetch::FetchStatGroup::cacheLines
Stats::Scalar cacheLines
Stat for total number of fetched cache lines.
Definition: fetch.hh:586
DefaultFetch::FetchStatGroup::branchRate
Stats::Formula branchRate
Number of branch fetches per cycle.
Definition: fetch.hh:600
ProbePointArg::notify
void notify(const Arg &arg)
called at the ProbePoint call site, passes arg to each listener.
Definition: probe.hh:299
DefaultFetch::fromIEW
TimeBuffer< TimeStruct >::wire fromIEW
Wire to get iew's information from backwards time buffer.
Definition: fetch.hh:419
DefaultFetch::deactivateThread
void deactivateThread(ThreadID tid)
For priority-based fetch policies, need to keep update priorityList.
Definition: fetch_impl.hh:510
TimeBuffer::size
unsigned size
Definition: timebuf.hh:42
DefaultFetch::numFetchingThreads
ThreadID numFetchingThreads
Number of threads that are actively fetching.
Definition: fetch.hh:526
BPredUnit::update
void update(const InstSeqNum &done_sn, ThreadID tid)
Tells the branch predictor to commit any updates until the given sequence number.
Definition: bpred_unit.cc:295
DefaultFetch::FetchStatGroup::noActiveThreadStallCycles
Stats::Scalar noActiveThreadStallCycles
Total number of stall cycles caused by no active threads to run.
Definition: fetch.hh:577
ArmISA::advancePC
void advancePC(PCState &pc, const StaticInstPtr &inst)
Definition: utility.hh:392
DefaultFetch::tick
void tick()
Ticks the fetch stage, processing all inputs signals and fetching as many instructions as possible.
Definition: fetch_impl.hh:860
DefaultFetch::Running
@ Running
Definition: fetch.hh:183
TimeBuffer< TimeStruct >
DefaultFetch::decoder
TheISA::Decoder * decoder[Impl::MaxThreads]
The decoder.
Definition: fetch.hh:374
DefaultFetch::roundRobin
ThreadID roundRobin()
Returns the appropriate thread to fetch using a round robin policy.
Definition: fetch_impl.hh:1448
DefaultFetch::wroteToTimeBuffer
bool wroteToTimeBuffer
Variable that tracks if fetch has written to the time buffer this cycle.
Definition: fetch.hh:446
DefaultFetch::instSize
int instSize
Size of instructions.
Definition: fetch.hh:514
DefaultFetch::wakeFromQuiesce
void wakeFromQuiesce()
Tells fetch to wake up from a quiesce instruction.
Definition: fetch_impl.hh:474
DefaultFetch::pipelineIcacheAccesses
void pipelineIcacheAccesses(ThreadID tid)
Pipeline the next I-cache access to the current one.
Definition: fetch_impl.hh:1559
packet.hh
Random::random
std::enable_if_t< std::is_integral< T >::value, T > random()
Use the SFINAE idiom to choose an implementation based on whether the type is integral or floating po...
Definition: random.hh:86
Stats::DataWrap::flags
Derived & flags(Flags _flags)
Set the flags and marks this stat to print at the end of simulation.
Definition: statistics.hh:339
DefaultFetch::resetStage
void resetStage()
Reset this pipeline stage.
Definition: fetch_impl.hh:317
DefaultFetch::memReq
RequestPtr memReq[Impl::MaxThreads]
Memory request used to access cache.
Definition: fetch.hh:441
DefaultFetch::FetchStatGroup::squashCycles
Stats::Scalar squashCycles
Stat for total number of cycles spent squashing.
Definition: fetch.hh:562
DefaultFetch::retryTid
ThreadID retryTid
The thread that is waiting on the cache to tell fetch to retry.
Definition: fetch.hh:485
DefaultFetch::doSquash
void doSquash(const TheISA::PCState &newPC, const DynInstPtr squashInst, ThreadID tid)
Squashes a specific thread and resets the PC.
Definition: fetch_impl.hh:724
DefaultFetch::lsqCount
ThreadID lsqCount()
Returns the appropriate thread to fetch using the LSQ count policy.
Definition: fetch_impl.hh:1515
random_mt
Random random_mt
Definition: random.cc:96
DefaultFetch::isDrained
bool isDrained() const
Has the stage drained?
Definition: fetch_impl.hh:424
DefaultFetch::icachePort
IcachePort icachePort
Instruction port.
Definition: fetch.hh:537
DefaultFetch::FetchTranslation
Definition: fetch.hh:109
DefaultFetch::fromDecode
TimeBuffer< TimeStruct >::wire fromDecode
Wire to get decode's information from backwards time buffer.
Definition: fetch.hh:413
DefaultFetch::FetchStatGroup::insts
Stats::Scalar insts
Stat for total number of fetched instructions.
Definition: fetch.hh:554
DefaultFetch::fetchCacheLine
bool fetchCacheLine(Addr vaddr, ThreadID tid, Addr pc)
Fetches the cache line that contains the fetch PC.
Definition: fetch_impl.hh:567
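fetchCacheLine requests the whole fetch-buffer-sized block containing the fetch PC rather than a single instruction, so the access address is first rounded down to a fetch-buffer boundary. The alignment step in isolation looks roughly like this (alignToFetchBuffer is a hypothetical helper; fetchBufferSize is the member indexed further down this page and is assumed to be a power of two):

    #include <cstdint>

    using Addr = uint64_t;

    // Round a fetch address down to the start of its fetch-buffer block.
    Addr
    alignToFetchBuffer(Addr vaddr, unsigned fetchBufferSize)
    {
        return vaddr & ~static_cast<Addr>(fetchBufferSize - 1);
    }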
DefaultFetch::FetchStatGroup::miscStallCycles
Stats::Scalar miscStallCycles
Total number of cycles spent in any other state.
Definition: fetch.hh:573
DefaultFetch::setActiveThreads
void setActiveThreads(std::list< ThreadID > *at_ptr)
Sets pointer to list of active threads.
Definition: fetch_impl.hh:270
DefaultFetch::branchCount
ThreadID branchCount()
Returns the appropriate thread to fetch using the branch count policy.
Definition: fetch_impl.hh:1551
DefaultFetch::name
std::string name() const
Returns the name of fetch.
Definition: fetch_impl.hh:143
Stats::DataWrap::prereq
Derived & prereq(const Stat &prereq)
Set the prerequisite stat and marks this stat to print at the end of simulation.
Definition: statistics.hh:353
isa_specific.hh
RequestPort::sendTimingReq
bool sendTimingReq(PacketPtr pkt)
Attempt to send a timing request to the responder port by calling its corresponding receive function.
Definition: port.hh:492
DefaultFetch::issuePipelinedIfetch
bool issuePipelinedIfetch[Impl::MaxThreads]
Set to true if a pipelined I-cache request should be issued.
Definition: fetch.hh:540
DefaultFetch::recvReqRetry
void recvReqRetry()
Handles retrying the fetch access.
Definition: fetch_impl.hh:1381
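sendTimingReq and recvReqRetry form the two halves of the timing-port handshake: if the responder cannot accept the packet, sendTimingReq returns false, the requester holds on to the packet (retryPkt above), and it resends when recvReqRetry arrives. A self-contained sketch of the requester side (StubPort, Requester and trySend are stand-ins, not gem5 classes):

    // Sketch only: models the handshake, not real memory-system behaviour.
    struct Packet;
    using PacketPtr = Packet *;

    struct StubPort {
        bool busy = false;
        bool sendTimingReq(PacketPtr) { return !busy; }  // refuse while busy
    };

    struct Requester {
        StubPort port;
        PacketPtr retryPkt = nullptr;    // mirrors DefaultFetch::retryPkt

        void trySend(PacketPtr pkt) {
            if (!port.sendTimingReq(pkt))
                retryPkt = pkt;          // responder busy: wait for a retry
        }

        void recvReqRetry() {            // responder can accept again
            if (retryPkt && port.sendTimingReq(retryPkt))
                retryPkt = nullptr;      // resend accepted; clear pending state
        }
    };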
DefaultFetch::decodeToFetchDelay
Cycles decodeToFetchDelay
Decode to fetch delay.
Definition: fetch.hh:461
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:237
ADD_STAT
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Definition: group.hh:71
Fault
std::shared_ptr< FaultBase > Fault
Definition: types.hh:246
MipsISA::vaddr
vaddr
Definition: pra_constants.hh:275
DefaultFetch::setFetchQueue
void setFetchQueue(TimeBuffer< FetchStruct > *fq_ptr)
Sets pointer to time buffer used to communicate to the next stage.
Definition: fetch_impl.hh:277
StaticInst::isMacroop
bool isMacroop() const
Definition: static_inst.hh:204
DefaultFetch::iewToFetchDelay
Cycles iewToFetchDelay
IEW to fetch delay.
Definition: fetch.hh:467
DefaultFetch::startupStage
void startupStage()
Initialize stage.
Definition: fetch_impl.hh:285
DefaultFetch::timeBuffer
TimeBuffer< TimeStruct > * timeBuffer
Time buffer interface.
Definition: fetch.hh:410
DefaultFetch::renameToFetchDelay
Cycles renameToFetchDelay
Rename to fetch delay.
Definition: fetch.hh:464
DefaultFetch::Stalls::drain
bool drain
Definition: fetch.hh:454
DefaultFetch::cacheBlocked
bool cacheBlocked
Is the cache blocked? If so no threads can access it.
Definition: fetch.hh:479
DefaultFetch::checkSignalsAndUpdate
bool checkSignalsAndUpdate(ThreadID tid)
Checks all input signals and updates the status as necessary.
Definition: fetch_impl.hh:962
DefaultFetch::FetchStatGroup::branches
Stats::Scalar branches
Total number of fetched branches.
Definition: fetch.hh:556
DefaultFetch::drainSanityCheck
void drainSanityCheck() const
Perform sanity checks after a drain.
Definition: fetch_impl.hh:406
DefaultFetch::FetchStatGroup::blockedCycles
Stats::Scalar blockedCycles
Total number of cycles spent blocked.
Definition: fetch.hh:571
fetch.hh
DefaultFetch::FetchStatGroup::rate
Stats::Formula rate
Number of instruction fetched per cycle.
Definition: fetch.hh:602
InstSeqNum
uint64_t InstSeqNum
Definition: inst_seq.hh:37
UNIT_COUNT
#define UNIT_COUNT
Definition: units.hh:49
NoFault
constexpr decltype(nullptr) NoFault
Definition: types.hh:251
DefaultFetch::buildInst
DynInstPtr buildInst(ThreadID tid, StaticInstPtr staticInst, StaticInstPtr curMacroop, TheISA::PCState thisPC, TheISA::PCState nextPC, bool trace)
Definition: fetch_impl.hh:1067
DefaultFetch::FetchStatGroup::nisnDist
Stats::Distribution nisnDist
Distribution of number of instructions fetched each cycle.
Definition: fetch.hh:596
core.hh
DefaultFetch::drainStall
void drainStall(ThreadID tid)
Stall the fetch stage after reaching a safe drain point.
Definition: fetch_impl.hh:464
ProbePoints::Packet
ProbePointArg< PacketInfo > Packet
Packet probe point.
Definition: mem.hh:103
Addr
uint64_t Addr
Address type. This will probably be moved somewhere else in the near future.
Definition: types.hh:148
DefaultFetch::updateFetchStatus
FetchStatus updateFetchStatus()
Updates overall fetch stage status; to be called at the end of each cycle.
Definition: fetch_impl.hh:806
DefaultFetch::ppFetchRequestSent
ProbePointArg< RequestPtr > * ppFetchRequestSent
To probe when a fetch request is successfully sent.
Definition: fetch.hh:213
DefaultFetch
DefaultFetch class handles both single threaded and SMT fetch.
Definition: fetch.hh:71
DefaultFetch::delayedCommit
bool delayedCommit[Impl::MaxThreads]
Can the fetch stage redirect from an interrupt on this instruction?
Definition: fetch.hh:438
isRomMicroPC
static bool isRomMicroPC(MicroPC upc)
Definition: types.hh:167
Request::INST_FETCH
@ INST_FETCH
The request was an instruction fetch.
Definition: request.hh:108
DefaultFetch::FetchStatGroup::pendingDrainCycles
Stats::Scalar pendingDrainCycles
Total number of cycles spent in waiting for drains.
Definition: fetch.hh:575
DefaultFetch::ppFetch
ProbePointArg< DynInstPtr > * ppFetch
Probe points.
Definition: fetch.hh:211
DefaultFetch::lookupAndUpdateNextPC
bool lookupAndUpdateNextPC(const DynInstPtr &inst, TheISA::PCState &pc)
Looks up in the branch predictor to see if the next PC should be either next PC+=MachInst or a branch target.
Definition: fetch_impl.hh:521
DefaultFetch::toDecode
TimeBuffer< FetchStruct >::wire toDecode
Wire used to write any information heading to decode.
Definition: fetch.hh:426
DefaultFetch::fetchWidth
unsigned fetchWidth
The width of fetch in instructions.
Definition: fetch.hh:473
full_system.hh
DefaultFetch::Squashing
@ Squashing
Definition: fetch.hh:185
DefaultFetch::FinishTranslationEvent::setFault
void setFault(Fault _fault)
Definition: fetch.hh:149
UNIT_RATIO
#define UNIT_RATIO
Definition: units.hh:48
DefaultFetch::FetchStatGroup::idleCycles
Stats::Scalar idleCycles
Stat for total number of cycles spent blocked due to other stages in the pipeline.
Definition: fetch.hh:569
UNIT_RATE
#define UNIT_RATE(T1, T2)
Definition: units.hh:47
DefaultFetch::FetchStatGroup::FetchStatGroup
FetchStatGroup(O3CPU *cpu, DefaultFetch *fetch)
Definition: fetch_impl.hh:160
DefaultFetch::priorityList
std::list< ThreadID > priorityList
List that has the threads organized by priority.
Definition: fetch.hh:208
DefaultFetch::interruptPending
bool interruptPending
Checks if there is an interrupt pending.
Definition: fetch.hh:534
DefaultFetch::fetch
void fetch(bool &status_change)
Does the actual fetching of instructions and passing them on to the next stage.
Definition: fetch_impl.hh:1119
StaticInst::isLastMicroop
bool isLastMicroop() const
Definition: static_inst.hh:207
DefaultFetch::IcachePort::recvReqRetry
virtual void recvReqRetry()
Handles retrying a failed fetch.
Definition: fetch_impl.hh:1651
PseudoInst::quiesce
void quiesce(ThreadContext *tc)
Definition: pseudo_inst.cc:112
DefaultFetch::profileStall
void profileStall(ThreadID tid)
Profile the reasons for fetch stalls.
Definition: fetch_impl.hh:1589
StaticInst::fetchMicroop
virtual StaticInstPtr fetchMicroop(MicroPC upc) const
Return the microop that goes with a particular micropc.
Definition: static_inst.cc:96
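isMacroop, fetchMicroop and isLastMicroop (all indexed on this page) describe how fetch cracks a macro-op: while the current instruction is a macro-op, fetch repeatedly asks it for the micro-op at the current micro-PC and stops after the one that reports isLastMicroop. A self-contained sketch of that loop with stand-in types (MicroOp, MacroOp and crackMacroop are not the gem5 classes; the real calls return StaticInstPtr):

    #include <cstdint>
    #include <vector>

    struct MicroOp {
        bool last = false;                            // set on the final micro-op
        bool isLastMicroop() const { return last; }
    };

    struct MacroOp {
        std::vector<MicroOp> uops;
        const MicroOp *fetchMicroop(uint16_t upc) const { return &uops[upc]; }
    };

    // Crack a macro-op into its micro-ops, one per micro-PC; assumes the
    // final micro-op is marked last, as a well-formed macro-op would be.
    std::vector<MicroOp>
    crackMacroop(const MacroOp &mop)
    {
        std::vector<MicroOp> out;
        uint16_t upc = 0;
        const MicroOp *uop = nullptr;
        do {
            uop = mop.fetchMicroop(upc++);
            out.push_back(*uop);
        } while (!uop->isLastMicroop());
        return out;
    }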
DefaultFetch::numThreads
ThreadID numThreads
Number of threads.
Definition: fetch.hh:523
base.hh
DefaultFetch::clearStates
void clearStates(ThreadID tid)
Clear all thread-specific states.
Definition: fetch_impl.hh:297
MipsISA::PCState
GenericISA::DelaySlotPCState< MachInst > PCState
Definition: types.hh:41
types.hh
DefaultFetch::drainResume
void drainResume()
Resume after a drain.
Definition: fetch_impl.hh:396
Stats::Distribution::init
Distribution & init(Counter min, Counter max, Counter bkt)
Set the parameters of this distribution.
Definition: statistics.hh:2113
Stats::pdf
const FlagsType pdf
Print the percent of the total that this entry represents.
Definition: info.hh:52
DefaultFetch::finishTranslation
void finishTranslation(const Fault &fault, const RequestPtr &mem_req)
Definition: fetch_impl.hh:617
DefaultFetch::fetchQueueSize
unsigned fetchQueueSize
The size of the fetch queue in micro-ops.
Definition: fetch.hh:505
Packet
A Packet is used to encapsulate a transfer between two objects in the memory system.
Definition: packet.hh:258
DefaultFetch::FetchStatGroup::tlbCycles
Stats::Scalar tlbCycles
Stat for total number of cycles spent waiting for translation.
Definition: fetch.hh:564
DefaultFetch::FetchStatGroup::icacheSquashes
Stats::Scalar icacheSquashes
Total number of outstanding icache accesses that were dropped due to a squash.
Definition: fetch.hh:590
DefaultFetch::TrapPending
@ TrapPending
Definition: fetch.hh:188
DefaultFetch::FetchStatGroup::idleRate
Stats::Formula idleRate
Rate of how often fetch was idle.
Definition: fetch.hh:598
DefaultFetch::fromRename
TimeBuffer< TimeStruct >::wire fromRename
Wire to get rename's information from backwards time buffer.
Definition: fetch.hh:416
DefaultFetch::Inactive
@ Inactive
Definition: fetch.hh:178
BaseCPU::PCMask
static const Addr PCMask
Definition: base.hh:281
BPredUnit::predict
bool predict(const StaticInstPtr &inst, const InstSeqNum &seqNum, TheISA::PCState &pc, ThreadID tid)
Predicts whether or not the instruction is a taken branch, and the target of the branch if it is taken.
Definition: bpred_unit.cc:120
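predict, squash and update (all indexed on this page) form the usual lifecycle of a branch in the predictor: fetch calls predict to speculate on the next PC, the backend calls squash on a misprediction to throw away younger speculative history, and commit calls update once the branch retires. A minimal sketch of that call order (StubBPred and branchLifecycle are stand-ins; the real predict takes a StaticInstPtr and a TheISA::PCState rather than a raw address):

    #include <cstdint>

    using InstSeqNum = uint64_t;
    using ThreadID = short;
    using Addr = uint64_t;

    struct StubBPred {
        // Sketch: always predicts not-taken and falls through by 4 bytes.
        bool predict(InstSeqNum, Addr &pc, ThreadID) { pc += 4; return false; }
        void squash(InstSeqNum, ThreadID) {}   // drop speculative history
        void update(InstSeqNum, ThreadID) {}   // train with committed outcome
    };

    // Typical lifecycle for one branch with sequence number `sn`.
    void
    branchLifecycle(StubBPred &bp, Addr &pc, InstSeqNum sn, ThreadID tid,
                    bool mispredicted)
    {
        bp.predict(sn, pc, tid);      // fetch: speculate on the next PC
        if (mispredicted)
            bp.squash(sn, tid);       // backend: roll back younger history
        else
            bp.update(sn, tid);       // commit: commit updates up to sn
    }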
Stats::DistBase::sample
void sample(const U &v, int n=1)
Add a value to the distribution n times.
Definition: statistics.hh:1323
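init, flags and sample are used together: a Distribution is sized and flagged once when the stat group is constructed, then sampled every cycle. The fragment below is not standalone (it assumes it sits inside a gem5 Stats::Group and its owning stage) and only illustrates the call order; the 0..fetchWidth bucket choice is an assumption about how a per-cycle fetch-count distribution such as nisnDist would be configured:

    // Construction time: one bucket per possible per-cycle instruction count.
    nisnDist
        .init(/* min */ 0, /* max */ fetchWidth, /* bucket size */ 1)
        .flags(Stats::pdf);

    // End of each fetch cycle: record how many instructions were fetched.
    nisnDist.sample(numInst);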
Cycles
Cycles is a wrapper class for representing cycle counts.
Definition: types.hh:79
UNIT_CYCLE
#define UNIT_CYCLE
Convenience macros to declare the unit of a stat.
Definition: units.hh:39
DefaultFetch::setTimeBuffer
void setTimeBuffer(TimeBuffer< TimeStruct > *time_buffer)
Sets the main backwards communication time buffer pointer.
Definition: fetch_impl.hh:257
DefaultFetch::IcacheWaitRetry
@ IcacheWaitRetry
Definition: fetch.hh:192
DefaultFetch::ItlbWait
@ ItlbWait
Definition: fetch.hh:190
DefaultFetch::threadFetched
ThreadID threadFetched
Thread ID being fetched.
Definition: fetch.hh:529
DefaultFetch::FetchStatGroup::icacheWaitRetryStallCycles
Stats::Scalar icacheWaitRetryStallCycles
Total number of stall cycles caused by I-cache wait retries.
Definition: fetch.hh:584
Stats
Definition: statistics.cc:53
DefaultFetch::processCacheCompletion
void processCacheCompletion(PacketPtr pkt)
Processes cache completion event.
Definition: fetch_impl.hh:352
DefaultFetch::pc
TheISA::PCState pc[Impl::MaxThreads]
Definition: fetch.hh:431
RefCountingPtr< StaticInst >
curTick
Tick curTick()
The universal simulation clock.
Definition: cur_tick.hh:43
DefaultFetch::Idle
@ Idle
Definition: fetch.hh:184
DefaultFetch::fetchQueue
std::deque< DynInstPtr > fetchQueue[Impl::MaxThreads]
Queue of fetched instructions.
Definition: fetch.hh:508
DefaultFetch::iqCount
ThreadID iqCount()
Returns the appropriate thread to fetch using the IQ count policy.
Definition: fetch_impl.hh:1478
DefaultFetch::DynInst
Impl::DynInst DynInst
Definition: fetch.hh:76
DefaultFetch::cpu
O3CPU * cpu
Pointer to the O3CPU.
Definition: fetch.hh:407
DefaultFetch::branchPred
BPredUnit * branchPred
BPredUnit.
Definition: fetch.hh:429
DefaultFetch::numInst
int numInst
Tracks how many instructions have been fetched this cycle.
Definition: fetch.hh:449
cpu.hh
std::list< ThreadID >
DefaultFetch::fetchBuffer
uint8_t * fetchBuffer[Impl::MaxThreads]
The fetch data that is being fetched and buffered.
Definition: fetch.hh:499
DefaultFetch::_status
FetchStatus _status
Fetch status.
Definition: fetch.hh:199
DefaultFetch::IcachePort::recvTimingResp
virtual bool recvTimingResp(PacketPtr pkt)
Timing version of receive.
Definition: fetch_impl.hh:1638
StaticInst::nopStaticInstPtr
static StaticInstPtr nopStaticInstPtr
Pointer to a statically allocated generic "nop" instruction object.
Definition: static_inst.hh:256
DefaultFetch::stalls
Stalls stalls[Impl::MaxThreads]
Tracks which stages are telling fetch to stall.
Definition: fetch.hh:458
DefaultFetch::retryPkt
PacketPtr retryPkt
The packet that is waiting to be retried.
Definition: fetch.hh:482
Stats::total
const FlagsType total
Print the total.
Definition: info.hh:50
DefaultFetch::checkStall
bool checkStall(ThreadID tid) const
Checks if a thread is stalled.
Definition: fetch_impl.hh:791
BPredUnit::squash
void squash(const InstSeqNum &squashed_sn, ThreadID tid)
Squashes all outstanding updates until a given sequence number.
Definition: bpred_unit.cc:318
BaseTLB::Execute
@ Execute
Definition: tlb.hh:57
DefaultFetch::FetchStatGroup::icacheStallCycles
Stats::Scalar icacheStallCycles
Stat for total number of cycles stalled due to an icache miss.
Definition: fetch.hh:552
DefaultFetch::lastIcacheStall
Counter lastIcacheStall[Impl::MaxThreads]
Icache stall statistics.
Definition: fetch.hh:517
Packet::getConstPtr
const T * getConstPtr() const
Definition: packet.hh:1167
DefaultFetch::checkInterrupt
bool checkInterrupt(Addr pc)
Check if an interrupt is pending and needs to be handled.
Definition: fetch.hh:318
DefaultFetch::fetchStatus
ThreadStatus fetchStatus[Impl::MaxThreads]
Per-thread status.
Definition: fetch.hh:202
DefaultFetch::activeThreads
std::list< ThreadID > * activeThreads
List of Active Threads.
Definition: fetch.hh:520
byteswap.hh
DefaultFetch::squash
void squash(const TheISA::PCState &newPC, const InstSeqNum seq_num, DynInstPtr squashInst, ThreadID tid)
Squashes a specific thread and resets the PC.
Definition: fetch_impl.hh:846
panic
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:171
DefaultFetch::fetchBufferSize
unsigned fetchBufferSize
The size of the fetch buffer in bytes.
Definition: fetch.hh:493
DefaultFetch::fromCommit
TimeBuffer< TimeStruct >::wire fromCommit
Wire to get commit's information from backwards time buffer.
Definition: fetch.hh:422
DefaultFetch::DynInstPtr
Impl::DynInstPtr DynInstPtr
Definition: fetch.hh:77
TimeBuffer::getWire
wire getWire(int idx)
Definition: timebuf.hh:229
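getWire with a negative index returns a wire that reads what a later pipeline stage wrote that many cycles ago, which is how fetch obtains its fromDecode/fromRename/fromIEW/fromCommit wires when setTimeBuffer is called. A fragment sketching that idiom (not standalone; commitToFetchDelay is the assumed name of the analogous delay parameter for the commit wire):

    // Roughly what DefaultFetch::setTimeBuffer() does with the backwards
    // time buffer it is handed.
    timeBuffer = time_buffer;
    fromDecode = timeBuffer->getWire(-decodeToFetchDelay);
    fromRename = timeBuffer->getWire(-renameToFetchDelay);
    fromIEW    = timeBuffer->getWire(-iewToFetchDelay);
    fromCommit = timeBuffer->getWire(-commitToFetchDelay);  // assumed name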
eventq.hh
