gem5  v20.0.0.3
fetch2.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2013-2014,2016 ARM Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions are
16  * met: redistributions of source code must retain the above copyright
17  * notice, this list of conditions and the following disclaimer;
18  * redistributions in binary form must reproduce the above copyright
19  * notice, this list of conditions and the following disclaimer in the
20  * documentation and/or other materials provided with the distribution;
21  * neither the name of the copyright holders nor the names of its
22  * contributors may be used to endorse or promote products derived from
23  * this software without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 #include "cpu/minor/fetch2.hh"
39 
40 #include <string>
41 
42 #include "arch/decoder.hh"
43 #include "arch/utility.hh"
44 #include "cpu/minor/pipeline.hh"
45 #include "cpu/pred/bpred_unit.hh"
46 #include "debug/Branch.hh"
47 #include "debug/Fetch.hh"
48 #include "debug/MinorTrace.hh"
49 
50 namespace Minor
51 {
52 
53 Fetch2::Fetch2(const std::string &name,
54  MinorCPU &cpu_,
55  MinorCPUParams &params,
57  Latch<BranchData>::Output branchInp_,
58  Latch<BranchData>::Input predictionOut_,
60  std::vector<InputBuffer<ForwardInstData>> &next_stage_input_buffer) :
61  Named(name),
62  cpu(cpu_),
63  inp(inp_),
64  branchInp(branchInp_),
65  predictionOut(predictionOut_),
66  out(out_),
67  nextStageReserve(next_stage_input_buffer),
68  outputWidth(params.decodeInputWidth),
69  processMoreThanOneInput(params.fetch2CycleInput),
70  branchPredictor(*params.branchPred),
71  fetchInfo(params.numThreads),
72  threadPriority(0)
73 {
74  if (outputWidth < 1)
75  fatal("%s: decodeInputWidth must be >= 1 (%d)\n", name, outputWidth);
76 
77  if (params.fetch2InputBufferSize < 1) {
78  fatal("%s: fetch2InputBufferSize must be >= 1 (%d)\n", name,
79  params.fetch2InputBufferSize);
80  }
81 
82  /* Per-thread input buffers */
83  for (ThreadID tid = 0; tid < params.numThreads; tid++) {
84  inputBuffer.push_back(
86  name + ".inputBuffer" + std::to_string(tid), "lines",
87  params.fetch2InputBufferSize));
88  }
89 }
90 
91 const ForwardLineData *
93 {
94  /* Get a line from the inputBuffer to work with */
95  if (!inputBuffer[tid].empty()) {
96  return &(inputBuffer[tid].front());
97  } else {
98  return NULL;
99  }
100 }
101 
102 void
104 {
105  if (!inputBuffer[tid].empty()) {
106  inputBuffer[tid].front().freeLine();
107  inputBuffer[tid].pop();
108  }
109 
110  fetchInfo[tid].inputIndex = 0;
111 }
112 
113 void
115 {
116  DPRINTF(Fetch, "Dumping whole input buffer\n");
117  while (!inputBuffer[tid].empty())
118  popInput(tid);
119 
120  fetchInfo[tid].inputIndex = 0;
121 }
122 
123 void
125 {
126  MinorDynInstPtr inst = branch.inst;
127 
128  /* Don't even consider instructions we didn't try to predict or faults */
129  if (inst->isFault() || !inst->triedToPredict)
130  return;
131 
132  switch (branch.reason) {
134  /* No data to update */
135  break;
137  /* Never try to predict interrupts */
138  break;
140  /* Don't need to act on suspends */
141  break;
143  /* Don't need to act on fetch wakeup */
144  break;
146  /* Shouldn't happen. Fetch2 is the only source of
147  * BranchPredictions */
148  break;
150  /* Unpredicted branch or barrier */
151  DPRINTF(Branch, "Unpredicted branch seen inst: %s\n", *inst);
152  branchPredictor.squash(inst->id.fetchSeqNum,
153  branch.target, true, inst->id.threadId);
154  // Update after squashing to accomodate O3CPU
155  // using the branch prediction code.
156  branchPredictor.update(inst->id.fetchSeqNum,
157  inst->id.threadId);
158  break;
160  /* Predicted taken, was taken */
161  DPRINTF(Branch, "Branch predicted correctly inst: %s\n", *inst);
162  branchPredictor.update(inst->id.fetchSeqNum,
163  inst->id.threadId);
164  break;
166  /* Predicted taken, not taken */
167  DPRINTF(Branch, "Branch mis-predicted inst: %s\n", *inst);
168  branchPredictor.squash(inst->id.fetchSeqNum,
169  branch.target /* Not used */, false, inst->id.threadId);
170  // Update after squashing to accomodate O3CPU
171  // using the branch prediction code.
172  branchPredictor.update(inst->id.fetchSeqNum,
173  inst->id.threadId);
174  break;
176  /* Predicted taken, was taken but to a different target */
177  DPRINTF(Branch, "Branch mis-predicted target inst: %s target: %s\n",
178  *inst, branch.target);
179  branchPredictor.squash(inst->id.fetchSeqNum,
180  branch.target, true, inst->id.threadId);
181  break;
182  }
183 }
184 
185 void
187 {
188  Fetch2ThreadInfo &thread = fetchInfo[inst->id.threadId];
189  TheISA::PCState inst_pc = inst->pc;
190 
191  assert(!inst->predictedTaken);
192 
193  /* Skip non-control/sys call instructions */
194  if (inst->staticInst->isControl() ||
195  inst->staticInst->isSyscall())
196  {
197  /* Tried to predict */
198  inst->triedToPredict = true;
199 
200  DPRINTF(Branch, "Trying to predict for inst: %s\n", *inst);
201 
202  if (branchPredictor.predict(inst->staticInst,
203  inst->id.fetchSeqNum, inst_pc,
204  inst->id.threadId))
205  {
206  inst->predictedTaken = true;
207  inst->predictedTarget = inst_pc;
208  branch.target = inst_pc;
209  }
210  } else {
211  DPRINTF(Branch, "Not attempting prediction for inst: %s\n", *inst);
212  }
213 
214  /* If we predict taken, set branch and update sequence numbers */
215  if (inst->predictedTaken) {
216  /* Update the predictionSeqNum and remember the streamSeqNum that it
217  * was associated with */
218  thread.expectedStreamSeqNum = inst->id.streamSeqNum;
219 
221  inst->id.threadId,
222  inst->id.streamSeqNum, thread.predictionSeqNum + 1,
223  inst->predictedTarget, inst);
224 
225  /* Mark with a new prediction number by the stream number of the
226  * instruction causing the prediction */
227  thread.predictionSeqNum++;
228  branch = new_branch;
229 
230  DPRINTF(Branch, "Branch predicted taken inst: %s target: %s"
231  " new predictionSeqNum: %d\n",
232  *inst, inst->predictedTarget, thread.predictionSeqNum);
233  }
234 }
235 
236 void
238 {
239  /* Push input onto appropriate input buffer */
240  if (!inp.outputWire->isBubble())
241  inputBuffer[inp.outputWire->id.threadId].setTail(*inp.outputWire);
242 
243  ForwardInstData &insts_out = *out.inputWire;
244  BranchData prediction;
245  BranchData &branch_inp = *branchInp.outputWire;
246 
247  assert(insts_out.isBubble());
248 
249  /* React to branches from Execute to update local branch prediction
250  * structures */
251  updateBranchPrediction(branch_inp);
252 
253  /* If a branch arrives, don't try and do anything about it. Only
254  * react to your own predictions */
255  if (branch_inp.isStreamChange()) {
256  DPRINTF(Fetch, "Dumping all input as a stream changing branch"
257  " has arrived\n");
258  dumpAllInput(branch_inp.threadId);
259  fetchInfo[branch_inp.threadId].havePC = false;
260  }
261 
262  assert(insts_out.isBubble());
263  /* Even when blocked, clear out input lines with the wrong
264  * prediction sequence number */
265  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
266  Fetch2ThreadInfo &thread = fetchInfo[tid];
267 
268  thread.blocked = !nextStageReserve[tid].canReserve();
269 
270  const ForwardLineData *line_in = getInput(tid);
271 
272  while (line_in &&
273  thread.expectedStreamSeqNum == line_in->id.streamSeqNum &&
274  thread.predictionSeqNum != line_in->id.predictionSeqNum)
275  {
276  DPRINTF(Fetch, "Discarding line %s"
277  " due to predictionSeqNum mismatch (expected: %d)\n",
278  line_in->id, thread.predictionSeqNum);
279 
280  popInput(tid);
281  fetchInfo[tid].havePC = false;
282 
284  DPRINTF(Fetch, "Wrapping\n");
285  line_in = getInput(tid);
286  } else {
287  line_in = NULL;
288  }
289  }
290  }
291 
293  DPRINTF(Fetch, "Scheduled Thread: %d\n", tid);
294 
295  assert(insts_out.isBubble());
296  if (tid != InvalidThreadID) {
297  Fetch2ThreadInfo &fetch_info = fetchInfo[tid];
298 
299  const ForwardLineData *line_in = getInput(tid);
300 
301  unsigned int output_index = 0;
302 
303  /* Pack instructions into the output while we can. This may involve
304  * using more than one input line. Note that lineWidth will be 0
305  * for faulting lines */
306  while (line_in &&
307  (line_in->isFault() ||
308  fetch_info.inputIndex < line_in->lineWidth) && /* More input */
309  output_index < outputWidth && /* More output to fill */
310  prediction.isBubble() /* No predicted branch */)
311  {
312  ThreadContext *thread = cpu.getContext(line_in->id.threadId);
313  TheISA::Decoder *decoder = thread->getDecoderPtr();
314 
315  /* Discard line due to prediction sequence number being wrong but
316  * without the streamSeqNum number having changed */
317  bool discard_line =
318  fetch_info.expectedStreamSeqNum == line_in->id.streamSeqNum &&
319  fetch_info.predictionSeqNum != line_in->id.predictionSeqNum;
320 
321  /* Set the PC if the stream changes. Setting havePC to false in
322  * a previous cycle handles all other change of flow of control
323  * issues */
324  bool set_pc = fetch_info.lastStreamSeqNum != line_in->id.streamSeqNum;
325 
326  if (!discard_line && (!fetch_info.havePC || set_pc)) {
327  /* Set the inputIndex to be the MachInst-aligned offset
328  * from lineBaseAddr of the new PC value */
329  fetch_info.inputIndex =
330  (line_in->pc.instAddr() & BaseCPU::PCMask) -
331  line_in->lineBaseAddr;
332  DPRINTF(Fetch, "Setting new PC value: %s inputIndex: 0x%x"
333  " lineBaseAddr: 0x%x lineWidth: 0x%x\n",
334  line_in->pc, fetch_info.inputIndex, line_in->lineBaseAddr,
335  line_in->lineWidth);
336  fetch_info.pc = line_in->pc;
337  fetch_info.havePC = true;
338  decoder->reset();
339  }
340 
341  /* The generated instruction. Leave as NULL if no instruction
342  * is to be packed into the output */
343  MinorDynInstPtr dyn_inst = NULL;
344 
345  if (discard_line) {
346  /* Rest of line was from an older prediction in the same
347  * stream */
348  DPRINTF(Fetch, "Discarding line %s (from inputIndex: %d)"
349  " due to predictionSeqNum mismatch (expected: %d)\n",
350  line_in->id, fetch_info.inputIndex,
351  fetch_info.predictionSeqNum);
352  } else if (line_in->isFault()) {
353  /* Pack a fault as a MinorDynInst with ->fault set */
354 
355  /* Make a new instruction and pick up the line, stream,
356  * prediction, thread ids from the incoming line */
357  dyn_inst = new MinorDynInst(line_in->id);
358 
359  /* Fetch and prediction sequence numbers originate here */
360  dyn_inst->id.fetchSeqNum = fetch_info.fetchSeqNum;
361  dyn_inst->id.predictionSeqNum = fetch_info.predictionSeqNum;
362  /* To complete the set, test that exec sequence number has
363  * not been set */
364  assert(dyn_inst->id.execSeqNum == 0);
365 
366  dyn_inst->pc = fetch_info.pc;
367 
368  /* Pack a faulting instruction but allow other
369  * instructions to be generated. (Fetch2 makes no
370  * immediate judgement about streamSeqNum) */
371  dyn_inst->fault = line_in->fault;
372  DPRINTF(Fetch, "Fault being passed output_index: "
373  "%d: %s\n", output_index, dyn_inst->fault->name());
374  } else {
375  uint8_t *line = line_in->line;
376 
377  /* The instruction is wholly in the line, can just
378  * assign */
379  auto inst_word = *reinterpret_cast<TheISA::MachInst *>
380  (line + fetch_info.inputIndex);
381 
382  if (!decoder->instReady()) {
383  decoder->moreBytes(fetch_info.pc,
384  line_in->lineBaseAddr + fetch_info.inputIndex,
385  inst_word);
386  DPRINTF(Fetch, "Offering MachInst to decoder addr: 0x%x\n",
387  line_in->lineBaseAddr + fetch_info.inputIndex);
388  }
389 
390  /* Maybe make the above a loop to accomodate ISAs with
391  * instructions longer than sizeof(MachInst) */
392 
393  if (decoder->instReady()) {
394  /* Make a new instruction and pick up the line, stream,
395  * prediction, thread ids from the incoming line */
396  dyn_inst = new MinorDynInst(line_in->id);
397 
398  /* Fetch and prediction sequence numbers originate here */
399  dyn_inst->id.fetchSeqNum = fetch_info.fetchSeqNum;
400  dyn_inst->id.predictionSeqNum = fetch_info.predictionSeqNum;
401  /* To complete the set, test that exec sequence number
402  * has not been set */
403  assert(dyn_inst->id.execSeqNum == 0);
404 
405  /* Note that the decoder can update the given PC.
406  * Remember not to assign it until *after* calling
407  * decode */
408  StaticInstPtr decoded_inst = decoder->decode(fetch_info.pc);
409  dyn_inst->staticInst = decoded_inst;
410 
411  dyn_inst->pc = fetch_info.pc;
412  DPRINTF(Fetch, "decoder inst %s\n", *dyn_inst);
413 
414  // Collect some basic inst class stats
415  if (decoded_inst->isLoad())
417  else if (decoded_inst->isStore())
419  else if (decoded_inst->isAtomic())
420  amoInstructions++;
421  else if (decoded_inst->isVector())
422  vecInstructions++;
423  else if (decoded_inst->isFloating())
424  fpInstructions++;
425  else if (decoded_inst->isInteger())
426  intInstructions++;
427 
428  DPRINTF(Fetch, "Instruction extracted from line %s"
429  " lineWidth: %d output_index: %d inputIndex: %d"
430  " pc: %s inst: %s\n",
431  line_in->id,
432  line_in->lineWidth, output_index, fetch_info.inputIndex,
433  fetch_info.pc, *dyn_inst);
434 
435 #if THE_ISA == X86_ISA || THE_ISA == ARM_ISA
436  /* In SE mode, it's possible to branch to a microop when
437  * replaying faults such as page faults (or simply
438  * intra-microcode branches in X86). Unfortunately,
439  * as Minor has micro-op decomposition in a separate
440  * pipeline stage from instruction decomposition, the
441  * following advancePC (which may follow a branch with
442  * microPC() != 0) *must* see a fresh macroop. This
443  * kludge should be improved with an addition to PCState
444  * but I offer it in this form for the moment
445  *
446  * X86 can branch within microops so we need to deal with
447  * the case that, after a branch, the first un-advanced PC
448  * may be pointing to a microop other than 0. Once
449  * advanced, however, the microop number *must* be 0 */
450  fetch_info.pc.upc(0);
451  fetch_info.pc.nupc(1);
452 #endif
453 
454  /* Advance PC for the next instruction */
455  TheISA::advancePC(fetch_info.pc, decoded_inst);
456 
457  /* Predict any branches and issue a branch if
458  * necessary */
459  predictBranch(dyn_inst, prediction);
460  } else {
461  DPRINTF(Fetch, "Inst not ready yet\n");
462  }
463 
464  /* Step on the pointer into the line if there's no
465  * complete instruction waiting */
466  if (decoder->needMoreBytes()) {
467  fetch_info.inputIndex += sizeof(TheISA::MachInst);
468 
469  DPRINTF(Fetch, "Updated inputIndex value PC: %s"
470  " inputIndex: 0x%x lineBaseAddr: 0x%x lineWidth: 0x%x\n",
471  line_in->pc, fetch_info.inputIndex, line_in->lineBaseAddr,
472  line_in->lineWidth);
473  }
474  }
475 
476  if (dyn_inst) {
477  /* Step to next sequence number */
478  fetch_info.fetchSeqNum++;
479 
480  /* Correctly size the output before writing */
481  if (output_index == 0) {
482  insts_out.resize(outputWidth);
483  }
484  /* Pack the generated dynamic instruction into the output */
485  insts_out.insts[output_index] = dyn_inst;
486  output_index++;
487 
488  /* Output MinorTrace instruction info for
489  * pre-microop decomposition macroops */
490  if (DTRACE(MinorTrace) && !dyn_inst->isFault() &&
491  dyn_inst->staticInst->isMacroop())
492  {
493  dyn_inst->minorTraceInst(*this);
494  }
495  }
496 
497  /* Remember the streamSeqNum of this line so we can tell when
498  * we change stream */
499  fetch_info.lastStreamSeqNum = line_in->id.streamSeqNum;
500 
501  /* Asked to discard line or there was a branch or fault */
502  if (!prediction.isBubble() || /* The remains of a
503  line with a prediction in it */
504  line_in->isFault() /* A line which is just a fault */)
505  {
506  DPRINTF(Fetch, "Discarding all input on branch/fault\n");
507  dumpAllInput(tid);
508  fetch_info.havePC = false;
509  line_in = NULL;
510  } else if (discard_line) {
511  /* Just discard one line, one's behind it may have new
512  * stream sequence numbers. There's a DPRINTF above
513  * for this event */
514  popInput(tid);
515  fetch_info.havePC = false;
516  line_in = NULL;
517  } else if (fetch_info.inputIndex == line_in->lineWidth) {
518  /* Got to end of a line, pop the line but keep PC
519  * in case this is a line-wrapping inst. */
520  popInput(tid);
521  line_in = NULL;
522  }
523 
524  if (!line_in && processMoreThanOneInput) {
525  DPRINTF(Fetch, "Wrapping\n");
526  line_in = getInput(tid);
527  }
528  }
529 
530  /* The rest of the output (if any) should already have been packed
531  * with bubble instructions by insts_out's initialisation */
532  }
533  if (tid == InvalidThreadID) {
534  assert(insts_out.isBubble());
535  }
537  *predictionOut.inputWire = prediction;
538 
539  /* If we generated output, reserve space for the result in the next stage
540  * and mark the stage as being active this cycle */
541  if (!insts_out.isBubble()) {
542  /* Note activity of following buffer */
544  insts_out.threadId = tid;
545  nextStageReserve[tid].reserve();
546  }
547 
548  /* If we still have input to process and somewhere to put it,
549  * mark stage as active */
550  for (ThreadID i = 0; i < cpu.numThreads; i++)
551  {
552  if (getInput(i) && nextStageReserve[i].canReserve()) {
554  break;
555  }
556  }
557 
558  /* Make sure the input (if any left) is pushed */
559  if (!inp.outputWire->isBubble())
560  inputBuffer[inp.outputWire->id.threadId].pushTail();
561 }
562 
563 inline ThreadID
565 {
566  /* Select thread via policy. */
567  std::vector<ThreadID> priority_list;
568 
569  switch (cpu.threadPolicy) {
570  case Enums::SingleThreaded:
571  priority_list.push_back(0);
572  break;
573  case Enums::RoundRobin:
574  priority_list = cpu.roundRobinPriority(threadPriority);
575  break;
576  case Enums::Random:
577  priority_list = cpu.randomPriority();
578  break;
579  default:
580  panic("Unknown fetch policy");
581  }
582 
583  for (auto tid : priority_list) {
584  if (getInput(tid) && !fetchInfo[tid].blocked) {
585  threadPriority = tid;
586  return tid;
587  }
588  }
589 
590  return InvalidThreadID;
591 }
592 
593 bool
595 {
596  for (const auto &buffer : inputBuffer) {
597  if (!buffer.empty())
598  return false;
599  }
600 
601  return (*inp.outputWire).isBubble() &&
602  (*predictionOut.inputWire).isBubble();
603 }
604 
605 void
607 {
608  using namespace Stats;
609 
611  .name(name() + ".int_instructions")
612  .desc("Number of integer instructions successfully decoded")
613  .flags(total);
614 
616  .name(name() + ".fp_instructions")
617  .desc("Number of floating point instructions successfully decoded")
618  .flags(total);
619 
621  .name(name() + ".vec_instructions")
622  .desc("Number of SIMD instructions successfully decoded")
623  .flags(total);
624 
626  .name(name() + ".load_instructions")
627  .desc("Number of memory load instructions successfully decoded")
628  .flags(total);
629 
631  .name(name() + ".store_instructions")
632  .desc("Number of memory store instructions successfully decoded")
633  .flags(total);
634 
636  .name(name() + ".amo_instructions")
637  .desc("Number of memory atomic instructions successfully decoded")
638  .flags(total);
639 }
640 
641 void
643 {
644  std::ostringstream data;
645 
646  if (fetchInfo[0].blocked)
647  data << 'B';
648  else
649  (*out.inputWire).reportData(data);
650 
651  MINORTRACE("inputIndex=%d havePC=%d predictionSeqNum=%d insts=%s\n",
652  fetchInfo[0].inputIndex, fetchInfo[0].havePC, fetchInfo[0].predictionSeqNum, data.str());
653  inputBuffer[0].minorTrace();
654 }
655 
656 }
InstSeqNum fetchSeqNum
Fetch2 is the source of fetch sequence numbers.
Definition: fetch2.hh:146
MinorDynInstPtr inst
Instruction which caused this branch.
Definition: pipe_data.hh:122
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:163
uint32_t MachInst
Definition: types.hh:52
#define DPRINTF(x,...)
Definition: trace.hh:225
bool isStore() const
Definition: static_inst.hh:162
Latch< BranchData >::Output branchInp
Input port carrying branches from Execute.
Definition: fetch2.hh:70
virtual TheISA::Decoder * getDecoderPtr()=0
Latch< ForwardInstData >::Input out
Output port carrying instructions into Decode.
Definition: fetch2.hh:76
#define fatal(...)
This implements a cprintf based fatal() function.
Definition: logging.hh:171
const std::string & name()
Definition: trace.cc:50
Bitfield< 7 > i
bool processMoreThanOneInput
If true, more than one input word can be processed each cycle if there is room in the output to conta...
Definition: fetch2.hh:86
Like a Queue but with a restricted interface and a setTail function which, when the queue is empty...
Definition: buffers.hh:565
ThreadID numThreads
Number of threads we're actually simulating (<= SMT_MAX_THREADS).
Definition: base.hh:374
Addr lineBaseAddr
First byte address in the line.
Definition: pipe_data.hh:181
static bool isStreamChange(const BranchData::Reason reason)
Is a request with this reason actually a request to change the PC rather than a bubble or branch pred...
Definition: pipe_data.cc:81
ThreadID threadId
ThreadID associated with branch.
Definition: pipe_data.hh:112
bool isLoad() const
Definition: static_inst.hh:161
std::vector< InputBuffer< ForwardLineData > > inputBuffer
Definition: fetch2.hh:93
void minorTrace() const
Definition: fetch2.cc:642
Line fetch data in the forward direction.
Definition: pipe_data.hh:171
Reason reason
Explanation for this branch.
Definition: pipe_data.hh:109
bool isFloating() const
Definition: static_inst.hh:171
void activity()
Records that there is activity this cycle.
Definition: activity.cc:54
TheISA::PCState pc
Remembered program counter value.
Definition: fetch2.hh:133
bool isAtomic() const
Definition: static_inst.hh:163
Latch< BranchData >::Input predictionOut
Output port carrying predictions back to Fetch1.
Definition: fetch2.hh:73
std::vector< Fetch2ThreadInfo > fetchInfo
Definition: fetch2.hh:163
void update(const InstSeqNum &done_sn, ThreadID tid)
Tells the branch predictor to commit any updates until the given sequence number. ...
Definition: bpred_unit.cc:345
TheISA::PCState pc
PC of the first requested inst within this line.
Definition: pipe_data.hh:184
Fetch2 receives lines of data from Fetch1, separates them into instructions and passes them to Decode...
Minor contains all the definitions within the MinorCPU apart from the CPU class itself.
Definition: activity.cc:44
Derived & flags(Flags _flags)
Set the flags and marks this stat to print at the end of simulation.
Definition: statistics.hh:333
ThreadContext is the external interface to all thread state for anything outside of the CPU...
unsigned int outputWidth
Width of output of this stage/input of next in instructions.
Definition: fetch2.hh:82
Stats::Scalar vecInstructions
Definition: fetch2.hh:169
STL vector class.
Definition: stl.hh:37
Stats::Scalar loadInstructions
Definition: fetch2.hh:170
InstSeqNum predictionSeqNum
The predicted qualifier to stream, attached by Fetch2 as a consequence of branch prediction.
Definition: dyn_inst.hh:90
Definition: trace.hh:147
ThreadID threadPriority
Definition: fetch2.hh:164
bool isDrained()
Is this stage drained? For Fetch2, draining is initiated by Execute halting Fetch1 causing Fetch2 to ...
Definition: fetch2.cc:594
#define DTRACE(x)
Definition: trace.hh:223
MinorCPU & cpu
Pointer back to the containing CPU.
Definition: fetch2.hh:63
Stats::Scalar intInstructions
Stats.
Definition: fetch2.hh:167
InstSeqNum predictionSeqNum
Fetch2 is the source of prediction sequence numbers.
Definition: fetch2.hh:157
void popInput(ThreadID tid)
Pop an element off the input buffer, if there are any.
Definition: fetch2.cc:103
Stats::Scalar fpInstructions
Definition: fetch2.hh:168
void activateStage(const int idx)
Marks a stage as active.
Definition: activity.cc:90
std::vector< ThreadID > randomPriority()
Definition: cpu.hh:174
void resize(unsigned int width)
Resize a bubble/empty ForwardInstData and fill with bubbles.
Definition: pipe_data.cc:260
bool isBubble() const
Definition: pipe_data.hh:148
bool isBubble() const
BubbleIF interface.
Definition: pipe_data.cc:247
MinorDynInstPtr insts[MAX_FORWARD_INSTS]
Array of carried insts, ref counted.
Definition: pipe_data.hh:257
bool predict(const StaticInstPtr &inst, const InstSeqNum &seqNum, TheISA::PCState &pc, ThreadID tid)
Predicts whether or not the instruction is a taken branch, and the target of the branch if it is take...
Definition: bpred_unit.cc:170
Minor::MinorActivityRecorder * activityRecorder
Activity recording for pipeline.
Definition: cpu.hh:88
uint8_t * line
Line data.
Definition: pipe_data.hh:199
unsigned int lineWidth
Explicit line width, don't rely on data.size.
Definition: pipe_data.hh:187
InstId id
Thread, stream, prediction ...
Definition: pipe_data.hh:195
void advancePC(PCState &pc, const StaticInstPtr &inst)
Definition: utility.hh:393
bool isInteger() const
Definition: static_inst.hh:170
const std::string & name() const
Definition: trace.hh:156
std::vector< ThreadID > roundRobinPriority(ThreadID priority)
Thread scheduling utility functions.
Definition: cpu.hh:165
const FlagsType total
Print the total.
Definition: info.hh:49
const ThreadID InvalidThreadID
Definition: types.hh:226
void predictBranch(MinorDynInstPtr inst, BranchData &branch)
Predicts branches for the given instruction.
Definition: fetch2.cc:186
static const Addr PCMask
Definition: base.hh:275
InstSeqNum streamSeqNum
The &#39;stream&#39; this instruction belongs to.
Definition: dyn_inst.hh:86
bool isVector() const
Definition: static_inst.hh:172
Derived & name(const std::string &name)
Set the name and marks this stat to print at the end of simulation.
Definition: statistics.hh:276
int16_t ThreadID
Thread index/ID type.
Definition: types.hh:225
unsigned int inputIndex
Index into an incompletely processed input line that instructions are to be extracted from...
Definition: fetch2.hh:124
InstSeqNum lastStreamSeqNum
Stream sequence number of the last seen line used to identify changes of instruction stream...
Definition: fetch2.hh:142
Dynamic instruction for Minor.
Definition: dyn_inst.hh:155
Forward data between Execute and Fetch1 carrying change-of-address/stream information.
Definition: pipe_data.hh:62
Enums::ThreadPolicy threadPolicy
Thread Scheduling Policy (RoundRobin, Random, etc)
Definition: cpu.hh:112
ThreadID getScheduledThread()
Use the current threading policy to determine the next thread to fetch from.
Definition: fetch2.cc:564
Stats::Scalar storeInstructions
Definition: fetch2.hh:171
Stats::Scalar amoInstructions
Definition: fetch2.hh:172
bool isFault() const
This is a fault, not a line.
Definition: pipe_data.hh:220
void dumpAllInput(ThreadID tid)
Dump the whole contents of the input buffer.
Definition: fetch2.cc:114
virtual ThreadContext * getContext(int tn)
Given a thread num get the thread context for it.
Definition: base.hh:294
InstSeqNum expectedStreamSeqNum
Stream sequence number remembered from last time the predictionSeqNum changed.
Definition: fetch2.hh:152
Data members after this line are cycle-to-cycle state.
Definition: fetch2.hh:98
#define MINORTRACE(...)
DPRINTFN for MinorTrace reporting.
Definition: trace.hh:60
std::vector< InputBuffer< ForwardInstData > > & nextStageReserve
Interface to reserve space in the next stage.
Definition: fetch2.hh:79
Fault fault
This line has a fault.
Definition: pipe_data.hh:192
Latch< ForwardLineData >::Output inp
Input port carrying lines from Fetch1.
Definition: fetch2.hh:66
The constructed pipeline.
BPredUnit & branchPredictor
Branch predictor passed from Python configuration.
Definition: fetch2.hh:89
void squash(const InstSeqNum &squashed_sn, ThreadID tid)
Squashes all outstanding updates until a given sequence number.
Definition: bpred_unit.cc:368
Fetch2(const std::string &name, MinorCPU &cpu_, MinorCPUParams &params, Latch< ForwardLineData >::Output inp_, Latch< BranchData >::Output branchInp_, Latch< BranchData >::Input predictionOut_, Latch< ForwardInstData >::Input out_, std::vector< InputBuffer< ForwardInstData >> &next_stage_input_buffer)
Definition: fetch2.cc:53
Derived & desc(const std::string &_desc)
Set the description and marks this stat to print at the end of simulation.
Definition: statistics.hh:309
void updateBranchPrediction(const BranchData &branch)
Update local branch prediction structures from feedback from Execute.
Definition: fetch2.cc:124
const ForwardLineData * getInput(ThreadID tid)
Get a piece of data to work on from the inputBuffer, or 0 if there is no data.
Definition: fetch2.cc:92
MinorCPU is an in-order CPU model with four fixed pipeline stages:
Definition: cpu.hh:77
Encapsulate wires on either input or output of the latch.
Definition: buffers.hh:245
TheISA::PCState target
Starting PC of that stream.
Definition: pipe_data.hh:119
bool blocked
Blocked indication for report.
Definition: fetch2.hh:160
GenericISA::DelaySlotPCState< MachInst > PCState
Definition: types.hh:41
bool havePC
PC is currently valid.
Definition: fetch2.hh:138
const char data[]
ThreadID threadId
The thread to which this line/instruction belongs.
Definition: dyn_inst.hh:81
Forward flowing data between Fetch2,Decode,Execute carrying a packet of instructions of a width appro...
Definition: pipe_data.hh:253
const std::string to_string(sc_enc enc)
Definition: sc_fxdefs.cc:60
void evaluate()
Pass on input/buffer data to the output if you can.
Definition: fetch2.cc:237
output decoder
Definition: nop.cc:61
void regStats()
Definition: fetch2.cc:606
ThreadID threadId
Thread associated with these instructions.
Definition: pipe_data.hh:263

Generated on Fri Jul 3 2020 15:53:00 for gem5 by doxygen 1.8.13