gem5  v19.0.0.0
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
fetch2.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2013-2014,2016 ARM Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions are
16  * met: redistributions of source code must retain the above copyright
17  * notice, this list of conditions and the following disclaimer;
18  * redistributions in binary form must reproduce the above copyright
19  * notice, this list of conditions and the following disclaimer in the
20  * documentation and/or other materials provided with the distribution;
21  * neither the name of the copyright holders nor the names of its
22  * contributors may be used to endorse or promote products derived from
23  * this software without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  *
37  * Authors: Andrew Bardsley
38  */
39 
40 #include "cpu/minor/fetch2.hh"
41 
42 #include <string>
43 
44 #include "arch/decoder.hh"
45 #include "arch/utility.hh"
46 #include "cpu/minor/pipeline.hh"
47 #include "cpu/pred/bpred_unit.hh"
48 #include "debug/Branch.hh"
49 #include "debug/Fetch.hh"
50 #include "debug/MinorTrace.hh"
51 
52 namespace Minor
53 {
54 
55 Fetch2::Fetch2(const std::string &name,
56  MinorCPU &cpu_,
57  MinorCPUParams &params,
59  Latch<BranchData>::Output branchInp_,
60  Latch<BranchData>::Input predictionOut_,
62  std::vector<InputBuffer<ForwardInstData>> &next_stage_input_buffer) :
63  Named(name),
64  cpu(cpu_),
65  inp(inp_),
66  branchInp(branchInp_),
67  predictionOut(predictionOut_),
68  out(out_),
69  nextStageReserve(next_stage_input_buffer),
70  outputWidth(params.decodeInputWidth),
71  processMoreThanOneInput(params.fetch2CycleInput),
72  branchPredictor(*params.branchPred),
73  fetchInfo(params.numThreads),
74  threadPriority(0)
75 {
76  if (outputWidth < 1)
77  fatal("%s: decodeInputWidth must be >= 1 (%d)\n", name, outputWidth);
78 
79  if (params.fetch2InputBufferSize < 1) {
80  fatal("%s: fetch2InputBufferSize must be >= 1 (%d)\n", name,
81  params.fetch2InputBufferSize);
82  }
83 
84  /* Per-thread input buffers */
85  for (ThreadID tid = 0; tid < params.numThreads; tid++) {
86  inputBuffer.push_back(
88  name + ".inputBuffer" + std::to_string(tid), "lines",
89  params.fetch2InputBufferSize));
90  }
91 }
92 
93 const ForwardLineData *
95 {
96  /* Get a line from the inputBuffer to work with */
97  if (!inputBuffer[tid].empty()) {
98  return &(inputBuffer[tid].front());
99  } else {
100  return NULL;
101  }
102 }
103 
104 void
106 {
107  if (!inputBuffer[tid].empty()) {
108  inputBuffer[tid].front().freeLine();
109  inputBuffer[tid].pop();
110  }
111 
112  fetchInfo[tid].inputIndex = 0;
113 }
114 
115 void
117 {
118  DPRINTF(Fetch, "Dumping whole input buffer\n");
119  while (!inputBuffer[tid].empty())
120  popInput(tid);
121 
122  fetchInfo[tid].inputIndex = 0;
123 }
124 
125 void
127 {
128  MinorDynInstPtr inst = branch.inst;
129 
130  /* Don't even consider instructions we didn't try to predict or faults */
131  if (inst->isFault() || !inst->triedToPredict)
132  return;
133 
134  switch (branch.reason) {
136  /* No data to update */
137  break;
139  /* Never try to predict interrupts */
140  break;
142  /* Don't need to act on suspends */
143  break;
145  /* Don't need to act on fetch wakeup */
146  break;
148  /* Shouldn't happen. Fetch2 is the only source of
149  * BranchPredictions */
150  break;
152  /* Unpredicted branch or barrier */
153  DPRINTF(Branch, "Unpredicted branch seen inst: %s\n", *inst);
154  branchPredictor.squash(inst->id.fetchSeqNum,
155  branch.target, true, inst->id.threadId);
156  // Update after squashing to accomodate O3CPU
157  // using the branch prediction code.
158  branchPredictor.update(inst->id.fetchSeqNum,
159  inst->id.threadId);
160  break;
162  /* Predicted taken, was taken */
163  DPRINTF(Branch, "Branch predicted correctly inst: %s\n", *inst);
164  branchPredictor.update(inst->id.fetchSeqNum,
165  inst->id.threadId);
166  break;
168  /* Predicted taken, not taken */
169  DPRINTF(Branch, "Branch mis-predicted inst: %s\n", *inst);
170  branchPredictor.squash(inst->id.fetchSeqNum,
171  branch.target /* Not used */, false, inst->id.threadId);
172  // Update after squashing to accomodate O3CPU
173  // using the branch prediction code.
174  branchPredictor.update(inst->id.fetchSeqNum,
175  inst->id.threadId);
176  break;
178  /* Predicted taken, was taken but to a different target */
179  DPRINTF(Branch, "Branch mis-predicted target inst: %s target: %s\n",
180  *inst, branch.target);
181  branchPredictor.squash(inst->id.fetchSeqNum,
182  branch.target, true, inst->id.threadId);
183  break;
184  }
185 }
186 
187 void
189 {
190  Fetch2ThreadInfo &thread = fetchInfo[inst->id.threadId];
191  TheISA::PCState inst_pc = inst->pc;
192 
193  assert(!inst->predictedTaken);
194 
195  /* Skip non-control/sys call instructions */
196  if (inst->staticInst->isControl() ||
197  inst->staticInst->isSyscall())
198  {
199  /* Tried to predict */
200  inst->triedToPredict = true;
201 
202  DPRINTF(Branch, "Trying to predict for inst: %s\n", *inst);
203 
204  if (branchPredictor.predict(inst->staticInst,
205  inst->id.fetchSeqNum, inst_pc,
206  inst->id.threadId))
207  {
208  inst->predictedTaken = true;
209  inst->predictedTarget = inst_pc;
210  branch.target = inst_pc;
211  }
212  } else {
213  DPRINTF(Branch, "Not attempting prediction for inst: %s\n", *inst);
214  }
215 
216  /* If we predict taken, set branch and update sequence numbers */
217  if (inst->predictedTaken) {
218  /* Update the predictionSeqNum and remember the streamSeqNum that it
219  * was associated with */
220  thread.expectedStreamSeqNum = inst->id.streamSeqNum;
221 
223  inst->id.threadId,
224  inst->id.streamSeqNum, thread.predictionSeqNum + 1,
225  inst->predictedTarget, inst);
226 
227  /* Mark with a new prediction number by the stream number of the
228  * instruction causing the prediction */
229  thread.predictionSeqNum++;
230  branch = new_branch;
231 
232  DPRINTF(Branch, "Branch predicted taken inst: %s target: %s"
233  " new predictionSeqNum: %d\n",
234  *inst, inst->predictedTarget, thread.predictionSeqNum);
235  }
236 }
237 
238 void
240 {
241  /* Push input onto appropriate input buffer */
242  if (!inp.outputWire->isBubble())
243  inputBuffer[inp.outputWire->id.threadId].setTail(*inp.outputWire);
244 
245  ForwardInstData &insts_out = *out.inputWire;
246  BranchData prediction;
247  BranchData &branch_inp = *branchInp.outputWire;
248 
249  assert(insts_out.isBubble());
250 
251  /* React to branches from Execute to update local branch prediction
252  * structures */
253  updateBranchPrediction(branch_inp);
254 
255  /* If a branch arrives, don't try and do anything about it. Only
256  * react to your own predictions */
257  if (branch_inp.isStreamChange()) {
258  DPRINTF(Fetch, "Dumping all input as a stream changing branch"
259  " has arrived\n");
260  dumpAllInput(branch_inp.threadId);
261  fetchInfo[branch_inp.threadId].havePC = false;
262  }
263 
264  assert(insts_out.isBubble());
265  /* Even when blocked, clear out input lines with the wrong
266  * prediction sequence number */
267  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
268  Fetch2ThreadInfo &thread = fetchInfo[tid];
269 
270  thread.blocked = !nextStageReserve[tid].canReserve();
271 
272  const ForwardLineData *line_in = getInput(tid);
273 
274  while (line_in &&
275  thread.expectedStreamSeqNum == line_in->id.streamSeqNum &&
276  thread.predictionSeqNum != line_in->id.predictionSeqNum)
277  {
278  DPRINTF(Fetch, "Discarding line %s"
279  " due to predictionSeqNum mismatch (expected: %d)\n",
280  line_in->id, thread.predictionSeqNum);
281 
282  popInput(tid);
283  fetchInfo[tid].havePC = false;
284 
286  DPRINTF(Fetch, "Wrapping\n");
287  line_in = getInput(tid);
288  } else {
289  line_in = NULL;
290  }
291  }
292  }
293 
295  DPRINTF(Fetch, "Scheduled Thread: %d\n", tid);
296 
297  assert(insts_out.isBubble());
298  if (tid != InvalidThreadID) {
299  Fetch2ThreadInfo &fetch_info = fetchInfo[tid];
300 
301  const ForwardLineData *line_in = getInput(tid);
302 
303  unsigned int output_index = 0;
304 
305  /* Pack instructions into the output while we can. This may involve
306  * using more than one input line. Note that lineWidth will be 0
307  * for faulting lines */
308  while (line_in &&
309  (line_in->isFault() ||
310  fetch_info.inputIndex < line_in->lineWidth) && /* More input */
311  output_index < outputWidth && /* More output to fill */
312  prediction.isBubble() /* No predicted branch */)
313  {
314  ThreadContext *thread = cpu.getContext(line_in->id.threadId);
315  TheISA::Decoder *decoder = thread->getDecoderPtr();
316 
317  /* Discard line due to prediction sequence number being wrong but
318  * without the streamSeqNum number having changed */
319  bool discard_line =
320  fetch_info.expectedStreamSeqNum == line_in->id.streamSeqNum &&
321  fetch_info.predictionSeqNum != line_in->id.predictionSeqNum;
322 
323  /* Set the PC if the stream changes. Setting havePC to false in
324  * a previous cycle handles all other change of flow of control
325  * issues */
326  bool set_pc = fetch_info.lastStreamSeqNum != line_in->id.streamSeqNum;
327 
328  if (!discard_line && (!fetch_info.havePC || set_pc)) {
329  /* Set the inputIndex to be the MachInst-aligned offset
330  * from lineBaseAddr of the new PC value */
331  fetch_info.inputIndex =
332  (line_in->pc.instAddr() & BaseCPU::PCMask) -
333  line_in->lineBaseAddr;
334  DPRINTF(Fetch, "Setting new PC value: %s inputIndex: 0x%x"
335  " lineBaseAddr: 0x%x lineWidth: 0x%x\n",
336  line_in->pc, fetch_info.inputIndex, line_in->lineBaseAddr,
337  line_in->lineWidth);
338  fetch_info.pc = line_in->pc;
339  fetch_info.havePC = true;
340  decoder->reset();
341  }
342 
343  /* The generated instruction. Leave as NULL if no instruction
344  * is to be packed into the output */
345  MinorDynInstPtr dyn_inst = NULL;
346 
347  if (discard_line) {
348  /* Rest of line was from an older prediction in the same
349  * stream */
350  DPRINTF(Fetch, "Discarding line %s (from inputIndex: %d)"
351  " due to predictionSeqNum mismatch (expected: %d)\n",
352  line_in->id, fetch_info.inputIndex,
353  fetch_info.predictionSeqNum);
354  } else if (line_in->isFault()) {
355  /* Pack a fault as a MinorDynInst with ->fault set */
356 
357  /* Make a new instruction and pick up the line, stream,
358  * prediction, thread ids from the incoming line */
359  dyn_inst = new MinorDynInst(line_in->id);
360 
361  /* Fetch and prediction sequence numbers originate here */
362  dyn_inst->id.fetchSeqNum = fetch_info.fetchSeqNum;
363  dyn_inst->id.predictionSeqNum = fetch_info.predictionSeqNum;
364  /* To complete the set, test that exec sequence number has
365  * not been set */
366  assert(dyn_inst->id.execSeqNum == 0);
367 
368  dyn_inst->pc = fetch_info.pc;
369 
370  /* Pack a faulting instruction but allow other
371  * instructions to be generated. (Fetch2 makes no
372  * immediate judgement about streamSeqNum) */
373  dyn_inst->fault = line_in->fault;
374  DPRINTF(Fetch, "Fault being passed output_index: "
375  "%d: %s\n", output_index, dyn_inst->fault->name());
376  } else {
377  uint8_t *line = line_in->line;
378 
379  /* The instruction is wholly in the line, can just
380  * assign */
381  auto inst_word = *reinterpret_cast<TheISA::MachInst *>
382  (line + fetch_info.inputIndex);
383 
384  if (!decoder->instReady()) {
385  decoder->moreBytes(fetch_info.pc,
386  line_in->lineBaseAddr + fetch_info.inputIndex,
387  inst_word);
388  DPRINTF(Fetch, "Offering MachInst to decoder addr: 0x%x\n",
389  line_in->lineBaseAddr + fetch_info.inputIndex);
390  }
391 
392  /* Maybe make the above a loop to accomodate ISAs with
393  * instructions longer than sizeof(MachInst) */
394 
395  if (decoder->instReady()) {
396  /* Make a new instruction and pick up the line, stream,
397  * prediction, thread ids from the incoming line */
398  dyn_inst = new MinorDynInst(line_in->id);
399 
400  /* Fetch and prediction sequence numbers originate here */
401  dyn_inst->id.fetchSeqNum = fetch_info.fetchSeqNum;
402  dyn_inst->id.predictionSeqNum = fetch_info.predictionSeqNum;
403  /* To complete the set, test that exec sequence number
404  * has not been set */
405  assert(dyn_inst->id.execSeqNum == 0);
406 
407  /* Note that the decoder can update the given PC.
408  * Remember not to assign it until *after* calling
409  * decode */
410  StaticInstPtr decoded_inst = decoder->decode(fetch_info.pc);
411  dyn_inst->staticInst = decoded_inst;
412 
413  dyn_inst->pc = fetch_info.pc;
414  DPRINTF(Fetch, "decoder inst %s\n", *dyn_inst);
415 
416  // Collect some basic inst class stats
417  if (decoded_inst->isLoad())
419  else if (decoded_inst->isStore())
421  else if (decoded_inst->isAtomic())
422  amoInstructions++;
423  else if (decoded_inst->isVector())
424  vecInstructions++;
425  else if (decoded_inst->isFloating())
426  fpInstructions++;
427  else if (decoded_inst->isInteger())
428  intInstructions++;
429 
430  DPRINTF(Fetch, "Instruction extracted from line %s"
431  " lineWidth: %d output_index: %d inputIndex: %d"
432  " pc: %s inst: %s\n",
433  line_in->id,
434  line_in->lineWidth, output_index, fetch_info.inputIndex,
435  fetch_info.pc, *dyn_inst);
436 
437 #if THE_ISA == X86_ISA || THE_ISA == ARM_ISA
438  /* In SE mode, it's possible to branch to a microop when
439  * replaying faults such as page faults (or simply
440  * intra-microcode branches in X86). Unfortunately,
441  * as Minor has micro-op decomposition in a separate
442  * pipeline stage from instruction decomposition, the
443  * following advancePC (which may follow a branch with
444  * microPC() != 0) *must* see a fresh macroop. This
445  * kludge should be improved with an addition to PCState
446  * but I offer it in this form for the moment
447  *
448  * X86 can branch within microops so we need to deal with
449  * the case that, after a branch, the first un-advanced PC
450  * may be pointing to a microop other than 0. Once
451  * advanced, however, the microop number *must* be 0 */
452  fetch_info.pc.upc(0);
453  fetch_info.pc.nupc(1);
454 #endif
455 
456  /* Advance PC for the next instruction */
457  TheISA::advancePC(fetch_info.pc, decoded_inst);
458 
459  /* Predict any branches and issue a branch if
460  * necessary */
461  predictBranch(dyn_inst, prediction);
462  } else {
463  DPRINTF(Fetch, "Inst not ready yet\n");
464  }
465 
466  /* Step on the pointer into the line if there's no
467  * complete instruction waiting */
468  if (decoder->needMoreBytes()) {
469  fetch_info.inputIndex += sizeof(TheISA::MachInst);
470 
471  DPRINTF(Fetch, "Updated inputIndex value PC: %s"
472  " inputIndex: 0x%x lineBaseAddr: 0x%x lineWidth: 0x%x\n",
473  line_in->pc, fetch_info.inputIndex, line_in->lineBaseAddr,
474  line_in->lineWidth);
475  }
476  }
477 
478  if (dyn_inst) {
479  /* Step to next sequence number */
480  fetch_info.fetchSeqNum++;
481 
482  /* Correctly size the output before writing */
483  if (output_index == 0) {
484  insts_out.resize(outputWidth);
485  }
486  /* Pack the generated dynamic instruction into the output */
487  insts_out.insts[output_index] = dyn_inst;
488  output_index++;
489 
490  /* Output MinorTrace instruction info for
491  * pre-microop decomposition macroops */
492  if (DTRACE(MinorTrace) && !dyn_inst->isFault() &&
493  dyn_inst->staticInst->isMacroop())
494  {
495  dyn_inst->minorTraceInst(*this);
496  }
497  }
498 
499  /* Remember the streamSeqNum of this line so we can tell when
500  * we change stream */
501  fetch_info.lastStreamSeqNum = line_in->id.streamSeqNum;
502 
503  /* Asked to discard line or there was a branch or fault */
504  if (!prediction.isBubble() || /* The remains of a
505  line with a prediction in it */
506  line_in->isFault() /* A line which is just a fault */)
507  {
508  DPRINTF(Fetch, "Discarding all input on branch/fault\n");
509  dumpAllInput(tid);
510  fetch_info.havePC = false;
511  line_in = NULL;
512  } else if (discard_line) {
513  /* Just discard one line, one's behind it may have new
514  * stream sequence numbers. There's a DPRINTF above
515  * for this event */
516  popInput(tid);
517  fetch_info.havePC = false;
518  line_in = NULL;
519  } else if (fetch_info.inputIndex == line_in->lineWidth) {
520  /* Got to end of a line, pop the line but keep PC
521  * in case this is a line-wrapping inst. */
522  popInput(tid);
523  line_in = NULL;
524  }
525 
526  if (!line_in && processMoreThanOneInput) {
527  DPRINTF(Fetch, "Wrapping\n");
528  line_in = getInput(tid);
529  }
530  }
531 
532  /* The rest of the output (if any) should already have been packed
533  * with bubble instructions by insts_out's initialisation */
534  }
535  if (tid == InvalidThreadID) {
536  assert(insts_out.isBubble());
537  }
539  *predictionOut.inputWire = prediction;
540 
541  /* If we generated output, reserve space for the result in the next stage
542  * and mark the stage as being active this cycle */
543  if (!insts_out.isBubble()) {
544  /* Note activity of following buffer */
546  insts_out.threadId = tid;
547  nextStageReserve[tid].reserve();
548  }
549 
550  /* If we still have input to process and somewhere to put it,
551  * mark stage as active */
552  for (ThreadID i = 0; i < cpu.numThreads; i++)
553  {
554  if (getInput(i) && nextStageReserve[i].canReserve()) {
556  break;
557  }
558  }
559 
560  /* Make sure the input (if any left) is pushed */
561  if (!inp.outputWire->isBubble())
562  inputBuffer[inp.outputWire->id.threadId].pushTail();
563 }
564 
565 inline ThreadID
567 {
568  /* Select thread via policy. */
569  std::vector<ThreadID> priority_list;
570 
571  switch (cpu.threadPolicy) {
572  case Enums::SingleThreaded:
573  priority_list.push_back(0);
574  break;
575  case Enums::RoundRobin:
576  priority_list = cpu.roundRobinPriority(threadPriority);
577  break;
578  case Enums::Random:
579  priority_list = cpu.randomPriority();
580  break;
581  default:
582  panic("Unknown fetch policy");
583  }
584 
585  for (auto tid : priority_list) {
586  if (getInput(tid) && !fetchInfo[tid].blocked) {
587  threadPriority = tid;
588  return tid;
589  }
590  }
591 
592  return InvalidThreadID;
593 }
594 
595 bool
597 {
598  for (const auto &buffer : inputBuffer) {
599  if (!buffer.empty())
600  return false;
601  }
602 
603  return (*inp.outputWire).isBubble() &&
604  (*predictionOut.inputWire).isBubble();
605 }
606 
607 void
609 {
610  using namespace Stats;
611 
613  .name(name() + ".int_instructions")
614  .desc("Number of integer instructions successfully decoded")
615  .flags(total);
616 
618  .name(name() + ".fp_instructions")
619  .desc("Number of floating point instructions successfully decoded")
620  .flags(total);
621 
623  .name(name() + ".vec_instructions")
624  .desc("Number of SIMD instructions successfully decoded")
625  .flags(total);
626 
628  .name(name() + ".load_instructions")
629  .desc("Number of memory load instructions successfully decoded")
630  .flags(total);
631 
633  .name(name() + ".store_instructions")
634  .desc("Number of memory store instructions successfully decoded")
635  .flags(total);
636 
638  .name(name() + ".amo_instructions")
639  .desc("Number of memory atomic instructions successfully decoded")
640  .flags(total);
641 }
642 
643 void
645 {
646  std::ostringstream data;
647 
648  if (fetchInfo[0].blocked)
649  data << 'B';
650  else
651  (*out.inputWire).reportData(data);
652 
653  MINORTRACE("inputIndex=%d havePC=%d predictionSeqNum=%d insts=%s\n",
654  fetchInfo[0].inputIndex, fetchInfo[0].havePC, fetchInfo[0].predictionSeqNum, data.str());
655  inputBuffer[0].minorTrace();
656 }
657 
658 }
InstSeqNum fetchSeqNum
Fetch2 is the source of fetch sequence numbers.
Definition: fetch2.hh:148
MinorDynInstPtr inst
Instruction which caused this branch.
Definition: pipe_data.hh:124
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:167
#define DPRINTF(x,...)
Definition: trace.hh:229
bool isStore() const
Definition: static_inst.hh:160
Latch< BranchData >::Output branchInp
Input port carrying branches from Execute.
Definition: fetch2.hh:72
virtual TheISA::Decoder * getDecoderPtr()=0
Latch< ForwardInstData >::Input out
Output port carrying instructions into Decode.
Definition: fetch2.hh:78
#define fatal(...)
This implements a cprintf based fatal() function.
Definition: logging.hh:175
const std::string & name()
Definition: trace.cc:54
Bitfield< 7 > i
bool processMoreThanOneInput
If true, more than one input word can be processed each cycle if there is room in the output to conta...
Definition: fetch2.hh:88
Like a Queue but with a restricted interface and a setTail function which, when the queue is empty...
Definition: buffers.hh:567
ThreadID numThreads
Number of threads we're actually simulating (<= SMT_MAX_THREADS).
Definition: base.hh:378
Addr lineBaseAddr
First byte address in the line.
Definition: pipe_data.hh:183
static bool isStreamChange(const BranchData::Reason reason)
Is a request with this reason actually a request to change the PC rather than a bubble or branch pred...
Definition: pipe_data.cc:83
ThreadID threadId
ThreadID associated with branch.
Definition: pipe_data.hh:114
bool isLoad() const
Definition: static_inst.hh:159
std::vector< InputBuffer< ForwardLineData > > inputBuffer
Definition: fetch2.hh:95
void minorTrace() const
Definition: fetch2.cc:644
Line fetch data in the forward direction.
Definition: pipe_data.hh:173
Reason reason
Explanation for this branch.
Definition: pipe_data.hh:111
bool isFloating() const
Definition: static_inst.hh:169
void activity()
Records that there is activity this cycle.
Definition: activity.cc:56
TheISA::PCState pc
Remembered program counter value.
Definition: fetch2.hh:135
bool isAtomic() const
Definition: static_inst.hh:161
Latch< BranchData >::Input predictionOut
Output port carrying predictions back to Fetch1.
Definition: fetch2.hh:75
std::vector< Fetch2ThreadInfo > fetchInfo
Definition: fetch2.hh:165
void update(const InstSeqNum &done_sn, ThreadID tid)
Tells the branch predictor to commit any updates until the given sequence number. ...
Definition: bpred_unit.cc:345
TheISA::PCState pc
PC of the first requested inst within this line.
Definition: pipe_data.hh:186
Fetch2 receives lines of data from Fetch1, separates them into instructions and passes them to Decode...
Minor contains all the definitions within the MinorCPU apart from the CPU class itself.
Definition: activity.cc:46
Derived & flags(Flags _flags)
Set the flags and marks this stat to print at the end of simulation.
Definition: statistics.hh:336
ThreadContext is the external interface to all thread state for anything outside of the CPU...
unsigned int outputWidth
Width of output of this stage/input of next in instructions.
Definition: fetch2.hh:84
Stats::Scalar vecInstructions
Definition: fetch2.hh:171
STL vector class.
Definition: stl.hh:40
Stats::Scalar loadInstructions
Definition: fetch2.hh:172
uint32_t MachInst
Definition: types.hh:40
InstSeqNum predictionSeqNum
The predicted qualifier to stream, attached by Fetch2 as a consequence of branch prediction.
Definition: dyn_inst.hh:92
Definition: trace.hh:151
ThreadID threadPriority
Definition: fetch2.hh:166
bool isDrained()
Is this stage drained? For Fetch2, draining is initiated by Execute halting Fetch1 causing Fetch2 to ...
Definition: fetch2.cc:596
#define DTRACE(x)
Definition: trace.hh:227
MinorCPU & cpu
Pointer back to the containing CPU.
Definition: fetch2.hh:65
Stats::Scalar intInstructions
Stats.
Definition: fetch2.hh:169
InstSeqNum predictionSeqNum
Fetch2 is the source of prediction sequence numbers.
Definition: fetch2.hh:159
void popInput(ThreadID tid)
Pop an element off the input buffer, if there are any.
Definition: fetch2.cc:105
Stats::Scalar fpInstructions
Definition: fetch2.hh:170
void activateStage(const int idx)
Marks a stage as active.
Definition: activity.cc:92
std::vector< ThreadID > randomPriority()
Definition: cpu.hh:178
void resize(unsigned int width)
Resize a bubble/empty ForwardInstData and fill with bubbles.
Definition: pipe_data.cc:262
bool isBubble() const
Definition: pipe_data.hh:150
bool isBubble() const
BubbleIF interface.
Definition: pipe_data.cc:249
MinorDynInstPtr insts[MAX_FORWARD_INSTS]
Array of carried insts, ref counted.
Definition: pipe_data.hh:259
bool predict(const StaticInstPtr &inst, const InstSeqNum &seqNum, TheISA::PCState &pc, ThreadID tid)
Predicts whether or not the instruction is a taken branch, and the target of the branch if it is take...
Definition: bpred_unit.cc:172
Minor::MinorActivityRecorder * activityRecorder
Activity recording for pipeline.
Definition: cpu.hh:90
uint8_t * line
Line data.
Definition: pipe_data.hh:201
unsigned int lineWidth
Explicit line width, don't rely on data.size.
Definition: pipe_data.hh:189
InstId id
Thread, stream, prediction ...
Definition: pipe_data.hh:197
void advancePC(PCState &pc, const StaticInstPtr &inst)
Definition: utility.hh:98
bool isInteger() const
Definition: static_inst.hh:168
const std::string & name() const
Definition: trace.hh:160
std::vector< ThreadID > roundRobinPriority(ThreadID priority)
Thread scheduling utility functions.
Definition: cpu.hh:169
const FlagsType total
Print the total.
Definition: info.hh:51
const ThreadID InvalidThreadID
Definition: types.hh:228
void predictBranch(MinorDynInstPtr inst, BranchData &branch)
Predicts branches for the given instruction.
Definition: fetch2.cc:188
static const Addr PCMask
Definition: base.hh:279
InstSeqNum streamSeqNum
The &#39;stream&#39; this instruction belongs to.
Definition: dyn_inst.hh:88
bool isVector() const
Definition: static_inst.hh:170
Derived & name(const std::string &name)
Set the name and marks this stat to print at the end of simulation.
Definition: statistics.hh:279
int16_t ThreadID
Thread index/ID type.
Definition: types.hh:227
unsigned int inputIndex
Index into an incompletely processed input line that instructions are to be extracted from...
Definition: fetch2.hh:126
InstSeqNum lastStreamSeqNum
Stream sequence number of the last seen line used to identify changes of instruction stream...
Definition: fetch2.hh:144
Dynamic instruction for Minor.
Definition: dyn_inst.hh:157
Forward data between Execute and Fetch1 carrying change-of-address/stream information.
Definition: pipe_data.hh:64
Enums::ThreadPolicy threadPolicy
Thread Scheduling Policy (RoundRobin, Random, etc)
Definition: cpu.hh:114
ThreadID getScheduledThread()
Use the current threading policy to determine the next thread to fetch from.
Definition: fetch2.cc:566
Stats::Scalar storeInstructions
Definition: fetch2.hh:173
GenericISA::SimplePCState< MachInst > PCState
Definition: types.hh:43
Stats::Scalar amoInstructions
Definition: fetch2.hh:174
bool isFault() const
This is a fault, not a line.
Definition: pipe_data.hh:222
void dumpAllInput(ThreadID tid)
Dump the whole contents of the input buffer.
Definition: fetch2.cc:116
virtual ThreadContext * getContext(int tn)
Given a thread num get tho thread context for it.
Definition: base.hh:298
InstSeqNum expectedStreamSeqNum
Stream sequence number remembered from last time the predictionSeqNum changed.
Definition: fetch2.hh:154
Data members after this line are cycle-to-cycle state.
Definition: fetch2.hh:100
#define MINORTRACE(...)
DPRINTFN for MinorTrace reporting.
Definition: trace.hh:62
std::vector< InputBuffer< ForwardInstData > > & nextStageReserve
Interface to reserve space in the next stage.
Definition: fetch2.hh:81
Fault fault
This line has a fault.
Definition: pipe_data.hh:194
Latch< ForwardLineData >::Output inp
Input port carrying lines from Fetch1.
Definition: fetch2.hh:68
The constructed pipeline.
BPredUnit & branchPredictor
Branch predictor passed from Python configuration.
Definition: fetch2.hh:91
void squash(const InstSeqNum &squashed_sn, ThreadID tid)
Squashes all outstanding updates until a given sequence number.
Definition: bpred_unit.cc:368
Fetch2(const std::string &name, MinorCPU &cpu_, MinorCPUParams &params, Latch< ForwardLineData >::Output inp_, Latch< BranchData >::Output branchInp_, Latch< BranchData >::Input predictionOut_, Latch< ForwardInstData >::Input out_, std::vector< InputBuffer< ForwardInstData >> &next_stage_input_buffer)
Definition: fetch2.cc:55
Derived & desc(const std::string &_desc)
Set the description and marks this stat to print at the end of simulation.
Definition: statistics.hh:312
void updateBranchPrediction(const BranchData &branch)
Update local branch prediction structures from feedback from Execute.
Definition: fetch2.cc:126
const ForwardLineData * getInput(ThreadID tid)
Get a piece of data to work on from the inputBuffer, or 0 if there is no data.
Definition: fetch2.cc:94
MinorCPU is an in-order CPU model with four fixed pipeline stages:
Definition: cpu.hh:79
Encapsulate wires on either input or output of the latch.
Definition: buffers.hh:247
TheISA::PCState target
Starting PC of that stream.
Definition: pipe_data.hh:121
bool blocked
Blocked indication for report.
Definition: fetch2.hh:162
bool havePC
PC is currently valid.
Definition: fetch2.hh:140
const char data[]
ThreadID threadId
The thread to which this line/instruction belongs.
Definition: dyn_inst.hh:83
Forward flowing data between Fetch2,Decode,Execute carrying a packet of instructions of a width appro...
Definition: pipe_data.hh:255
const std::string to_string(sc_enc enc)
Definition: sc_fxdefs.cc:60
void evaluate()
Pass on input/buffer data to the output if you can.
Definition: fetch2.cc:239
output decoder
Definition: nop.cc:64
void regStats()
Definition: fetch2.cc:608
ThreadID threadId
Thread associated with these instructions.
Definition: pipe_data.hh:265

Generated on Fri Feb 28 2020 16:26:59 for gem5 by doxygen 1.8.13