gem5 [DEVELOP-FOR-25.1]
Loading...
Searching...
No Matches
fetch2.cc
Go to the documentation of this file.
1/*
2 * Copyright (c) 2013-2014,2016 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
#include "cpu/minor/fetch2.hh"

#include <string>

#include "arch/generic/decoder.hh"
#include "base/logging.hh"
#include "base/trace.hh"
#include "cpu/minor/pipeline.hh"
#include "cpu/null_static_inst.hh"
#include "cpu/pred/bpred_unit.hh"
#include "debug/Branch.hh"
#include "debug/Fetch.hh"
#include "debug/MinorTrace.hh"
51
52namespace gem5
53{
54
55namespace minor
56{
57
58Fetch2::Fetch2(const std::string &name,
59 MinorCPU &cpu_,
60 const BaseMinorCPUParams &params,
63 Latch<BranchData>::Input predictionOut_,
65 std::vector<InputBuffer<ForwardInstData>> &next_stage_input_buffer) :
66 Named(name),
67 cpu(cpu_),
68 inp(inp_),
69 branchInp(branchInp_),
70 predictionOut(predictionOut_),
71 out(out_),
72 nextStageReserve(next_stage_input_buffer),
73 outputWidth(params.decodeInputWidth),
74 processMoreThanOneInput(params.fetch2CycleInput),
75 branchPredictor(*params.branchPred),
76 fetchInfo(params.numThreads),
77 threadPriority(0), stats(&cpu_)
78{
79 if (outputWidth < 1)
80 fatal("%s: decodeInputWidth must be >= 1 (%d)\n", name, outputWidth);
81
82 if (params.fetch2InputBufferSize < 1) {
83 fatal("%s: fetch2InputBufferSize must be >= 1 (%d)\n", name,
84 params.fetch2InputBufferSize);
85 }
86
87 /* Per-thread input buffers */
88 for (ThreadID tid = 0; tid < params.numThreads; tid++) {
89 inputBuffer.push_back(
91 name + ".inputBuffer" + std::to_string(tid), "lines",
92 params.fetch2InputBufferSize));
93 }
94}
95
96const ForwardLineData *
98{
99 /* Get a line from the inputBuffer to work with */
100 if (!inputBuffer[tid].empty()) {
101 return &(inputBuffer[tid].front());
102 } else {
103 return NULL;
104 }
105}
106
107void
109{
110 if (!inputBuffer[tid].empty()) {
111 inputBuffer[tid].front().freeLine();
112 inputBuffer[tid].pop();
113 }
114
115 fetchInfo[tid].inputIndex = 0;
116}
117
118void
120{
121 DPRINTF(Fetch, "Dumping whole input buffer\n");
122 while (!inputBuffer[tid].empty())
123 popInput(tid);
124
125 fetchInfo[tid].inputIndex = 0;
126}
127
128void
130{
131 MinorDynInstPtr inst = branch.inst;
132
133 /* Don't even consider instructions we didn't try to predict or faults */
134 if (inst->isFault() || !inst->triedToPredict)
135 return;
136
137 switch (branch.reason) {
139 /* No data to update */
140 break;
142 /* Never try to predict interrupts */
143 break;
145 /* Don't need to act on suspends */
146 break;
148 /* Don't need to act on fetch wakeup */
149 break;
151 /* Shouldn't happen. Fetch2 is the only source of
152 * BranchPredictions */
153 break;
155 /* Unpredicted branch or barrier */
156 DPRINTF(Branch, "Unpredicted branch seen inst: %s\n", *inst);
157 branchPredictor.squash(inst->id.fetchSeqNum,
158 *branch.target, true, inst->id.threadId);
159 // Update after squashing to accomodate O3CPU
160 // using the branch prediction code.
161 branchPredictor.update(inst->id.fetchSeqNum,
162 inst->id.threadId);
163 break;
165 /* Predicted taken, was taken */
166 DPRINTF(Branch, "Branch predicted correctly inst: %s\n", *inst);
167 branchPredictor.update(inst->id.fetchSeqNum,
168 inst->id.threadId);
169 break;
171 /* Predicted taken, not taken */
172 DPRINTF(Branch, "Branch mis-predicted inst: %s\n", *inst);
173 branchPredictor.squash(inst->id.fetchSeqNum,
174 *branch.target /* Not used */, false, inst->id.threadId);
175 // Update after squashing to accomodate O3CPU
176 // using the branch prediction code.
177 branchPredictor.update(inst->id.fetchSeqNum,
178 inst->id.threadId);
179 break;
181 /* Predicted taken, was taken but to a different target */
182 DPRINTF(Branch, "Branch mis-predicted target inst: %s target: %s\n",
183 *inst, *branch.target);
184 branchPredictor.squash(inst->id.fetchSeqNum,
185 *branch.target, true, inst->id.threadId);
186 break;
187 }
188}
189
190void
192{
193 Fetch2ThreadInfo &thread = fetchInfo[inst->id.threadId];
194
195 assert(!inst->predictedTaken);
196
197 /* Skip non-control/sys call instructions */
198 if (inst->staticInst->isControl() || inst->staticInst->isSyscall()){
199 std::unique_ptr<PCStateBase> inst_pc(inst->pc->clone());
200
201 /* Tried to predict */
202 inst->triedToPredict = true;
203
204 DPRINTF(Branch, "Trying to predict for inst: %s\n", *inst);
205
206 cpu.fetchStats[inst->id.threadId]->numBranches++;
207 if (branchPredictor.predict(inst->staticInst,
208 inst->id.fetchSeqNum, *inst_pc, inst->id.threadId)) {
209 set(branch.target, *inst_pc);
210 inst->predictedTaken = true;
211 set(inst->predictedTarget, inst_pc);
212 }
213 } else {
214 DPRINTF(Branch, "Not attempting prediction for inst: %s\n", *inst);
215 }
216
217 /* If we predict taken, set branch and update sequence numbers */
218 if (inst->predictedTaken) {
219 /* Update the predictionSeqNum and remember the streamSeqNum that it
220 * was associated with */
221 thread.expectedStreamSeqNum = inst->id.streamSeqNum;
222
224 inst->id.threadId,
225 inst->id.streamSeqNum, thread.predictionSeqNum + 1,
226 *inst->predictedTarget, inst);
227
228 /* Mark with a new prediction number by the stream number of the
229 * instruction causing the prediction */
230 thread.predictionSeqNum++;
231 branch = new_branch;
232
233 DPRINTF(Branch, "Branch predicted taken inst: %s target: %s"
234 " new predictionSeqNum: %d\n",
235 *inst, *inst->predictedTarget, thread.predictionSeqNum);
236 }
237}
238
239void
241{
242 /* Push input onto appropriate input buffer */
243 if (!inp.outputWire->isBubble())
244 inputBuffer[inp.outputWire->id.threadId].setTail(*inp.outputWire);
245
246 ForwardInstData &insts_out = *out.inputWire;
247 BranchData prediction;
248 BranchData &branch_inp = *branchInp.outputWire;
249
250 assert(insts_out.isBubble());
251
252 /* React to branches from Execute to update local branch prediction
253 * structures */
254 updateBranchPrediction(branch_inp);
255
256 /* If a branch arrives, don't try and do anything about it. Only
257 * react to your own predictions */
258 if (branch_inp.isStreamChange()) {
259 DPRINTF(Fetch, "Dumping all input as a stream changing branch"
260 " has arrived\n");
261 dumpAllInput(branch_inp.threadId);
262 fetchInfo[branch_inp.threadId].havePC = false;
263 }
264
265 assert(insts_out.isBubble());
266 /* Even when blocked, clear out input lines with the wrong
267 * prediction sequence number */
268 for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
269 Fetch2ThreadInfo &thread = fetchInfo[tid];
270
271 thread.blocked = !nextStageReserve[tid].canReserve();
272
273 const ForwardLineData *line_in = getInput(tid);
274
275 while (line_in &&
276 thread.expectedStreamSeqNum == line_in->id.streamSeqNum &&
277 thread.predictionSeqNum != line_in->id.predictionSeqNum)
278 {
279 DPRINTF(Fetch, "Discarding line %s"
280 " due to predictionSeqNum mismatch (expected: %d)\n",
281 line_in->id, thread.predictionSeqNum);
282
283 popInput(tid);
284 fetchInfo[tid].havePC = false;
285
287 DPRINTF(Fetch, "Wrapping\n");
288 line_in = getInput(tid);
289 } else {
290 line_in = NULL;
291 }
292 }
293 }
294
296 DPRINTF(Fetch, "Scheduled Thread: %d\n", tid);
297
298 assert(insts_out.isBubble());
299 if (tid != InvalidThreadID) {
300 Fetch2ThreadInfo &fetch_info = fetchInfo[tid];
301
302 const ForwardLineData *line_in = getInput(tid);
303
304 unsigned int output_index = 0;
305
306 /* Pack instructions into the output while we can. This may involve
307 * using more than one input line. Note that lineWidth will be 0
308 * for faulting lines */
309 while (line_in &&
310 (line_in->isFault() ||
311 fetch_info.inputIndex < line_in->lineWidth) && /* More input */
312 output_index < outputWidth && /* More output to fill */
313 prediction.isBubble() /* No predicted branch */)
314 {
315 ThreadContext *thread = cpu.getContext(line_in->id.threadId);
316 InstDecoder *decoder = thread->getDecoderPtr();
317
318 /* Discard line due to prediction sequence number being wrong but
319 * without the streamSeqNum number having changed */
320 bool discard_line =
321 fetch_info.expectedStreamSeqNum == line_in->id.streamSeqNum &&
322 fetch_info.predictionSeqNum != line_in->id.predictionSeqNum;
323
324 /* Set the PC if the stream changes. Setting havePC to false in
325 * a previous cycle handles all other change of flow of control
326 * issues */
327 bool set_pc =
328 fetch_info.lastStreamSeqNum != line_in->id.streamSeqNum;
329
330 if (!discard_line && (!fetch_info.havePC || set_pc)) {
331 /* Set the inputIndex to be the MachInst-aligned offset
332 * from lineBaseAddr of the new PC value */
333 fetch_info.inputIndex =
334 (line_in->pc->instAddr() & decoder->pcMask()) -
335 line_in->lineBaseAddr;
336 DPRINTF(Fetch, "Setting new PC value: %s inputIndex: 0x%x"
337 " lineBaseAddr: 0x%x lineWidth: 0x%x\n",
338 *line_in->pc, fetch_info.inputIndex, line_in->lineBaseAddr,
339 line_in->lineWidth);
340 set(fetch_info.pc, line_in->pc);
341 fetch_info.havePC = true;
342 decoder->reset();
343 }
344
345 /* The generated instruction. Leave as NULL if no instruction
346 * is to be packed into the output */
347 MinorDynInstPtr dyn_inst = NULL;
348
349 if (discard_line) {
350 /* Rest of line was from an older prediction in the same
351 * stream */
352 DPRINTF(Fetch, "Discarding line %s (from inputIndex: %d)"
353 " due to predictionSeqNum mismatch (expected: %d)\n",
354 line_in->id, fetch_info.inputIndex,
355 fetch_info.predictionSeqNum);
356 } else if (line_in->isFault()) {
357 /* Pack a fault as a MinorDynInst with ->fault set */
358
359 /* Make a new instruction and pick up the line, stream,
360 * prediction, thread ids from the incoming line */
361 dyn_inst = new MinorDynInst(nullStaticInstPtr, line_in->id);
362
363 /* Fetch and prediction sequence numbers originate here */
364 dyn_inst->id.fetchSeqNum = fetch_info.fetchSeqNum;
365 dyn_inst->id.predictionSeqNum = fetch_info.predictionSeqNum;
366 /* To complete the set, test that exec sequence number has
367 * not been set */
368 assert(dyn_inst->id.execSeqNum == 0);
369
370 set(dyn_inst->pc, fetch_info.pc);
371
372 /* Pack a faulting instruction but allow other
373 * instructions to be generated. (Fetch2 makes no
374 * immediate judgement about streamSeqNum) */
375 dyn_inst->fault = line_in->fault;
376 DPRINTF(Fetch, "Fault being passed output_index: "
377 "%d: %s\n", output_index, dyn_inst->fault->name());
378 } else {
379 uint8_t *line = line_in->line;
380
381 /* The instruction is wholly in the line, can just copy. */
382 memcpy(decoder->moreBytesPtr(), line + fetch_info.inputIndex,
383 decoder->moreBytesSize());
384
385 if (!decoder->instReady()) {
386 decoder->moreBytes(*fetch_info.pc,
387 line_in->lineBaseAddr + fetch_info.inputIndex);
388 DPRINTF(Fetch, "Offering MachInst to decoder addr: 0x%x\n",
389 line_in->lineBaseAddr + fetch_info.inputIndex);
390 }
391
392 /* Maybe make the above a loop to accomodate ISAs with
393 * instructions longer than sizeof(MachInst) */
394
395 if (decoder->instReady()) {
396 /* Note that the decoder can update the given PC.
397 * Remember not to assign it until *after* calling
398 * decode */
399 StaticInstPtr decoded_inst =
400 decoder->decode(*fetch_info.pc);
401
402 /* Make a new instruction and pick up the line, stream,
403 * prediction, thread ids from the incoming line */
404 dyn_inst = new MinorDynInst(decoded_inst, line_in->id);
405
406 /* Fetch and prediction sequence numbers originate here */
407 dyn_inst->id.fetchSeqNum = fetch_info.fetchSeqNum;
408 dyn_inst->id.predictionSeqNum = fetch_info.predictionSeqNum;
409 /* To complete the set, test that exec sequence number
410 * has not been set */
411 assert(dyn_inst->id.execSeqNum == 0);
412
413 set(dyn_inst->pc, fetch_info.pc);
414 DPRINTF(Fetch, "decoder inst %s\n", *dyn_inst);
415
416 // Collect some basic inst class stats
417 if (decoded_inst->isLoad()) {
418 stats.loadInstructions++;
419 } else if (decoded_inst->isStore()) {
420 stats.storeInstructions++;
421 } else if (decoded_inst->isAtomic()) {
422 stats.amoInstructions++;
423 } else if (decoded_inst->isVector()) {
424 stats.vecInstructions++;
425 } else if (decoded_inst->isFloating()) {
426 stats.fpInstructions++;
427 } else if (decoded_inst->isInteger()) {
428 stats.intInstructions++;
429 }
430
431 stats.totalInstructions++;
432 cpu.fetchStats[tid]->numInsts++;
433
434 DPRINTF(Fetch, "Instruction extracted from line %s"
435 " lineWidth: %d output_index: %d inputIndex: %d"
436 " pc: %s inst: %s\n",
437 line_in->id,
438 line_in->lineWidth, output_index, fetch_info.inputIndex,
439 *fetch_info.pc, *dyn_inst);
440
441 /*
442 * In SE mode, it's possible to branch to a microop when
443 * replaying faults such as page faults (or simply
444 * intra-microcode branches in X86). Unfortunately,
445 * as Minor has micro-op decomposition in a separate
446 * pipeline stage from instruction decomposition, the
447 * following advancePC (which may follow a branch with
448 * microPC() != 0) *must* see a fresh macroop.
449 *
450 * X86 can branch within microops so we need to deal with
451 * the case that, after a branch, the first un-advanced PC
452 * may be pointing to a microop other than 0. Once
453 * advanced, however, the microop number *must* be 0
454 */
455 fetch_info.pc->uReset();
456
457 /* Advance PC for the next instruction */
458 decoded_inst->advancePC(*fetch_info.pc);
459
460 /* Predict any branches and issue a branch if
461 * necessary */
462 predictBranch(dyn_inst, prediction);
463 } else {
464 DPRINTF(Fetch, "Inst not ready yet\n");
465 }
466
467 /* Step on the pointer into the line if there's no
468 * complete instruction waiting */
469 if (decoder->needMoreBytes()) {
470 fetch_info.inputIndex += decoder->moreBytesSize();
471
472 DPRINTF(Fetch, "Updated inputIndex value PC: %s"
473 " inputIndex: 0x%x lineBaseAddr: 0x%x lineWidth: 0x%x\n",
474 *line_in->pc, fetch_info.inputIndex, line_in->lineBaseAddr,
475 line_in->lineWidth);
476 }
477 }
478
479 if (dyn_inst) {
480 /* Step to next sequence number */
481 fetch_info.fetchSeqNum++;
482
483 /* Correctly size the output before writing */
484 if (output_index == 0) {
485 insts_out.resize(outputWidth);
486 }
487 /* Pack the generated dynamic instruction into the output */
488 insts_out.insts[output_index] = dyn_inst;
489 output_index++;
490
491 /* Output MinorTrace instruction info for
492 * pre-microop decomposition macroops */
493 if (debug::MinorTrace && !dyn_inst->isFault() &&
494 dyn_inst->staticInst->isMacroop()) {
495 dyn_inst->minorTraceInst(*this);
496 }
497 }
498
499 /* Remember the streamSeqNum of this line so we can tell when
500 * we change stream */
501 fetch_info.lastStreamSeqNum = line_in->id.streamSeqNum;
502
503 /* Asked to discard line or there was a branch or fault */
504 if (!prediction.isBubble() || /* The remains of a
505 line with a prediction in it */
506 line_in->isFault() /* A line which is just a fault */)
507 {
508 DPRINTF(Fetch, "Discarding all input on branch/fault\n");
509 dumpAllInput(tid);
510 fetch_info.havePC = false;
511 line_in = NULL;
512 } else if (discard_line) {
513 /* Just discard one line, one's behind it may have new
514 * stream sequence numbers. There's a DPRINTF above
515 * for this event */
516 popInput(tid);
517 fetch_info.havePC = false;
518 line_in = NULL;
519 } else if (fetch_info.inputIndex == line_in->lineWidth) {
520 /* Got to end of a line, pop the line but keep PC
521 * in case this is a line-wrapping inst. */
522 popInput(tid);
523 line_in = NULL;
524 }
525
526 if (!line_in && processMoreThanOneInput) {
527 DPRINTF(Fetch, "Wrapping\n");
528 line_in = getInput(tid);
529 }
530 }
531
532 /* The rest of the output (if any) should already have been packed
533 * with bubble instructions by insts_out's initialisation */
534 }
535 if (tid == InvalidThreadID) {
536 assert(insts_out.isBubble());
537 }
539 *predictionOut.inputWire = prediction;
540
541 /* If we generated output, reserve space for the result in the next stage
542 * and mark the stage as being active this cycle */
543 if (!insts_out.isBubble()) {
544 /* Note activity of following buffer */
545 cpu.activityRecorder->activity();
546 insts_out.threadId = tid;
547 nextStageReserve[tid].reserve();
548 }
549
550 /* If we still have input to process and somewhere to put it,
551 * mark stage as active */
552 for (ThreadID i = 0; i < cpu.numThreads; i++)
553 {
554 if (getInput(i) && nextStageReserve[i].canReserve()) {
555 cpu.activityRecorder->activateStage(Pipeline::Fetch2StageId);
556 break;
557 }
558 }
559
560 /* Make sure the input (if any left) is pushed */
561 if (!inp.outputWire->isBubble())
562 inputBuffer[inp.outputWire->id.threadId].pushTail();
563}
564
565inline ThreadID
567{
568 /* Select thread via policy. */
569 std::vector<ThreadID> priority_list;
570
571 switch (cpu.threadPolicy) {
572 case enums::SingleThreaded:
573 priority_list.push_back(0);
574 break;
575 case enums::RoundRobin:
576 priority_list = cpu.roundRobinPriority(threadPriority);
577 break;
578 case enums::Random:
579 priority_list = cpu.randomPriority();
580 break;
581 default:
582 panic("Unknown fetch policy");
583 }
584
585 for (auto tid : priority_list) {
586 if (getInput(tid) && !fetchInfo[tid].blocked) {
587 threadPriority = tid;
588 return tid;
589 }
590 }
591
592 return InvalidThreadID;
593}
594
595bool
597{
598 for (const auto &buffer : inputBuffer) {
599 if (!buffer.empty())
600 return false;
601 }
602
603 return (*inp.outputWire).isBubble() &&
604 (*predictionOut.inputWire).isBubble();
605}
606
608 : statistics::Group(cpu, "fetch2"),
610 "Total number of instructions successfully decoded"),
612 "Number of integer instructions successfully decoded"),
613 ADD_STAT(fpInstructions, statistics::units::Count::get(),
614 "Number of floating point instructions successfully decoded"),
616 "Number of SIMD instructions successfully decoded"),
618 "Number of memory load instructions successfully decoded"),
620 "Number of memory store instructions successfully decoded"),
622 "Number of memory atomic instructions successfully decoded")
623{
631}
632
633void
635{
636 std::ostringstream data;
637
638 if (fetchInfo[0].blocked)
639 data << 'B';
640 else
641 (*out.inputWire).reportData(data);
642
643 minor::minorTrace("inputIndex=%d havePC=%d predictionSeqNum=%d insts=%s\n",
644 fetchInfo[0].inputIndex, fetchInfo[0].havePC,
645 fetchInfo[0].predictionSeqNum, data.str());
646 inputBuffer[0].minorTrace();
647}
648
649} // namespace minor
650} // namespace gem5
#define DPRINTF(x,...)
Definition trace.hh:209
const char data[]
MinorCPU is an in-order CPU model with four fixed pipeline stages:
Definition cpu.hh:85
Named(std::string_view name_)
Definition named.hh:57
virtual std::string name() const
Definition named.hh:60
Base class for branch operations.
Definition branch.hh:49
bool isInteger() const
bool isLoad() const
virtual void advancePC(PCStateBase &pc_state) const =0
bool isFloating() const
bool isVector() const
bool isStore() const
bool isAtomic() const
ThreadContext is the external interface to all thread state for anything outside of the CPU.
virtual InstDecoder * getDecoderPtr()=0
Forward data between Execute and Fetch1 carrying change-of-address/stream information.
Definition pipe_data.hh:66
MinorDynInstPtr inst
Instruction which caused this branch.
Definition pipe_data.hh:125
static bool isStreamChange(const BranchData::Reason reason)
Is a request with this reason actually a request to change the PC rather than a bubble or branch pred...
Definition pipe_data.cc:84
Reason reason
Explanation for this branch.
Definition pipe_data.hh:112
ThreadID threadId
ThreadID associated with branch.
Definition pipe_data.hh:115
std::unique_ptr< PCStateBase > target
Starting PC of that stream.
Definition pipe_data.hh:122
gem5::minor::Fetch2::Fetch2Stats stats
Fetch2(const std::string &name, MinorCPU &cpu_, const BaseMinorCPUParams &params, Latch< ForwardLineData >::Output inp_, Latch< BranchData >::Output branchInp_, Latch< BranchData >::Input predictionOut_, Latch< ForwardInstData >::Input out_, std::vector< InputBuffer< ForwardInstData > > &next_stage_input_buffer)
Definition fetch2.cc:58
void evaluate()
Pass on input/buffer data to the output if you can.
Definition fetch2.cc:240
void updateBranchPrediction(const BranchData &branch)
Update local branch prediction structures from feedback from Execute.
Definition fetch2.cc:129
const ForwardLineData * getInput(ThreadID tid)
Get a piece of data to work on from the inputBuffer, or 0 if there is no data.
Definition fetch2.cc:97
std::vector< InputBuffer< ForwardLineData > > inputBuffer
Definition fetch2.hh:99
void popInput(ThreadID tid)
Pop an element off the input buffer, if there are any.
Definition fetch2.cc:108
MinorCPU & cpu
Pointer back to the containing CPU.
Definition fetch2.hh:69
branch_prediction::BPredUnit & branchPredictor
Branch predictor passed from Python configuration.
Definition fetch2.hh:95
Latch< ForwardLineData >::Output inp
Input port carrying lines from Fetch1.
Definition fetch2.hh:72
bool isDrained()
Is this stage drained?
Definition fetch2.cc:596
Latch< BranchData >::Input predictionOut
Output port carrying predictions back to Fetch1.
Definition fetch2.hh:79
ThreadID threadPriority
Definition fetch2.hh:161
ThreadID getScheduledThread()
Use the current threading policy to determine the next thread to fetch from.
Definition fetch2.cc:566
Latch< ForwardInstData >::Input out
Output port carrying instructions into Decode.
Definition fetch2.hh:82
void dumpAllInput(ThreadID tid)
Dump the whole contents of the input buffer.
Definition fetch2.cc:119
void predictBranch(MinorDynInstPtr inst, BranchData &branch)
Predicts branches for the given instruction.
Definition fetch2.cc:191
std::vector< InputBuffer< ForwardInstData > > & nextStageReserve
Interface to reserve space in the next stage.
Definition fetch2.hh:85
bool processMoreThanOneInput
If true, more than one input word can be processed each cycle if there is room in the output to conta...
Definition fetch2.hh:92
std::vector< Fetch2ThreadInfo > fetchInfo
Definition fetch2.hh:160
unsigned int outputWidth
Width of output of this stage/input of next in instructions.
Definition fetch2.hh:88
Latch< BranchData >::Output branchInp
Input port carrying branches from Execute.
Definition fetch2.hh:76
void minorTrace() const
Definition fetch2.cc:634
Forward flowing data between Fetch2,Decode,Execute carrying a packet of instructions of a width appro...
Definition pipe_data.hh:284
ThreadID threadId
Thread associated with these instructions.
Definition pipe_data.hh:293
void resize(unsigned int width)
Resize a bubble/empty ForwardInstData and fill with bubbles.
Definition pipe_data.cc:263
bool isBubble() const
BubbleIF interface.
Definition pipe_data.cc:250
MinorDynInstPtr insts[MAX_FORWARD_INSTS]
Array of carried insts, ref counted.
Definition pipe_data.hh:287
Line fetch data in the forward direction.
Definition pipe_data.hh:187
unsigned int lineWidth
Explicit line width, don't rely on data.size.
Definition pipe_data.hh:206
uint8_t * line
Line data.
Definition pipe_data.hh:218
InstId id
Thread, stream, prediction ... id of this line.
Definition pipe_data.hh:214
std::unique_ptr< PCStateBase > pc
PC of the first inst within this sequence.
Definition pipe_data.hh:200
bool isFault() const
This is a fault, not a line.
Definition pipe_data.hh:250
Fault fault
This line has a fault.
Definition pipe_data.hh:211
Addr lineBaseAddr
First byte address in the line.
Definition pipe_data.hh:197
Like a Queue but with a restricted interface and a setTail function which, when the queue is empty,...
Definition buffers.hh:572
ThreadID threadId
The thread to which this line/instruction belongs.
Definition dyn_inst.hh:88
InstSeqNum streamSeqNum
The 'stream' this instruction belongs to.
Definition dyn_inst.hh:93
InstSeqNum predictionSeqNum
The predicted qualifier to stream, attached by Fetch2 as a consequence of branch prediction.
Definition dyn_inst.hh:97
Encapsulate wires on either input or output of the latch.
Definition buffers.hh:252
Dynamic instruction for Minor.
Definition dyn_inst.hh:163
STL vector class.
Definition stl.hh:37
Fetch2 receives lines of data from Fetch1, separates them into instructions and passes them to Decode...
#define ADD_STAT(n,...)
Convenience macro to add a stat to a statistics group.
Definition group.hh:75
#define panic(...)
This implements a cprintf based panic() function.
Definition logging.hh:220
#define fatal(...)
This implements a cprintf based fatal() function.
Definition logging.hh:232
Bitfield< 7 > i
Definition misc_types.hh:67
Bitfield< 12, 11 > set
RefCountingPtr< MinorDynInst > MinorDynInstPtr
MinorDynInsts are currently reference counted.
Definition dyn_inst.hh:71
void minorTrace(const char *fmt, Args ...args)
DPRINTFN for MinorTrace reporting.
Definition trace.hh:66
Units for Stats.
Definition units.hh:113
const FlagsType total
Print the total.
Definition info.hh:59
Copyright (c) 2024 Arm Limited All rights reserved.
Definition binary32.hh:36
int16_t ThreadID
Thread index/ID type.
Definition types.hh:235
const ThreadID InvalidThreadID
Definition types.hh:236
RefCountingPtr< StaticInst > StaticInstPtr
const StaticInstPtr nullStaticInstPtr
Statically allocated null StaticInstPtr.
output decoder
Definition nop.cc:61
The constructed pipeline.
statistics::Scalar loadInstructions
Definition fetch2.hh:171
statistics::Scalar fpInstructions
Definition fetch2.hh:169
statistics::Scalar intInstructions
Definition fetch2.hh:168
statistics::Scalar storeInstructions
Definition fetch2.hh:172
statistics::Scalar amoInstructions
Definition fetch2.hh:173
statistics::Scalar totalInstructions
Stats.
Definition fetch2.hh:167
statistics::Scalar vecInstructions
Definition fetch2.hh:170
Data members after this line are cycle-to-cycle state.
Definition fetch2.hh:105
InstSeqNum expectedStreamSeqNum
Stream sequence number remembered from last time the predictionSeqNum changed.
Definition fetch2.hh:149
InstSeqNum fetchSeqNum
Fetch2 is the source of fetch sequence numbers.
Definition fetch2.hh:143
bool havePC
PC is currently valid.
Definition fetch2.hh:135
InstSeqNum lastStreamSeqNum
Stream sequence number of the last seen line used to identify changes of instruction stream.
Definition fetch2.hh:139
std::unique_ptr< PCStateBase > pc
Remembered program counter value.
Definition fetch2.hh:130
unsigned int inputIndex
Index into an incompletely processed input line that instructions are to be extracted from.
Definition fetch2.hh:121
InstSeqNum predictionSeqNum
Fetch2 is the source of prediction sequence numbers.
Definition fetch2.hh:154
bool blocked
Blocked indication for report.
Definition fetch2.hh:157

Generated on Mon Oct 27 2025 04:13:00 for gem5 by doxygen 1.14.0