gem5 [DEVELOP-FOR-25.0]
fetch2.cc
/*
 * Copyright (c) 2013-2014,2016 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "cpu/minor/fetch2.hh"

#include <string>

#include "arch/generic/decoder.hh"
#include "base/logging.hh"
#include "base/trace.hh"
#include "cpu/minor/pipeline.hh"
#include "cpu/null_static_inst.hh"
#include "cpu/pred/bpred_unit.hh"
#include "debug/Branch.hh"
#include "debug/Fetch.hh"
#include "debug/MinorTrace.hh"

namespace gem5
{

namespace minor
{

Fetch2::Fetch2(const std::string &name,
    MinorCPU &cpu_,
    const BaseMinorCPUParams &params,
    Latch<ForwardLineData>::Output inp_,
    Latch<BranchData>::Output branchInp_,
    Latch<BranchData>::Input predictionOut_,
    Latch<ForwardInstData>::Input out_,
    std::vector<InputBuffer<ForwardInstData>> &next_stage_input_buffer) :
    Named(name),
    cpu(cpu_),
    inp(inp_),
    branchInp(branchInp_),
    predictionOut(predictionOut_),
    out(out_),
    nextStageReserve(next_stage_input_buffer),
    outputWidth(params.decodeInputWidth),
    processMoreThanOneInput(params.fetch2CycleInput),
    branchPredictor(*params.branchPred),
    fetchInfo(params.numThreads),
    threadPriority(0), stats(&cpu_)
{
    if (outputWidth < 1)
        fatal("%s: decodeInputWidth must be >= 1 (%d)\n", name, outputWidth);

    if (params.fetch2InputBufferSize < 1) {
        fatal("%s: fetch2InputBufferSize must be >= 1 (%d)\n", name,
            params.fetch2InputBufferSize);
    }

    /* Per-thread input buffers */
    for (ThreadID tid = 0; tid < params.numThreads; tid++) {
        inputBuffer.push_back(
            InputBuffer<ForwardLineData>(
                name + ".inputBuffer" + std::to_string(tid), "lines",
                params.fetch2InputBufferSize));
    }
}
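
/* Get a piece of data to work on from the inputBuffer, or 0 if there
 * is no data */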
const ForwardLineData *
Fetch2::getInput(ThreadID tid)
{
    /* Get a line from the inputBuffer to work with */
    if (!inputBuffer[tid].empty()) {
        return &(inputBuffer[tid].front());
    } else {
        return NULL;
    }
}
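
/* Pop an element off the input buffer, if there are any */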
void
Fetch2::popInput(ThreadID tid)
{
    if (!inputBuffer[tid].empty()) {
        inputBuffer[tid].front().freeLine();
        inputBuffer[tid].pop();
    }

    fetchInfo[tid].inputIndex = 0;
}
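
/* Dump the whole contents of the input buffer */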
void
Fetch2::dumpAllInput(ThreadID tid)
{
    DPRINTF(Fetch, "Dumping whole input buffer\n");
    while (!inputBuffer[tid].empty())
        popInput(tid);

    fetchInfo[tid].inputIndex = 0;
}
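
/* Update local branch prediction structures from feedback from
 * Execute */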
void
Fetch2::updateBranchPrediction(const BranchData &branch)
{
    MinorDynInstPtr inst = branch.inst;

    /* Don't even consider instructions we didn't try to predict or faults */
    if (inst->isFault() || !inst->triedToPredict)
        return;

    switch (branch.reason) {
      case BranchData::NoBranch:
        /* No data to update */
        break;
      case BranchData::Interrupt:
        /* Never try to predict interrupts */
        break;
      case BranchData::SuspendThread:
        /* Don't need to act on suspends */
        break;
      case BranchData::HaltFetch:
        /* Don't need to act on fetch wakeup */
        break;
      case BranchData::BranchPrediction:
        /* Shouldn't happen. Fetch2 is the only source of
         * BranchPredictions */
        break;
      case BranchData::UnpredictedBranch:
        /* Unpredicted branch or barrier */
        DPRINTF(Branch, "Unpredicted branch seen inst: %s\n", *inst);
        branchPredictor.squash(inst->id.fetchSeqNum,
            *branch.target, true, inst->id.threadId);
        // Update after squashing to accommodate O3CPU
        // using the branch prediction code.
        branchPredictor.update(inst->id.fetchSeqNum,
            inst->id.threadId);
        break;
      case BranchData::CorrectlyPredictedBranch:
        /* Predicted taken, was taken */
        DPRINTF(Branch, "Branch predicted correctly inst: %s\n", *inst);
        branchPredictor.update(inst->id.fetchSeqNum,
            inst->id.threadId);
        break;
      case BranchData::BadlyPredictedBranch:
        /* Predicted taken, not taken */
        DPRINTF(Branch, "Branch mis-predicted inst: %s\n", *inst);
        branchPredictor.squash(inst->id.fetchSeqNum,
            *branch.target /* Not used */, false, inst->id.threadId);
        // Update after squashing to accommodate O3CPU
        // using the branch prediction code.
        branchPredictor.update(inst->id.fetchSeqNum,
            inst->id.threadId);
        break;
      case BranchData::BadlyPredictedBranchTarget:
        /* Predicted taken, was taken but to a different target */
        DPRINTF(Branch, "Branch mis-predicted target inst: %s target: %s\n",
            *inst, *branch.target);
        branchPredictor.squash(inst->id.fetchSeqNum,
            *branch.target, true, inst->id.threadId);
        break;
    }
}
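
/* Predicts branches for the given instruction */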
void
Fetch2::predictBranch(MinorDynInstPtr inst, BranchData &branch)
{
    Fetch2ThreadInfo &thread = fetchInfo[inst->id.threadId];

    assert(!inst->predictedTaken);

    /* Skip non-control/sys call instructions */
    if (inst->staticInst->isControl() || inst->staticInst->isSyscall()) {
        std::unique_ptr<PCStateBase> inst_pc(inst->pc->clone());

        /* Tried to predict */
        inst->triedToPredict = true;

        DPRINTF(Branch, "Trying to predict for inst: %s\n", *inst);

        if (branchPredictor.predict(inst->staticInst,
            inst->id.fetchSeqNum, *inst_pc, inst->id.threadId)) {
            set(branch.target, *inst_pc);
            inst->predictedTaken = true;
            set(inst->predictedTarget, inst_pc);
        }
    } else {
        DPRINTF(Branch, "Not attempting prediction for inst: %s\n", *inst);
    }

    /* If we predict taken, set branch and update sequence numbers */
    if (inst->predictedTaken) {
        /* Update the predictionSeqNum and remember the streamSeqNum that it
         * was associated with */
        thread.expectedStreamSeqNum = inst->id.streamSeqNum;

        BranchData new_branch = BranchData(BranchData::BranchPrediction,
            inst->id.threadId,
            inst->id.streamSeqNum, thread.predictionSeqNum + 1,
            *inst->predictedTarget, inst);

        /* Mark with a new prediction number by the stream number of the
         * instruction causing the prediction */
        thread.predictionSeqNum++;
        branch = new_branch;

        DPRINTF(Branch, "Branch predicted taken inst: %s target: %s"
            " new predictionSeqNum: %d\n",
            *inst, *inst->predictedTarget, thread.predictionSeqNum);
    }
}
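
/* Pass on input/buffer data to the output if you can */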
void
Fetch2::evaluate()
{
    /* Push input onto appropriate input buffer */
    if (!inp.outputWire->isBubble())
        inputBuffer[inp.outputWire->id.threadId].setTail(*inp.outputWire);

    ForwardInstData &insts_out = *out.inputWire;
    BranchData prediction;
    BranchData &branch_inp = *branchInp.outputWire;

    assert(insts_out.isBubble());

    /* React to branches from Execute to update local branch prediction
     * structures */
    updateBranchPrediction(branch_inp);

    /* If a branch arrives, don't try and do anything about it. Only
     * react to your own predictions */
    if (branch_inp.isStreamChange()) {
        DPRINTF(Fetch, "Dumping all input as a stream changing branch"
            " has arrived\n");
        dumpAllInput(branch_inp.threadId);
        fetchInfo[branch_inp.threadId].havePC = false;
    }

    assert(insts_out.isBubble());
    /* Even when blocked, clear out input lines with the wrong
     * prediction sequence number */
    for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
        Fetch2ThreadInfo &thread = fetchInfo[tid];

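        /* A thread is blocked this cycle if the next stage's input
         * buffer can't reserve space for another packet of
         * instructions */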
        thread.blocked = !nextStageReserve[tid].canReserve();

        const ForwardLineData *line_in = getInput(tid);

        while (line_in &&
            thread.expectedStreamSeqNum == line_in->id.streamSeqNum &&
            thread.predictionSeqNum != line_in->id.predictionSeqNum)
        {
            DPRINTF(Fetch, "Discarding line %s"
                " due to predictionSeqNum mismatch (expected: %d)\n",
                line_in->id, thread.predictionSeqNum);

            popInput(tid);
            fetchInfo[tid].havePC = false;

            if (processMoreThanOneInput) {
                DPRINTF(Fetch, "Wrapping\n");
                line_in = getInput(tid);
            } else {
                line_in = NULL;
            }
        }
    }
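
    /* Choose the next thread to take instructions from */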
    ThreadID tid = getScheduledThread();
    DPRINTF(Fetch, "Scheduled Thread: %d\n", tid);

    assert(insts_out.isBubble());
    if (tid != InvalidThreadID) {
        Fetch2ThreadInfo &fetch_info = fetchInfo[tid];

        const ForwardLineData *line_in = getInput(tid);

        unsigned int output_index = 0;

        /* Pack instructions into the output while we can. This may involve
         * using more than one input line. Note that lineWidth will be 0
         * for faulting lines */
        while (line_in &&
            (line_in->isFault() ||
                fetch_info.inputIndex < line_in->lineWidth) && /* More input */
            output_index < outputWidth && /* More output to fill */
            prediction.isBubble() /* No predicted branch */)
        {
            ThreadContext *thread = cpu.getContext(line_in->id.threadId);
            InstDecoder *decoder = thread->getDecoderPtr();

            /* Discard line due to prediction sequence number being wrong but
             * without the streamSeqNum number having changed */
            bool discard_line =
                fetch_info.expectedStreamSeqNum == line_in->id.streamSeqNum &&
                fetch_info.predictionSeqNum != line_in->id.predictionSeqNum;

            /* Set the PC if the stream changes. Setting havePC to false in
             * a previous cycle handles all other change of flow of control
             * issues */
            bool set_pc = fetch_info.lastStreamSeqNum != line_in->id.streamSeqNum;

            if (!discard_line && (!fetch_info.havePC || set_pc)) {
                /* Set the inputIndex to be the MachInst-aligned offset
                 * from lineBaseAddr of the new PC value */
                fetch_info.inputIndex =
                    (line_in->pc->instAddr() & decoder->pcMask()) -
                    line_in->lineBaseAddr;
                DPRINTF(Fetch, "Setting new PC value: %s inputIndex: 0x%x"
                    " lineBaseAddr: 0x%x lineWidth: 0x%x\n",
                    *line_in->pc, fetch_info.inputIndex, line_in->lineBaseAddr,
                    line_in->lineWidth);
                set(fetch_info.pc, line_in->pc);
                fetch_info.havePC = true;
                decoder->reset();
            }

            /* The generated instruction. Leave as NULL if no instruction
             * is to be packed into the output */
            MinorDynInstPtr dyn_inst = NULL;

            if (discard_line) {
                /* Rest of line was from an older prediction in the same
                 * stream */
                DPRINTF(Fetch, "Discarding line %s (from inputIndex: %d)"
                    " due to predictionSeqNum mismatch (expected: %d)\n",
                    line_in->id, fetch_info.inputIndex,
                    fetch_info.predictionSeqNum);
            } else if (line_in->isFault()) {
                /* Pack a fault as a MinorDynInst with ->fault set */

                /* Make a new instruction and pick up the line, stream,
                 * prediction, thread ids from the incoming line */
                dyn_inst = new MinorDynInst(nullStaticInstPtr, line_in->id);

                /* Fetch and prediction sequence numbers originate here */
                dyn_inst->id.fetchSeqNum = fetch_info.fetchSeqNum;
                dyn_inst->id.predictionSeqNum = fetch_info.predictionSeqNum;
                /* To complete the set, test that exec sequence number has
                 * not been set */
                assert(dyn_inst->id.execSeqNum == 0);

                set(dyn_inst->pc, fetch_info.pc);

                /* Pack a faulting instruction but allow other
                 * instructions to be generated. (Fetch2 makes no
                 * immediate judgement about streamSeqNum) */
                dyn_inst->fault = line_in->fault;
                DPRINTF(Fetch, "Fault being passed output_index: "
                    "%d: %s\n", output_index, dyn_inst->fault->name());
            } else {
                uint8_t *line = line_in->line;

                /* The instruction is wholly in the line, can just copy. */
                memcpy(decoder->moreBytesPtr(), line + fetch_info.inputIndex,
                    decoder->moreBytesSize());

                if (!decoder->instReady()) {
                    decoder->moreBytes(*fetch_info.pc,
                        line_in->lineBaseAddr + fetch_info.inputIndex);
                    DPRINTF(Fetch, "Offering MachInst to decoder addr: 0x%x\n",
                        line_in->lineBaseAddr + fetch_info.inputIndex);
                }

                /* Maybe make the above a loop to accommodate ISAs with
                 * instructions longer than sizeof(MachInst) */

                if (decoder->instReady()) {
                    /* Note that the decoder can update the given PC.
                     * Remember not to assign it until *after* calling
                     * decode */
                    StaticInstPtr decoded_inst =
                        decoder->decode(*fetch_info.pc);

                    /* Make a new instruction and pick up the line, stream,
                     * prediction, thread ids from the incoming line */
                    dyn_inst = new MinorDynInst(decoded_inst, line_in->id);

                    /* Fetch and prediction sequence numbers originate here */
                    dyn_inst->id.fetchSeqNum = fetch_info.fetchSeqNum;
                    dyn_inst->id.predictionSeqNum = fetch_info.predictionSeqNum;
                    /* To complete the set, test that exec sequence number
                     * has not been set */
                    assert(dyn_inst->id.execSeqNum == 0);

                    set(dyn_inst->pc, fetch_info.pc);
                    DPRINTF(Fetch, "decoder inst %s\n", *dyn_inst);

                    // Collect some basic inst class stats
                    if (decoded_inst->isLoad())
                        stats.loadInstructions++;
                    else if (decoded_inst->isStore())
                        stats.storeInstructions++;
                    else if (decoded_inst->isAtomic())
                        stats.amoInstructions++;
                    else if (decoded_inst->isVector())
                        stats.vecInstructions++;
                    else if (decoded_inst->isFloating())
                        stats.fpInstructions++;
                    else if (decoded_inst->isInteger())
                        stats.intInstructions++;

                    DPRINTF(Fetch, "Instruction extracted from line %s"
                        " lineWidth: %d output_index: %d inputIndex: %d"
                        " pc: %s inst: %s\n",
                        line_in->id,
                        line_in->lineWidth, output_index, fetch_info.inputIndex,
                        *fetch_info.pc, *dyn_inst);

                    /*
                     * In SE mode, it's possible to branch to a microop when
                     * replaying faults such as page faults (or simply
                     * intra-microcode branches in X86). Unfortunately,
                     * as Minor has micro-op decomposition in a separate
                     * pipeline stage from instruction decomposition, the
                     * following advancePC (which may follow a branch with
                     * microPC() != 0) *must* see a fresh macroop.
                     *
                     * X86 can branch within microops so we need to deal with
                     * the case that, after a branch, the first un-advanced PC
                     * may be pointing to a microop other than 0. Once
                     * advanced, however, the microop number *must* be 0
                     */
                    fetch_info.pc->uReset();

                    /* Advance PC for the next instruction */
                    decoded_inst->advancePC(*fetch_info.pc);

                    /* Predict any branches and issue a branch if
                     * necessary */
                    predictBranch(dyn_inst, prediction);
                } else {
                    DPRINTF(Fetch, "Inst not ready yet\n");
                }

                /* Step on the pointer into the line if there's no
                 * complete instruction waiting */
                if (decoder->needMoreBytes()) {
                    fetch_info.inputIndex += decoder->moreBytesSize();

                    DPRINTF(Fetch, "Updated inputIndex value PC: %s"
                        " inputIndex: 0x%x lineBaseAddr: 0x%x lineWidth: 0x%x\n",
                        *line_in->pc, fetch_info.inputIndex, line_in->lineBaseAddr,
                        line_in->lineWidth);
                }
            }

            if (dyn_inst) {
                /* Step to next sequence number */
                fetch_info.fetchSeqNum++;

                /* Correctly size the output before writing */
                if (output_index == 0) {
                    insts_out.resize(outputWidth);
                }
                /* Pack the generated dynamic instruction into the output */
                insts_out.insts[output_index] = dyn_inst;
                output_index++;

                /* Output MinorTrace instruction info for
                 * pre-microop decomposition macroops */
                if (debug::MinorTrace && !dyn_inst->isFault() &&
                    dyn_inst->staticInst->isMacroop()) {
                    dyn_inst->minorTraceInst(*this);
                }
            }

            /* Remember the streamSeqNum of this line so we can tell when
             * we change stream */
            fetch_info.lastStreamSeqNum = line_in->id.streamSeqNum;

            /* Asked to discard line or there was a branch or fault */
            if (!prediction.isBubble() || /* The remains of a
                line with a prediction in it */
                line_in->isFault() /* A line which is just a fault */)
            {
                DPRINTF(Fetch, "Discarding all input on branch/fault\n");
                dumpAllInput(tid);
                fetch_info.havePC = false;
                line_in = NULL;
            } else if (discard_line) {
                /* Just discard one line, ones behind it may have new
                 * stream sequence numbers. There's a DPRINTF above
                 * for this event */
                popInput(tid);
                fetch_info.havePC = false;
                line_in = NULL;
            } else if (fetch_info.inputIndex == line_in->lineWidth) {
                /* Got to end of a line, pop the line but keep PC
                 * in case this is a line-wrapping inst. */
                popInput(tid);
                line_in = NULL;
            }

            if (!line_in && processMoreThanOneInput) {
                DPRINTF(Fetch, "Wrapping\n");
                line_in = getInput(tid);
            }
        }

        /* The rest of the output (if any) should already have been packed
         * with bubble instructions by insts_out's initialisation */
    }
    if (tid == InvalidThreadID) {
        assert(insts_out.isBubble());
    }

    *predictionOut.inputWire = prediction;

    /* If we generated output, reserve space for the result in the next stage
     * and mark the stage as being active this cycle */
    if (!insts_out.isBubble()) {
        /* Note activity of following buffer */
        cpu.activityRecorder->activity();
        insts_out.threadId = tid;
        nextStageReserve[tid].reserve();
    }

    /* If we still have input to process and somewhere to put it,
     * mark stage as active */
    for (ThreadID i = 0; i < cpu.numThreads; i++)
    {
        if (getInput(i) && nextStageReserve[i].canReserve()) {
            cpu.activityRecorder->activateStage(Pipeline::Fetch2StageId);
            break;
        }
    }

    /* Make sure the input (if any left) is pushed */
    if (!inp.outputWire->isBubble())
        inputBuffer[inp.outputWire->id.threadId].pushTail();
}
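
/* Use the current threading policy to determine the next thread to
 * fetch from */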
inline ThreadID
Fetch2::getScheduledThread()
{
    /* Select thread via policy. */
    std::vector<ThreadID> priority_list;

    switch (cpu.threadPolicy) {
      case enums::SingleThreaded:
        priority_list.push_back(0);
        break;
      case enums::RoundRobin:
        priority_list = cpu.roundRobinPriority(threadPriority);
        break;
      case enums::Random:
        priority_list = cpu.randomPriority();
        break;
      default:
        panic("Unknown fetch policy");
    }

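    /* Pick the first thread in priority order that has input available
     * and isn't blocked by the next stage */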
    for (auto tid : priority_list) {
        if (getInput(tid) && !fetchInfo[tid].blocked) {
            threadPriority = tid;
            return tid;
        }
    }

    return InvalidThreadID;
}

bool
Fetch2::isDrained()
{
    for (const auto &buffer : inputBuffer) {
        if (!buffer.empty())
            return false;
    }

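    /* Also require the latches to and from this stage to carry nothing
     * but bubbles */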
    return (*inp.outputWire).isBubble() &&
        (*predictionOut.inputWire).isBubble();
}

Fetch2::Fetch2Stats::Fetch2Stats(MinorCPU *cpu)
    : statistics::Group(cpu, "fetch2"),
      ADD_STAT(intInstructions, statistics::units::Count::get(),
               "Number of integer instructions successfully decoded"),
      ADD_STAT(fpInstructions, statistics::units::Count::get(),
               "Number of floating point instructions successfully decoded"),
      ADD_STAT(vecInstructions, statistics::units::Count::get(),
               "Number of SIMD instructions successfully decoded"),
      ADD_STAT(loadInstructions, statistics::units::Count::get(),
               "Number of memory load instructions successfully decoded"),
      ADD_STAT(storeInstructions, statistics::units::Count::get(),
               "Number of memory store instructions successfully decoded"),
      ADD_STAT(amoInstructions, statistics::units::Count::get(),
               "Number of memory atomic instructions successfully decoded")
{
    intInstructions
        .flags(statistics::total);
    fpInstructions
        .flags(statistics::total);
    vecInstructions
        .flags(statistics::total);
    loadInstructions
        .flags(statistics::total);
    storeInstructions
        .flags(statistics::total);
    amoInstructions
        .flags(statistics::total);
}

void
Fetch2::minorTrace() const
{
    std::ostringstream data;

    if (fetchInfo[0].blocked)
        data << 'B';
    else
        (*out.inputWire).reportData(data);

    minor::minorTrace("inputIndex=%d havePC=%d predictionSeqNum=%d insts=%s\n",
        fetchInfo[0].inputIndex, fetchInfo[0].havePC,
        fetchInfo[0].predictionSeqNum, data.str());
    inputBuffer[0].minorTrace();
}

} // namespace minor
} // namespace gem5