gem5 [DEVELOP-FOR-25.1]
Loading...
Searching...
No Matches
decode.cc
Go to the documentation of this file.
1/*
2 * Copyright (c) 2013-2014 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
#include "cpu/minor/decode.hh"

#include <string>

#include "base/logging.hh"
#include "base/trace.hh"
#include "cpu/minor/pipeline.hh"
#include "debug/Decode.hh"
45
46namespace gem5
47{
48
49namespace minor
50{
51
52Decode::Decode(const std::string &name,
53 MinorCPU &cpu_,
54 const BaseMinorCPUParams &params,
57 std::vector<InputBuffer<ForwardInstData>> &next_stage_input_buffer) :
58 Named(name),
59 cpu(cpu_),
60 inp(inp_),
61 out(out_),
62 nextStageReserve(next_stage_input_buffer),
63 outputWidth(params.executeInputWidth),
64 processMoreThanOneInput(params.decodeCycleInput),
65 decodeInfo(params.numThreads),
67{
68 if (outputWidth < 1)
69 fatal("%s: executeInputWidth must be >= 1 (%d)\n", name, outputWidth);
70
71 if (params.decodeInputBufferSize < 1) {
72 fatal("%s: decodeInputBufferSize must be >= 1 (%d)\n", name,
73 params.decodeInputBufferSize);
74 }
75
76 /* Per-thread input buffers */
77 for (ThreadID tid = 0; tid < params.numThreads; tid++) {
78 inputBuffer.push_back(
80 name + ".inputBuffer" + std::to_string(tid), "insts",
81 params.decodeInputBufferSize));
82 }
83}
84
85const ForwardInstData *
87{
88 /* Get insts from the inputBuffer to work with */
89 if (!inputBuffer[tid].empty()) {
90 const ForwardInstData &head = inputBuffer[tid].front();
91
92 return (head.isBubble() ? NULL : &(inputBuffer[tid].front()));
93 } else {
94 return NULL;
95 }
96}
97
98void
100{
101 if (!inputBuffer[tid].empty())
102 inputBuffer[tid].pop();
103
104 decodeInfo[tid].inputIndex = 0;
105 decodeInfo[tid].inMacroop = false;
106}
107
#if TRACING_ON
/** Add the tracing data to an instruction.  This originates in
 *  decode because this is the first place that execSeqNums are known
 *  (these are used as the 'FetchSeq' in tracing data) */
static void
dynInstAddTracing(MinorDynInstPtr inst, StaticInstPtr static_inst,
    MinorCPU &cpu)
{
    inst->traceData = cpu.getTracer()->getInstRecord(curTick(),
        cpu.getContext(inst->id.threadId),
        inst->staticInst, *inst->pc, static_inst);

    /* Use the execSeqNum as the fetch sequence number as this most closely
     * matches the other processor models' idea of fetch sequence */
    if (inst->traceData)
        inst->traceData->setFetchSeq(inst->id.execSeqNum);
}
#endif
126
127void
129{
130 /* Push input onto appropriate input buffer */
131 if (!inp.outputWire->isBubble())
132 inputBuffer[inp.outputWire->threadId].setTail(*inp.outputWire);
133
134 ForwardInstData &insts_out = *out.inputWire;
135
136 assert(insts_out.isBubble());
137
138 for (ThreadID tid = 0; tid < cpu.numThreads; tid++)
139 decodeInfo[tid].blocked = !nextStageReserve[tid].canReserve();
140
142
143 if (tid != InvalidThreadID) {
144 DecodeThreadInfo &decode_info = decodeInfo[tid];
145 const ForwardInstData *insts_in = getInput(tid);
146
147 unsigned int output_index = 0;
148
149 /* Pack instructions into the output while we can. This may involve
150 * using more than one input line */
151 while (insts_in &&
152 decode_info.inputIndex < insts_in->width() && /* Still more input */
153 output_index < outputWidth /* Still more output to fill */)
154 {
155 MinorDynInstPtr inst = insts_in->insts[decode_info.inputIndex];
156
157 if (inst->isBubble()) {
158 /* Skip */
159 decode_info.inputIndex++;
160 decode_info.inMacroop = false;
161 } else {
162 StaticInstPtr static_inst = inst->staticInst;
163 /* Static inst of a macro-op above the output_inst */
164 StaticInstPtr parent_static_inst = NULL;
165 MinorDynInstPtr output_inst = inst;
166
167 auto *dec_ptr =
168 cpu.getContext(inst->id.threadId)->getDecoderPtr();
169
170 if (inst->isFault()) {
171 DPRINTF(Decode, "Fault being passed: %d\n",
172 inst->fault->name());
173
174 decode_info.inputIndex++;
175 decode_info.inMacroop = false;
176 } else if (static_inst->isMacroop()) {
177 /* Generate a new micro-op */
178 StaticInstPtr static_micro_inst;
179
180 /* Set up PC for the next micro-op emitted */
181 if (!decode_info.inMacroop) {
182 set(decode_info.microopPC, *inst->pc);
183 decode_info.inMacroop = true;
184 }
185
186 if (isRomMicroPC(decode_info.microopPC->microPC())) {
187 static_micro_inst = dec_ptr->fetchRomMicroop(
188 decode_info.microopPC->microPC(), static_inst);
189 } else {
190 /* Get the micro-op static instruction from the
191 * static_inst. */
192 static_micro_inst = static_inst->fetchMicroop(
193 decode_info.microopPC->microPC());
194 }
195
196 output_inst =
197 new MinorDynInst(static_micro_inst, inst->id);
198 set(output_inst->pc, decode_info.microopPC);
199 output_inst->fault = NoFault;
200
201 /* Allow a predicted next address only on the last
202 * microop */
203 if (static_micro_inst->isLastMicroop()) {
204 output_inst->predictedTaken = inst->predictedTaken;
205 set(output_inst->predictedTarget,
206 inst->predictedTarget);
207 }
208
209 DPRINTF(Decode, "Microop decomposition inputIndex:"
210 " %d output_index: %d lastMicroop: %s microopPC:"
211 " %s inst: %d\n",
212 decode_info.inputIndex, output_index,
213 (static_micro_inst->isLastMicroop() ?
214 "true" : "false"),
215 *decode_info.microopPC,
216 *output_inst);
217
218 /* Acknowledge that the static_inst isn't mine, it's my
219 * parent macro-op's */
220 parent_static_inst = static_inst;
221
222 static_micro_inst->advancePC(*decode_info.microopPC);
223
224 /* Step input if this is the last micro-op */
225 if (static_micro_inst->isLastMicroop()) {
226 decode_info.inputIndex++;
227 decode_info.inMacroop = false;
228 }
229 } else {
230 /* Doesn't need decomposing, pass on instruction */
231 DPRINTF(Decode, "Passing on inst: %s inputIndex:"
232 " %d output_index: %d\n",
233 *output_inst, decode_info.inputIndex, output_index);
234
235 parent_static_inst = static_inst;
236
237 /* Step input */
238 decode_info.inputIndex++;
239 decode_info.inMacroop = false;
240 }
241
242 /* Set execSeqNum of output_inst */
243 output_inst->id.execSeqNum = decode_info.execSeqNum;
244 /* Add tracing */
245#if TRACING_ON
246 dynInstAddTracing(output_inst, parent_static_inst, cpu);
247#endif
248
249 /* Step to next sequence number */
250 decode_info.execSeqNum++;
251
252 /* Correctly size the output before writing */
253 if (output_index == 0) insts_out.resize(outputWidth);
254 /* Push into output */
255 insts_out.insts[output_index] = output_inst;
256 output_index++;
257 }
258
259 /* Have we finished with the input? */
260 if (decode_info.inputIndex == insts_in->width()) {
261 /* If we have just been producing micro-ops, we *must* have
262 * got to the end of that for inputIndex to be pushed past
263 * insts_in->width() */
264 assert(!decode_info.inMacroop);
265 popInput(tid);
266 insts_in = NULL;
267
269 DPRINTF(Decode, "Wrapping\n");
270 insts_in = getInput(tid);
271 }
272 }
273 }
274
275 /* The rest of the output (if any) should already have been packed
276 * with bubble instructions by insts_out's initialisation
277 *
278 * for (; output_index < outputWidth; output_index++)
279 * assert(insts_out.insts[output_index]->isBubble());
280 */
281 }
282
283 /* If we generated output, reserve space for the result in the next stage
284 * and mark the stage as being active this cycle */
285 if (!insts_out.isBubble()) {
286 /* Note activity of following buffer */
287 cpu.activityRecorder->activity();
288 insts_out.threadId = tid;
289 nextStageReserve[tid].reserve();
290 }
291
292 /* If we still have input to process and somewhere to put it,
293 * mark stage as active */
294 for (ThreadID i = 0; i < cpu.numThreads; i++)
295 {
296 if (getInput(i) && nextStageReserve[i].canReserve()) {
297 cpu.activityRecorder->activateStage(Pipeline::DecodeStageId);
298 break;
299 }
300 }
301
302 /* Make sure the input (if any left) is pushed */
303 if (!inp.outputWire->isBubble())
304 inputBuffer[inp.outputWire->threadId].pushTail();
305}
306
307inline ThreadID
309{
310 /* Select thread via policy. */
311 std::vector<ThreadID> priority_list;
312
313 switch (cpu.threadPolicy) {
314 case enums::SingleThreaded:
315 priority_list.push_back(0);
316 break;
317 case enums::RoundRobin:
318 priority_list = cpu.roundRobinPriority(threadPriority);
319 break;
320 case enums::Random:
321 priority_list = cpu.randomPriority();
322 break;
323 default:
324 panic("Unknown fetch policy");
325 }
326
327 for (auto tid : priority_list) {
328 if (getInput(tid) && !decodeInfo[tid].blocked) {
329 threadPriority = tid;
330 return tid;
331 }
332 }
333
334 return InvalidThreadID;
335}
336
337bool
339{
340 for (const auto &buffer : inputBuffer) {
341 if (!buffer.empty())
342 return false;
343 }
344
345 return (*inp.outputWire).isBubble();
346}
347
348void
350{
351 std::ostringstream data;
352
353 if (decodeInfo[0].blocked)
354 data << 'B';
355 else
356 (*out.inputWire).reportData(data);
357
358 minor::minorTrace("insts=%s\n", data.str());
359 inputBuffer[0].minorTrace();
360}
361
362} // namespace minor
363} // namespace gem5
#define DPRINTF(x,...)
Definition trace.hh:209
const char data[]
virtual ThreadContext * getContext(int tn)
Given a thread num get tho thread context for it.
Definition base.hh:316
trace::InstTracer * getTracer()
Provide access to the tracer pointer.
Definition base.hh:300
MinorCPU is an in-order CPU model with four fixed pipeline stages:
Definition cpu.hh:85
Named(std::string_view name_)
Definition named.hh:57
virtual std::string name() const
Definition named.hh:60
virtual StaticInstPtr fetchMicroop(MicroPC upc) const
Return the microop that goes with a particular micropc.
virtual void advancePC(PCStateBase &pc_state) const =0
bool isMacroop() const
bool isLastMicroop() const
void evaluate()
Pass on input/buffer data to the output if you can.
Definition decode.cc:128
void minorTrace() const
Definition decode.cc:349
bool isDrained()
Is this stage drained?
Definition decode.cc:338
std::vector< InputBuffer< ForwardInstData > > inputBuffer
Definition decode.hh:89
const ForwardInstData * getInput(ThreadID tid)
Get a piece of data to work on, or 0 if there is no data.
Definition decode.cc:86
void popInput(ThreadID tid)
Pop an element off the input buffer, if there are any.
Definition decode.cc:99
std::vector< InputBuffer< ForwardInstData > > & nextStageReserve
Interface to reserve space in the next stage.
Definition decode.hh:78
Latch< ForwardInstData >::Output inp
Input port carrying macro instructions from Fetch2.
Definition decode.hh:73
std::vector< DecodeThreadInfo > decodeInfo
Definition decode.hh:127
unsigned int outputWidth
Width of output of this stage/input of next in instructions.
Definition decode.hh:81
Decode(const std::string &name, MinorCPU &cpu_, const BaseMinorCPUParams &params, Latch< ForwardInstData >::Output inp_, Latch< ForwardInstData >::Input out_, std::vector< InputBuffer< ForwardInstData > > &next_stage_input_buffer)
Definition decode.cc:52
Latch< ForwardInstData >::Input out
Output port carrying micro-op decomposed instructions to Execute.
Definition decode.hh:75
ThreadID getScheduledThread()
Use the current threading policy to determine the next thread to decode from.
Definition decode.cc:308
ThreadID threadPriority
Definition decode.hh:128
MinorCPU & cpu
Pointer back to the containing CPU.
Definition decode.hh:70
bool processMoreThanOneInput
If true, more than one input word can be processed each cycle if there is room in the output to conta...
Definition decode.hh:85
Forward flowing data between Fetch2,Decode,Execute carrying a packet of instructions of a width appro...
Definition pipe_data.hh:284
ThreadID threadId
Thread associated with these instructions.
Definition pipe_data.hh:293
void resize(unsigned int width)
Resize a bubble/empty ForwardInstData and fill with bubbles.
Definition pipe_data.cc:263
bool isBubble() const
BubbleIF interface.
Definition pipe_data.cc:250
MinorDynInstPtr insts[MAX_FORWARD_INSTS]
Array of carried insts, ref counted.
Definition pipe_data.hh:287
unsigned int width() const
Number of instructions carried by this object.
Definition pipe_data.hh:303
Like a Queue but with a restricted interface and a setTail function which, when the queue is empty,...
Definition buffers.hh:572
ThreadID threadId
The thread to which this line/instruction belongs.
Definition dyn_inst.hh:88
InstSeqNum execSeqNum
'Execute' sequence number.
Definition dyn_inst.hh:110
Encapsulate wires on either input or output of the latch.
Definition buffers.hh:252
Dynamic instruction for Minor.
Definition dyn_inst.hh:163
trace::InstRecord * traceData
Trace information for this instruction's execution.
Definition dyn_inst.hh:175
std::unique_ptr< PCStateBase > pc
The fetch address of this instruction.
Definition dyn_inst.hh:178
const StaticInstPtr staticInst
Definition dyn_inst.hh:170
void setFetchSeq(InstSeqNum seq)
virtual InstRecord * getInstRecord(Tick when, ThreadContext *tc, const StaticInstPtr staticInst, const PCStateBase &pc, const StaticInstPtr macroStaticInst=nullptr)=0
STL vector class.
Definition stl.hh:37
#define panic(...)
This implements a cprintf based panic() function.
Definition logging.hh:220
#define fatal(...)
This implements a cprintf based fatal() function.
Definition logging.hh:232
Decode collects macro-ops from Fetch2 and splits them into micro-ops passed to Execute.
Bitfield< 7 > i
Definition misc_types.hh:67
Bitfield< 12, 11 > set
RefCountingPtr< MinorDynInst > MinorDynInstPtr
MinorDynInsts are currently reference counted.
Definition dyn_inst.hh:71
void minorTrace(const char *fmt, Args ...args)
DPRINTFN for MinorTrace reporting.
Definition trace.hh:66
Copyright (c) 2024 Arm Limited All rights reserved.
Definition binary32.hh:36
int16_t ThreadID
Thread index/ID type.
Definition types.hh:235
const ThreadID InvalidThreadID
Definition types.hh:236
Tick curTick()
The universal simulation clock.
Definition cur_tick.hh:46
RefCountingPtr< StaticInst > StaticInstPtr
static bool isRomMicroPC(MicroPC upc)
Definition types.hh:166
constexpr decltype(nullptr) NoFault
Definition types.hh:253
The constructed pipeline.
Data members after this line are cycle-to-cycle state.
Definition decode.hh:95
InstSeqNum execSeqNum
Source of execSeqNums to number instructions.
Definition decode.hh:121
bool inMacroop
True when we're in the process of decomposing a micro-op and microopPC will be valid.
Definition decode.hh:117
std::unique_ptr< PCStateBase > microopPC
Definition decode.hh:118
unsigned int inputIndex
Index into the inputBuffer's head marking the start of unhandled instructions.
Definition decode.hh:110

Generated on Mon Oct 27 2025 04:13:00 for gem5 by doxygen 1.14.0