gem5  v22.1.0.0
decode.cc
1 /*
2  * Copyright (c) 2013-2014 ARM Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions are
16  * met: redistributions of source code must retain the above copyright
17  * notice, this list of conditions and the following disclaimer;
18  * redistributions in binary form must reproduce the above copyright
19  * notice, this list of conditions and the following disclaimer in the
20  * documentation and/or other materials provided with the distribution;
21  * neither the name of the copyright holders nor the names of its
22  * contributors may be used to endorse or promote products derived from
23  * this software without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 #include "cpu/minor/decode.hh"
39 
40 #include "base/logging.hh"
41 #include "base/trace.hh"
42 #include "cpu/minor/pipeline.hh"
43 #include "debug/Decode.hh"
44 
45 namespace gem5
46 {
47 
48 GEM5_DEPRECATED_NAMESPACE(Minor, minor);
49 namespace minor
50 {
51 
52 Decode::Decode(const std::string &name,
53  MinorCPU &cpu_,
54  const BaseMinorCPUParams &params,
55  Latch<ForwardInstData>::Output inp_,
56  Latch<ForwardInstData>::Input out_,
57  std::vector<InputBuffer<ForwardInstData>> &next_stage_input_buffer) :
58  Named(name),
59  cpu(cpu_),
60  inp(inp_),
61  out(out_),
62  nextStageReserve(next_stage_input_buffer),
63  outputWidth(params.executeInputWidth),
64  processMoreThanOneInput(params.decodeCycleInput),
65  decodeInfo(params.numThreads),
66  threadPriority(0)
67 {
68  if (outputWidth < 1)
69  fatal("%s: executeInputWidth must be >= 1 (%d)\n", name, outputWidth);
70 
71  if (params.decodeInputBufferSize < 1) {
72  fatal("%s: decodeInputBufferSize must be >= 1 (%d)\n", name,
73  params.decodeInputBufferSize);
74  }
75 
76  /* Per-thread input buffers */
77  for (ThreadID tid = 0; tid < params.numThreads; tid++) {
78  inputBuffer.push_back(
79  InputBuffer<ForwardInstData>(
80  name + ".inputBuffer" + std::to_string(tid), "insts",
81  params.decodeInputBufferSize));
82  }
83 }
84 
85 const ForwardInstData *
86 Decode::getInput(ThreadID tid)
87 {
88  /* Get insts from the inputBuffer to work with */
89  if (!inputBuffer[tid].empty()) {
90  const ForwardInstData &head = inputBuffer[tid].front();
91 
92  return (head.isBubble() ? NULL : &(inputBuffer[tid].front()));
93  } else {
94  return NULL;
95  }
96 }
97 
98 void
99 Decode::popInput(ThreadID tid)
100 {
101  if (!inputBuffer[tid].empty())
102  inputBuffer[tid].pop();
103 
104  decodeInfo[tid].inputIndex = 0;
105  decodeInfo[tid].inMacroop = false;
106 }
107 
108 #if TRACING_ON
109 /** Attach a trace record to an instruction, obtained from the CPU's
110  *  tracer. Only the record's fetch sequence number is filled in here
111  *  (see below). */
112 static void
113 dynInstAddTracing(MinorDynInstPtr inst, StaticInstPtr static_inst,
114  MinorCPU &cpu)
115 {
116  inst->traceData = cpu.getTracer()->getInstRecord(curTick(),
117  cpu.getContext(inst->id.threadId),
118  inst->staticInst, *inst->pc, static_inst);
119 
120  /* Use the execSeqNum as the fetch sequence number as this most closely
121  * matches the other processor models' idea of fetch sequence */
122  if (inst->traceData)
123  inst->traceData->setFetchSeq(inst->id.execSeqNum);
124 }
125 #endif
126 
127 void
128 Decode::evaluate()
129 {
130  /* Push input onto appropriate input buffer */
131  if (!inp.outputWire->isBubble())
132  inputBuffer[inp.outputWire->threadId].setTail(*inp.outputWire);
133 
134  ForwardInstData &insts_out = *out.inputWire;
135 
136  assert(insts_out.isBubble());
137 
138  for (ThreadID tid = 0; tid < cpu.numThreads; tid++)
139  decodeInfo[tid].blocked = !nextStageReserve[tid].canReserve();
140 
141  ThreadID tid = getScheduledThread();
142 
143  if (tid != InvalidThreadID) {
144  DecodeThreadInfo &decode_info = decodeInfo[tid];
145  const ForwardInstData *insts_in = getInput(tid);
146 
147  unsigned int output_index = 0;
148 
149  /* Pack instructions into the output while we can. This may involve
150  * using more than one input line */
151  while (insts_in &&
152  decode_info.inputIndex < insts_in->width() && /* Still more input */
153  output_index < outputWidth /* Still more output to fill */)
154  {
155  MinorDynInstPtr inst = insts_in->insts[decode_info.inputIndex];
156 
157  if (inst->isBubble()) {
158  /* Skip */
159  decode_info.inputIndex++;
160  decode_info.inMacroop = false;
161  } else {
162  StaticInstPtr static_inst = inst->staticInst;
163  /* Static inst of a macro-op above the output_inst */
164  StaticInstPtr parent_static_inst = NULL;
165  MinorDynInstPtr output_inst = inst;
166 
167  if (inst->isFault()) {
168  DPRINTF(Decode, "Fault being passed: %d\n",
169  inst->fault->name());
170 
171  decode_info.inputIndex++;
172  decode_info.inMacroop = false;
173  } else if (static_inst->isMacroop()) {
174  /* Generate a new micro-op */
175  StaticInstPtr static_micro_inst;
176 
177  /* Set up PC for the next micro-op emitted */
178  if (!decode_info.inMacroop) {
179  set(decode_info.microopPC, *inst->pc);
180  decode_info.inMacroop = true;
181  }
182 
183  /* Get the micro-op static instruction from the
184  * static_inst. */
185  static_micro_inst =
186  static_inst->fetchMicroop(
187  decode_info.microopPC->microPC());
188 
189  output_inst =
190  new MinorDynInst(static_micro_inst, inst->id);
191  set(output_inst->pc, decode_info.microopPC);
192  output_inst->fault = NoFault;
193 
194  /* Allow a predicted next address only on the last
195  * microop */
196  if (static_micro_inst->isLastMicroop()) {
197  output_inst->predictedTaken = inst->predictedTaken;
198  set(output_inst->predictedTarget,
199  inst->predictedTarget);
200  }
201 
202  DPRINTF(Decode, "Microop decomposition inputIndex:"
203  " %d output_index: %d lastMicroop: %s microopPC:"
204  " %s inst: %d\n",
205  decode_info.inputIndex, output_index,
206  (static_micro_inst->isLastMicroop() ?
207  "true" : "false"),
208  *decode_info.microopPC,
209  *output_inst);
210 
211  /* Acknowledge that the static_inst isn't mine, it's my
212  * parent macro-op's */
213  parent_static_inst = static_inst;
214 
215  static_micro_inst->advancePC(*decode_info.microopPC);
216 
217  /* Step input if this is the last micro-op */
218  if (static_micro_inst->isLastMicroop()) {
219  decode_info.inputIndex++;
220  decode_info.inMacroop = false;
221  }
222  } else {
223  /* Doesn't need decomposing, pass on instruction */
224  DPRINTF(Decode, "Passing on inst: %s inputIndex:"
225  " %d output_index: %d\n",
226  *output_inst, decode_info.inputIndex, output_index);
227 
228  parent_static_inst = static_inst;
229 
230  /* Step input */
231  decode_info.inputIndex++;
232  decode_info.inMacroop = false;
233  }
234 
235  /* Set execSeqNum of output_inst */
236  output_inst->id.execSeqNum = decode_info.execSeqNum;
237  /* Add tracing */
238 #if TRACING_ON
239  dynInstAddTracing(output_inst, parent_static_inst, cpu);
240 #endif
241 
242  /* Step to next sequence number */
243  decode_info.execSeqNum++;
244 
245  /* Correctly size the output before writing */
246  if (output_index == 0) insts_out.resize(outputWidth);
247  /* Push into output */
248  insts_out.insts[output_index] = output_inst;
249  output_index++;
250  }
251 
252  /* Have we finished with the input? */
253  if (decode_info.inputIndex == insts_in->width()) {
254  /* If we have just been producing micro-ops, we *must* have
255  * got to the end of that for inputIndex to be pushed past
256  * insts_in->width() */
257  assert(!decode_info.inMacroop);
258  popInput(tid);
259  insts_in = NULL;
260 
261  if (processMoreThanOneInput) {
262  DPRINTF(Decode, "Wrapping\n");
263  insts_in = getInput(tid);
264  }
265  }
266  }
267 
268  /* The rest of the output (if any) should already have been packed
269  * with bubble instructions by insts_out's initialisation
270  *
271  * for (; output_index < outputWidth; output_index++)
272  * assert(insts_out.insts[output_index]->isBubble());
273  */
274  }
275 
276  /* If we generated output, reserve space for the result in the next stage
277  * and mark the stage as being active this cycle */
278  if (!insts_out.isBubble()) {
279  /* Note activity of following buffer */
280  cpu.activityRecorder->activity();
281  insts_out.threadId = tid;
282  nextStageReserve[tid].reserve();
283  }
284 
285  /* If we still have input to process and somewhere to put it,
286  * mark stage as active */
287  for (ThreadID i = 0; i < cpu.numThreads; i++)
288  {
289  if (getInput(i) && nextStageReserve[i].canReserve()) {
290  cpu.activityRecorder->activateStage(Pipeline::DecodeStageId);
291  break;
292  }
293  }
294 
295  /* Make sure the input (if any left) is pushed */
296  if (!inp.outputWire->isBubble())
297  inputBuffer[inp.outputWire->threadId].pushTail();
298 }
299 
300 inline ThreadID
301 Decode::getScheduledThread()
302 {
303  /* Select thread via policy. */
304  std::vector<ThreadID> priority_list;
305 
306  switch (cpu.threadPolicy) {
307  case enums::SingleThreaded:
308  priority_list.push_back(0);
309  break;
310  case enums::RoundRobin:
311  priority_list = cpu.roundRobinPriority(threadPriority);
312  break;
313  case enums::Random:
314  priority_list = cpu.randomPriority();
315  break;
316  default:
317  panic("Unknown fetch policy");
318  }
319 
320  for (auto tid : priority_list) {
321  if (getInput(tid) && !decodeInfo[tid].blocked) {
322  threadPriority = tid;
323  return tid;
324  }
325  }
326 
327  return InvalidThreadID;
328 }
329 
330 bool
331 Decode::isDrained()
332 {
333  for (const auto &buffer : inputBuffer) {
334  if (!buffer.empty())
335  return false;
336  }
337 
338  return (*inp.outputWire).isBubble();
339 }
340 
341 void
342 Decode::minorTrace() const
343 {
344  std::ostringstream data;
345 
346  if (decodeInfo[0].blocked)
347  data << 'B';
348  else
349  (*out.inputWire).reportData(data);
350 
351  minor::minorTrace("insts=%s\n", data.str());
352  inputBuffer[0].minorTrace();
353 }
354 
355 } // namespace minor
356 } // namespace gem5
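
The core of Decode::evaluate() above is a pair of cursors: inputIndex walks the
instructions in the current input line, while inMacroop and microopPC remember
where decoding stopped inside a macro-op when the output filled up
mid-decomposition. The standalone sketch below reproduces only that
bookkeeping; MacroOp, DecodeState and decodeCycle are hypothetical stand-ins
(not gem5 types), and each macro-op is assumed to expand into a fixed number of
micro-ops.

// Simplified model of the micro-op unpacking loop in Decode::evaluate().
// Not gem5 code: MacroOp/DecodeState stand in for StaticInst/DecodeThreadInfo.
#include <cstdio>
#include <vector>

struct MacroOp
{
    int numMicroops;             // how many micro-ops this macro-op expands to
};

struct DecodeState
{
    unsigned int inputIndex = 0; // next unhandled input instruction
    bool inMacroop = false;      // true while part-way through a macro-op
    unsigned int microopPC = 0;  // micro-PC within the current macro-op
};

// Fill up to outputWidth output slots from the input line, one micro-op per
// slot, resuming inside a macro-op if the previous call stopped part-way.
static unsigned int
decodeCycle(const std::vector<MacroOp> &insts_in, DecodeState &state,
            unsigned int outputWidth)
{
    unsigned int output_index = 0;

    while (state.inputIndex < insts_in.size() && output_index < outputWidth) {
        const MacroOp &inst = insts_in[state.inputIndex];

        if (!state.inMacroop) {          // starting a new macro-op
            state.inMacroop = true;
            state.microopPC = 0;
        }

        // "Emit" one micro-op into the next output slot
        std::printf("inst %u microop %u -> output slot %u\n",
                    state.inputIndex, state.microopPC, output_index);
        output_index++;

        state.microopPC++;               // advancePC() equivalent
        if (state.microopPC == (unsigned int)inst.numMicroops) {
            state.inputIndex++;          // last micro-op: step the input
            state.inMacroop = false;
        }
    }

    return output_index;
}

int main()
{
    std::vector<MacroOp> line{{1}, {3}, {2}}; // expansion width of each inst
    DecodeState state;

    // With outputWidth = 2 the six micro-ops drain over three "cycles", and
    // the three-micro-op instruction straddles a cycle boundary, just as a
    // macro-op can straddle calls to evaluate() in Decode.
    while (state.inputIndex < line.size())
        decodeCycle(line, state, 2);

    return 0;
}

As in the real stage, inputIndex only advances on the last micro-op, so the
next call resumes exactly where the previous one stopped.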
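
Decode::getScheduledThread() above delegates the candidate ordering to the CPU
(roundRobinPriority/randomPriority, declared in cpu.hh) and simply takes the
first thread in that order that has input and is not blocked. As a rough
illustration only, assuming the conventional round-robin shape rather than
quoting gem5's implementation, the ordering can be modelled like this:

// Hypothetical round-robin ordering: the thread after the previously chosen
// one is tried first, wrapping around, with the previous thread tried last.
#include <cstdio>
#include <vector>

static std::vector<int>
roundRobinOrder(int lastChosen, int numThreads)
{
    std::vector<int> order;
    for (int i = 1; i <= numThreads; i++)
        order.push_back((lastChosen + i) % numThreads);
    return order;
}

int main()
{
    for (int tid : roundRobinOrder(1, 4))
        std::printf("%d ", tid);         // prints: 2 3 0 1
    std::printf("\n");
    return 0;
}

With an ordering of this shape, storing the last scheduled thread in
threadPriority is what keeps the policy fair: a thread that was just picked
drops to the back of the candidate list on the next cycle.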