gem5  v22.0.0.0
execute.cc
1 /*
2  * Copyright (c) 2013-2014,2018-2020 ARM Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions are
16  * met: redistributions of source code must retain the above copyright
17  * notice, this list of conditions and the following disclaimer;
18  * redistributions in binary form must reproduce the above copyright
19  * notice, this list of conditions and the following disclaimer in the
20  * documentation and/or other materials provided with the distribution;
21  * neither the name of the copyright holders nor the names of its
22  * contributors may be used to endorse or promote products derived from
23  * this software without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 #include "cpu/minor/execute.hh"
39 
40 #include <functional>
41 
42 #include "cpu/minor/cpu.hh"
43 #include "cpu/minor/exec_context.hh"
44 #include "cpu/minor/fetch1.hh"
45 #include "cpu/minor/lsq.hh"
46 #include "cpu/op_class.hh"
47 #include "debug/Activity.hh"
48 #include "debug/Branch.hh"
49 #include "debug/Drain.hh"
50 #include "debug/ExecFaulting.hh"
51 #include "debug/MinorExecute.hh"
52 #include "debug/MinorInterrupt.hh"
53 #include "debug/MinorMem.hh"
54 #include "debug/MinorTrace.hh"
55 #include "debug/PCEvent.hh"
56 
57 namespace gem5
58 {
59 
60 GEM5_DEPRECATED_NAMESPACE(Minor, minor);
61 namespace minor
62 {
63 
64 Execute::Execute(const std::string &name_,
65  MinorCPU &cpu_,
66  const BaseMinorCPUParams &params,
67  Latch<ForwardInstData>::Output inp_,
68  Latch<BranchData>::Input out_) :
69  Named(name_),
70  inp(inp_),
71  out(out_),
72  cpu(cpu_),
73  issueLimit(params.executeIssueLimit),
74  memoryIssueLimit(params.executeMemoryIssueLimit),
75  commitLimit(params.executeCommitLimit),
76  memoryCommitLimit(params.executeMemoryCommitLimit),
77  processMoreThanOneInput(params.executeCycleInput),
78  fuDescriptions(*params.executeFuncUnits),
79  numFuncUnits(fuDescriptions.funcUnits.size()),
80  setTraceTimeOnCommit(params.executeSetTraceTimeOnCommit),
81  setTraceTimeOnIssue(params.executeSetTraceTimeOnIssue),
82  allowEarlyMemIssue(params.executeAllowEarlyMemoryIssue),
83  noCostFUIndex(fuDescriptions.funcUnits.size() + 1),
84  lsq(name_ + ".lsq", name_ + ".dcache_port",
85  cpu_, *this,
86  params.executeMaxAccessesInMemory,
87  params.executeMemoryWidth,
88  params.executeLSQRequestsQueueSize,
89  params.executeLSQTransfersQueueSize,
90  params.executeLSQStoreBufferSize,
91  params.executeLSQMaxStoreBufferStoresPerCycle),
92  executeInfo(params.numThreads,
93  ExecuteThreadInfo(params.executeCommitLimit)),
94  interruptPriority(0),
95  issuePriority(0),
96  commitPriority(0)
97 {
98  if (commitLimit < 1) {
99  fatal("%s: executeCommitLimit must be >= 1 (%d)\n", name_,
100  commitLimit);
101  }
102 
103  if (issueLimit < 1) {
104  fatal("%s: executeIssueLimit must be >= 1 (%d)\n", name_,
105  issueLimit);
106  }
107 
108  if (memoryIssueLimit < 1) {
109  fatal("%s: executeMemoryIssueLimit must be >= 1 (%d)\n", name_,
110  memoryIssueLimit);
111  }
112 
113  if (memoryCommitLimit > commitLimit) {
114  fatal("%s: executeMemoryCommitLimit (%d) must be <="
115  " executeCommitLimit (%d)\n",
116  name_, memoryCommitLimit, commitLimit);
117  }
118 
119  if (params.executeInputBufferSize < 1) {
120  fatal("%s: executeInputBufferSize must be >= 1 (%d)\n", name_,
121  params.executeInputBufferSize);
122  }
123 
124  if (params.executeInputBufferSize < 1) {
125  fatal("%s: executeInputBufferSize must be >= 1 (%d)\n", name_,
126  params.executeInputBufferSize);
127  }
128 
129  /* This should be large enough to count all the in-FU instructions
130  * which need to be accounted for in the inFlightInsts
131  * queue */
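 /* For example, two FUs with opLat 3 and 6 would make total_slots 9,
  * which is the per-thread capacity given to the inFlightInsts and
  * inFUMemInsts queues allocated below. */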
132  unsigned int total_slots = 0;
133 
134  /* Make FUPipelines for each MinorFU */
135  for (unsigned int i = 0; i < numFuncUnits; i++) {
136  std::ostringstream fu_name;
137  MinorFU *fu_description = fuDescriptions.funcUnits[i];
138 
139  /* Note the total number of instruction slots (for sizing
140  * the inFlightInst queue) and the maximum latency of any FU
141  * (for sizing the activity recorder) */
142  total_slots += fu_description->opLat;
143 
144  fu_name << name_ << ".fu." << i;
145 
146  FUPipeline *fu = new FUPipeline(fu_name.str(), *fu_description, cpu);
147 
148  funcUnits.push_back(fu);
149  }
150 
151  /* Check that there is a functional unit for every operation class */
152  for (int op_class = No_OpClass + 1; op_class < Num_OpClasses; op_class++) {
153  bool found_fu = false;
154  unsigned int fu_index = 0;
155 
156  while (fu_index < numFuncUnits && !found_fu)
157  {
158  if (funcUnits[fu_index]->provides(
159  static_cast<OpClass>(op_class)))
160  {
161  found_fu = true;
162  }
163  fu_index++;
164  }
165 
166  if (!found_fu) {
167  warn("No functional unit for OpClass %s\n",
168  enums::OpClassStrings[op_class]);
169  }
170  }
171 
172  /* Per-thread structures */
173  for (ThreadID tid = 0; tid < params.numThreads; tid++) {
174  std::string tid_str = std::to_string(tid);
175 
176  /* Input Buffers */
177  inputBuffer.push_back(
178  InputBuffer<ForwardInstData>(
179  name_ + ".inputBuffer" + tid_str, "insts",
180  params.executeInputBufferSize));
181 
182  const auto &regClasses = cpu.threads[tid]->getIsaPtr()->regClasses();
183 
184  /* Scoreboards */
185  scoreboard.emplace_back(name_ + ".scoreboard" + tid_str, regClasses);
186 
187  /* In-flight instruction records */
188  executeInfo[tid].inFlightInsts = new Queue<QueuedInst,
189  ReportTraitsAdaptor<QueuedInst> >(
190  name_ + ".inFlightInsts" + tid_str, "insts", total_slots);
191 
192  executeInfo[tid].inFUMemInsts = new Queue<QueuedInst,
193  ReportTraitsAdaptor<QueuedInst> >(
194  name_ + ".inFUMemInsts" + tid_str, "insts", total_slots);
195  }
196 }
197 
198 const ForwardInstData *
199 Execute::getInput(ThreadID tid)
200 {
201  /* Get a line from the inputBuffer to work with */
202  if (!inputBuffer[tid].empty()) {
203  const ForwardInstData &head = inputBuffer[tid].front();
204 
205  return (head.isBubble() ? NULL : &(inputBuffer[tid].front()));
206  } else {
207  return NULL;
208  }
209 }
210 
211 void
212 Execute::popInput(ThreadID tid)
213 {
214  if (!inputBuffer[tid].empty())
215  inputBuffer[tid].pop();
216 
217  executeInfo[tid].inputIndex = 0;
218 }
219 
220 void
221 Execute::tryToBranch(MinorDynInstPtr inst, Fault fault, BranchData &branch)
222 {
223  ThreadContext *thread = cpu.getContext(inst->id.threadId);
224  const std::unique_ptr<PCStateBase> pc_before(inst->pc->clone());
225  std::unique_ptr<PCStateBase> target(thread->pcState().clone());
226 
227  /* Force a branch for SerializeAfter/SquashAfter instructions
228  * at the end of micro-op sequence when we're not suspended */
229  bool force_branch = thread->status() != ThreadContext::Suspended &&
230  !inst->isFault() &&
231  inst->isLastOpInInst() &&
232  (inst->staticInst->isSerializeAfter() ||
233  inst->staticInst->isSquashAfter());
234 
235  DPRINTF(Branch, "tryToBranch before: %s after: %s%s\n",
236  *pc_before, *target, (force_branch ? " (forcing)" : ""));
237 
238  /* Will we change the PC to something other than the next instruction? */
239  bool must_branch = *pc_before != *target ||
240  fault != NoFault ||
241  force_branch;
242 
243  /* The reason for the branch data we're about to generate, set below */
244  BranchData::Reason reason = BranchData::NoBranch;
245 
246  if (fault == NoFault) {
247  inst->staticInst->advancePC(*target);
248  thread->pcState(*target);
249 
250  DPRINTF(Branch, "Advancing current PC from: %s to: %s\n",
251  *pc_before, *target);
252  }
253 
254  if (inst->predictedTaken && !force_branch) {
255  /* Predicted to branch */
256  if (!must_branch) {
257  /* No branch was taken, change stream to get us back to the
258  * intended PC value */
259  DPRINTF(Branch, "Predicted a branch from 0x%x to 0x%x but"
260  " none happened inst: %s\n",
261  inst->pc->instAddr(), inst->predictedTarget->instAddr(),
262  *inst);
263 
264  reason = BranchData::BadlyPredictedBranch;
265  } else if (*inst->predictedTarget == *target) {
266  /* Branch prediction got the right target, kill the branch and
267  * carry on.
268  * Note that this information to the branch predictor might get
269  * overwritten by a "real" branch during this cycle */
270  DPRINTF(Branch, "Predicted a branch from 0x%x to 0x%x correctly"
271  " inst: %s\n",
272  inst->pc->instAddr(), inst->predictedTarget->instAddr(),
273  *inst);
274 
275  reason = BranchData::CorrectlyPredictedBranch;
276  } else {
277  /* Branch prediction got the wrong target */
278  DPRINTF(Branch, "Predicted a branch from 0x%x to 0x%x"
279  " but got the wrong target (actual: 0x%x) inst: %s\n",
280  inst->pc->instAddr(), inst->predictedTarget->instAddr(),
281  target->instAddr(), *inst);
282 
283  reason = BranchData::BadlyPredictedBranchTarget;
284  }
285  } else if (must_branch) {
286  /* Unpredicted branch */
287  DPRINTF(Branch, "Unpredicted branch from 0x%x to 0x%x inst: %s\n",
288  inst->pc->instAddr(), target->instAddr(), *inst);
289 
290  reason = BranchData::UnpredictedBranch;
291  } else {
292  /* No branch at all */
293  reason = BranchData::NoBranch;
294  }
295 
296  updateBranchData(inst->id.threadId, reason, inst, *target, branch);
297 }
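/* Worked example of the cases above: a conditional branch predicted taken to
 * the wrong address ends up in the BadlyPredictedBranchTarget arm; that is a
 * stream-changing reason, so updateBranchData() below bumps streamSeqNum and
 * instructions fetched down the old, wrong stream are later discarded. */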
298 
299 void
300 Execute::updateBranchData(
301  ThreadID tid,
302  BranchData::Reason reason,
303  MinorDynInstPtr inst, const PCStateBase &target,
304  BranchData &branch)
305 {
306  if (reason != BranchData::NoBranch) {
307  /* Bump up the stream sequence number on a real branch*/
308  if (BranchData::isStreamChange(reason))
309  executeInfo[tid].streamSeqNum++;
310 
311  /* Branches (even mis-predictions) don't change the predictionSeqNum,
312  * just the streamSeqNum */
313  branch = BranchData(reason, tid,
314  executeInfo[tid].streamSeqNum,
315  /* Maintaining predictionSeqNum if there's no inst is just a
316  * courtesy and looks better on minorview */
317  (inst->isBubble() ? executeInfo[tid].lastPredictionSeqNum
318  : inst->id.predictionSeqNum),
319  target, inst);
320 
321  DPRINTF(Branch, "Branch data signalled: %s\n", branch);
322  }
323 }
324 
325 void
326 Execute::handleMemResponse(MinorDynInstPtr inst,
327  LSQ::LSQRequestPtr response, BranchData &branch, Fault &fault)
328 {
329  ThreadID thread_id = inst->id.threadId;
330  ThreadContext *thread = cpu.getContext(thread_id);
331 
332  ExecContext context(cpu, *cpu.threads[thread_id], *this, inst);
333 
334  PacketPtr packet = response->packet;
335 
336  bool is_load = inst->staticInst->isLoad();
337  bool is_store = inst->staticInst->isStore();
338  bool is_atomic = inst->staticInst->isAtomic();
339  bool is_prefetch = inst->staticInst->isDataPrefetch();
340 
341  /* If true, the trace's predicate value will be taken from the exec
342  * context predicate, otherwise, it will be set to false */
343  bool use_context_predicate = true;
344 
345  if (inst->translationFault != NoFault) {
346  /* Invoke memory faults. */
347  DPRINTF(MinorMem, "Completing fault from DTLB access: %s\n",
348  inst->translationFault->name());
349 
350  if (inst->staticInst->isPrefetch()) {
351  DPRINTF(MinorMem, "Not taking fault on prefetch: %s\n",
352  inst->translationFault->name());
353 
354  /* Don't assign to fault */
355  } else {
356  /* Take the fault raised during the TLB/memory access */
357  fault = inst->translationFault;
358 
359  fault->invoke(thread, inst->staticInst);
360  }
361  } else if (!packet) {
362  DPRINTF(MinorMem, "Completing failed request inst: %s\n",
363  *inst);
364  use_context_predicate = false;
365  if (!context.readMemAccPredicate())
366  inst->staticInst->completeAcc(nullptr, &context, inst->traceData);
367  } else if (packet->isError()) {
368  DPRINTF(MinorMem, "Trying to commit error response: %s\n",
369  *inst);
370 
371  fatal("Received error response packet for inst: %s\n", *inst);
372  } else if (is_store || is_load || is_prefetch || is_atomic) {
373  assert(packet);
374 
375  DPRINTF(MinorMem, "Memory response inst: %s addr: 0x%x size: %d\n",
376  *inst, packet->getAddr(), packet->getSize());
377 
378  if (is_load && packet->getSize() > 0) {
379  DPRINTF(MinorMem, "Memory data[0]: 0x%x\n",
380  static_cast<unsigned int>(packet->getConstPtr<uint8_t>()[0]));
381  }
382 
383  /* Complete the memory access instruction */
384  fault = inst->staticInst->completeAcc(packet, &context,
385  inst->traceData);
386 
387  if (fault != NoFault) {
388  /* Invoke fault created by instruction completion */
389  DPRINTF(MinorMem, "Fault in memory completeAcc: %s\n",
390  fault->name());
391  fault->invoke(thread, inst->staticInst);
392  } else {
393  /* Stores need to be pushed into the store buffer to finish
394  * them off */
395  if (response->needsToBeSentToStoreBuffer())
396  lsq.sendStoreToStoreBuffer(response);
397  }
398  } else {
399  fatal("There should only ever be reads, "
400  "writes or faults at this point\n");
401  }
402 
403  lsq.popResponse(response);
404 
405  if (inst->traceData) {
406  inst->traceData->setPredicate((use_context_predicate ?
407  context.readPredicate() : false));
408  }
409 
410  doInstCommitAccounting(inst);
411 
412  /* Generate output to account for branches */
413  tryToBranch(inst, fault, branch);
414 }
415 
416 bool
417 Execute::isInterrupted(ThreadID thread_id) const
418 {
419  return cpu.checkInterrupts(thread_id);
420 }
421 
422 bool
423 Execute::takeInterrupt(ThreadID thread_id, BranchData &branch)
424 {
425  DPRINTF(MinorInterrupt, "Considering interrupt status from PC: %s\n",
426  cpu.getContext(thread_id)->pcState());
427 
428  Fault interrupt = cpu.getInterruptController(thread_id)->getInterrupt();
429 
430  if (interrupt != NoFault) {
431  /* The interrupt *must* set pcState */
432  cpu.getInterruptController(thread_id)->updateIntrInfo();
433  interrupt->invoke(cpu.getContext(thread_id));
434 
435  assert(!lsq.accessesInFlight());
436 
437  DPRINTF(MinorInterrupt, "Invoking interrupt: %s to PC: %s\n",
438  interrupt->name(), cpu.getContext(thread_id)->pcState());
439 
440  /* Assume that an interrupt *must* cause a branch. Assert this? */
441 
442  updateBranchData(thread_id, BranchData::Interrupt,
443  MinorDynInst::bubble(), cpu.getContext(thread_id)->pcState(),
444  branch);
445  }
446 
447  return interrupt != NoFault;
448 }
449 
450 bool
451 Execute::executeMemRefInst(MinorDynInstPtr inst, BranchData &branch,
452  bool &passed_predicate, Fault &fault)
453 {
454  bool issued = false;
455 
456  /* Set to true if the mem op. is issued and sent to the mem system */
457  passed_predicate = false;
458 
459  if (!lsq.canRequest()) {
460  /* Not acting on instruction yet as the memory
461  * queues are full */
462  issued = false;
463  } else {
464  ThreadContext *thread = cpu.getContext(inst->id.threadId);
465  std::unique_ptr<PCStateBase> old_pc(thread->pcState().clone());
466 
467  ExecContext context(cpu, *cpu.threads[inst->id.threadId], *this, inst);
468 
469  DPRINTF(MinorExecute, "Initiating memRef inst: %s\n", *inst);
470 
471  Fault init_fault = inst->staticInst->initiateAcc(&context,
472  inst->traceData);
473 
474  if (inst->inLSQ) {
475  if (init_fault != NoFault) {
476  assert(inst->translationFault != NoFault);
477  // Translation faults are dealt with in handleMemResponse()
478  init_fault = NoFault;
479  } else {
480  // If we have a translation fault then it got suppressed by
481  // initiateAcc()
482  inst->translationFault = NoFault;
483  }
484  }
485 
486  if (init_fault != NoFault) {
487  DPRINTF(MinorExecute, "Fault on memory inst: %s"
488  " initiateAcc: %s\n", *inst, init_fault->name());
489  fault = init_fault;
490  } else {
491  /* Only set this if the instruction passed its
492  * predicate */
493  if (!context.readMemAccPredicate()) {
494  DPRINTF(MinorMem, "No memory access for inst: %s\n", *inst);
495  assert(context.readPredicate());
496  }
497  passed_predicate = context.readPredicate();
498 
499  /* Set predicate in tracing */
500  if (inst->traceData)
501  inst->traceData->setPredicate(passed_predicate);
502 
503  /* If the instruction didn't pass its predicate
504  * or it is a predicated vector instruction and the
505  * associated predicate register is all-false (and so will not
506  * progress from here), try to branch to correct any branch
507  * mis-prediction. */
508  if (!inst->inLSQ) {
509  /* Leave it up to commit to handle the fault */
510  lsq.pushFailedRequest(inst);
511  inst->inLSQ = true;
512  }
513  }
514 
515  /* Restore thread PC */
516  thread->pcState(*old_pc);
517  issued = true;
518  }
519 
520  return issued;
521 }
522 
523 /** Increment a cyclic buffer index for indices [0, cycle_size-1] */
524 inline unsigned int
525 cyclicIndexInc(unsigned int index, unsigned int cycle_size)
526 {
527  unsigned int ret = index + 1;
528 
529  if (ret == cycle_size)
530  ret = 0;
531 
532  return ret;
533 }
534 
535 /** Decrement a cyclic buffer index for indices [0, cycle_size-1] */
536 inline unsigned int
537 cyclicIndexDec(unsigned int index, unsigned int cycle_size)
538 {
539  int ret = index - 1;
540 
541  if (ret < 0)
542  ret = cycle_size - 1;
543 
544  return ret;
545 }
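/* e.g. with cycle_size == 4: cyclicIndexInc(3, 4) == 0 and
 * cyclicIndexDec(0, 4) == 3. */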
546 
547 unsigned int
548 Execute::issue(ThreadID thread_id)
549 {
550  const ForwardInstData *insts_in = getInput(thread_id);
551  ExecuteThreadInfo &thread = executeInfo[thread_id];
552 
553  /* Early termination if we have no instructions */
554  if (!insts_in)
555  return 0;
556 
557  /* Start from the first FU */
558  unsigned int fu_index = 0;
559 
560  /* Remains true while instructions are still being issued. If any
561  * instruction fails to issue, this is set to false and we exit issue.
562  * This strictly enforces in-order issue. For other issue behaviours,
563  * a more complicated test in the outer while loop below is needed. */
564  bool issued = true;
565 
566  /* Number of insts issued this cycle to check for issueLimit */
567  unsigned num_insts_issued = 0;
568 
569  /* Number of memory ops issued this cycle to check for memoryIssueLimit */
570  unsigned num_mem_insts_issued = 0;
571 
572  /* Number of instructions discarded this cycle in order to enforce a
573  * discardLimit. @todo, add that parameter? */
574  unsigned num_insts_discarded = 0;
575 
576  do {
577  MinorDynInstPtr inst = insts_in->insts[thread.inputIndex];
578  Fault fault = inst->fault;
579  bool discarded = false;
580  bool issued_mem_ref = false;
581 
582  if (inst->isBubble()) {
583  /* Skip */
584  issued = true;
585  } else if (cpu.getContext(thread_id)->status() ==
586  ThreadContext::Suspended)
587  {
588  DPRINTF(MinorExecute, "Discarding inst: %s from suspended"
589  " thread\n", *inst);
590 
591  issued = true;
592  discarded = true;
593  } else if (inst->id.streamSeqNum != thread.streamSeqNum) {
594  DPRINTF(MinorExecute, "Discarding inst: %s as its stream"
595  " state was unexpected, expected: %d\n",
596  *inst, thread.streamSeqNum);
597  issued = true;
598  discarded = true;
599  } else {
600  /* Try and issue an instruction into an FU, assume we didn't and
601  * fix that in the loop */
602  issued = false;
603 
604  /* Try FU from 0 each instruction */
605  fu_index = 0;
606 
607  /* Try and issue a single instruction stepping through the
608  * available FUs */
609  do {
610  FUPipeline *fu = funcUnits[fu_index];
611 
612  DPRINTF(MinorExecute, "Trying to issue inst: %s to FU: %d\n",
613  *inst, fu_index);
614 
615  /* Does the examined fu have the OpClass-related capability
616  * needed to execute this instruction? Faults can always
617  * issue to any FU but probably should just 'live' in the
618  * inFlightInsts queue rather than having an FU. */
619  bool fu_is_capable = (!inst->isFault() ?
620  fu->provides(inst->staticInst->opClass()) : true);
621 
622  if (inst->isNoCostInst()) {
623  /* Issue free insts. to a fake numbered FU */
624  fu_index = noCostFUIndex;
625 
626  /* And start the countdown on activity to allow
627  * this instruction to get to the end of its FU */
628  cpu.activityRecorder->activity();
629 
630  /* Mark the destinations for this instruction as
631  * busy */
632  scoreboard[thread_id].markupInstDests(inst, cpu.curCycle() +
633  Cycles(0), cpu.getContext(thread_id), false);
634 
635  DPRINTF(MinorExecute, "Issuing %s to %d\n", inst->id, noCostFUIndex);
636  inst->fuIndex = noCostFUIndex;
637  inst->extraCommitDelay = Cycles(0);
638  inst->extraCommitDelayExpr = NULL;
639 
640  /* Push the instruction onto the inFlight queue so
641  * it can be committed in order */
642  QueuedInst fu_inst(inst);
643  thread.inFlightInsts->push(fu_inst);
644 
645  issued = true;
646 
647  } else if (!fu_is_capable || fu->alreadyPushed()) {
648  /* Skip */
649  if (!fu_is_capable) {
650  DPRINTF(MinorExecute, "Can't issue as FU: %d isn't"
651  " capable\n", fu_index);
652  } else {
653  DPRINTF(MinorExecute, "Can't issue as FU: %d is"
654  " already busy\n", fu_index);
655  }
656  } else if (fu->stalled) {
657  DPRINTF(MinorExecute, "Can't issue inst: %s into FU: %d,"
658  " it's stalled\n",
659  *inst, fu_index);
660  } else if (!fu->canInsert()) {
661  DPRINTF(MinorExecute, "Can't issue inst: %s to busy FU"
662  " for another: %d cycles\n",
663  *inst, fu->cyclesBeforeInsert());
664  } else {
665  MinorFUTiming *timing = (!inst->isFault() ?
666  fu->findTiming(inst->staticInst) : NULL);
667 
668  const std::vector<Cycles> *src_latencies =
669  (timing ? &(timing->srcRegsRelativeLats)
670  : NULL);
671 
672  const std::vector<bool> *cant_forward_from_fu_indices =
673  &(fu->cantForwardFromFUIndices);
674 
675  if (timing && timing->suppress) {
676  DPRINTF(MinorExecute, "Can't issue inst: %s as extra"
677  " decoding is suppressing it\n",
678  *inst);
679  } else if (!scoreboard[thread_id].canInstIssue(inst,
680  src_latencies, cant_forward_from_fu_indices,
681  cpu.curCycle(), cpu.getContext(thread_id)))
682  {
683  DPRINTF(MinorExecute, "Can't issue inst: %s yet\n",
684  *inst);
685  } else {
686  /* Can insert the instruction into this FU */
687  DPRINTF(MinorExecute, "Issuing inst: %s"
688  " into FU %d\n", *inst,
689  fu_index);
690 
691  Cycles extra_dest_retire_lat = Cycles(0);
692  TimingExpr *extra_dest_retire_lat_expr = NULL;
693  Cycles extra_assumed_lat = Cycles(0);
694 
695  /* Add the extraCommitDelay and extraAssumeLat to
696  * the FU pipeline timings */
697  if (timing) {
698  extra_dest_retire_lat =
699  timing->extraCommitLat;
700  extra_dest_retire_lat_expr =
701  timing->extraCommitLatExpr;
702  extra_assumed_lat =
703  timing->extraAssumedLat;
704  }
705 
706  issued_mem_ref = inst->isMemRef();
707 
708  QueuedInst fu_inst(inst);
709 
710  /* Decorate the inst with FU details */
711  inst->fuIndex = fu_index;
712  inst->extraCommitDelay = extra_dest_retire_lat;
713  inst->extraCommitDelayExpr =
714  extra_dest_retire_lat_expr;
715 
716  if (issued_mem_ref) {
717  /* Remember which instruction this memory op
718  * depends on so that initiateAcc can be called
719  * early */
720  if (allowEarlyMemIssue) {
721  inst->instToWaitFor =
722  scoreboard[thread_id].execSeqNumToWaitFor(inst,
723  cpu.getContext(thread_id));
724 
725  if (lsq.getLastMemBarrier(thread_id) >
726  inst->instToWaitFor)
727  {
728  DPRINTF(MinorExecute, "A barrier will"
729  " cause a delay in mem ref issue of"
730  " inst: %s until after inst"
731  " %d(exec)\n", *inst,
732  lsq.getLastMemBarrier(thread_id));
733 
734  inst->instToWaitFor =
735  lsq.getLastMemBarrier(thread_id);
736  } else {
737  DPRINTF(MinorExecute, "Memory ref inst:"
738  " %s must wait for inst %d(exec)"
739  " before issuing\n",
740  *inst, inst->instToWaitFor);
741  }
742 
743  inst->canEarlyIssue = true;
744  }
745  /* Also queue this instruction in the memory ref
746  * queue to ensure in-order issue to the LSQ */
747  DPRINTF(MinorExecute, "Pushing mem inst: %s\n",
748  *inst);
749  thread.inFUMemInsts->push(fu_inst);
750  }
751 
752  /* Issue to FU */
753  fu->push(fu_inst);
754  /* And start the countdown on activity to allow
755  * this instruction to get to the end of its FU */
756  cpu.activityRecorder->activity();
757 
758  /* Mark the destinations for this instruction as
759  * busy */
760  scoreboard[thread_id].markupInstDests(inst, cpu.curCycle() +
761  fu->description.opLat +
762  extra_dest_retire_lat +
763  extra_assumed_lat,
764  cpu.getContext(thread_id),
765  issued_mem_ref && extra_assumed_lat == Cycles(0));
766 
767  /* Push the instruction onto the inFlight queue so
768  * it can be committed in order */
769  thread.inFlightInsts->push(fu_inst);
770 
771  issued = true;
772  }
773  }
774 
775  fu_index++;
776  } while (fu_index != numFuncUnits && !issued);
777 
778  if (!issued)
779  DPRINTF(MinorExecute, "Didn't issue inst: %s\n", *inst);
780  }
781 
782  if (issued) {
783  /* Generate MinorTrace's MinorInst lines. Do this at commit
784  * to allow better instruction annotation? */
785  if (debug::MinorTrace && !inst->isBubble()) {
786  inst->minorTraceInst(*this,
787  cpu.threads[0]->getIsaPtr()->regClasses());
788  }
789 
790  /* Mark up barriers in the LSQ */
791  if (!discarded && inst->isInst() &&
792  inst->staticInst->isFullMemBarrier())
793  {
794  DPRINTF(MinorMem, "Issuing memory barrier inst: %s\n", *inst);
795  lsq.issuedMemBarrierInst(inst);
796  }
797 
798  if (inst->traceData && setTraceTimeOnIssue) {
799  inst->traceData->setWhen(curTick());
800  }
801 
802  if (issued_mem_ref)
803  num_mem_insts_issued++;
804 
805  if (discarded) {
806  num_insts_discarded++;
807  } else if (!inst->isBubble()) {
808  num_insts_issued++;
809 
810  if (num_insts_issued == issueLimit)
811  DPRINTF(MinorExecute, "Reached inst issue limit\n");
812  }
813 
814  thread.inputIndex++;
815  DPRINTF(MinorExecute, "Stepping to next inst inputIndex: %d\n",
816  thread.inputIndex);
817  }
818 
819  /* Got to the end of a line */
820  if (thread.inputIndex == insts_in->width()) {
821  popInput(thread_id);
822  /* Set insts_in to null to force us to leave the surrounding
823  * loop */
824  insts_in = NULL;
825 
826  if (processMoreThanOneInput) {
827  DPRINTF(MinorExecute, "Wrapping\n");
828  insts_in = getInput(thread_id);
829  }
830  }
831  } while (insts_in && thread.inputIndex < insts_in->width() &&
832  /* We still have instructions */
833  fu_index != numFuncUnits && /* Not visited all FUs */
834  issued && /* We've not yet failed to issue an instruction */
835  num_insts_issued != issueLimit && /* Still allowed to issue */
836  num_mem_insts_issued != memoryIssueLimit);
837 
838  return num_insts_issued;
839 }
840 
841 bool
842 Execute::tryPCEvents(ThreadID thread_id)
843 {
844  ThreadContext *thread = cpu.getContext(thread_id);
845  unsigned int num_pc_event_checks = 0;
846 
847  /* Handle PC events on instructions */
848  Addr oldPC;
849  do {
850  oldPC = thread->pcState().instAddr();
851  cpu.threads[thread_id]->pcEventQueue.service(oldPC, thread);
852  num_pc_event_checks++;
853  } while (oldPC != thread->pcState().instAddr());
854 
855  if (num_pc_event_checks > 1) {
856  DPRINTF(PCEvent, "Acting on PC Event to PC: %s\n",
857  thread->pcState());
858  }
859 
860  return num_pc_event_checks > 1;
861 }
862 
863 void
864 Execute::doInstCommitAccounting(MinorDynInstPtr inst)
865 {
866  assert(!inst->isFault());
867 
868  MinorThread *thread = cpu.threads[inst->id.threadId];
869 
870  /* Increment the many and various inst and op counts in the
871  * thread and system */
872  if (!inst->staticInst->isMicroop() || inst->staticInst->isLastMicroop())
873  {
874  thread->numInst++;
875  thread->threadStats.numInsts++;
876  cpu.stats.numInsts++;
877 
878  /* Act on events related to instruction counts */
879  thread->comInstEventQueue.serviceEvents(thread->numInst);
880  }
881  thread->numOp++;
882  thread->threadStats.numOps++;
883  cpu.stats.numOps++;
884  cpu.stats.committedInstType[inst->id.threadId]
885  [inst->staticInst->opClass()]++;
886 
887  /* Set the CP SeqNum to the numOps commit number */
888  if (inst->traceData)
889  inst->traceData->setCPSeq(thread->numOp);
890 
891  cpu.probeInstCommit(inst->staticInst, inst->pc->instAddr());
892 }
893 
894 bool
895 Execute::commitInst(MinorDynInstPtr inst, bool early_memory_issue,
896  BranchData &branch, Fault &fault, bool &committed,
897  bool &completed_mem_issue)
898 {
899  ThreadID thread_id = inst->id.threadId;
900  ThreadContext *thread = cpu.getContext(thread_id);
901 
902  bool completed_inst = true;
903  fault = NoFault;
904 
905  /* Is the thread for this instruction suspended? In that case, just
906  * stall as long as there are no pending interrupts */
907  if (thread->status() == ThreadContext::Suspended &&
908  !isInterrupted(thread_id))
909  {
910  panic("We should never hit the case where we try to commit from a "
911  "suspended thread as the streamSeqNum should not match");
912  } else if (inst->isFault()) {
913  ExecContext context(cpu, *cpu.threads[thread_id], *this, inst);
914 
915  DPRINTF(MinorExecute, "Fault inst reached Execute: %s\n",
916  inst->fault->name());
917 
918  fault = inst->fault;
919  inst->fault->invoke(thread, NULL);
920 
921  tryToBranch(inst, fault, branch);
922  } else if (inst->staticInst->isMemRef()) {
923  /* Memory accesses are executed in two parts:
924  * executeMemRefInst -- calculates the EA and issues the access
925  * to memory. This is done here.
926  * handleMemResponse -- handles the response packet, done by
927  * Execute::commit
928  *
929  * While the memory access is in its FU, the EA is being
930  * calculated. At the end of the FU, when it is ready to
931  * 'commit' (in this function), the access is presented to the
932  * memory queues. When a response comes back from memory,
933  * Execute::commit will commit it.
934  */
935  bool predicate_passed = false;
936  bool completed_mem_inst = executeMemRefInst(inst, branch,
937  predicate_passed, fault);
938 
939  if (completed_mem_inst && fault != NoFault) {
940  if (early_memory_issue) {
941  DPRINTF(MinorExecute, "Fault in early executing inst: %s\n",
942  fault->name());
943  /* Don't execute the fault, just stall the instruction
944  * until it gets to the head of inFlightInsts */
945  inst->canEarlyIssue = false;
946  /* Not completed as we'll come here again to pick up
947  * the fault when we get to the end of the FU */
948  completed_inst = false;
949  } else {
950  DPRINTF(MinorExecute, "Fault in execute: %s\n",
951  fault->name());
952  fault->invoke(thread, NULL);
953 
954  tryToBranch(inst, fault, branch);
955  completed_inst = true;
956  }
957  } else {
958  completed_inst = completed_mem_inst;
959  }
960  completed_mem_issue = completed_inst;
961  } else if (inst->isInst() && inst->staticInst->isFullMemBarrier() &&
962  !lsq.canPushIntoStoreBuffer())
963  {
964  DPRINTF(MinorExecute, "Can't commit data barrier inst: %s yet as"
965  " there isn't space in the store buffer\n", *inst);
966 
967  completed_inst = false;
968  } else if (inst->isInst() && inst->staticInst->isQuiesce()
969  && !branch.isBubble()){
970  /* This instruction can suspend, need to be able to communicate
971  * backwards, so no other branches may evaluate this cycle*/
972  completed_inst = false;
973  } else {
974  ExecContext context(cpu, *cpu.threads[thread_id], *this, inst);
975 
976  DPRINTF(MinorExecute, "Committing inst: %s\n", *inst);
977 
978  fault = inst->staticInst->execute(&context,
979  inst->traceData);
980 
981  /* Set the predicate for tracing and dump */
982  if (inst->traceData)
983  inst->traceData->setPredicate(context.readPredicate());
984 
985  committed = true;
986 
987  if (fault != NoFault) {
988  if (inst->traceData) {
989  if (debug::ExecFaulting) {
990  inst->traceData->setFaulting(true);
991  } else {
992  delete inst->traceData;
993  inst->traceData = NULL;
994  }
995  }
996 
997  DPRINTF(MinorExecute, "Fault in execute of inst: %s fault: %s\n",
998  *inst, fault->name());
999  fault->invoke(thread, inst->staticInst);
1000  }
1001 
1002  doInstCommitAccounting(inst);
1003  tryToBranch(inst, fault, branch);
1004  }
1005 
1006  if (completed_inst) {
1007  /* Keep a copy of this instruction's predictionSeqNum just in case
1008  * we need to issue a branch without an instruction (such as an
1009  * interrupt) */
1010  executeInfo[thread_id].lastPredictionSeqNum = inst->id.predictionSeqNum;
1011 
1012  /* Check to see if this instruction suspended the current thread. */
1013  if (!inst->isFault() &&
1014  thread->status() == ThreadContext::Suspended &&
1015  branch.isBubble() && /* It didn't branch too */
1016  !isInterrupted(thread_id)) /* Don't suspend if we have
1017  interrupts */
1018  {
1019  auto &resume_pc = cpu.getContext(thread_id)->pcState();
1020 
1021  assert(resume_pc.microPC() == 0);
1022 
1023  DPRINTF(MinorInterrupt, "Suspending thread: %d from Execute"
1024  " inst: %s\n", thread_id, *inst);
1025 
1026  cpu.stats.numFetchSuspends++;
1027 
1028  updateBranchData(thread_id, BranchData::SuspendThread, inst,
1029  resume_pc, branch);
1030  }
1031  }
1032 
1033  return completed_inst;
1034 }
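/* commitInst returns true once the instruction's fate is settled this cycle:
 * executed (or faulted), discarded, or, for memory ops, issued to the LSQ.
 * Returning false leaves it at the head of inFlightInsts for commit() to
 * retry on a later cycle. */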
1035 
1036 void
1037 Execute::commit(ThreadID thread_id, bool only_commit_microops, bool discard,
1038  BranchData &branch)
1039 {
1040  Fault fault = NoFault;
1041  Cycles now = cpu.curCycle();
1042  ExecuteThreadInfo &ex_info = executeInfo[thread_id];
1043 
1044  /**
1045  * Try and execute as many instructions from the end of FU pipelines as
1046  * possible. This *doesn't* include actually advancing the pipelines.
1047  *
1048  * We do this by looping on the front of the inFlightInsts queue for as
1049  * long as we can find the desired instruction at the end of the
1050  * functional unit it was issued to without seeing a branch or a fault.
1051  * In this function, these terms are used:
1052  *  complete -- The instruction has finished its passage through
1053  *      its functional unit and its fate has been decided
1054  *      (committed, discarded, issued to the memory system)
1055  *  commit -- The instruction is complete(d), not discarded and has
1056  *      its effects applied to the CPU state
1057  *  discard(ed) -- The instruction is complete but not committed
1058  *      as its streamSeqNum disagrees with the current
1059  *      Execute::streamSeqNum
1060  *
1061  * Commits are also possible from two other places:
1062  *
1063  * 1) Responses returning from the LSQ
1064  * 2) Mem ops issued to the LSQ ('committed' from the FUs) earlier
1065  *      than their position in the inFlightInsts queue, but after all
1066  *      their dependencies are resolved.
1067  */
1069  /* Has an instruction been completed? Once this becomes false, we stop
1070  * trying to complete instructions. */
1071  bool completed_inst = true;
1072 
1073  /* Number of insts committed this cycle to check against commitLimit */
1074  unsigned int num_insts_committed = 0;
1075 
1076  /* Number of memory access instructions committed to check against
1077  * memCommitLimit */
1078  unsigned int num_mem_refs_committed = 0;
1079 
1080  if (only_commit_microops && !ex_info.inFlightInsts->empty()) {
1081  DPRINTF(MinorInterrupt, "Only commit microops %s %d\n",
1082  *(ex_info.inFlightInsts->front().inst),
1083  ex_info.lastCommitWasEndOfMacroop);
1084  }
1085 
1086  while (!ex_info.inFlightInsts->empty() && /* Some more instructions to process */
1087  !branch.isStreamChange() && /* No real branch */
1088  fault == NoFault && /* No faults */
1089  completed_inst && /* Still finding instructions to execute */
1090  num_insts_committed != commitLimit /* Not reached commit limit */
1091  )
1092  {
1093  if (only_commit_microops) {
1094  DPRINTF(MinorInterrupt, "Committing tail of insts before"
1095  " interrupt: %s\n",
1096  *(ex_info.inFlightInsts->front().inst));
1097  }
1098 
1099  QueuedInst *head_inflight_inst = &(ex_info.inFlightInsts->front());
1100 
1101  InstSeqNum head_exec_seq_num =
1102  head_inflight_inst->inst->id.execSeqNum;
1103 
1104  /* The instruction we actually process if completed_inst
1105  * remains true to the end of the loop body.
1106  * Start by considering the head of the in flight insts queue */
1107  MinorDynInstPtr inst = head_inflight_inst->inst;
1108 
1109  bool committed_inst = false;
1110  bool discard_inst = false;
1111  bool completed_mem_ref = false;
1112  bool issued_mem_ref = false;
1113  bool early_memory_issue = false;
1114 
1115  /* Must set this again to go around the loop */
1116  completed_inst = false;
1117 
1118  /* If we're just completing a macroop before an interrupt or drain,
1119  * can we still commit another microop (rather than a memory response)
1120  * without crossing into the next full instruction? */
1121  bool can_commit_insts = !ex_info.inFlightInsts->empty() &&
1122  !(only_commit_microops && ex_info.lastCommitWasEndOfMacroop);
1123 
1124  /* Can we find a mem response for this inst */
1125  LSQ::LSQRequestPtr mem_response =
1126  (inst->inLSQ ? lsq.findResponse(inst) : NULL);
1127 
1128  DPRINTF(MinorExecute, "Trying to commit canCommitInsts: %d\n",
1129  can_commit_insts);
1130 
1131  /* Test for PC events after every instruction */
1132  if (isInbetweenInsts(thread_id) && tryPCEvents(thread_id)) {
1133  ThreadContext *thread = cpu.getContext(thread_id);
1134 
1135  /* Branch as there was a change in PC */
1136  updateBranchData(thread_id, BranchData::UnpredictedBranch,
1137  MinorDynInst::bubble(), thread->pcState(), branch);
1138  } else if (mem_response &&
1139  num_mem_refs_committed < memoryCommitLimit)
1140  {
1141  /* Try to commit from the memory responses next */
1142  discard_inst = inst->id.streamSeqNum !=
1143  ex_info.streamSeqNum || discard;
1144 
1145  DPRINTF(MinorExecute, "Trying to commit mem response: %s\n",
1146  *inst);
1147 
1148  /* Complete or discard the response */
1149  if (discard_inst) {
1150  DPRINTF(MinorExecute, "Discarding mem inst: %s as its"
1151  " stream state was unexpected, expected: %d\n",
1152  *inst, ex_info.streamSeqNum);
1153 
1154  lsq.popResponse(mem_response);
1155  } else {
1156  handleMemResponse(inst, mem_response, branch, fault);
1157  committed_inst = true;
1158  }
1159 
1160  completed_mem_ref = true;
1161  completed_inst = true;
1162  } else if (can_commit_insts) {
1163  /* If true, this instruction will, subject to timing tweaks,
1164  * be considered for completion. try_to_commit flattens
1165  * the `if' tree a bit and allows other tests for inst
1166  * commit to be inserted here. */
1167  bool try_to_commit = false;
1168 
1169  /* Try and issue memory ops early if they:
1170  * - Can push a request into the LSQ
1171  * - Have reached the end of their FUs
1172  * - Have had all their dependencies satisfied
1173  * - Are from the right stream
1174  *
1175  * For any other case, leave it to the normal instruction
1176  * issue below to handle them.
1177  */
1178  if (!ex_info.inFUMemInsts->empty() && lsq.canRequest()) {
1179  DPRINTF(MinorExecute, "Trying to commit from mem FUs\n");
1180 
1181  const MinorDynInstPtr head_mem_ref_inst =
1182  ex_info.inFUMemInsts->front().inst;
1183  FUPipeline *fu = funcUnits[head_mem_ref_inst->fuIndex];
1184  const MinorDynInstPtr &fu_inst = fu->front().inst;
1185 
1186  /* Use this, possibly out of order, inst as the one
1187  * to 'commit'/send to the LSQ */
1188  if (!fu_inst->isBubble() &&
1189  !fu_inst->inLSQ &&
1190  fu_inst->canEarlyIssue &&
1191  ex_info.streamSeqNum == fu_inst->id.streamSeqNum &&
1192  head_exec_seq_num > fu_inst->instToWaitFor)
1193  {
1194  DPRINTF(MinorExecute, "Issuing mem ref early"
1195  " inst: %s instToWaitFor: %d\n",
1196  *(fu_inst), fu_inst->instToWaitFor);
1197 
1198  inst = fu_inst;
1199  try_to_commit = true;
1200  early_memory_issue = true;
1201  completed_inst = true;
1202  }
1203  }
1204 
1205  /* Try and commit FU-less insts */
1206  if (!completed_inst && inst->isNoCostInst()) {
1207  DPRINTF(MinorExecute, "Committing no cost inst: %s", *inst);
1208 
1209  try_to_commit = true;
1210  completed_inst = true;
1211  }
1212 
1213  /* Try to issue from the ends of FUs and the inFlightInsts
1214  * queue */
1215  if (!completed_inst && !inst->inLSQ) {
1216  DPRINTF(MinorExecute, "Trying to commit from FUs\n");
1217 
1218  /* Try to commit from a functional unit */
1219  /* Is the head inst of the expected inst's FU actually the
1220  * expected inst? */
1221  QueuedInst &fu_inst =
1222  funcUnits[inst->fuIndex]->front();
1223  InstSeqNum fu_inst_seq_num = fu_inst.inst->id.execSeqNum;
1224 
1225  if (fu_inst.inst->isBubble()) {
1226  /* No instruction ready */
1227  completed_inst = false;
1228  } else if (fu_inst_seq_num != head_exec_seq_num) {
1229  /* Past instruction: we must have already executed it
1230  * in the same cycle and so the head inst isn't
1231  * actually at the end of its pipeline
1232  * Future instruction: handled above and only for
1233  * mem refs on their way to the LSQ */
1234  } else if (fu_inst.inst->id == inst->id) {
1235  /* All instructions can be committed if they have the
1236  * right execSeqNum and there are no in-flight
1237  * mem insts before us */
1238  try_to_commit = true;
1239  completed_inst = true;
1240  }
1241  }
1242 
1243  if (try_to_commit) {
1244  discard_inst = inst->id.streamSeqNum !=
1245  ex_info.streamSeqNum || discard;
1246 
1247  /* Is this instruction discardable as its streamSeqNum
1248  * doesn't match? */
1249  if (!discard_inst) {
1250  /* Try to commit or discard a non-memory instruction.
1251  * Memory ops are actually 'committed' from this FUs
1252  * and 'issued' into the memory system so we need to
1253  * account for them later (commit_was_mem_issue gets
1254  * set) */
1255  if (inst->extraCommitDelayExpr) {
1256  DPRINTF(MinorExecute, "Evaluating expression for"
1257  " extra commit delay inst: %s\n", *inst);
1258 
1259  ThreadContext *thread = cpu.getContext(thread_id);
1260 
1261  TimingExprEvalContext context(inst->staticInst,
1262  thread, NULL);
1263 
1264  uint64_t extra_delay = inst->extraCommitDelayExpr->
1265  eval(context);
1266 
1267  DPRINTF(MinorExecute, "Extra commit delay expr"
1268  " result: %d\n", extra_delay);
1269 
1270  if (extra_delay < 128) {
1271  inst->extraCommitDelay += Cycles(extra_delay);
1272  } else {
1273  DPRINTF(MinorExecute, "Extra commit delay was"
1274  " very long: %d\n", extra_delay);
1275  }
1276  inst->extraCommitDelayExpr = NULL;
1277  }
1278 
1279  /* Move the extraCommitDelay from the instruction
1280  * into the minimumCommitCycle */
1281  if (inst->extraCommitDelay != Cycles(0)) {
1282  inst->minimumCommitCycle = cpu.curCycle() +
1283  inst->extraCommitDelay;
1284  inst->extraCommitDelay = Cycles(0);
1285  }
1286 
1287  /* @todo Think about making lastMemBarrier be
1288  * MAX_UINT_64 to avoid using 0 as a marker value */
1289  if (!inst->isFault() && inst->isMemRef() &&
1290  lsq.getLastMemBarrier(thread_id) <
1291  inst->id.execSeqNum &&
1292  lsq.getLastMemBarrier(thread_id) != 0)
1293  {
1294  DPRINTF(MinorExecute, "Not committing inst: %s yet"
1295  " as there are incomplete barriers in flight\n",
1296  *inst);
1297  completed_inst = false;
1298  } else if (inst->minimumCommitCycle > now) {
1299  DPRINTF(MinorExecute, "Not committing inst: %s yet"
1300  " as it wants to be stalled for %d more cycles\n",
1301  *inst, inst->minimumCommitCycle - now);
1302  completed_inst = false;
1303  } else {
1304  completed_inst = commitInst(inst,
1305  early_memory_issue, branch, fault,
1306  committed_inst, issued_mem_ref);
1307  }
1308  } else {
1309  /* Discard instruction */
1310  completed_inst = true;
1311  }
1312 
1313  if (completed_inst) {
1314  /* Allow the pipeline to advance. If the FU head
1315  * instruction wasn't the inFlightInsts head
1316  * but had already been committed, it would have
1317  * unstalled the pipeline before here */
1318  if (inst->fuIndex != noCostFUIndex) {
1319  DPRINTF(MinorExecute, "Unstalling %d for inst %s\n", inst->fuIndex, inst->id);
1320  funcUnits[inst->fuIndex]->stalled = false;
1321  }
1322  }
1323  }
1324  } else {
1325  DPRINTF(MinorExecute, "No instructions to commit\n");
1326  completed_inst = false;
1327  }
1328 
1329  /* All discardable instructions must also be 'completed' by now */
1330  assert(!(discard_inst && !completed_inst));
1331 
1332  /* Instruction committed but was discarded due to streamSeqNum
1333  * mismatch */
1334  if (discard_inst) {
1335  DPRINTF(MinorExecute, "Discarding inst: %s as its stream"
1336  " state was unexpected, expected: %d\n",
1337  *inst, ex_info.streamSeqNum);
1338 
1339  if (fault == NoFault)
1340  cpu.stats.numDiscardedOps++;
1341  }
1342 
1343  /* Mark the mem inst as being in the LSQ */
1344  if (issued_mem_ref) {
1345  inst->fuIndex = 0;
1346  inst->inLSQ = true;
1347  }
1348 
1349  /* Pop issued (to LSQ) and discarded mem refs from the inFUMemInsts
1350  * as they've *definitely* exited the FUs */
1351  if (completed_inst && inst->isMemRef()) {
1352  /* The MemRef could have been discarded from the FU or the memory
1353  * queue, so just check an FU instruction */
1354  if (!ex_info.inFUMemInsts->empty() &&
1355  ex_info.inFUMemInsts->front().inst == inst)
1356  {
1357  ex_info.inFUMemInsts->pop();
1358  }
1359  }
1360 
1361  if (completed_inst && !(issued_mem_ref && fault == NoFault)) {
1362  /* Note that this includes discarded insts */
1363  DPRINTF(MinorExecute, "Completed inst: %s\n", *inst);
1364 
1365  /* Got to the end of a full instruction? */
1366  ex_info.lastCommitWasEndOfMacroop = inst->isFault() ||
1367  inst->isLastOpInInst();
1368 
1369  /* lastPredictionSeqNum is kept as a convenience to prevent its
1370  * value from changing too much on the minorview display */
1371  ex_info.lastPredictionSeqNum = inst->id.predictionSeqNum;
1372 
1373  /* Finished with the inst, remove it from the inst queue and
1374  * clear its dependencies */
1375  ex_info.inFlightInsts->pop();
1376 
1377  /* Complete barriers in the LSQ/move to store buffer */
1378  if (inst->isInst() && inst->staticInst->isFullMemBarrier()) {
1379  DPRINTF(MinorMem, "Completing memory barrier"
1380  " inst: %s committed: %d\n", *inst, committed_inst);
1381  lsq.completeMemBarrierInst(inst, committed_inst);
1382  }
1383 
1384  scoreboard[thread_id].clearInstDests(inst, inst->isMemRef());
1385  }
1386 
1387  /* Handle per-cycle instruction counting */
1388  if (committed_inst) {
1389  bool is_no_cost_inst = inst->isNoCostInst();
1390 
1391  /* Don't show no cost instructions as having taken a commit
1392  * slot */
1393  if (debug::MinorTrace && !is_no_cost_inst)
1394  ex_info.instsBeingCommitted.insts[num_insts_committed] = inst;
1395 
1396  if (!is_no_cost_inst)
1397  num_insts_committed++;
1398 
1399  if (num_insts_committed == commitLimit)
1400  DPRINTF(MinorExecute, "Reached inst commit limit\n");
1401 
1402  /* Re-set the time of the instruction if that's required for
1403  * tracing */
1404  if (inst->traceData) {
1405  if (setTraceTimeOnCommit)
1406  inst->traceData->setWhen(curTick());
1407  inst->traceData->dump();
1408  }
1409 
1410  if (completed_mem_ref)
1411  num_mem_refs_committed++;
1412 
1413  if (num_mem_refs_committed == memoryCommitLimit)
1414  DPRINTF(MinorExecute, "Reached mem ref commit limit\n");
1415  }
1416  }
1417 }
1418 
1419 bool
1420 Execute::isInbetweenInsts(ThreadID thread_id) const
1421 {
1422  return executeInfo[thread_id].lastCommitWasEndOfMacroop &&
1423  !lsq.accessesInFlight();
1424 }
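/* Execute is only 'in between instructions' when the last commit finished a
 * macroop and the LSQ has no accesses in flight; this is the point at which
 * interrupts (checkInterrupts) and draining (drain) are allowed to act. */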
1425 
1426 void
1427 Execute::evaluate()
1428 {
1429  if (!inp.outputWire->isBubble())
1430  inputBuffer[inp.outputWire->threadId].setTail(*inp.outputWire);
1431 
1432  BranchData &branch = *out.inputWire;
1433 
1434  unsigned int num_issued = 0;
1435 
1436  /* Do all the cycle-wise activities for dcachePort here to potentially
1437  * free up input spaces in the LSQ's requests queue */
1438  lsq.step();
1439 
1440  /* Check interrupts first. Will halt commit if interrupt found */
1441  bool interrupted = false;
1442  ThreadID interrupt_tid = checkInterrupts(branch, interrupted);
1443 
1444  if (interrupt_tid != InvalidThreadID) {
1445  /* Signalling an interrupt this cycle, not issuing/committing from
1446  * any other threads */
1447  } else if (!branch.isBubble()) {
1448  /* It's important that this is here to carry Fetch1 wakeups to Fetch1
1449  * without overwriting them */
1450  DPRINTF(MinorInterrupt, "Execute skipping a cycle to allow old"
1451  " branch to complete\n");
1452  } else {
1453  ThreadID commit_tid = getCommittingThread();
1454 
1455  if (commit_tid != InvalidThreadID) {
1456  ExecuteThreadInfo& commit_info = executeInfo[commit_tid];
1457 
1458  DPRINTF(MinorExecute, "Attempting to commit [tid:%d]\n",
1459  commit_tid);
1460  /* commit can set stalled flags observable to issue and so *must* be
1461  * called first */
1462  if (commit_info.drainState != NotDraining) {
1463  if (commit_info.drainState == DrainCurrentInst) {
1464  /* Commit only micro-ops, don't kill anything else */
1465  commit(commit_tid, true, false, branch);
1466 
1467  if (isInbetweenInsts(commit_tid))
1468  setDrainState(commit_tid, DrainHaltFetch);
1469 
1470  /* Discard any generated branch */
1471  branch = BranchData::bubble();
1472  } else if (commit_info.drainState == DrainAllInsts) {
1473  /* Kill all instructions */
1474  while (getInput(commit_tid))
1475  popInput(commit_tid);
1476  commit(commit_tid, false, true, branch);
1477  }
1478  } else {
1479  /* Commit micro-ops only if interrupted. Otherwise, commit
1480  * anything you like */
1481  DPRINTF(MinorExecute, "Committing micro-ops for interrupt[tid:%d]\n",
1482  commit_tid);
1483  bool only_commit_microops = interrupted &&
1484  hasInterrupt(commit_tid);
1485  commit(commit_tid, only_commit_microops, false, branch);
1486  }
1487 
1488  /* Halt fetch, but don't do it until we have the current instruction in
1489  * the bag */
1490  if (commit_info.drainState == DrainHaltFetch) {
1491  updateBranchData(commit_tid, BranchData::HaltFetch,
1492  MinorDynInst::bubble(),
1493  cpu.getContext(commit_tid)->pcState(), branch);
1494 
1495  cpu.wakeupOnEvent(Pipeline::ExecuteStageId);
1496  setDrainState(commit_tid, DrainAllInsts);
1497  }
1498  }
1499  ThreadID issue_tid = getIssuingThread();
1500  /* This will issue merrily even when interrupted in the sure and
1501  * certain knowledge that the interrupt will change the stream */
1502  if (issue_tid != InvalidThreadID) {
1503  DPRINTF(MinorExecute, "Attempting to issue [tid:%d]\n",
1504  issue_tid);
1505  num_issued = issue(issue_tid);
1506  }
1507 
1508  }
1509 
1510  /* Run logic to step functional units + decide if we are active on the next
1511  * clock cycle */
1512  std::vector<MinorDynInstPtr> next_issuable_insts;
1513  bool can_issue_next = false;
1514 
1515  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1516  /* Find the next issuable instruction for each thread and see if it can
1517  be issued */
1518  if (getInput(tid)) {
1519  unsigned int input_index = executeInfo[tid].inputIndex;
1520  MinorDynInstPtr inst = getInput(tid)->insts[input_index];
1521  if (inst->isFault()) {
1522  can_issue_next = true;
1523  } else if (!inst->isBubble()) {
1524  next_issuable_insts.push_back(inst);
1525  }
1526  }
1527  }
1528 
1529  bool becoming_stalled = true;
1530 
1531  /* Advance the pipelines and note whether they still need to be
1532  * advanced */
1533  for (unsigned int i = 0; i < numFuncUnits; i++) {
1534  FUPipeline *fu = funcUnits[i];
1535  fu->advance();
1536 
1537  /* If we need to tick again, the pipeline will have been left or set
1538  * to be unstalled */
1539  if (fu->occupancy !=0 && !fu->stalled)
1540  becoming_stalled = false;
1541 
1542  /* Could we possibly issue the next instruction from any thread?
1543  * This is quite an expensive test and is only used to determine
1544  * if the CPU should remain active, so only run it if we aren't sure
1545  * we are active next cycle yet */
1546  for (auto inst : next_issuable_insts) {
1547  if (!fu->stalled && fu->provides(inst->staticInst->opClass()) &&
1548  scoreboard[inst->id.threadId].canInstIssue(inst,
1549  NULL, NULL, cpu.curCycle() + Cycles(1),
1550  cpu.getContext(inst->id.threadId))) {
1551  can_issue_next = true;
1552  break;
1553  }
1554  }
1555  }
1556 
1557  bool head_inst_might_commit = false;
1558 
1559  /* Could the head in flight insts be committed */
1560  for (auto const &info : executeInfo) {
1561  if (!info.inFlightInsts->empty()) {
1562  const QueuedInst &head_inst = info.inFlightInsts->front();
1563 
1564  if (head_inst.inst->isNoCostInst()) {
1565  head_inst_might_commit = true;
1566  } else {
1567  FUPipeline *fu = funcUnits[head_inst.inst->fuIndex];
1568  if ((fu->stalled &&
1569  fu->front().inst->id == head_inst.inst->id) ||
1570  lsq.findResponse(head_inst.inst))
1571  {
1572  head_inst_might_commit = true;
1573  break;
1574  }
1575  }
1576  }
1577  }
1578 
1579  DPRINTF(Activity, "Need to tick num issued insts: %s%s%s%s%s%s\n",
1580  (num_issued != 0 ? " (issued some insts)" : ""),
1581  (becoming_stalled ? "(becoming stalled)" : "(not becoming stalled)"),
1582  (can_issue_next ? " (can issue next inst)" : ""),
1583  (head_inst_might_commit ? "(head inst might commit)" : ""),
1584  (lsq.needsToTick() ? " (LSQ needs to tick)" : ""),
1585  (interrupted ? " (interrupted)" : ""));
1586 
1587  bool need_to_tick =
1588  num_issued != 0 || /* Issued some insts this cycle */
1589  !becoming_stalled || /* Some FU pipelines can still move */
1590  can_issue_next || /* Can still issue a new inst */
1591  head_inst_might_commit || /* Could possible commit the next inst */
1592  lsq.needsToTick() || /* Must step the dcache port */
1593  interrupted; /* There are pending interrupts */
1594 
1595  if (!need_to_tick) {
1596  DPRINTF(Activity, "The next cycle might be skippable as there are no"
1597  " advanceable FUs\n");
1598  }
1599 
1600  /* Wake up if we need to tick again */
1601  if (need_to_tick)
1602  cpu.wakeupOnEvent(Pipeline::ExecuteStageId);
1603 
1604  /* Note activity of following buffer */
1605  if (!branch.isBubble())
1606  cpu.activityRecorder->activity();
1607 
1608  /* Make sure the input (if any left) is pushed */
1609  if (!inp.outputWire->isBubble())
1610  inputBuffer[inp.outputWire->threadId].pushTail();
1611 }
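/* Summary of the per-cycle ordering above: step the LSQ, check for
 * interrupts, commit (possibly restricted while draining), then issue, and
 * finally decide whether Execute needs to be ticked again next cycle. */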
1612 
1613 ThreadID
1614 Execute::checkInterrupts(BranchData& branch, bool& interrupted)
1615 {
1616  ThreadID tid = interruptPriority;
1617  /* Evaluate interrupts in round-robin based upon service */
1618  do {
1619  /* Has an interrupt been signalled? This may not be acted on
1620  * straightaway so this is different from took_interrupt */
1621  bool thread_interrupted = false;
1622 
1623  if (FullSystem && cpu.getInterruptController(tid)) {
1624  /* This is here because it seems that after drainResume the
1625  * interrupt controller isn't always set */
1626  thread_interrupted = executeInfo[tid].drainState == NotDraining &&
1627  isInterrupted(tid);
1628  interrupted = interrupted || thread_interrupted;
1629  } else {
1630  DPRINTF(MinorInterrupt, "No interrupt controller\n");
1631  }
1632  DPRINTF(MinorInterrupt, "[tid:%d] thread_interrupted?=%d isInbetweenInsts?=%d\n",
1633  tid, thread_interrupted, isInbetweenInsts(tid));
1634  /* Act on interrupts */
1635  if (thread_interrupted && isInbetweenInsts(tid)) {
1636  if (takeInterrupt(tid, branch)) {
1637  interruptPriority = tid;
1638  return tid;
1639  }
1640  } else {
1641  tid = (tid + 1) % cpu.numThreads;
1642  }
1643  } while (tid != interruptPriority);
1644 
1645  return InvalidThreadID;
1646 }
1647 
1648 bool
1649 Execute::hasInterrupt(ThreadID thread_id)
1650 {
1651  if (FullSystem && cpu.getInterruptController(thread_id)) {
1652  return executeInfo[thread_id].drainState == NotDraining &&
1653  isInterrupted(thread_id);
1654  }
1655 
1656  return false;
1657 }
1658 
1659 void
1660 Execute::minorTrace() const
1661 {
1662  std::ostringstream insts;
1663  std::ostringstream stalled;
1664 
1665  executeInfo[0].instsBeingCommitted.reportData(insts);
1666  lsq.minorTrace();
1667  inputBuffer[0].minorTrace();
1668  scoreboard[0].minorTrace();
1669 
1670  /* Report functional unit stalling in one string */
1671  unsigned int i = 0;
1672  while (i < numFuncUnits)
1673  {
1674  stalled << (funcUnits[i]->stalled ? '1' : 'E');
1675  i++;
1676  if (i != numFuncUnits)
1677  stalled << ',';
1678  }
1679 
1680  minor::minorTrace("insts=%s inputIndex=%d streamSeqNum=%d"
1681  " stalled=%s drainState=%d isInbetweenInsts=%d\n",
1682  insts.str(), executeInfo[0].inputIndex, executeInfo[0].streamSeqNum,
1683  stalled.str(), executeInfo[0].drainState, isInbetweenInsts(0));
1684 
1685  std::for_each(funcUnits.begin(), funcUnits.end(),
1686  std::mem_fn(&FUPipeline::minorTrace));
1687 
1688  executeInfo[0].inFlightInsts->minorTrace();
1689  executeInfo[0].inFUMemInsts->minorTrace();
1690 }
1691 
1692 inline ThreadID
1693 Execute::getCommittingThread()
1694 {
1695  std::vector<ThreadID> priority_list;
1696 
1697  switch (cpu.threadPolicy) {
1698  case enums::SingleThreaded:
1699  return 0;
1700  case enums::RoundRobin:
1701  priority_list = cpu.roundRobinPriority(commitPriority);
1702  break;
1703  case enums::Random:
1704  priority_list = cpu.randomPriority();
1705  break;
1706  default:
1707  panic("Invalid thread policy");
1708  }
1709 
1710  for (auto tid : priority_list) {
1711  ExecuteThreadInfo &ex_info = executeInfo[tid];
1712  bool can_commit_insts = !ex_info.inFlightInsts->empty();
1713  if (can_commit_insts) {
1714  QueuedInst *head_inflight_inst = &(ex_info.inFlightInsts->front());
1715  MinorDynInstPtr inst = head_inflight_inst->inst;
1716 
1717  can_commit_insts = can_commit_insts &&
1718  (!inst->inLSQ || (lsq.findResponse(inst) != NULL));
1719 
1720  if (!inst->inLSQ) {
1721  bool can_transfer_mem_inst = false;
1722  if (!ex_info.inFUMemInsts->empty() && lsq.canRequest()) {
1723  const MinorDynInstPtr head_mem_ref_inst =
1724  ex_info.inFUMemInsts->front().inst;
1725  FUPipeline *fu = funcUnits[head_mem_ref_inst->fuIndex];
1726  const MinorDynInstPtr &fu_inst = fu->front().inst;
1727  can_transfer_mem_inst =
1728  !fu_inst->isBubble() &&
1729  fu_inst->id.threadId == tid &&
1730  !fu_inst->inLSQ &&
1731  fu_inst->canEarlyIssue &&
1732  inst->id.execSeqNum > fu_inst->instToWaitFor;
1733  }
1734 
1735  bool can_execute_fu_inst = inst->fuIndex == noCostFUIndex;
1736  if (can_commit_insts && !can_transfer_mem_inst &&
1737  inst->fuIndex != noCostFUIndex)
1738  {
1739  QueuedInst& fu_inst = funcUnits[inst->fuIndex]->front();
1740  can_execute_fu_inst = !fu_inst.inst->isBubble() &&
1741  fu_inst.inst->id == inst->id;
1742  }
1743 
1744  can_commit_insts = can_commit_insts &&
1745  (can_transfer_mem_inst || can_execute_fu_inst);
1746  }
1747  }
1748 
1749 
1750  if (can_commit_insts) {
1751  commitPriority = tid;
1752  return tid;
1753  }
1754  }
1755 
1756  return InvalidThreadID;
1757 }
1758 
1759 inline ThreadID
1760 Execute::getIssuingThread()
1761 {
1762  std::vector<ThreadID> priority_list;
1763 
1764  switch (cpu.threadPolicy) {
1765  case enums::SingleThreaded:
1766  return 0;
1767  case enums::RoundRobin:
1768  priority_list = cpu.roundRobinPriority(issuePriority);
1769  break;
1770  case enums::Random:
1771  priority_list = cpu.randomPriority();
1772  break;
1773  default:
1774  panic("Invalid thread scheduling policy.");
1775  }
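 /* Issue from the first thread, in priority order, that has
  * instructions waiting in its input buffer from Decode */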
1776 
1777  for (auto tid : priority_list) {
1778  if (getInput(tid)) {
1779  issuePriority = tid;
1780  return tid;
1781  }
1782  }
1783 
1784  return InvalidThreadID;
1785 }
1786 
1787 void
1788 Execute::drainResume()
1789 {
1790  DPRINTF(Drain, "MinorExecute drainResume\n");
1791 
1792  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1793  setDrainState(tid, NotDraining);
1794  }
1795 
1796  cpu.wakeupOnEvent(Pipeline::ExecuteStageId);
1797 }
1798 
1799 std::ostream &operator <<(std::ostream &os, Execute::DrainState state)
1800 {
1801  switch (state)
1802  {
1803  case Execute::NotDraining:
1804  os << "NotDraining";
1805  break;
1806  case Execute::DrainCurrentInst:
1807  os << "DrainCurrentInst";
1808  break;
1809  case Execute::DrainHaltFetch:
1810  os << "DrainHaltFetch";
1811  break;
1812  case Execute::DrainAllInsts:
1813  os << "DrainAllInsts";
1814  break;
1815  default:
1816  os << "Drain-" << static_cast<int>(state);
1817  break;
1818  }
1819 
1820  return os;
1821 }
1822 
1823 void
1824 Execute::setDrainState(ThreadID thread_id, DrainState state)
1825 {
1826  DPRINTF(Drain, "setDrainState[%d]: %s\n", thread_id, state);
1827  executeInfo[thread_id].drainState = state;
1828 }
1829 
1830 unsigned int
1831 Execute::drain()
1832 {
1833  DPRINTF(Drain, "MinorExecute drain\n");
1834 
1835  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1836  if (executeInfo[tid].drainState == NotDraining) {
1837  cpu.wakeupOnEvent(Pipeline::ExecuteStageId);
1838 
1839  /* Go to DrainCurrentInst if we're between microops
1840  * or waiting on an unbufferable memory operation.
1841  * Otherwise we can go straight to DrainHaltFetch
1842  */
1843  if (isInbetweenInsts(tid))
1844  setDrainState(tid, DrainHaltFetch);
1845  else
1846  setDrainState(tid, DrainCurrentInst);
1847  }
1848  }
1849  return (isDrained() ? 0 : 1);
1850 }
1851 
1852 bool
1853 Execute::isDrained()
1854 {
1855  if (!lsq.isDrained())
1856  return false;
1857 
1858  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1859  if (!inputBuffer[tid].empty() ||
1860  !executeInfo[tid].inFlightInsts->empty()) {
1861 
1862  return false;
1863  }
1864  }
1865 
1866  return true;
1867 }
1868 
1869 Execute::~Execute()
1870 {
1871  for (unsigned int i = 0; i < numFuncUnits; i++)
1872  delete funcUnits[i];
1873 
1874  for (ThreadID tid = 0; tid < cpu.numThreads; tid++)
1875  delete executeInfo[tid].inFlightInsts;
1876 }
1877 
1878 bool
1879 Execute::instIsRightStream(MinorDynInstPtr inst)
1880 {
1881  return inst->id.streamSeqNum == executeInfo[inst->id.threadId].streamSeqNum;
1882 }
1883 
1884 bool
1885 Execute::instIsHeadInst(MinorDynInstPtr inst)
1886 {
1887  bool ret = false;
1888 
1889  if (!executeInfo[inst->id.threadId].inFlightInsts->empty())
1890  ret = executeInfo[inst->id.threadId].inFlightInsts->front().inst->id == inst->id;
1891 
1892  return ret;
1893 }
1894 
1897 {
1898  return lsq.getDcachePort();
1899 }
1900 
1901 } // namespace minor
1902 } // namespace gem5