gem5  v21.2.1.1
execute.cc
1 /*
2  * Copyright (c) 2013-2014,2018-2020 ARM Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions are
16  * met: redistributions of source code must retain the above copyright
17  * notice, this list of conditions and the following disclaimer;
18  * redistributions in binary form must reproduce the above copyright
19  * notice, this list of conditions and the following disclaimer in the
20  * documentation and/or other materials provided with the distribution;
21  * neither the name of the copyright holders nor the names of its
22  * contributors may be used to endorse or promote products derived from
23  * this software without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 #include "cpu/minor/execute.hh"
39 
40 #include <functional>
41 
42 #include "cpu/minor/cpu.hh"
43 #include "cpu/minor/exec_context.hh"
44 #include "cpu/minor/fetch1.hh"
45 #include "cpu/minor/lsq.hh"
46 #include "cpu/op_class.hh"
47 #include "debug/Activity.hh"
48 #include "debug/Branch.hh"
49 #include "debug/Drain.hh"
50 #include "debug/ExecFaulting.hh"
51 #include "debug/MinorExecute.hh"
52 #include "debug/MinorInterrupt.hh"
53 #include "debug/MinorMem.hh"
54 #include "debug/MinorTrace.hh"
55 #include "debug/PCEvent.hh"
56 
57 namespace gem5
58 {
59 
60 GEM5_DEPRECATED_NAMESPACE(Minor, minor);
61 namespace minor
62 {
63 
64 Execute::Execute(const std::string &name_,
65  MinorCPU &cpu_,
66  const MinorCPUParams &params,
67  Latch<ForwardInstData>::Output inp_,
68  Latch<BranchData>::Input out_) :
69  Named(name_),
70  inp(inp_),
71  out(out_),
72  cpu(cpu_),
73  zeroReg(cpu.threads[0]->getIsaPtr()->regClasses().
74  at(IntRegClass).zeroReg()),
75  issueLimit(params.executeIssueLimit),
76  memoryIssueLimit(params.executeMemoryIssueLimit),
77  commitLimit(params.executeCommitLimit),
78  memoryCommitLimit(params.executeMemoryCommitLimit),
79  processMoreThanOneInput(params.executeCycleInput),
80  fuDescriptions(*params.executeFuncUnits),
81  numFuncUnits(fuDescriptions.funcUnits.size()),
82  setTraceTimeOnCommit(params.executeSetTraceTimeOnCommit),
83  setTraceTimeOnIssue(params.executeSetTraceTimeOnIssue),
84  allowEarlyMemIssue(params.executeAllowEarlyMemoryIssue),
85  noCostFUIndex(fuDescriptions.funcUnits.size() + 1),
86  lsq(name_ + ".lsq", name_ + ".dcache_port",
87  cpu_, *this,
88  params.executeMaxAccessesInMemory,
89  params.executeMemoryWidth,
90  params.executeLSQRequestsQueueSize,
91  params.executeLSQTransfersQueueSize,
92  params.executeLSQStoreBufferSize,
93  params.executeLSQMaxStoreBufferStoresPerCycle,
94  zeroReg),
95  executeInfo(params.numThreads,
96  ExecuteThreadInfo(params.executeCommitLimit)),
97  interruptPriority(0),
98  issuePriority(0),
99  commitPriority(0)
100 {
101  if (commitLimit < 1) {
102  fatal("%s: executeCommitLimit must be >= 1 (%d)\n", name_,
103  commitLimit);
104  }
105 
106  if (issueLimit < 1) {
107  fatal("%s: executeIssueLimit must be >= 1 (%d)\n", name_,
108  issueLimit);
109  }
110 
111  if (memoryIssueLimit < 1) {
112  fatal("%s: executeMemoryIssueLimit must be >= 1 (%d)\n", name_,
113  memoryIssueLimit);
114  }
115 
116  if (memoryCommitLimit > commitLimit) {
117  fatal("%s: executeMemoryCommitLimit (%d) must be <="
118  " executeCommitLimit (%d)\n",
119  name_, memoryCommitLimit, commitLimit);
120  }
121 
122  if (params.executeInputBufferSize < 1) {
123  fatal("%s: executeInputBufferSize must be >= 1 (%d)\n", name_,
124  params.executeInputBufferSize);
125  }
126 
127  if (params.executeInputBufferSize < 1) {
128  fatal("%s: executeInputBufferSize must be >= 1 (%d)\n", name_,
129  params.executeInputBufferSize);
130  }
131 
132  /* This should be large enough to count all the in-FU instructions
133  * which need to be accounted for in the inFlightInsts
134  * queue */
135  unsigned int total_slots = 0;
136 
137  /* Make FUPipelines for each MinorFU */
138  for (unsigned int i = 0; i < numFuncUnits; i++) {
139  std::ostringstream fu_name;
140  MinorFU *fu_description = fuDescriptions.funcUnits[i];
141 
142  /* Note the total number of instruction slots (for sizing
143  * the inFlightInst queue) and the maximum latency of any FU
144  * (for sizing the activity recorder) */
145  total_slots += fu_description->opLat;
146 
147  fu_name << name_ << ".fu." << i;
148 
149  FUPipeline *fu = new FUPipeline(fu_name.str(), *fu_description, cpu);
150 
151  funcUnits.push_back(fu);
152  }
153 
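154  /* Check that the declared FUs cover every operation class; warn below if any OpClass has no capable FU */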
155  for (int op_class = No_OpClass + 1; op_class < Num_OpClasses; op_class++) {
156  bool found_fu = false;
157  unsigned int fu_index = 0;
158 
159  while (fu_index < numFuncUnits && !found_fu)
160  {
161  if (funcUnits[fu_index]->provides(
162  static_cast<OpClass>(op_class)))
163  {
164  found_fu = true;
165  }
166  fu_index++;
167  }
168 
169  if (!found_fu) {
170  warn("No functional unit for OpClass %s\n",
171  enums::OpClassStrings[op_class]);
172  }
173  }
174 
175  /* Per-thread structures */
176  for (ThreadID tid = 0; tid < params.numThreads; tid++) {
177  std::string tid_str = std::to_string(tid);
178 
179  /* Input Buffers */
180  inputBuffer.push_back(
181  InputBuffer<ForwardInstData>(
182  name_ + ".inputBuffer" + tid_str, "insts",
183  params.executeInputBufferSize));
184 
185  const auto &regClasses = cpu.threads[tid]->getIsaPtr()->regClasses();
186 
187  /* Scoreboards */
188  scoreboard.emplace_back(name_ + ".scoreboard" + tid_str, regClasses);
189 
190  /* In-flight instruction records */
191  executeInfo[tid].inFlightInsts = new Queue<QueuedInst,
192  ReportTraitsAdaptor<QueuedInst> >(
193  name_ + ".inFlightInsts" + tid_str, "insts", total_slots);
194 
195  executeInfo[tid].inFUMemInsts = new Queue<QueuedInst,
196  ReportTraitsAdaptor<QueuedInst> >(
197  name_ + ".inFUMemInsts" + tid_str, "insts", total_slots);
198  }
199 }
200 
201 const ForwardInstData *
202 Execute::getInput(ThreadID tid)
203 {
204  /* Get a line from the inputBuffer to work with */
205  if (!inputBuffer[tid].empty()) {
206  const ForwardInstData &head = inputBuffer[tid].front();
207 
208  return (head.isBubble() ? NULL : &(inputBuffer[tid].front()));
209  } else {
210  return NULL;
211  }
212 }
213 
214 void
215 Execute::popInput(ThreadID tid)
216 {
217  if (!inputBuffer[tid].empty())
218  inputBuffer[tid].pop();
219 
220  executeInfo[tid].inputIndex = 0;
221 }
222 
223 void
224 Execute::tryToBranch(MinorDynInstPtr inst, Fault fault, BranchData &branch)
225 {
226  ThreadContext *thread = cpu.getContext(inst->id.threadId);
227  const std::unique_ptr<PCStateBase> pc_before(inst->pc->clone());
228  std::unique_ptr<PCStateBase> target(thread->pcState().clone());
229 
230  /* Force a branch for SerializeAfter/SquashAfter instructions
231  * at the end of micro-op sequence when we're not suspended */
232  bool force_branch = thread->status() != ThreadContext::Suspended &&
233  !inst->isFault() &&
234  inst->isLastOpInInst() &&
235  (inst->staticInst->isSerializeAfter() ||
236  inst->staticInst->isSquashAfter());
237 
238  DPRINTF(Branch, "tryToBranch before: %s after: %s%s\n",
239  *pc_before, *target, (force_branch ? " (forcing)" : ""));
240 
241  /* Will we change the PC to something other than the next instruction? */
242  bool must_branch = *pc_before != *target ||
243  fault != NoFault ||
244  force_branch;
245 
246  /* The reason for the branch data we're about to generate, set below */
247  BranchData::Reason reason = BranchData::NoBranch;
248 
249  if (fault == NoFault) {
250  inst->staticInst->advancePC(*target);
251  thread->pcState(*target);
252 
253  DPRINTF(Branch, "Advancing current PC from: %s to: %s\n",
254  *pc_before, *target);
255  }
256 
257  if (inst->predictedTaken && !force_branch) {
258  /* Predicted to branch */
259  if (!must_branch) {
260  /* No branch was taken, change stream to get us back to the
261  * intended PC value */
262  DPRINTF(Branch, "Predicted a branch from 0x%x to 0x%x but"
263  " none happened inst: %s\n",
264  inst->pc->instAddr(), inst->predictedTarget->instAddr(),
265  *inst);
266 
267  reason = BranchData::BadlyPredictedBranch;
268  } else if (*inst->predictedTarget == *target) {
269  /* Branch prediction got the right target, kill the branch and
270  * carry on.
271  * Note that this information to the branch predictor might get
272  * overwritten by a "real" branch during this cycle */
273  DPRINTF(Branch, "Predicted a branch from 0x%x to 0x%x correctly"
274  " inst: %s\n",
275  inst->pc->instAddr(), inst->predictedTarget->instAddr(),
276  *inst);
277 
278  reason = BranchData::CorrectlyPredictedBranch;
279  } else {
280  /* Branch prediction got the wrong target */
281  DPRINTF(Branch, "Predicted a branch from 0x%x to 0x%x"
282  " but got the wrong target (actual: 0x%x) inst: %s\n",
283  inst->pc->instAddr(), inst->predictedTarget->instAddr(),
284  target->instAddr(), *inst);
285 
286  reason = BranchData::BadlyPredictedBranchTarget;
287  }
288  } else if (must_branch) {
289  /* Unpredicted branch */
290  DPRINTF(Branch, "Unpredicted branch from 0x%x to 0x%x inst: %s\n",
291  inst->pc->instAddr(), target->instAddr(), *inst);
292 
293  reason = BranchData::UnpredictedBranch;
294  } else {
295  /* No branch at all */
296  reason = BranchData::NoBranch;
297  }
298 
299  updateBranchData(inst->id.threadId, reason, inst, *target, branch);
300 }
301 
302 void
303 Execute::updateBranchData(
304  ThreadID tid,
305  BranchData::Reason reason,
306  MinorDynInstPtr inst, const PCStateBase &target,
307  BranchData &branch)
308 {
309  if (reason != BranchData::NoBranch) {
310  /* Bump up the stream sequence number on a real branch*/
311  if (BranchData::isStreamChange(reason))
312  executeInfo[tid].streamSeqNum++;
313 
314  /* Branches (even mis-predictions) don't change the predictionSeqNum,
315  * just the streamSeqNum */
316  branch = BranchData(reason, tid,
317  executeInfo[tid].streamSeqNum,
318  /* Maintaining predictionSeqNum if there's no inst is just a
319  * courtesy and looks better on minorview */
320  (inst->isBubble() ? executeInfo[tid].lastPredictionSeqNum
321  : inst->id.predictionSeqNum),
322  target, inst);
323 
324  DPRINTF(Branch, "Branch data signalled: %s\n", branch);
325  }
326 }
327 
328 void
329 Execute::handleMemResponse(MinorDynInstPtr inst,
330  LSQ::LSQRequestPtr response, BranchData &branch, Fault &fault)
331 {
332  ThreadID thread_id = inst->id.threadId;
333  ThreadContext *thread = cpu.getContext(thread_id);
334 
335  ExecContext context(cpu, *cpu.threads[thread_id], *this, inst, zeroReg);
336 
337  PacketPtr packet = response->packet;
338 
339  bool is_load = inst->staticInst->isLoad();
340  bool is_store = inst->staticInst->isStore();
341  bool is_atomic = inst->staticInst->isAtomic();
342  bool is_prefetch = inst->staticInst->isDataPrefetch();
343 
344  /* If true, the trace's predicate value will be taken from the exec
345  * context predicate, otherwise, it will be set to false */
346  bool use_context_predicate = true;
347 
348  if (inst->translationFault != NoFault) {
349  /* Invoke memory faults. */
350  DPRINTF(MinorMem, "Completing fault from DTLB access: %s\n",
351  inst->translationFault->name());
352 
353  if (inst->staticInst->isPrefetch()) {
354  DPRINTF(MinorMem, "Not taking fault on prefetch: %s\n",
355  inst->translationFault->name());
356 
357  /* Don't assign to fault */
358  } else {
359  /* Take the fault raised during the TLB/memory access */
360  fault = inst->translationFault;
361 
362  fault->invoke(thread, inst->staticInst);
363  }
364  } else if (!packet) {
365  DPRINTF(MinorMem, "Completing failed request inst: %s\n",
366  *inst);
367  use_context_predicate = false;
368  if (!context.readMemAccPredicate())
369  inst->staticInst->completeAcc(nullptr, &context, inst->traceData);
370  } else if (packet->isError()) {
371  DPRINTF(MinorMem, "Trying to commit error response: %s\n",
372  *inst);
373 
374  fatal("Received error response packet for inst: %s\n", *inst);
375  } else if (is_store || is_load || is_prefetch || is_atomic) {
376  assert(packet);
377 
378  DPRINTF(MinorMem, "Memory response inst: %s addr: 0x%x size: %d\n",
379  *inst, packet->getAddr(), packet->getSize());
380 
381  if (is_load && packet->getSize() > 0) {
382  DPRINTF(MinorMem, "Memory data[0]: 0x%x\n",
383  static_cast<unsigned int>(packet->getConstPtr<uint8_t>()[0]));
384  }
385 
386  /* Complete the memory access instruction */
387  fault = inst->staticInst->completeAcc(packet, &context,
388  inst->traceData);
389 
390  if (fault != NoFault) {
391  /* Invoke fault created by instruction completion */
392  DPRINTF(MinorMem, "Fault in memory completeAcc: %s\n",
393  fault->name());
394  fault->invoke(thread, inst->staticInst);
395  } else {
396  /* Stores need to be pushed into the store buffer to finish
397  * them off */
398  if (response->needsToBeSentToStoreBuffer())
399  lsq.sendStoreToStoreBuffer(response);
400  }
401  } else {
402  fatal("There should only ever be reads, "
403  "writes or faults at this point\n");
404  }
405 
406  lsq.popResponse(response);
407 
408  if (inst->traceData) {
409  inst->traceData->setPredicate((use_context_predicate ?
410  context.readPredicate() : false));
411  }
412 
413  doInstCommitAccounting(inst);
414 
415  /* Generate output to account for branches */
416  tryToBranch(inst, fault, branch);
417 }
418 
419 bool
420 Execute::isInterrupted(ThreadID thread_id) const
421 {
422  return cpu.checkInterrupts(thread_id);
423 }
424 
425 bool
426 Execute::takeInterrupt(ThreadID thread_id, BranchData &branch)
427 {
428  DPRINTF(MinorInterrupt, "Considering interrupt status from PC: %s\n",
429  cpu.getContext(thread_id)->pcState());
430 
431  Fault interrupt = cpu.getInterruptController(thread_id)->getInterrupt();
432 
433  if (interrupt != NoFault) {
434  /* The interrupt *must* set pcState */
435  cpu.getInterruptController(thread_id)->updateIntrInfo();
436  interrupt->invoke(cpu.getContext(thread_id));
437 
438  assert(!lsq.accessesInFlight());
439 
440  DPRINTF(MinorInterrupt, "Invoking interrupt: %s to PC: %s\n",
441  interrupt->name(), cpu.getContext(thread_id)->pcState());
442 
443  /* Assume that an interrupt *must* cause a branch. Assert this? */
444 
445  updateBranchData(thread_id, BranchData::Interrupt,
446  MinorDynInst::bubble(), cpu.getContext(thread_id)->pcState(),
447  branch);
448  }
449 
450  return interrupt != NoFault;
451 }
452 
453 bool
454 Execute::executeMemRefInst(MinorDynInstPtr inst, BranchData &branch,
455  bool &passed_predicate, Fault &fault)
456 {
457  bool issued = false;
458 
459  /* Set to true if the mem op. is issued and sent to the mem system */
460  passed_predicate = false;
461 
462  if (!lsq.canRequest()) {
463  /* Not acting on instruction yet as the memory
464  * queues are full */
465  issued = false;
466  } else {
467  ThreadContext *thread = cpu.getContext(inst->id.threadId);
468  std::unique_ptr<PCStateBase> old_pc(thread->pcState().clone());
469 
470  ExecContext context(cpu, *cpu.threads[inst->id.threadId],
471  *this, inst, zeroReg);
472 
473  DPRINTF(MinorExecute, "Initiating memRef inst: %s\n", *inst);
474 
475  Fault init_fault = inst->staticInst->initiateAcc(&context,
476  inst->traceData);
477 
478  if (inst->inLSQ) {
479  if (init_fault != NoFault) {
480  assert(inst->translationFault != NoFault);
481  // Translation faults are dealt with in handleMemResponse()
482  init_fault = NoFault;
483  } else {
484  // If we have a translation fault then it got suppressed by
485  // initiateAcc()
486  inst->translationFault = NoFault;
487  }
488  }
489 
490  if (init_fault != NoFault) {
491  DPRINTF(MinorExecute, "Fault on memory inst: %s"
492  " initiateAcc: %s\n", *inst, init_fault->name());
493  fault = init_fault;
494  } else {
495  /* Only set this if the instruction passed its
496  * predicate */
497  if (!context.readMemAccPredicate()) {
498  DPRINTF(MinorMem, "No memory access for inst: %s\n", *inst);
499  assert(context.readPredicate());
500  }
501  passed_predicate = context.readPredicate();
502 
503  /* Set predicate in tracing */
504  if (inst->traceData)
505  inst->traceData->setPredicate(passed_predicate);
506 
507  /* If the instruction didn't pass its predicate
508  * or it is a predicated vector instruction and the
509  * associated predicate register is all-false (and so will not
510  * progress from here), try to branch to correct the
511  * branch mis-prediction. */
512  if (!inst->inLSQ) {
513  /* Leave it up to commit to handle the fault */
514  lsq.pushFailedRequest(inst);
515  inst->inLSQ = true;
516  }
517  }
518 
519  /* Restore thread PC */
520  thread->pcState(*old_pc);
521  issued = true;
522  }
523 
524  return issued;
525 }
526 
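527 /* Increment a cyclic buffer index, wrapping back to 0 when it reaches cycle_size */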
528 inline unsigned int
529 cyclicIndexInc(unsigned int index, unsigned int cycle_size)
530 {
531  unsigned int ret = index + 1;
532 
533  if (ret == cycle_size)
534  ret = 0;
535 
536  return ret;
537 }
538 
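539 /* Decrement a cyclic buffer index, wrapping to cycle_size - 1 when it passes 0 */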
540 inline unsigned int
541 cyclicIndexDec(unsigned int index, unsigned int cycle_size)
542 {
543  int ret = index - 1;
544 
545  if (ret < 0)
546  ret = cycle_size - 1;
547 
548  return ret;
549 }
550 
551 unsigned int
552 Execute::issue(ThreadID thread_id)
553 {
554  const ForwardInstData *insts_in = getInput(thread_id);
555  ExecuteThreadInfo &thread = executeInfo[thread_id];
556 
557  /* Early termination if we have no instructions */
558  if (!insts_in)
559  return 0;
560 
561  /* Start from the first FU */
562  unsigned int fu_index = 0;
563 
564  /* Remains true while instructions are still being issued. If any
565  * instruction fails to issue, this is set to false and we exit issue.
566  * This strictly enforces in-order issue. For other issue behaviours,
567  * a more complicated test in the outer while loop below is needed. */
568  bool issued = true;
569 
570  /* Number of insts issued this cycle to check for issueLimit */
571  unsigned num_insts_issued = 0;
572 
573  /* Number of memory ops issued this cycle to check for memoryIssueLimit */
574  unsigned num_mem_insts_issued = 0;
575 
576  /* Number of instructions discarded this cycle in order to enforce a
577  * discardLimit. @todo, add that parameter? */
578  unsigned num_insts_discarded = 0;
579 
580  do {
581  MinorDynInstPtr inst = insts_in->insts[thread.inputIndex];
582  Fault fault = inst->fault;
583  bool discarded = false;
584  bool issued_mem_ref = false;
585 
586  if (inst->isBubble()) {
587  /* Skip */
588  issued = true;
589  } else if (cpu.getContext(thread_id)->status() ==
590  ThreadContext::Suspended)
591  {
592  DPRINTF(MinorExecute, "Discarding inst: %s from suspended"
593  " thread\n", *inst);
594 
595  issued = true;
596  discarded = true;
597  } else if (inst->id.streamSeqNum != thread.streamSeqNum) {
598  DPRINTF(MinorExecute, "Discarding inst: %s as its stream"
599  " state was unexpected, expected: %d\n",
600  *inst, thread.streamSeqNum);
601  issued = true;
602  discarded = true;
603  } else {
604  /* Try and issue an instruction into an FU, assume we didn't and
605  * fix that in the loop */
606  issued = false;
607 
608  /* Try FU from 0 each instruction */
609  fu_index = 0;
610 
611  /* Try and issue a single instruction stepping through the
612  * available FUs */
613  do {
614  FUPipeline *fu = funcUnits[fu_index];
615 
616  DPRINTF(MinorExecute, "Trying to issue inst: %s to FU: %d\n",
617  *inst, fu_index);
618 
619  /* Does the examined fu have the OpClass-related capability
620  * needed to execute this instruction? Faults can always
621  * issue to any FU but probably should just 'live' in the
622  * inFlightInsts queue rather than having an FU. */
623  bool fu_is_capable = (!inst->isFault() ?
624  fu->provides(inst->staticInst->opClass()) : true);
625 
626  if (inst->isNoCostInst()) {
627  /* Issue free insts. to a fake numbered FU */
628  fu_index = noCostFUIndex;
629 
630  /* And start the countdown on activity to allow
631  * this instruction to get to the end of its FU */
632  cpu.activityRecorder->activity();
633 
634  /* Mark the destinations for this instruction as
635  * busy */
636  scoreboard[thread_id].markupInstDests(inst, cpu.curCycle() +
637  Cycles(0), cpu.getContext(thread_id), false);
638 
639  DPRINTF(MinorExecute, "Issuing %s to %d\n", inst->id, noCostFUIndex);
640  inst->fuIndex = noCostFUIndex;
641  inst->extraCommitDelay = Cycles(0);
642  inst->extraCommitDelayExpr = NULL;
643 
644  /* Push the instruction onto the inFlight queue so
645  * it can be committed in order */
646  QueuedInst fu_inst(inst);
647  thread.inFlightInsts->push(fu_inst);
648 
649  issued = true;
650 
651  } else if (!fu_is_capable || fu->alreadyPushed()) {
652  /* Skip */
653  if (!fu_is_capable) {
654  DPRINTF(MinorExecute, "Can't issue as FU: %d isn't"
655  " capable\n", fu_index);
656  } else {
657  DPRINTF(MinorExecute, "Can't issue as FU: %d is"
658  " already busy\n", fu_index);
659  }
660  } else if (fu->stalled) {
661  DPRINTF(MinorExecute, "Can't issue inst: %s into FU: %d,"
662  " it's stalled\n",
663  *inst, fu_index);
664  } else if (!fu->canInsert()) {
665  DPRINTF(MinorExecute, "Can't issue inst: %s to busy FU"
666  " for another: %d cycles\n",
667  *inst, fu->cyclesBeforeInsert());
668  } else {
669  MinorFUTiming *timing = (!inst->isFault() ?
670  fu->findTiming(inst->staticInst) : NULL);
671 
672  const std::vector<Cycles> *src_latencies =
673  (timing ? &(timing->srcRegsRelativeLats)
674  : NULL);
675 
676  const std::vector<bool> *cant_forward_from_fu_indices =
677  &(fu->cantForwardFromFUIndices);
678 
679  if (timing && timing->suppress) {
680  DPRINTF(MinorExecute, "Can't issue inst: %s as extra"
681  " decoding is suppressing it\n",
682  *inst);
683  } else if (!scoreboard[thread_id].canInstIssue(inst,
684  src_latencies, cant_forward_from_fu_indices,
685  cpu.curCycle(), cpu.getContext(thread_id)))
686  {
687  DPRINTF(MinorExecute, "Can't issue inst: %s yet\n",
688  *inst);
689  } else {
690  /* Can insert the instruction into this FU */
691  DPRINTF(MinorExecute, "Issuing inst: %s"
692  " into FU %d\n", *inst,
693  fu_index);
694 
695  Cycles extra_dest_retire_lat = Cycles(0);
696  TimingExpr *extra_dest_retire_lat_expr = NULL;
697  Cycles extra_assumed_lat = Cycles(0);
698 
699  /* Add the extraCommitDelay and extraAssumeLat to
700  * the FU pipeline timings */
701  if (timing) {
702  extra_dest_retire_lat =
703  timing->extraCommitLat;
704  extra_dest_retire_lat_expr =
705  timing->extraCommitLatExpr;
706  extra_assumed_lat =
707  timing->extraAssumedLat;
708  }
709 
710  issued_mem_ref = inst->isMemRef();
711 
712  QueuedInst fu_inst(inst);
713 
714  /* Decorate the inst with FU details */
715  inst->fuIndex = fu_index;
716  inst->extraCommitDelay = extra_dest_retire_lat;
717  inst->extraCommitDelayExpr =
718  extra_dest_retire_lat_expr;
719 
720  if (issued_mem_ref) {
721  /* Remember which instruction this memory op
722  * depends on so that initiateAcc can be called
723  * early */
724  if (allowEarlyMemIssue) {
725  inst->instToWaitFor =
726  scoreboard[thread_id].execSeqNumToWaitFor(inst,
727  cpu.getContext(thread_id));
728 
729  if (lsq.getLastMemBarrier(thread_id) >
730  inst->instToWaitFor)
731  {
732  DPRINTF(MinorExecute, "A barrier will"
733  " cause a delay in mem ref issue of"
734  " inst: %s until after inst"
735  " %d(exec)\n", *inst,
736  lsq.getLastMemBarrier(thread_id));
737 
738  inst->instToWaitFor =
739  lsq.getLastMemBarrier(thread_id);
740  } else {
741  DPRINTF(MinorExecute, "Memory ref inst:"
742  " %s must wait for inst %d(exec)"
743  " before issuing\n",
744  *inst, inst->instToWaitFor);
745  }
746 
747  inst->canEarlyIssue = true;
748  }
749  /* Also queue this instruction in the memory ref
750  * queue to ensure in-order issue to the LSQ */
751  DPRINTF(MinorExecute, "Pushing mem inst: %s\n",
752  *inst);
753  thread.inFUMemInsts->push(fu_inst);
754  }
755 
756  /* Issue to FU */
757  fu->push(fu_inst);
758  /* And start the countdown on activity to allow
759  * this instruction to get to the end of its FU */
760  cpu.activityRecorder->activity();
761 
762  /* Mark the destinations for this instruction as
763  * busy */
764  scoreboard[thread_id].markupInstDests(inst, cpu.curCycle() +
765  fu->description.opLat +
766  extra_dest_retire_lat +
767  extra_assumed_lat,
768  cpu.getContext(thread_id),
769  issued_mem_ref && extra_assumed_lat == Cycles(0));
770 
771  /* Push the instruction onto the inFlight queue so
772  * it can be committed in order */
773  thread.inFlightInsts->push(fu_inst);
774 
775  issued = true;
776  }
777  }
778 
779  fu_index++;
780  } while (fu_index != numFuncUnits && !issued);
781 
782  if (!issued)
783  DPRINTF(MinorExecute, "Didn't issue inst: %s\n", *inst);
784  }
785 
786  if (issued) {
787  /* Generate MinorTrace's MinorInst lines. Do this at commit
788  * to allow better instruction annotation? */
789  if (debug::MinorTrace && !inst->isBubble()) {
790  inst->minorTraceInst(*this,
791  cpu.threads[0]->getIsaPtr()->regClasses());
792  }
793 
794  /* Mark up barriers in the LSQ */
795  if (!discarded && inst->isInst() &&
796  inst->staticInst->isFullMemBarrier())
797  {
798  DPRINTF(MinorMem, "Issuing memory barrier inst: %s\n", *inst);
799  lsq.issuedMemBarrierInst(inst);
800  }
801 
802  if (inst->traceData && setTraceTimeOnIssue) {
803  inst->traceData->setWhen(curTick());
804  }
805 
806  if (issued_mem_ref)
807  num_mem_insts_issued++;
808 
809  if (discarded) {
810  num_insts_discarded++;
811  } else if (!inst->isBubble()) {
812  num_insts_issued++;
813 
814  if (num_insts_issued == issueLimit)
815  DPRINTF(MinorExecute, "Reached inst issue limit\n");
816  }
817 
818  thread.inputIndex++;
819  DPRINTF(MinorExecute, "Stepping to next inst inputIndex: %d\n",
820  thread.inputIndex);
821  }
822 
823  /* Got to the end of a line */
824  if (thread.inputIndex == insts_in->width()) {
825  popInput(thread_id);
826  /* Set insts_in to null to force us to leave the surrounding
827  * loop */
828  insts_in = NULL;
829 
830  if (processMoreThanOneInput) {
831  DPRINTF(MinorExecute, "Wrapping\n");
832  insts_in = getInput(thread_id);
833  }
834  }
835  } while (insts_in && thread.inputIndex < insts_in->width() &&
836  /* We still have instructions */
837  fu_index != numFuncUnits && /* Not visited all FUs */
838  issued && /* We've not yet failed to issue an instruction */
839  num_insts_issued != issueLimit && /* Still allowed to issue */
840  num_mem_insts_issued != memoryIssueLimit);
841 
842  return num_insts_issued;
843 }
844 
845 bool
846 Execute::tryPCEvents(ThreadID thread_id)
847 {
848  ThreadContext *thread = cpu.getContext(thread_id);
849  unsigned int num_pc_event_checks = 0;
850 
851  /* Handle PC events on instructions */
852  Addr oldPC;
853  do {
854  oldPC = thread->pcState().instAddr();
855  cpu.threads[thread_id]->pcEventQueue.service(oldPC, thread);
856  num_pc_event_checks++;
857  } while (oldPC != thread->pcState().instAddr());
858 
859  if (num_pc_event_checks > 1) {
860  DPRINTF(PCEvent, "Acting on PC Event to PC: %s\n",
861  thread->pcState());
862  }
863 
864  return num_pc_event_checks > 1;
865 }
866 
867 void
868 Execute::doInstCommitAccounting(MinorDynInstPtr inst)
869 {
870  assert(!inst->isFault());
871 
872  MinorThread *thread = cpu.threads[inst->id.threadId];
873 
874  /* Increment the many and various inst and op counts in the
875  * thread and system */
876  if (!inst->staticInst->isMicroop() || inst->staticInst->isLastMicroop())
877  {
878  thread->numInst++;
879  thread->threadStats.numInsts++;
880  cpu.stats.numInsts++;
881 
882  /* Act on events related to instruction counts */
883  thread->comInstEventQueue.serviceEvents(thread->numInst);
884  }
885  thread->numOp++;
886  thread->threadStats.numOps++;
887  cpu.stats.numOps++;
888  cpu.stats.committedInstType[inst->id.threadId]
889  [inst->staticInst->opClass()]++;
890 
891  /* Set the CP SeqNum to the numOps commit number */
892  if (inst->traceData)
893  inst->traceData->setCPSeq(thread->numOp);
894 
895  cpu.probeInstCommit(inst->staticInst, inst->pc->instAddr());
896 }
897 
898 bool
899 Execute::commitInst(MinorDynInstPtr inst, bool early_memory_issue,
900  BranchData &branch, Fault &fault, bool &committed,
901  bool &completed_mem_issue)
902 {
903  ThreadID thread_id = inst->id.threadId;
904  ThreadContext *thread = cpu.getContext(thread_id);
905 
906  bool completed_inst = true;
907  fault = NoFault;
908 
909  /* Is the thread for this instruction suspended? In that case, just
910  * stall as long as there are no pending interrupts */
911  if (thread->status() == ThreadContext::Suspended &&
912  !isInterrupted(thread_id))
913  {
914  panic("We should never hit the case where we try to commit from a "
915  "suspended thread as the streamSeqNum should not match");
916  } else if (inst->isFault()) {
917  ExecContext context(cpu, *cpu.threads[thread_id], *this,
918  inst, zeroReg);
919 
920  DPRINTF(MinorExecute, "Fault inst reached Execute: %s\n",
921  inst->fault->name());
922 
923  fault = inst->fault;
924  inst->fault->invoke(thread, NULL);
925 
926  tryToBranch(inst, fault, branch);
927  } else if (inst->staticInst->isMemRef()) {
928  /* Memory accesses are executed in two parts:
929  * executeMemRefInst -- calculates the EA and issues the access
930  * to memory. This is done here.
931  * handleMemResponse -- handles the response packet, done by
932  * Execute::commit
933  *
934  * While the memory access is in its FU, the EA is being
935  * calculated. At the end of the FU, when it is ready to
936  * 'commit' (in this function), the access is presented to the
937  * memory queues. When a response comes back from memory,
938  * Execute::commit will commit it.
939  */
940  bool predicate_passed = false;
941  bool completed_mem_inst = executeMemRefInst(inst, branch,
942  predicate_passed, fault);
943 
944  if (completed_mem_inst && fault != NoFault) {
945  if (early_memory_issue) {
946  DPRINTF(MinorExecute, "Fault in early executing inst: %s\n",
947  fault->name());
948  /* Don't execute the fault, just stall the instruction
949  * until it gets to the head of inFlightInsts */
950  inst->canEarlyIssue = false;
951  /* Not completed as we'll come here again to pick up
952  * the fault when we get to the end of the FU */
953  completed_inst = false;
954  } else {
955  DPRINTF(MinorExecute, "Fault in execute: %s\n",
956  fault->name());
957  fault->invoke(thread, NULL);
958 
959  tryToBranch(inst, fault, branch);
960  completed_inst = true;
961  }
962  } else {
963  completed_inst = completed_mem_inst;
964  }
965  completed_mem_issue = completed_inst;
966  } else if (inst->isInst() && inst->staticInst->isFullMemBarrier() &&
967  !lsq.canPushIntoStoreBuffer())
968  {
969  DPRINTF(MinorExecute, "Can't commit data barrier inst: %s yet as"
970  " there isn't space in the store buffer\n", *inst);
971 
972  completed_inst = false;
973  } else if (inst->isInst() && inst->staticInst->isQuiesce()
974  && !branch.isBubble()){
975  /* This instruction can suspend, need to be able to communicate
976  * backwards, so no other branches may evaluate this cycle*/
977  completed_inst = false;
978  } else {
979  ExecContext context(cpu, *cpu.threads[thread_id], *this,
980  inst, zeroReg);
981 
982  DPRINTF(MinorExecute, "Committing inst: %s\n", *inst);
983 
984  fault = inst->staticInst->execute(&context,
985  inst->traceData);
986 
987  /* Set the predicate for tracing and dump */
988  if (inst->traceData)
989  inst->traceData->setPredicate(context.readPredicate());
990 
991  committed = true;
992 
993  if (fault != NoFault) {
994  if (inst->traceData) {
995  if (debug::ExecFaulting) {
996  inst->traceData->setFaulting(true);
997  } else {
998  delete inst->traceData;
999  inst->traceData = NULL;
1000  }
1001  }
1002 
1003  DPRINTF(MinorExecute, "Fault in execute of inst: %s fault: %s\n",
1004  *inst, fault->name());
1005  fault->invoke(thread, inst->staticInst);
1006  }
1007 
1008  doInstCommitAccounting(inst);
1009  tryToBranch(inst, fault, branch);
1010  }
1011 
1012  if (completed_inst) {
1013  /* Keep a copy of this instruction's predictionSeqNum just in case
1014  * we need to issue a branch without an instruction (such as an
1015  * interrupt) */
1016  executeInfo[thread_id].lastPredictionSeqNum = inst->id.predictionSeqNum;
1017 
1018  /* Check to see if this instruction suspended the current thread. */
1019  if (!inst->isFault() &&
1020  thread->status() == ThreadContext::Suspended &&
1021  branch.isBubble() && /* It didn't branch too */
1022  !isInterrupted(thread_id)) /* Don't suspend if we have
1023  interrupts */
1024  {
1025  auto &resume_pc = cpu.getContext(thread_id)->pcState();
1026 
1027  assert(resume_pc.microPC() == 0);
1028 
1029  DPRINTF(MinorInterrupt, "Suspending thread: %d from Execute"
1030  " inst: %s\n", thread_id, *inst);
1031 
1032  cpu.stats.numFetchSuspends++;
1033 
1034  updateBranchData(thread_id, BranchData::SuspendThread, inst,
1035  resume_pc, branch);
1036  }
1037  }
1038 
1039  return completed_inst;
1040 }
1041 
1042 void
1043 Execute::commit(ThreadID thread_id, bool only_commit_microops, bool discard,
1044  BranchData &branch)
1045 {
1046  Fault fault = NoFault;
1047  Cycles now = cpu.curCycle();
1048  ExecuteThreadInfo &ex_info = executeInfo[thread_id];
1049 
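1050  /* Commit by repeatedly examining the head of the inFlightInsts queue:
1051  * an instruction is 'completed' once it leaves its FU (either committed,
1052  * discarded on a streamSeqNum mismatch, or issued to the LSQ), and
1053  * responses returning from the LSQ are committed as they arrive. The
1054  * loop below stops at a stream-changing branch, a fault, or when the
1055  * commit limits for this cycle are reached. */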
1075  /* Has an instruction been completed? Once this becomes false, we stop
1076  * trying to complete instructions. */
1077  bool completed_inst = true;
1078 
1079  /* Number of insts committed this cycle to check against commitLimit */
1080  unsigned int num_insts_committed = 0;
1081 
1082  /* Number of memory access instructions committed to check against
1083  * memCommitLimit */
1084  unsigned int num_mem_refs_committed = 0;
1085 
1086  if (only_commit_microops && !ex_info.inFlightInsts->empty()) {
1087  DPRINTF(MinorInterrupt, "Only commit microops %s %d\n",
1088  *(ex_info.inFlightInsts->front().inst),
1089  ex_info.lastCommitWasEndOfMacroop);
1090  }
1091 
1092  while (!ex_info.inFlightInsts->empty() && /* Some more instructions to process */
1093  !branch.isStreamChange() && /* No real branch */
1094  fault == NoFault && /* No faults */
1095  completed_inst && /* Still finding instructions to execute */
1096  num_insts_committed != commitLimit /* Not reached commit limit */
1097  )
1098  {
1099  if (only_commit_microops) {
1100  DPRINTF(MinorInterrupt, "Committing tail of insts before"
1101  " interrupt: %s\n",
1102  *(ex_info.inFlightInsts->front().inst));
1103  }
1104 
1105  QueuedInst *head_inflight_inst = &(ex_info.inFlightInsts->front());
1106 
1107  InstSeqNum head_exec_seq_num =
1108  head_inflight_inst->inst->id.execSeqNum;
1109 
1110  /* The instruction we actually process if completed_inst
1111  * remains true to the end of the loop body.
1112  * Start by considering the head of the in flight insts queue */
1113  MinorDynInstPtr inst = head_inflight_inst->inst;
1114 
1115  bool committed_inst = false;
1116  bool discard_inst = false;
1117  bool completed_mem_ref = false;
1118  bool issued_mem_ref = false;
1119  bool early_memory_issue = false;
1120 
1121  /* Must set this again to go around the loop */
1122  completed_inst = false;
1123 
1124  /* If we're just completing a macroop before an interrupt or drain,
1125  * can we still commit another microop (rather than a memory response)
1126  * without crossing into the next full instruction? */
1127  bool can_commit_insts = !ex_info.inFlightInsts->empty() &&
1128  !(only_commit_microops && ex_info.lastCommitWasEndOfMacroop);
1129 
1130  /* Can we find a mem response for this inst */
1131  LSQ::LSQRequestPtr mem_response =
1132  (inst->inLSQ ? lsq.findResponse(inst) : NULL);
1133 
1134  DPRINTF(MinorExecute, "Trying to commit canCommitInsts: %d\n",
1135  can_commit_insts);
1136 
1137  /* Test for PC events after every instruction */
1138  if (isInbetweenInsts(thread_id) && tryPCEvents(thread_id)) {
1139  ThreadContext *thread = cpu.getContext(thread_id);
1140 
1141  /* Branch as there was a change in PC */
1142  updateBranchData(thread_id, BranchData::UnpredictedBranch,
1143  MinorDynInst::bubble(), thread->pcState(), branch);
1144  } else if (mem_response &&
1145  num_mem_refs_committed < memoryCommitLimit)
1146  {
1147  /* Try to commit from the memory responses next */
1148  discard_inst = inst->id.streamSeqNum !=
1149  ex_info.streamSeqNum || discard;
1150 
1151  DPRINTF(MinorExecute, "Trying to commit mem response: %s\n",
1152  *inst);
1153 
1154  /* Complete or discard the response */
1155  if (discard_inst) {
1156  DPRINTF(MinorExecute, "Discarding mem inst: %s as its"
1157  " stream state was unexpected, expected: %d\n",
1158  *inst, ex_info.streamSeqNum);
1159 
1160  lsq.popResponse(mem_response);
1161  } else {
1162  handleMemResponse(inst, mem_response, branch, fault);
1163  committed_inst = true;
1164  }
1165 
1166  completed_mem_ref = true;
1167  completed_inst = true;
1168  } else if (can_commit_insts) {
1169  /* If true, this instruction will, subject to timing tweaks,
1170  * be considered for completion. try_to_commit flattens
1171  * the `if' tree a bit and allows other tests for inst
1172  * commit to be inserted here. */
1173  bool try_to_commit = false;
1174 
1175  /* Try and issue memory ops early if they:
1176  * - Can push a request into the LSQ
1177  * - Have reached the end of their FUs
1178  * - Have had all their dependencies satisfied
1179  * - Are from the right stream
1180  *
1181  * For any other case, leave it to the normal instruction
1182  * issue below to handle them.
1183  */
1184  if (!ex_info.inFUMemInsts->empty() && lsq.canRequest()) {
1185  DPRINTF(MinorExecute, "Trying to commit from mem FUs\n");
1186 
1187  const MinorDynInstPtr head_mem_ref_inst =
1188  ex_info.inFUMemInsts->front().inst;
1189  FUPipeline *fu = funcUnits[head_mem_ref_inst->fuIndex];
1190  const MinorDynInstPtr &fu_inst = fu->front().inst;
1191 
1192  /* Use this, possibly out of order, inst as the one
1193  * to 'commit'/send to the LSQ */
1194  if (!fu_inst->isBubble() &&
1195  !fu_inst->inLSQ &&
1196  fu_inst->canEarlyIssue &&
1197  ex_info.streamSeqNum == fu_inst->id.streamSeqNum &&
1198  head_exec_seq_num > fu_inst->instToWaitFor)
1199  {
1200  DPRINTF(MinorExecute, "Issuing mem ref early"
1201  " inst: %s instToWaitFor: %d\n",
1202  *(fu_inst), fu_inst->instToWaitFor);
1203 
1204  inst = fu_inst;
1205  try_to_commit = true;
1206  early_memory_issue = true;
1207  completed_inst = true;
1208  }
1209  }
1210 
1211  /* Try and commit FU-less insts */
1212  if (!completed_inst && inst->isNoCostInst()) {
1213  DPRINTF(MinorExecute, "Committing no cost inst: %s", *inst);
1214 
1215  try_to_commit = true;
1216  completed_inst = true;
1217  }
1218 
1219  /* Try to issue from the ends of FUs and the inFlightInsts
1220  * queue */
1221  if (!completed_inst && !inst->inLSQ) {
1222  DPRINTF(MinorExecute, "Trying to commit from FUs\n");
1223 
1224  /* Try to commit from a functional unit */
1225  /* Is the head inst of the expected inst's FU actually the
1226  * expected inst? */
1227  QueuedInst &fu_inst =
1228  funcUnits[inst->fuIndex]->front();
1229  InstSeqNum fu_inst_seq_num = fu_inst.inst->id.execSeqNum;
1230 
1231  if (fu_inst.inst->isBubble()) {
1232  /* No instruction ready */
1233  completed_inst = false;
1234  } else if (fu_inst_seq_num != head_exec_seq_num) {
1235  /* Past instruction: we must have already executed it
1236  * in the same cycle and so the head inst isn't
1237  * actually at the end of its pipeline
1238  * Future instruction: handled above and only for
1239  * mem refs on their way to the LSQ */
1240  } else if (fu_inst.inst->id == inst->id) {
1241  /* All instructions can be committed if they have the
1242  * right execSeqNum and there are no in-flight
1243  * mem insts before us */
1244  try_to_commit = true;
1245  completed_inst = true;
1246  }
1247  }
1248 
1249  if (try_to_commit) {
1250  discard_inst = inst->id.streamSeqNum !=
1251  ex_info.streamSeqNum || discard;
1252 
1253  /* Is this instruction discardable as its streamSeqNum
1254  * doesn't match? */
1255  if (!discard_inst) {
1256  /* Try to commit or discard a non-memory instruction.
1257  * Memory ops are actually 'committed' from this FUs
1258  * and 'issued' into the memory system so we need to
1259  * account for them later (commit_was_mem_issue gets
1260  * set) */
1261  if (inst->extraCommitDelayExpr) {
1262  DPRINTF(MinorExecute, "Evaluating expression for"
1263  " extra commit delay inst: %s\n", *inst);
1264 
1265  ThreadContext *thread = cpu.getContext(thread_id);
1266 
1267  TimingExprEvalContext context(inst->staticInst,
1268  thread, NULL);
1269 
1270  uint64_t extra_delay = inst->extraCommitDelayExpr->
1271  eval(context);
1272 
1273  DPRINTF(MinorExecute, "Extra commit delay expr"
1274  " result: %d\n", extra_delay);
1275 
1276  if (extra_delay < 128) {
1277  inst->extraCommitDelay += Cycles(extra_delay);
1278  } else {
1279  DPRINTF(MinorExecute, "Extra commit delay was"
1280  " very long: %d\n", extra_delay);
1281  }
1282  inst->extraCommitDelayExpr = NULL;
1283  }
1284 
1285  /* Move the extraCommitDelay from the instruction
1286  * into the minimumCommitCycle */
1287  if (inst->extraCommitDelay != Cycles(0)) {
1288  inst->minimumCommitCycle = cpu.curCycle() +
1289  inst->extraCommitDelay;
1290  inst->extraCommitDelay = Cycles(0);
1291  }
1292 
1293  /* @todo Think about making lastMemBarrier be
1294  * MAX_UINT_64 to avoid using 0 as a marker value */
1295  if (!inst->isFault() && inst->isMemRef() &&
1296  lsq.getLastMemBarrier(thread_id) <
1297  inst->id.execSeqNum &&
1298  lsq.getLastMemBarrier(thread_id) != 0)
1299  {
1300  DPRINTF(MinorExecute, "Not committing inst: %s yet"
1301  " as there are incomplete barriers in flight\n",
1302  *inst);
1303  completed_inst = false;
1304  } else if (inst->minimumCommitCycle > now) {
1305  DPRINTF(MinorExecute, "Not committing inst: %s yet"
1306  " as it wants to be stalled for %d more cycles\n",
1307  *inst, inst->minimumCommitCycle - now);
1308  completed_inst = false;
1309  } else {
1310  completed_inst = commitInst(inst,
1311  early_memory_issue, branch, fault,
1312  committed_inst, issued_mem_ref);
1313  }
1314  } else {
1315  /* Discard instruction */
1316  completed_inst = true;
1317  }
1318 
1319  if (completed_inst) {
1320  /* Allow the pipeline to advance. If the FU head
1321  * instruction wasn't the inFlightInsts head
1322  * but had already been committed, it would have
1323  * unstalled the pipeline before here */
1324  if (inst->fuIndex != noCostFUIndex) {
1325  DPRINTF(MinorExecute, "Unstalling %d for inst %s\n", inst->fuIndex, inst->id);
1326  funcUnits[inst->fuIndex]->stalled = false;
1327  }
1328  }
1329  }
1330  } else {
1331  DPRINTF(MinorExecute, "No instructions to commit\n");
1332  completed_inst = false;
1333  }
1334 
1335  /* All discardable instructions must also be 'completed' by now */
1336  assert(!(discard_inst && !completed_inst));
1337 
1338  /* Instruction committed but was discarded due to streamSeqNum
1339  * mismatch */
1340  if (discard_inst) {
1341  DPRINTF(MinorExecute, "Discarding inst: %s as its stream"
1342  " state was unexpected, expected: %d\n",
1343  *inst, ex_info.streamSeqNum);
1344 
1345  if (fault == NoFault)
1346  cpu.stats.numDiscardedOps++;
1347  }
1348 
1349  /* Mark the mem inst as being in the LSQ */
1350  if (issued_mem_ref) {
1351  inst->fuIndex = 0;
1352  inst->inLSQ = true;
1353  }
1354 
1355  /* Pop issued (to LSQ) and discarded mem refs from the inFUMemInsts
1356  * as they've *definitely* exited the FUs */
1357  if (completed_inst && inst->isMemRef()) {
1358  /* The MemRef could have been discarded from the FU or the memory
1359  * queue, so just check an FU instruction */
1360  if (!ex_info.inFUMemInsts->empty() &&
1361  ex_info.inFUMemInsts->front().inst == inst)
1362  {
1363  ex_info.inFUMemInsts->pop();
1364  }
1365  }
1366 
1367  if (completed_inst && !(issued_mem_ref && fault == NoFault)) {
1368  /* Note that this includes discarded insts */
1369  DPRINTF(MinorExecute, "Completed inst: %s\n", *inst);
1370 
1371  /* Got to the end of a full instruction? */
1372  ex_info.lastCommitWasEndOfMacroop = inst->isFault() ||
1373  inst->isLastOpInInst();
1374 
1375  /* lastPredictionSeqNum is kept as a convenience to prevent its
1376  * value from changing too much on the minorview display */
1377  ex_info.lastPredictionSeqNum = inst->id.predictionSeqNum;
1378 
1379  /* Finished with the inst, remove it from the inst queue and
1380  * clear its dependencies */
1381  ex_info.inFlightInsts->pop();
1382 
1383  /* Complete barriers in the LSQ/move to store buffer */
1384  if (inst->isInst() && inst->staticInst->isFullMemBarrier()) {
1385  DPRINTF(MinorMem, "Completing memory barrier"
1386  " inst: %s committed: %d\n", *inst, committed_inst);
1387  lsq.completeMemBarrierInst(inst, committed_inst);
1388  }
1389 
1390  scoreboard[thread_id].clearInstDests(inst, inst->isMemRef());
1391  }
1392 
1393  /* Handle per-cycle instruction counting */
1394  if (committed_inst) {
1395  bool is_no_cost_inst = inst->isNoCostInst();
1396 
1397  /* Don't show no cost instructions as having taken a commit
1398  * slot */
1399  if (debug::MinorTrace && !is_no_cost_inst)
1400  ex_info.instsBeingCommitted.insts[num_insts_committed] = inst;
1401 
1402  if (!is_no_cost_inst)
1403  num_insts_committed++;
1404 
1405  if (num_insts_committed == commitLimit)
1406  DPRINTF(MinorExecute, "Reached inst commit limit\n");
1407 
1408  /* Re-set the time of the instruction if that's required for
1409  * tracing */
1410  if (inst->traceData) {
1411  if (setTraceTimeOnCommit)
1412  inst->traceData->setWhen(curTick());
1413  inst->traceData->dump();
1414  }
1415 
1416  if (completed_mem_ref)
1417  num_mem_refs_committed++;
1418 
1419  if (num_mem_refs_committed == memoryCommitLimit)
1420  DPRINTF(MinorExecute, "Reached mem ref commit limit\n");
1421  }
1422  }
1423 }
1424 
1425 bool
1426 Execute::isInbetweenInsts(ThreadID thread_id) const
1427 {
1428  return executeInfo[thread_id].lastCommitWasEndOfMacroop &&
1429  !lsq.accessesInFlight();
1430 }
1431 
1432 void
1433 Execute::evaluate()
1434 {
1435  if (!inp.outputWire->isBubble())
1436  inputBuffer[inp.outputWire->threadId].setTail(*inp.outputWire);
1437 
1438  BranchData &branch = *out.inputWire;
1439 
1440  unsigned int num_issued = 0;
1441 
1442  /* Do all the cycle-wise activities for dcachePort here to potentially
1443  * free up input spaces in the LSQ's requests queue */
1444  lsq.step();
1445 
1446  /* Check interrupts first. Will halt commit if interrupt found */
1447  bool interrupted = false;
1448  ThreadID interrupt_tid = checkInterrupts(branch, interrupted);
1449 
1450  if (interrupt_tid != InvalidThreadID) {
1451  /* Signalling an interrupt this cycle, not issuing/committing from
1452  * any other threads */
1453  } else if (!branch.isBubble()) {
1454  /* It's important that this is here to carry Fetch1 wakeups to Fetch1
1455  * without overwriting them */
1456  DPRINTF(MinorInterrupt, "Execute skipping a cycle to allow old"
1457  " branch to complete\n");
1458  } else {
1459  ThreadID commit_tid = getCommittingThread();
1460 
1461  if (commit_tid != InvalidThreadID) {
1462  ExecuteThreadInfo& commit_info = executeInfo[commit_tid];
1463 
1464  DPRINTF(MinorExecute, "Attempting to commit [tid:%d]\n",
1465  commit_tid);
1466  /* commit can set stalled flags observable to issue and so *must* be
1467  * called first */
1468  if (commit_info.drainState != NotDraining) {
1469  if (commit_info.drainState == DrainCurrentInst) {
1470  /* Commit only micro-ops, don't kill anything else */
1471  commit(commit_tid, true, false, branch);
1472 
1473  if (isInbetweenInsts(commit_tid))
1474  setDrainState(commit_tid, DrainHaltFetch);
1475 
1476  /* Discard any generated branch */
1477  branch = BranchData::bubble();
1478  } else if (commit_info.drainState == DrainAllInsts) {
1479  /* Kill all instructions */
1480  while (getInput(commit_tid))
1481  popInput(commit_tid);
1482  commit(commit_tid, false, true, branch);
1483  }
1484  } else {
1485  /* Commit micro-ops only if interrupted. Otherwise, commit
1486  * anything you like */
1487  DPRINTF(MinorExecute, "Committing micro-ops for interrupt[tid:%d]\n",
1488  commit_tid);
1489  bool only_commit_microops = interrupted &&
1490  hasInterrupt(commit_tid);
1491  commit(commit_tid, only_commit_microops, false, branch);
1492  }
1493 
1494  /* Halt fetch, but don't do it until we have the current instruction in
1495  * the bag */
1496  if (commit_info.drainState == DrainHaltFetch) {
1497  updateBranchData(commit_tid, BranchData::HaltFetch,
1498  MinorDynInst::bubble(),
1499  cpu.getContext(commit_tid)->pcState(), branch);
1500 
1501  cpu.wakeupOnEvent(Pipeline::ExecuteStageId);
1502  setDrainState(commit_tid, DrainAllInsts);
1503  }
1504  }
1505  ThreadID issue_tid = getIssuingThread();
1506  /* This will issue merrily even when interrupted in the sure and
1507  * certain knowledge that the interrupt will change the stream */
1508  if (issue_tid != InvalidThreadID) {
1509  DPRINTF(MinorExecute, "Attempting to issue [tid:%d]\n",
1510  issue_tid);
1511  num_issued = issue(issue_tid);
1512  }
1513 
1514  }
1515 
1516  /* Run logic to step functional units + decide if we are active on the next
1517  * clock cycle */
1518  std::vector<MinorDynInstPtr> next_issuable_insts;
1519  bool can_issue_next = false;
1520 
1521  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1522  /* Find the next issuable instruction for each thread and see if it can
1523  be issued */
1524  if (getInput(tid)) {
1525  unsigned int input_index = executeInfo[tid].inputIndex;
1526  MinorDynInstPtr inst = getInput(tid)->insts[input_index];
1527  if (inst->isFault()) {
1528  can_issue_next = true;
1529  } else if (!inst->isBubble()) {
1530  next_issuable_insts.push_back(inst);
1531  }
1532  }
1533  }
1534 
1535  bool becoming_stalled = true;
1536 
1537  /* Advance the pipelines and note whether they still need to be
1538  * advanced */
1539  for (unsigned int i = 0; i < numFuncUnits; i++) {
1540  FUPipeline *fu = funcUnits[i];
1541  fu->advance();
1542 
1543  /* If we need to tick again, the pipeline will have been left or set
1544  * to be unstalled */
1545  if (fu->occupancy !=0 && !fu->stalled)
1546  becoming_stalled = false;
1547 
1548  /* Could we possibly issue the next instruction from any thread?
1549  * This is quite an expensive test and is only used to determine
1550  * if the CPU should remain active; only run it if we aren't sure
1551  * we are active next cycle yet */
1552  for (auto inst : next_issuable_insts) {
1553  if (!fu->stalled && fu->provides(inst->staticInst->opClass()) &&
1554  scoreboard[inst->id.threadId].canInstIssue(inst,
1555  NULL, NULL, cpu.curCycle() + Cycles(1),
1556  cpu.getContext(inst->id.threadId))) {
1557  can_issue_next = true;
1558  break;
1559  }
1560  }
1561  }
1562 
1563  bool head_inst_might_commit = false;
1564 
1565  /* Could the head in flight insts be committed */
1566  for (auto const &info : executeInfo) {
1567  if (!info.inFlightInsts->empty()) {
1568  const QueuedInst &head_inst = info.inFlightInsts->front();
1569 
1570  if (head_inst.inst->isNoCostInst()) {
1571  head_inst_might_commit = true;
1572  } else {
1573  FUPipeline *fu = funcUnits[head_inst.inst->fuIndex];
1574  if ((fu->stalled &&
1575  fu->front().inst->id == head_inst.inst->id) ||
1576  lsq.findResponse(head_inst.inst))
1577  {
1578  head_inst_might_commit = true;
1579  break;
1580  }
1581  }
1582  }
1583  }
1584 
1585  DPRINTF(Activity, "Need to tick num issued insts: %s%s%s%s%s%s\n",
1586  (num_issued != 0 ? " (issued some insts)" : ""),
1587  (becoming_stalled ? "(becoming stalled)" : "(not becoming stalled)"),
1588  (can_issue_next ? " (can issue next inst)" : ""),
1589  (head_inst_might_commit ? "(head inst might commit)" : ""),
1590  (lsq.needsToTick() ? " (LSQ needs to tick)" : ""),
1591  (interrupted ? " (interrupted)" : ""));
1592 
1593  bool need_to_tick =
1594  num_issued != 0 || /* Issued some insts this cycle */
1595  !becoming_stalled || /* Some FU pipelines can still move */
1596  can_issue_next || /* Can still issue a new inst */
1597  head_inst_might_commit || /* Could possibly commit the next inst */
1598  lsq.needsToTick() || /* Must step the dcache port */
1599  interrupted; /* There are pending interrupts */
1600 
1601  if (!need_to_tick) {
1602  DPRINTF(Activity, "The next cycle might be skippable as there are no"
1603  " advanceable FUs\n");
1604  }
1605 
1606  /* Wake up if we need to tick again */
1607  if (need_to_tick)
1608  cpu.wakeupOnEvent(Pipeline::ExecuteStageId);
1609 
1610  /* Note activity of following buffer */
1611  if (!branch.isBubble())
1612  cpu.activityRecorder->activity();
1613 
1614  /* Make sure the input (if any left) is pushed */
1615  if (!inp.outputWire->isBubble())
1616  inputBuffer[inp.outputWire->threadId].pushTail();
1617 }
1618 
1619 ThreadID
1620 Execute::checkInterrupts(BranchData& branch, bool& interrupted)
1621 {
1622  ThreadID tid = interruptPriority;
1623  /* Evaluate interrupts in round-robin based upon service */
1624  do {
1625  /* Has an interrupt been signalled? This may not be acted on
1626  * straightaway so this is different from took_interrupt */
1627  bool thread_interrupted = false;
1628 
1629  if (FullSystem && cpu.getInterruptController(tid)) {
1630  /* This is here because it seems that after drainResume the
1631  * interrupt controller isn't always set */
1632  thread_interrupted = executeInfo[tid].drainState == NotDraining &&
1633  isInterrupted(tid);
1634  interrupted = interrupted || thread_interrupted;
1635  } else {
1636  DPRINTF(MinorInterrupt, "No interrupt controller\n");
1637  }
1638  DPRINTF(MinorInterrupt, "[tid:%d] thread_interrupted?=%d isInbetweenInsts?=%d\n",
1639  tid, thread_interrupted, isInbetweenInsts(tid));
1640  /* Act on interrupts */
1641  if (thread_interrupted && isInbetweenInsts(tid)) {
1642  if (takeInterrupt(tid, branch)) {
1643  interruptPriority = tid;
1644  return tid;
1645  }
1646  } else {
1647  tid = (tid + 1) % cpu.numThreads;
1648  }
1649  } while (tid != interruptPriority);
1650 
1651  return InvalidThreadID;
1652 }
1653 
1654 bool
1655 Execute::hasInterrupt(ThreadID thread_id)
1656 {
1657  if (FullSystem && cpu.getInterruptController(thread_id)) {
1658  return executeInfo[thread_id].drainState == NotDraining &&
1659  isInterrupted(thread_id);
1660  }
1661 
1662  return false;
1663 }
1664 
1665 void
1666 Execute::minorTrace() const
1667 {
1668  std::ostringstream insts;
1669  std::ostringstream stalled;
1670 
1671  executeInfo[0].instsBeingCommitted.reportData(insts);
1672  lsq.minorTrace();
1673  inputBuffer[0].minorTrace();
1674  scoreboard[0].minorTrace();
1675 
1676  /* Report functional unit stalling in one string */
1677  unsigned int i = 0;
1678  while (i < numFuncUnits)
1679  {
1680  stalled << (funcUnits[i]->stalled ? '1' : 'E');
1681  i++;
1682  if (i != numFuncUnits)
1683  stalled << ',';
1684  }
1685 
1686  minor::minorTrace("insts=%s inputIndex=%d streamSeqNum=%d"
1687  " stalled=%s drainState=%d isInbetweenInsts=%d\n",
1688  insts.str(), executeInfo[0].inputIndex, executeInfo[0].streamSeqNum,
1689  stalled.str(), executeInfo[0].drainState, isInbetweenInsts(0));
1690 
1691  std::for_each(funcUnits.begin(), funcUnits.end(),
1692  std::mem_fn(&FUPipeline::minorTrace));
1693 
1694  executeInfo[0].inFlightInsts->minorTrace();
1695  executeInfo[0].inFUMemInsts->minorTrace();
1696 }
1697 
1698 inline ThreadID
1699 Execute::getCommittingThread()
1700 {
1701  std::vector<ThreadID> priority_list;
1702 
1703  switch (cpu.threadPolicy) {
1704  case enums::SingleThreaded:
1705  return 0;
1706  case enums::RoundRobin:
1707  priority_list = cpu.roundRobinPriority(commitPriority);
1708  break;
1709  case enums::Random:
1710  priority_list = cpu.randomPriority();
1711  break;
1712  default:
1713  panic("Invalid thread policy");
1714  }
1715 
1716  for (auto tid : priority_list) {
1717  ExecuteThreadInfo &ex_info = executeInfo[tid];
1718  bool can_commit_insts = !ex_info.inFlightInsts->empty();
1719  if (can_commit_insts) {
1720  QueuedInst *head_inflight_inst = &(ex_info.inFlightInsts->front());
1721  MinorDynInstPtr inst = head_inflight_inst->inst;
1722 
1723  can_commit_insts = can_commit_insts &&
1724  (!inst->inLSQ || (lsq.findResponse(inst) != NULL));
1725 
1726  if (!inst->inLSQ) {
1727  bool can_transfer_mem_inst = false;
1728  if (!ex_info.inFUMemInsts->empty() && lsq.canRequest()) {
1729  const MinorDynInstPtr head_mem_ref_inst =
1730  ex_info.inFUMemInsts->front().inst;
1731  FUPipeline *fu = funcUnits[head_mem_ref_inst->fuIndex];
1732  const MinorDynInstPtr &fu_inst = fu->front().inst;
1733  can_transfer_mem_inst =
1734  !fu_inst->isBubble() &&
1735  fu_inst->id.threadId == tid &&
1736  !fu_inst->inLSQ &&
1737  fu_inst->canEarlyIssue &&
1738  inst->id.execSeqNum > fu_inst->instToWaitFor;
1739  }
1740 
1741  bool can_execute_fu_inst = inst->fuIndex == noCostFUIndex;
1742  if (can_commit_insts && !can_transfer_mem_inst &&
1743  inst->fuIndex != noCostFUIndex)
1744  {
1745  QueuedInst& fu_inst = funcUnits[inst->fuIndex]->front();
1746  can_execute_fu_inst = !fu_inst.inst->isBubble() &&
1747  fu_inst.inst->id == inst->id;
1748  }
1749 
1750  can_commit_insts = can_commit_insts &&
1751  (can_transfer_mem_inst || can_execute_fu_inst);
1752  }
1753  }
1754 
1755 
1756  if (can_commit_insts) {
1757  commitPriority = tid;
1758  return tid;
1759  }
1760  }
1761 
1762  return InvalidThreadID;
1763 }
1764 
1765 inline ThreadID
1766 Execute::getIssuingThread()
1767 {
1768  std::vector<ThreadID> priority_list;
1769 
1770  switch (cpu.threadPolicy) {
1771  case enums::SingleThreaded:
1772  return 0;
1773  case enums::RoundRobin:
1774  priority_list = cpu.roundRobinPriority(issuePriority);
1775  break;
1776  case enums::Random:
1777  priority_list = cpu.randomPriority();
1778  break;
1779  default:
1780  panic("Invalid thread scheduling policy.");
1781  }
1782 
1783  for (auto tid : priority_list) {
1784  if (getInput(tid)) {
1785  issuePriority = tid;
1786  return tid;
1787  }
1788  }
1789 
1790  return InvalidThreadID;
1791 }
1792 
1793 void
1794 Execute::drainResume()
1795 {
1796  DPRINTF(Drain, "MinorExecute drainResume\n");
1797 
1798  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1799  setDrainState(tid, NotDraining);
1800  }
1801 
1802  cpu.wakeupOnEvent(Pipeline::ExecuteStageId);
1803 }
1804 
1805 std::ostream &operator <<(std::ostream &os, Execute::DrainState state)
1806 {
1807  switch (state)
1808  {
1809  case Execute::NotDraining:
1810  os << "NotDraining";
1811  break;
1812  case Execute::DrainCurrentInst:
1813  os << "DrainCurrentInst";
1814  break;
1815  case Execute::DrainHaltFetch:
1816  os << "DrainHaltFetch";
1817  break;
1818  case Execute::DrainAllInsts:
1819  os << "DrainAllInsts";
1820  break;
1821  default:
1822  os << "Drain-" << static_cast<int>(state);
1823  break;
1824  }
1825 
1826  return os;
1827 }
1828 
1829 void
1830 Execute::setDrainState(ThreadID thread_id, DrainState state)
1831 {
1832  DPRINTF(Drain, "setDrainState[%d]: %s\n", thread_id, state);
1833  executeInfo[thread_id].drainState = state;
1834 }
1835 
1836 unsigned int
1837 Execute::drain()
1838 {
1839  DPRINTF(Drain, "MinorExecute drain\n");
1840 
1841  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1842  if (executeInfo[tid].drainState == NotDraining) {
1843  cpu.wakeupOnEvent(Pipeline::ExecuteStageId);
1844 
1845  /* Go to DrainCurrentInst if we're between microops
1846  * or waiting on an unbufferable memory operation.
1847  * Otherwise we can go straight to DrainHaltFetch
1848  */
1849  if (isInbetweenInsts(tid))
1850  setDrainState(tid, DrainHaltFetch);
1851  else
1852  setDrainState(tid, DrainCurrentInst);
1853  }
1854  }
1855  return (isDrained() ? 0 : 1);
1856 }
1857 
1858 bool
1859 Execute::isDrained()
1860 {
1861  if (!lsq.isDrained())
1862  return false;
1863 
1864  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1865  if (!inputBuffer[tid].empty() ||
1866  !executeInfo[tid].inFlightInsts->empty()) {
1867 
1868  return false;
1869  }
1870  }
1871 
1872  return true;
1873 }
1874 
1875 Execute::~Execute()
1876 {
1877  for (unsigned int i = 0; i < numFuncUnits; i++)
1878  delete funcUnits[i];
1879 
1880  for (ThreadID tid = 0; tid < cpu.numThreads; tid++)
1881  delete executeInfo[tid].inFlightInsts;
1882 }
1883 
1884 bool
1885 Execute::instIsRightStream(MinorDynInstPtr inst)
1886 {
1887  return inst->id.streamSeqNum == executeInfo[inst->id.threadId].streamSeqNum;
1888 }
1889 
1890 bool
1891 Execute::instIsHeadInst(MinorDynInstPtr inst)
1892 {
1893  bool ret = false;
1894 
1895  if (!executeInfo[inst->id.threadId].inFlightInsts->empty())
1896  ret = executeInfo[inst->id.threadId].inFlightInsts->front().inst->id == inst->id;
1897 
1898  return ret;
1899 }
1900 
1901 MinorCPU::MinorCPUPort &
1902 Execute::getDcachePort()
1903 {
1904  return lsq.getDcachePort();
1905 }
1906 
1907 } // namespace minor
1908 } // namespace gem5

Generated on Wed May 4 2022 12:13:53 for gem5 by doxygen 1.8.17