gem5  v21.1.0.0
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
execute.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2013-2014,2018-2020 ARM Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions are
16  * met: redistributions of source code must retain the above copyright
17  * notice, this list of conditions and the following disclaimer;
18  * redistributions in binary form must reproduce the above copyright
19  * notice, this list of conditions and the following disclaimer in the
20  * documentation and/or other materials provided with the distribution;
21  * neither the name of the copyright holders nor the names of its
22  * contributors may be used to endorse or promote products derived from
23  * this software without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 #include "cpu/minor/execute.hh"
39 
40 #include <functional>
41 
42 #include "arch/locked_mem.hh"
43 #include "cpu/minor/cpu.hh"
45 #include "cpu/minor/fetch1.hh"
46 #include "cpu/minor/lsq.hh"
47 #include "cpu/op_class.hh"
48 #include "debug/Activity.hh"
49 #include "debug/Branch.hh"
50 #include "debug/Drain.hh"
51 #include "debug/ExecFaulting.hh"
52 #include "debug/MinorExecute.hh"
53 #include "debug/MinorInterrupt.hh"
54 #include "debug/MinorMem.hh"
55 #include "debug/MinorTrace.hh"
56 #include "debug/PCEvent.hh"
57 
58 namespace gem5
59 {
60 
62 namespace minor
63 {
64 
65 Execute::Execute(const std::string &name_,
66  MinorCPU &cpu_,
67  const MinorCPUParams &params,
70  Named(name_),
71  inp(inp_),
72  out(out_),
73  cpu(cpu_),
74  zeroReg(cpu.threads[0]->getIsaPtr()->regClasses().
75  at(IntRegClass).zeroReg()),
76  issueLimit(params.executeIssueLimit),
77  memoryIssueLimit(params.executeMemoryIssueLimit),
78  commitLimit(params.executeCommitLimit),
79  memoryCommitLimit(params.executeMemoryCommitLimit),
80  processMoreThanOneInput(params.executeCycleInput),
81  fuDescriptions(*params.executeFuncUnits),
82  numFuncUnits(fuDescriptions.funcUnits.size()),
83  setTraceTimeOnCommit(params.executeSetTraceTimeOnCommit),
84  setTraceTimeOnIssue(params.executeSetTraceTimeOnIssue),
85  allowEarlyMemIssue(params.executeAllowEarlyMemoryIssue),
86  noCostFUIndex(fuDescriptions.funcUnits.size() + 1),
87  lsq(name_ + ".lsq", name_ + ".dcache_port",
88  cpu_, *this,
89  params.executeMaxAccessesInMemory,
90  params.executeMemoryWidth,
91  params.executeLSQRequestsQueueSize,
92  params.executeLSQTransfersQueueSize,
93  params.executeLSQStoreBufferSize,
94  params.executeLSQMaxStoreBufferStoresPerCycle,
95  zeroReg),
96  executeInfo(params.numThreads,
97  ExecuteThreadInfo(params.executeCommitLimit)),
98  interruptPriority(0),
99  issuePriority(0),
100  commitPriority(0)
101 {
102  if (commitLimit < 1) {
103  fatal("%s: executeCommitLimit must be >= 1 (%d)\n", name_,
104  commitLimit);
105  }
106 
107  if (issueLimit < 1) {
108  fatal("%s: executeCommitLimit must be >= 1 (%d)\n", name_,
109  issueLimit);
110  }
111 
112  if (memoryIssueLimit < 1) {
113  fatal("%s: executeMemoryIssueLimit must be >= 1 (%d)\n", name_,
115  }
116 
118  fatal("%s: executeMemoryCommitLimit (%d) must be <="
119  " executeCommitLimit (%d)\n",
121  }
122 
123  if (params.executeInputBufferSize < 1) {
124  fatal("%s: executeInputBufferSize must be >= 1 (%d)\n", name_,
125  params.executeInputBufferSize);
126  }
127 
128  if (params.executeInputBufferSize < 1) {
129  fatal("%s: executeInputBufferSize must be >= 1 (%d)\n", name_,
130  params.executeInputBufferSize);
131  }
132 
133  /* This should be large enough to count all the in-FU instructions
134  * which need to be accounted for in the inFlightInsts
135  * queue */
136  unsigned int total_slots = 0;
137 
138  /* Make FUPipelines for each MinorFU */
139  for (unsigned int i = 0; i < numFuncUnits; i++) {
140  std::ostringstream fu_name;
141  MinorFU *fu_description = fuDescriptions.funcUnits[i];
142 
143  /* Note the total number of instruction slots (for sizing
144  * the inFlightInst queue) and the maximum latency of any FU
145  * (for sizing the activity recorder) */
146  total_slots += fu_description->opLat;
147 
148  fu_name << name_ << ".fu." << i;
149 
150  FUPipeline *fu = new FUPipeline(fu_name.str(), *fu_description, cpu);
151 
152  funcUnits.push_back(fu);
153  }
154 
156  for (int op_class = No_OpClass + 1; op_class < Num_OpClasses; op_class++) {
157  bool found_fu = false;
158  unsigned int fu_index = 0;
159 
160  while (fu_index < numFuncUnits && !found_fu)
161  {
162  if (funcUnits[fu_index]->provides(
163  static_cast<OpClass>(op_class)))
164  {
165  found_fu = true;
166  }
167  fu_index++;
168  }
169 
170  if (!found_fu) {
171  warn("No functional unit for OpClass %s\n",
172  enums::OpClassStrings[op_class]);
173  }
174  }
175 
176  /* Per-thread structures */
177  for (ThreadID tid = 0; tid < params.numThreads; tid++) {
178  std::string tid_str = std::to_string(tid);
179 
180  /* Input Buffers */
181  inputBuffer.push_back(
183  name_ + ".inputBuffer" + tid_str, "insts",
184  params.executeInputBufferSize));
185 
186  const auto &regClasses = cpu.threads[tid]->getIsaPtr()->regClasses();
187 
188  /* Scoreboards */
189  scoreboard.emplace_back(name_ + ".scoreboard" + tid_str, regClasses);
190 
191  /* In-flight instruction records */
192  executeInfo[tid].inFlightInsts = new Queue<QueuedInst,
194  name_ + ".inFlightInsts" + tid_str, "insts", total_slots);
195 
196  executeInfo[tid].inFUMemInsts = new Queue<QueuedInst,
198  name_ + ".inFUMemInsts" + tid_str, "insts", total_slots);
199  }
200 }
201 
202 const ForwardInstData *
204 {
205  /* Get a line from the inputBuffer to work with */
206  if (!inputBuffer[tid].empty()) {
207  const ForwardInstData &head = inputBuffer[tid].front();
208 
209  return (head.isBubble() ? NULL : &(inputBuffer[tid].front()));
210  } else {
211  return NULL;
212  }
213 }
214 
215 void
217 {
218  if (!inputBuffer[tid].empty())
219  inputBuffer[tid].pop();
220 
221  executeInfo[tid].inputIndex = 0;
222 }
223 
224 void
226 {
227  ThreadContext *thread = cpu.getContext(inst->id.threadId);
228  const TheISA::PCState &pc_before = inst->pc;
229  TheISA::PCState target = thread->pcState();
230 
231  /* Force a branch for SerializeAfter/SquashAfter instructions
232  * at the end of micro-op sequence when we're not suspended */
233  bool force_branch = thread->status() != ThreadContext::Suspended &&
234  !inst->isFault() &&
235  inst->isLastOpInInst() &&
236  (inst->staticInst->isSerializeAfter() ||
237  inst->staticInst->isSquashAfter());
238 
239  DPRINTF(Branch, "tryToBranch before: %s after: %s%s\n",
240  pc_before, target, (force_branch ? " (forcing)" : ""));
241 
242  /* Will we change the PC to something other than the next instruction? */
243  bool must_branch = pc_before != target ||
244  fault != NoFault ||
245  force_branch;
246 
247  /* The reason for the branch data we're about to generate, set below */
249 
250  if (fault == NoFault) {
251  inst->staticInst->advancePC(target);
252  thread->pcState(target);
253 
254  DPRINTF(Branch, "Advancing current PC from: %s to: %s\n",
255  pc_before, target);
256  }
257 
258  if (inst->predictedTaken && !force_branch) {
259  /* Predicted to branch */
260  if (!must_branch) {
261  /* No branch was taken, change stream to get us back to the
262  * intended PC value */
263  DPRINTF(Branch, "Predicted a branch from 0x%x to 0x%x but"
264  " none happened inst: %s\n",
265  inst->pc.instAddr(), inst->predictedTarget.instAddr(), *inst);
266 
268  } else if (inst->predictedTarget == target) {
269  /* Branch prediction got the right target, kill the branch and
270  * carry on.
271  * Note that this information to the branch predictor might get
272  * overwritten by a "real" branch during this cycle */
273  DPRINTF(Branch, "Predicted a branch from 0x%x to 0x%x correctly"
274  " inst: %s\n",
275  inst->pc.instAddr(), inst->predictedTarget.instAddr(), *inst);
276 
278  } else {
279  /* Branch prediction got the wrong target */
280  DPRINTF(Branch, "Predicted a branch from 0x%x to 0x%x"
281  " but got the wrong target (actual: 0x%x) inst: %s\n",
282  inst->pc.instAddr(), inst->predictedTarget.instAddr(),
283  target.instAddr(), *inst);
284 
286  }
287  } else if (must_branch) {
288  /* Unpredicted branch */
289  DPRINTF(Branch, "Unpredicted branch from 0x%x to 0x%x inst: %s\n",
290  inst->pc.instAddr(), target.instAddr(), *inst);
291 
293  } else {
294  /* No branch at all */
295  reason = BranchData::NoBranch;
296  }
297 
298  updateBranchData(inst->id.threadId, reason, inst, target, branch);
299 }
300 
301 void
303  ThreadID tid,
304  BranchData::Reason reason,
305  MinorDynInstPtr inst, const TheISA::PCState &target,
306  BranchData &branch)
307 {
308  if (reason != BranchData::NoBranch) {
309  /* Bump up the stream sequence number on a real branch*/
310  if (BranchData::isStreamChange(reason))
311  executeInfo[tid].streamSeqNum++;
312 
313  /* Branches (even mis-predictions) don't change the predictionSeqNum,
314  * just the streamSeqNum */
315  branch = BranchData(reason, tid,
316  executeInfo[tid].streamSeqNum,
317  /* Maintaining predictionSeqNum if there's no inst is just a
318  * courtesy and looks better on minorview */
319  (inst->isBubble() ? executeInfo[tid].lastPredictionSeqNum
320  : inst->id.predictionSeqNum),
321  target, inst);
322 
323  DPRINTF(Branch, "Branch data signalled: %s\n", branch);
324  }
325 }
326 
327 void
329  LSQ::LSQRequestPtr response, BranchData &branch, Fault &fault)
330 {
331  ThreadID thread_id = inst->id.threadId;
332  ThreadContext *thread = cpu.getContext(thread_id);
333 
334  ExecContext context(cpu, *cpu.threads[thread_id], *this, inst, zeroReg);
335 
336  PacketPtr packet = response->packet;
337 
338  bool is_load = inst->staticInst->isLoad();
339  bool is_store = inst->staticInst->isStore();
340  bool is_atomic = inst->staticInst->isAtomic();
341  bool is_prefetch = inst->staticInst->isDataPrefetch();
342 
343  /* If true, the trace's predicate value will be taken from the exec
344  * context predicate, otherwise, it will be set to false */
345  bool use_context_predicate = true;
346 
347  if (inst->translationFault != NoFault) {
348  /* Invoke memory faults. */
349  DPRINTF(MinorMem, "Completing fault from DTLB access: %s\n",
350  inst->translationFault->name());
351 
352  if (inst->staticInst->isPrefetch()) {
353  DPRINTF(MinorMem, "Not taking fault on prefetch: %s\n",
354  inst->translationFault->name());
355 
356  /* Don't assign to fault */
357  } else {
358  /* Take the fault raised during the TLB/memory access */
359  fault = inst->translationFault;
360 
361  fault->invoke(thread, inst->staticInst);
362  }
363  } else if (!packet) {
364  DPRINTF(MinorMem, "Completing failed request inst: %s\n",
365  *inst);
366  use_context_predicate = false;
367  if (!context.readMemAccPredicate())
368  inst->staticInst->completeAcc(nullptr, &context, inst->traceData);
369  } else if (packet->isError()) {
370  DPRINTF(MinorMem, "Trying to commit error response: %s\n",
371  *inst);
372 
373  fatal("Received error response packet for inst: %s\n", *inst);
374  } else if (is_store || is_load || is_prefetch || is_atomic) {
375  assert(packet);
376 
377  DPRINTF(MinorMem, "Memory response inst: %s addr: 0x%x size: %d\n",
378  *inst, packet->getAddr(), packet->getSize());
379 
380  if (is_load && packet->getSize() > 0) {
381  DPRINTF(MinorMem, "Memory data[0]: 0x%x\n",
382  static_cast<unsigned int>(packet->getConstPtr<uint8_t>()[0]));
383  }
384 
385  /* Complete the memory access instruction */
386  fault = inst->staticInst->completeAcc(packet, &context,
387  inst->traceData);
388 
389  if (fault != NoFault) {
390  /* Invoke fault created by instruction completion */
391  DPRINTF(MinorMem, "Fault in memory completeAcc: %s\n",
392  fault->name());
393  fault->invoke(thread, inst->staticInst);
394  } else {
395  /* Stores need to be pushed into the store buffer to finish
396  * them off */
397  if (response->needsToBeSentToStoreBuffer())
398  lsq.sendStoreToStoreBuffer(response);
399  }
400  } else {
401  fatal("There should only ever be reads, "
402  "writes or faults at this point\n");
403  }
404 
405  lsq.popResponse(response);
406 
407  if (inst->traceData) {
408  inst->traceData->setPredicate((use_context_predicate ?
409  context.readPredicate() : false));
410  }
411 
413 
414  /* Generate output to account for branches */
415  tryToBranch(inst, fault, branch);
416 }
417 
418 bool
420 {
421  return cpu.checkInterrupts(thread_id);
422 }
423 
424 bool
426 {
427  DPRINTF(MinorInterrupt, "Considering interrupt status from PC: %s\n",
428  cpu.getContext(thread_id)->pcState());
429 
430  Fault interrupt = cpu.getInterruptController(thread_id)->getInterrupt();
431 
432  if (interrupt != NoFault) {
433  /* The interrupt *must* set pcState */
435  interrupt->invoke(cpu.getContext(thread_id));
436 
437  assert(!lsq.accessesInFlight());
438 
439  DPRINTF(MinorInterrupt, "Invoking interrupt: %s to PC: %s\n",
440  interrupt->name(), cpu.getContext(thread_id)->pcState());
441 
442  /* Assume that an interrupt *must* cause a branch. Assert this? */
443 
445  MinorDynInst::bubble(), cpu.getContext(thread_id)->pcState(),
446  branch);
447  }
448 
449  return interrupt != NoFault;
450 }
451 
452 bool
454  bool &passed_predicate, Fault &fault)
455 {
456  bool issued = false;
457 
458  /* Set to true if the mem op. is issued and sent to the mem system */
459  passed_predicate = false;
460 
461  if (!lsq.canRequest()) {
462  /* Not acting on instruction yet as the memory
463  * queues are full */
464  issued = false;
465  } else {
466  ThreadContext *thread = cpu.getContext(inst->id.threadId);
467  TheISA::PCState old_pc = thread->pcState();
468 
469  ExecContext context(cpu, *cpu.threads[inst->id.threadId],
470  *this, inst, zeroReg);
471 
472  DPRINTF(MinorExecute, "Initiating memRef inst: %s\n", *inst);
473 
474  Fault init_fault = inst->staticInst->initiateAcc(&context,
475  inst->traceData);
476 
477  if (inst->inLSQ) {
478  if (init_fault != NoFault) {
479  assert(inst->translationFault != NoFault);
480  // Translation faults are dealt with in handleMemResponse()
481  init_fault = NoFault;
482  } else {
483  // If we have a translation fault then it got suppressed by
484  // initateAcc()
485  inst->translationFault = NoFault;
486  }
487  }
488 
489  if (init_fault != NoFault) {
490  DPRINTF(MinorExecute, "Fault on memory inst: %s"
491  " initiateAcc: %s\n", *inst, init_fault->name());
492  fault = init_fault;
493  } else {
494  /* Only set this if the instruction passed its
495  * predicate */
496  if (!context.readMemAccPredicate()) {
497  DPRINTF(MinorMem, "No memory access for inst: %s\n", *inst);
498  assert(context.readPredicate());
499  }
500  passed_predicate = context.readPredicate();
501 
502  /* Set predicate in tracing */
503  if (inst->traceData)
504  inst->traceData->setPredicate(passed_predicate);
505 
506  /* If the instruction didn't pass its predicate
507  * or it is a predicated vector instruction and the
508  * associated predicate register is all-false (and so will not
509  * progress from here) Try to branch to correct and branch
510  * mis-prediction. */
511  if (!inst->inLSQ) {
512  /* Leave it up to commit to handle the fault */
513  lsq.pushFailedRequest(inst);
514  inst->inLSQ = true;
515  }
516  }
517 
518  /* Restore thread PC */
519  thread->pcState(old_pc);
520  issued = true;
521  }
522 
523  return issued;
524 }
525 
/** Increment a cyclic buffer index, wrapping back to 0 when
 *  cycle_size is reached.
 *  @param index current index, expected in [0, cycle_size)
 *  @param cycle_size length of the cyclic buffer
 *  @return the next index in cyclic order */
inline unsigned int
cyclicIndexInc(unsigned int index, unsigned int cycle_size)
{
    unsigned int ret = index + 1;

    if (ret == cycle_size)
        ret = 0;

    return ret;
}
537 
/** Decrement a cyclic buffer index, wrapping to cycle_size - 1 when
 *  moving back from 0.  Reworked to avoid the original's
 *  implementation-defined unsigned-to-int conversion (0u - 1 stored
 *  into an int) by testing for the wrap case explicitly.
 *  @param index current index, expected in [0, cycle_size)
 *  @param cycle_size length of the cyclic buffer
 *  @return the previous index in cyclic order */
inline unsigned int
cyclicIndexDec(unsigned int index, unsigned int cycle_size)
{
    if (index == 0)
        return cycle_size - 1;
    else
        return index - 1;
}
549 
550 unsigned int
552 {
553  const ForwardInstData *insts_in = getInput(thread_id);
554  ExecuteThreadInfo &thread = executeInfo[thread_id];
555 
556  /* Early termination if we have no instructions */
557  if (!insts_in)
558  return 0;
559 
560  /* Start from the first FU */
561  unsigned int fu_index = 0;
562 
563  /* Remains true while instructions are still being issued. If any
564  * instruction fails to issue, this is set to false and we exit issue.
565  * This strictly enforces in-order issue. For other issue behaviours,
566  * a more complicated test in the outer while loop below is needed. */
567  bool issued = true;
568 
569  /* Number of insts issues this cycle to check for issueLimit */
570  unsigned num_insts_issued = 0;
571 
572  /* Number of memory ops issues this cycle to check for memoryIssueLimit */
573  unsigned num_mem_insts_issued = 0;
574 
575  /* Number of instructions discarded this cycle in order to enforce a
576  * discardLimit. @todo, add that parameter? */
577  unsigned num_insts_discarded = 0;
578 
579  do {
580  MinorDynInstPtr inst = insts_in->insts[thread.inputIndex];
581  Fault fault = inst->fault;
582  bool discarded = false;
583  bool issued_mem_ref = false;
584 
585  if (inst->isBubble()) {
586  /* Skip */
587  issued = true;
588  } else if (cpu.getContext(thread_id)->status() ==
590  {
591  DPRINTF(MinorExecute, "Discarding inst: %s from suspended"
592  " thread\n", *inst);
593 
594  issued = true;
595  discarded = true;
596  } else if (inst->id.streamSeqNum != thread.streamSeqNum) {
597  DPRINTF(MinorExecute, "Discarding inst: %s as its stream"
598  " state was unexpected, expected: %d\n",
599  *inst, thread.streamSeqNum);
600  issued = true;
601  discarded = true;
602  } else {
603  /* Try and issue an instruction into an FU, assume we didn't and
604  * fix that in the loop */
605  issued = false;
606 
607  /* Try FU from 0 each instruction */
608  fu_index = 0;
609 
610  /* Try and issue a single instruction stepping through the
611  * available FUs */
612  do {
613  FUPipeline *fu = funcUnits[fu_index];
614 
615  DPRINTF(MinorExecute, "Trying to issue inst: %s to FU: %d\n",
616  *inst, fu_index);
617 
618  /* Does the examined fu have the OpClass-related capability
619  * needed to execute this instruction? Faults can always
620  * issue to any FU but probably should just 'live' in the
621  * inFlightInsts queue rather than having an FU. */
622  bool fu_is_capable = (!inst->isFault() ?
623  fu->provides(inst->staticInst->opClass()) : true);
624 
625  if (inst->isNoCostInst()) {
626  /* Issue free insts. to a fake numbered FU */
627  fu_index = noCostFUIndex;
628 
629  /* And start the countdown on activity to allow
630  * this instruction to get to the end of its FU */
632 
633  /* Mark the destinations for this instruction as
634  * busy */
635  scoreboard[thread_id].markupInstDests(inst, cpu.curCycle() +
636  Cycles(0), cpu.getContext(thread_id), false);
637 
638  DPRINTF(MinorExecute, "Issuing %s to %d\n", inst->id, noCostFUIndex);
639  inst->fuIndex = noCostFUIndex;
640  inst->extraCommitDelay = Cycles(0);
641  inst->extraCommitDelayExpr = NULL;
642 
643  /* Push the instruction onto the inFlight queue so
644  * it can be committed in order */
645  QueuedInst fu_inst(inst);
646  thread.inFlightInsts->push(fu_inst);
647 
648  issued = true;
649 
650  } else if (!fu_is_capable || fu->alreadyPushed()) {
651  /* Skip */
652  if (!fu_is_capable) {
653  DPRINTF(MinorExecute, "Can't issue as FU: %d isn't"
654  " capable\n", fu_index);
655  } else {
656  DPRINTF(MinorExecute, "Can't issue as FU: %d is"
657  " already busy\n", fu_index);
658  }
659  } else if (fu->stalled) {
660  DPRINTF(MinorExecute, "Can't issue inst: %s into FU: %d,"
661  " it's stalled\n",
662  *inst, fu_index);
663  } else if (!fu->canInsert()) {
664  DPRINTF(MinorExecute, "Can't issue inst: %s to busy FU"
665  " for another: %d cycles\n",
666  *inst, fu->cyclesBeforeInsert());
667  } else {
668  MinorFUTiming *timing = (!inst->isFault() ?
669  fu->findTiming(inst->staticInst) : NULL);
670 
671  const std::vector<Cycles> *src_latencies =
672  (timing ? &(timing->srcRegsRelativeLats)
673  : NULL);
674 
675  const std::vector<bool> *cant_forward_from_fu_indices =
676  &(fu->cantForwardFromFUIndices);
677 
678  if (timing && timing->suppress) {
679  DPRINTF(MinorExecute, "Can't issue inst: %s as extra"
680  " decoding is suppressing it\n",
681  *inst);
682  } else if (!scoreboard[thread_id].canInstIssue(inst,
683  src_latencies, cant_forward_from_fu_indices,
684  cpu.curCycle(), cpu.getContext(thread_id)))
685  {
686  DPRINTF(MinorExecute, "Can't issue inst: %s yet\n",
687  *inst);
688  } else {
689  /* Can insert the instruction into this FU */
690  DPRINTF(MinorExecute, "Issuing inst: %s"
691  " into FU %d\n", *inst,
692  fu_index);
693 
694  Cycles extra_dest_retire_lat = Cycles(0);
695  TimingExpr *extra_dest_retire_lat_expr = NULL;
696  Cycles extra_assumed_lat = Cycles(0);
697 
698  /* Add the extraCommitDelay and extraAssumeLat to
699  * the FU pipeline timings */
700  if (timing) {
701  extra_dest_retire_lat =
702  timing->extraCommitLat;
703  extra_dest_retire_lat_expr =
704  timing->extraCommitLatExpr;
705  extra_assumed_lat =
706  timing->extraAssumedLat;
707  }
708 
709  issued_mem_ref = inst->isMemRef();
710 
711  QueuedInst fu_inst(inst);
712 
713  /* Decorate the inst with FU details */
714  inst->fuIndex = fu_index;
715  inst->extraCommitDelay = extra_dest_retire_lat;
716  inst->extraCommitDelayExpr =
717  extra_dest_retire_lat_expr;
718 
719  if (issued_mem_ref) {
720  /* Remember which instruction this memory op
721  * depends on so that initiateAcc can be called
722  * early */
723  if (allowEarlyMemIssue) {
724  inst->instToWaitFor =
725  scoreboard[thread_id].execSeqNumToWaitFor(inst,
726  cpu.getContext(thread_id));
727 
728  if (lsq.getLastMemBarrier(thread_id) >
729  inst->instToWaitFor)
730  {
731  DPRINTF(MinorExecute, "A barrier will"
732  " cause a delay in mem ref issue of"
733  " inst: %s until after inst"
734  " %d(exec)\n", *inst,
735  lsq.getLastMemBarrier(thread_id));
736 
737  inst->instToWaitFor =
738  lsq.getLastMemBarrier(thread_id);
739  } else {
740  DPRINTF(MinorExecute, "Memory ref inst:"
741  " %s must wait for inst %d(exec)"
742  " before issuing\n",
743  *inst, inst->instToWaitFor);
744  }
745 
746  inst->canEarlyIssue = true;
747  }
748  /* Also queue this instruction in the memory ref
749  * queue to ensure in-order issue to the LSQ */
750  DPRINTF(MinorExecute, "Pushing mem inst: %s\n",
751  *inst);
752  thread.inFUMemInsts->push(fu_inst);
753  }
754 
755  /* Issue to FU */
756  fu->push(fu_inst);
757  /* And start the countdown on activity to allow
758  * this instruction to get to the end of its FU */
760 
761  /* Mark the destinations for this instruction as
762  * busy */
763  scoreboard[thread_id].markupInstDests(inst, cpu.curCycle() +
764  fu->description.opLat +
765  extra_dest_retire_lat +
766  extra_assumed_lat,
767  cpu.getContext(thread_id),
768  issued_mem_ref && extra_assumed_lat == Cycles(0));
769 
770  /* Push the instruction onto the inFlight queue so
771  * it can be committed in order */
772  thread.inFlightInsts->push(fu_inst);
773 
774  issued = true;
775  }
776  }
777 
778  fu_index++;
779  } while (fu_index != numFuncUnits && !issued);
780 
781  if (!issued)
782  DPRINTF(MinorExecute, "Didn't issue inst: %s\n", *inst);
783  }
784 
785  if (issued) {
786  /* Generate MinorTrace's MinorInst lines. Do this at commit
787  * to allow better instruction annotation? */
788  if (debug::MinorTrace && !inst->isBubble()) {
789  inst->minorTraceInst(*this,
790  cpu.threads[0]->getIsaPtr()->regClasses());
791  }
792 
793  /* Mark up barriers in the LSQ */
794  if (!discarded && inst->isInst() &&
795  inst->staticInst->isFullMemBarrier())
796  {
797  DPRINTF(MinorMem, "Issuing memory barrier inst: %s\n", *inst);
799  }
800 
801  if (inst->traceData && setTraceTimeOnIssue) {
802  inst->traceData->setWhen(curTick());
803  }
804 
805  if (issued_mem_ref)
806  num_mem_insts_issued++;
807 
808  if (discarded) {
809  num_insts_discarded++;
810  } else if (!inst->isBubble()) {
811  num_insts_issued++;
812 
813  if (num_insts_issued == issueLimit)
814  DPRINTF(MinorExecute, "Reached inst issue limit\n");
815  }
816 
817  thread.inputIndex++;
818  DPRINTF(MinorExecute, "Stepping to next inst inputIndex: %d\n",
819  thread.inputIndex);
820  }
821 
822  /* Got to the end of a line */
823  if (thread.inputIndex == insts_in->width()) {
824  popInput(thread_id);
825  /* Set insts_in to null to force us to leave the surrounding
826  * loop */
827  insts_in = NULL;
828 
830  DPRINTF(MinorExecute, "Wrapping\n");
831  insts_in = getInput(thread_id);
832  }
833  }
834  } while (insts_in && thread.inputIndex < insts_in->width() &&
835  /* We still have instructions */
836  fu_index != numFuncUnits && /* Not visited all FUs */
837  issued && /* We've not yet failed to issue an instruction */
838  num_insts_issued != issueLimit && /* Still allowed to issue */
839  num_mem_insts_issued != memoryIssueLimit);
840 
841  return num_insts_issued;
842 }
843 
844 bool
846 {
847  ThreadContext *thread = cpu.getContext(thread_id);
848  unsigned int num_pc_event_checks = 0;
849 
850  /* Handle PC events on instructions */
851  Addr oldPC;
852  do {
853  oldPC = thread->instAddr();
854  cpu.threads[thread_id]->pcEventQueue.service(oldPC, thread);
855  num_pc_event_checks++;
856  } while (oldPC != thread->instAddr());
857 
858  if (num_pc_event_checks > 1) {
859  DPRINTF(PCEvent, "Acting on PC Event to PC: %s\n",
860  thread->pcState());
861  }
862 
863  return num_pc_event_checks > 1;
864 }
865 
866 void
868 {
869  assert(!inst->isFault());
870 
871  MinorThread *thread = cpu.threads[inst->id.threadId];
872 
873  /* Increment the many and various inst and op counts in the
874  * thread and system */
875  if (!inst->staticInst->isMicroop() || inst->staticInst->isLastMicroop())
876  {
877  thread->numInst++;
878  thread->threadStats.numInsts++;
879  cpu.stats.numInsts++;
880 
881  /* Act on events related to instruction counts */
882  thread->comInstEventQueue.serviceEvents(thread->numInst);
883  }
884  thread->numOp++;
885  thread->threadStats.numOps++;
886  cpu.stats.numOps++;
887  cpu.stats.committedInstType[inst->id.threadId]
888  [inst->staticInst->opClass()]++;
889 
890  /* Set the CP SeqNum to the numOps commit number */
891  if (inst->traceData)
892  inst->traceData->setCPSeq(thread->numOp);
893 
894  cpu.probeInstCommit(inst->staticInst, inst->pc.instAddr());
895 }
896 
897 bool
898 Execute::commitInst(MinorDynInstPtr inst, bool early_memory_issue,
899  BranchData &branch, Fault &fault, bool &committed,
900  bool &completed_mem_issue)
901 {
902  ThreadID thread_id = inst->id.threadId;
903  ThreadContext *thread = cpu.getContext(thread_id);
904 
905  bool completed_inst = true;
906  fault = NoFault;
907 
908  /* Is the thread for this instruction suspended? In that case, just
909  * stall as long as there are no pending interrupts */
910  if (thread->status() == ThreadContext::Suspended &&
911  !isInterrupted(thread_id))
912  {
913  panic("We should never hit the case where we try to commit from a "
914  "suspended thread as the streamSeqNum should not match");
915  } else if (inst->isFault()) {
916  ExecContext context(cpu, *cpu.threads[thread_id], *this,
917  inst, zeroReg);
918 
919  DPRINTF(MinorExecute, "Fault inst reached Execute: %s\n",
920  inst->fault->name());
921 
922  fault = inst->fault;
923  inst->fault->invoke(thread, NULL);
924 
925  tryToBranch(inst, fault, branch);
926  } else if (inst->staticInst->isMemRef()) {
927  /* Memory accesses are executed in two parts:
928  * executeMemRefInst -- calculates the EA and issues the access
929  * to memory. This is done here.
930  * handleMemResponse -- handles the response packet, done by
931  * Execute::commit
932  *
933  * While the memory access is in its FU, the EA is being
934  * calculated. At the end of the FU, when it is ready to
935  * 'commit' (in this function), the access is presented to the
936  * memory queues. When a response comes back from memory,
937  * Execute::commit will commit it.
938  */
939  bool predicate_passed = false;
940  bool completed_mem_inst = executeMemRefInst(inst, branch,
941  predicate_passed, fault);
942 
943  if (completed_mem_inst && fault != NoFault) {
944  if (early_memory_issue) {
945  DPRINTF(MinorExecute, "Fault in early executing inst: %s\n",
946  fault->name());
947  /* Don't execute the fault, just stall the instruction
948  * until it gets to the head of inFlightInsts */
949  inst->canEarlyIssue = false;
950  /* Not completed as we'll come here again to pick up
951  * the fault when we get to the end of the FU */
952  completed_inst = false;
953  } else {
954  DPRINTF(MinorExecute, "Fault in execute: %s\n",
955  fault->name());
956  fault->invoke(thread, NULL);
957 
958  tryToBranch(inst, fault, branch);
959  completed_inst = true;
960  }
961  } else {
962  completed_inst = completed_mem_inst;
963  }
964  completed_mem_issue = completed_inst;
965  } else if (inst->isInst() && inst->staticInst->isFullMemBarrier() &&
967  {
968  DPRINTF(MinorExecute, "Can't commit data barrier inst: %s yet as"
969  " there isn't space in the store buffer\n", *inst);
970 
971  completed_inst = false;
972  } else if (inst->isInst() && inst->staticInst->isQuiesce()
973  && !branch.isBubble()){
974  /* This instruction can suspend, need to be able to communicate
975  * backwards, so no other branches may evaluate this cycle*/
976  completed_inst = false;
977  } else {
978  ExecContext context(cpu, *cpu.threads[thread_id], *this,
979  inst, zeroReg);
980 
981  DPRINTF(MinorExecute, "Committing inst: %s\n", *inst);
982 
983  fault = inst->staticInst->execute(&context,
984  inst->traceData);
985 
986  /* Set the predicate for tracing and dump */
987  if (inst->traceData)
988  inst->traceData->setPredicate(context.readPredicate());
989 
990  committed = true;
991 
992  if (fault != NoFault) {
993  if (inst->traceData) {
994  if (debug::ExecFaulting) {
995  inst->traceData->setFaulting(true);
996  } else {
997  delete inst->traceData;
998  inst->traceData = NULL;
999  }
1000  }
1001 
1002  DPRINTF(MinorExecute, "Fault in execute of inst: %s fault: %s\n",
1003  *inst, fault->name());
1004  fault->invoke(thread, inst->staticInst);
1005  }
1006 
1007  doInstCommitAccounting(inst);
1008  tryToBranch(inst, fault, branch);
1009  }
1010 
1011  if (completed_inst) {
1012  /* Keep a copy of this instruction's predictionSeqNum just in case
1013  * we need to issue a branch without an instruction (such as an
1014  * interrupt) */
1015  executeInfo[thread_id].lastPredictionSeqNum = inst->id.predictionSeqNum;
1016 
1017  /* Check to see if this instruction suspended the current thread. */
1018  if (!inst->isFault() &&
1019  thread->status() == ThreadContext::Suspended &&
1020  branch.isBubble() && /* It didn't branch too */
1021  !isInterrupted(thread_id)) /* Don't suspend if we have
1022  interrupts */
1023  {
1024  TheISA::PCState resume_pc = cpu.getContext(thread_id)->pcState();
1025 
1026  assert(resume_pc.microPC() == 0);
1027 
1028  DPRINTF(MinorInterrupt, "Suspending thread: %d from Execute"
1029  " inst: %s\n", thread_id, *inst);
1030 
1032 
1034  resume_pc, branch);
1035  }
1036  }
1037 
1038  return completed_inst;
1039 }
1040 
/* Commit instructions for one thread: drain memory responses from the LSQ
 * and retire instructions from the ends of the functional-unit pipelines.
 * At most commitLimit instructions (and memoryCommitLimit memory refs) are
 * committed per call; the loop stops early on a stream change, a fault, or
 * when no further instruction can be completed this cycle.
 * only_commit_microops: commit only up to the end of the current macroop
 *   (used when an interrupt/drain is pending).
 * discard: discard instructions instead of executing them.
 * branch: out-parameter through which any branch/stream change is signalled. */
1041 void
1042 Execute::commit(ThreadID thread_id, bool only_commit_microops, bool discard,
1043  BranchData &branch)
1044 {
1045  Fault fault = NoFault;
1046  Cycles now = cpu.curCycle();
1047  ExecuteThreadInfo &ex_info = executeInfo[thread_id];
1048 
/* NOTE(review): listing lines 1049-1073 (a block comment in the original
 * source describing the commit algorithm) are absent from this extract. */
1074  /* Has an instruction been completed? Once this becomes false, we stop
1075  * trying to complete instructions. */
1076  bool completed_inst = true;
1077 
1078  /* Number of insts committed this cycle to check against commitLimit */
1079  unsigned int num_insts_committed = 0;
1080 
1081  /* Number of memory access instructions committed to check against
1082  * memCommitLimit */
1083  unsigned int num_mem_refs_committed = 0;
1084 
1085  if (only_commit_microops && !ex_info.inFlightInsts->empty()) {
1086  DPRINTF(MinorInterrupt, "Only commit microops %s %d\n",
1087  *(ex_info.inFlightInsts->front().inst),
1088  ex_info.lastCommitWasEndOfMacroop);
1089  }
1090 
1091  while (!ex_info.inFlightInsts->empty() && /* Some more instructions to process */
1092  !branch.isStreamChange() && /* No real branch */
1093  fault == NoFault && /* No faults */
1094  completed_inst && /* Still finding instructions to execute */
1095  num_insts_committed != commitLimit /* Not reached commit limit */
1096  )
1097  {
1098  if (only_commit_microops) {
1099  DPRINTF(MinorInterrupt, "Committing tail of insts before"
1100  " interrupt: %s\n",
1101  *(ex_info.inFlightInsts->front().inst));
1102  }
1103 
1104  QueuedInst *head_inflight_inst = &(ex_info.inFlightInsts->front());
1105 
1106  InstSeqNum head_exec_seq_num =
1107  head_inflight_inst->inst->id.execSeqNum;
1108 
1109  /* The instruction we actually process if completed_inst
1110  * remains true to the end of the loop body.
1111  * Start by considering the head of the in flight insts queue */
1112  MinorDynInstPtr inst = head_inflight_inst->inst;
1113 
1114  bool committed_inst = false;
1115  bool discard_inst = false;
1116  bool completed_mem_ref = false;
1117  bool issued_mem_ref = false;
1118  bool early_memory_issue = false;
1119 
1120  /* Must set this again to go around the loop */
1121  completed_inst = false;
1122 
1123  /* If we're just completing a macroop before an interrupt or drain,
1124  * can we still commit another microop (rather than a memory response)
1125  * without crossing into the next full instruction? */
1126  bool can_commit_insts = !ex_info.inFlightInsts->empty() &&
1127  !(only_commit_microops && ex_info.lastCommitWasEndOfMacroop);
1128 
1129  /* Can we find a mem response for this inst */
1130  LSQ::LSQRequestPtr mem_response =
1131  (inst->inLSQ ? lsq.findResponse(inst) : NULL);
1132 
1133  DPRINTF(MinorExecute, "Trying to commit canCommitInsts: %d\n",
1134  can_commit_insts);
1135 
1136  /* Test for PC events after every instruction */
1137  if (isInbetweenInsts(thread_id) && tryPCEvents(thread_id)) {
1138  ThreadContext *thread = cpu.getContext(thread_id);
1139 
1140  /* Branch as there was a change in PC */
/* NOTE(review): listing line 1141 (the updateBranchData call that raises
 * this branch) is missing from this extract. */
1142  MinorDynInst::bubble(), thread->pcState(), branch);
1143  } else if (mem_response &&
1144  num_mem_refs_committed < memoryCommitLimit)
1145  {
1146  /* Try to commit from the memory responses next */
1147  discard_inst = inst->id.streamSeqNum !=
1148  ex_info.streamSeqNum || discard;
1149 
1150  DPRINTF(MinorExecute, "Trying to commit mem response: %s\n",
1151  *inst);
1152 
1153  /* Complete or discard the response */
1154  if (discard_inst) {
1155  DPRINTF(MinorExecute, "Discarding mem inst: %s as its"
1156  " stream state was unexpected, expected: %d\n",
1157  *inst, ex_info.streamSeqNum);
1158 
1159  lsq.popResponse(mem_response);
1160  } else {
1161  handleMemResponse(inst, mem_response, branch, fault);
1162  committed_inst = true;
1163  }
1164 
1165  completed_mem_ref = true;
1166  completed_inst = true;
1167  } else if (can_commit_insts) {
1168  /* If true, this instruction will, subject to timing tweaks,
1169  * be considered for completion. try_to_commit flattens
1170  * the `if' tree a bit and allows other tests for inst
1171  * commit to be inserted here. */
1172  bool try_to_commit = false;
1173 
1174  /* Try and issue memory ops early if they:
1175  * - Can push a request into the LSQ
1176  * - Have reached the end of their FUs
1177  * - Have had all their dependencies satisfied
1178  * - Are from the right stream
1179  *
1180  * For any other case, leave it to the normal instruction
1181  * issue below to handle them.
1182  */
1183  if (!ex_info.inFUMemInsts->empty() && lsq.canRequest()) {
1184  DPRINTF(MinorExecute, "Trying to commit from mem FUs\n");
1185 
1186  const MinorDynInstPtr head_mem_ref_inst =
1187  ex_info.inFUMemInsts->front().inst;
1188  FUPipeline *fu = funcUnits[head_mem_ref_inst->fuIndex];
1189  const MinorDynInstPtr &fu_inst = fu->front().inst;
1190 
1191  /* Use this, possibly out of order, inst as the one
1192  * to 'commit'/send to the LSQ */
1193  if (!fu_inst->isBubble() &&
1194  !fu_inst->inLSQ &&
1195  fu_inst->canEarlyIssue &&
1196  ex_info.streamSeqNum == fu_inst->id.streamSeqNum &&
1197  head_exec_seq_num > fu_inst->instToWaitFor)
1198  {
1199  DPRINTF(MinorExecute, "Issuing mem ref early"
1200  " inst: %s instToWaitFor: %d\n",
1201  *(fu_inst), fu_inst->instToWaitFor);
1202 
1203  inst = fu_inst;
1204  try_to_commit = true;
1205  early_memory_issue = true;
1206  completed_inst = true;
1207  }
1208  }
1209 
1210  /* Try and commit FU-less insts */
1211  if (!completed_inst && inst->isNoCostInst()) {
1212  DPRINTF(MinorExecute, "Committing no cost inst: %s", *inst);
1213 
1214  try_to_commit = true;
1215  completed_inst = true;
1216  }
1217 
1218  /* Try to issue from the ends of FUs and the inFlightInsts
1219  * queue */
1220  if (!completed_inst && !inst->inLSQ) {
1221  DPRINTF(MinorExecute, "Trying to commit from FUs\n");
1222 
1223  /* Try to commit from a functional unit */
1224  /* Is the head inst of the expected inst's FU actually the
1225  * expected inst? */
1226  QueuedInst &fu_inst =
1227  funcUnits[inst->fuIndex]->front();
1228  InstSeqNum fu_inst_seq_num = fu_inst.inst->id.execSeqNum;
1229 
1230  if (fu_inst.inst->isBubble()) {
1231  /* No instruction ready */
1232  completed_inst = false;
1233  } else if (fu_inst_seq_num != head_exec_seq_num) {
1234  /* Past instruction: we must have already executed it
1235  * in the same cycle and so the head inst isn't
1236  * actually at the end of its pipeline
1237  * Future instruction: handled above and only for
1238  * mem refs on their way to the LSQ */
1239  } else if (fu_inst.inst->id == inst->id) {
1240  /* All instructions can be committed if they have the
1241  * right execSeqNum and there are no in-flight
1242  * mem insts before us */
1243  try_to_commit = true;
1244  completed_inst = true;
1245  }
1246  }
1247 
1248  if (try_to_commit) {
1249  discard_inst = inst->id.streamSeqNum !=
1250  ex_info.streamSeqNum || discard;
1251 
1252  /* Is this instruction discardable as its streamSeqNum
1253  * doesn't match? */
1254  if (!discard_inst) {
1255  /* Try to commit or discard a non-memory instruction.
1256  * Memory ops are actually 'committed' from this FUs
1257  * and 'issued' into the memory system so we need to
1258  * account for them later (commit_was_mem_issue gets
1259  * set) */
1260  if (inst->extraCommitDelayExpr) {
1261  DPRINTF(MinorExecute, "Evaluating expression for"
1262  " extra commit delay inst: %s\n", *inst);
1263 
1264  ThreadContext *thread = cpu.getContext(thread_id);
1265 
1266  TimingExprEvalContext context(inst->staticInst,
1267  thread, NULL);
1268 
1269  uint64_t extra_delay = inst->extraCommitDelayExpr->
1270  eval(context);
1271 
1272  DPRINTF(MinorExecute, "Extra commit delay expr"
1273  " result: %d\n", extra_delay);
1274 
/* Cap the evaluated delay; very long delays are reported and dropped. */
1275  if (extra_delay < 128) {
1276  inst->extraCommitDelay += Cycles(extra_delay);
1277  } else {
1278  DPRINTF(MinorExecute, "Extra commit delay was"
1279  " very long: %d\n", extra_delay);
1280  }
1281  inst->extraCommitDelayExpr = NULL;
1282  }
1283 
1284  /* Move the extraCommitDelay from the instruction
1285  * into the minimumCommitCycle */
1286  if (inst->extraCommitDelay != Cycles(0)) {
1287  inst->minimumCommitCycle = cpu.curCycle() +
1288  inst->extraCommitDelay;
1289  inst->extraCommitDelay = Cycles(0);
1290  }
1291 
1292  /* @todo Think about making lastMemBarrier be
1293  * MAX_UINT_64 to avoid using 0 as a marker value */
1294  if (!inst->isFault() && inst->isMemRef() &&
1295  lsq.getLastMemBarrier(thread_id) <
1296  inst->id.execSeqNum &&
1297  lsq.getLastMemBarrier(thread_id) != 0)
1298  {
1299  DPRINTF(MinorExecute, "Not committing inst: %s yet"
1300  " as there are incomplete barriers in flight\n",
1301  *inst);
1302  completed_inst = false;
1303  } else if (inst->minimumCommitCycle > now) {
1304  DPRINTF(MinorExecute, "Not committing inst: %s yet"
1305  " as it wants to be stalled for %d more cycles\n",
1306  *inst, inst->minimumCommitCycle - now);
1307  completed_inst = false;
1308  } else {
1309  completed_inst = commitInst(inst,
1310  early_memory_issue, branch, fault,
1311  committed_inst, issued_mem_ref);
1312  }
1313  } else {
1314  /* Discard instruction */
1315  completed_inst = true;
1316  }
1317 
1318  if (completed_inst) {
1319  /* Allow the pipeline to advance. If the FU head
1320  * instruction wasn't the inFlightInsts head
1321  * but had already been committed, it would have
1322  * unstalled the pipeline before here */
1323  if (inst->fuIndex != noCostFUIndex) {
1324  DPRINTF(MinorExecute, "Unstalling %d for inst %s\n", inst->fuIndex, inst->id);
1325  funcUnits[inst->fuIndex]->stalled = false;
1326  }
1327  }
1328  }
1329  } else {
1330  DPRINTF(MinorExecute, "No instructions to commit\n");
1331  completed_inst = false;
1332  }
1333 
1334  /* All discardable instructions must also be 'completed' by now */
1335  assert(!(discard_inst && !completed_inst));
1336 
1337  /* Instruction committed but was discarded due to streamSeqNum
1338  * mismatch */
1339  if (discard_inst) {
1340  DPRINTF(MinorExecute, "Discarding inst: %s as its stream"
1341  " state was unexpected, expected: %d\n",
1342  *inst, ex_info.streamSeqNum);
1343 
1344  if (fault == NoFault)
/* NOTE(review): listing line 1345 (the body of this `if`, presumably a
 * statistics/accounting call for the discarded inst) is missing from
 * this extract. */
1346  }
1347 
1348  /* Mark the mem inst as being in the LSQ */
1349  if (issued_mem_ref) {
1350  inst->fuIndex = 0;
1351  inst->inLSQ = true;
1352  }
1353 
1354  /* Pop issued (to LSQ) and discarded mem refs from the inFUMemInsts
1355  * as they've *definitely* exited the FUs */
1356  if (completed_inst && inst->isMemRef()) {
1357  /* The MemRef could have been discarded from the FU or the memory
1358  * queue, so just check an FU instruction */
1359  if (!ex_info.inFUMemInsts->empty() &&
1360  ex_info.inFUMemInsts->front().inst == inst)
1361  {
1362  ex_info.inFUMemInsts->pop();
1363  }
1364  }
1365 
1366  if (completed_inst && !(issued_mem_ref && fault == NoFault)) {
1367  /* Note that this includes discarded insts */
1368  DPRINTF(MinorExecute, "Completed inst: %s\n", *inst);
1369 
1370  /* Got to the end of a full instruction? */
1371  ex_info.lastCommitWasEndOfMacroop = inst->isFault() ||
1372  inst->isLastOpInInst();
1373 
1374  /* lastPredictionSeqNum is kept as a convenience to prevent its
1375  * value from changing too much on the minorview display */
1376  ex_info.lastPredictionSeqNum = inst->id.predictionSeqNum;
1377 
1378  /* Finished with the inst, remove it from the inst queue and
1379  * clear its dependencies */
1380  ex_info.inFlightInsts->pop();
1381 
1382  /* Complete barriers in the LSQ/move to store buffer */
1383  if (inst->isInst() && inst->staticInst->isFullMemBarrier()) {
1384  DPRINTF(MinorMem, "Completing memory barrier"
1385  " inst: %s committed: %d\n", *inst, committed_inst);
1386  lsq.completeMemBarrierInst(inst, committed_inst);
1387  }
1388 
1389  scoreboard[thread_id].clearInstDests(inst, inst->isMemRef());
1390  }
1391 
1392  /* Handle per-cycle instruction counting */
1393  if (committed_inst) {
1394  bool is_no_cost_inst = inst->isNoCostInst();
1395 
1396  /* Don't show no cost instructions as having taken a commit
1397  * slot */
1398  if (debug::MinorTrace && !is_no_cost_inst)
1399  ex_info.instsBeingCommitted.insts[num_insts_committed] = inst;
1400 
1401  if (!is_no_cost_inst)
1402  num_insts_committed++;
1403 
1404  if (num_insts_committed == commitLimit)
1405  DPRINTF(MinorExecute, "Reached inst commit limit\n");
1406 
1407  /* Re-set the time of the instruction if that's required for
1408  * tracing */
1409  if (inst->traceData) {
/* NOTE(review): listing line 1410 (a guard between this `if` and the
 * setWhen call) is missing from this extract. */
1411  inst->traceData->setWhen(curTick());
1412  inst->traceData->dump();
1413  }
1414 
1415  if (completed_mem_ref)
1416  num_mem_refs_committed++;
1417 
1418  if (num_mem_refs_committed == memoryCommitLimit)
1419  DPRINTF(MinorExecute, "Reached mem ref commit limit\n");
1420  }
1421  }
1422 }
1423 
/* True when the thread sits on a clean instruction boundary: the last
 * commit ended a macroop and the LSQ has no accesses in flight.
 * NOTE(review): the signature line (listing line 1425,
 * Execute::isInbetweenInsts(ThreadID thread_id)) is missing from this
 * extract. */
1424 bool
1426 {
1427  return executeInfo[thread_id].lastCommitWasEndOfMacroop &&
1428  !lsq.accessesInFlight();
1429 }
1430 
/* Per-cycle evaluation of the Execute stage: step the LSQ, check/signal
 * interrupts, commit from the chosen thread, issue from the chosen thread,
 * advance the FU pipelines, and decide whether the stage needs to tick
 * again next cycle (activity tracking).
 * NOTE(review): the signature line (listing line 1432, Execute::evaluate())
 * is missing from this extract. */
1431 void
1433 {
1434  if (!inp.outputWire->isBubble())
1435  inputBuffer[inp.outputWire->threadId].setTail(*inp.outputWire);
1436 
1437  BranchData &branch = *out.inputWire;
1438 
1439  unsigned int num_issued = 0;
1440 
1441  /* Do all the cycle-wise activities for dcachePort here to potentially
1442  * free up input spaces in the LSQ's requests queue */
1443  lsq.step();
1444 
1445  /* Check interrupts first. Will halt commit if interrupt found */
1446  bool interrupted = false;
1447  ThreadID interrupt_tid = checkInterrupts(branch, interrupted);
1448 
1449  if (interrupt_tid != InvalidThreadID) {
1450  /* Signalling an interrupt this cycle, not issuing/committing from
1451  * any other threads */
1452  } else if (!branch.isBubble()) {
1453  /* It's important that this is here to carry Fetch1 wakeups to Fetch1
1454  * without overwriting them */
1455  DPRINTF(MinorInterrupt, "Execute skipping a cycle to allow old"
1456  " branch to complete\n");
1457  } else {
1458  ThreadID commit_tid = getCommittingThread();
1459 
1460  if (commit_tid != InvalidThreadID) {
1461  ExecuteThreadInfo& commit_info = executeInfo[commit_tid];
1462 
1463  DPRINTF(MinorExecute, "Attempting to commit [tid:%d]\n",
1464  commit_tid);
1465  /* commit can set stalled flags observable to issue and so *must* be
1466  * called first */
1467  if (commit_info.drainState != NotDraining) {
1468  if (commit_info.drainState == DrainCurrentInst) {
1469  /* Commit only micro-ops, don't kill anything else */
1470  commit(commit_tid, true, false, branch);
1471 
1472  if (isInbetweenInsts(commit_tid))
1473  setDrainState(commit_tid, DrainHaltFetch);
1474 
1475  /* Discard any generated branch */
1476  branch = BranchData::bubble();
1477  } else if (commit_info.drainState == DrainAllInsts) {
1478  /* Kill all instructions */
1479  while (getInput(commit_tid))
1480  popInput(commit_tid);
1481  commit(commit_tid, false, true, branch);
1482  }
1483  } else {
1484  /* Commit micro-ops only if interrupted. Otherwise, commit
1485  * anything you like */
1486  DPRINTF(MinorExecute, "Committing micro-ops for interrupt[tid:%d]\n",
1487  commit_tid);
1488  bool only_commit_microops = interrupted &&
1489  hasInterrupt(commit_tid);
1490  commit(commit_tid, only_commit_microops, false, branch);
1491  }
1492 
1493  /* Halt fetch, but don't do it until we have the current instruction in
1494  * the bag */
1495  if (commit_info.drainState == DrainHaltFetch) {
/* NOTE(review): listing line 1496 (the updateBranchData call that halts
 * fetch) is missing from this extract, as is line 1499. */
1497  MinorDynInst::bubble(), TheISA::PCState(0), branch);
1498 
1500  setDrainState(commit_tid, DrainAllInsts);
1501  }
1502  }
1503  ThreadID issue_tid = getIssuingThread();
1504  /* This will issue merrily even when interrupted in the sure and
1505  * certain knowledge that the interrupt will change the stream */
1506  if (issue_tid != InvalidThreadID) {
1507  DPRINTF(MinorExecute, "Attempting to issue [tid:%d]\n",
1508  issue_tid);
1509  num_issued = issue(issue_tid);
1510  }
1511 
1512  }
1513 
1514  /* Run logic to step functional units + decide if we are active on the next
1515  * clock cycle */
1516  std::vector<MinorDynInstPtr> next_issuable_insts;
1517  bool can_issue_next = false;
1518 
1519  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1520  /* Find the next issuable instruction for each thread and see if it can
1521  be issued */
1522  if (getInput(tid)) {
1523  unsigned int input_index = executeInfo[tid].inputIndex;
1524  MinorDynInstPtr inst = getInput(tid)->insts[input_index];
1525  if (inst->isFault()) {
1526  can_issue_next = true;
1527  } else if (!inst->isBubble()) {
1528  next_issuable_insts.push_back(inst);
1529  }
1530  }
1531  }
1532 
1533  bool becoming_stalled = true;
1534 
1535  /* Advance the pipelines and note whether they still need to be
1536  * advanced */
1537  for (unsigned int i = 0; i < numFuncUnits; i++) {
1538  FUPipeline *fu = funcUnits[i];
1539  fu->advance();
1540 
1541  /* If we need to tick again, the pipeline will have been left or set
1542  * to be unstalled */
1543  if (fu->occupancy !=0 && !fu->stalled)
1544  becoming_stalled = false;
1545 
1546  /* Could we possibly issue the next instruction from any thread?
1547  * This is quite an expensive test and is only used to determine
1548  * if the CPU should remain active, only run it if we aren't sure
1549  * we are active next cycle yet */
1550  for (auto inst : next_issuable_insts) {
1551  if (!fu->stalled && fu->provides(inst->staticInst->opClass()) &&
1552  scoreboard[inst->id.threadId].canInstIssue(inst,
1553  NULL, NULL, cpu.curCycle() + Cycles(1),
1554  cpu.getContext(inst->id.threadId))) {
1555  can_issue_next = true;
1556  break;
1557  }
1558  }
1559  }
1560 
1561  bool head_inst_might_commit = false;
1562 
1563  /* Could the head in flight insts be committed */
1564  for (auto const &info : executeInfo) {
1565  if (!info.inFlightInsts->empty()) {
1566  const QueuedInst &head_inst = info.inFlightInsts->front();
1567 
1568  if (head_inst.inst->isNoCostInst()) {
1569  head_inst_might_commit = true;
1570  } else {
1571  FUPipeline *fu = funcUnits[head_inst.inst->fuIndex];
1572  if ((fu->stalled &&
1573  fu->front().inst->id == head_inst.inst->id) ||
1574  lsq.findResponse(head_inst.inst))
1575  {
1576  head_inst_might_commit = true;
1577  break;
1578  }
1579  }
1580  }
1581  }
1582 
1583  DPRINTF(Activity, "Need to tick num issued insts: %s%s%s%s%s%s\n",
1584  (num_issued != 0 ? " (issued some insts)" : ""),
1585  (becoming_stalled ? "(becoming stalled)" : "(not becoming stalled)"),
1586  (can_issue_next ? " (can issued next inst)" : ""),
1587  (head_inst_might_commit ? "(head inst might commit)" : ""),
1588  (lsq.needsToTick() ? " (LSQ needs to tick)" : ""),
1589  (interrupted ? " (interrupted)" : ""));
1590 
1591  bool need_to_tick =
1592  num_issued != 0 || /* Issued some insts this cycle */
1593  !becoming_stalled || /* Some FU pipelines can still move */
1594  can_issue_next || /* Can still issue a new inst */
1595  head_inst_might_commit || /* Could possibly commit the next inst */
1596  lsq.needsToTick() || /* Must step the dcache port */
1597  interrupted; /* There are pending interrupts */
1598 
1599  if (!need_to_tick) {
1600  DPRINTF(Activity, "The next cycle might be skippable as there are no"
1601  " advanceable FUs\n");
1602  }
1603 
1604  /* Wake up if we need to tick again */
1605  if (need_to_tick)
/* NOTE(review): listing line 1606 (the cpu wake-up call, the body of this
 * `if`) is missing from this extract, as is line 1610 (the activity
 * recording for the following buffer). */
1607 
1608  /* Note activity of following buffer */
1609  if (!branch.isBubble())
1611 
1612  /* Make sure the input (if any left) is pushed */
1613  if (!inp.outputWire->isBubble())
1614  inputBuffer[inp.outputWire->threadId].pushTail();
1615 }
1616 
/* Check all threads for pending interrupts, round-robin from the last
 * serviced thread (interruptPriority).  Returns the ThreadID whose
 * interrupt was taken (and records it as the new priority point), or
 * InvalidThreadID if no interrupt was acted on.  Sets `interrupted` if any
 * thread had an interrupt signalled, even if it was not taken this cycle. */
1617 ThreadID
1618 Execute::checkInterrupts(BranchData& branch, bool& interrupted)
1619 {
/* NOTE(review): listing line 1620 (the declaration/initialisation of `tid`,
 * presumably from interruptPriority) is missing from this extract. */
1621  /* Evaluate interrupts in round-robin based upon service */
1622  do {
1623  /* Has an interrupt been signalled? This may not be acted on
1624  * straight away so this is different from took_interrupt */
1625  bool thread_interrupted = false;
1626 
1627  if (FullSystem && cpu.getInterruptController(tid)) {
1628  /* This is here because it seems that after drainResume the
1629  * interrupt controller isn't always set */
1630  thread_interrupted = executeInfo[tid].drainState == NotDraining &&
1631  isInterrupted(tid);
1632  interrupted = interrupted || thread_interrupted;
1633  } else {
1634  DPRINTF(MinorInterrupt, "No interrupt controller\n");
1635  }
1636  DPRINTF(MinorInterrupt, "[tid:%d] thread_interrupted?=%d isInbetweenInsts?=%d\n",
1637  tid, thread_interrupted, isInbetweenInsts(tid));
1638  /* Act on interrupts */
1639  if (thread_interrupted && isInbetweenInsts(tid)) {
1640  if (takeInterrupt(tid, branch)) {
1641  interruptPriority = tid;
1642  return tid;
1643  }
1644  } else {
1645  tid = (tid + 1) % cpu.numThreads;
1646  }
1647  } while (tid != interruptPriority);
1648 
1649  return InvalidThreadID;
1650 }
1651 
/* True when the given thread has an interrupt pending and is not draining.
 * Only meaningful in full-system mode with an interrupt controller present.
 * NOTE(review): the signature line (listing line 1653, presumably
 * Execute::hasInterrupt(ThreadID thread_id)) is missing from this extract. */
1652 bool
1654 {
1655  if (FullSystem && cpu.getInterruptController(thread_id)) {
1656  return executeInfo[thread_id].drainState == NotDraining &&
1657  isInterrupted(thread_id);
1658  }
1659 
1660  return false;
1661 }
1662 
/* Emit MinorTrace debug output for the Execute stage: committed insts,
 * LSQ/input-buffer/scoreboard state, and per-FU stall flags.  Note that
 * only thread 0's executeInfo is reported here.
 * NOTE(review): the signature line (listing line 1664,
 * Execute::minorTrace() const) is missing from this extract. */
1663 void
1665 {
1666  std::ostringstream insts;
1667  std::ostringstream stalled;
1668 
1669  executeInfo[0].instsBeingCommitted.reportData(insts);
1670  lsq.minorTrace();
1671  inputBuffer[0].minorTrace();
1672  scoreboard[0].minorTrace();
1673 
1674  /* Report functional unit stalling in one string */
/* '1' marks a stalled FU, 'E' an unstalled (empty-of-stall) one. */
1675  unsigned int i = 0;
1676  while (i < numFuncUnits)
1677  {
1678  stalled << (funcUnits[i]->stalled ? '1' : 'E');
1679  i++;
1680  if (i != numFuncUnits)
1681  stalled << ',';
1682  }
1683 
1684  minor::minorTrace("insts=%s inputIndex=%d streamSeqNum=%d"
1685  " stalled=%s drainState=%d isInbetweenInsts=%d\n",
1686  insts.str(), executeInfo[0].inputIndex, executeInfo[0].streamSeqNum,
1687  stalled.str(), executeInfo[0].drainState, isInbetweenInsts(0));
1688 
1689  std::for_each(funcUnits.begin(), funcUnits.end(),
1690  std::mem_fn(&FUPipeline::minorTrace));
1691 
1692  executeInfo[0].inFlightInsts->minorTrace();
1693  executeInfo[0].inFUMemInsts->minorTrace();
1694 }
1695 
/* Pick the next thread to commit from according to the CPU's thread
 * policy (single-threaded / round-robin / random).  A thread is eligible
 * when its in-flight head instruction could actually make progress this
 * cycle: it has an LSQ response waiting, can be transferred early to the
 * LSQ, or sits completed at the head of its FU.  Updates commitPriority
 * for round-robin fairness.
 * NOTE(review): the signature line (listing line 1697,
 * Execute::getCommittingThread()) is missing from this extract. */
1696 inline ThreadID
1698 {
1699  std::vector<ThreadID> priority_list;
1700 
1701  switch (cpu.threadPolicy) {
1702  case enums::SingleThreaded:
1703  return 0;
1704  case enums::RoundRobin:
1705  priority_list = cpu.roundRobinPriority(commitPriority);
1706  break;
1707  case enums::Random:
1708  priority_list = cpu.randomPriority();
1709  break;
1710  default:
1711  panic("Invalid thread policy");
1712  }
1713 
1714  for (auto tid : priority_list) {
1715  ExecuteThreadInfo &ex_info = executeInfo[tid];
1716  bool can_commit_insts = !ex_info.inFlightInsts->empty();
1717  if (can_commit_insts) {
1718  QueuedInst *head_inflight_inst = &(ex_info.inFlightInsts->front());
1719  MinorDynInstPtr inst = head_inflight_inst->inst;
1720 
/* An inst already in the LSQ only commits when a response is ready. */
1721  can_commit_insts = can_commit_insts &&
1722  (!inst->inLSQ || (lsq.findResponse(inst) != NULL));
1723 
1724  if (!inst->inLSQ) {
/* Mirror commit()'s early-memory-issue test: a mem ref at the end of
 * its FU that can be pushed into the LSQ ahead of order. */
1725  bool can_transfer_mem_inst = false;
1726  if (!ex_info.inFUMemInsts->empty() && lsq.canRequest()) {
1727  const MinorDynInstPtr head_mem_ref_inst =
1728  ex_info.inFUMemInsts->front().inst;
1729  FUPipeline *fu = funcUnits[head_mem_ref_inst->fuIndex];
1730  const MinorDynInstPtr &fu_inst = fu->front().inst;
1731  can_transfer_mem_inst =
1732  !fu_inst->isBubble() &&
1733  fu_inst->id.threadId == tid &&
1734  !fu_inst->inLSQ &&
1735  fu_inst->canEarlyIssue &&
1736  inst->id.execSeqNum > fu_inst->instToWaitFor;
1737  }
1738 
1739  bool can_execute_fu_inst = inst->fuIndex == noCostFUIndex;
1740  if (can_commit_insts && !can_transfer_mem_inst &&
1741  inst->fuIndex != noCostFUIndex)
1742  {
1743  QueuedInst& fu_inst = funcUnits[inst->fuIndex]->front();
1744  can_execute_fu_inst = !fu_inst.inst->isBubble() &&
1745  fu_inst.inst->id == inst->id;
1746  }
1747 
1748  can_commit_insts = can_commit_insts &&
1749  (can_transfer_mem_inst || can_execute_fu_inst);
1750  }
1751  }
1752 
1753 
1754  if (can_commit_insts) {
1755  commitPriority = tid;
1756  return tid;
1757  }
1758  }
1759 
1760  return InvalidThreadID;
1761 }
1762 
/* Pick the next thread to issue from according to the CPU's thread policy.
 * A thread is eligible simply when it has input available; updates
 * issuePriority for round-robin fairness.
 * NOTE(review): the signature line (listing line 1764,
 * Execute::getIssuingThread()) is missing from this extract. */
1763 inline ThreadID
1765 {
1766  std::vector<ThreadID> priority_list;
1767 
1768  switch (cpu.threadPolicy) {
1769  case enums::SingleThreaded:
1770  return 0;
1771  case enums::RoundRobin:
1772  priority_list = cpu.roundRobinPriority(issuePriority);
1773  break;
1774  case enums::Random:
1775  priority_list = cpu.randomPriority();
1776  break;
1777  default:
1778  panic("Invalid thread scheduling policy.");
1779  }
1780 
1781  for (auto tid : priority_list) {
1782  if (getInput(tid)) {
1783  issuePriority = tid;
1784  return tid;
1785  }
1786  }
1787 
1788  return InvalidThreadID;
1789 }
1790 
/* Resume from a drain: clear the drain state of every thread.
 * NOTE(review): the signature line (listing line 1792,
 * Execute::drainResume()) and listing line 1800 (presumably a CPU wake-up
 * call) are missing from this extract. */
1791 void
1793 {
1794  DPRINTF(Drain, "MinorExecute drainResume\n");
1795 
1796  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1797  setDrainState(tid, NotDraining);
1798  }
1799 
1801 }
1802 
/* Pretty-print an Execute::DrainState for debug output; unknown values are
 * printed as "Drain-<n>".
 * NOTE(review): the three `case` label lines (listing lines 1810, 1813 and
 * 1816 — DrainCurrentInst, DrainHaltFetch and DrainAllInsts) are missing
 * from this extract; each os << below belongs to one of them. */
1803 std::ostream &operator <<(std::ostream &os, Execute::DrainState state)
1804 {
1805  switch (state)
1806  {
1807  case Execute::NotDraining:
1808  os << "NotDraining";
1809  break;
1811  os << "DrainCurrentInst";
1812  break;
1814  os << "DrainHaltFetch";
1815  break;
1817  os << "DrainAllInsts";
1818  break;
1819  default:
1820  os << "Drain-" << static_cast<int>(state);
1821  break;
1822  }
1823 
1824  return os;
1825 }
1826 
/* Record a new drain state for one thread (with Drain debug tracing).
 * NOTE(review): the signature line (listing line 1828, presumably
 * Execute::setDrainState(ThreadID thread_id, DrainState state)) is missing
 * from this extract. */
1827 void
1829 {
1830  DPRINTF(Drain, "setDrainState[%d]: %s\n", thread_id, state);
1831  executeInfo[thread_id].drainState = state;
1832 }
1833 
/* Begin draining: move every NotDraining thread into a draining state and
 * report whether draining is already complete (0) or still in progress (1),
 * like the SimObject drain interface.
 * NOTE(review): the signature line (listing line 1835, Execute::drain())
 * and listing lines 1841, 1848 and 1850 (presumably setDrainState calls —
 * the bodies of the if/else below) are missing from this extract. */
1834 unsigned int
1836 {
1837  DPRINTF(Drain, "MinorExecute drain\n");
1838 
1839  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1840  if (executeInfo[tid].drainState == NotDraining) {
1842 
1843  /* Go to DrainCurrentInst if we're between microops
1844  * or waiting on an unbufferable memory operation.
1845  * Otherwise we can go straight to DrainHaltFetch
1846  */
1847  if (isInbetweenInsts(tid))
1849  else
1851  }
1852  }
1853  return (isDrained() ? 0 : 1);
1854 }
1855 
/* True once the LSQ is drained and every thread has both an empty input
 * buffer and no in-flight instructions.
 * NOTE(review): the signature line (listing line 1857,
 * Execute::isDrained()) is missing from this extract. */
1856 bool
1858 {
1859  if (!lsq.isDrained())
1860  return false;
1861 
1862  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1863  if (!inputBuffer[tid].empty() ||
1864  !executeInfo[tid].inFlightInsts->empty()) {
1865 
1866  return false;
1867  }
1868  }
1869 
1870  return true;
1871 }
1872 
/* Destructor: free the owned FU pipelines and each thread's in-flight
 * instruction queue.
 * NOTE(review): the signature line (listing line 1873,
 * Execute::~Execute()) is missing from this extract. */
1874 {
1875  for (unsigned int i = 0; i < numFuncUnits; i++)
1876  delete funcUnits[i];
1877 
1878  for (ThreadID tid = 0; tid < cpu.numThreads; tid++)
1879  delete executeInfo[tid].inFlightInsts;
1880 }
1881 
/* True when the instruction belongs to its thread's current instruction
 * stream (matching streamSeqNum), i.e. it has not been squashed by a
 * branch/stream change.
 * NOTE(review): the signature line (listing line 1883, presumably
 * Execute::instIsRightStream(MinorDynInstPtr inst)) is missing from this
 * extract. */
1882 bool
1884 {
1885  return inst->id.streamSeqNum == executeInfo[inst->id.threadId].streamSeqNum;
1886 }
1887 
/* True when the instruction is at the head of its thread's in-flight
 * instruction queue (false if the queue is empty).
 * NOTE(review): the signature line (listing line 1889, presumably
 * Execute::instIsHeadInst(MinorDynInstPtr inst)) is missing from this
 * extract. */
1888 bool
1890 {
1891  bool ret = false;
1892 
1893  if (!executeInfo[inst->id.threadId].inFlightInsts->empty())
1894  ret = executeInfo[inst->id.threadId].inFlightInsts->front().inst->id == inst->id;
1895 
1896  return ret;
1897 }
1898 
/* Expose the LSQ's dcache port to the CPU.
 * NOTE(review): the signature lines (listing lines 1899-1900, the return
 * type and Execute::getDcachePort()) are missing from this extract. */
1901 {
1902  return lsq.getDcachePort();
1903 }
1904 
1905 } // namespace minor
1906 } // namespace gem5
gem5::curTick
Tick curTick()
The universal simulation clock.
Definition: cur_tick.hh:46
fatal
#define fatal(...)
This implements a cprintf based fatal() function.
Definition: logging.hh:189
gem5::MinorCPU::randomPriority
std::vector< ThreadID > randomPriority()
Definition: cpu.hh:182
gem5::minor::Execute::getCommittingThread
ThreadID getCommittingThread()
Use the current threading policy to determine the next thread to commit from.
Definition: execute.cc:1697
gem5::NoFault
constexpr decltype(nullptr) NoFault
Definition: types.hh:260
warn
#define warn(...)
Definition: logging.hh:245
gem5::minor::Execute::tryToBranch
void tryToBranch(MinorDynInstPtr inst, Fault fault, BranchData &branch)
Generate Branch data based (into branch) on an observed (or not) change in PC while executing an inst...
Definition: execute.cc:225
gem5::minor::ForwardInstData::isBubble
bool isBubble() const
BubbleIF interface.
Definition: pipe_data.cc:251
gem5::minor::LSQ::issuedMemBarrierInst
void issuedMemBarrierInst(MinorDynInstPtr inst)
A memory barrier instruction has been issued, remember its execSeqNum that we can avoid issuing memor...
Definition: lsq.cc:1714
gem5::minor::Execute::zeroReg
const RegIndex zeroReg
Index of the zero integer register.
Definition: execute.hh:82
op_class.hh
gem5::minor::BranchData::Reason
Reason
Definition: pipe_data.hh:69
gem5::Clocked::curCycle
Cycles curCycle() const
Determine the current cycle, corresponding to a tick aligned to a clock edge.
Definition: clocked_object.hh:195
gem5::minor::ForwardInstData::insts
MinorDynInstPtr insts[MAX_FORWARD_INSTS]
Array of carried insts, ref counted.
Definition: pipe_data.hh:261
gem5::auxv::Random
@ Random
Definition: aux_vector.hh:89
gem5::minor::Execute::NotDraining
@ NotDraining
Definition: execute.hh:150
gem5::minor::QueuedInst::inst
MinorDynInstPtr inst
Definition: func_unit.hh:210
gem5::minor::Execute::ExecuteThreadInfo::inputIndex
unsigned int inputIndex
Index that we've completed upto in getInput data.
Definition: execute.hh:185
gem5::BaseCPU::getInterruptController
BaseInterrupts * getInterruptController(ThreadID tid)
Definition: base.hh:229
gem5::MipsISA::index
Bitfield< 30, 0 > index
Definition: pra_constants.hh:47
gem5::minor::MinorStats::numFetchSuspends
statistics::Scalar numFetchSuspends
Number of times fetch was asked to suspend by Execute.
Definition: stats.hh:73
gem5::minor::Execute::drain
unsigned int drain()
Like the drain interface on SimObject.
Definition: execute.cc:1835
gem5::minor::LSQ::LSQRequest::packet
PacketPtr packet
Definition: lsq.hh:154
gem5::minor::Execute::commitLimit
unsigned int commitLimit
Number of instructions that can be committed per cycle.
Definition: execute.hh:91
gem5::minor::ExecContext::readPredicate
bool readPredicate() const override
Definition: exec_context.hh:247
gem5::MinorCPU::MinorCPUPort
Provide a non-protected base class for Minor's Ports as derived classes are created by Fetch1 and Exe...
Definition: cpu.hh:106
gem5::MinorFUTiming::extraAssumedLat
Cycles extraAssumedLat
Extra delay that results should show in the scoreboard after leaving the pipeline.
Definition: func_unit.hh:127
gem5::minor::Execute::fuDescriptions
MinorFUPool & fuDescriptions
Descriptions of the functional units we want to generate.
Definition: execute.hh:102
gem5::ThreadState::ThreadStateStats::numOps
statistics::Scalar numOps
Stat for number ops (including micro ops) committed.
Definition: thread_state.hh:108
gem5::minor::MinorStats::numOps
statistics::Scalar numOps
Number of simulated insts and microops.
Definition: stats.hh:67
gem5::minor::LSQ::LSQRequest::needsToBeSentToStoreBuffer
bool needsToBeSentToStoreBuffer()
This request, once processed by the requests/transfers queues, will need to go to the store buffer.
Definition: lsq.cc:167
sc_dt::to_string
const std::string to_string(sc_enc enc)
Definition: sc_fxdefs.cc:91
gem5::ThreadState::ThreadStateStats::numInsts
statistics::Scalar numInsts
Stat for number instructions committed.
Definition: thread_state.hh:106
gem5::minor::Queue
Wrapper for a queue type to act as a pipeline stage input queue.
Definition: buffers.hh:404
gem5::minor::Execute::ExecuteThreadInfo
Definition: execute.hh:156
gem5::minor::Execute::checkInterrupts
ThreadID checkInterrupts(BranchData &branch, bool &interrupted)
Check all threads for possible interrupts.
Definition: execute.cc:1618
cpu.hh
minor
gem5::MinorCPU
MinorCPU is an in-order CPU model with four fixed pipeline stages:
Definition: cpu.hh:85
gem5::minor::Execute::lsq
LSQ lsq
Dcache port to pass on to the CPU.
Definition: execute.hh:126
gem5::minor::Execute::ExecuteThreadInfo::inFUMemInsts
Queue< QueuedInst, ReportTraitsAdaptor< QueuedInst > > * inFUMemInsts
Memory ref instructions still in the FUs.
Definition: execute.hh:181
gem5::minor::Execute::DrainCurrentInst
@ DrainCurrentInst
Definition: execute.hh:151
gem5::minor::Execute::evaluate
void evaluate()
Pass on input/buffer data to the output if you can.
Definition: execute.cc:1432
gem5::minor::BranchData::bubble
static BranchData bubble()
BubbleIF interface.
Definition: pipe_data.hh:151
gem5::minor::LSQ::step
void step()
Step checks the queues to see if their are issuable transfers which were not otherwise picked up by t...
Definition: lsq.cc:1480
std::vector
STL vector class.
Definition: stl.hh:37
gem5::minor::QueuedInst
Container class to box instructions in the FUs to make those queues have correct bubble behaviour whe...
Definition: func_unit.hh:207
gem5::minor::LSQ::needsToTick
bool needsToTick()
May need to be ticked next cycle as one of the queues contains an actionable transfers or address tra...
Definition: lsq.cc:1568
gem5::ThreadContext::instAddr
virtual Addr instAddr() const =0
gem5::minor::Execute::takeInterrupt
bool takeInterrupt(ThreadID thread_id, BranchData &branch)
Act on an interrupt.
Definition: execute.cc:425
gem5::minor::BranchData::NoBranch
@ NoBranch
Definition: pipe_data.hh:74
gem5::MinorFUTiming
Extra timing capability to allow individual ops to have their source register dependency latencies tw...
Definition: func_unit.hh:103
gem5::minor::LSQ::pushFailedRequest
void pushFailedRequest(MinorDynInstPtr inst)
Push a predicate failed-representing request into the queues just to maintain commit order.
Definition: lsq.cc:1663
gem5::minor::ForwardInstData
Forward flowing data between Fetch2,Decode,Execute carrying a packet of instructions of a width appro...
Definition: pipe_data.hh:257
gem5::minor::Execute::setTraceTimeOnIssue
bool setTraceTimeOnIssue
Modify instruction trace times on issue.
Definition: execute.hh:115
gem5::ArmISA::i
Bitfield< 7 > i
Definition: misc_types.hh:66
gem5::minor::Execute::isInterrupted
bool isInterrupted(ThreadID thread_id) const
Has an interrupt been raised.
Definition: execute.cc:419
gem5::SimpleThread
The SimpleThread object provides a combination of the ThreadState object and the ThreadContext interf...
Definition: simple_thread.hh:94
gem5::minor::Execute::hasInterrupt
bool hasInterrupt(ThreadID thread_id)
Checks if a specific thread has an interrupt.
Definition: execute.cc:1653
gem5::EventQueue::serviceEvents
void serviceEvents(Tick when)
process all events up to the given timestamp.
Definition: eventq.hh:876
gem5::minor::Execute::memoryCommitLimit
unsigned int memoryCommitLimit
Number of memory instructions that can be committed per cycle.
Definition: execute.hh:94
execute.hh
gem5::minor::BranchData::isStreamChange
static bool isStreamChange(const BranchData::Reason reason)
Is a request with this reason actually a request to change the PC rather than a bubble or branch pred...
Definition: pipe_data.cc:85
gem5::MinorCPU::wakeupOnEvent
void wakeupOnEvent(unsigned int stage_id)
Interface for stages to signal that they have become active after a callback or eventq event where th...
Definition: cpu.cc:302
gem5::minor::Execute::commit
void commit(ThreadID thread_id, bool only_commit_microops, bool discard, BranchData &branch)
Try and commit instructions from the ends of the functional unit pipelines.
Definition: execute.cc:1042
gem5::minor::Execute::commitPriority
ThreadID commitPriority
Definition: execute.hh:215
gem5::minor::Execute::ExecuteThreadInfo::streamSeqNum
InstSeqNum streamSeqNum
Source of sequence number for instuction streams.
Definition: execute.hh:199
gem5::minor::Execute::noCostFUIndex
unsigned int noCostFUIndex
The FU index of the non-existent costless FU for instructions which pass the MinorDynInst::isNoCostIn...
Definition: execute.hh:123
gem5::RefCountingPtr< MinorDynInst >
gem5::ThreadContext::status
virtual Status status() const =0
gem5::MinorCPU::threads
std::vector< minor::MinorThread * > threads
These are thread state-representing objects for this CPU.
Definition: cpu.hh:101
gem5::minor::InputBuffer
Like a Queue but with a restricted interface and a setTail function which, when the queue is empty,...
Definition: buffers.hh:572
gem5::ArmISA::at
Bitfield< 35, 32 > at
Definition: misc_types.hh:154
gem5::MinorCPU::stats
minor::MinorStats stats
Processor-specific statistics.
Definition: cpu.hh:140
gem5::Cycles
Cycles is a wrapper class for representing cycle counts, i.e.
Definition: types.hh:78
gem5::minor::LSQ::LSQRequest
Derived SenderState to carry data access info.
Definition: lsq.hh:129
gem5::minor::Execute::getIssuingThread
ThreadID getIssuingThread()
Definition: execute.cc:1764
gem5::Named
Interface for things with names.
Definition: named.hh:38
gem5::minor::LSQ::isDrained
bool isDrained()
Is there nothing left in the LSQ.
Definition: lsq.cc:1561
gem5::ThreadState::threadStats
gem5::ThreadState::ThreadStateStats threadStats
gem5::BaseCPU::numThreads
ThreadID numThreads
Number of threads we're actually simulating (<= SMT_MAX_THREADS).
Definition: base.hh:368
gem5::minor::ReportTraitsAdaptor
...ReportTraits are trait classes with the same functionality as ReportIF, but with elements explicit...
Definition: buffers.hh:97
gem5::minor::Execute::inputBuffer
std::vector< InputBuffer< ForwardInstData > > inputBuffer
Definition: execute.hh:135
gem5::minor::MinorStats::numInsts
statistics::Scalar numInsts
Number of simulated instructions.
Definition: stats.hh:64
gem5::minor::Latch::Output
Definition: buffers.hh:263
gem5::minor::BranchData::Interrupt
@ Interrupt
Definition: pipe_data.hh:97
gem5::minor::Pipeline::ExecuteStageId
@ ExecuteStageId
Definition: pipeline.hh:104
gem5::minor::LSQ::minorTrace
void minorTrace() const
Definition: lsq.cc:1670
gem5::minor::Execute::getInput
const ForwardInstData * getInput(ThreadID tid)
Get a piece of data to work on from the inputBuffer, or 0 if there is no data.
Definition: execute.cc:203
gem5::minor::cyclicIndexDec
unsigned int cyclicIndexDec(unsigned int index, unsigned int cycle_size)
Decrement a cyclic buffer index for indices [0, cycle_size-1].
Definition: execute.cc:540
exec_context.hh
gem5::ThreadContext
ThreadContext is the external interface to all thread state for anything outside of the CPU.
Definition: thread_context.hh:93
gem5::minor::Execute::minorTrace
void minorTrace() const
Definition: execute.cc:1664
gem5::Fault
std::shared_ptr< FaultBase > Fault
Definition: types.hh:255
gem5::PowerISA::fu
Bitfield< 12 > fu
Definition: misc.hh:89
gem5::minor::LSQ::getDcachePort
MinorCPU::MinorCPUPort & getDcachePort()
Return the raw-bindable port.
Definition: lsq.hh:738
gem5::minor::Execute::processMoreThanOneInput
bool processMoreThanOneInput
If true, more than one input line can be processed each cycle if there is room to execute more instru...
Definition: execute.hh:99
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:186
gem5::ThreadContext::Suspended
@ Suspended
Temporarily inactive.
Definition: thread_context.hh:112
gem5::minor::LSQ::getLastMemBarrier
InstSeqNum getLastMemBarrier(ThreadID thread_id) const
Get the execSeqNum of the last issued memory barrier.
Definition: lsq.hh:705
gem5::minor::BranchData::CorrectlyPredictedBranch
@ CorrectlyPredictedBranch
Definition: pipe_data.hh:77
gem5::MinorFU
A functional unit that can execute any of opClasses operations with a single op(eration)Lat(ency) and...
Definition: func_unit.hh:157
gem5::Packet
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:283
gem5::minor::Latch::Input
Encapsulate wires on either input or output of the latch.
Definition: buffers.hh:252
gem5::minor::Execute::isDrained
bool isDrained()
After thread suspension, has Execute been drained of in-flight instructions and memory accesses.
Definition: execute.cc:1857
gem5::TimingExpr
Definition: timing_expr.hh:91
gem5::MipsISA::PCState
GenericISA::DelaySlotPCState< 4 > PCState
Definition: pcstate.hh:40
gem5::BaseInterrupts::updateIntrInfo
virtual void updateIntrInfo()=0
gem5::minor::Execute::commitInst
bool commitInst(MinorDynInstPtr inst, bool early_memory_issue, BranchData &branch, Fault &fault, bool &committed, bool &completed_mem_issue)
Commit a single instruction.
Definition: execute.cc:898
gem5::minor::Execute::doInstCommitAccounting
void doInstCommitAccounting(MinorDynInstPtr inst)
Do the stats handling and instruction count and PC event events related to the new instruction/op cou...
Definition: execute.cc:867
gem5::minor::LSQ::canRequest
bool canRequest()
Is their space in the request queue to be able to push a request by issuing an isMemRef instruction.
Definition: lsq.hh:676
gem5::PCEvent
Definition: pc_event.hh:45
gem5::minor::BranchData::BadlyPredictedBranchTarget
@ BadlyPredictedBranchTarget
Definition: pipe_data.hh:86
gem5::minor::Execute::issuePriority
ThreadID issuePriority
Definition: execute.hh:214
gem5::Packet::getConstPtr
const T * getConstPtr() const
Definition: packet.hh:1193
gem5::minor::MinorBuffer< ElemType, ReportTraits >::minorTrace
void minorTrace() const
Report buffer states from 'slot' 'from' to 'to'.
Definition: buffers.hh:198
gem5::minor::MinorStats::numDiscardedOps
statistics::Scalar numDiscardedOps
Number of ops discarded before committing.
Definition: stats.hh:70
gem5::minor::LSQ::completeMemBarrierInst
void completeMemBarrierInst(MinorDynInstPtr inst, bool committed)
Complete a barrier instruction.
Definition: lsq.cc:916
gem5::TimingExprEvalContext
Object to gather the visible context for evaluation.
Definition: timing_expr.hh:73
gem5::minor::MinorDynInst::bubble
static MinorDynInstPtr bubble()
There is a single bubble inst.
Definition: dyn_inst.hh:257
gem5::minor::ForwardInstData::width
unsigned int width() const
Number of instructions carried by this object.
Definition: pipe_data.hh:277
gem5::minor::Execute::ExecuteThreadInfo::instsBeingCommitted
ForwardInstData instsBeingCommitted
Structure for reporting insts currently being processed/retired for MinorTrace.
Definition: execute.hh:193
gem5::minor::Execute::scoreboard
std::vector< Scoreboard > scoreboard
Scoreboard of instruction dependencies.
Definition: execute.hh:129
gem5::InvalidThreadID
const ThreadID InvalidThreadID
Definition: types.hh:243
gem5::minor::LSQ::popResponse
void popResponse(LSQRequestPtr response)
Sanity check and pop the head response.
Definition: lsq.cc:1526
gem5::minor::BranchData
Forward data betwen Execute and Fetch1 carrying change-of-address/stream information.
Definition: pipe_data.hh:66
gem5::ThreadContext::pcState
virtual TheISA::PCState pcState() const =0
gem5::minor::Execute::isInbetweenInsts
bool isInbetweenInsts(ThreadID thread_id) const
Are we between instructions? Can we be interrupted?
Definition: execute.cc:1425
gem5::minor::Execute::getDcachePort
MinorCPU::MinorCPUPort & getDcachePort()
Returns the DcachePort owned by this Execute to pass upwards.
Definition: execute.cc:1900
gem5::minor::MinorStats::committedInstType
statistics::Vector2d committedInstType
Number of instructions by type (OpClass)
Definition: stats.hh:83
gem5::BaseCPU::getContext
virtual ThreadContext * getContext(int tn)
Given a thread num get tho thread context for it.
Definition: base.hh:290
gem5::BaseCPU::probeInstCommit
virtual void probeInstCommit(const StaticInstPtr &inst, Addr pc)
Helper method to trigger PMU probes for a committed instruction.
Definition: base.cc:356
gem5::Addr
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:147
gem5::MinorFUTiming::extraCommitLatExpr
TimingExpr * extraCommitLatExpr
Definition: func_unit.hh:121
gem5::Packet::isError
bool isError() const
Definition: packet.hh:610
gem5::GEM5_DEPRECATED_NAMESPACE
GEM5_DEPRECATED_NAMESPACE(GuestABI, guest_abi)
gem5::minor::BranchData::isBubble
bool isBubble() const
Definition: pipe_data.hh:152
gem5::MinorFUPool::funcUnits
std::vector< MinorFU * > funcUnits
Definition: func_unit.hh:192
gem5::minor::ExecContext::readMemAccPredicate
bool readMemAccPredicate() const override
Definition: exec_context.hh:259
gem5::minor::Execute::tryPCEvents
bool tryPCEvents(ThreadID thread_id)
Try to act on PC-related events.
Definition: execute.cc:845
gem5::minor::minorTrace
void minorTrace(const char *fmt, Args ...args)
DPRINTFN for MinorTrace reporting.
Definition: trace.hh:67
gem5::ActivityRecorder::activity
void activity()
Records that there is activity this cycle.
Definition: activity.cc:55
gem5::minor::Execute::~Execute
~Execute()
Definition: execute.cc:1873
gem5::minor::FUPipeline
A functional unit configured from a MinorFU object.
Definition: func_unit.hh:233
gem5::FullSystem
bool FullSystem
The FullSystem variable can be used to determine the current mode of simulation.
Definition: root.cc:223
gem5::minor::ExecContext
ExecContext bears the exec_context interface for Minor.
Definition: exec_context.hh:73
gem5::MinorFU::opLat
Cycles opLat
Delay from issuing the operation, to it reaching the end of the associated pipeline.
Definition: func_unit.hh:164
gem5::minor::Execute::ExecuteThreadInfo::lastCommitWasEndOfMacroop
bool lastCommitWasEndOfMacroop
The last commit was the end of a full instruction so an interrupt can safely happen.
Definition: execute.hh:189
gem5::minor::Execute::updateBranchData
void updateBranchData(ThreadID tid, BranchData::Reason reason, MinorDynInstPtr inst, const TheISA::PCState &target, BranchData &branch)
Actually create a branch to communicate to Fetch1/Fetch2 and, if that is a stream-changing branch upd...
Definition: execute.cc:302
gem5::minor::Execute::DrainHaltFetch
@ DrainHaltFetch
Definition: execute.hh:152
gem5::minor::BranchData::isStreamChange
bool isStreamChange() const
As static isStreamChange but on this branch data.
Definition: pipe_data.hh:155
gem5::minor::cyclicIndexInc
unsigned int cyclicIndexInc(unsigned int index, unsigned int cycle_size)
Increment a cyclic buffer index for indices [0, cycle_size-1].
Definition: execute.cc:528
gem5::ThreadState::numInst
Counter numInst
Number of instructions committed.
Definition: thread_state.hh:98
gem5::minor::Execute::popInput
void popInput(ThreadID tid)
Pop an element off the input buffer, if there are any.
Definition: execute.cc:216
gem5::minor::Execute::setTraceTimeOnCommit
bool setTraceTimeOnCommit
Modify instruction trace times on commit.
Definition: execute.hh:112
gem5::X86ISA::os
Bitfield< 17 > os
Definition: misc.hh:809
gem5::SparcISA::Branch
Base class for branch operations.
Definition: branch.hh:48
gem5::minor::Execute::setDrainState
void setDrainState(ThreadID thread_id, DrainState state)
Set the drain state (with useful debugging messages)
Definition: execute.cc:1828
gem5::minor::operator<<
std::ostream & operator<<(std::ostream &os, const InstId &id)
Print this id in the usual slash-separated format expected by MinorTrace.
Definition: dyn_inst.cc:65
gem5::minor::LSQ::accessesInFlight
bool accessesInFlight() const
Are there any accesses other than normal cached loads in the memory system or having received respons...
Definition: lsq.hh:696
gem5::minor::Execute::handleMemResponse
void handleMemResponse(MinorDynInstPtr inst, LSQ::LSQRequestPtr response, BranchData &branch, Fault &fault)
Handle extracting mem ref responses from the memory queues and completing the associated instructions...
Definition: execute.cc:328
gem5::minor::Execute::ExecuteThreadInfo::drainState
DrainState drainState
State progression for draining NotDraining -> ...
Definition: execute.hh:208
gem5::BaseCPU::checkInterrupts
bool checkInterrupts(ThreadID tid) const
Definition: base.hh:256
gem5::minor::BranchData::HaltFetch
@ HaltFetch
Definition: pipe_data.hh:99
gem5::MinorFUTiming::srcRegsRelativeLats
std::vector< Cycles > srcRegsRelativeLats
Cycle offsets from the scoreboard delivery times of register values for each of this instruction's so...
Definition: func_unit.hh:136
gem5::minor::Execute::inp
Latch< ForwardInstData >::Output inp
Input port carrying instructions from Decode.
Definition: execute.hh:73
gem5::minor::Execute::ExecuteThreadInfo::inFlightInsts
Queue< QueuedInst, ReportTraitsAdaptor< QueuedInst > > * inFlightInsts
In-order instructions either in FUs or the LSQ.
Definition: execute.hh:178
gem5::InstSeqNum
uint64_t InstSeqNum
Definition: inst_seq.hh:40
gem5::minor::Execute::executeMemRefInst
bool executeMemRefInst(MinorDynInstPtr inst, BranchData &branch, bool &failed_predicate, Fault &fault)
Execute a memory reference instruction.
Definition: execute.cc:453
gem5::minor::BranchData::BadlyPredictedBranch
@ BadlyPredictedBranch
Definition: pipe_data.hh:90
gem5::minor::Execute::out
Latch< BranchData >::Input out
Input port carrying stream changes to Fetch1.
Definition: execute.hh:76
gem5::ThreadState::numOp
Counter numOp
Number of ops (including micro ops) committed.
Definition: thread_state.hh:100
gem5::minor::Execute::allowEarlyMemIssue
bool allowEarlyMemIssue
Allow mem refs to leave their FUs before reaching the head of the in flight insts queue if their depe...
Definition: execute.hh:119
gem5::BaseInterrupts::getInterrupt
virtual Fault getInterrupt()=0
gem5::Num_OpClasses
static const OpClass Num_OpClasses
Definition: op_class.hh:108
gem5::minor::Execute::DrainState
DrainState
Stage cycle-by-cycle state.
Definition: execute.hh:148
gem5::MinorCPU::threadPolicy
enums::ThreadPolicy threadPolicy
Thread Scheduling Policy (RoundRobin, Random, etc)
Definition: cpu.hh:120
gem5::MinorFUTiming::extraCommitLat
Cycles extraCommitLat
Extra latency that the instruction should spend at the end of the pipeline.
Definition: func_unit.hh:120
gem5::minor::Execute::numFuncUnits
unsigned int numFuncUnits
Number of functional units to produce.
Definition: execute.hh:105
gem5::MinorCPU::activityRecorder
minor::MinorActivityRecorder * activityRecorder
Activity recording for pipeline.
Definition: cpu.hh:96
gem5::minor::Execute::drainResume
void drainResume()
Definition: execute.cc:1792
gem5::minor::Execute::ExecuteThreadInfo::lastPredictionSeqNum
InstSeqNum lastPredictionSeqNum
A prediction number for use where one isn't available from an instruction.
Definition: execute.hh:205
gem5::minor::Execute::funcUnits
std::vector< FUPipeline * > funcUnits
The execution functional units.
Definition: execute.hh:132
gem5::minor::Execute::issueLimit
unsigned int issueLimit
Number of instructions that can be issued per cycle.
Definition: execute.hh:85
gem5::minor::BranchData::SuspendThread
@ SuspendThread
Definition: pipe_data.hh:95
gem5::minor::Execute::executeInfo
std::vector< ExecuteThreadInfo > executeInfo
Definition: execute.hh:211
gem5::minor::Execute::issue
unsigned int issue(ThreadID thread_id)
Try and issue instructions from the inputBuffer.
Definition: execute.cc:551
gem5::Packet::getAddr
Addr getAddr() const
Definition: packet.hh:781
gem5::MinorFUTiming::suppress
bool suppress
If true, instructions matching this mask/match should not be issued in this FU.
Definition: func_unit.hh:116
gem5::minor::Execute::cpu
MinorCPU & cpu
Pointer back to the containing CPU.
Definition: execute.hh:79
gem5::minor::Execute::interruptPriority
ThreadID interruptPriority
Definition: execute.hh:213
gem5::IntRegClass
@ IntRegClass
Integer register.
Definition: reg_class.hh:57
gem5
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
Definition: decoder.cc:40
gem5::minor::Execute::instIsHeadInst
bool instIsHeadInst(MinorDynInstPtr inst)
Returns true if the given instruction is at the head of the inFlightInsts instruction queue.
Definition: execute.cc:1889
gem5::minor::Execute::memoryIssueLimit
unsigned int memoryIssueLimit
Number of memory ops that can be issued per cycle.
Definition: execute.hh:88
gem5::minor::Execute::DrainAllInsts
@ DrainAllInsts
Definition: execute.hh:153
lsq.hh
gem5::minor::LSQ::sendStoreToStoreBuffer
void sendStoreToStoreBuffer(LSQRequestPtr request)
A store has been committed, please move it to the store buffer.
Definition: lsq.cc:1548
gem5::minor::BranchData::UnpredictedBranch
@ UnpredictedBranch
Definition: pipe_data.hh:82
gem5::minor::LSQ::findResponse
LSQRequestPtr findResponse(MinorDynInstPtr inst)
Returns a response if it's at the head of the transfers queue and it's either complete or can be sent...
Definition: lsq.cc:1491
gem5::minor::LSQ::canPushIntoStoreBuffer
bool canPushIntoStoreBuffer() const
Must check this before trying to insert into the store buffer.
Definition: lsq.hh:688
gem5::SimpleThread::comInstEventQueue
EventQueue comInstEventQueue
An instruction-based event queue.
Definition: simple_thread.hh:130
fetch1.hh
gem5::Packet::getSize
unsigned getSize() const
Definition: packet.hh:791
gem5::MinorCPU::roundRobinPriority
std::vector< ThreadID > roundRobinPriority(ThreadID priority)
Thread scheduling utility functions.
Definition: cpu.hh:173
gem5::ThreadID
int16_t ThreadID
Thread index/ID type.
Definition: types.hh:242
gem5::minor::Execute::instIsRightStream
bool instIsRightStream(MinorDynInstPtr inst)
Does the given instruction have the right stream sequence number to be committed?
Definition: execute.cc:1883
gem5::minor::Execute::Execute
Execute(const std::string &name_, MinorCPU &cpu_, const MinorCPUParams &params, Latch< ForwardInstData >::Output inp_, Latch< BranchData >::Input out_)
Definition: execute.cc:65
panic
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:177

Generated on Wed Jul 28 2021 12:10:24 for gem5 by doxygen 1.8.17