gem5  v20.1.0.0
execute.cc
1 /*
2  * Copyright (c) 2013-2014,2018-2020 ARM Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions are
16  * met: redistributions of source code must retain the above copyright
17  * notice, this list of conditions and the following disclaimer;
18  * redistributions in binary form must reproduce the above copyright
19  * notice, this list of conditions and the following disclaimer in the
20  * documentation and/or other materials provided with the distribution;
21  * neither the name of the copyright holders nor the names of its
22  * contributors may be used to endorse or promote products derived from
23  * this software without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 #include "cpu/minor/execute.hh"
39 
40 #include "arch/locked_mem.hh"
41 #include "arch/registers.hh"
42 #include "arch/utility.hh"
43 #include "cpu/minor/cpu.hh"
44 #include "cpu/minor/exec_context.hh"
45 #include "cpu/minor/fetch1.hh"
46 #include "cpu/minor/lsq.hh"
47 #include "cpu/op_class.hh"
48 #include "debug/Activity.hh"
49 #include "debug/Branch.hh"
50 #include "debug/Drain.hh"
51 #include "debug/ExecFaulting.hh"
52 #include "debug/MinorExecute.hh"
53 #include "debug/MinorInterrupt.hh"
54 #include "debug/MinorMem.hh"
55 #include "debug/MinorTrace.hh"
56 #include "debug/PCEvent.hh"
57 
58 namespace Minor
59 {
60 
61 Execute::Execute(const std::string &name_,
62  MinorCPU &cpu_,
63  MinorCPUParams &params,
64  Latch<ForwardInstData>::Output inp_,
65  Latch<BranchData>::Input out_) :
66  Named(name_),
67  inp(inp_),
68  out(out_),
69  cpu(cpu_),
70  issueLimit(params.executeIssueLimit),
71  memoryIssueLimit(params.executeMemoryIssueLimit),
72  commitLimit(params.executeCommitLimit),
73  memoryCommitLimit(params.executeMemoryCommitLimit),
74  processMoreThanOneInput(params.executeCycleInput),
75  fuDescriptions(*params.executeFuncUnits),
76  numFuncUnits(fuDescriptions.funcUnits.size()),
77  setTraceTimeOnCommit(params.executeSetTraceTimeOnCommit),
78  setTraceTimeOnIssue(params.executeSetTraceTimeOnIssue),
79  allowEarlyMemIssue(params.executeAllowEarlyMemoryIssue),
80  noCostFUIndex(fuDescriptions.funcUnits.size() + 1),
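 /* noCostFUIndex is intentionally an index beyond the real FUs
  * (0 .. numFuncUnits - 1); issue() below tags no-cost instructions
  * with it so they never occupy a real functional unit */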
81  lsq(name_ + ".lsq", name_ + ".dcache_port",
82  cpu_, *this,
83  params.executeMaxAccessesInMemory,
84  params.executeMemoryWidth,
85  params.executeLSQRequestsQueueSize,
86  params.executeLSQTransfersQueueSize,
87  params.executeLSQStoreBufferSize,
88  params.executeLSQMaxStoreBufferStoresPerCycle),
89  executeInfo(params.numThreads, ExecuteThreadInfo(params.executeCommitLimit)),
90  interruptPriority(0),
91  issuePriority(0),
92  commitPriority(0)
93 {
94  if (commitLimit < 1) {
95  fatal("%s: executeCommitLimit must be >= 1 (%d)\n", name_,
96  commitLimit);
97  }
98 
99  if (issueLimit < 1) {
100  fatal("%s: executeIssueLimit must be >= 1 (%d)\n", name_,
101  issueLimit);
102  }
103 
104  if (memoryIssueLimit < 1) {
105  fatal("%s: executeMemoryIssueLimit must be >= 1 (%d)\n", name_,
106  memoryIssueLimit);
107  }
108 
109  if (memoryCommitLimit > commitLimit) {
110  fatal("%s: executeMemoryCommitLimit (%d) must be <="
111  " executeCommitLimit (%d)\n",
112  name_, memoryCommitLimit, commitLimit);
113  }
114 
115  if (params.executeInputBufferSize < 1) {
116  fatal("%s: executeInputBufferSize must be >= 1 (%d)\n", name_,
117  params.executeInputBufferSize);
118  }
119 
120  if (params.executeInputBufferSize < 1) {
121  fatal("%s: executeInputBufferSize must be >= 1 (%d)\n", name_,
122  params.executeInputBufferSize);
123  }
124 
125  /* This should be large enough to count all the in-FU instructions
126  * which need to be accounted for in the inFlightInsts
127  * queue */
128  unsigned int total_slots = 0;
129 
130  /* Make FUPipelines for each MinorFU */
131  for (unsigned int i = 0; i < numFuncUnits; i++) {
132  std::ostringstream fu_name;
133  MinorFU *fu_description = fuDescriptions.funcUnits[i];
134 
135  /* Note the total number of instruction slots (for sizing
136  * the inFlightInst queue) and the maximum latency of any FU
137  * (for sizing the activity recorder) */
138  total_slots += fu_description->opLat;
139 
140  fu_name << name_ << ".fu." << i;
141 
142  FUPipeline *fu = new FUPipeline(fu_name.str(), *fu_description, cpu);
143 
144  funcUnits.push_back(fu);
145  }
146 
147  /* Check that there is a functional unit for all operation classes */
148  for (int op_class = No_OpClass + 1; op_class < Num_OpClasses; op_class++) {
149  bool found_fu = false;
150  unsigned int fu_index = 0;
151 
152  while (fu_index < numFuncUnits && !found_fu)
153  {
154  if (funcUnits[fu_index]->provides(
155  static_cast<OpClass>(op_class)))
156  {
157  found_fu = true;
158  }
159  fu_index++;
160  }
161 
162  if (!found_fu) {
163  warn("No functional unit for OpClass %s\n",
164  Enums::OpClassStrings[op_class]);
165  }
166  }
167 
168  /* Per-thread structures */
169  for (ThreadID tid = 0; tid < params.numThreads; tid++) {
170  std::string tid_str = std::to_string(tid);
171 
172  /* Input Buffers */
173  inputBuffer.push_back(
174  InputBuffer<ForwardInstData>(
175  name_ + ".inputBuffer" + tid_str, "insts",
176  params.executeInputBufferSize));
177 
178  /* Scoreboards */
179  scoreboard.push_back(Scoreboard(name_ + ".scoreboard" + tid_str));
180 
181  /* In-flight instruction records */
182  executeInfo[tid].inFlightInsts = new Queue<QueuedInst,
183  ReportTraitsAdaptor<QueuedInst> >(
184  name_ + ".inFlightInsts" + tid_str, "insts", total_slots);
185 
186  executeInfo[tid].inFUMemInsts = new Queue<QueuedInst,
187  ReportTraitsAdaptor<QueuedInst> >(
188  name_ + ".inFUMemInsts" + tid_str, "insts", total_slots);
189  }
190 }
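/* Worked example (illustrative numbers only): with two FUs whose opLat
 * values are 3 and 6, total_slots above is 9, so each per-thread
 * inFlightInsts and inFUMemInsts queue is sized for 9 entries -- one for
 * every instruction that can be inside an FU at once */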
191 
192 const ForwardInstData *
193 Execute::getInput(ThreadID tid)
194 {
195  /* Get a line from the inputBuffer to work with */
196  if (!inputBuffer[tid].empty()) {
197  const ForwardInstData &head = inputBuffer[tid].front();
198 
199  return (head.isBubble() ? NULL : &(inputBuffer[tid].front()));
200  } else {
201  return NULL;
202  }
203 }
204 
205 void
206 Execute::popInput(ThreadID tid)
207 {
208  if (!inputBuffer[tid].empty())
209  inputBuffer[tid].pop();
210 
211  executeInfo[tid].inputIndex = 0;
212 }
213 
214 void
215 Execute::tryToBranch(MinorDynInstPtr inst, Fault fault, BranchData &branch)
216 {
217  ThreadContext *thread = cpu.getContext(inst->id.threadId);
218  const TheISA::PCState &pc_before = inst->pc;
219  TheISA::PCState target = thread->pcState();
220 
221  /* Force a branch for SerializeAfter/SquashAfter instructions
222  * at the end of micro-op sequence when we're not suspended */
223  bool force_branch = thread->status() != ThreadContext::Suspended &&
224  !inst->isFault() &&
225  inst->isLastOpInInst() &&
226  (inst->staticInst->isSerializeAfter() ||
227  inst->staticInst->isSquashAfter() ||
228  inst->staticInst->isIprAccess());
229 
230  DPRINTF(Branch, "tryToBranch before: %s after: %s%s\n",
231  pc_before, target, (force_branch ? " (forcing)" : ""));
232 
233  /* Will we change the PC to something other than the next instruction? */
234  bool must_branch = pc_before != target ||
235  fault != NoFault ||
236  force_branch;
237 
238  /* The reason for the branch data we're about to generate, set below */
239  BranchData::Reason reason = BranchData::NoBranch;
240 
241  if (fault == NoFault)
242  {
243  TheISA::advancePC(target, inst->staticInst);
244  thread->pcState(target);
245 
246  DPRINTF(Branch, "Advancing current PC from: %s to: %s\n",
247  pc_before, target);
248  }
249 
250  if (inst->predictedTaken && !force_branch) {
251  /* Predicted to branch */
252  if (!must_branch) {
253  /* No branch was taken, change stream to get us back to the
254  * intended PC value */
255  DPRINTF(Branch, "Predicted a branch from 0x%x to 0x%x but"
256  " none happened inst: %s\n",
257  inst->pc.instAddr(), inst->predictedTarget.instAddr(), *inst);
258 
259  reason = BranchData::BadlyPredictedBranch;
260  } else if (inst->predictedTarget == target) {
261  /* Branch prediction got the right target, kill the branch and
262  * carry on.
263  * Note that this information to the branch predictor might get
264  * overwritten by a "real" branch during this cycle */
265  DPRINTF(Branch, "Predicted a branch from 0x%x to 0x%x correctly"
266  " inst: %s\n",
267  inst->pc.instAddr(), inst->predictedTarget.instAddr(), *inst);
268 
269  reason = BranchData::CorrectlyPredictedBranch;
270  } else {
271  /* Branch prediction got the wrong target */
272  DPRINTF(Branch, "Predicted a branch from 0x%x to 0x%x"
273  " but got the wrong target (actual: 0x%x) inst: %s\n",
274  inst->pc.instAddr(), inst->predictedTarget.instAddr(),
275  target.instAddr(), *inst);
276 
277  reason = BranchData::BadlyPredictedBranchTarget;
278  }
279  } else if (must_branch) {
280  /* Unpredicted branch */
281  DPRINTF(Branch, "Unpredicted branch from 0x%x to 0x%x inst: %s\n",
282  inst->pc.instAddr(), target.instAddr(), *inst);
283 
284  reason = BranchData::UnpredictedBranch;
285  } else {
286  /* No branch at all */
287  reason = BranchData::NoBranch;
288  }
289 
290  updateBranchData(inst->id.threadId, reason, inst, target, branch);
291 }
292 
293 void
294 Execute::updateBranchData(
295  ThreadID tid,
296  BranchData::Reason reason,
297  MinorDynInstPtr inst, const TheISA::PCState &target,
298  BranchData &branch)
299 {
300  if (reason != BranchData::NoBranch) {
301  /* Bump up the stream sequence number on a real branch*/
302  if (BranchData::isStreamChange(reason))
303  executeInfo[tid].streamSeqNum++;
304 
305  /* Branches (even mis-predictions) don't change the predictionSeqNum,
306  * just the streamSeqNum */
307  branch = BranchData(reason, tid,
308  executeInfo[tid].streamSeqNum,
309  /* Maintaining predictionSeqNum if there's no inst is just a
310  * courtesy and looks better on minorview */
311  (inst->isBubble() ? executeInfo[tid].lastPredictionSeqNum
312  : inst->id.predictionSeqNum),
313  target, inst);
314 
315  DPRINTF(Branch, "Branch data signalled: %s\n", branch);
316  }
317 }
318 
319 void
320 Execute::handleMemResponse(MinorDynInstPtr inst,
321  LSQ::LSQRequestPtr response, BranchData &branch, Fault &fault)
322 {
323  ThreadID thread_id = inst->id.threadId;
324  ThreadContext *thread = cpu.getContext(thread_id);
325 
326  ExecContext context(cpu, *cpu.threads[thread_id], *this, inst);
327 
328  PacketPtr packet = response->packet;
329 
330  bool is_load = inst->staticInst->isLoad();
331  bool is_store = inst->staticInst->isStore();
332  bool is_atomic = inst->staticInst->isAtomic();
333  bool is_prefetch = inst->staticInst->isDataPrefetch();
334 
335  /* If true, the trace's predicate value will be taken from the exec
336  * context predicate, otherwise, it will be set to false */
337  bool use_context_predicate = true;
338 
339  if (inst->translationFault != NoFault) {
340  /* Invoke memory faults. */
341  DPRINTF(MinorMem, "Completing fault from DTLB access: %s\n",
342  inst->translationFault->name());
343 
344  if (inst->staticInst->isPrefetch()) {
345  DPRINTF(MinorMem, "Not taking fault on prefetch: %s\n",
346  inst->translationFault->name());
347 
348  /* Don't assign to fault */
349  } else {
350  /* Take the fault raised during the TLB/memory access */
351  fault = inst->translationFault;
352 
353  fault->invoke(thread, inst->staticInst);
354  }
355  } else if (!packet) {
356  DPRINTF(MinorMem, "Completing failed request inst: %s\n",
357  *inst);
358  use_context_predicate = false;
359  if (!context.readMemAccPredicate())
360  inst->staticInst->completeAcc(nullptr, &context, inst->traceData);
361  } else if (packet->isError()) {
362  DPRINTF(MinorMem, "Trying to commit error response: %s\n",
363  *inst);
364 
365  fatal("Received error response packet for inst: %s\n", *inst);
366  } else if (is_store || is_load || is_prefetch || is_atomic) {
367  assert(packet);
368 
369  DPRINTF(MinorMem, "Memory response inst: %s addr: 0x%x size: %d\n",
370  *inst, packet->getAddr(), packet->getSize());
371 
372  if (is_load && packet->getSize() > 0) {
373  DPRINTF(MinorMem, "Memory data[0]: 0x%x\n",
374  static_cast<unsigned int>(packet->getConstPtr<uint8_t>()[0]));
375  }
376 
377  /* Complete the memory access instruction */
378  fault = inst->staticInst->completeAcc(packet, &context,
379  inst->traceData);
380 
381  if (fault != NoFault) {
382  /* Invoke fault created by instruction completion */
383  DPRINTF(MinorMem, "Fault in memory completeAcc: %s\n",
384  fault->name());
385  fault->invoke(thread, inst->staticInst);
386  } else {
387  /* Stores need to be pushed into the store buffer to finish
388  * them off */
389  if (response->needsToBeSentToStoreBuffer())
390  lsq.sendStoreToStoreBuffer(response);
391  }
392  } else {
393  fatal("There should only ever be reads, "
394  "writes or faults at this point\n");
395  }
396 
397  lsq.popResponse(response);
398 
399  if (inst->traceData) {
400  inst->traceData->setPredicate((use_context_predicate ?
401  context.readPredicate() : false));
402  }
403 
404  doInstCommitAccounting(inst);
405 
406  /* Generate output to account for branches */
407  tryToBranch(inst, fault, branch);
408 }
409 
410 bool
411 Execute::isInterrupted(ThreadID thread_id) const
412 {
413  return cpu.checkInterrupts(thread_id);
414 }
415 
416 bool
417 Execute::takeInterrupt(ThreadID thread_id, BranchData &branch)
418 {
419  DPRINTF(MinorInterrupt, "Considering interrupt status from PC: %s\n",
420  cpu.getContext(thread_id)->pcState());
421 
422  Fault interrupt = cpu.getInterruptController(thread_id)->getInterrupt();
423 
424  if (interrupt != NoFault) {
425  /* The interrupt *must* set pcState */
426  cpu.getInterruptController(thread_id)->updateIntrInfo();
427  interrupt->invoke(cpu.getContext(thread_id));
428 
429  assert(!lsq.accessesInFlight());
430 
431  DPRINTF(MinorInterrupt, "Invoking interrupt: %s to PC: %s\n",
432  interrupt->name(), cpu.getContext(thread_id)->pcState());
433 
434  /* Assume that an interrupt *must* cause a branch. Assert this? */
435 
436  updateBranchData(thread_id, BranchData::Interrupt,
437  MinorDynInst::bubble(), cpu.getContext(thread_id)->pcState(),
438  branch);
439  }
440 
441  return interrupt != NoFault;
442 }
443 
444 bool
445 Execute::executeMemRefInst(MinorDynInstPtr inst, BranchData &branch,
446  bool &passed_predicate, Fault &fault)
447 {
448  bool issued = false;
449 
450  /* Set to true if the mem op. is issued and sent to the mem system */
451  passed_predicate = false;
452 
453  if (!lsq.canRequest()) {
454  /* Not acting on instruction yet as the memory
455  * queues are full */
456  issued = false;
457  } else {
458  ThreadContext *thread = cpu.getContext(inst->id.threadId);
459  TheISA::PCState old_pc = thread->pcState();
460 
461  ExecContext context(cpu, *cpu.threads[inst->id.threadId],
462  *this, inst);
463 
464  DPRINTF(MinorExecute, "Initiating memRef inst: %s\n", *inst);
465 
466  Fault init_fault = inst->staticInst->initiateAcc(&context,
467  inst->traceData);
468 
469  if (inst->inLSQ) {
470  if (init_fault != NoFault) {
471  assert(inst->translationFault != NoFault);
472  // Translation faults are dealt with in handleMemResponse()
473  init_fault = NoFault;
474  } else {
475  // If we have a translation fault then it got suppressed by
476  // initiateAcc()
477  inst->translationFault = NoFault;
478  }
479  }
480 
481  if (init_fault != NoFault) {
482  DPRINTF(MinorExecute, "Fault on memory inst: %s"
483  " initiateAcc: %s\n", *inst, init_fault->name());
484  fault = init_fault;
485  } else {
486  /* Only set this if the instruction passed its
487  * predicate */
488  if (!context.readMemAccPredicate()) {
489  DPRINTF(MinorMem, "No memory access for inst: %s\n", *inst);
490  assert(context.readPredicate());
491  }
492  passed_predicate = context.readPredicate();
493 
494  /* Set predicate in tracing */
495  if (inst->traceData)
496  inst->traceData->setPredicate(passed_predicate);
497 
498  /* If the instruction didn't pass its predicate
499  * or it is a predicated vector instruction and the
500  * associated predicate register is all-false (and so will not
501  * progress from here), try to branch to correct any branch
502  * mis-prediction. */
503  if (!inst->inLSQ) {
504  /* Leave it up to commit to handle the fault */
505  lsq.pushFailedRequest(inst);
506  inst->inLSQ = true;
507  }
508  }
509 
510  /* Restore thread PC */
511  thread->pcState(old_pc);
512  issued = true;
513  }
514 
515  return issued;
516 }
517 
518 /** Increment a cyclic buffer index for indices [0, cycle_size-1] */
519 inline unsigned int
520 cyclicIndexInc(unsigned int index, unsigned int cycle_size)
521 {
522  unsigned int ret = index + 1;
523 
524  if (ret == cycle_size)
525  ret = 0;
526 
527  return ret;
528 }
529 
530 /** Decrement a cyclic buffer index for indices [0, cycle_size-1] */
531 inline unsigned int
532 cyclicIndexDec(unsigned int index, unsigned int cycle_size)
533 {
534  int ret = index - 1;
535 
536  if (ret < 0)
537  ret = cycle_size - 1;
538 
539  return ret;
540 }
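/* For example, with cycle_size == 4 the helpers above step through
 * 0,1,2,3,0,... and 3,2,1,0,3,... respectively:
 * cyclicIndexInc(3, 4) == 0 and cyclicIndexDec(0, 4) == 3 */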
541 
542 unsigned int
543 Execute::issue(ThreadID thread_id)
544 {
545  const ForwardInstData *insts_in = getInput(thread_id);
546  ExecuteThreadInfo &thread = executeInfo[thread_id];
547 
548  /* Early termination if we have no instructions */
549  if (!insts_in)
550  return 0;
551 
552  /* Start from the first FU */
553  unsigned int fu_index = 0;
554 
555  /* Remains true while instructions are still being issued. If any
556  * instruction fails to issue, this is set to false and we exit issue.
557  * This strictly enforces in-order issue. For other issue behaviours,
558  * a more complicated test in the outer while loop below is needed. */
559  bool issued = true;
560 
561  /* Number of insts issued this cycle to check for issueLimit */
562  unsigned num_insts_issued = 0;
563 
564  /* Number of memory ops issued this cycle to check for memoryIssueLimit */
565  unsigned num_mem_insts_issued = 0;
566 
567  /* Number of instructions discarded this cycle in order to enforce a
568  * discardLimit. @todo, add that parameter? */
569  unsigned num_insts_discarded = 0;
570 
571  do {
572  MinorDynInstPtr inst = insts_in->insts[thread.inputIndex];
573  Fault fault = inst->fault;
574  bool discarded = false;
575  bool issued_mem_ref = false;
576 
577  if (inst->isBubble()) {
578  /* Skip */
579  issued = true;
580  } else if (cpu.getContext(thread_id)->status() ==
581  ThreadContext::Suspended)
582  {
583  DPRINTF(MinorExecute, "Discarding inst: %s from suspended"
584  " thread\n", *inst);
585 
586  issued = true;
587  discarded = true;
588  } else if (inst->id.streamSeqNum != thread.streamSeqNum) {
589  DPRINTF(MinorExecute, "Discarding inst: %s as its stream"
590  " state was unexpected, expected: %d\n",
591  *inst, thread.streamSeqNum);
592  issued = true;
593  discarded = true;
594  } else {
595  /* Try and issue an instruction into an FU, assume we didn't and
596  * fix that in the loop */
597  issued = false;
598 
599  /* Try FU from 0 each instruction */
600  fu_index = 0;
601 
602  /* Try and issue a single instruction stepping through the
603  * available FUs */
604  do {
605  FUPipeline *fu = funcUnits[fu_index];
606 
607  DPRINTF(MinorExecute, "Trying to issue inst: %s to FU: %d\n",
608  *inst, fu_index);
609 
610  /* Does the examined fu have the OpClass-related capability
611  * needed to execute this instruction? Faults can always
612  * issue to any FU but probably should just 'live' in the
613  * inFlightInsts queue rather than having an FU. */
614  bool fu_is_capable = (!inst->isFault() ?
615  fu->provides(inst->staticInst->opClass()) : true);
616 
617  if (inst->isNoCostInst()) {
618  /* Issue free insts. to a fake numbered FU */
619  fu_index = noCostFUIndex;
620 
621  /* And start the countdown on activity to allow
622  * this instruction to get to the end of its FU */
623  cpu.activityRecorder->activity();
624 
625  /* Mark the destinations for this instruction as
626  * busy */
627  scoreboard[thread_id].markupInstDests(inst, cpu.curCycle() +
628  Cycles(0), cpu.getContext(thread_id), false);
629 
630  DPRINTF(MinorExecute, "Issuing %s to %d\n", inst->id, noCostFUIndex);
631  inst->fuIndex = noCostFUIndex;
632  inst->extraCommitDelay = Cycles(0);
633  inst->extraCommitDelayExpr = NULL;
634 
635  /* Push the instruction onto the inFlight queue so
636  * it can be committed in order */
637  QueuedInst fu_inst(inst);
638  thread.inFlightInsts->push(fu_inst);
639 
640  issued = true;
641 
642  } else if (!fu_is_capable || fu->alreadyPushed()) {
643  /* Skip */
644  if (!fu_is_capable) {
645  DPRINTF(MinorExecute, "Can't issue as FU: %d isn't"
646  " capable\n", fu_index);
647  } else {
648  DPRINTF(MinorExecute, "Can't issue as FU: %d is"
649  " already busy\n", fu_index);
650  }
651  } else if (fu->stalled) {
652  DPRINTF(MinorExecute, "Can't issue inst: %s into FU: %d,"
653  " it's stalled\n",
654  *inst, fu_index);
655  } else if (!fu->canInsert()) {
656  DPRINTF(MinorExecute, "Can't issue inst: %s to busy FU"
657  " for another: %d cycles\n",
658  *inst, fu->cyclesBeforeInsert());
659  } else {
660  MinorFUTiming *timing = (!inst->isFault() ?
661  fu->findTiming(inst->staticInst) : NULL);
662 
663  const std::vector<Cycles> *src_latencies =
664  (timing ? &(timing->srcRegsRelativeLats)
665  : NULL);
666 
667  const std::vector<bool> *cant_forward_from_fu_indices =
668  &(fu->cantForwardFromFUIndices);
669 
670  if (timing && timing->suppress) {
671  DPRINTF(MinorExecute, "Can't issue inst: %s as extra"
672  " decoding is suppressing it\n",
673  *inst);
674  } else if (!scoreboard[thread_id].canInstIssue(inst,
675  src_latencies, cant_forward_from_fu_indices,
676  cpu.curCycle(), cpu.getContext(thread_id)))
677  {
678  DPRINTF(MinorExecute, "Can't issue inst: %s yet\n",
679  *inst);
680  } else {
681  /* Can insert the instruction into this FU */
682  DPRINTF(MinorExecute, "Issuing inst: %s"
683  " into FU %d\n", *inst,
684  fu_index);
685 
686  Cycles extra_dest_retire_lat = Cycles(0);
687  TimingExpr *extra_dest_retire_lat_expr = NULL;
688  Cycles extra_assumed_lat = Cycles(0);
689 
690  /* Add the extraCommitDelay and extraAssumedLat to
691  * the FU pipeline timings */
692  if (timing) {
693  extra_dest_retire_lat =
694  timing->extraCommitLat;
695  extra_dest_retire_lat_expr =
696  timing->extraCommitLatExpr;
697  extra_assumed_lat =
698  timing->extraAssumedLat;
699  }
700 
701  issued_mem_ref = inst->isMemRef();
702 
703  QueuedInst fu_inst(inst);
704 
705  /* Decorate the inst with FU details */
706  inst->fuIndex = fu_index;
707  inst->extraCommitDelay = extra_dest_retire_lat;
708  inst->extraCommitDelayExpr =
709  extra_dest_retire_lat_expr;
710 
711  if (issued_mem_ref) {
712  /* Remember which instruction this memory op
713  * depends on so that initiateAcc can be called
714  * early */
715  if (allowEarlyMemIssue) {
716  inst->instToWaitFor =
717  scoreboard[thread_id].execSeqNumToWaitFor(inst,
718  cpu.getContext(thread_id));
719 
720  if (lsq.getLastMemBarrier(thread_id) >
721  inst->instToWaitFor)
722  {
723  DPRINTF(MinorExecute, "A barrier will"
724  " cause a delay in mem ref issue of"
725  " inst: %s until after inst"
726  " %d(exec)\n", *inst,
727  lsq.getLastMemBarrier(thread_id));
728 
729  inst->instToWaitFor =
730  lsq.getLastMemBarrier(thread_id);
731  } else {
732  DPRINTF(MinorExecute, "Memory ref inst:"
733  " %s must wait for inst %d(exec)"
734  " before issuing\n",
735  *inst, inst->instToWaitFor);
736  }
737 
738  inst->canEarlyIssue = true;
739  }
740  /* Also queue this instruction in the memory ref
741  * queue to ensure in-order issue to the LSQ */
742  DPRINTF(MinorExecute, "Pushing mem inst: %s\n",
743  *inst);
744  thread.inFUMemInsts->push(fu_inst);
745  }
746 
747  /* Issue to FU */
748  fu->push(fu_inst);
749  /* And start the countdown on activity to allow
750  * this instruction to get to the end of its FU */
751  cpu.activityRecorder->activity();
752 
753  /* Mark the destinations for this instruction as
754  * busy */
755  scoreboard[thread_id].markupInstDests(inst, cpu.curCycle() +
756  fu->description.opLat +
757  extra_dest_retire_lat +
758  extra_assumed_lat,
759  cpu.getContext(thread_id),
760  issued_mem_ref && extra_assumed_lat == Cycles(0));
761 
762  /* Push the instruction onto the inFlight queue so
763  * it can be committed in order */
764  thread.inFlightInsts->push(fu_inst);
765 
766  issued = true;
767  }
768  }
769 
770  fu_index++;
771  } while (fu_index != numFuncUnits && !issued);
772 
773  if (!issued)
774  DPRINTF(MinorExecute, "Didn't issue inst: %s\n", *inst);
775  }
776 
777  if (issued) {
778  /* Generate MinorTrace's MinorInst lines. Do this at commit
779  * to allow better instruction annotation? */
780  if (DTRACE(MinorTrace) && !inst->isBubble())
781  inst->minorTraceInst(*this);
782 
783  /* Mark up barriers in the LSQ */
784  if (!discarded && inst->isInst() &&
785  inst->staticInst->isMemBarrier())
786  {
787  DPRINTF(MinorMem, "Issuing memory barrier inst: %s\n", *inst);
788  lsq.issuedMemBarrierInst(inst);
789  }
790 
791  if (inst->traceData && setTraceTimeOnIssue) {
792  inst->traceData->setWhen(curTick());
793  }
794 
795  if (issued_mem_ref)
796  num_mem_insts_issued++;
797 
798  if (discarded) {
799  num_insts_discarded++;
800  } else if (!inst->isBubble()) {
801  num_insts_issued++;
802 
803  if (num_insts_issued == issueLimit)
804  DPRINTF(MinorExecute, "Reached inst issue limit\n");
805  }
806 
807  thread.inputIndex++;
808  DPRINTF(MinorExecute, "Stepping to next inst inputIndex: %d\n",
809  thread.inputIndex);
810  }
811 
812  /* Got to the end of a line */
813  if (thread.inputIndex == insts_in->width()) {
814  popInput(thread_id);
815  /* Set insts_in to null to force us to leave the surrounding
816  * loop */
817  insts_in = NULL;
818 
819  if (processMoreThanOneInput) {
820  DPRINTF(MinorExecute, "Wrapping\n");
821  insts_in = getInput(thread_id);
822  }
823  }
824  } while (insts_in && thread.inputIndex < insts_in->width() &&
825  /* We still have instructions */
826  fu_index != numFuncUnits && /* Not visited all FUs */
827  issued && /* We've not yet failed to issue an instruction */
828  num_insts_issued != issueLimit && /* Still allowed to issue */
829  num_mem_insts_issued != memoryIssueLimit);
830 
831  return num_insts_issued;
832 }
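/* Note on early memory issue (summarising the interplay with commit()
 * below): when allowEarlyMemIssue is set, issue() records in
 * inst->instToWaitFor the newest instruction the memory op depends on
 * (or the last memory barrier, if that is younger) and marks the op
 * canEarlyIssue; commit() may then send the access to the LSQ once the
 * head of inFlightInsts has passed that sequence number, before the
 * memory op itself reaches the head of the queue */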
833 
834 bool
835 Execute::tryPCEvents(ThreadID thread_id)
836 {
837  ThreadContext *thread = cpu.getContext(thread_id);
838  unsigned int num_pc_event_checks = 0;
839 
840  /* Handle PC events on instructions */
841  Addr oldPC;
842  do {
843  oldPC = thread->instAddr();
844  cpu.threads[thread_id]->pcEventQueue.service(oldPC, thread);
845  num_pc_event_checks++;
846  } while (oldPC != thread->instAddr());
847 
848  if (num_pc_event_checks > 1) {
849  DPRINTF(PCEvent, "Acting on PC Event to PC: %s\n",
850  thread->pcState());
851  }
852 
853  return num_pc_event_checks > 1;
854 }
855 
856 void
857 Execute::doInstCommitAccounting(MinorDynInstPtr inst)
858 {
859  assert(!inst->isFault());
860 
861  MinorThread *thread = cpu.threads[inst->id.threadId];
862 
863  /* Increment the many and various inst and op counts in the
864  * thread and system */
865  if (!inst->staticInst->isMicroop() || inst->staticInst->isLastMicroop())
866  {
867  thread->numInst++;
868  thread->threadStats.numInsts++;
869  cpu.stats.numInsts++;
870  cpu.system->totalNumInsts++;
871 
872  /* Act on events related to instruction counts */
873  thread->comInstEventQueue.serviceEvents(thread->numInst);
874  }
875  thread->numOp++;
876  thread->threadStats.numOps++;
877  cpu.stats.numOps++;
878  cpu.stats.committedInstType[inst->id.threadId]
879  [inst->staticInst->opClass()]++;
880 
881  /* Set the CP SeqNum to the numOps commit number */
882  if (inst->traceData)
883  inst->traceData->setCPSeq(thread->numOp);
884 
885  cpu.probeInstCommit(inst->staticInst, inst->pc.instAddr());
886 }
887 
888 bool
889 Execute::commitInst(MinorDynInstPtr inst, bool early_memory_issue,
890  BranchData &branch, Fault &fault, bool &committed,
891  bool &completed_mem_issue)
892 {
893  ThreadID thread_id = inst->id.threadId;
894  ThreadContext *thread = cpu.getContext(thread_id);
895 
896  bool completed_inst = true;
897  fault = NoFault;
898 
899  /* Is the thread for this instruction suspended? In that case, just
900  * stall as long as there are no pending interrupts */
901  if (thread->status() == ThreadContext::Suspended &&
902  !isInterrupted(thread_id))
903  {
904  panic("We should never hit the case where we try to commit from a "
905  "suspended thread as the streamSeqNum should not match");
906  } else if (inst->isFault()) {
907  ExecContext context(cpu, *cpu.threads[thread_id], *this, inst);
908 
909  DPRINTF(MinorExecute, "Fault inst reached Execute: %s\n",
910  inst->fault->name());
911 
912  fault = inst->fault;
913  inst->fault->invoke(thread, NULL);
914 
915  tryToBranch(inst, fault, branch);
916  } else if (inst->staticInst->isMemRef()) {
917  /* Memory accesses are executed in two parts:
918  * executeMemRefInst -- calculates the EA and issues the access
919  * to memory. This is done here.
920  * handleMemResponse -- handles the response packet, done by
921  * Execute::commit
922  *
923  * While the memory access is in its FU, the EA is being
924  * calculated. At the end of the FU, when it is ready to
925  * 'commit' (in this function), the access is presented to the
926  * memory queues. When a response comes back from memory,
927  * Execute::commit will commit it.
928  */
929  bool predicate_passed = false;
930  bool completed_mem_inst = executeMemRefInst(inst, branch,
931  predicate_passed, fault);
932 
933  if (completed_mem_inst && fault != NoFault) {
934  if (early_memory_issue) {
935  DPRINTF(MinorExecute, "Fault in early executing inst: %s\n",
936  fault->name());
937  /* Don't execute the fault, just stall the instruction
938  * until it gets to the head of inFlightInsts */
939  inst->canEarlyIssue = false;
940  /* Not completed as we'll come here again to pick up
941  * the fault when we get to the end of the FU */
942  completed_inst = false;
943  } else {
944  DPRINTF(MinorExecute, "Fault in execute: %s\n",
945  fault->name());
946  fault->invoke(thread, NULL);
947 
948  tryToBranch(inst, fault, branch);
949  completed_inst = true;
950  }
951  } else {
952  completed_inst = completed_mem_inst;
953  }
954  completed_mem_issue = completed_inst;
955  } else if (inst->isInst() && inst->staticInst->isMemBarrier() &&
956  !lsq.canPushIntoStoreBuffer())
957  {
958  DPRINTF(MinorExecute, "Can't commit data barrier inst: %s yet as"
959  " there isn't space in the store buffer\n", *inst);
960 
961  completed_inst = false;
962  } else if (inst->isInst() && inst->staticInst->isQuiesce()
963  && !branch.isBubble()){
964  /* This instruction can suspend, need to be able to communicate
965  * backwards, so no other branches may evaluate this cycle*/
966  completed_inst = false;
967  } else {
968  ExecContext context(cpu, *cpu.threads[thread_id], *this, inst);
969 
970  DPRINTF(MinorExecute, "Committing inst: %s\n", *inst);
971 
972  fault = inst->staticInst->execute(&context,
973  inst->traceData);
974 
975  /* Set the predicate for tracing and dump */
976  if (inst->traceData)
977  inst->traceData->setPredicate(context.readPredicate());
978 
979  committed = true;
980 
981  if (fault != NoFault) {
982  if (inst->traceData) {
983  if (DTRACE(ExecFaulting)) {
984  inst->traceData->setFaulting(true);
985  } else {
986  delete inst->traceData;
987  inst->traceData = NULL;
988  }
989  }
990 
991  DPRINTF(MinorExecute, "Fault in execute of inst: %s fault: %s\n",
992  *inst, fault->name());
993  fault->invoke(thread, inst->staticInst);
994  }
995 
996  doInstCommitAccounting(inst);
997  tryToBranch(inst, fault, branch);
998  }
999 
1000  if (completed_inst) {
1001  /* Keep a copy of this instruction's predictionSeqNum just in case
1002  * we need to issue a branch without an instruction (such as an
1003  * interrupt) */
1004  executeInfo[thread_id].lastPredictionSeqNum = inst->id.predictionSeqNum;
1005 
1006  /* Check to see if this instruction suspended the current thread. */
1007  if (!inst->isFault() &&
1008  thread->status() == ThreadContext::Suspended &&
1009  branch.isBubble() && /* It didn't branch too */
1010  !isInterrupted(thread_id)) /* Don't suspend if we have
1011  interrupts */
1012  {
1013  TheISA::PCState resume_pc = cpu.getContext(thread_id)->pcState();
1014 
1015  assert(resume_pc.microPC() == 0);
1016 
1017  DPRINTF(MinorInterrupt, "Suspending thread: %d from Execute"
1018  " inst: %s\n", thread_id, *inst);
1019 
1020  cpu.stats.numFetchSuspends++;
1021 
1022  updateBranchData(thread_id, BranchData::SuspendThread, inst,
1023  resume_pc, branch);
1024  }
1025  }
1026 
1027  return completed_inst;
1028 }
1029 
1030 void
1031 Execute::commit(ThreadID thread_id, bool only_commit_microops, bool discard,
1032  BranchData &branch)
1033 {
1034  Fault fault = NoFault;
1035  Cycles now = cpu.curCycle();
1036  ExecuteThreadInfo &ex_info = executeInfo[thread_id];
1037 
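 /* Outline of the loop below: commit tries to complete as many
  * instructions as possible from the head of inFlightInsts each cycle --
  * memory responses returned by the LSQ, no-cost instructions,
  * instructions sitting at the ends of their FUs and, when allowed,
  * memory ops sent early to the LSQ.  An instruction is committed when
  * its effects are applied to CPU state, or discarded when its
  * streamSeqNum no longer matches the thread's current stream */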
1063  /* Has an instruction been completed? Once this becomes false, we stop
1064  * trying to complete instructions. */
1065  bool completed_inst = true;
1066 
1067  /* Number of insts committed this cycle to check against commitLimit */
1068  unsigned int num_insts_committed = 0;
1069 
1070  /* Number of memory access instructions committed to check against
1071  * memCommitLimit */
1072  unsigned int num_mem_refs_committed = 0;
1073 
1074  if (only_commit_microops && !ex_info.inFlightInsts->empty()) {
1075  DPRINTF(MinorInterrupt, "Only commit microops %s %d\n",
1076  *(ex_info.inFlightInsts->front().inst),
1077  ex_info.lastCommitWasEndOfMacroop);
1078  }
1079 
1080  while (!ex_info.inFlightInsts->empty() && /* Some more instructions to process */
1081  !branch.isStreamChange() && /* No real branch */
1082  fault == NoFault && /* No faults */
1083  completed_inst && /* Still finding instructions to execute */
1084  num_insts_committed != commitLimit /* Not reached commit limit */
1085  )
1086  {
1087  if (only_commit_microops) {
1088  DPRINTF(MinorInterrupt, "Committing tail of insts before"
1089  " interrupt: %s\n",
1090  *(ex_info.inFlightInsts->front().inst));
1091  }
1092 
1093  QueuedInst *head_inflight_inst = &(ex_info.inFlightInsts->front());
1094 
1095  InstSeqNum head_exec_seq_num =
1096  head_inflight_inst->inst->id.execSeqNum;
1097 
1098  /* The instruction we actually process if completed_inst
1099  * remains true to the end of the loop body.
1100  * Start by considering the head of the in flight insts queue */
1101  MinorDynInstPtr inst = head_inflight_inst->inst;
1102 
1103  bool committed_inst = false;
1104  bool discard_inst = false;
1105  bool completed_mem_ref = false;
1106  bool issued_mem_ref = false;
1107  bool early_memory_issue = false;
1108 
1109  /* Must set this again to go around the loop */
1110  completed_inst = false;
1111 
1112  /* If we're just completing a macroop before an interrupt or drain,
1113  * can we still commit another microop (rather than a memory response)
1114  * without crossing into the next full instruction? */
1115  bool can_commit_insts = !ex_info.inFlightInsts->empty() &&
1116  !(only_commit_microops && ex_info.lastCommitWasEndOfMacroop);
1117 
1118  /* Can we find a mem response for this inst */
1119  LSQ::LSQRequestPtr mem_response =
1120  (inst->inLSQ ? lsq.findResponse(inst) : NULL);
1121 
1122  DPRINTF(MinorExecute, "Trying to commit canCommitInsts: %d\n",
1123  can_commit_insts);
1124 
1125  /* Test for PC events after every instruction */
1126  if (isInbetweenInsts(thread_id) && tryPCEvents(thread_id)) {
1127  ThreadContext *thread = cpu.getContext(thread_id);
1128 
1129  /* Branch as there was a change in PC */
1130  updateBranchData(thread_id, BranchData::UnpredictedBranch,
1131  MinorDynInst::bubble(), thread->pcState(), branch);
1132  } else if (mem_response &&
1133  num_mem_refs_committed < memoryCommitLimit)
1134  {
1135  /* Try to commit from the memory responses next */
1136  discard_inst = inst->id.streamSeqNum !=
1137  ex_info.streamSeqNum || discard;
1138 
1139  DPRINTF(MinorExecute, "Trying to commit mem response: %s\n",
1140  *inst);
1141 
1142  /* Complete or discard the response */
1143  if (discard_inst) {
1144  DPRINTF(MinorExecute, "Discarding mem inst: %s as its"
1145  " stream state was unexpected, expected: %d\n",
1146  *inst, ex_info.streamSeqNum);
1147 
1148  lsq.popResponse(mem_response);
1149  } else {
1150  handleMemResponse(inst, mem_response, branch, fault);
1151  committed_inst = true;
1152  }
1153 
1154  completed_mem_ref = true;
1155  completed_inst = true;
1156  } else if (can_commit_insts) {
1157  /* If true, this instruction will, subject to timing tweaks,
1158  * be considered for completion. try_to_commit flattens
1159  * the `if' tree a bit and allows other tests for inst
1160  * commit to be inserted here. */
1161  bool try_to_commit = false;
1162 
1163  /* Try and issue memory ops early if they:
1164  * - Can push a request into the LSQ
1165  * - Have reached the end of their FUs
1166  * - Have had all their dependencies satisfied
1167  * - Are from the right stream
1168  *
1169  * For any other case, leave it to the normal instruction
1170  * issue below to handle them.
1171  */
1172  if (!ex_info.inFUMemInsts->empty() && lsq.canRequest()) {
1173  DPRINTF(MinorExecute, "Trying to commit from mem FUs\n");
1174 
1175  const MinorDynInstPtr head_mem_ref_inst =
1176  ex_info.inFUMemInsts->front().inst;
1177  FUPipeline *fu = funcUnits[head_mem_ref_inst->fuIndex];
1178  const MinorDynInstPtr &fu_inst = fu->front().inst;
1179 
1180  /* Use this, possibly out of order, inst as the one
1181  * to 'commit'/send to the LSQ */
1182  if (!fu_inst->isBubble() &&
1183  !fu_inst->inLSQ &&
1184  fu_inst->canEarlyIssue &&
1185  ex_info.streamSeqNum == fu_inst->id.streamSeqNum &&
1186  head_exec_seq_num > fu_inst->instToWaitFor)
1187  {
1188  DPRINTF(MinorExecute, "Issuing mem ref early"
1189  " inst: %s instToWaitFor: %d\n",
1190  *(fu_inst), fu_inst->instToWaitFor);
1191 
1192  inst = fu_inst;
1193  try_to_commit = true;
1194  early_memory_issue = true;
1195  completed_inst = true;
1196  }
1197  }
1198 
1199  /* Try and commit FU-less insts */
1200  if (!completed_inst && inst->isNoCostInst()) {
1201  DPRINTF(MinorExecute, "Committing no cost inst: %s", *inst);
1202 
1203  try_to_commit = true;
1204  completed_inst = true;
1205  }
1206 
1207  /* Try to issue from the ends of FUs and the inFlightInsts
1208  * queue */
1209  if (!completed_inst && !inst->inLSQ) {
1210  DPRINTF(MinorExecute, "Trying to commit from FUs\n");
1211 
1212  /* Try to commit from a functional unit */
1213  /* Is the head inst of the expected inst's FU actually the
1214  * expected inst? */
1215  QueuedInst &fu_inst =
1216  funcUnits[inst->fuIndex]->front();
1217  InstSeqNum fu_inst_seq_num = fu_inst.inst->id.execSeqNum;
1218 
1219  if (fu_inst.inst->isBubble()) {
1220  /* No instruction ready */
1221  completed_inst = false;
1222  } else if (fu_inst_seq_num != head_exec_seq_num) {
1223  /* Past instruction: we must have already executed it
1224  * in the same cycle and so the head inst isn't
1225  * actually at the end of its pipeline
1226  * Future instruction: handled above and only for
1227  * mem refs on their way to the LSQ */
1228  } else if (fu_inst.inst->id == inst->id) {
1229  /* All instructions can be committed if they have the
1230  * right execSeqNum and there are no in-flight
1231  * mem insts before us */
1232  try_to_commit = true;
1233  completed_inst = true;
1234  }
1235  }
1236 
1237  if (try_to_commit) {
1238  discard_inst = inst->id.streamSeqNum !=
1239  ex_info.streamSeqNum || discard;
1240 
1241  /* Is this instruction discardable as its streamSeqNum
1242  * doesn't match? */
1243  if (!discard_inst) {
1244  /* Try to commit or discard a non-memory instruction.
1245  * Memory ops are actually 'committed' from their FUs
1246  * and 'issued' into the memory system so we need to
1247  * account for them later (commit_was_mem_issue gets
1248  * set) */
1249  if (inst->extraCommitDelayExpr) {
1250  DPRINTF(MinorExecute, "Evaluating expression for"
1251  " extra commit delay inst: %s\n", *inst);
1252 
1253  ThreadContext *thread = cpu.getContext(thread_id);
1254 
1255  TimingExprEvalContext context(inst->staticInst,
1256  thread, NULL);
1257 
1258  uint64_t extra_delay = inst->extraCommitDelayExpr->
1259  eval(context);
1260 
1261  DPRINTF(MinorExecute, "Extra commit delay expr"
1262  " result: %d\n", extra_delay);
1263 
1264  if (extra_delay < 128) {
1265  inst->extraCommitDelay += Cycles(extra_delay);
1266  } else {
1267  DPRINTF(MinorExecute, "Extra commit delay was"
1268  " very long: %d\n", extra_delay);
1269  }
1270  inst->extraCommitDelayExpr = NULL;
1271  }
1272 
1273  /* Move the extraCommitDelay from the instruction
1274  * into the minimumCommitCycle */
1275  if (inst->extraCommitDelay != Cycles(0)) {
1276  inst->minimumCommitCycle = cpu.curCycle() +
1277  inst->extraCommitDelay;
1278  inst->extraCommitDelay = Cycles(0);
1279  }
1280 
1281  /* @todo Think about making lastMemBarrier be
1282  * MAX_UINT_64 to avoid using 0 as a marker value */
1283  if (!inst->isFault() && inst->isMemRef() &&
1284  lsq.getLastMemBarrier(thread_id) <
1285  inst->id.execSeqNum &&
1286  lsq.getLastMemBarrier(thread_id) != 0)
1287  {
1288  DPRINTF(MinorExecute, "Not committing inst: %s yet"
1289  " as there are incomplete barriers in flight\n",
1290  *inst);
1291  completed_inst = false;
1292  } else if (inst->minimumCommitCycle > now) {
1293  DPRINTF(MinorExecute, "Not committing inst: %s yet"
1294  " as it wants to be stalled for %d more cycles\n",
1295  *inst, inst->minimumCommitCycle - now);
1296  completed_inst = false;
1297  } else {
1298  completed_inst = commitInst(inst,
1299  early_memory_issue, branch, fault,
1300  committed_inst, issued_mem_ref);
1301  }
1302  } else {
1303  /* Discard instruction */
1304  completed_inst = true;
1305  }
1306 
1307  if (completed_inst) {
1308  /* Allow the pipeline to advance. If the FU head
1309  * instruction wasn't the inFlightInsts head
1310  * but had already been committed, it would have
1311  * unstalled the pipeline before here */
1312  if (inst->fuIndex != noCostFUIndex) {
1313  DPRINTF(MinorExecute, "Unstalling %d for inst %s\n", inst->fuIndex, inst->id);
1314  funcUnits[inst->fuIndex]->stalled = false;
1315  }
1316  }
1317  }
1318  } else {
1319  DPRINTF(MinorExecute, "No instructions to commit\n");
1320  completed_inst = false;
1321  }
1322 
1323  /* All discardable instructions must also be 'completed' by now */
1324  assert(!(discard_inst && !completed_inst));
1325 
1326  /* Instruction committed but was discarded due to streamSeqNum
1327  * mismatch */
1328  if (discard_inst) {
1329  DPRINTF(MinorExecute, "Discarding inst: %s as its stream"
1330  " state was unexpected, expected: %d\n",
1331  *inst, ex_info.streamSeqNum);
1332 
1333  if (fault == NoFault)
1334  cpu.stats.numDiscardedOps++;
1335  }
1336 
1337  /* Mark the mem inst as being in the LSQ */
1338  if (issued_mem_ref) {
1339  inst->fuIndex = 0;
1340  inst->inLSQ = true;
1341  }
1342 
1343  /* Pop issued (to LSQ) and discarded mem refs from the inFUMemInsts
1344  * as they've *definitely* exited the FUs */
1345  if (completed_inst && inst->isMemRef()) {
1346  /* The MemRef could have been discarded from the FU or the memory
1347  * queue, so just check an FU instruction */
1348  if (!ex_info.inFUMemInsts->empty() &&
1349  ex_info.inFUMemInsts->front().inst == inst)
1350  {
1351  ex_info.inFUMemInsts->pop();
1352  }
1353  }
1354 
1355  if (completed_inst && !(issued_mem_ref && fault == NoFault)) {
1356  /* Note that this includes discarded insts */
1357  DPRINTF(MinorExecute, "Completed inst: %s\n", *inst);
1358 
1359  /* Got to the end of a full instruction? */
1360  ex_info.lastCommitWasEndOfMacroop = inst->isFault() ||
1361  inst->isLastOpInInst();
1362 
1363  /* lastPredictionSeqNum is kept as a convenience to prevent its
1364  * value from changing too much on the minorview display */
1365  ex_info.lastPredictionSeqNum = inst->id.predictionSeqNum;
1366 
1367  /* Finished with the inst, remove it from the inst queue and
1368  * clear its dependencies */
1369  ex_info.inFlightInsts->pop();
1370 
1371  /* Complete barriers in the LSQ/move to store buffer */
1372  if (inst->isInst() && inst->staticInst->isMemBarrier()) {
1373  DPRINTF(MinorMem, "Completing memory barrier"
1374  " inst: %s committed: %d\n", *inst, committed_inst);
1375  lsq.completeMemBarrierInst(inst, committed_inst);
1376  }
1377 
1378  scoreboard[thread_id].clearInstDests(inst, inst->isMemRef());
1379  }
1380 
1381  /* Handle per-cycle instruction counting */
1382  if (committed_inst) {
1383  bool is_no_cost_inst = inst->isNoCostInst();
1384 
1385  /* Don't show no cost instructions as having taken a commit
1386  * slot */
1387  if (DTRACE(MinorTrace) && !is_no_cost_inst)
1388  ex_info.instsBeingCommitted.insts[num_insts_committed] = inst;
1389 
1390  if (!is_no_cost_inst)
1391  num_insts_committed++;
1392 
1393  if (num_insts_committed == commitLimit)
1394  DPRINTF(MinorExecute, "Reached inst commit limit\n");
1395 
1396  /* Re-set the time of the instruction if that's required for
1397  * tracing */
1398  if (inst->traceData) {
1399  if (setTraceTimeOnCommit)
1400  inst->traceData->setWhen(curTick());
1401  inst->traceData->dump();
1402  }
1403 
1404  if (completed_mem_ref)
1405  num_mem_refs_committed++;
1406 
1407  if (num_mem_refs_committed == memoryCommitLimit)
1408  DPRINTF(MinorExecute, "Reached mem ref commit limit\n");
1409  }
1410  }
1411 }
1412 
1413 bool
1414 Execute::isInbetweenInsts(ThreadID thread_id) const
1415 {
1416  return executeInfo[thread_id].lastCommitWasEndOfMacroop &&
1417  !lsq.accessesInFlight();
1418 }
1419 
1420 void
1421 Execute::evaluate()
1422 {
1423  if (!inp.outputWire->isBubble())
1424  inputBuffer[inp.outputWire->threadId].setTail(*inp.outputWire);
1425 
1426  BranchData &branch = *out.inputWire;
1427 
1428  unsigned int num_issued = 0;
1429 
1430  /* Do all the cycle-wise activities for dcachePort here to potentially
1431  * free up input spaces in the LSQ's requests queue */
1432  lsq.step();
1433 
1434  /* Check interrupts first. Will halt commit if interrupt found */
1435  bool interrupted = false;
1436  ThreadID interrupt_tid = checkInterrupts(branch, interrupted);
1437 
1438  if (interrupt_tid != InvalidThreadID) {
1439  /* Signalling an interrupt this cycle, not issuing/committing from
1440  * any other threads */
1441  } else if (!branch.isBubble()) {
1442  /* It's important that this is here to carry Fetch1 wakeups to Fetch1
1443  * without overwriting them */
1444  DPRINTF(MinorInterrupt, "Execute skipping a cycle to allow old"
1445  " branch to complete\n");
1446  } else {
1447  ThreadID commit_tid = getCommittingThread();
1448 
1449  if (commit_tid != InvalidThreadID) {
1450  ExecuteThreadInfo& commit_info = executeInfo[commit_tid];
1451 
1452  DPRINTF(MinorExecute, "Attempting to commit [tid:%d]\n",
1453  commit_tid);
1454  /* commit can set stalled flags observable to issue and so *must* be
1455  * called first */
1456  if (commit_info.drainState != NotDraining) {
1457  if (commit_info.drainState == DrainCurrentInst) {
1458  /* Commit only micro-ops, don't kill anything else */
1459  commit(commit_tid, true, false, branch);
1460 
1461  if (isInbetweenInsts(commit_tid))
1462  setDrainState(commit_tid, DrainHaltFetch);
1463 
1464  /* Discard any generated branch */
1465  branch = BranchData::bubble();
1466  } else if (commit_info.drainState == DrainAllInsts) {
1467  /* Kill all instructions */
1468  while (getInput(commit_tid))
1469  popInput(commit_tid);
1470  commit(commit_tid, false, true, branch);
1471  }
1472  } else {
1473  /* Commit micro-ops only if interrupted. Otherwise, commit
1474  * anything you like */
1475  DPRINTF(MinorExecute, "Committing micro-ops for interrupt[tid:%d]\n",
1476  commit_tid);
1477  bool only_commit_microops = interrupted &&
1478  hasInterrupt(commit_tid);
1479  commit(commit_tid, only_commit_microops, false, branch);
1480  }
1481 
1482  /* Halt fetch, but don't do it until we have the current instruction in
1483  * the bag */
1484  if (commit_info.drainState == DrainHaltFetch) {
1485  updateBranchData(commit_tid, BranchData::HaltFetch,
1486  MinorDynInst::bubble(), TheISA::PCState(0), branch);
1487 
1488  cpu.wakeupOnEvent(Pipeline::ExecuteStageId);
1489  setDrainState(commit_tid, DrainAllInsts);
1490  }
1491  }
1492  ThreadID issue_tid = getIssuingThread();
1493  /* This will issue merrily even when interrupted in the sure and
1494  * certain knowledge that the interrupt will change the stream */
1495  if (issue_tid != InvalidThreadID) {
1496  DPRINTF(MinorExecute, "Attempting to issue [tid:%d]\n",
1497  issue_tid);
1498  num_issued = issue(issue_tid);
1499  }
1500 
1501  }
1502 
1503  /* Run logic to step functional units + decide if we are active on the next
1504  * clock cycle */
1505  std::vector<MinorDynInstPtr> next_issuable_insts;
1506  bool can_issue_next = false;
1507 
1508  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1509  /* Find the next issuable instruction for each thread and see if it can
1510  be issued */
1511  if (getInput(tid)) {
1512  unsigned int input_index = executeInfo[tid].inputIndex;
1513  MinorDynInstPtr inst = getInput(tid)->insts[input_index];
1514  if (inst->isFault()) {
1515  can_issue_next = true;
1516  } else if (!inst->isBubble()) {
1517  next_issuable_insts.push_back(inst);
1518  }
1519  }
1520  }
1521 
1522  bool becoming_stalled = true;
1523 
1524  /* Advance the pipelines and note whether they still need to be
1525  * advanced */
1526  for (unsigned int i = 0; i < numFuncUnits; i++) {
1527  FUPipeline *fu = funcUnits[i];
1528  fu->advance();
1529 
1530  /* If we need to tick again, the pipeline will have been left or set
1531  * to be unstalled */
1532  if (fu->occupancy !=0 && !fu->stalled)
1533  becoming_stalled = false;
1534 
1535  /* Could we possibly issue the next instruction from any thread?
1536  * This is quite an expensive test and is only used to determine
1537  * if the CPU should remain active, only run it if we aren't sure
1538  * we are active next cycle yet */
1539  for (auto inst : next_issuable_insts) {
1540  if (!fu->stalled && fu->provides(inst->staticInst->opClass()) &&
1541  scoreboard[inst->id.threadId].canInstIssue(inst,
1542  NULL, NULL, cpu.curCycle() + Cycles(1),
1543  cpu.getContext(inst->id.threadId))) {
1544  can_issue_next = true;
1545  break;
1546  }
1547  }
1548  }
1549 
1550  bool head_inst_might_commit = false;
1551 
1552  /* Could the head in flight insts be committed */
1553  for (auto const &info : executeInfo) {
1554  if (!info.inFlightInsts->empty()) {
1555  const QueuedInst &head_inst = info.inFlightInsts->front();
1556 
1557  if (head_inst.inst->isNoCostInst()) {
1558  head_inst_might_commit = true;
1559  } else {
1560  FUPipeline *fu = funcUnits[head_inst.inst->fuIndex];
1561  if ((fu->stalled &&
1562  fu->front().inst->id == head_inst.inst->id) ||
1563  lsq.findResponse(head_inst.inst))
1564  {
1565  head_inst_might_commit = true;
1566  break;
1567  }
1568  }
1569  }
1570  }
1571 
1572  DPRINTF(Activity, "Need to tick num issued insts: %s%s%s%s%s%s\n",
1573  (num_issued != 0 ? " (issued some insts)" : ""),
1574  (becoming_stalled ? "(becoming stalled)" : "(not becoming stalled)"),
1575  (can_issue_next ? " (can issue next inst)" : ""),
1576  (head_inst_might_commit ? "(head inst might commit)" : ""),
1577  (lsq.needsToTick() ? " (LSQ needs to tick)" : ""),
1578  (interrupted ? " (interrupted)" : ""));
1579 
1580  bool need_to_tick =
1581  num_issued != 0 || /* Issued some insts this cycle */
1582  !becoming_stalled || /* Some FU pipelines can still move */
1583  can_issue_next || /* Can still issue a new inst */
1584  head_inst_might_commit || /* Could possibly commit the next inst */
1585  lsq.needsToTick() || /* Must step the dcache port */
1586  interrupted; /* There are pending interrupts */
1587 
1588  if (!need_to_tick) {
1589  DPRINTF(Activity, "The next cycle might be skippable as there are no"
1590  " advanceable FUs\n");
1591  }
1592 
1593  /* Wake up if we need to tick again */
1594  if (need_to_tick)
1595  cpu.wakeupOnEvent(Pipeline::ExecuteStageId);
1596 
1597  /* Note activity of following buffer */
1598  if (!branch.isBubble())
1599  cpu.activityRecorder->activity();
1600 
1601  /* Make sure the input (if any left) is pushed */
1602  if (!inp.outputWire->isBubble())
1603  inputBuffer[inp.outputWire->threadId].pushTail();
1604 }
1605 
1606 ThreadID
1607 Execute::checkInterrupts(BranchData& branch, bool& interrupted)
1608 {
1609  ThreadID tid = interruptPriority;
1610  /* Evaluate interrupts in round-robin based upon service */
1611  do {
1612  /* Has an interrupt been signalled? This may not be acted on
1613  * straightaway so this is different from took_interrupt */
1614  bool thread_interrupted = false;
1615 
1616  if (FullSystem && cpu.getInterruptController(tid)) {
1617  /* This is here because it seems that after drainResume the
1618  * interrupt controller isn't always set */
1619  thread_interrupted = executeInfo[tid].drainState == NotDraining &&
1620  isInterrupted(tid);
1621  interrupted = interrupted || thread_interrupted;
1622  } else {
1623  DPRINTF(MinorInterrupt, "No interrupt controller\n");
1624  }
1625  DPRINTF(MinorInterrupt, "[tid:%d] thread_interrupted?=%d isInbetweenInsts?=%d\n",
1626  tid, thread_interrupted, isInbetweenInsts(tid));
1627  /* Act on interrupts */
1628  if (thread_interrupted && isInbetweenInsts(tid)) {
1629  if (takeInterrupt(tid, branch)) {
1630  interruptPriority = tid;
1631  return tid;
1632  }
1633  } else {
1634  tid = (tid + 1) % cpu.numThreads;
1635  }
1636  } while (tid != interruptPriority);
1637 
1638  return InvalidThreadID;
1639 }
1640 
1641 bool
1642 Execute::hasInterrupt(ThreadID thread_id)
1643 {
1644  if (FullSystem && cpu.getInterruptController(thread_id)) {
1645  return executeInfo[thread_id].drainState == NotDraining &&
1646  isInterrupted(thread_id);
1647  }
1648 
1649  return false;
1650 }
1651 
1652 void
1653 Execute::minorTrace() const
1654 {
1655  std::ostringstream insts;
1656  std::ostringstream stalled;
1657 
1658  executeInfo[0].instsBeingCommitted.reportData(insts);
1659  lsq.minorTrace();
1660  inputBuffer[0].minorTrace();
1661  scoreboard[0].minorTrace();
1662 
1663  /* Report functional unit stalling in one string */
1664  unsigned int i = 0;
1665  while (i < numFuncUnits)
1666  {
1667  stalled << (funcUnits[i]->stalled ? '1' : 'E');
1668  i++;
1669  if (i != numFuncUnits)
1670  stalled << ',';
1671  }
1672 
1673  MINORTRACE("insts=%s inputIndex=%d streamSeqNum=%d"
1674  " stalled=%s drainState=%d isInbetweenInsts=%d\n",
1675  insts.str(), executeInfo[0].inputIndex, executeInfo[0].streamSeqNum,
1676  stalled.str(), executeInfo[0].drainState, isInbetweenInsts(0));
1677 
1678  std::for_each(funcUnits.begin(), funcUnits.end(),
1679  std::mem_fun(&FUPipeline::minorTrace));
1680 
1681  executeInfo[0].inFlightInsts->minorTrace();
1682  executeInfo[0].inFUMemInsts->minorTrace();
1683 }
1684 
1685 inline ThreadID
1686 Execute::getCommittingThread()
1687 {
1688  std::vector<ThreadID> priority_list;
1689 
1690  switch (cpu.threadPolicy) {
1691  case Enums::SingleThreaded:
1692  return 0;
1693  case Enums::RoundRobin:
1694  priority_list = cpu.roundRobinPriority(commitPriority);
1695  break;
1696  case Enums::Random:
1697  priority_list = cpu.randomPriority();
1698  break;
1699  default:
1700  panic("Invalid thread policy");
1701  }
1702 
1703  for (auto tid : priority_list) {
1704  ExecuteThreadInfo &ex_info = executeInfo[tid];
1705  bool can_commit_insts = !ex_info.inFlightInsts->empty();
1706  if (can_commit_insts) {
1707  QueuedInst *head_inflight_inst = &(ex_info.inFlightInsts->front());
1708  MinorDynInstPtr inst = head_inflight_inst->inst;
1709 
1710  can_commit_insts = can_commit_insts &&
1711  (!inst->inLSQ || (lsq.findResponse(inst) != NULL));
1712 
1713  if (!inst->inLSQ) {
1714  bool can_transfer_mem_inst = false;
1715  if (!ex_info.inFUMemInsts->empty() && lsq.canRequest()) {
1716  const MinorDynInstPtr head_mem_ref_inst =
1717  ex_info.inFUMemInsts->front().inst;
1718  FUPipeline *fu = funcUnits[head_mem_ref_inst->fuIndex];
1719  const MinorDynInstPtr &fu_inst = fu->front().inst;
1720  can_transfer_mem_inst =
1721  !fu_inst->isBubble() &&
1722  fu_inst->id.threadId == tid &&
1723  !fu_inst->inLSQ &&
1724  fu_inst->canEarlyIssue &&
1725  inst->id.execSeqNum > fu_inst->instToWaitFor;
1726  }
1727 
1728  bool can_execute_fu_inst = inst->fuIndex == noCostFUIndex;
1729  if (can_commit_insts && !can_transfer_mem_inst &&
1730  inst->fuIndex != noCostFUIndex)
1731  {
1732  QueuedInst& fu_inst = funcUnits[inst->fuIndex]->front();
1733  can_execute_fu_inst = !fu_inst.inst->isBubble() &&
1734  fu_inst.inst->id == inst->id;
1735  }
1736 
1737  can_commit_insts = can_commit_insts &&
1738  (can_transfer_mem_inst || can_execute_fu_inst);
1739  }
1740  }
1741 
1742 
1743  if (can_commit_insts) {
1744  commitPriority = tid;
1745  return tid;
1746  }
1747  }
1748 
1749  return InvalidThreadID;
1750 }
1751 
1752 inline ThreadID
1753 Execute::getIssuingThread()
1754 {
1755  std::vector<ThreadID> priority_list;
1756 
1757  switch (cpu.threadPolicy) {
1758  case Enums::SingleThreaded:
1759  return 0;
1760  case Enums::RoundRobin:
1761  priority_list = cpu.roundRobinPriority(issuePriority);
1762  break;
1763  case Enums::Random:
1764  priority_list = cpu.randomPriority();
1765  break;
1766  default:
1767  panic("Invalid thread scheduling policy.");
1768  }
1769 
1770  for (auto tid : priority_list) {
1771  if (getInput(tid)) {
1772  issuePriority = tid;
1773  return tid;
1774  }
1775  }
1776 
1777  return InvalidThreadID;
1778 }
1779 
1780 void
1781 Execute::drainResume()
1782 {
1783  DPRINTF(Drain, "MinorExecute drainResume\n");
1784 
1785  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1786  setDrainState(tid, NotDraining);
1787  }
1788 
1789  cpu.wakeupOnEvent(Pipeline::ExecuteStageId);
1790 }
1791 
1792 std::ostream &operator <<(std::ostream &os, Execute::DrainState state)
1793 {
1794  switch (state)
1795  {
1796  case Execute::NotDraining:
1797  os << "NotDraining";
1798  break;
1799  case Execute::DrainCurrentInst:
1800  os << "DrainCurrentInst";
1801  break;
1802  case Execute::DrainHaltFetch:
1803  os << "DrainHaltFetch";
1804  break;
1805  case Execute::DrainAllInsts:
1806  os << "DrainAllInsts";
1807  break;
1808  default:
1809  os << "Drain-" << static_cast<int>(state);
1810  break;
1811  }
1812 
1813  return os;
1814 }
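This inserter is what allows a DrainState value to be printed by name, for example by the DPRINTF(Drain, ...) call in setDrainState() below. A standalone sketch of the same pattern for an illustrative enum (not the gem5 type):

// Standalone sketch (illustrative): printing an enum by name via an
// ostream inserter, mirroring the operator<< for Execute::DrainState.
#include <iostream>
#include <ostream>

enum class Phase { Idle, Running, Stopping };

std::ostream &operator <<(std::ostream &os, Phase p)
{
    switch (p) {
      case Phase::Idle:     os << "Idle"; break;
      case Phase::Running:  os << "Running"; break;
      case Phase::Stopping: os << "Stopping"; break;
      default:              os << "Phase-" << static_cast<int>(p); break;
    }
    return os;
}

int main()
{
    std::cout << Phase::Running << std::endl;  // prints "Running"
    return 0;
}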
1815 
1816 void
1817 Execute::setDrainState(ThreadID thread_id, DrainState state)
1818 {
1819  DPRINTF(Drain, "setDrainState[%d]: %s\n", thread_id, state);
1820  executeInfo[thread_id].drainState = state;
1821 }
1822 
1823 unsigned int
1824 Execute::drain()
1825 {
1826  DPRINTF(Drain, "MinorExecute drain\n");
1827 
1828  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1829  if (executeInfo[tid].drainState == NotDraining) {
1830  cpu.wakeupOnEvent(Pipeline::ExecuteStageId);
1831 
1832  /* Go to DrainCurrentInst if we're between microops
1833  * or waiting on an unbufferable memory operation.
1834  * Otherwise we can go straight to DrainHaltFetch
1835  */
1836  if (isInbetweenInsts(tid))
1837  setDrainState(tid, DrainHaltFetch);
1838  else
1839  setDrainState(tid, DrainCurrentInst);
1840  }
1841  }
1842  return (isDrained() ? 0 : 1);
1843 }
1844 
1845 bool
1846 Execute::isDrained()
1847 {
1848  if (!lsq.isDrained())
1849  return false;
1850 
1851  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1852  if (!inputBuffer[tid].empty() ||
1853  !executeInfo[tid].inFlightInsts->empty()) {
1854 
1855  return false;
1856  }
1857  }
1858 
1859  return true;
1860 }
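Together, drain() and isDrained() form a simple two-step drain protocol: drain() returns 0 if Execute already has nothing in flight and 1 if it still needs to run, and isDrained() becomes true only once the LSQ, the input buffers and every thread's inFlightInsts queue are empty. A standalone toy sketch of that shape (the names and tick-based retirement are illustrative assumptions, not gem5 code):

// Standalone toy sketch (illustrative, not gem5 code) of the drain shape:
// drain() reports whether work remains; isDrained() flips once it is gone.
#include <iostream>

struct ToyStage
{
    int inFlight = 3;       // pretend instructions still in the pipeline
    bool draining = false;

    unsigned int drain() { draining = true; return isDrained() ? 0 : 1; }
    bool isDrained() const { return inFlight == 0; }
    void tick() { if (draining && inFlight > 0) inFlight--; }
};

int main()
{
    ToyStage stage;
    unsigned int outstanding = stage.drain();   // 1: work still in flight
    while (outstanding && !stage.isDrained())
        stage.tick();                           // let in-flight work retire
    std::cout << std::boolalpha << stage.isDrained() << std::endl;  // true
    return 0;
}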
1861 
1862 Execute::~Execute()
1863 {
1864  for (unsigned int i = 0; i < numFuncUnits; i++)
1865  delete funcUnits[i];
1866 
1867  for (ThreadID tid = 0; tid < cpu.numThreads; tid++)
1868  delete executeInfo[tid].inFlightInsts;
1869 }
1870 
1871 bool
1872 Execute::instIsRightStream(MinorDynInstPtr inst)
1873 {
1874  return inst->id.streamSeqNum == executeInfo[inst->id.threadId].streamSeqNum;
1875 }
1876 
1877 bool
1878 Execute::instIsHeadInst(MinorDynInstPtr inst)
1879 {
1880  bool ret = false;
1881 
1882  if (!executeInfo[inst->id.threadId].inFlightInsts->empty())
1883  ret = executeInfo[inst->id.threadId].inFlightInsts->front().inst->id == inst->id;
1884 
1885  return ret;
1886 }
1887 
1888 MinorCPU::MinorCPUPort &
1889 Execute::getDcachePort()
1890 {
1891  return lsq.getDcachePort();
1892 }
1893 
1894 }

Generated on Wed Sep 30 2020 14:02:08 for gem5 by doxygen 1.8.17