gem5  v19.0.0.0
execute.cc
1 /*
2  * Copyright (c) 2013-2014,2018-2019 ARM Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions are
16  * met: redistributions of source code must retain the above copyright
17  * notice, this list of conditions and the following disclaimer;
18  * redistributions in binary form must reproduce the above copyright
19  * notice, this list of conditions and the following disclaimer in the
20  * documentation and/or other materials provided with the distribution;
21  * neither the name of the copyright holders nor the names of its
22  * contributors may be used to endorse or promote products derived from
23  * this software without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  *
37  * Authors: Andrew Bardsley
38  */
39 
40 #include "cpu/minor/execute.hh"
41 
42 #include "arch/locked_mem.hh"
43 #include "arch/registers.hh"
44 #include "arch/utility.hh"
45 #include "cpu/minor/cpu.hh"
46 #include "cpu/minor/exec_context.hh"
47 #include "cpu/minor/fetch1.hh"
48 #include "cpu/minor/lsq.hh"
49 #include "cpu/op_class.hh"
50 #include "debug/Activity.hh"
51 #include "debug/Branch.hh"
52 #include "debug/Drain.hh"
53 #include "debug/MinorExecute.hh"
54 #include "debug/MinorInterrupt.hh"
55 #include "debug/MinorMem.hh"
56 #include "debug/MinorTrace.hh"
57 #include "debug/PCEvent.hh"
58 
59 namespace Minor
60 {
61 
62 Execute::Execute(const std::string &name_,
63  MinorCPU &cpu_,
64  MinorCPUParams &params,
65  Latch<ForwardInstData>::Output inp_,
66  Latch<BranchData>::Input out_) :
67  Named(name_),
68  inp(inp_),
69  out(out_),
70  cpu(cpu_),
71  issueLimit(params.executeIssueLimit),
72  memoryIssueLimit(params.executeMemoryIssueLimit),
73  commitLimit(params.executeCommitLimit),
74  memoryCommitLimit(params.executeMemoryCommitLimit),
75  processMoreThanOneInput(params.executeCycleInput),
76  fuDescriptions(*params.executeFuncUnits),
77  numFuncUnits(fuDescriptions.funcUnits.size()),
78  setTraceTimeOnCommit(params.executeSetTraceTimeOnCommit),
79  setTraceTimeOnIssue(params.executeSetTraceTimeOnIssue),
80  allowEarlyMemIssue(params.executeAllowEarlyMemoryIssue),
81  noCostFUIndex(fuDescriptions.funcUnits.size() + 1),
82  lsq(name_ + ".lsq", name_ + ".dcache_port",
83  cpu_, *this,
84  params.executeMaxAccessesInMemory,
85  params.executeMemoryWidth,
86  params.executeLSQRequestsQueueSize,
87  params.executeLSQTransfersQueueSize,
88  params.executeLSQStoreBufferSize,
89  params.executeLSQMaxStoreBufferStoresPerCycle),
90  executeInfo(params.numThreads, ExecuteThreadInfo(params.executeCommitLimit)),
91  interruptPriority(0),
92  issuePriority(0),
93  commitPriority(0)
94 {
95  if (commitLimit < 1) {
96  fatal("%s: executeCommitLimit must be >= 1 (%d)\n", name_,
97  commitLimit);
98  }
99 
100  if (issueLimit < 1) {
101  fatal("%s: executeIssueLimit must be >= 1 (%d)\n", name_,
102  issueLimit);
103  }
104 
105  if (memoryIssueLimit < 1) {
106  fatal("%s: executeMemoryIssueLimit must be >= 1 (%d)\n", name_,
107  memoryIssueLimit);
108  }
109 
110  if (memoryCommitLimit > commitLimit) {
111  fatal("%s: executeMemoryCommitLimit (%d) must be <="
112  " executeCommitLimit (%d)\n",
113  name_, memoryCommitLimit, commitLimit);
114  }
115 
116  if (params.executeInputBufferSize < 1) {
117  fatal("%s: executeInputBufferSize must be >= 1 (%d)\n", name_,
118  params.executeInputBufferSize);
119  }
120 
121  if (params.executeInputBufferSize < 1) {
122  fatal("%s: executeInputBufferSize must be >= 1 (%d)\n", name_,
123  params.executeInputBufferSize);
124  }
125 
126  /* This should be large enough to count all the in-FU instructions
127  * which need to be accounted for in the inFlightInsts
128  * queue */
129  unsigned int total_slots = 0;
130 
131  /* Make FUPipelines for each MinorFU */
132  for (unsigned int i = 0; i < numFuncUnits; i++) {
133  std::ostringstream fu_name;
134  MinorFU *fu_description = fuDescriptions.funcUnits[i];
135 
136  /* Note the total number of instruction slots (for sizing
137  * the inFlightInst queue) and the maximum latency of any FU
138  * (for sizing the activity recorder) */
139  total_slots += fu_description->opLat;
140 
141  fu_name << name_ << ".fu." << i;
142 
143  FUPipeline *fu = new FUPipeline(fu_name.str(), *fu_description, cpu);
144 
145  funcUnits.push_back(fu);
146  }
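/* At this point total_slots holds the sum of every FU's opLat; it is used
 * below to size each thread's inFlightInsts and inFUMemInsts queues. */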
147 
148  /* Check that there is a functional unit for all operation classes */
149  for (int op_class = No_OpClass + 1; op_class < Num_OpClasses; op_class++) {
150  bool found_fu = false;
151  unsigned int fu_index = 0;
152 
153  while (fu_index < numFuncUnits && !found_fu)
154  {
155  if (funcUnits[fu_index]->provides(
156  static_cast<OpClass>(op_class)))
157  {
158  found_fu = true;
159  }
160  fu_index++;
161  }
162 
163  if (!found_fu) {
164  warn("No functional unit for OpClass %s\n",
165  Enums::OpClassStrings[op_class]);
166  }
167  }
168 
169  /* Per-thread structures */
170  for (ThreadID tid = 0; tid < params.numThreads; tid++) {
171  std::string tid_str = std::to_string(tid);
172 
173  /* Input Buffers */
174  inputBuffer.push_back(
175  InputBuffer<ForwardInstData>(
176  name_ + ".inputBuffer" + tid_str, "insts",
177  params.executeInputBufferSize));
178 
179  /* Scoreboards */
180  scoreboard.push_back(Scoreboard(name_ + ".scoreboard" + tid_str));
181 
182  /* In-flight instruction records */
183  executeInfo[tid].inFlightInsts = new Queue<QueuedInst,
184  ReportTraitsAdaptor<QueuedInst> >(
185  name_ + ".inFlightInsts" + tid_str, "insts", total_slots);
186 
187  executeInfo[tid].inFUMemInsts = new Queue<QueuedInst,
188  ReportTraitsAdaptor<QueuedInst> >(
189  name_ + ".inFUMemInsts" + tid_str, "insts", total_slots);
190  }
191 }
192 
193 const ForwardInstData *
194 Execute::getInput(ThreadID tid)
195 {
196  /* Get a line from the inputBuffer to work with */
197  if (!inputBuffer[tid].empty()) {
198  const ForwardInstData &head = inputBuffer[tid].front();
199 
200  return (head.isBubble() ? NULL : &(inputBuffer[tid].front()));
201  } else {
202  return NULL;
203  }
204 }
205 
206 void
207 Execute::popInput(ThreadID tid)
208 {
209  if (!inputBuffer[tid].empty())
210  inputBuffer[tid].pop();
211 
212  executeInfo[tid].inputIndex = 0;
213 }
214 
215 void
216 Execute::tryToBranch(MinorDynInstPtr inst, Fault fault, BranchData &branch)
217 {
218  ThreadContext *thread = cpu.getContext(inst->id.threadId);
219  const TheISA::PCState &pc_before = inst->pc;
220  TheISA::PCState target = thread->pcState();
221 
222  /* Force a branch for SerializeAfter/SquashAfter instructions
223  * at the end of micro-op sequence when we're not suspended */
224  bool force_branch = thread->status() != ThreadContext::Suspended &&
225  !inst->isFault() &&
226  inst->isLastOpInInst() &&
227  (inst->staticInst->isSerializeAfter() ||
228  inst->staticInst->isSquashAfter() ||
229  inst->staticInst->isIprAccess());
230 
231  DPRINTF(Branch, "tryToBranch before: %s after: %s%s\n",
232  pc_before, target, (force_branch ? " (forcing)" : ""));
233 
234  /* Will we change the PC to something other than the next instruction? */
235  bool must_branch = pc_before != target ||
236  fault != NoFault ||
237  force_branch;
238 
239  /* The reason for the branch data we're about to generate, set below */
240  BranchData::Reason reason = BranchData::NoBranch;
241 
242  if (fault == NoFault)
243  {
244  TheISA::advancePC(target, inst->staticInst);
245  thread->pcState(target);
246 
247  DPRINTF(Branch, "Advancing current PC from: %s to: %s\n",
248  pc_before, target);
249  }
250 
251  if (inst->predictedTaken && !force_branch) {
252  /* Predicted to branch */
253  if (!must_branch) {
254  /* No branch was taken, change stream to get us back to the
255  * intended PC value */
256  DPRINTF(Branch, "Predicted a branch from 0x%x to 0x%x but"
257  " none happened inst: %s\n",
258  inst->pc.instAddr(), inst->predictedTarget.instAddr(), *inst);
259 
260  reason = BranchData::BadlyPredictedBranch;
261  } else if (inst->predictedTarget == target) {
262  /* Branch prediction got the right target, kill the branch and
263  * carry on.
264  * Note that this information to the branch predictor might get
265  * overwritten by a "real" branch during this cycle */
266  DPRINTF(Branch, "Predicted a branch from 0x%x to 0x%x correctly"
267  " inst: %s\n",
268  inst->pc.instAddr(), inst->predictedTarget.instAddr(), *inst);
269 
270  reason = BranchData::CorrectlyPredictedBranch;
271  } else {
272  /* Branch prediction got the wrong target */
273  DPRINTF(Branch, "Predicted a branch from 0x%x to 0x%x"
274  " but got the wrong target (actual: 0x%x) inst: %s\n",
275  inst->pc.instAddr(), inst->predictedTarget.instAddr(),
276  target.instAddr(), *inst);
277 
278  reason = BranchData::BadlyPredictedBranchTarget;
279  }
280  } else if (must_branch) {
281  /* Unpredicted branch */
282  DPRINTF(Branch, "Unpredicted branch from 0x%x to 0x%x inst: %s\n",
283  inst->pc.instAddr(), target.instAddr(), *inst);
284 
285  reason = BranchData::UnpredictedBranch;
286  } else {
287  /* No branch at all */
288  reason = BranchData::NoBranch;
289  }
290 
291  updateBranchData(inst->id.threadId, reason, inst, target, branch);
292 }
293 
294 void
295 Execute::updateBranchData(
296  ThreadID tid,
297  BranchData::Reason reason,
298  MinorDynInstPtr inst, const TheISA::PCState &target,
299  BranchData &branch)
300 {
301  if (reason != BranchData::NoBranch) {
302  /* Bump up the stream sequence number on a real branch*/
303  if (BranchData::isStreamChange(reason))
304  executeInfo[tid].streamSeqNum++;
305 
306  /* Branches (even mis-predictions) don't change the predictionSeqNum,
307  * just the streamSeqNum */
308  branch = BranchData(reason, tid,
309  executeInfo[tid].streamSeqNum,
310  /* Maintaining predictionSeqNum if there's no inst is just a
311  * courtesy and looks better on minorview */
312  (inst->isBubble() ? executeInfo[tid].lastPredictionSeqNum
313  : inst->id.predictionSeqNum),
314  target, inst);
315 
316  DPRINTF(Branch, "Branch data signalled: %s\n", branch);
317  }
318 }
319 
320 void
321 Execute::handleMemResponse(MinorDynInstPtr inst,
322  LSQ::LSQRequestPtr response, BranchData &branch, Fault &fault)
323 {
324  ThreadID thread_id = inst->id.threadId;
325  ThreadContext *thread = cpu.getContext(thread_id);
326 
327  ExecContext context(cpu, *cpu.threads[thread_id], *this, inst);
328 
329  PacketPtr packet = response->packet;
330 
331  bool is_load = inst->staticInst->isLoad();
332  bool is_store = inst->staticInst->isStore();
333  bool is_atomic = inst->staticInst->isAtomic();
334  bool is_prefetch = inst->staticInst->isDataPrefetch();
335 
336  /* If true, the trace's predicate value will be taken from the exec
337  * context predicate, otherwise, it will be set to false */
338  bool use_context_predicate = true;
339 
340  if (inst->translationFault != NoFault) {
341  /* Invoke memory faults. */
342  DPRINTF(MinorMem, "Completing fault from DTLB access: %s\n",
343  inst->translationFault->name());
344 
345  if (inst->staticInst->isPrefetch()) {
346  DPRINTF(MinorMem, "Not taking fault on prefetch: %s\n",
347  inst->translationFault->name());
348 
349  /* Don't assign to fault */
350  } else {
351  /* Take the fault raised during the TLB/memory access */
352  fault = inst->translationFault;
353 
354  fault->invoke(thread, inst->staticInst);
355  }
356  } else if (!packet) {
357  DPRINTF(MinorMem, "Completing failed request inst: %s\n",
358  *inst);
359  use_context_predicate = false;
360  if (!context.readMemAccPredicate())
361  inst->staticInst->completeAcc(nullptr, &context, inst->traceData);
362  } else if (packet->isError()) {
363  DPRINTF(MinorMem, "Trying to commit error response: %s\n",
364  *inst);
365 
366  fatal("Received error response packet for inst: %s\n", *inst);
367  } else if (is_store || is_load || is_prefetch || is_atomic) {
368  assert(packet);
369 
370  DPRINTF(MinorMem, "Memory response inst: %s addr: 0x%x size: %d\n",
371  *inst, packet->getAddr(), packet->getSize());
372 
373  if (is_load && packet->getSize() > 0) {
374  DPRINTF(MinorMem, "Memory data[0]: 0x%x\n",
375  static_cast<unsigned int>(packet->getConstPtr<uint8_t>()[0]));
376  }
377 
378  /* Complete the memory access instruction */
379  fault = inst->staticInst->completeAcc(packet, &context,
380  inst->traceData);
381 
382  if (fault != NoFault) {
383  /* Invoke fault created by instruction completion */
384  DPRINTF(MinorMem, "Fault in memory completeAcc: %s\n",
385  fault->name());
386  fault->invoke(thread, inst->staticInst);
387  } else {
388  /* Stores need to be pushed into the store buffer to finish
389  * them off */
390  if (response->needsToBeSentToStoreBuffer())
391  lsq.sendStoreToStoreBuffer(response);
392  }
393  } else {
394  fatal("There should only ever be reads, "
395  "writes or faults at this point\n");
396  }
397 
398  lsq.popResponse(response);
399 
400  if (inst->traceData) {
401  inst->traceData->setPredicate((use_context_predicate ?
402  context.readPredicate() : false));
403  }
404 
405  doInstCommitAccounting(inst);
406 
407  /* Generate output to account for branches */
408  tryToBranch(inst, fault, branch);
409 }
410 
411 bool
412 Execute::isInterrupted(ThreadID thread_id) const
413 {
414  return cpu.checkInterrupts(cpu.getContext(thread_id));
415 }
416 
417 bool
418 Execute::takeInterrupt(ThreadID thread_id, BranchData &branch)
419 {
420  DPRINTF(MinorInterrupt, "Considering interrupt status from PC: %s\n",
421  cpu.getContext(thread_id)->pcState());
422 
423  Fault interrupt = cpu.getInterruptController(thread_id)->getInterrupt
424  (cpu.getContext(thread_id));
425 
426  if (interrupt != NoFault) {
427  /* The interrupt *must* set pcState */
428  cpu.getInterruptController(thread_id)->updateIntrInfo
429  (cpu.getContext(thread_id));
430  interrupt->invoke(cpu.getContext(thread_id));
431 
432  assert(!lsq.accessesInFlight());
433 
434  DPRINTF(MinorInterrupt, "Invoking interrupt: %s to PC: %s\n",
435  interrupt->name(), cpu.getContext(thread_id)->pcState());
436 
437  /* Assume that an interrupt *must* cause a branch. Assert this? */
438 
439  updateBranchData(thread_id, BranchData::Interrupt,
440  MinorDynInst::bubble(), cpu.getContext(thread_id)->pcState(),
441  branch);
442  }
443 
444  return interrupt != NoFault;
445 }
446 
447 bool
448 Execute::executeMemRefInst(MinorDynInstPtr inst, BranchData &branch,
449  bool &passed_predicate, Fault &fault)
450 {
451  bool issued = false;
452 
453  /* Set to true if the mem op. is issued and sent to the mem system */
454  passed_predicate = false;
455 
456  if (!lsq.canRequest()) {
457  /* Not acting on instruction yet as the memory
458  * queues are full */
459  issued = false;
460  } else {
461  ThreadContext *thread = cpu.getContext(inst->id.threadId);
462  TheISA::PCState old_pc = thread->pcState();
463 
464  ExecContext context(cpu, *cpu.threads[inst->id.threadId],
465  *this, inst);
466 
467  DPRINTF(MinorExecute, "Initiating memRef inst: %s\n", *inst);
468 
469  Fault init_fault = inst->staticInst->initiateAcc(&context,
470  inst->traceData);
471 
472  if (inst->inLSQ) {
473  if (init_fault != NoFault) {
474  assert(inst->translationFault != NoFault);
475  // Translation faults are dealt with in handleMemResponse()
476  init_fault = NoFault;
477  } else {
478  // If we have a translation fault then it got suppressed by
479  // initiateAcc()
480  inst->translationFault = NoFault;
481  }
482  }
483 
484  if (init_fault != NoFault) {
485  DPRINTF(MinorExecute, "Fault on memory inst: %s"
486  " initiateAcc: %s\n", *inst, init_fault->name());
487  fault = init_fault;
488  } else {
489  /* Only set this if the instruction passed its
490  * predicate */
491  if (!context.readMemAccPredicate()) {
492  DPRINTF(MinorMem, "No memory access for inst: %s\n", *inst);
493  assert(context.readPredicate());
494  }
495  passed_predicate = context.readPredicate();
496 
497  /* Set predicate in tracing */
498  if (inst->traceData)
499  inst->traceData->setPredicate(passed_predicate);
500 
501  /* If the instruction didn't pass its predicate
502  * or it is a predicated vector instruction and the
503  * associated predicate register is all-false (and so will not
504  * progress from here) Try to branch to correct and branch
505  * mis-prediction. */
506  if (!inst->inLSQ) {
507  /* Leave it up to commit to handle the fault */
508  lsq.pushFailedRequest(inst);
509  inst->inLSQ = true;
510  }
511  }
512 
513  /* Restore thread PC */
514  thread->pcState(old_pc);
515  issued = true;
516  }
517 
518  return issued;
519 }
520 
521 /** Increment a cyclic buffer index for indices [0, cycle_size-1] */
522 inline unsigned int
523 cyclicIndexInc(unsigned int index, unsigned int cycle_size)
524 {
525  unsigned int ret = index + 1;
526 
527  if (ret == cycle_size)
528  ret = 0;
529 
530  return ret;
531 }
532 
533 /** Decrement a cyclic buffer index for indices [0, cycle_size-1] */
534 inline unsigned int
535 cyclicIndexDec(unsigned int index, unsigned int cycle_size)
536 {
537  int ret = index - 1;
538 
539  if (ret < 0)
540  ret = cycle_size - 1;
541 
542  return ret;
543 }
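/* For example, with cycle_size == 4, cyclicIndexInc steps 0, 1, 2, 3, 0, ...
 * and cyclicIndexDec steps 3, 2, 1, 0, 3, ..., so both stay within [0, 3]. */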
544 
545 unsigned int
546 Execute::issue(ThreadID thread_id)
547 {
548  const ForwardInstData *insts_in = getInput(thread_id);
549  ExecuteThreadInfo &thread = executeInfo[thread_id];
550 
551  /* Early termination if we have no instructions */
552  if (!insts_in)
553  return 0;
554 
555  /* Start from the first FU */
556  unsigned int fu_index = 0;
557 
558  /* Remains true while instructions are still being issued. If any
559  * instruction fails to issue, this is set to false and we exit issue.
560  * This strictly enforces in-order issue. For other issue behaviours,
561  * a more complicated test in the outer while loop below is needed. */
562  bool issued = true;
563 
564  /* Number of insts issued this cycle to check for issueLimit */
565  unsigned num_insts_issued = 0;
566 
567  /* Number of memory ops issued this cycle to check for memoryIssueLimit */
568  unsigned num_mem_insts_issued = 0;
569 
570  /* Number of instructions discarded this cycle in order to enforce a
571  * discardLimit. @todo, add that parameter? */
572  unsigned num_insts_discarded = 0;
573 
574  do {
575  MinorDynInstPtr inst = insts_in->insts[thread.inputIndex];
576  Fault fault = inst->fault;
577  bool discarded = false;
578  bool issued_mem_ref = false;
579 
580  if (inst->isBubble()) {
581  /* Skip */
582  issued = true;
583  } else if (cpu.getContext(thread_id)->status() ==
584  ThreadContext::Suspended)
585  {
586  DPRINTF(MinorExecute, "Discarding inst: %s from suspended"
587  " thread\n", *inst);
588 
589  issued = true;
590  discarded = true;
591  } else if (inst->id.streamSeqNum != thread.streamSeqNum) {
592  DPRINTF(MinorExecute, "Discarding inst: %s as its stream"
593  " state was unexpected, expected: %d\n",
594  *inst, thread.streamSeqNum);
595  issued = true;
596  discarded = true;
597  } else {
598  /* Try and issue an instruction into an FU, assume we didn't and
599  * fix that in the loop */
600  issued = false;
601 
602  /* Try FU from 0 each instruction */
603  fu_index = 0;
604 
605  /* Try and issue a single instruction stepping through the
606  * available FUs */
607  do {
608  FUPipeline *fu = funcUnits[fu_index];
609 
610  DPRINTF(MinorExecute, "Trying to issue inst: %s to FU: %d\n",
611  *inst, fu_index);
612 
613  /* Does the examined fu have the OpClass-related capability
614  * needed to execute this instruction? Faults can always
615  * issue to any FU but probably should just 'live' in the
616  * inFlightInsts queue rather than having an FU. */
617  bool fu_is_capable = (!inst->isFault() ?
618  fu->provides(inst->staticInst->opClass()) : true);
619 
620  if (inst->isNoCostInst()) {
621  /* Issue free insts. to a fake numbered FU */
622  fu_index = noCostFUIndex;
623 
624  /* And start the countdown on activity to allow
625  * this instruction to get to the end of its FU */
626  cpu.activityRecorder->activity();
627 
628  /* Mark the destinations for this instruction as
629  * busy */
630  scoreboard[thread_id].markupInstDests(inst, cpu.curCycle() +
631  Cycles(0), cpu.getContext(thread_id), false);
632 
633  DPRINTF(MinorExecute, "Issuing %s to %d\n", inst->id, noCostFUIndex);
634  inst->fuIndex = noCostFUIndex;
635  inst->extraCommitDelay = Cycles(0);
636  inst->extraCommitDelayExpr = NULL;
637 
638  /* Push the instruction onto the inFlight queue so
639  * it can be committed in order */
640  QueuedInst fu_inst(inst);
641  thread.inFlightInsts->push(fu_inst);
642 
643  issued = true;
644 
645  } else if (!fu_is_capable || fu->alreadyPushed()) {
646  /* Skip */
647  if (!fu_is_capable) {
648  DPRINTF(MinorExecute, "Can't issue as FU: %d isn't"
649  " capable\n", fu_index);
650  } else {
651  DPRINTF(MinorExecute, "Can't issue as FU: %d is"
652  " already busy\n", fu_index);
653  }
654  } else if (fu->stalled) {
655  DPRINTF(MinorExecute, "Can't issue inst: %s into FU: %d,"
656  " it's stalled\n",
657  *inst, fu_index);
658  } else if (!fu->canInsert()) {
659  DPRINTF(MinorExecute, "Can't issue inst: %s to busy FU"
660  " for another: %d cycles\n",
661  *inst, fu->cyclesBeforeInsert());
662  } else {
663  MinorFUTiming *timing = (!inst->isFault() ?
664  fu->findTiming(inst->staticInst) : NULL);
665 
666  const std::vector<Cycles> *src_latencies =
667  (timing ? &(timing->srcRegsRelativeLats)
668  : NULL);
669 
670  const std::vector<bool> *cant_forward_from_fu_indices =
671  &(fu->cantForwardFromFUIndices);
672 
673  if (timing && timing->suppress) {
674  DPRINTF(MinorExecute, "Can't issue inst: %s as extra"
675  " decoding is suppressing it\n",
676  *inst);
677  } else if (!scoreboard[thread_id].canInstIssue(inst,
678  src_latencies, cant_forward_from_fu_indices,
679  cpu.curCycle(), cpu.getContext(thread_id)))
680  {
681  DPRINTF(MinorExecute, "Can't issue inst: %s yet\n",
682  *inst);
683  } else {
684  /* Can insert the instruction into this FU */
685  DPRINTF(MinorExecute, "Issuing inst: %s"
686  " into FU %d\n", *inst,
687  fu_index);
688 
689  Cycles extra_dest_retire_lat = Cycles(0);
690  TimingExpr *extra_dest_retire_lat_expr = NULL;
691  Cycles extra_assumed_lat = Cycles(0);
692 
693  /* Add the extraCommitDelay and extraAssumeLat to
694  * the FU pipeline timings */
695  if (timing) {
696  extra_dest_retire_lat =
697  timing->extraCommitLat;
698  extra_dest_retire_lat_expr =
699  timing->extraCommitLatExpr;
700  extra_assumed_lat =
701  timing->extraAssumedLat;
702  }
703 
704  issued_mem_ref = inst->isMemRef();
705 
706  QueuedInst fu_inst(inst);
707 
708  /* Decorate the inst with FU details */
709  inst->fuIndex = fu_index;
710  inst->extraCommitDelay = extra_dest_retire_lat;
711  inst->extraCommitDelayExpr =
712  extra_dest_retire_lat_expr;
713 
714  if (issued_mem_ref) {
715  /* Remember which instruction this memory op
716  * depends on so that initiateAcc can be called
717  * early */
718  if (allowEarlyMemIssue) {
719  inst->instToWaitFor =
720  scoreboard[thread_id].execSeqNumToWaitFor(inst,
721  cpu.getContext(thread_id));
722 
723  if (lsq.getLastMemBarrier(thread_id) >
724  inst->instToWaitFor)
725  {
726  DPRINTF(MinorExecute, "A barrier will"
727  " cause a delay in mem ref issue of"
728  " inst: %s until after inst"
729  " %d(exec)\n", *inst,
730  lsq.getLastMemBarrier(thread_id));
731 
732  inst->instToWaitFor =
733  lsq.getLastMemBarrier(thread_id);
734  } else {
735  DPRINTF(MinorExecute, "Memory ref inst:"
736  " %s must wait for inst %d(exec)"
737  " before issuing\n",
738  *inst, inst->instToWaitFor);
739  }
740 
741  inst->canEarlyIssue = true;
742  }
743  /* Also queue this instruction in the memory ref
744  * queue to ensure in-order issue to the LSQ */
745  DPRINTF(MinorExecute, "Pushing mem inst: %s\n",
746  *inst);
747  thread.inFUMemInsts->push(fu_inst);
748  }
749 
750  /* Issue to FU */
751  fu->push(fu_inst);
752  /* And start the countdown on activity to allow
753  * this instruction to get to the end of its FU */
754  cpu.activityRecorder->activity();
755 
756  /* Mark the destinations for this instruction as
757  * busy */
758  scoreboard[thread_id].markupInstDests(inst, cpu.curCycle() +
759  fu->description.opLat +
760  extra_dest_retire_lat +
761  extra_assumed_lat,
762  cpu.getContext(thread_id),
763  issued_mem_ref && extra_assumed_lat == Cycles(0));
764 
765  /* Push the instruction onto the inFlight queue so
766  * it can be committed in order */
767  thread.inFlightInsts->push(fu_inst);
768 
769  issued = true;
770  }
771  }
772 
773  fu_index++;
774  } while (fu_index != numFuncUnits && !issued);
775 
776  if (!issued)
777  DPRINTF(MinorExecute, "Didn't issue inst: %s\n", *inst);
778  }
779 
780  if (issued) {
781  /* Generate MinorTrace's MinorInst lines. Do this at commit
782  * to allow better instruction annotation? */
783  if (DTRACE(MinorTrace) && !inst->isBubble())
784  inst->minorTraceInst(*this);
785 
786  /* Mark up barriers in the LSQ */
787  if (!discarded && inst->isInst() &&
788  inst->staticInst->isMemBarrier())
789  {
790  DPRINTF(MinorMem, "Issuing memory barrier inst: %s\n", *inst);
791  lsq.issuedMemBarrierInst(inst);
792  }
793 
794  if (inst->traceData && setTraceTimeOnIssue) {
795  inst->traceData->setWhen(curTick());
796  }
797 
798  if (issued_mem_ref)
799  num_mem_insts_issued++;
800 
801  if (discarded) {
802  num_insts_discarded++;
803  } else if (!inst->isBubble()) {
804  num_insts_issued++;
805 
806  if (num_insts_issued == issueLimit)
807  DPRINTF(MinorExecute, "Reached inst issue limit\n");
808  }
809 
810  thread.inputIndex++;
811  DPRINTF(MinorExecute, "Stepping to next inst inputIndex: %d\n",
812  thread.inputIndex);
813  }
814 
815  /* Got to the end of a line */
816  if (thread.inputIndex == insts_in->width()) {
817  popInput(thread_id);
818  /* Set insts_in to null to force us to leave the surrounding
819  * loop */
820  insts_in = NULL;
821 
822  if (processMoreThanOneInput) {
823  DPRINTF(MinorExecute, "Wrapping\n");
824  insts_in = getInput(thread_id);
825  }
826  }
827  } while (insts_in && thread.inputIndex < insts_in->width() &&
828  /* We still have instructions */
829  fu_index != numFuncUnits && /* Not visited all FUs */
830  issued && /* We've not yet failed to issue an instruction */
831  num_insts_issued != issueLimit && /* Still allowed to issue */
832  num_mem_insts_issued != memoryIssueLimit);
833 
834  return num_insts_issued;
835 }
836 
837 bool
838 Execute::tryPCEvents(ThreadID thread_id)
839 {
840  ThreadContext *thread = cpu.getContext(thread_id);
841  unsigned int num_pc_event_checks = 0;
842 
843  /* Handle PC events on instructions */
844  Addr oldPC;
845  do {
846  oldPC = thread->instAddr();
847  cpu.threads[thread_id]->pcEventQueue.service(oldPC, thread);
848  num_pc_event_checks++;
849  } while (oldPC != thread->instAddr());
850 
851  if (num_pc_event_checks > 1) {
852  DPRINTF(PCEvent, "Acting on PC Event to PC: %s\n",
853  thread->pcState());
854  }
855 
856  return num_pc_event_checks > 1;
857 }
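/* Note that the do-while above always runs at least once, so a count greater
 * than one means a serviced PC event actually changed the thread's PC. */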
858 
859 void
860 Execute::doInstCommitAccounting(MinorDynInstPtr inst)
861 {
862  assert(!inst->isFault());
863 
864  MinorThread *thread = cpu.threads[inst->id.threadId];
865 
866  /* Increment the many and various inst and op counts in the
867  * thread and system */
868  if (!inst->staticInst->isMicroop() || inst->staticInst->isLastMicroop())
869  {
870  thread->numInst++;
871  thread->numInsts++;
872  cpu.stats.numInsts++;
873  cpu.system->totalNumInsts++;
874 
875  /* Act on events related to instruction counts */
876  thread->comInstEventQueue.serviceEvents(thread->numInst);
877  }
878  thread->numOp++;
879  thread->numOps++;
880  cpu.stats.numOps++;
881  cpu.stats.committedInstType[inst->id.threadId]
882  [inst->staticInst->opClass()]++;
883 
884  /* Set the CP SeqNum to the numOps commit number */
885  if (inst->traceData)
886  inst->traceData->setCPSeq(thread->numOp);
887 
888  cpu.probeInstCommit(inst->staticInst, inst->pc.instAddr());
889 }
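/* In the accounting above, numInst/numInsts count whole (macro) instructions
 * and are only incremented on the last micro-op, while numOp/numOps count
 * every committed micro-op. */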
890 
891 bool
892 Execute::commitInst(MinorDynInstPtr inst, bool early_memory_issue,
893  BranchData &branch, Fault &fault, bool &committed,
894  bool &completed_mem_issue)
895 {
896  ThreadID thread_id = inst->id.threadId;
897  ThreadContext *thread = cpu.getContext(thread_id);
898 
899  bool completed_inst = true;
900  fault = NoFault;
901 
902  /* Is the thread for this instruction suspended? In that case, just
903  * stall as long as there are no pending interrupts */
904  if (thread->status() == ThreadContext::Suspended &&
905  !isInterrupted(thread_id))
906  {
907  panic("We should never hit the case where we try to commit from a "
908  "suspended thread as the streamSeqNum should not match");
909  } else if (inst->isFault()) {
910  ExecContext context(cpu, *cpu.threads[thread_id], *this, inst);
911 
912  DPRINTF(MinorExecute, "Fault inst reached Execute: %s\n",
913  inst->fault->name());
914 
915  fault = inst->fault;
916  inst->fault->invoke(thread, NULL);
917 
918  tryToBranch(inst, fault, branch);
919  } else if (inst->staticInst->isMemRef()) {
920  /* Memory accesses are executed in two parts:
921  * executeMemRefInst -- calculates the EA and issues the access
922  * to memory. This is done here.
923  * handleMemResponse -- handles the response packet, done by
924  * Execute::commit
925  *
926  * While the memory access is in its FU, the EA is being
927  * calculated. At the end of the FU, when it is ready to
928  * 'commit' (in this function), the access is presented to the
929  * memory queues. When a response comes back from memory,
930  * Execute::commit will commit it.
931  */
932  bool predicate_passed = false;
933  bool completed_mem_inst = executeMemRefInst(inst, branch,
934  predicate_passed, fault);
935 
936  if (completed_mem_inst && fault != NoFault) {
937  if (early_memory_issue) {
938  DPRINTF(MinorExecute, "Fault in early executing inst: %s\n",
939  fault->name());
940  /* Don't execute the fault, just stall the instruction
941  * until it gets to the head of inFlightInsts */
942  inst->canEarlyIssue = false;
943  /* Not completed as we'll come here again to pick up
944  * the fault when we get to the end of the FU */
945  completed_inst = false;
946  } else {
947  DPRINTF(MinorExecute, "Fault in execute: %s\n",
948  fault->name());
949  fault->invoke(thread, NULL);
950 
951  tryToBranch(inst, fault, branch);
952  completed_inst = true;
953  }
954  } else {
955  completed_inst = completed_mem_inst;
956  }
957  completed_mem_issue = completed_inst;
958  } else if (inst->isInst() && inst->staticInst->isMemBarrier() &&
959  !lsq.canPushIntoStoreBuffer())
960  {
961  DPRINTF(MinorExecute, "Can't commit data barrier inst: %s yet as"
962  " there isn't space in the store buffer\n", *inst);
963 
964  completed_inst = false;
965  } else if (inst->isInst() && inst->staticInst->isQuiesce()
966  && !branch.isBubble()){
967  /* This instruction can suspend, need to be able to communicate
968  * backwards, so no other branches may evaluate this cycle*/
969  completed_inst = false;
970  } else {
971  ExecContext context(cpu, *cpu.threads[thread_id], *this, inst);
972 
973  DPRINTF(MinorExecute, "Committing inst: %s\n", *inst);
974 
975  fault = inst->staticInst->execute(&context,
976  inst->traceData);
977 
978  /* Set the predicate for tracing and dump */
979  if (inst->traceData)
980  inst->traceData->setPredicate(context.readPredicate());
981 
982  committed = true;
983 
984  if (fault != NoFault) {
985  DPRINTF(MinorExecute, "Fault in execute of inst: %s fault: %s\n",
986  *inst, fault->name());
987  fault->invoke(thread, inst->staticInst);
988  }
989 
990  doInstCommitAccounting(inst);
991  tryToBranch(inst, fault, branch);
992  }
993 
994  if (completed_inst) {
995  /* Keep a copy of this instruction's predictionSeqNum just in case
996  * we need to issue a branch without an instruction (such as an
997  * interrupt) */
998  executeInfo[thread_id].lastPredictionSeqNum = inst->id.predictionSeqNum;
999 
1000  /* Check to see if this instruction suspended the current thread. */
1001  if (!inst->isFault() &&
1002  thread->status() == ThreadContext::Suspended &&
1003  branch.isBubble() && /* It didn't branch too */
1004  !isInterrupted(thread_id)) /* Don't suspend if we have
1005  interrupts */
1006  {
1007  TheISA::PCState resume_pc = cpu.getContext(thread_id)->pcState();
1008 
1009  assert(resume_pc.microPC() == 0);
1010 
1011  DPRINTF(MinorInterrupt, "Suspending thread: %d from Execute"
1012  " inst: %s\n", thread_id, *inst);
1013 
1014  cpu.stats.numFetchSuspends++;
1015 
1016  updateBranchData(thread_id, BranchData::SuspendThread, inst,
1017  resume_pc, branch);
1018  }
1019  }
1020 
1021  return completed_inst;
1022 }
1023 
1024 void
1025 Execute::commit(ThreadID thread_id, bool only_commit_microops, bool discard,
1026  BranchData &branch)
1027 {
1028  Fault fault = NoFault;
1029  Cycles now = cpu.curCycle();
1030  ExecuteThreadInfo &ex_info = executeInfo[thread_id];
1031 
1057  /* Has an instruction been completed? Once this becomes false, we stop
1058  * trying to complete instructions. */
1059  bool completed_inst = true;
1060 
1061  /* Number of insts committed this cycle to check against commitLimit */
1062  unsigned int num_insts_committed = 0;
1063 
1064  /* Number of memory access instructions committed to check against
1065  * memCommitLimit */
1066  unsigned int num_mem_refs_committed = 0;
1067 
1068  if (only_commit_microops && !ex_info.inFlightInsts->empty()) {
1069  DPRINTF(MinorInterrupt, "Only commit microops %s %d\n",
1070  *(ex_info.inFlightInsts->front().inst),
1071  ex_info.lastCommitWasEndOfMacroop);
1072  }
1073 
1074  while (!ex_info.inFlightInsts->empty() && /* Some more instructions to process */
1075  !branch.isStreamChange() && /* No real branch */
1076  fault == NoFault && /* No faults */
1077  completed_inst && /* Still finding instructions to execute */
1078  num_insts_committed != commitLimit /* Not reached commit limit */
1079  )
1080  {
1081  if (only_commit_microops) {
1082  DPRINTF(MinorInterrupt, "Committing tail of insts before"
1083  " interrupt: %s\n",
1084  *(ex_info.inFlightInsts->front().inst));
1085  }
1086 
1087  QueuedInst *head_inflight_inst = &(ex_info.inFlightInsts->front());
1088 
1089  InstSeqNum head_exec_seq_num =
1090  head_inflight_inst->inst->id.execSeqNum;
1091 
1092  /* The instruction we actually process if completed_inst
1093  * remains true to the end of the loop body.
1094  * Start by considering the head of the in flight insts queue */
1095  MinorDynInstPtr inst = head_inflight_inst->inst;
1096 
1097  bool committed_inst = false;
1098  bool discard_inst = false;
1099  bool completed_mem_ref = false;
1100  bool issued_mem_ref = false;
1101  bool early_memory_issue = false;
1102 
1103  /* Must set this again to go around the loop */
1104  completed_inst = false;
1105 
1106  /* If we're just completing a macroop before an interrupt or drain,
1107  * can we still commit another microop (rather than a memory response)
1108  * without crossing into the next full instruction? */
1109  bool can_commit_insts = !ex_info.inFlightInsts->empty() &&
1110  !(only_commit_microops && ex_info.lastCommitWasEndOfMacroop);
1111 
1112  /* Can we find a mem response for this inst */
1113  LSQ::LSQRequestPtr mem_response =
1114  (inst->inLSQ ? lsq.findResponse(inst) : NULL);
1115 
1116  DPRINTF(MinorExecute, "Trying to commit canCommitInsts: %d\n",
1117  can_commit_insts);
1118 
1119  /* Test for PC events after every instruction */
1120  if (isInbetweenInsts(thread_id) && tryPCEvents(thread_id)) {
1121  ThreadContext *thread = cpu.getContext(thread_id);
1122 
1123  /* Branch as there was a change in PC */
1124  updateBranchData(thread_id, BranchData::UnpredictedBranch,
1125  MinorDynInst::bubble(), thread->pcState(), branch);
1126  } else if (mem_response &&
1127  num_mem_refs_committed < memoryCommitLimit)
1128  {
1129  /* Try to commit from the memory responses next */
1130  discard_inst = inst->id.streamSeqNum !=
1131  ex_info.streamSeqNum || discard;
1132 
1133  DPRINTF(MinorExecute, "Trying to commit mem response: %s\n",
1134  *inst);
1135 
1136  /* Complete or discard the response */
1137  if (discard_inst) {
1138  DPRINTF(MinorExecute, "Discarding mem inst: %s as its"
1139  " stream state was unexpected, expected: %d\n",
1140  *inst, ex_info.streamSeqNum);
1141 
1142  lsq.popResponse(mem_response);
1143  } else {
1144  handleMemResponse(inst, mem_response, branch, fault);
1145  committed_inst = true;
1146  }
1147 
1148  completed_mem_ref = true;
1149  completed_inst = true;
1150  } else if (can_commit_insts) {
1151  /* If true, this instruction will, subject to timing tweaks,
1152  * be considered for completion. try_to_commit flattens
1153  * the `if' tree a bit and allows other tests for inst
1154  * commit to be inserted here. */
1155  bool try_to_commit = false;
1156 
1157  /* Try and issue memory ops early if they:
1158  * - Can push a request into the LSQ
1159  * - Have reached the end of their FUs
1160  * - Have had all their dependencies satisfied
1161  * - Are from the right stream
1162  *
1163  * For any other case, leave it to the normal instruction
1164  * issue below to handle them.
1165  */
1166  if (!ex_info.inFUMemInsts->empty() && lsq.canRequest()) {
1167  DPRINTF(MinorExecute, "Trying to commit from mem FUs\n");
1168 
1169  const MinorDynInstPtr head_mem_ref_inst =
1170  ex_info.inFUMemInsts->front().inst;
1171  FUPipeline *fu = funcUnits[head_mem_ref_inst->fuIndex];
1172  const MinorDynInstPtr &fu_inst = fu->front().inst;
1173 
1174  /* Use this, possibly out of order, inst as the one
1175  * to 'commit'/send to the LSQ */
1176  if (!fu_inst->isBubble() &&
1177  !fu_inst->inLSQ &&
1178  fu_inst->canEarlyIssue &&
1179  ex_info.streamSeqNum == fu_inst->id.streamSeqNum &&
1180  head_exec_seq_num > fu_inst->instToWaitFor)
1181  {
1182  DPRINTF(MinorExecute, "Issuing mem ref early"
1183  " inst: %s instToWaitFor: %d\n",
1184  *(fu_inst), fu_inst->instToWaitFor);
1185 
1186  inst = fu_inst;
1187  try_to_commit = true;
1188  early_memory_issue = true;
1189  completed_inst = true;
1190  }
1191  }
1192 
1193  /* Try and commit FU-less insts */
1194  if (!completed_inst && inst->isNoCostInst()) {
1195  DPRINTF(MinorExecute, "Committing no cost inst: %s", *inst);
1196 
1197  try_to_commit = true;
1198  completed_inst = true;
1199  }
1200 
1201  /* Try to issue from the ends of FUs and the inFlightInsts
1202  * queue */
1203  if (!completed_inst && !inst->inLSQ) {
1204  DPRINTF(MinorExecute, "Trying to commit from FUs\n");
1205 
1206  /* Try to commit from a functional unit */
1207  /* Is the head inst of the expected inst's FU actually the
1208  * expected inst? */
1209  QueuedInst &fu_inst =
1210  funcUnits[inst->fuIndex]->front();
1211  InstSeqNum fu_inst_seq_num = fu_inst.inst->id.execSeqNum;
1212 
1213  if (fu_inst.inst->isBubble()) {
1214  /* No instruction ready */
1215  completed_inst = false;
1216  } else if (fu_inst_seq_num != head_exec_seq_num) {
1217  /* Past instruction: we must have already executed it
1218  * in the same cycle and so the head inst isn't
1219  * actually at the end of its pipeline
1220  * Future instruction: handled above and only for
1221  * mem refs on their way to the LSQ */
1222  } else if (fu_inst.inst->id == inst->id) {
1223  /* All instructions can be committed if they have the
1224  * right execSeqNum and there are no in-flight
1225  * mem insts before us */
1226  try_to_commit = true;
1227  completed_inst = true;
1228  }
1229  }
1230 
1231  if (try_to_commit) {
1232  discard_inst = inst->id.streamSeqNum !=
1233  ex_info.streamSeqNum || discard;
1234 
1235  /* Is this instruction discardable as its streamSeqNum
1236  * doesn't match? */
1237  if (!discard_inst) {
1238  /* Try to commit or discard a non-memory instruction.
1239  * Memory ops are actually 'committed' from this FUs
1240  * and 'issued' into the memory system so we need to
1241  * account for them later (commit_was_mem_issue gets
1242  * set) */
1243  if (inst->extraCommitDelayExpr) {
1244  DPRINTF(MinorExecute, "Evaluating expression for"
1245  " extra commit delay inst: %s\n", *inst);
1246 
1247  ThreadContext *thread = cpu.getContext(thread_id);
1248 
1249  TimingExprEvalContext context(inst->staticInst,
1250  thread, NULL);
1251 
1252  uint64_t extra_delay = inst->extraCommitDelayExpr->
1253  eval(context);
1254 
1255  DPRINTF(MinorExecute, "Extra commit delay expr"
1256  " result: %d\n", extra_delay);
1257 
1258  if (extra_delay < 128) {
1259  inst->extraCommitDelay += Cycles(extra_delay);
1260  } else {
1261  DPRINTF(MinorExecute, "Extra commit delay was"
1262  " very long: %d\n", extra_delay);
1263  }
1264  inst->extraCommitDelayExpr = NULL;
1265  }
1266 
1267  /* Move the extraCommitDelay from the instruction
1268  * into the minimumCommitCycle */
1269  if (inst->extraCommitDelay != Cycles(0)) {
1270  inst->minimumCommitCycle = cpu.curCycle() +
1271  inst->extraCommitDelay;
1272  inst->extraCommitDelay = Cycles(0);
1273  }
1274 
1275  /* @todo Think about making lastMemBarrier be
1276  * MAX_UINT_64 to avoid using 0 as a marker value */
1277  if (!inst->isFault() && inst->isMemRef() &&
1278  lsq.getLastMemBarrier(thread_id) <
1279  inst->id.execSeqNum &&
1280  lsq.getLastMemBarrier(thread_id) != 0)
1281  {
1282  DPRINTF(MinorExecute, "Not committing inst: %s yet"
1283  " as there are incomplete barriers in flight\n",
1284  *inst);
1285  completed_inst = false;
1286  } else if (inst->minimumCommitCycle > now) {
1287  DPRINTF(MinorExecute, "Not committing inst: %s yet"
1288  " as it wants to be stalled for %d more cycles\n",
1289  *inst, inst->minimumCommitCycle - now);
1290  completed_inst = false;
1291  } else {
1292  completed_inst = commitInst(inst,
1293  early_memory_issue, branch, fault,
1294  committed_inst, issued_mem_ref);
1295  }
1296  } else {
1297  /* Discard instruction */
1298  completed_inst = true;
1299  }
1300 
1301  if (completed_inst) {
1302  /* Allow the pipeline to advance. If the FU head
1303  * instruction wasn't the inFlightInsts head
1304  * but had already been committed, it would have
1305  * unstalled the pipeline before here */
1306  if (inst->fuIndex != noCostFUIndex) {
1307  DPRINTF(MinorExecute, "Unstalling %d for inst %s\n", inst->fuIndex, inst->id);
1308  funcUnits[inst->fuIndex]->stalled = false;
1309  }
1310  }
1311  }
1312  } else {
1313  DPRINTF(MinorExecute, "No instructions to commit\n");
1314  completed_inst = false;
1315  }
1316 
1317  /* All discardable instructions must also be 'completed' by now */
1318  assert(!(discard_inst && !completed_inst));
1319 
1320  /* Instruction committed but was discarded due to streamSeqNum
1321  * mismatch */
1322  if (discard_inst) {
1323  DPRINTF(MinorExecute, "Discarding inst: %s as its stream"
1324  " state was unexpected, expected: %d\n",
1325  *inst, ex_info.streamSeqNum);
1326 
1327  if (fault == NoFault)
1328  cpu.stats.numDiscardedOps++;
1329  }
1330 
1331  /* Mark the mem inst as being in the LSQ */
1332  if (issued_mem_ref) {
1333  inst->fuIndex = 0;
1334  inst->inLSQ = true;
1335  }
1336 
1337  /* Pop issued (to LSQ) and discarded mem refs from the inFUMemInsts
1338  * as they've *definitely* exited the FUs */
1339  if (completed_inst && inst->isMemRef()) {
1340  /* The MemRef could have been discarded from the FU or the memory
1341  * queue, so just check an FU instruction */
1342  if (!ex_info.inFUMemInsts->empty() &&
1343  ex_info.inFUMemInsts->front().inst == inst)
1344  {
1345  ex_info.inFUMemInsts->pop();
1346  }
1347  }
1348 
1349  if (completed_inst && !(issued_mem_ref && fault == NoFault)) {
1350  /* Note that this includes discarded insts */
1351  DPRINTF(MinorExecute, "Completed inst: %s\n", *inst);
1352 
1353  /* Got to the end of a full instruction? */
1354  ex_info.lastCommitWasEndOfMacroop = inst->isFault() ||
1355  inst->isLastOpInInst();
1356 
1357  /* lastPredictionSeqNum is kept as a convenience to prevent its
1358  * value from changing too much on the minorview display */
1359  ex_info.lastPredictionSeqNum = inst->id.predictionSeqNum;
1360 
1361  /* Finished with the inst, remove it from the inst queue and
1362  * clear its dependencies */
1363  ex_info.inFlightInsts->pop();
1364 
1365  /* Complete barriers in the LSQ/move to store buffer */
1366  if (inst->isInst() && inst->staticInst->isMemBarrier()) {
1367  DPRINTF(MinorMem, "Completing memory barrier"
1368  " inst: %s committed: %d\n", *inst, committed_inst);
1369  lsq.completeMemBarrierInst(inst, committed_inst);
1370  }
1371 
1372  scoreboard[thread_id].clearInstDests(inst, inst->isMemRef());
1373  }
1374 
1375  /* Handle per-cycle instruction counting */
1376  if (committed_inst) {
1377  bool is_no_cost_inst = inst->isNoCostInst();
1378 
1379  /* Don't show no cost instructions as having taken a commit
1380  * slot */
1381  if (DTRACE(MinorTrace) && !is_no_cost_inst)
1382  ex_info.instsBeingCommitted.insts[num_insts_committed] = inst;
1383 
1384  if (!is_no_cost_inst)
1385  num_insts_committed++;
1386 
1387  if (num_insts_committed == commitLimit)
1388  DPRINTF(MinorExecute, "Reached inst commit limit\n");
1389 
1390  /* Re-set the time of the instruction if that's required for
1391  * tracing */
1392  if (inst->traceData) {
1393  if (setTraceTimeOnCommit)
1394  inst->traceData->setWhen(curTick());
1395  inst->traceData->dump();
1396  }
1397 
1398  if (completed_mem_ref)
1399  num_mem_refs_committed++;
1400 
1401  if (num_mem_refs_committed == memoryCommitLimit)
1402  DPRINTF(MinorExecute, "Reached mem ref commit limit\n");
1403  }
1404  }
1405 }
1406 
1407 bool
1408 Execute::isInbetweenInsts(ThreadID thread_id) const
1409 {
1410  return executeInfo[thread_id].lastCommitWasEndOfMacroop &&
1411  !lsq.accessesInFlight();
1412 }
1413 
1414 void
1415 Execute::evaluate()
1416 {
1417  if (!inp.outputWire->isBubble())
1418  inputBuffer[inp.outputWire->threadId].setTail(*inp.outputWire);
1419 
1420  BranchData &branch = *out.inputWire;
1421 
1422  unsigned int num_issued = 0;
1423 
1424  /* Do all the cycle-wise activities for dcachePort here to potentially
1425  * free up input spaces in the LSQ's requests queue */
1426  lsq.step();
1427 
1428  /* Check interrupts first. Will halt commit if interrupt found */
1429  bool interrupted = false;
1430  ThreadID interrupt_tid = checkInterrupts(branch, interrupted);
1431 
1432  if (interrupt_tid != InvalidThreadID) {
1433  /* Signalling an interrupt this cycle, not issuing/committing from
1434  * any other threads */
1435  } else if (!branch.isBubble()) {
1436  /* It's important that this is here to carry Fetch1 wakeups to Fetch1
1437  * without overwriting them */
1438  DPRINTF(MinorInterrupt, "Execute skipping a cycle to allow old"
1439  " branch to complete\n");
1440  } else {
1441  ThreadID commit_tid = getCommittingThread();
1442 
1443  if (commit_tid != InvalidThreadID) {
1444  ExecuteThreadInfo& commit_info = executeInfo[commit_tid];
1445 
1446  DPRINTF(MinorExecute, "Attempting to commit [tid:%d]\n",
1447  commit_tid);
1448  /* commit can set stalled flags observable to issue and so *must* be
1449  * called first */
1450  if (commit_info.drainState != NotDraining) {
1451  if (commit_info.drainState == DrainCurrentInst) {
1452  /* Commit only micro-ops, don't kill anything else */
1453  commit(commit_tid, true, false, branch);
1454 
1455  if (isInbetweenInsts(commit_tid))
1456  setDrainState(commit_tid, DrainHaltFetch);
1457 
1458  /* Discard any generated branch */
1459  branch = BranchData::bubble();
1460  } else if (commit_info.drainState == DrainAllInsts) {
1461  /* Kill all instructions */
1462  while (getInput(commit_tid))
1463  popInput(commit_tid);
1464  commit(commit_tid, false, true, branch);
1465  }
1466  } else {
1467  /* Commit micro-ops only if interrupted. Otherwise, commit
1468  * anything you like */
1469  DPRINTF(MinorExecute, "Committing micro-ops for interrupt[tid:%d]\n",
1470  commit_tid);
1471  bool only_commit_microops = interrupted &&
1472  hasInterrupt(commit_tid);
1473  commit(commit_tid, only_commit_microops, false, branch);
1474  }
1475 
1476  /* Halt fetch, but don't do it until we have the current instruction in
1477  * the bag */
1478  if (commit_info.drainState == DrainHaltFetch) {
1479  updateBranchData(commit_tid, BranchData::HaltFetch,
1480  MinorDynInst::bubble(), TheISA::PCState(0), branch);
1481 
1482  cpu.wakeupOnEvent(Pipeline::ExecuteStageId);
1483  setDrainState(commit_tid, DrainAllInsts);
1484  }
1485  }
1486  ThreadID issue_tid = getIssuingThread();
1487  /* This will issue merrily even when interrupted in the sure and
1488  * certain knowledge that the interrupt will change the stream */
1489  if (issue_tid != InvalidThreadID) {
1490  DPRINTF(MinorExecute, "Attempting to issue [tid:%d]\n",
1491  issue_tid);
1492  num_issued = issue(issue_tid);
1493  }
1494 
1495  }
1496 
1497  /* Run logic to step functional units + decide if we are active on the next
1498  * clock cycle */
1499  std::vector<MinorDynInstPtr> next_issuable_insts;
1500  bool can_issue_next = false;
1501 
1502  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1503  /* Find the next issuable instruction for each thread and see if it can
1504  be issued */
1505  if (getInput(tid)) {
1506  unsigned int input_index = executeInfo[tid].inputIndex;
1507  MinorDynInstPtr inst = getInput(tid)->insts[input_index];
1508  if (inst->isFault()) {
1509  can_issue_next = true;
1510  } else if (!inst->isBubble()) {
1511  next_issuable_insts.push_back(inst);
1512  }
1513  }
1514  }
1515 
1516  bool becoming_stalled = true;
1517 
1518  /* Advance the pipelines and note whether they still need to be
1519  * advanced */
1520  for (unsigned int i = 0; i < numFuncUnits; i++) {
1521  FUPipeline *fu = funcUnits[i];
1522  fu->advance();
1523 
1524  /* If we need to tick again, the pipeline will have been left or set
1525  * to be unstalled */
1526  if (fu->occupancy !=0 && !fu->stalled)
1527  becoming_stalled = false;
1528 
1529  /* Could we possibly issue the next instruction from any thread?
1530  * This is quite an expensive test and is only used to determine
1531  * if the CPU should remain active, only run it if we aren't sure
1532  * we are active next cycle yet */
1533  for (auto inst : next_issuable_insts) {
1534  if (!fu->stalled && fu->provides(inst->staticInst->opClass()) &&
1535  scoreboard[inst->id.threadId].canInstIssue(inst,
1536  NULL, NULL, cpu.curCycle() + Cycles(1),
1537  cpu.getContext(inst->id.threadId))) {
1538  can_issue_next = true;
1539  break;
1540  }
1541  }
1542  }
1543 
1544  bool head_inst_might_commit = false;
1545 
1546  /* Could the head in flight insts be committed */
1547  for (auto const &info : executeInfo) {
1548  if (!info.inFlightInsts->empty()) {
1549  const QueuedInst &head_inst = info.inFlightInsts->front();
1550 
1551  if (head_inst.inst->isNoCostInst()) {
1552  head_inst_might_commit = true;
1553  } else {
1554  FUPipeline *fu = funcUnits[head_inst.inst->fuIndex];
1555  if ((fu->stalled &&
1556  fu->front().inst->id == head_inst.inst->id) ||
1557  lsq.findResponse(head_inst.inst))
1558  {
1559  head_inst_might_commit = true;
1560  break;
1561  }
1562  }
1563  }
1564  }
1565 
1566  DPRINTF(Activity, "Need to tick num issued insts: %s%s%s%s%s%s\n",
1567  (num_issued != 0 ? " (issued some insts)" : ""),
1568  (becoming_stalled ? "(becoming stalled)" : "(not becoming stalled)"),
1569  (can_issue_next ? " (can issue next inst)" : ""),
1570  (head_inst_might_commit ? "(head inst might commit)" : ""),
1571  (lsq.needsToTick() ? " (LSQ needs to tick)" : ""),
1572  (interrupted ? " (interrupted)" : ""));
1573 
1574  bool need_to_tick =
1575  num_issued != 0 || /* Issued some insts this cycle */
1576  !becoming_stalled || /* Some FU pipelines can still move */
1577  can_issue_next || /* Can still issue a new inst */
1578  head_inst_might_commit || /* Could possibly commit the next inst */
1579  lsq.needsToTick() || /* Must step the dcache port */
1580  interrupted; /* There are pending interrupts */
1581 
1582  if (!need_to_tick) {
1583  DPRINTF(Activity, "The next cycle might be skippable as there are no"
1584  " advanceable FUs\n");
1585  }
1586 
1587  /* Wake up if we need to tick again */
1588  if (need_to_tick)
1589  cpu.wakeupOnEvent(Pipeline::ExecuteStageId);
1590 
1591  /* Note activity of following buffer */
1592  if (!branch.isBubble())
1593  cpu.activityRecorder->activateStage(Pipeline::ExecuteStageId);
1594 
1595  /* Make sure the input (if any left) is pushed */
1596  if (!inp.outputWire->isBubble())
1597  inputBuffer[inp.outputWire->threadId].pushTail();
1598 }
1599 
1600 ThreadID
1601 Execute::checkInterrupts(BranchData& branch, bool& interrupted)
1602 {
1603  ThreadID tid = interruptPriority;
1604  /* Evaluate interrupts in round-robin based upon service */
1605  do {
1606  /* Has an interrupt been signalled? This may not be acted on
1607  * straightaway so this is different from took_interrupt */
1608  bool thread_interrupted = false;
1609 
1610  if (FullSystem && cpu.getInterruptController(tid)) {
1611  /* This is here because it seems that after drainResume the
1612  * interrupt controller isn't always set */
1613  thread_interrupted = executeInfo[tid].drainState == NotDraining &&
1614  isInterrupted(tid);
1615  interrupted = interrupted || thread_interrupted;
1616  } else {
1617  DPRINTF(MinorInterrupt, "No interrupt controller\n");
1618  }
1619  DPRINTF(MinorInterrupt, "[tid:%d] thread_interrupted?=%d isInbetweenInsts?=%d\n",
1620  tid, thread_interrupted, isInbetweenInsts(tid));
1621  /* Act on interrupts */
1622  if (thread_interrupted && isInbetweenInsts(tid)) {
1623  if (takeInterrupt(tid, branch)) {
1624  interruptPriority = tid;
1625  return tid;
1626  }
1627  } else {
1628  tid = (tid + 1) % cpu.numThreads;
1629  }
1630  } while (tid != interruptPriority);
1631 
1632  return InvalidThreadID;
1633 }
1634 
1635 bool
1636 Execute::hasInterrupt(ThreadID thread_id)
1637 {
1638  if (FullSystem && cpu.getInterruptController(thread_id)) {
1639  return executeInfo[thread_id].drainState == NotDraining &&
1640  isInterrupted(thread_id);
1641  }
1642 
1643  return false;
1644 }
1645 
1646 void
1647 Execute::minorTrace() const
1648 {
1649  std::ostringstream insts;
1650  std::ostringstream stalled;
1651 
1652  executeInfo[0].instsBeingCommitted.reportData(insts);
1653  lsq.minorTrace();
1654  inputBuffer[0].minorTrace();
1655  scoreboard[0].minorTrace();
1656 
1657  /* Report functional unit stalling in one string */
1658  unsigned int i = 0;
1659  while (i < numFuncUnits)
1660  {
1661  stalled << (funcUnits[i]->stalled ? '1' : 'E');
1662  i++;
1663  if (i != numFuncUnits)
1664  stalled << ',';
1665  }
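/* For example, three FUs with only FU 1 stalled would be reported as
 * stalled=E,1,E in the MinorTrace line below. */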
1666 
1667  MINORTRACE("insts=%s inputIndex=%d streamSeqNum=%d"
1668  " stalled=%s drainState=%d isInbetweenInsts=%d\n",
1669  insts.str(), executeInfo[0].inputIndex, executeInfo[0].streamSeqNum,
1670  stalled.str(), executeInfo[0].drainState, isInbetweenInsts(0));
1671 
1672  std::for_each(funcUnits.begin(), funcUnits.end(),
1673  std::mem_fun(&FUPipeline::minorTrace));
1674 
1675  executeInfo[0].inFlightInsts->minorTrace();
1676  executeInfo[0].inFUMemInsts->minorTrace();
1677 }
1678 
1679 inline ThreadID
1680 Execute::getCommittingThread()
1681 {
1682  std::vector<ThreadID> priority_list;
1683 
1684  switch (cpu.threadPolicy) {
1685  case Enums::SingleThreaded:
1686  return 0;
1687  case Enums::RoundRobin:
1688  priority_list = cpu.roundRobinPriority(commitPriority);
1689  break;
1690  case Enums::Random:
1691  priority_list = cpu.randomPriority();
1692  break;
1693  default:
1694  panic("Invalid thread policy");
1695  }
1696 
1697  for (auto tid : priority_list) {
1698  ExecuteThreadInfo &ex_info = executeInfo[tid];
1699  bool can_commit_insts = !ex_info.inFlightInsts->empty();
1700  if (can_commit_insts) {
1701  QueuedInst *head_inflight_inst = &(ex_info.inFlightInsts->front());
1702  MinorDynInstPtr inst = head_inflight_inst->inst;
1703 
1704  can_commit_insts = can_commit_insts &&
1705  (!inst->inLSQ || (lsq.findResponse(inst) != NULL));
1706 
1707  if (!inst->inLSQ) {
1708  bool can_transfer_mem_inst = false;
1709  if (!ex_info.inFUMemInsts->empty() && lsq.canRequest()) {
1710  const MinorDynInstPtr head_mem_ref_inst =
1711  ex_info.inFUMemInsts->front().inst;
1712  FUPipeline *fu = funcUnits[head_mem_ref_inst->fuIndex];
1713  const MinorDynInstPtr &fu_inst = fu->front().inst;
1714  can_transfer_mem_inst =
1715  !fu_inst->isBubble() &&
1716  fu_inst->id.threadId == tid &&
1717  !fu_inst->inLSQ &&
1718  fu_inst->canEarlyIssue &&
1719  inst->id.execSeqNum > fu_inst->instToWaitFor;
1720  }
1721 
1722  bool can_execute_fu_inst = inst->fuIndex == noCostFUIndex;
1723  if (can_commit_insts && !can_transfer_mem_inst &&
1724  inst->fuIndex != noCostFUIndex)
1725  {
1726  QueuedInst& fu_inst = funcUnits[inst->fuIndex]->front();
1727  can_execute_fu_inst = !fu_inst.inst->isBubble() &&
1728  fu_inst.inst->id == inst->id;
1729  }
1730 
1731  can_commit_insts = can_commit_insts &&
1732  (can_transfer_mem_inst || can_execute_fu_inst);
1733  }
1734  }
1735 
1736 
1737  if (can_commit_insts) {
1738  commitPriority = tid;
1739  return tid;
1740  }
1741  }
1742 
1743  return InvalidThreadID;
1744 }
1745 
1746 inline ThreadID
1747 Execute::getIssuingThread()
1748 {
1749  std::vector<ThreadID> priority_list;
1750 
1751  switch (cpu.threadPolicy) {
1752  case Enums::SingleThreaded:
1753  return 0;
1754  case Enums::RoundRobin:
1755  priority_list = cpu.roundRobinPriority(issuePriority);
1756  break;
1757  case Enums::Random:
1758  priority_list = cpu.randomPriority();
1759  break;
1760  default:
1761  panic("Invalid thread scheduling policy.");
1762  }
1763 
1764  for (auto tid : priority_list) {
1765  if (getInput(tid)) {
1766  issuePriority = tid;
1767  return tid;
1768  }
1769  }
1770 
1771  return InvalidThreadID;
1772 }
1773 
1774 void
1775 Execute::drainResume()
1776 {
1777  DPRINTF(Drain, "MinorExecute drainResume\n");
1778 
1779  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1780  setDrainState(tid, NotDraining);
1781  }
1782 
1783  cpu.wakeupOnEvent(Pipeline::ExecuteStageId);
1784 }
1785 
1786 std::ostream &operator <<(std::ostream &os, Execute::DrainState state)
1787 {
1788  switch (state)
1789  {
1790  case Execute::NotDraining:
1791  os << "NotDraining";
1792  break;
1793  case Execute::DrainCurrentInst:
1794  os << "DrainCurrentInst";
1795  break;
1796  case Execute::DrainHaltFetch:
1797  os << "DrainHaltFetch";
1798  break;
1799  case Execute::DrainAllInsts:
1800  os << "DrainAllInsts";
1801  break;
1802  default:
1803  os << "Drain-" << static_cast<int>(state);
1804  break;
1805  }
1806 
1807  return os;
1808 }
1809 
1810 void
1811 Execute::setDrainState(ThreadID thread_id, DrainState state)
1812 {
1813  DPRINTF(Drain, "setDrainState[%d]: %s\n", thread_id, state);
1814  executeInfo[thread_id].drainState = state;
1815 }
1816 
1817 unsigned int
1818 Execute::drain()
1819 {
1820  DPRINTF(Drain, "MinorExecute drain\n");
1821 
1822  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1823  if (executeInfo[tid].drainState == NotDraining) {
1824  cpu.wakeupOnEvent(Pipeline::ExecuteStageId);
1825 
1826  /* Go to DrainCurrentInst if we're between microops
1827  * or waiting on an unbufferable memory operation.
1828  * Otherwise we can go straight to DrainHaltFetch
1829  */
1830  if (isInbetweenInsts(tid))
1831  setDrainState(tid, DrainCurrentInst);
1832  else
1833  setDrainState(tid, DrainHaltFetch);
1834  }
1835  }
1836  return (isDrained() ? 0 : 1);
1837 }
1838 
1839 bool
1840 Execute::isDrained()
1841 {
1842  if (!lsq.isDrained())
1843  return false;
1844 
1845  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
1846  if (!inputBuffer[tid].empty() ||
1847  !executeInfo[tid].inFlightInsts->empty()) {
1848 
1849  return false;
1850  }
1851  }
1852 
1853  return true;
1854 }
1855 
1856 Execute::~Execute()
1857 {
1858  for (unsigned int i = 0; i < numFuncUnits; i++)
1859  delete funcUnits[i];
1860 
1861  for (ThreadID tid = 0; tid < cpu.numThreads; tid++)
1862  delete executeInfo[tid].inFlightInsts;
1863 }
1864 
1865 bool
1866 Execute::instIsRightStream(MinorDynInstPtr inst)
1867 {
1868  return inst->id.streamSeqNum == executeInfo[inst->id.threadId].streamSeqNum;
1869 }
1870 
1871 bool
1872 Execute::instIsHeadInst(MinorDynInstPtr inst)
1873 {
1874  bool ret = false;
1875 
1876  if (!executeInfo[inst->id.threadId].inFlightInsts->empty())
1877  ret = executeInfo[inst->id.threadId].inFlightInsts->front().inst->id == inst->id;
1878 
1879  return ret;
1880 }
1881 
1882 MinorCPU::MinorCPUPort &
1883 Execute::getDcachePort()
1884 {
1885  return lsq.getDcachePort();
1886 }
1887 
1888 }