gem5  v20.1.0.0
fetch1.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2013-2014 ARM Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions are
16  * met: redistributions of source code must retain the above copyright
17  * notice, this list of conditions and the following disclaimer;
18  * redistributions in binary form must reproduce the above copyright
19  * notice, this list of conditions and the following disclaimer in the
20  * documentation and/or other materials provided with the distribution;
21  * neither the name of the copyright holders nor the names of its
22  * contributors may be used to endorse or promote products derived from
23  * this software without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 #include "cpu/minor/fetch1.hh"
39 
40 #include <cstring>
41 #include <iomanip>
42 #include <sstream>
43 
44 #include "base/cast.hh"
45 #include "cpu/minor/pipeline.hh"
46 #include "debug/Drain.hh"
47 #include "debug/Fetch.hh"
48 #include "debug/MinorTrace.hh"
49 
50 namespace Minor
51 {
52 
53 Fetch1::Fetch1(const std::string &name_,
54  MinorCPU &cpu_,
55  MinorCPUParams &params,
58  Latch<BranchData>::Output prediction_,
59  std::vector<InputBuffer<ForwardLineData>> &next_stage_input_buffer) :
60  Named(name_),
61  cpu(cpu_),
62  inp(inp_),
63  out(out_),
64  prediction(prediction_),
65  nextStageReserve(next_stage_input_buffer),
66  icachePort(name_ + ".icache_port", *this, cpu_),
67  lineSnap(params.fetch1LineSnapWidth),
68  maxLineWidth(params.fetch1LineWidth),
69  fetchLimit(params.fetch1FetchLimit),
70  fetchInfo(params.numThreads),
71  threadPriority(0),
72  requests(name_ + ".requests", "lines", params.fetch1FetchLimit),
73  transfers(name_ + ".transfers", "lines", params.fetch1FetchLimit),
74  icacheState(IcacheRunning),
75  lineSeqNum(InstId::firstLineSeqNum),
76  numFetchesInMemorySystem(0),
77  numFetchesInITLB(0)
78 {
79  if (lineSnap == 0) {
81  DPRINTF(Fetch, "lineSnap set to cache line size of: %d\n",
82  lineSnap);
83  }
84 
85  if (maxLineWidth == 0) {
87  DPRINTF(Fetch, "maxLineWidth set to cache line size of: %d\n",
88  maxLineWidth);
89  }
90 
91  /* These assertions should be copied to the Python config. as well */
92  if ((lineSnap % sizeof(TheISA::MachInst)) != 0) {
93  fatal("%s: fetch1LineSnapWidth must be a multiple "
94  "of sizeof(TheISA::MachInst) (%d)\n", name_,
95  sizeof(TheISA::MachInst));
96  }
97 
98  if (!(maxLineWidth >= lineSnap &&
99  (maxLineWidth % sizeof(TheISA::MachInst)) == 0))
100  {
101  fatal("%s: fetch1LineWidth must be a multiple of"
102  " sizeof(TheISA::MachInst)"
103  " (%d), and >= fetch1LineSnapWidth (%d)\n",
104  name_, sizeof(TheISA::MachInst), lineSnap);
105  }
106 
107  if (fetchLimit < 1) {
108  fatal("%s: fetch1FetchLimit must be >= 1 (%d)\n", name_,
109  fetchLimit);
110  }
111 }
112 
113 inline ThreadID
115 {
116  /* Select thread via policy. */
117  std::vector<ThreadID> priority_list;
118 
119  switch (cpu.threadPolicy) {
120  case Enums::SingleThreaded:
121  priority_list.push_back(0);
122  break;
123  case Enums::RoundRobin:
124  priority_list = cpu.roundRobinPriority(threadPriority);
125  break;
126  case Enums::Random:
127  priority_list = cpu.randomPriority();
128  break;
129  default:
130  panic("Unknown fetch policy");
131  }
132 
133  for (auto tid : priority_list) {
134  if (cpu.getContext(tid)->status() == ThreadContext::Active &&
135  !fetchInfo[tid].blocked &&
136  fetchInfo[tid].state == FetchRunning) {
137  threadPriority = tid;
138  return tid;
139  }
140  }
141 
142  return InvalidThreadID;
143 }
144 
145 void
147 {
148  /* Reference the currently used thread state. */
149  Fetch1ThreadInfo &thread = fetchInfo[tid];
150 
151  /* If line_offset != 0, a request is pushed for the remainder of the
152  * line. */
153  /* Use a lower, sizeof(MachInst) aligned address for the fetch */
154  Addr aligned_pc = thread.pc.instAddr() & ~((Addr) lineSnap - 1);
155  unsigned int line_offset = aligned_pc % lineSnap;
156  unsigned int request_size = maxLineWidth - line_offset;
157 
158  /* Fill in the line's id */
159  InstId request_id(tid,
160  thread.streamSeqNum, thread.predictionSeqNum,
161  lineSeqNum);
162 
163  FetchRequestPtr request = new FetchRequest(*this, request_id, thread.pc);
164 
165  DPRINTF(Fetch, "Inserting fetch into the fetch queue "
166  "%s addr: 0x%x pc: %s line_offset: %d request_size: %d\n",
167  request_id, aligned_pc, thread.pc, line_offset, request_size);
168 
169  request->request->setContext(cpu.threads[tid]->getTC()->contextId());
170  request->request->setVirt(
171  aligned_pc, request_size, Request::INST_FETCH, cpu.instRequestorId(),
172  /* I've no idea why we need the PC, but give it */
173  thread.pc.instAddr());
174 
175  DPRINTF(Fetch, "Submitting ITLB request\n");
177 
179 
180  /* Reserve space in the queues upstream of requests for results */
181  transfers.reserve();
182  requests.push(request);
183 
184  /* Submit the translation request. The response will come
185  * through finish/markDelayed on this request as it bears
186  * the Translation interface */
187  cpu.threads[request->id.threadId]->itb->translateTiming(
188  request->request,
189  cpu.getContext(request->id.threadId),
190  request, BaseTLB::Execute);
191 
192  lineSeqNum++;
193 
194  /* Step the PC for the next line onto the line aligned next address.
195  * Note that as instructions can span lines, this PC is only a
196  * reliable 'new' PC if the next line has a new stream sequence number. */
197  thread.pc.set(aligned_pc + request_size);
198 }
199 
200 std::ostream &
201 operator <<(std::ostream &os, Fetch1::IcacheState state)
202 {
203  switch (state) {
205  os << "IcacheRunning";
206  break;
208  os << "IcacheNeedsRetry";
209  break;
210  default:
211  os << "IcacheState-" << static_cast<int>(state);
212  break;
213  }
214  return os;
215 }
216 
217 void
219 {
220  /* Make the necessary packet for a memory transaction */
222  packet->allocate();
223 
224  /* This FetchRequest becomes SenderState to allow the response to be
225  * identified */
226  packet->pushSenderState(this);
227 }
228 
229 void
230 Fetch1::FetchRequest::finish(const Fault &fault_, const RequestPtr &request_,
232 {
233  fault = fault_;
234 
235  state = Translated;
236  fetch.handleTLBResponse(this);
237 
238  /* Let's try and wake up the processor for the next cycle */
239  fetch.cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
240 }
241 
242 void
244 {
246 
247  if (response->fault != NoFault) {
248  DPRINTF(Fetch, "Fault in address ITLB translation: %s, "
249  "paddr: 0x%x, vaddr: 0x%x\n",
250  response->fault->name(),
251  (response->request->hasPaddr() ?
252  response->request->getPaddr() : 0),
253  response->request->getVaddr());
254 
255  if (DTRACE(MinorTrace))
256  minorTraceResponseLine(name(), response);
257  } else {
258  DPRINTF(Fetch, "Got ITLB response\n");
259  }
260 
261  response->state = FetchRequest::Translated;
262 
263  tryToSendToTransfers(response);
264 }
265 
267 {
268  if (packet)
269  delete packet;
270 }
271 
272 void
274 {
275  if (!requests.empty() && requests.front() != request) {
276  DPRINTF(Fetch, "Fetch not at front of requests queue, can't"
277  " issue to memory\n");
278  return;
279  }
280 
281  if (request->state == FetchRequest::InTranslation) {
282  DPRINTF(Fetch, "Fetch still in translation, not issuing to"
283  " memory\n");
284  return;
285  }
286 
287  if (request->isDiscardable() || request->fault != NoFault) {
288  /* Discarded and faulting requests carry on through transfers
289  * as Complete/packet == NULL */
290 
291  request->state = FetchRequest::Complete;
293 
294  /* Wake up the pipeline next cycle as there will be no event
295  * for this queue->queue transfer */
297  } else if (request->state == FetchRequest::Translated) {
298  if (!request->packet)
299  request->makePacket();
300 
301  /* Ensure that the packet won't delete the request */
302  assert(request->packet->needsResponse());
303 
304  if (tryToSend(request))
306  } else {
307  DPRINTF(Fetch, "Not advancing line fetch\n");
308  }
309 }
310 
311 void
313 {
314  assert(!requests.empty() && requests.front() == request);
315 
316  requests.pop();
317  transfers.push(request);
318 }
319 
320 bool
322 {
323  bool ret = false;
324 
325  if (icachePort.sendTimingReq(request->packet)) {
326  /* Invalidate the fetch_requests packet so we don't
327  * accidentally fail to deallocate it (or use it!)
328  * later by overwriting it */
329  request->packet = NULL;
332 
333  ret = true;
334 
335  DPRINTF(Fetch, "Issued fetch request to memory: %s\n",
336  request->id);
337  } else {
338  /* Needs to be resent, wait for that */
340 
341  DPRINTF(Fetch, "Line fetch needs to retry: %s\n",
342  request->id);
343  }
344 
345  return ret;
346 }
347 
348 void
350 {
351  IcacheState old_icache_state = icacheState;
352 
353  switch (icacheState) {
354  case IcacheRunning:
355  /* Move ITLB results on to the memory system */
356  if (!requests.empty()) {
358  }
359  break;
360  case IcacheNeedsRetry:
361  break;
362  }
363 
364  if (icacheState != old_icache_state) {
365  DPRINTF(Fetch, "Step in state %s moving to state %s\n",
366  old_icache_state, icacheState);
367  }
368 }
369 
370 void
372 {
373  if (!queue.empty()) {
374  delete queue.front();
375  queue.pop();
376  }
377 }
378 
379 unsigned int
381 {
382  return requests.occupiedSpace() +
384 }
385 
387 void
389  Fetch1::FetchRequestPtr response) const
390 {
391  const RequestPtr &request M5_VAR_USED = response->request;
392 
393  if (response->packet && response->packet->isError()) {
394  MINORLINE(this, "id=F;%s vaddr=0x%x fault=\"error packet\"\n",
395  response->id, request->getVaddr());
396  } else if (response->fault != NoFault) {
397  MINORLINE(this, "id=F;%s vaddr=0x%x fault=\"%s\"\n",
398  response->id, request->getVaddr(), response->fault->name());
399  } else {
400  MINORLINE(this, "id=%s size=%d vaddr=0x%x paddr=0x%x\n",
401  response->id, request->getSize(),
402  request->getVaddr(), request->getPaddr());
403  }
404 }
405 
406 bool
408 {
409  DPRINTF(Fetch, "recvTimingResp %d\n", numFetchesInMemorySystem);
410 
411  /* Only push the response if we didn't change stream? No, all responses
412  * should hit the responses queue. It's the job of 'step' to throw them
413  * away. */
414  FetchRequestPtr fetch_request = safe_cast<FetchRequestPtr>
415  (response->popSenderState());
416 
417  /* Fixup packet in fetch_request as this may have changed */
418  assert(!fetch_request->packet);
419  fetch_request->packet = response;
420 
422  fetch_request->state = FetchRequest::Complete;
423 
424  if (DTRACE(MinorTrace))
425  minorTraceResponseLine(name(), fetch_request);
426 
427  if (response->isError()) {
428  DPRINTF(Fetch, "Received error response packet: %s\n",
429  fetch_request->id);
430  }
431 
432  /* We go to idle even if there are more things to do on the queues as
433  * it's the job of step to actually step us on to the next transaction */
434 
435  /* Let's try and wake up the processor for the next cycle to move on
436  * queues */
438 
439  /* Never busy */
440  return true;
441 }
442 
443 void
445 {
446  DPRINTF(Fetch, "recvRetry\n");
447  assert(icacheState == IcacheNeedsRetry);
448  assert(!requests.empty());
449 
450  FetchRequestPtr retryRequest = requests.front();
451 
453 
454  if (tryToSend(retryRequest))
455  moveFromRequestsToTransfers(retryRequest);
456 }
457 
458 std::ostream &
459 operator <<(std::ostream &os, Fetch1::FetchState state)
460 {
461  switch (state) {
462  case Fetch1::FetchHalted:
463  os << "FetchHalted";
464  break;
466  os << "FetchWaitingForPC";
467  break;
469  os << "FetchRunning";
470  break;
471  default:
472  os << "FetchState-" << static_cast<int>(state);
473  break;
474  }
475  return os;
476 }
477 
478 void
480 {
481  Fetch1ThreadInfo &thread = fetchInfo[branch.threadId];
482 
483  updateExpectedSeqNums(branch);
484 
485  /* Start fetching again if we were stopped */
486  switch (branch.reason) {
488  {
489  if (thread.wakeupGuard) {
490  DPRINTF(Fetch, "Not suspending fetch due to guard: %s\n",
491  branch);
492  } else {
493  DPRINTF(Fetch, "Suspending fetch: %s\n", branch);
494  thread.state = FetchWaitingForPC;
495  }
496  }
497  break;
499  DPRINTF(Fetch, "Halting fetch\n");
500  thread.state = FetchHalted;
501  break;
502  default:
503  DPRINTF(Fetch, "Changing stream on branch: %s\n", branch);
504  thread.state = FetchRunning;
505  break;
506  }
507  thread.pc = branch.target;
508 }
509 
510 void
512 {
513  Fetch1ThreadInfo &thread = fetchInfo[branch.threadId];
514 
515  DPRINTF(Fetch, "Updating streamSeqNum from: %d to %d,"
516  " predictionSeqNum from: %d to %d\n",
517  thread.streamSeqNum, branch.newStreamSeqNum,
518  thread.predictionSeqNum, branch.newPredictionSeqNum);
519 
520  /* Change the stream */
521  thread.streamSeqNum = branch.newStreamSeqNum;
522  /* Update the prediction. Note that it's possible for this to
523  * actually set the prediction to an *older* value if new
524  * predictions have been discarded by execute */
525  thread.predictionSeqNum = branch.newPredictionSeqNum;
526 }
527 
528 void
530  ForwardLineData &line)
531 {
532  Fetch1ThreadInfo &thread = fetchInfo[response->id.threadId];
533  PacketPtr packet = response->packet;
534 
535  /* Pass the prefetch abort (if any) on to Fetch2 in a ForwardLineData
536  * structure */
537  line.setFault(response->fault);
538  /* Make sequence numbers valid in return */
539  line.id = response->id;
540  /* Set PC to virtual address */
541  line.pc = response->pc;
542  /* Set the lineBase, which is a sizeof(MachInst) aligned address <=
543  * pc.instAddr() */
544  line.lineBaseAddr = response->request->getVaddr();
545 
546  if (response->fault != NoFault) {
547  /* Stop fetching if there was a fault */
548  /* Should probably try to flush the queues as well, but we
549  * can't be sure that this fault will actually reach Execute, and we
550  * can't (currently) selectively remove this stream from the queues */
551  DPRINTF(Fetch, "Stopping line fetch because of fault: %s\n",
552  response->fault->name());
554  } else {
555  line.adoptPacketData(packet);
556  /* Null the response's packet to prevent the response from trying to
557  * deallocate the packet */
558  response->packet = NULL;
559  }
560 }
561 
562 void
564 {
565  const BranchData &execute_branch = *inp.outputWire;
566  const BranchData &fetch2_branch = *prediction.outputWire;
567  ForwardLineData &line_out = *out.inputWire;
568 
569  assert(line_out.isBubble());
570 
571  for (ThreadID tid = 0; tid < cpu.numThreads; tid++)
572  fetchInfo[tid].blocked = !nextStageReserve[tid].canReserve();
573 
575  if (execute_branch.threadId != InvalidThreadID &&
576  execute_branch.threadId == fetch2_branch.threadId) {
577 
578  Fetch1ThreadInfo &thread = fetchInfo[execute_branch.threadId];
579 
580  /* Are we changing stream? Look to the Execute branches first, then
581  * to predicted changes of stream from Fetch2 */
582  if (execute_branch.isStreamChange()) {
583  if (thread.state == FetchHalted) {
584  DPRINTF(Fetch, "Halted, ignoring branch: %s\n", execute_branch);
585  } else {
586  changeStream(execute_branch);
587  }
588 
589  if (!fetch2_branch.isBubble()) {
590  DPRINTF(Fetch, "Ignoring simultaneous prediction: %s\n",
591  fetch2_branch);
592  }
593 
594  /* The streamSeqNum tagging in request/response ->req should handle
595  * discarding those requests when we get to them. */
596  } else if (thread.state != FetchHalted && fetch2_branch.isStreamChange()) {
597  /* Handle branch predictions by changing the instruction source
598  * if we're still processing the same stream (as set by streamSeqNum)
599  * as the one of the prediction.
600  */
601  if (fetch2_branch.newStreamSeqNum != thread.streamSeqNum) {
602  DPRINTF(Fetch, "Not changing stream on prediction: %s,"
603  " streamSeqNum mismatch\n",
604  fetch2_branch);
605  } else {
606  changeStream(fetch2_branch);
607  }
608  }
609  } else {
610  /* Fetch2 and Execute branches are for different threads */
611  if (execute_branch.threadId != InvalidThreadID &&
612  execute_branch.isStreamChange()) {
613 
614  if (fetchInfo[execute_branch.threadId].state == FetchHalted) {
615  DPRINTF(Fetch, "Halted, ignoring branch: %s\n", execute_branch);
616  } else {
617  changeStream(execute_branch);
618  }
619  }
620 
621  if (fetch2_branch.threadId != InvalidThreadID &&
622  fetch2_branch.isStreamChange()) {
623 
624  if (fetchInfo[fetch2_branch.threadId].state == FetchHalted) {
625  DPRINTF(Fetch, "Halted, ignoring branch: %s\n", fetch2_branch);
626  } else if (fetch2_branch.newStreamSeqNum != fetchInfo[fetch2_branch.threadId].streamSeqNum) {
627  DPRINTF(Fetch, "Not changing stream on prediction: %s,"
628  " streamSeqNum mismatch\n", fetch2_branch);
629  } else {
630  changeStream(fetch2_branch);
631  }
632  }
633  }
634 
635  if (numInFlightFetches() < fetchLimit) {
636  ThreadID fetch_tid = getScheduledThread();
637 
638  if (fetch_tid != InvalidThreadID) {
639  DPRINTF(Fetch, "Fetching from thread %d\n", fetch_tid);
640 
641  /* Generate fetch to selected thread */
642  fetchLine(fetch_tid);
643  /* Take up a slot in the fetch queue */
644  nextStageReserve[fetch_tid].reserve();
645  } else {
646  DPRINTF(Fetch, "No active threads available to fetch from\n");
647  }
648  }
649 
650 
651  /* Halting shouldn't prevent fetches in flight from being processed */
652  /* Step fetches through the icachePort queues and memory system */
653  stepQueues();
654 
655  /* As we've thrown away early lines, if there is a line, it must
656  * be from the right stream */
657  if (!transfers.empty() &&
659  {
661 
662  if (response->isDiscardable()) {
663  nextStageReserve[response->id.threadId].freeReservation();
664 
665  DPRINTF(Fetch, "Discarding translated fetch as it's for"
666  " an old stream\n");
667 
668  /* Wake up next cycle just in case there was some other
669  * action to do */
671  } else {
672  DPRINTF(Fetch, "Processing fetched line: %s\n",
673  response->id);
674 
675  processResponse(response, line_out);
676  }
677 
679  }
680 
681  /* If we generated output, and mark the stage as being active
682  * to encourage that output on to the next stage */
683  if (!line_out.isBubble())
685 
686  /* Fetch1 has no inputBuffer so the only activity we can have is to
687  * generate a line output (tested just above) or to initiate a memory
688  * fetch which will signal activity when it returns/needs stepping
689  * between queues */
690 
691 
692  /* This looks hackish. And it is, but there doesn't seem to be a better
693  * way to do this. The signal from commit to suspend fetch takes 1
694  * clock cycle to propagate to fetch. However, a legitimate wakeup
695  * may occur between cycles from the memory system. Thus wakeup guard
696  * prevents us from suspending in that case. */
697 
698  for (auto& thread : fetchInfo) {
699  thread.wakeupGuard = false;
700  }
701 }
702 
703 void
705 {
706  ThreadContext *thread_ctx = cpu.getContext(tid);
707  Fetch1ThreadInfo &thread = fetchInfo[tid];
708  thread.pc = thread_ctx->pcState();
709  thread.state = FetchRunning;
710  thread.wakeupGuard = true;
711  DPRINTF(Fetch, "[tid:%d]: Changing stream wakeup %s\n",
712  tid, thread_ctx->pcState());
713 
715 }
716 
717 bool
719 {
720  bool drained = numInFlightFetches() == 0 && (*out.inputWire).isBubble();
721  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
722  Fetch1ThreadInfo &thread = fetchInfo[tid];
723  DPRINTF(Drain, "isDrained[tid:%d]: %s %s%s\n",
724  tid,
725  thread.state == FetchHalted,
726  (numInFlightFetches() == 0 ? "" : "inFlightFetches "),
727  ((*out.inputWire).isBubble() ? "" : "outputtingLine"));
728 
729  drained = drained && (thread.state != FetchRunning);
730  }
731 
732  return drained;
733 }
734 
735 void
737 {
738  os << id;
739 }
740 
742 {
743  Fetch1ThreadInfo &thread = fetch.fetchInfo[id.threadId];
744 
745  /* Can't discard lines in TLB/memory */
746  return state != InTranslation && state != RequestIssuing &&
747  (id.streamSeqNum != thread.streamSeqNum ||
748  id.predictionSeqNum != thread.predictionSeqNum);
749 }
750 
751 void
753 {
754  // TODO: Un-bork minorTrace for THREADS
755  // bork bork bork
756  const Fetch1ThreadInfo &thread = fetchInfo[0];
757 
758  std::ostringstream data;
759 
760  if (thread.blocked)
761  data << 'B';
762  else
763  (*out.inputWire).reportData(data);
764 
765  MINORTRACE("state=%s icacheState=%s in_tlb_mem=%s/%s"
766  " streamSeqNum=%d lines=%s\n", thread.state, icacheState,
768  thread.streamSeqNum, data.str());
771 }
772 
773 }
InvalidThreadID
const ThreadID InvalidThreadID
Definition: types.hh:228
fatal
#define fatal(...)
This implements a cprintf based fatal() function.
Definition: logging.hh:183
Packet::isError
bool isError() const
Definition: packet.hh:583
Minor::BranchData::threadId
ThreadID threadId
ThreadID associated with branch.
Definition: pipe_data.hh:112
Minor::BranchData::target
TheISA::PCState target
Starting PC of that stream.
Definition: pipe_data.hh:119
MinorCPU::wakeupOnEvent
void wakeupOnEvent(unsigned int stage_id)
Interface for stages to signal that they have become active after a callback or eventq event where th...
Definition: cpu.cc:285
X86ISA::os
Bitfield< 17 > os
Definition: misc.hh:803
Minor::Fetch1::icachePort
IcachePort icachePort
IcachePort to pass to the CPU.
Definition: fetch1.hh:204
Minor::Fetch1::nextStageReserve
std::vector< InputBuffer< ForwardLineData > > & nextStageReserve
Interface to reserve space in the next stage.
Definition: fetch1.hh:200
data
const char data[]
Definition: circlebuf.test.cc:42
Minor::Fetch1::evaluate
void evaluate()
Pass on input/buffer data to the output if you can.
Definition: fetch1.cc:563
ThreadID
int16_t ThreadID
Thread index/ID type.
Definition: types.hh:227
ArmISA::MachInst
uint32_t MachInst
Definition: types.hh:52
Minor::Fetch1::icacheState
IcacheState icacheState
Retry state of icache_port.
Definition: fetch1.hh:303
Minor::Latch::Input
Encapsulate wires on either input or output of the latch.
Definition: buffers.hh:245
Minor::Fetch1::requests
FetchQueue requests
Queue of address translated requests from Fetch1.
Definition: fetch1.hh:297
MinorCPU::activityRecorder
Minor::MinorActivityRecorder * activityRecorder
Activity recording for pipeline.
Definition: cpu.hh:88
Minor::ForwardLineData::adoptPacketData
void adoptPacketData(Packet *packet)
Use the data from a packet as line instead of allocating new space.
Definition: pipe_data.cc:183
Minor::BranchData::newStreamSeqNum
InstSeqNum newStreamSeqNum
Sequence number of new stream/prediction to be adopted.
Definition: pipe_data.hh:115
Minor::Fetch1::FetchRequest::fault
Fault fault
Fill in a fault if one happens during fetch, check this by picking apart the response packet.
Definition: fetch1.hh:138
Minor::ForwardLineData
Line fetch data in the forward direction.
Definition: pipe_data.hh:171
Minor::BranchData::HaltFetch
@ HaltFetch
Definition: pipe_data.hh:95
BaseCPU::cacheLineSize
unsigned int cacheLineSize() const
Get the cache line size of the system.
Definition: base.hh:376
Minor::Fetch1::Fetch1ThreadInfo::predictionSeqNum
InstSeqNum predictionSeqNum
Prediction sequence number.
Definition: fetch1.hh:272
BaseTLB::Mode
Mode
Definition: tlb.hh:57
MemCmd::ReadReq
@ ReadReq
Definition: packet.hh:82
Minor::Fetch1::Fetch1ThreadInfo::streamSeqNum
InstSeqNum streamSeqNum
Stream sequence number.
Definition: fetch1.hh:266
Minor::Fetch1::handleTLBResponse
void handleTLBResponse(FetchRequestPtr response)
Handle pushing a TLB response onto the right queue.
Definition: fetch1.cc:243
Minor::Fetch1::recvReqRetry
virtual void recvReqRetry()
Definition: fetch1.cc:444
Minor::Fetch1::cpu
MinorCPU & cpu
Construction-assigned data members.
Definition: fetch1.hh:190
MinorCPU::threadPolicy
Enums::ThreadPolicy threadPolicy
Thread Scheduling Policy (RoundRobin, Random, etc)
Definition: cpu.hh:112
cast.hh
Minor::Queue::minorTrace
void minorTrace() const
Definition: buffers.hh:505
Minor::Fetch1::FetchRequest::isComplete
bool isComplete() const
Is this a complete read line or fault.
Definition: fetch1.hh:152
Minor::Fetch1::processResponse
void processResponse(FetchRequestPtr response, ForwardLineData &line)
Convert a response to a ForwardLineData.
Definition: fetch1.cc:529
RequestPtr
std::shared_ptr< Request > RequestPtr
Definition: request.hh:82
Minor::Fetch1::numFetchesInITLB
unsigned int numFetchesInITLB
Number of requests inside the ITLB rather than in the queues.
Definition: fetch1.hh:316
Minor::BranchData::reason
Reason reason
Explanation for this branch.
Definition: pipe_data.hh:109
MINORTRACE
#define MINORTRACE(...)
DPRINTFN for MinorTrace reporting.
Definition: trace.hh:60
DTRACE
#define DTRACE(x)
Definition: debug.hh:146
Minor::Queue::empty
bool empty() const
Is the queue empty?
Definition: buffers.hh:502
std::vector
STL vector class.
Definition: stl.hh:37
Minor::ForwardLineData::id
InstId id
Thread, stream, prediction ...
Definition: pipe_data.hh:195
Minor::Queue::push
void push(ElemType &data)
Push an element into the buffer if it isn't a bubble.
Definition: buffers.hh:426
Minor::Fetch1::FetchState
FetchState
Cycle-by-cycle state.
Definition: fetch1.hh:224
BaseCPU::getContext
virtual ThreadContext * getContext(int tn)
Given a thread num get the thread context for it.
Definition: base.hh:283
Minor::BranchData::isStreamChange
static bool isStreamChange(const BranchData::Reason reason)
Is a request with this reason actually a request to change the PC rather than a bubble or branch pred...
Definition: pipe_data.cc:81
Minor::Fetch1::prediction
Latch< BranchData >::Output prediction
Input port carrying branch predictions from Fetch2.
Definition: fetch1.hh:197
Minor::Fetch1::popAndDiscard
void popAndDiscard(FetchQueue &queue)
Pop a request from the given queue and correctly deallocate and discard it.
Definition: fetch1.cc:371
Minor::Fetch1::changeStream
void changeStream(const BranchData &branch)
Start fetching from a new address.
Definition: fetch1.cc:479
Minor
Definition: activity.cc:44
Minor::Queue::reserve
void reserve()
Reserve space in the queue for future pushes.
Definition: buffers.hh:454
Minor::Fetch1::FetchRequest::makePacket
void makePacket()
Make a packet to use with the memory transaction.
Definition: fetch1.cc:218
Request::INST_FETCH
@ INST_FETCH
The request was an instruction fetch.
Definition: request.hh:104
Minor::Fetch1::Fetch1ThreadInfo::blocked
bool blocked
Blocked indication for report.
Definition: fetch1.hh:275
MinorCPU::threads
std::vector< Minor::MinorThread * > threads
These are thread state-representing objects for this CPU.
Definition: cpu.hh:93
Minor::Fetch1::moveFromRequestsToTransfers
void moveFromRequestsToTransfers(FetchRequestPtr request)
Move a request between queues.
Definition: fetch1.cc:312
Minor::Fetch1::wakeupFetch
void wakeupFetch(ThreadID tid)
Initiate fetch1 fetching.
Definition: fetch1.cc:704
Minor::Fetch1::fetchLimit
unsigned int fetchLimit
Maximum number of fetches allowed in flight (in queues or memory)
Definition: fetch1.hh:218
Minor::Fetch1::FetchRequest::Translated
@ Translated
Definition: fetch1.hh:114
Minor::BranchData::isBubble
bool isBubble() const
Definition: pipe_data.hh:148
Minor::Fetch1::recvTimingResp
virtual bool recvTimingResp(PacketPtr pkt)
Memory interface.
Definition: fetch1.cc:407
Minor::Fetch1::out
Latch< ForwardLineData >::Input out
Output port carrying read lines to Fetch2.
Definition: fetch1.hh:195
Minor::Fetch1::FetchWaitingForPC
@ FetchWaitingForPC
Definition: fetch1.hh:228
Minor::Fetch1::threadPriority
ThreadID threadPriority
Definition: fetch1.hh:282
Minor::Fetch1::FetchRequest::request
RequestPtr request
The underlying request that this fetch represents.
Definition: fetch1.hh:131
Minor::Fetch1::FetchRequest::packet
PacketPtr packet
FetchRequests carry packets while they're in the requests and transfers responses queues.
Definition: fetch1.hh:128
Minor::Fetch1::FetchRequest::RequestIssuing
@ RequestIssuing
Definition: fetch1.hh:115
ThreadContext
ThreadContext is the external interface to all thread state for anything outside of the CPU.
Definition: thread_context.hh:88
Minor::Fetch1::inp
Latch< BranchData >::Output inp
Input port carrying branch requests from Execute.
Definition: fetch1.hh:193
Minor::Fetch1::FetchRequest::reportData
void reportData(std::ostream &os) const
Report interface.
Definition: fetch1.cc:736
Minor::Fetch1::IcacheNeedsRetry
@ IcacheNeedsRetry
Definition: fetch1.hh:288
RequestPort::sendTimingReq
bool sendTimingReq(PacketPtr pkt)
Attempt to send a timing request to the responder port by calling its corresponding receive function.
Definition: port.hh:492
MinorCPU
MinorCPU is an in-order CPU model with four fixed pipeline stages:
Definition: cpu.hh:77
Minor::Latch::Output
Definition: buffers.hh:256
Minor::ForwardLineData::lineBaseAddr
Addr lineBaseAddr
First byte address in the line.
Definition: pipe_data.hh:181
Minor::Fetch1::lineSeqNum
InstSeqNum lineSeqNum
Sequence number for line fetch used for ordering lines to flush.
Definition: fetch1.hh:306
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:234
Minor::Queue< FetchRequestPtr, ReportTraitsPtrAdaptor< FetchRequestPtr >, NoBubbleTraits< FetchRequestPtr > >
Minor::Fetch1::numInFlightFetches
unsigned int numInFlightFetches()
Returns the total number of queue occupancy, in-ITLB and in-memory system fetches.
Definition: fetch1.cc:380
Minor::Fetch1::operator<<
friend std::ostream & operator<<(std::ostream &os, Fetch1::FetchState state)
Definition: fetch1.cc:459
Minor::Fetch1::FetchRunning
@ FetchRunning
Definition: fetch1.hh:231
Minor::ForwardLineData::setFault
void setFault(Fault fault_)
Set fault and possible clear the bubble flag.
Definition: pipe_data.cc:163
Fault
std::shared_ptr< FaultBase > Fault
Definition: types.hh:240
MINORLINE
#define MINORLINE(sim_object,...)
DPRINTFN for MinorTrace MinorLine line reporting.
Definition: trace.hh:68
Minor::Fetch1::FetchRequest::isDiscardable
bool isDiscardable() const
Is this line out of date with the current stream/prediction sequence and can it be discarded without ...
Definition: fetch1.cc:741
Packet::needsResponse
bool needsResponse() const
Definition: packet.hh:570
Minor::Fetch1::FetchRequest::InTranslation
@ InTranslation
Definition: fetch1.hh:113
pipeline.hh
Minor::Fetch1::FetchRequest
Memory access queuing.
Definition: fetch1.hh:99
ArmISA::mode
Bitfield< 4, 0 > mode
Definition: miscregs_types.hh:70
Minor::Fetch1::tryToSend
bool tryToSend(FetchRequestPtr request)
Try to send (or resend) a memory request's next/only packet to the memory system.
Definition: fetch1.cc:321
Minor::Queue::front
ElemType & front()
Head value.
Definition: buffers.hh:494
Minor::InputBuffer
Like a Queue but with a restricted interface and a setTail function which, when the queue is empty,...
Definition: buffers.hh:565
Minor::Pipeline::Fetch1StageId
@ Fetch1StageId
Definition: pipeline.hh:100
Minor::Queue::pop
void pop()
Pop the head item.
Definition: buffers.hh:499
Minor::InstId::threadId
ThreadID threadId
The thread to which this line/instruction belongs.
Definition: dyn_inst.hh:81
Minor::Fetch1::getScheduledThread
ThreadID getScheduledThread()
Use the current threading policy to determine the next thread to fetch from.
Definition: fetch1.cc:114
NoFault
constexpr decltype(nullptr) NoFault
Definition: types.hh:245
ThreadContext::status
virtual Status status() const =0
ProbePoints::Packet
ProbePointArg< PacketInfo > Packet
Packet probe point.
Definition: mem.hh:103
Addr
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:142
Minor::Queue::occupiedSpace
unsigned int occupiedSpace() const
Number of slots already occupied in this buffer.
Definition: buffers.hh:469
Minor::Fetch1::Fetch1
Fetch1(const std::string &name_, MinorCPU &cpu_, MinorCPUParams &params, Latch< BranchData >::Output inp_, Latch< ForwardLineData >::Input out_, Latch< BranchData >::Output prediction_, std::vector< InputBuffer< ForwardLineData >> &next_stage_input_buffer)
Definition: fetch1.cc:53
Minor::Fetch1::IcacheRunning
@ IcacheRunning
Definition: fetch1.hh:287
Minor::BranchData::newPredictionSeqNum
InstSeqNum newPredictionSeqNum
Definition: pipe_data.hh:116
Minor::Fetch1::fetchLine
void fetchLine(ThreadID tid)
Insert a line fetch into the requests.
Definition: fetch1.cc:146
Minor::Fetch1::FetchHalted
@ FetchHalted
Definition: fetch1.hh:226
Minor::Fetch1::Fetch1ThreadInfo
Stage cycle-by-cycle state.
Definition: fetch1.hh:236
Minor::Fetch1::fetchInfo
std::vector< Fetch1ThreadInfo > fetchInfo
Definition: fetch1.hh:281
Minor::Fetch1::tryToSendToTransfers
void tryToSendToTransfers(FetchRequestPtr request)
Try and issue a fetch for a translated request at the head of the requests queue.
Definition: fetch1.cc:273
Named
Definition: trace.hh:147
ThreadContext::pcState
virtual TheISA::PCState pcState() const =0
Minor::Fetch1::stepQueues
void stepQueues()
Step requests along between requests and transfers queues.
Definition: fetch1.cc:349
Minor::Fetch1::Fetch1ThreadInfo::state
FetchState state
Definition: fetch1.hh:256
Minor::ForwardLineData::isBubble
bool isBubble() const
Definition: pipe_data.hh:241
Minor::Fetch1::FetchRequest::pc
TheISA::PCState pc
PC to fixup with line address.
Definition: fetch1.hh:134
BaseCPU::instRequestorId
RequestorID instRequestorId() const
Reads this CPU's unique instruction requestor ID.
Definition: base.hh:186
Minor::Fetch1::FetchRequest::Complete
@ Complete
Definition: fetch1.hh:116
Minor::BranchData
Forward data between Execute and Fetch1 carrying change-of-address/stream information.
Definition: pipe_data.hh:62
Minor::Fetch1::Fetch1ThreadInfo::pc
TheISA::PCState pc
Fetch PC value.
Definition: fetch1.hh:261
Minor::operator<<
std::ostream & operator<<(std::ostream &os, const InstId &id)
Print this id in the usual slash-separated format expected by MinorTrace.
Definition: dyn_inst.cc:61
Packet::pushSenderState
void pushSenderState(SenderState *sender_state)
Push a new sender state to the packet and make the current sender state the predecessor of the new on...
Definition: packet.cc:332
Minor::Fetch1::updateExpectedSeqNums
void updateExpectedSeqNums(const BranchData &branch)
Update streamSeqNum and predictionSeqNum from the given branch (and assume these have changed and dis...
Definition: fetch1.cc:511
Minor::Fetch1::transfers
FetchQueue transfers
Queue of in-memory system requests and responses.
Definition: fetch1.hh:300
ActivityRecorder::activity
void activity()
Records that there is activity this cycle.
Definition: activity.cc:54
MinorCPU::roundRobinPriority
std::vector< ThreadID > roundRobinPriority(ThreadID priority)
Thread scheduling utility functions.
Definition: cpu.hh:165
Minor::Fetch1::numFetchesInMemorySystem
unsigned int numFetchesInMemorySystem
Count of the number of fetches which have left the transfers queue and are in the 'wild' in the memory s...
Definition: fetch1.hh:312
Minor::Fetch1::minorTrace
void minorTrace() const
Definition: fetch1.cc:752
Packet
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:257
Minor::Fetch1::FetchRequest::finish
void finish(const Fault &fault_, const RequestPtr &request_, ThreadContext *tc, BaseTLB::Mode mode)
Interface for ITLB responses.
Definition: fetch1.cc:230
Minor::Fetch1::isDrained
bool isDrained()
Is this stage drained? For Fetch1, draining is initiated by Execute signalling a branch with the reas...
Definition: fetch1.cc:718
Minor::ForwardLineData::pc
TheISA::PCState pc
PC of the first requested inst within this line.
Definition: pipe_data.hh:184
Packet::popSenderState
SenderState * popSenderState()
Pop the top of the state stack and return a pointer to it.
Definition: packet.cc:340
Minor::Fetch1::lineSnap
unsigned int lineSnap
Line snap size in bytes.
Definition: fetch1.hh:209
Minor::Fetch1::minorTraceResponseLine
void minorTraceResponseLine(const std::string &name, FetchRequestPtr response) const
Print the appropriate MinorLine line for a fetch response.
Definition: fetch1.cc:388
BaseCPU::numThreads
ThreadID numThreads
Number of threads we're actually simulating (<= SMT_MAX_THREADS).
Definition: base.hh:363
Minor::Fetch1::maxLineWidth
unsigned int maxLineWidth
Maximum fetch width in bytes.
Definition: fetch1.hh:215
ThreadContext::Active
@ Active
Running.
Definition: thread_context.hh:102
MinorCPU::randomPriority
std::vector< ThreadID > randomPriority()
Definition: cpu.hh:174
Named::name
const std::string & name() const
Definition: trace.hh:156
Packet::allocate
void allocate()
Allocate memory for the packet.
Definition: packet.hh:1299
BaseTLB::Execute
@ Execute
Definition: tlb.hh:57
Minor::Fetch1::FetchRequest::id
InstId id
Identity of the line that this request will generate.
Definition: fetch1.hh:122
Minor::Fetch1::FetchRequest::state
FetchRequestState state
Definition: fetch1.hh:119
fetch1.hh
Minor::InstId
Id for lines and instructions.
Definition: dyn_inst.hh:68
Minor::Fetch1::Fetch1ThreadInfo::wakeupGuard
bool wakeupGuard
Signal to guard against sleeping first cycle of wakeup.
Definition: fetch1.hh:278
ArmISA::id
Bitfield< 33 > id
Definition: miscregs_types.hh:247
Minor::BranchData::SuspendThread
@ SuspendThread
Definition: pipe_data.hh:91
panic
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:171
Minor::Fetch1::IcacheState
IcacheState
State of memory access for head instruction fetch.
Definition: fetch1.hh:285
Minor::Fetch1::FetchRequest::~FetchRequest
~FetchRequest()
Definition: fetch1.cc:266

Generated on Wed Sep 30 2020 14:02:08 for gem5 by doxygen 1.8.17