gem5  v22.1.0.0
fetch1.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2013-2014 ARM Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions are
16  * met: redistributions of source code must retain the above copyright
17  * notice, this list of conditions and the following disclaimer;
18  * redistributions in binary form must reproduce the above copyright
19  * notice, this list of conditions and the following disclaimer in the
20  * documentation and/or other materials provided with the distribution;
21  * neither the name of the copyright holders nor the names of its
22  * contributors may be used to endorse or promote products derived from
23  * this software without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 #include "cpu/minor/fetch1.hh"
39 
40 #include <cstring>
41 #include <iomanip>
42 #include <sstream>
43 
44 #include "arch/generic/decoder.hh"
45 #include "base/cast.hh"
46 #include "base/compiler.hh"
47 #include "base/logging.hh"
48 #include "base/trace.hh"
49 #include "cpu/minor/pipeline.hh"
50 #include "debug/Drain.hh"
51 #include "debug/Fetch.hh"
52 #include "debug/MinorTrace.hh"
53 
54 namespace gem5
55 {
56 
58 namespace minor
59 {
60 
// Fetch1 constructor: wires the latch ports, the icache port and the
// request/transfer queues, then sanity-checks the fetch width parameters.
// NOTE(review): this is a doxygen text dump; the leading integers are the
// original source line numbers. Original lines 64-65 (the inp_/out_ latch
// parameters), 91 and 97 (presumably the cacheLineSize() fall-back
// assignments, per the DPRINTF text below) were dropped by extraction —
// restore from upstream fetch1.cc before compiling.
61 Fetch1::Fetch1(const std::string &name_,
62  MinorCPU &cpu_,
63  const BaseMinorCPUParams &params,
66  Latch<BranchData>::Output prediction_,
67  std::vector<InputBuffer<ForwardLineData>> &next_stage_input_buffer) :
68  Named(name_),
69  cpu(cpu_),
70  inp(inp_),
71  out(out_),
72  prediction(prediction_),
73  nextStageReserve(next_stage_input_buffer),
74  icachePort(name_ + ".icache_port", *this, cpu_),
75  lineSnap(params.fetch1LineSnapWidth),
76  maxLineWidth(params.fetch1LineWidth),
77  fetchLimit(params.fetch1FetchLimit),
78  fetchInfo(params.numThreads),
79  threadPriority(0),
80  requests(name_ + ".requests", "lines", params.fetch1FetchLimit),
81  transfers(name_ + ".transfers", "lines", params.fetch1FetchLimit),
82  icacheState(IcacheRunning),
83  lineSeqNum(InstId::firstLineSeqNum),
84  numFetchesInMemorySystem(0),
85  numFetchesInITLB(0)
86 {
// Give every thread's fetch state a fresh arch-specific PC object.
87  for (auto &info: fetchInfo)
88  info.pc.reset(params.isa[0]->newPCState())
89 
// Width of 0 means "use the cache line size" (assignment line 91 missing
// from this dump — TODO confirm against upstream).
90  if (lineSnap == 0) {
92  DPRINTF(Fetch, "lineSnap set to cache line size of: %d\n",
93  lineSnap);
94  }
95 
96  if (maxLineWidth == 0) {
98  DPRINTF(Fetch, "maxLineWidth set to cache line size of: %d\n",
99  maxLineWidth);
100  }
101 
102  size_t inst_size = cpu.threads[0]->decoder->moreBytesSize();
103 
104  /* These assertions should be copied to the Python config. as well */
105  if ((lineSnap % inst_size) != 0) {
106  fatal("%s: fetch1LineSnapWidth must be a multiple "
107  "of the inst width (%d)\n", name_,
108  inst_size);
109  }
110 
// NOTE(review): the && binds inside the != 0 comparison here, which looks
// unintentional (upstream has the same text) — the message suggests two
// independent checks were meant; verify against upstream before changing.
111  if ((maxLineWidth >= lineSnap && (maxLineWidth % inst_size)) != 0) {
112  fatal("%s: fetch1LineWidth must be a multiple of"
113  " the inst width (%d), and >= fetch1LineSnapWidth (%d)\n",
114  name_, inst_size, lineSnap);
115  }
116 
117  if (fetchLimit < 1) {
118  fatal("%s: fetch1FetchLimit must be >= 1 (%d)\n", name_,
119  fetchLimit);
120  }
121 }
122 
// Pick the next thread to fetch from according to cpu.threadPolicy:
// build a priority-ordered list, then return the first thread that is
// Active, not blocked by the next stage, and in FetchRunning state.
// Returns InvalidThreadID when no thread is eligible.
// NOTE(review): original line 124 (the "Fetch1::getScheduledThread()"
// signature line) was dropped by extraction.
123 inline ThreadID
125 {
126  /* Select thread via policy. */
127  std::vector<ThreadID> priority_list;
128 
129  switch (cpu.threadPolicy) {
130  case enums::SingleThreaded:
131  priority_list.push_back(0);
132  break;
133  case enums::RoundRobin:
134  priority_list = cpu.roundRobinPriority(threadPriority);
135  break;
136  case enums::Random:
137  priority_list = cpu.randomPriority();
138  break;
139  default:
140  panic("Unknown fetch policy");
141  }
142 
// First eligible thread wins; remember it so RoundRobin rotates from here.
143  for (auto tid : priority_list) {
144  if (cpu.getContext(tid)->status() == ThreadContext::Active &&
145  !fetchInfo[tid].blocked &&
146  fetchInfo[tid].state == FetchRunning) {
147  threadPriority = tid;
148  return tid;
149  }
150  }
151 
152  return InvalidThreadID;
153 }
154 
// Build and issue a line-fetch request for thread tid: align the fetch
// address down to lineSnap, create a FetchRequest tagged with the current
// stream/prediction/line sequence numbers, reserve transfer-queue space,
// and hand the request to the MMU for timing translation (the response
// arrives via FetchRequest::finish). Finally advance fetchAddr to the
// start of the next line.
// NOTE(review): original lines 156 (signature "Fetch1::fetchLine(ThreadID
// tid)"), 187 and 189 (presumably the numFetchesInITLB++ / state =
// InTranslation bookkeeping) were dropped by extraction — confirm upstream.
155 void
157 {
158  /* Reference the currently used thread state. */
159  Fetch1ThreadInfo &thread = fetchInfo[tid];
160 
161  /* If line_offset != 0, a request is pushed for the remainder of the
162  * line. */
163  /* Use a lower, sizeof(MachInst) aligned address for the fetch */
164  Addr aligned_pc = thread.fetchAddr & ~((Addr) lineSnap - 1);
165  unsigned int line_offset = aligned_pc % lineSnap;
166  unsigned int request_size = maxLineWidth - line_offset;
167 
168  /* Fill in the line's id */
169  InstId request_id(tid,
170  thread.streamSeqNum, thread.predictionSeqNum,
171  lineSeqNum);
172 
173  FetchRequestPtr request = new FetchRequest(*this, request_id,
174  thread.fetchAddr);
175 
176  DPRINTF(Fetch, "Inserting fetch into the fetch queue "
177  "%s addr: 0x%x pc: %s line_offset: %d request_size: %d\n",
178  request_id, aligned_pc, thread.fetchAddr, line_offset, request_size);
179 
180  request->request->setContext(cpu.threads[tid]->getTC()->contextId());
181  request->request->setVirt(
182  aligned_pc, request_size, Request::INST_FETCH, cpu.instRequestorId(),
183  /* I've no idea why we need the PC, but give it */
184  thread.fetchAddr);
185 
186  DPRINTF(Fetch, "Submitting ITLB request\n");
188 
190 
191  /* Reserve space in the queues upstream of requests for results */
192  transfers.reserve();
193  requests.push(request);
194 
195  /* Submit the translation request. The response will come
196  * through finish/markDelayed on this request as it bears
197  * the Translation interface */
198  cpu.threads[request->id.threadId]->mmu->translateTiming(
199  request->request,
200  cpu.getContext(request->id.threadId),
201  request, BaseMMU::Execute);
202 
203  lineSeqNum++;
204 
205  /* Step the PC for the next line onto the line aligned next address.
206  * Note that as instructions can span lines, this PC is only a
207  * reliable 'new' PC if the next line has a new stream sequence number. */
208  thread.fetchAddr = aligned_pc + request_size;
209 }
210 
// Pretty-print an IcacheState for debug/trace output.
// NOTE(review): the "case Fetch1::IcacheRunning:" / "case
// Fetch1::IcacheNeedsRetry:" labels (original lines 215 and 218) were
// dropped by extraction — the bodies below are otherwise intact.
211 std::ostream &
212 operator <<(std::ostream &os, Fetch1::IcacheState state)
213 {
214  switch (state) {
216  os << "IcacheRunning";
217  break;
219  os << "IcacheNeedsRetry";
220  break;
221  default:
// Fallback for out-of-range values: print the raw enum integer.
222  os << "IcacheState-" << static_cast<int>(state);
223  break;
224  }
225  return os;
226 }
227 
// Build the memory packet for this fetch request and attach the request
// itself as SenderState so recvTimingResp can recover it from the reply.
// NOTE(review): original lines 229 (signature) and 232 (presumably the
// Packet read-request construction) were dropped by extraction.
228 void
230 {
231  /* Make the necessary packet for a memory transaction */
233  packet->allocate();
234 
235  /* This FetchRequest becomes SenderState to allow the response to be
236  * identified */
237  packet->pushSenderState(this);
238 }
239 
// Translation-complete callback (BaseMMU::Translation interface): record
// the fault (if any), mark this request Translated, hand it back to
// Fetch1, and wake the CPU so Fetch1 is stepped next cycle.
// NOTE(review): original line 242 (the "ThreadContext *tc, BaseMMU::Mode
// mode" tail of the parameter list) was dropped by extraction.
240 void
241 Fetch1::FetchRequest::finish(const Fault &fault_, const RequestPtr &request_,
243 {
244  fault = fault_;
245 
246  state = Translated;
247  fetch.handleTLBResponse(this);
248 
249  /* Let's try and wake up the processor for the next cycle */
250  fetch.cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
251 }
252 
// Accept an ITLB response: log (and MinorTrace) any translation fault,
// mark the request Translated, then try to push it toward the memory
// system via tryToSendToTransfers.
// NOTE(review): original lines 254 (signature) and 256 (presumably the
// numFetchesInITLB-- bookkeeping) were dropped by extraction.
253 void
255 {
257 
258  if (response->fault != NoFault) {
259  DPRINTF(Fetch, "Fault in address ITLB translation: %s, "
260  "paddr: 0x%x, vaddr: 0x%x\n",
261  response->fault->name(),
262  (response->request->hasPaddr() ?
263  response->request->getPaddr() : 0),
264  response->request->getVaddr());
265 
266  if (debug::MinorTrace)
267  minorTraceResponseLine(name(), response);
268  } else {
269  DPRINTF(Fetch, "Got ITLB response\n");
270  }
271 
272  response->state = FetchRequest::Translated;
273 
274  tryToSendToTransfers(response);
275 }
276 
// Destructor: free any packet still owned by this request (ownership is
// nulled out when the packet is handed to the memory system or adopted by
// a ForwardLineData, so this only fires for un-sent/faulting requests).
// NOTE(review): original line 277 (the destructor signature) was dropped
// by extraction.
278 {
279  if (packet)
280  delete packet;
281 }
282 
// Try to advance the request at the head of the requests queue: bail if
// it isn't at the head or is still in translation; short-circuit
// discardable/faulting requests straight to Complete (no memory access);
// otherwise build its packet and attempt the icache send.
// NOTE(review): original lines 284 (signature), 303, 307 and 316 (the
// statements following "Complete", the wakeup, and the action taken when
// tryToSend succeeds — presumably moveFromRequestsToTransfers) were
// dropped by extraction; confirm against upstream before compiling.
283 void
285 {
286  if (!requests.empty() && requests.front() != request) {
287  DPRINTF(Fetch, "Fetch not at front of requests queue, can't"
288  " issue to memory\n");
289  return;
290  }
291 
292  if (request->state == FetchRequest::InTranslation) {
293  DPRINTF(Fetch, "Fetch still in translation, not issuing to"
294  " memory\n");
295  return;
296  }
297 
298  if (request->isDiscardable() || request->fault != NoFault) {
299  /* Discarded and faulting requests carry on through transfers
300  * as Complete/packet == NULL */
301 
302  request->state = FetchRequest::Complete;
304 
305  /* Wake up the pipeline next cycle as there will be no event
306  * for this queue->queue transfer */
308  } else if (request->state == FetchRequest::Translated) {
309  if (!request->packet)
310  request->makePacket();
311 
312  /* Ensure that the packet won't delete the request */
313  assert(request->packet->needsResponse());
314 
315  if (tryToSend(request))
317  } else {
318  DPRINTF(Fetch, "Not advancing line fetch\n");
319  }
320 }
321 
// Move a request from the head of the requests queue onto the transfers
// queue (space was already reserved in fetchLine).
// NOTE(review): original line 323 (signature) was dropped by extraction.
322 void
324 {
325  assert(!requests.empty() && requests.front() == request);
326 
327  requests.pop();
328  transfers.push(request);
329 }
330 
// Attempt to send (or resend) the request's packet to the icache port.
// On success the packet pointer is nulled (the memory system now owns
// it); on failure the port will call recvReqRetry later. Returns whether
// the send succeeded.
// NOTE(review): original lines 332 (signature), 341-342 (success-path
// bookkeeping, presumably state/numFetchesInMemorySystem updates) and 350
// (presumably icacheState = IcacheNeedsRetry) were dropped by extraction.
331 bool
333 {
334  bool ret = false;
335 
336  if (icachePort.sendTimingReq(request->packet)) {
337  /* Invalidate the fetch_requests packet so we don't
338  * accidentally fail to deallocate it (or use it!)
339  * later by overwriting it */
340  request->packet = NULL;
343 
344  ret = true;
345 
346  DPRINTF(Fetch, "Issued fetch request to memory: %s\n",
347  request->id);
348  } else {
349  /* Needs to be resent, wait for that */
351 
352  DPRINTF(Fetch, "Line fetch needs to retry: %s\n",
353  request->id);
354  }
355 
356  return ret;
357 }
358 
// Step requests along between the requests and transfers queues,
// logging any icache-state change. In IcacheNeedsRetry we do nothing and
// wait for recvReqRetry.
// NOTE(review): original lines 360 (signature) and 368 (the body of the
// !requests.empty() branch — presumably tryToSendToTransfers on the head
// request) were dropped by extraction.
359 void
361 {
362  IcacheState old_icache_state = icacheState;
363 
364  switch (icacheState) {
365  case IcacheRunning:
366  /* Move ITLB results on to the memory system */
367  if (!requests.empty()) {
369  }
370  break;
371  case IcacheNeedsRetry:
372  break;
373  }
374 
375  if (icacheState != old_icache_state) {
376  DPRINTF(Fetch, "Step in state %s moving to state %s\n",
377  old_icache_state, icacheState);
378  }
379 }
380 
// Pop the head of the given queue and delete it (FetchRequests are
// heap-allocated in fetchLine). No-op on an empty queue.
// NOTE(review): original line 382 (signature, takes a FetchQueue&) was
// dropped by extraction.
381 void
383 {
384  if (!queue.empty()) {
385  delete queue.front();
386  queue.pop();
387  }
388 }
389 
// Total fetches in flight: occupancy of the requests queue plus (per the
// class doc) transfers/ITLB/memory-system counts.
// NOTE(review): original lines 391 (signature) and 394 (the remaining
// addends of this sum) were dropped by extraction — the return expression
// is syntactically incomplete as dumped.
390 unsigned int
392 {
393  return requests.occupiedSpace() +
395 }
396 
// Emit the MinorTrace "line" record for a fetch response: error-packet,
// faulting, or successful (with size/vaddr/paddr) variants.
// NOTE(review): original line 399 (the first line of the signature,
// "Fetch1::minorTraceResponseLine(const std::string &name,") was dropped
// by extraction.
398 void
400  Fetch1::FetchRequestPtr response) const
401 {
402  const RequestPtr &request = response->request;
403 
404  if (response->packet && response->packet->isError()) {
405  minorLine(*this, "id=F;%s vaddr=0x%x fault=\"error packet\"\n",
406  response->id, request->getVaddr());
407  } else if (response->fault != NoFault) {
408  minorLine(*this, "id=F;%s vaddr=0x%x fault=\"%s\"\n",
409  response->id, request->getVaddr(), response->fault->name());
410  } else {
411  minorLine(*this, "id=%s size=%d vaddr=0x%x paddr=0x%x\n",
412  response->id, request->getSize(),
413  request->getVaddr(), request->getPaddr());
414  }
415 }
416 
// Memory-system response callback: recover the owning FetchRequest from
// the packet's SenderState, reattach the (possibly changed) packet, mark
// the request Complete, trace it, and wake the CPU. Always returns true —
// this port never applies back-pressure to responses.
// NOTE(review): original lines 418 (signature), 432 (presumably
// numFetchesInMemorySystem--) and 448 (presumably the wakeupOnEvent call)
// were dropped by extraction.
417 bool
419 {
420  DPRINTF(Fetch, "recvTimingResp %d\n", numFetchesInMemorySystem);
421 
422  /* Only push the response if we didn't change stream? No, all responses
423  * should hit the responses queue. It's the job of 'step' to throw them
424  * away. */
425  FetchRequestPtr fetch_request = safe_cast<FetchRequestPtr>
426  (response->popSenderState());
427 
428  /* Fixup packet in fetch_request as this may have changed */
429  assert(!fetch_request->packet);
430  fetch_request->packet = response;
431 
433  fetch_request->state = FetchRequest::Complete;
434 
435  if (debug::MinorTrace)
436  minorTraceResponseLine(name(), fetch_request);
437 
438  if (response->isError()) {
439  DPRINTF(Fetch, "Received error response packet: %s\n",
440  fetch_request->id);
441  }
442 
443  /* We go to idle even if there are more things to do on the queues as
444  * it's the job of step to actually step us on to the next transaction */
445 
446  /* Let's try and wake up the processor for the next cycle to move on
447  * queues */
449 
450  /* Never busy */
451  return true;
452 }
453 
// Icache retry callback: only legal in IcacheNeedsRetry with a pending
// request; re-attempt the send of the head request and, on success, move
// it onto the transfers queue.
// NOTE(review): original lines 455 (signature) and 463 (presumably
// icacheState = IcacheRunning) were dropped by extraction.
454 void
456 {
457  DPRINTF(Fetch, "recvRetry\n");
458  assert(icacheState == IcacheNeedsRetry);
459  assert(!requests.empty());
460 
461  FetchRequestPtr retryRequest = requests.front();
462 
464 
465  if (tryToSend(retryRequest))
466  moveFromRequestsToTransfers(retryRequest);
467 }
468 
// Pretty-print a FetchState for debug/trace output.
// NOTE(review): the "case Fetch1::FetchWaitingForPC:" and "case
// Fetch1::FetchRunning:" labels (original lines 476 and 479) were dropped
// by extraction — the printed strings make the mapping unambiguous.
469 std::ostream &
470 operator <<(std::ostream &os, Fetch1::FetchState state)
471 {
472  switch (state) {
473  case Fetch1::FetchHalted:
474  os << "FetchHalted";
475  break;
477  os << "FetchWaitingForPC";
478  break;
480  os << "FetchRunning";
481  break;
482  default:
483  os << "FetchState-" << static_cast<int>(state);
484  break;
485  }
486  return os;
487 }
488 
// Redirect fetch for the branch's thread: adopt the branch's new
// stream/prediction sequence numbers, adjust the thread's fetch state
// according to the branch reason (suspend unless the wakeup guard is set,
// halt, or resume running), and point the fetch PC at the branch target.
// NOTE(review): original lines 490 (signature), 498 (presumably
// "case BranchData::SuspendThread:") and 509 (presumably
// "case BranchData::HaltFetch:") were dropped by extraction.
489 void
491 {
492  Fetch1ThreadInfo &thread = fetchInfo[branch.threadId];
493 
494  updateExpectedSeqNums(branch);
495 
496  /* Start fetching again if we were stopped */
497  switch (branch.reason) {
499  {
500  if (thread.wakeupGuard) {
501  DPRINTF(Fetch, "Not suspending fetch due to guard: %s\n",
502  branch);
503  } else {
504  DPRINTF(Fetch, "Suspending fetch: %s\n", branch);
505  thread.state = FetchWaitingForPC;
506  }
507  }
508  break;
510  DPRINTF(Fetch, "Halting fetch\n");
511  thread.state = FetchHalted;
512  break;
513  default:
514  DPRINTF(Fetch, "Changing stream on branch: %s\n", branch);
515  thread.state = FetchRunning;
516  break;
517  }
518  set(thread.pc, branch.target);
519  thread.fetchAddr = thread.pc->instAddr();
520 }
521 
// Adopt the branch's stream and prediction sequence numbers as the
// thread's expected values; in-flight requests tagged with older numbers
// become discardable.
// NOTE(review): original line 523 (signature) was dropped by extraction.
522 void
524 {
525  Fetch1ThreadInfo &thread = fetchInfo[branch.threadId];
526 
527  DPRINTF(Fetch, "Updating streamSeqNum from: %d to %d,"
528  " predictionSeqNum from: %d to %d\n",
529  thread.streamSeqNum, branch.newStreamSeqNum,
530  thread.predictionSeqNum, branch.newPredictionSeqNum);
531 
532  /* Change the stream */
533  thread.streamSeqNum = branch.newStreamSeqNum;
534  /* Update the prediction. Note that it's possible for this to
535  * actually set the prediction to an *older* value if new
536  * predictions have been discarded by execute */
537  thread.predictionSeqNum = branch.newPredictionSeqNum;
538 }
539 
// Convert a completed fetch response into the ForwardLineData handed to
// Fetch2: copy fault/id/PC/addresses across; on fault, stop fetching; on
// success, transfer packet-data ownership to the line (nulling
// response->packet so the request's destructor won't free it).
// NOTE(review): original lines 541 (first line of signature) and 567
// (fault-path statement, presumably thread.state = FetchWaitingForPC)
// were dropped by extraction.
540 void
542  ForwardLineData &line)
543 {
544  Fetch1ThreadInfo &thread = fetchInfo[response->id.threadId];
545  PacketPtr packet = response->packet;
546 
547  /* Pass the prefetch abort (if any) on to Fetch2 in a ForwardLineData
548  * structure */
549  line.setFault(response->fault);
550  /* Make sequence numbers valid in return */
551  line.id = response->id;
552  /* Set the PC in case there was a sequence change */
553  set(line.pc, thread.pc);
554  /* Set fetch address to virtual address */
555  line.fetchAddr = response->pc;
556  /* Set the lineBase, which is a sizeof(MachInst) aligned address <=
557  * pc.instAddr() */
558  line.lineBaseAddr = response->request->getVaddr();
559 
560  if (response->fault != NoFault) {
561  /* Stop fetching if there was a fault */
562  /* Should probably try to flush the queues as well, but we
563  * can't be sure that this fault will actually reach Execute, and we
564  * can't (currently) selectively remove this stream from the queues */
565  DPRINTF(Fetch, "Stopping line fetch because of fault: %s\n",
566  response->fault->name());
568  } else {
569  line.adoptPacketData(packet);
570  /* Null the response's packet to prevent the response from trying to
571  * deallocate the packet */
572  response->packet = NULL;
573  }
574 }
575 
// Main per-cycle step for Fetch1:
//  1. latch block status from the next stage's input buffers;
//  2. apply change-of-stream requests — Execute's branch wins over a
//     simultaneous Fetch2 prediction for the same thread; for different
//     threads both are considered, with stale predictions (streamSeqNum
//     mismatch) ignored and halted threads left alone;
//  3. schedule a thread and issue a new line fetch if under fetchLimit;
//  4. step the request/transfer queues and, if the head transfer is
//     complete, either discard it (old stream) or forward it to Fetch2;
//  5. clear each thread's one-cycle wakeup guard.
// NOTE(review): original lines 577 (signature "Fetch1::evaluate()"), 588,
// 672, 674 (head-transfer completeness test and response extraction), 684,
// 692 (presumably wakeup + popAndDiscard(transfers)) and 698 (presumably
// cpu.activityRecorder->activity()) were dropped by extraction — restore
// from upstream fetch1.cc before compiling.
576 void
578 {
579  const BranchData &execute_branch = *inp.outputWire;
580  const BranchData &fetch2_branch = *prediction.outputWire;
581  ForwardLineData &line_out = *out.inputWire;
582 
583  assert(line_out.isBubble());
584 
585  for (ThreadID tid = 0; tid < cpu.numThreads; tid++)
586  fetchInfo[tid].blocked = !nextStageReserve[tid].canReserve();
587 
589  if (execute_branch.threadId != InvalidThreadID &&
590  execute_branch.threadId == fetch2_branch.threadId) {
591 
592  Fetch1ThreadInfo &thread = fetchInfo[execute_branch.threadId];
593 
594  /* Are we changing stream? Look to the Execute branches first, then
595  * to predicted changes of stream from Fetch2 */
596  if (execute_branch.isStreamChange()) {
597  if (thread.state == FetchHalted) {
598  DPRINTF(Fetch, "Halted, ignoring branch: %s\n", execute_branch);
599  } else {
600  changeStream(execute_branch);
601  }
602 
603  if (!fetch2_branch.isBubble()) {
604  DPRINTF(Fetch, "Ignoring simultaneous prediction: %s\n",
605  fetch2_branch);
606  }
607 
608  /* The streamSeqNum tagging in request/response ->req should handle
609  * discarding those requests when we get to them. */
610  } else if (thread.state != FetchHalted && fetch2_branch.isStreamChange()) {
611  /* Handle branch predictions by changing the instruction source
612  * if we're still processing the same stream (as set by streamSeqNum)
613  * as the one of the prediction.
614  */
615  if (fetch2_branch.newStreamSeqNum != thread.streamSeqNum) {
616  DPRINTF(Fetch, "Not changing stream on prediction: %s,"
617  " streamSeqNum mismatch\n",
618  fetch2_branch);
619  } else {
620  changeStream(fetch2_branch);
621  }
622  }
623  } else {
624  /* Fetch2 and Execute branches are for different threads */
625  if (execute_branch.threadId != InvalidThreadID &&
626  execute_branch.isStreamChange()) {
627 
628  if (fetchInfo[execute_branch.threadId].state == FetchHalted) {
629  DPRINTF(Fetch, "Halted, ignoring branch: %s\n", execute_branch);
630  } else {
631  changeStream(execute_branch);
632  }
633  }
634 
635  if (fetch2_branch.threadId != InvalidThreadID &&
636  fetch2_branch.isStreamChange()) {
637 
638  if (fetchInfo[fetch2_branch.threadId].state == FetchHalted) {
639  DPRINTF(Fetch, "Halted, ignoring branch: %s\n", fetch2_branch);
640  } else if (fetch2_branch.newStreamSeqNum != fetchInfo[fetch2_branch.threadId].streamSeqNum) {
641  DPRINTF(Fetch, "Not changing stream on prediction: %s,"
642  " streamSeqNum mismatch\n", fetch2_branch);
643  } else {
644  changeStream(fetch2_branch);
645  }
646  }
647  }
648 
649  if (numInFlightFetches() < fetchLimit) {
650  ThreadID fetch_tid = getScheduledThread();
651 
652  if (fetch_tid != InvalidThreadID) {
653  DPRINTF(Fetch, "Fetching from thread %d\n", fetch_tid);
654 
655  /* Generate fetch to selected thread */
656  fetchLine(fetch_tid);
657  /* Take up a slot in the fetch queue */
658  nextStageReserve[fetch_tid].reserve();
659  } else {
660  DPRINTF(Fetch, "No active threads available to fetch from\n");
661  }
662  }
663 
664 
665  /* Halting shouldn't prevent fetches in flight from being processed */
666  /* Step fetches through the icachePort queues and memory system */
667  stepQueues();
668 
669  /* As we've thrown away early lines, if there is a line, it must
670  * be from the right stream */
671  if (!transfers.empty() &&
673  {
675 
676  if (response->isDiscardable()) {
677  nextStageReserve[response->id.threadId].freeReservation();
678 
679  DPRINTF(Fetch, "Discarding translated fetch as it's for"
680  " an old stream\n");
681 
682  /* Wake up next cycle just in case there was some other
683  * action to do */
685  } else {
686  DPRINTF(Fetch, "Processing fetched line: %s\n",
687  response->id);
688 
689  processResponse(response, line_out);
690  }
691 
693  }
694 
695  /* If we generated output, and mark the stage as being active
696  * to encourage that output on to the next stage */
697  if (!line_out.isBubble())
699 
700  /* Fetch1 has no inputBuffer so the only activity we can have is to
701  * generate a line output (tested just above) or to initiate a memory
702  * fetch which will signal activity when it returns/needs stepping
703  * between queues */
704 
705 
706  /* This looks hackish. And it is, but there doesn't seem to be a better
707  * way to do this. The signal from commit to suspend fetch takes 1
708  * clock cycle to propagate to fetch. However, a legitimate wakeup
709  * may occur between cycles from the memory system. Thus wakeup guard
710  * prevents us from suspending in that case. */
711 
712  for (auto& thread : fetchInfo) {
713  thread.wakeupGuard = false;
714  }
715 }
716 
// (Re)start fetching for thread tid: take the PC from the thread context,
// mark the thread FetchRunning, and raise the one-cycle wakeup guard so a
// same-cycle suspend signal from commit cannot immediately re-suspend it.
// NOTE(review): original lines 718 (signature) and 728 (presumably the
// cpu.wakeupOnEvent(Pipeline::Fetch1StageId) call) were dropped by
// extraction.
717 void
719 {
720  ThreadContext *thread_ctx = cpu.getContext(tid);
721  Fetch1ThreadInfo &thread = fetchInfo[tid];
722  set(thread.pc, thread_ctx->pcState());
723  thread.fetchAddr = thread.pc->instAddr();
724  thread.state = FetchRunning;
725  thread.wakeupGuard = true;
726  DPRINTF(Fetch, "[tid:%d]: Changing stream wakeup %s\n", tid, *thread.pc);
727 
729 }
730 
// Drained when no fetches are in flight, the output wire holds only a
// bubble, and no thread is still in FetchRunning state (per-thread status
// is also logged under the Drain debug flag).
// NOTE(review): original line 732 (signature "Fetch1::isDrained()") was
// dropped by extraction.
731 bool
733 {
734  bool drained = numInFlightFetches() == 0 && (*out.inputWire).isBubble();
735  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
736  Fetch1ThreadInfo &thread = fetchInfo[tid];
737  DPRINTF(Drain, "isDrained[tid:%d]: %s %s%s\n",
738  tid,
739  thread.state == FetchHalted,
740  (numInFlightFetches() == 0 ? "" : "inFlightFetches "),
741  ((*out.inputWire).isBubble() ? "" : "outputtingLine"));
742 
743  drained = drained && (thread.state != FetchRunning);
744  }
745 
746  return drained;
747 }
748 
// MinorTrace report hook: print this request's InstId.
// NOTE(review): original line 750 (signature, takes std::ostream &os) was
// dropped by extraction.
749 void
751 {
752  os << id;
753 }
754 
// A request is discardable once it is no longer held by the TLB or the
// memory system AND its stream or prediction sequence number no longer
// matches the owning thread's expected values (i.e. it belongs to a
// superseded stream).
// NOTE(review): original line 755 (the "bool
// Fetch1::FetchRequest::isDiscardable() const" signature) was dropped by
// extraction.
756 {
757  Fetch1ThreadInfo &thread = fetch.fetchInfo[id.threadId];
758 
759  /* Can't discard lines in TLB/memory */
760  return state != InTranslation && state != RequestIssuing &&
761  (id.streamSeqNum != thread.streamSeqNum ||
762  id.predictionSeqNum != thread.predictionSeqNum);
763 }
764 
// Emit this stage's MinorTrace state line. Currently reports only thread
// 0 (see the upstream TODO) — blocked threads report 'B', otherwise the
// output wire's data is reported.
// NOTE(review): original lines 766 (signature), 781 and 783-784 (the
// in-flight counter arguments to minorTrace and the queue minorTrace
// calls) were dropped by extraction.
765 void
767 {
768  // TODO: Un-bork minorTrace for THREADS
769  // bork bork bork
770  const Fetch1ThreadInfo &thread = fetchInfo[0];
771 
772  std::ostringstream data;
773 
774  if (thread.blocked)
775  data << 'B';
776  else
777  (*out.inputWire).reportData(data);
778 
779  minor::minorTrace("state=%s icacheState=%s in_tlb_mem=%s/%s"
780  " streamSeqNum=%d lines=%s\n", thread.state, icacheState,
782  thread.streamSeqNum, data.str());
785 }
786 
787 } // namespace minor
788 } // namespace gem5
#define DPRINTF(x,...)
Definition: trace.hh:186
const char data[]
void activity()
Records that there is activity this cycle.
Definition: activity.cc:55
RequestorID instRequestorId() const
Reads this CPU's unique instruction requestor ID.
Definition: base.hh:191
virtual ThreadContext * getContext(int tn)
Given a thread num get the thread context for it.
Definition: base.hh:284
ThreadID numThreads
Number of threads we're actually simulating (<= SMT_MAX_THREADS).
Definition: base.hh:367
unsigned int cacheLineSize() const
Get the cache line size of the system.
Definition: base.hh:380
@ Execute
Definition: mmu.hh:56
MinorCPU is an in-order CPU model with four fixed pipeline stages:
Definition: cpu.hh:86
minor::MinorActivityRecorder * activityRecorder
Activity recording for pipeline.
Definition: cpu.hh:96
void wakeupOnEvent(unsigned int stage_id)
Interface for stages to signal that they have become active after a callback or eventq event where th...
Definition: cpu.cc:291
std::vector< ThreadID > roundRobinPriority(ThreadID priority)
Thread scheduling utility functions.
Definition: cpu.hh:173
std::vector< ThreadID > randomPriority()
Definition: cpu.hh:182
enums::ThreadPolicy threadPolicy
Thread Scheduling Policy (RoundRobin, Random, etc)
Definition: cpu.hh:120
std::vector< minor::MinorThread * > threads
These are thread state-representing objects for this CPU.
Definition: cpu.hh:101
Interface for things with names.
Definition: named.hh:39
virtual std::string name() const
Definition: named.hh:47
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:294
bool isError() const
Definition: packet.hh:621
bool needsResponse() const
Definition: packet.hh:607
void pushSenderState(SenderState *sender_state)
Push a new sender state to the packet and make the current sender state the predecessor of the new on...
Definition: packet.cc:334
SenderState * popSenderState()
Pop the top of the state stack and return a pointer to it.
Definition: packet.cc:342
void allocate()
Allocate memory for the packet.
Definition: packet.hh:1354
bool sendTimingReq(PacketPtr pkt)
Attempt to send a timing request to the responder port by calling its corresponding receive function.
Definition: port.hh:495
@ INST_FETCH
The request was an instruction fetch.
Definition: request.hh:115
ThreadContext is the external interface to all thread state for anything outside of the CPU.
virtual const PCStateBase & pcState() const =0
virtual Status status() const =0
Forward data betwen Execute and Fetch1 carrying change-of-address/stream information.
Definition: pipe_data.hh:67
static bool isStreamChange(const BranchData::Reason reason)
Is a request with this reason actually a request to change the PC rather than a bubble or branch pred...
Definition: pipe_data.cc:85
InstSeqNum newStreamSeqNum
Sequence number of new stream/prediction to be adopted.
Definition: pipe_data.hh:119
Reason reason
Explanation for this branch.
Definition: pipe_data.hh:113
InstSeqNum newPredictionSeqNum
Definition: pipe_data.hh:120
ThreadID threadId
ThreadID associated with branch.
Definition: pipe_data.hh:116
bool isBubble() const
Definition: pipe_data.hh:164
std::unique_ptr< PCStateBase > target
Starting PC of that stream.
Definition: pipe_data.hh:123
Memory access queuing.
Definition: fetch1.hh:110
InstId id
Identity of the line that this request will generate.
Definition: fetch1.hh:130
bool isComplete() const
Is this a complete read line or fault.
Definition: fetch1.hh:160
bool isDiscardable() const
Is this line out of date with the current stream/prediction sequence and can it be discarded without ...
Definition: fetch1.cc:755
void reportData(std::ostream &os) const
Report interface.
Definition: fetch1.cc:750
void finish(const Fault &fault_, const RequestPtr &request_, ThreadContext *tc, BaseMMU::Mode mode)
Interface for ITLB responses.
Definition: fetch1.cc:241
Addr pc
PC to fixup with line address.
Definition: fetch1.hh:142
void makePacket()
Make a packet to use with the memory transaction.
Definition: fetch1.cc:229
Fault fault
Fill in a fault if one happens during fetch, check this by picking apart the response packet.
Definition: fetch1.hh:146
RequestPtr request
The underlying request that this fetch represents.
Definition: fetch1.hh:139
PacketPtr packet
FetchRequests carry packets while they're in the requests and transfers responses queues.
Definition: fetch1.hh:136
FetchState
Cycle-by-cycle state.
Definition: fetch1.hh:233
ThreadID threadPriority
Definition: fetch1.hh:286
unsigned int numFetchesInITLB
Number of requests inside the ITLB rather than in the queues.
Definition: fetch1.hh:320
virtual void recvReqRetry()
Definition: fetch1.cc:455
void changeStream(const BranchData &branch)
Start fetching from a new address.
Definition: fetch1.cc:490
void minorTraceResponseLine(const std::string &name, FetchRequestPtr response) const
Print the appropriate MinorLine line for a fetch response.
Definition: fetch1.cc:399
unsigned int numFetchesInMemorySystem
Count of the number fetches which have left the transfers queue and are in the 'wild' in the memory s...
Definition: fetch1.hh:316
Latch< BranchData >::Output inp
Input port carrying branch requests from Execute.
Definition: fetch1.hh:201
ThreadID getScheduledThread()
Use the current threading policy to determine the next thread to fetch from.
Definition: fetch1.cc:124
void tryToSendToTransfers(FetchRequestPtr request)
Try and issue a fetch for a translated request at the head of the requests queue.
Definition: fetch1.cc:284
void popAndDiscard(FetchQueue &queue)
Pop a request from the given queue and correctly deallocate and discard it.
Definition: fetch1.cc:382
std::vector< Fetch1ThreadInfo > fetchInfo
Definition: fetch1.hh:285
IcacheState icacheState
Retry state of icache_port.
Definition: fetch1.hh:307
virtual bool recvTimingResp(PacketPtr pkt)
Memory interface.
Definition: fetch1.cc:418
IcachePort icachePort
IcachePort to pass to the CPU.
Definition: fetch1.hh:212
unsigned int maxLineWidth
Maximum fetch width in bytes.
Definition: fetch1.hh:223
std::vector< InputBuffer< ForwardLineData > > & nextStageReserve
Interface to reserve space in the next stage.
Definition: fetch1.hh:208
void wakeupFetch(ThreadID tid)
Initiate fetch1 fetching.
Definition: fetch1.cc:718
bool tryToSend(FetchRequestPtr request)
Try to send (or resend) a memory request's next/only packet to the memory system.
Definition: fetch1.cc:332
void handleTLBResponse(FetchRequestPtr response)
Handle pushing a TLB response onto the right queue.
Definition: fetch1.cc:254
unsigned int lineSnap
Line snap size in bytes.
Definition: fetch1.hh:217
bool isDrained()
Is this stage drained? For Fetch1, draining is initiated by Execute signalling a branch with the reas...
Definition: fetch1.cc:732
Latch< BranchData >::Output prediction
Input port carrying branch predictions from Fetch2.
Definition: fetch1.hh:205
void evaluate()
Pass on input/buffer data to the output if you can.
Definition: fetch1.cc:577
void minorTrace() const
Definition: fetch1.cc:766
unsigned int fetchLimit
Maximum number of fetches allowed in flight (in queues or memory)
Definition: fetch1.hh:226
void processResponse(FetchRequestPtr response, ForwardLineData &line)
Convert a response to a ForwardLineData.
Definition: fetch1.cc:541
unsigned int numInFlightFetches()
Returns the total number of queue occupancy, in-ITLB and in-memory system fetches.
Definition: fetch1.cc:391
MinorCPU & cpu
Construction-assigned data members.
Definition: fetch1.hh:198
FetchQueue transfers
Queue of in-memory system requests and responses.
Definition: fetch1.hh:304
void stepQueues()
Step requests along between requests and transfers queues.
Definition: fetch1.cc:360
friend std::ostream & operator<<(std::ostream &os, Fetch1::FetchState state)
Definition: fetch1.cc:470
void updateExpectedSeqNums(const BranchData &branch)
Update streamSeqNum and predictionSeqNum from the given branch (and assume these have changed and dis...
Definition: fetch1.cc:523
InstSeqNum lineSeqNum
Sequence number for line fetch used for ordering lines to flush.
Definition: fetch1.hh:310
void fetchLine(ThreadID tid)
Insert a line fetch into the requests.
Definition: fetch1.cc:156
Latch< ForwardLineData >::Input out
Output port carrying read lines to Fetch2.
Definition: fetch1.hh:203
Fetch1(const std::string &name_, MinorCPU &cpu_, const BaseMinorCPUParams &params, Latch< BranchData >::Output inp_, Latch< ForwardLineData >::Input out_, Latch< BranchData >::Output prediction_, std::vector< InputBuffer< ForwardLineData >> &next_stage_input_buffer)
Definition: fetch1.cc:61
FetchQueue requests
Queue of address translated requests from Fetch1.
Definition: fetch1.hh:301
void moveFromRequestsToTransfers(FetchRequestPtr request)
Move a request between queues.
Definition: fetch1.cc:323
IcacheState
State of memory access for head instruction fetch.
Definition: fetch1.hh:290
Line fetch data in the forward direction.
Definition: pipe_data.hh:188
void setFault(Fault fault_)
Set fault and possible clear the bubble flag.
Definition: pipe_data.cc:167
void adoptPacketData(Packet *packet)
Use the data from a packet as line instead of allocating new space.
Definition: pipe_data.cc:187
Addr fetchAddr
Address of this line of data.
Definition: pipe_data.hh:204
InstId id
Thread, stream, prediction ...
Definition: pipe_data.hh:215
std::unique_ptr< PCStateBase > pc
PC of the first inst within this sequence.
Definition: pipe_data.hh:201
Addr lineBaseAddr
First byte address in the line.
Definition: pipe_data.hh:198
Like a Queue but with a restricted interface and a setTail function which, when the queue is empty,...
Definition: buffers.hh:573
Id for lines and instructions.
Definition: dyn_inst.hh:77
ThreadID threadId
The thread to which this line/instruction belongs.
Definition: dyn_inst.hh:89
Encapsulate wires on either input or output of the latch.
Definition: buffers.hh:253
bool empty() const
Is the queue empty?
Definition: buffers.hh:509
void minorTrace() const
Definition: buffers.hh:512
void pop()
Pop the head item.
Definition: buffers.hh:506
unsigned int occupiedSpace() const
Number of slots already occupied in this buffer.
Definition: buffers.hh:476
void reserve()
Reserve space in the queue for future pushes.
Definition: buffers.hh:461
ElemType & front()
Head value.
Definition: buffers.hh:501
void push(ElemType &data)
Push an element into the buffer if it isn't a bubble.
Definition: buffers.hh:433
STL vector class.
Definition: stl.hh:37
Fetch1 is responsible for fetching "lines" from memory and passing them to Fetch2.
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:178
#define fatal(...)
This implements a cprintf based fatal() function.
Definition: logging.hh:190
atomic_var_t state
Definition: helpers.cc:188
Bitfield< 4, 0 > mode
Definition: misc_types.hh:74
Bitfield< 33 > id
Definition: misc_types.hh:257
Bitfield< 12, 11 > set
Definition: misc_types.hh:709
Bitfield< 17 > os
Definition: misc.hh:810
std::ostream & operator<<(std::ostream &os, const InstId &id)
Print this id in the usual slash-separated format expected by MinorTrace.
Definition: dyn_inst.cc:64
void minorTrace(const char *fmt, Args ...args)
DPRINTFN for MinorTrace reporting.
Definition: trace.hh:67
void minorLine(const Named &named, const char *fmt, Args ...args)
DPRINTFN for MinorTrace MinorLine line reporting.
Definition: trace.hh:84
ProbePointArg< PacketInfo > Packet
Packet probe point.
Definition: mem.hh:109
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
std::shared_ptr< FaultBase > Fault
Definition: types.hh:248
int16_t ThreadID
Thread index/ID type.
Definition: types.hh:235
std::shared_ptr< Request > RequestPtr
Definition: request.hh:92
const ThreadID InvalidThreadID
Definition: types.hh:236
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:147
constexpr decltype(nullptr) NoFault
Definition: types.hh:253
GEM5_DEPRECATED_NAMESPACE(GuestABI, guest_abi)
Minor contains all the definitions within the MinorCPU apart from the CPU class itself.
The constructed pipeline.
Stage cycle-by-cycle state.
Definition: fetch1.hh:245
std::unique_ptr< PCStateBase > pc
Fetch PC value.
Definition: fetch1.hh:262
InstSeqNum streamSeqNum
Stream sequence number.
Definition: fetch1.hh:270
InstSeqNum predictionSeqNum
Prediction sequence number.
Definition: fetch1.hh:276
bool blocked
Blocked indication for report.
Definition: fetch1.hh:279
bool wakeupGuard
Signal to guard against sleeping first cycle of wakeup.
Definition: fetch1.hh:282
Addr fetchAddr
The address we're currently fetching lines from.
Definition: fetch1.hh:265

Generated on Wed Dec 21 2022 10:22:30 for gem5 by doxygen 1.9.1