gem5  [DEVELOP-FOR-23.0]
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
fetch1.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2013-2014 ARM Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions are
16  * met: redistributions of source code must retain the above copyright
17  * notice, this list of conditions and the following disclaimer;
18  * redistributions in binary form must reproduce the above copyright
19  * notice, this list of conditions and the following disclaimer in the
20  * documentation and/or other materials provided with the distribution;
21  * neither the name of the copyright holders nor the names of its
22  * contributors may be used to endorse or promote products derived from
23  * this software without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 #include "cpu/minor/fetch1.hh"
39 
40 #include <cstring>
41 #include <iomanip>
42 #include <sstream>
43 
44 #include "arch/generic/decoder.hh"
45 #include "base/cast.hh"
46 #include "base/compiler.hh"
47 #include "base/logging.hh"
48 #include "base/trace.hh"
49 #include "cpu/minor/pipeline.hh"
50 #include "debug/Drain.hh"
51 #include "debug/Fetch.hh"
52 #include "debug/MinorTrace.hh"
53 
54 namespace gem5
55 {
56 
57 namespace minor
58 {
59 
// Constructor for the Minor CPU's Fetch1 stage.  Wires up the latches
// to/from Execute (inp) and Fetch2 (out, prediction), sizes the
// requests/transfers queues from fetch1FetchLimit, and sanity-checks the
// line snap/width parameters against the instruction width.
// NOTE(review): this Doxygen extraction has dropped several lines — the
// inp_/out_ parameter declarations (original lines 63-64) and the
// statements that default lineSnap/maxLineWidth to the cache line size
// when configured as 0 (original lines 90 and 96).  Consult the full
// fetch1.cc in the gem5 tree before editing.
 60 Fetch1::Fetch1(const std::string &name_,
 61  MinorCPU &cpu_,
 62  const BaseMinorCPUParams &params,
 65  Latch<BranchData>::Output prediction_,
 66  std::vector<InputBuffer<ForwardLineData>> &next_stage_input_buffer) :
 67  Named(name_),
 68  cpu(cpu_),
 69  inp(inp_),
 70  out(out_),
 71  prediction(prediction_),
 72  nextStageReserve(next_stage_input_buffer),
 73  icachePort(name_ + ".icache_port", *this, cpu_),
 74  lineSnap(params.fetch1LineSnapWidth),
 75  maxLineWidth(params.fetch1LineWidth),
 76  fetchLimit(params.fetch1FetchLimit),
 77  fetchInfo(params.numThreads),
 78  threadPriority(0),
 79  requests(name_ + ".requests", "lines", params.fetch1FetchLimit),
 80  transfers(name_ + ".transfers", "lines", params.fetch1FetchLimit),
 81  icacheState(IcacheRunning),
 82  lineSeqNum(InstId::firstLineSeqNum),
 83  numFetchesInMemorySystem(0),
 84  numFetchesInITLB(0)
 85 {
// Give each hardware thread an initial PC state object from the ISA.
 86  for (auto &info: fetchInfo)
 87  info.pc.reset(params.isa[0]->newPCState());
 88 
 89  if (lineSnap == 0) {
// NOTE(review): the assignment `lineSnap = cpu.cacheLineSize();` from the
// original source is missing here (dropped hyperlinked line).
 91  DPRINTF(Fetch, "lineSnap set to cache line size of: %d\n",
 92  lineSnap);
 93  }
 94 
 95  if (maxLineWidth == 0) {
// NOTE(review): the assignment `maxLineWidth = cpu.cacheLineSize();` from
// the original source is missing here (dropped hyperlinked line).
 97  DPRINTF(Fetch, "maxLineWidth set to cache line size of: %d\n",
 98  maxLineWidth);
 99  }
 100 
 101  size_t inst_size = cpu.threads[0]->decoder->moreBytesSize();
 102 
 103  /* These assertions should be copied to the Python config. as well */
 104  if ((lineSnap % inst_size) != 0) {
 105  fatal("%s: fetch1LineSnapWidth must be a multiple "
 106  "of the inst width (%d)\n", name_,
 107  inst_size);
 108  }
 109 
 110  if ((maxLineWidth >= lineSnap && (maxLineWidth % inst_size)) != 0) {
 111  fatal("%s: fetch1LineWidth must be a multiple of"
 112  " the inst width (%d), and >= fetch1LineSnapWidth (%d)\n",
 113  name_, inst_size, lineSnap);
 114  }
 115 
 116  if (fetchLimit < 1) {
 117  fatal("%s: fetch1FetchLimit must be >= 1 (%d)\n", name_,
 118  fetchLimit);
 119  }
 120 }
121 
// Fetch1::getScheduledThread (name per the cross-reference below:
// "Definition: fetch1.cc:123"; the signature line was dropped by the
// extraction).  Builds a thread priority list according to the CPU's
// threading policy and returns the first thread that is Active, not
// blocked by the next stage, and in FetchRunning state; returns
// InvalidThreadID when no thread is eligible.
 122 inline ThreadID
 124 {
 125  /* Select thread via policy. */
 126  std::vector<ThreadID> priority_list;
 127 
 128  switch (cpu.threadPolicy) {
 129  case enums::SingleThreaded:
 130  priority_list.push_back(0);
 131  break;
 132  case enums::RoundRobin:
 133  priority_list = cpu.roundRobinPriority(threadPriority);
 134  break;
 135  case enums::Random:
 136  priority_list = cpu.randomPriority();
 137  break;
 138  default:
 139  panic("Unknown fetch policy");
 140  }
 141 
// First eligible thread wins; remember it so RoundRobin can rotate from it.
 142  for (auto tid : priority_list) {
 143  if (cpu.getContext(tid)->status() == ThreadContext::Active &&
 144  !fetchInfo[tid].blocked &&
 145  fetchInfo[tid].state == FetchRunning) {
 146  threadPriority = tid;
 147  return tid;
 148  }
 149  }
 150 
 151  return InvalidThreadID;
 152 }
153 
// Fetch1::fetchLine(ThreadID tid) — "Insert a line fetch into the
// requests" (name/brief per the cross-reference: Definition fetch1.cc:155;
// the signature line was dropped by the extraction).  Creates a
// FetchRequest for the line containing thread.fetchAddr, starts its timing
// ITLB translation, and advances fetchAddr to the next line boundary.
// NOTE(review): at least two statements are missing below (original lines
// 186-189), which in the full source update the in-flight ITLB/memory
// counters and set the request state to InTranslation — confirm against
// the gem5 tree.
 154 void
 156 {
 157  /* Reference the currently used thread state. */
 158  Fetch1ThreadInfo &thread = fetchInfo[tid];
 159 
 160  /* If line_offset != 0, a request is pushed for the remainder of the
 161  * line. */
 162  /* Use a lower, sizeof(MachInst) aligned address for the fetch */
 163  Addr aligned_pc = thread.fetchAddr & ~((Addr) lineSnap - 1);
 164  unsigned int line_offset = aligned_pc % lineSnap;
 165  unsigned int request_size = maxLineWidth - line_offset;
 166 
 167  /* Fill in the line's id */
 168  InstId request_id(tid,
 169  thread.streamSeqNum, thread.predictionSeqNum,
 170  lineSeqNum);
 171 
 172  FetchRequestPtr request = new FetchRequest(*this, request_id,
 173  thread.fetchAddr);
 174 
 175  DPRINTF(Fetch, "Inserting fetch into the fetch queue "
 176  "%s addr: 0x%x pc: %s line_offset: %d request_size: %d\n",
 177  request_id, aligned_pc, thread.fetchAddr, line_offset, request_size);
 178 
 179  request->request->setContext(cpu.threads[tid]->getTC()->contextId());
 180  request->request->setVirt(
 181  aligned_pc, request_size, Request::INST_FETCH, cpu.instRequestorId(),
 182  /* I've no idea why we need the PC, but give it */
 183  thread.fetchAddr);
 184 
 185  DPRINTF(Fetch, "Submitting ITLB request\n");
 187 
 189 
 190  /* Reserve space in the queues upstream of requests for results */
 191  transfers.reserve();
 192  requests.push(request);
 193 
 194  /* Submit the translation request. The response will come
 195  * through finish/markDelayed on this request as it bears
 196  * the Translation interface */
 197  cpu.threads[request->id.threadId]->mmu->translateTiming(
 198  request->request,
 199  cpu.getContext(request->id.threadId),
 200  request, BaseMMU::Execute);
 201 
 202  lineSeqNum++;
 203 
 204  /* Step the PC for the next line onto the line aligned next address.
 205  * Note that as instructions can span lines, this PC is only a
 206  * reliable 'new' PC if the next line has a new stream sequence number. */
 207  thread.fetchAddr = aligned_pc + request_size;
 208 }
209 
// Stream-insertion operator rendering a Fetch1::IcacheState as text for
// debug/trace output.  NOTE(review): the signature line and the two
// `case` labels (IcacheRunning at original line 214, IcacheNeedsRetry at
// 217) were dropped by the extraction — the bodies below belong to those
// cases, as their printed strings indicate.
 210 std::ostream &
 212 {
 213  switch (state) {
 215  os << "IcacheRunning";
 216  break;
 218  os << "IcacheNeedsRetry";
 219  break;
 220  default:
// Fall back to a numeric rendering for unknown enum values.
 221  os << "IcacheState-" << static_cast<int>(state);
 222  break;
 223  }
 224  return os;
 225 }
226 
// Presumably Fetch1::FetchRequest::makePacket (the signature line was
// dropped by the extraction — TODO confirm against the gem5 tree).
// Builds the memory packet for this fetch and attaches this request as
// the packet's SenderState so the response can be matched back to it.
// NOTE(review): the packet-construction statement (original line 231,
// which creates the ReadReq Packet) is missing below.
 227 void
 229 {
 230  /* Make the necessary packet for a memory transaction */
 232  packet->allocate();
 233 
 234  /* This FetchRequest becomes SenderState to allow the response to be
 235  * identified */
 236  packet->pushSenderState(this);
 237 }
238 
// Translation-complete callback (BaseMMU::Translation interface).  Called
// by the MMU when the timing ITLB translation for this fetch finishes.
// Records any fault, marks the request Translated, hands it back to the
// owning Fetch1 stage, and wakes the Fetch1 stage for the next cycle.
// NOTE(review): the final parameter line of the signature (original line
// 241, the ThreadContext*/BaseMMU::Mode arguments) was dropped by the
// extraction.
 239 void
 240 Fetch1::FetchRequest::finish(const Fault &fault_, const RequestPtr &request_,
 242 {
 243  fault = fault_;
 244 
 245  state = Translated;
 246  fetch.handleTLBResponse(this);
 247 
 248  /* Let's try and wake up the processor for the next cycle */
 249  fetch.cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
 250 }
251 
// Fetch1::handleTLBResponse(FetchRequestPtr response) — "Handle pushing a
// TLB response onto the right queue" (name/brief per the cross-reference:
// Definition fetch1.cc:253; the signature line was dropped by the
// extraction).  Logs any translation fault, marks the request Translated,
// and tries to advance it toward the transfers queue / memory system.
// NOTE(review): original line 255 is missing below — in the full source
// it decrements numFetchesInITLB; confirm against the gem5 tree.
 252 void
 254 {
 256 
 257  if (response->fault != NoFault) {
 258  DPRINTF(Fetch, "Fault in address ITLB translation: %s, "
 259  "paddr: 0x%x, vaddr: 0x%x\n",
 260  response->fault->name(),
 261  (response->request->hasPaddr() ?
 262  response->request->getPaddr() : 0),
 263  response->request->getVaddr());
 264 
 265  if (debug::MinorTrace)
 266  minorTraceResponseLine(name(), response);
 267  } else {
 268  DPRINTF(Fetch, "Got ITLB response\n");
 269  }
 270 
 271  response->state = FetchRequest::Translated;
 272 
// Faulting responses still flow through transfers so the fault reaches Fetch2.
 273  tryToSendToTransfers(response);
 274 }
275 
// Fetch1::FetchRequest destructor (per the cross-reference: Definition
// fetch1.cc:276; the signature line was dropped by the extraction).
// Frees the packet if this request still owns one — ownership is nulled
// out elsewhere when the packet is handed to the memory system or its
// data is adopted by a ForwardLineData.
 277 {
 278  if (packet)
 279  delete packet;
 280 }
281 
// Fetch1::tryToSendToTransfers(FetchRequestPtr request) — "Try and issue
// a fetch for a translated request at the head of the requests queue"
// (name/brief per the cross-reference: Definition fetch1.cc:283; the
// signature line was dropped by the extraction).  Only the head of the
// requests queue may advance, and only once translation has finished.
// NOTE(review): three statements are missing below (original lines 302,
// 306 and 315) — in the full source these move the request from requests
// to transfers, wake the pipeline, and move the request after a
// successful send respectively; confirm against the gem5 tree.
 282 void
 284 {
 285  if (!requests.empty() && requests.front() != request) {
 286  DPRINTF(Fetch, "Fetch not at front of requests queue, can't"
 287  " issue to memory\n");
 288  return;
 289  }
 290 
 291  if (request->state == FetchRequest::InTranslation) {
 292  DPRINTF(Fetch, "Fetch still in translation, not issuing to"
 293  " memory\n");
 294  return;
 295  }
 296 
 297  if (request->isDiscardable() || request->fault != NoFault) {
 298  /* Discarded and faulting requests carry on through transfers
 299  * as Complete/packet == NULL */
 300 
 301  request->state = FetchRequest::Complete;
 303 
 304  /* Wake up the pipeline next cycle as there will be no event
 305  * for this queue->queue transfer */
 307  } else if (request->state == FetchRequest::Translated) {
 308  if (!request->packet)
 309  request->makePacket();
 310 
 311  /* Ensure that the packet won't delete the request */
 312  assert(request->packet->needsResponse());
 313 
 314  if (tryToSend(request))
 316  } else {
 317  DPRINTF(Fetch, "Not advancing line fetch\n");
 318  }
 319 }
320 
// Fetch1::moveFromRequestsToTransfers(FetchRequestPtr request) — "Move a
// request between queues" (name per the cross-reference: Definition
// fetch1.cc:322; the signature line was dropped by the extraction).
// Pops the given request — which must be the head of the requests queue —
// and pushes it onto the transfers queue.  Space in transfers was already
// reserved when the request was created (see fetchLine).
 321 void
 323 {
 324  assert(!requests.empty() && requests.front() == request);
 325 
 326  requests.pop();
 327  transfers.push(request);
 328 }
329 
// Presumably Fetch1::tryToSend(FetchRequestPtr request) — the signature
// line was dropped by the extraction; TODO confirm against the gem5 tree.
// Attempts to issue the request's packet to the I-cache port.  On success
// the packet pointer is nulled (ownership passes to the memory system)
// and true is returned; on failure the port will call back with a retry.
// NOTE(review): statements at original lines 340-341 and 349-350 are
// missing — in the full source these update request state / in-flight
// counters and set icacheState to IcacheNeedsRetry; confirm before
// relying on this text.
 330 bool
 332 {
 333  bool ret = false;
 334 
 335  if (icachePort.sendTimingReq(request->packet)) {
 336  /* Invalidate the fetch_requests packet so we don't
 337  * accidentally fail to deallocate it (or use it!)
 338  * later by overwriting it */
 339  request->packet = NULL;
 342 
 343  ret = true;
 344 
 345  DPRINTF(Fetch, "Issued fetch request to memory: %s\n",
 346  request->id);
 347  } else {
 348  /* Needs to be resent, wait for that */
 350 
 351  DPRINTF(Fetch, "Line fetch needs to retry: %s\n",
 352  request->id);
 353  }
 354 
 355  return ret;
 356 }
357 
// Presumably Fetch1::stepQueues — the signature line was dropped by the
// extraction; TODO confirm against the gem5 tree.  Per-cycle stepping of
// the I-cache state machine: while running, it tries to push the head of
// the requests queue (a completed ITLB translation) on toward memory; in
// the retry state it waits for recvReqRetry.
// NOTE(review): original line 367 is missing inside the IcacheRunning
// case — in the full source it calls tryToSendToTransfers on the queue
// head; confirm against the gem5 tree.
 358 void
 360 {
 361  IcacheState old_icache_state = icacheState;
 362 
 363  switch (icacheState) {
 364  case IcacheRunning:
 365  /* Move ITLB results on to the memory system */
 366  if (!requests.empty()) {
 368  }
 369  break;
 370  case IcacheNeedsRetry:
 371  break;
 372  }
 373 
// Trace state-machine transitions only when the state actually changed.
 374  if (icacheState != old_icache_state) {
 375  DPRINTF(Fetch, "Step in state %s moving to state %s\n",
 376  old_icache_state, icacheState);
 377  }
 378 }
379 
// Fetch1::popAndDiscard(FetchQueue &queue) — "Pop a request from the
// given queue and correctly deallocate and discard it" (name/brief per
// the cross-reference: Definition fetch1.cc:381; the signature line was
// dropped by the extraction).  Deleting the front FetchRequest also frees
// its packet via ~FetchRequest.
 380 void
 382 {
 383  if (!queue.empty()) {
 384  delete queue.front();
 385  queue.pop();
 386  }
 387 }
388 
// Presumably Fetch1::numInFlightFetches — the signature line was dropped
// by the extraction; TODO confirm against the gem5 tree.  Returns the
// total number of fetches currently in flight across the two queues.
// NOTE(review): the second addend (original line 393, the transfers-queue
// occupancy) is missing below — the visible `+` has lost its right-hand
// operand.
 389 unsigned int
 391 {
 392  return requests.occupiedSpace() +
 394 }
395 
// Fetch1::minorTraceResponseLine — "Print the appropriate MinorLine line
// for a fetch response" (name/brief per the cross-reference: Definition
// fetch1.cc:398; the first signature line was dropped by the extraction).
// Chooses between error-packet, fault and normal-response trace formats.
 397 void
 399  Fetch1::FetchRequestPtr response) const
 400 {
 401  const RequestPtr &request = response->request;
 402 
 403  if (response->packet && response->packet->isError()) {
 404  minorLine(*this, "id=F;%s vaddr=0x%x fault=\"error packet\"\n",
 405  response->id, request->getVaddr());
 406  } else if (response->fault != NoFault) {
 407  minorLine(*this, "id=F;%s vaddr=0x%x fault=\"%s\"\n",
 408  response->id, request->getVaddr(), response->fault->name());
 409  } else {
 410  minorLine(*this, "id=%s size=%d vaddr=0x%x paddr=0x%x\n",
 411  response->id, request->getSize(),
 412  request->getVaddr(), request->getPaddr());
 413  }
 414 }
415 
// Presumably the I-cache port's recvTimingResp(PacketPtr response) — the
// signature line was dropped by the extraction; TODO confirm against the
// gem5 tree.  Recovers the originating FetchRequest from the packet's
// SenderState, reattaches the (possibly replaced) packet, marks the
// request Complete, and always accepts the response.
// NOTE(review): original lines 431 and 447 are missing — in the full
// source they decrement numFetchesInMemorySystem and wake the Fetch1
// stage via wakeupOnEvent; confirm against the gem5 tree.
 416 bool
 418 {
 419  DPRINTF(Fetch, "recvTimingResp %d\n", numFetchesInMemorySystem);
 420 
 421  /* Only push the response if we didn't change stream? No, all responses
 422  * should hit the responses queue. It's the job of 'step' to throw them
 423  * away. */
 424  FetchRequestPtr fetch_request = safe_cast<FetchRequestPtr>
 425  (response->popSenderState());
 426 
 427  /* Fixup packet in fetch_request as this may have changed */
 428  assert(!fetch_request->packet);
 429  fetch_request->packet = response;
 430 
 432  fetch_request->state = FetchRequest::Complete;
 433 
 434  if (debug::MinorTrace)
 435  minorTraceResponseLine(name(), fetch_request);
 436 
 437  if (response->isError()) {
 438  DPRINTF(Fetch, "Received error response packet: %s\n",
 439  fetch_request->id);
 440  }
 441 
 442  /* We go to idle even if there are more things to do on the queues as
 443  * it's the job of step to actually step us on to the next transaction */
 444 
 445  /* Let's try and wake up the processor for the next cycle to move on
 446  * queues */
 448 
 449  /* Never busy */
 450  return true;
 451 }
452 
// Presumably the I-cache port's recvReqRetry — the signature line was
// dropped by the extraction; TODO confirm against the gem5 tree.  Called
// by the memory system when a previously rejected request may be resent;
// retries the head of the requests queue.
// NOTE(review): original line 462 is missing — in the full source it
// resets icacheState back to IcacheRunning before retrying; confirm
// against the gem5 tree.
 453 void
 455 {
 456  DPRINTF(Fetch, "recvRetry\n");
 457  assert(icacheState == IcacheNeedsRetry);
 458  assert(!requests.empty());
 459 
 460  FetchRequestPtr retryRequest = requests.front();
 463 
 464  if (tryToSend(retryRequest))
 465  moveFromRequestsToTransfers(retryRequest);
 466 }
467 
// Stream-insertion operator rendering a Fetch1::FetchState as text (per
// the cross-reference: "friend std::ostream & operator<<(...,
// Fetch1::FetchState state), Definition: fetch1.cc:469"; the signature
// line was dropped by the extraction).  NOTE(review): the `case` labels
// for FetchWaitingForPC (original line 475) and FetchRunning (478) are
// also missing — the bodies below belong to those cases, as their printed
// strings indicate.
 468 std::ostream &
 470 {
 471  switch (state) {
 472  case Fetch1::FetchHalted:
 473  os << "FetchHalted";
 474  break;
 476  os << "FetchWaitingForPC";
 477  break;
 479  os << "FetchRunning";
 480  break;
 481  default:
// Fall back to a numeric rendering for unknown enum values.
 482  os << "FetchState-" << static_cast<int>(state);
 483  break;
 484  }
 485  return os;
 486 }
487 
// Presumably Fetch1::changeStream(const BranchData &branch) — the
// signature line was dropped by the extraction; TODO confirm against the
// gem5 tree.  Adopts the branch's stream/prediction sequence numbers,
// updates the thread's fetch state according to the branch reason, and
// redirects the thread's PC and fetch address to the branch target.
// NOTE(review): the `case` labels for the branch reasons (original lines
// 497 and 508 — the suspend and halt reasons) were dropped by the
// extraction; the two bodies below belong to those cases.
 488 void
 490 {
 491  Fetch1ThreadInfo &thread = fetchInfo[branch.threadId];
 492 
 493  updateExpectedSeqNums(branch);
 494 
 495  /* Start fetching again if we were stopped */
 496  switch (branch.reason) {
 498  {
// wakeupGuard prevents a suspend from racing a memory-system wakeup that
// arrived between cycles (see the comment at the end of evaluate()).
 499  if (thread.wakeupGuard) {
 500  DPRINTF(Fetch, "Not suspending fetch due to guard: %s\n",
 501  branch);
 502  } else {
 503  DPRINTF(Fetch, "Suspending fetch: %s\n", branch);
 504  thread.state = FetchWaitingForPC;
 505  }
 506  }
 507  break;
 509  DPRINTF(Fetch, "Halting fetch\n");
 510  thread.state = FetchHalted;
 511  break;
 512  default:
 513  DPRINTF(Fetch, "Changing stream on branch: %s\n", branch);
 514  thread.state = FetchRunning;
 515  break;
 516  }
 517  set(thread.pc, branch.target);
 518  thread.fetchAddr = thread.pc->instAddr();
 519 }
520 
// Fetch1::updateExpectedSeqNums(const BranchData &branch) — "Update
// streamSeqNum and predictionSeqNum from the given branch" (name/brief
// per the cross-reference: Definition fetch1.cc:522; the signature line
// was dropped by the extraction).  Requests/responses tagged with older
// sequence numbers become discardable after this.
 521 void
 523 {
 524  Fetch1ThreadInfo &thread = fetchInfo[branch.threadId];
 525 
 526  DPRINTF(Fetch, "Updating streamSeqNum from: %d to %d,"
 527  " predictionSeqNum from: %d to %d\n",
 528  thread.streamSeqNum, branch.newStreamSeqNum,
 529  thread.predictionSeqNum, branch.newPredictionSeqNum);
 530 
 531  /* Change the stream */
 532  thread.streamSeqNum = branch.newStreamSeqNum;
 533  /* Update the prediction. Note that it's possible for this to
 534  * actually set the prediction to an *older* value if new
 535  * predictions have been discarded by execute */
 536  thread.predictionSeqNum = branch.newPredictionSeqNum;
 537 }
538 
// Fetch1::processResponse(FetchRequestPtr response, ForwardLineData &line)
// — "Convert a response to a ForwardLineData" (name/brief per the
// cross-reference: Definition fetch1.cc:540; the first signature line was
// dropped by the extraction).  Copies identity/PC/fault information into
// the line for Fetch2 and, for non-faulting responses, transfers the
// packet's data into the line.
// NOTE(review): original line 566 is missing in the fault branch — in the
// full source it sets the thread's state to FetchWaitingForPC; confirm
// against the gem5 tree.
 539 void
 541  ForwardLineData &line)
 542 {
 543  Fetch1ThreadInfo &thread = fetchInfo[response->id.threadId];
 544  PacketPtr packet = response->packet;
 545 
 546  /* Pass the prefetch abort (if any) on to Fetch2 in a ForwardLineData
 547  * structure */
 548  line.setFault(response->fault);
 549  /* Make sequence numbers valid in return */
 550  line.id = response->id;
 551  /* Set the PC in case there was a sequence change */
 552  set(line.pc, thread.pc);
 553  /* Set fetch address to virtual address */
 554  line.fetchAddr = response->pc;
 555  /* Set the lineBase, which is a sizeof(MachInst) aligned address <=
 556  * pc.instAddr() */
 557  line.lineBaseAddr = response->request->getVaddr();
 558 
 559  if (response->fault != NoFault) {
 560  /* Stop fetching if there was a fault */
 561  /* Should probably try to flush the queues as well, but we
 562  * can't be sure that this fault will actually reach Execute, and we
 563  * can't (currently) selectively remove this stream from the queues */
 564  DPRINTF(Fetch, "Stopping line fetch because of fault: %s\n",
 565  response->fault->name());
 567  } else {
 568  line.adoptPacketData(packet);
 569  /* Null the response's packet to prevent the response from trying to
 570  * deallocate the packet */
 571  response->packet = NULL;
 572  }
 573 }
574 
// Presumably Fetch1::evaluate — the per-cycle entry point of the stage
// (the signature line was dropped by the extraction; TODO confirm against
// the gem5 tree).  Each cycle it: (1) latches branch data from Execute
// and predictions from Fetch2 and applies any stream changes, (2) issues
// a new line fetch for a scheduled thread if under fetchLimit, (3) steps
// the I-cache queues, and (4) forwards or discards the head of the
// transfers queue, producing at most one ForwardLineData for Fetch2.
// NOTE(review): several statements are missing from this extraction
// (original lines 587, 671, 673, 683, 691, 697 and 704) — notably the
// transfers-head completeness test in the `if` at the bottom, the local
// binding of `response` to the transfers head, the popAndDiscard of the
// consumed response, the wakeupOnEvent calls and the activity-recorder
// call.  Consult the full fetch1.cc before editing this function.
 575 void
 577 {
 578  const BranchData &execute_branch = *inp.outputWire;
 579  const BranchData &fetch2_branch = *prediction.outputWire;
 580  ForwardLineData &line_out = *out.inputWire;
 581 
 582  assert(line_out.isBubble());
 583 
// Refresh per-thread blocked flags from the next stage's buffer occupancy.
 584  for (ThreadID tid = 0; tid < cpu.numThreads; tid++)
 585  fetchInfo[tid].blocked = !nextStageReserve[tid].canReserve();
 586 
 588  if (execute_branch.threadId != InvalidThreadID &&
 589  execute_branch.threadId == fetch2_branch.threadId) {
 590 
 591  Fetch1ThreadInfo &thread = fetchInfo[execute_branch.threadId];
 592 
 593  /* Are we changing stream? Look to the Execute branches first, then
 594  * to predicted changes of stream from Fetch2 */
 595  if (execute_branch.isStreamChange()) {
 596  if (thread.state == FetchHalted) {
 597  DPRINTF(Fetch, "Halted, ignoring branch: %s\n", execute_branch);
 598  } else {
 599  changeStream(execute_branch);
 600  }
 601 
 602  if (!fetch2_branch.isBubble()) {
 603  DPRINTF(Fetch, "Ignoring simultaneous prediction: %s\n",
 604  fetch2_branch);
 605  }
 606 
 607  /* The streamSeqNum tagging in request/response ->req should handle
 608  * discarding those requests when we get to them. */
 609  } else if (thread.state != FetchHalted && fetch2_branch.isStreamChange()) {
 610  /* Handle branch predictions by changing the instruction source
 611  * if we're still processing the same stream (as set by streamSeqNum)
 612  * as the one of the prediction.
 613  */
 614  if (fetch2_branch.newStreamSeqNum != thread.streamSeqNum) {
 615  DPRINTF(Fetch, "Not changing stream on prediction: %s,"
 616  " streamSeqNum mismatch\n",
 617  fetch2_branch);
 618  } else {
 619  changeStream(fetch2_branch);
 620  }
 621  }
 622  } else {
 623  /* Fetch2 and Execute branches are for different threads */
 624  if (execute_branch.threadId != InvalidThreadID &&
 625  execute_branch.isStreamChange()) {
 626 
 627  if (fetchInfo[execute_branch.threadId].state == FetchHalted) {
 628  DPRINTF(Fetch, "Halted, ignoring branch: %s\n", execute_branch);
 629  } else {
 630  changeStream(execute_branch);
 631  }
 632  }
 633 
 634  if (fetch2_branch.threadId != InvalidThreadID &&
 635  fetch2_branch.isStreamChange()) {
 636 
 637  if (fetchInfo[fetch2_branch.threadId].state == FetchHalted) {
 638  DPRINTF(Fetch, "Halted, ignoring branch: %s\n", fetch2_branch);
 639  } else if (fetch2_branch.newStreamSeqNum != fetchInfo[fetch2_branch.threadId].streamSeqNum) {
 640  DPRINTF(Fetch, "Not changing stream on prediction: %s,"
 641  " streamSeqNum mismatch\n", fetch2_branch);
 642  } else {
 643  changeStream(fetch2_branch);
 644  }
 645  }
 646  }
 647 
// Issue at most one new line fetch per cycle, bounded by fetchLimit.
 648  if (numInFlightFetches() < fetchLimit) {
 649  ThreadID fetch_tid = getScheduledThread();
 650 
 651  if (fetch_tid != InvalidThreadID) {
 652  DPRINTF(Fetch, "Fetching from thread %d\n", fetch_tid);
 653 
 654  /* Generate fetch to selected thread */
 655  fetchLine(fetch_tid);
 656  /* Take up a slot in the fetch queue */
 657  nextStageReserve[fetch_tid].reserve();
 658  } else {
 659  DPRINTF(Fetch, "No active threads available to fetch from\n");
 660  }
 661  }
 662 
 663 
 664  /* Halting shouldn't prevent fetches in flight from being processed */
 665  /* Step fetches through the icachePort queues and memory system */
 666  stepQueues();
 667 
 668  /* As we've thrown away early lines, if there is a line, it must
 669  * be from the right stream */
 670  if (!transfers.empty() &&
 672  {
 674 
 675  if (response->isDiscardable()) {
 676  nextStageReserve[response->id.threadId].freeReservation();
 677 
 678  DPRINTF(Fetch, "Discarding translated fetch as it's for"
 679  " an old stream\n");
 680 
 681  /* Wake up next cycle just in case there was some other
 682  * action to do */
 684  } else {
 685  DPRINTF(Fetch, "Processing fetched line: %s\n",
 686  response->id);
 687 
 688  processResponse(response, line_out);
 689  }
 690 
 692  }
 693 
 694  /* If we generated output, and mark the stage as being active
 695  * to encourage that output on to the next stage */
 696  if (!line_out.isBubble())
 698 
 699  /* Fetch1 has no inputBuffer so the only activity we can have is to
 700  * generate a line output (tested just above) or to initiate a memory
 701  * fetch which will signal activity when it returns/needs stepping
 702  * between queues */
 703 
 704 
 705  /* This looks hackish. And it is, but there doesn't seem to be a better
 706  * way to do this. The signal from commit to suspend fetch takes 1
 707  * clock cycle to propagate to fetch. However, a legitimate wakeup
 708  * may occur between cycles from the memory system. Thus wakeup guard
 709  * prevents us from suspending in that case. */
 710 
// Clear the one-cycle wakeup guard now that suspends for this cycle have
// been decided (see changeStream).
 711  for (auto& thread : fetchInfo) {
 712  thread.wakeupGuard = false;
 713  }
 714 }
715 
// Presumably Fetch1::wakeupFetch(ThreadID tid) — the signature line was
// dropped by the extraction; TODO confirm against the gem5 tree.  Resumes
// fetching for the given thread from the thread context's current PC and
// arms wakeupGuard so a stale in-flight suspend signal cannot immediately
// re-suspend it (see the comment at the end of evaluate()).
// NOTE(review): original line 727 is missing — in the full source it
// wakes the Fetch1 stage via wakeupOnEvent; confirm against the gem5
// tree.
 716 void
 718 {
 719  ThreadContext *thread_ctx = cpu.getContext(tid);
 720  Fetch1ThreadInfo &thread = fetchInfo[tid];
 721  set(thread.pc, thread_ctx->pcState());
 722  thread.fetchAddr = thread.pc->instAddr();
 723  thread.state = FetchRunning;
 724  thread.wakeupGuard = true;
 725  DPRINTF(Fetch, "[tid:%d]: Changing stream wakeup %s\n", tid, *thread.pc);
 726 
 728 }
729 
// Presumably Fetch1::isDrained — the signature line was dropped by the
// extraction; TODO confirm against the gem5 tree.  The stage is drained
// when no fetches are in flight, the output latch holds a bubble, and no
// thread remains in FetchRunning state.
 730 bool
 732 {
 733  bool drained = numInFlightFetches() == 0 && (*out.inputWire).isBubble();
 734  for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
 735  Fetch1ThreadInfo &thread = fetchInfo[tid];
 736  DPRINTF(Drain, "isDrained[tid:%d]: %s %s%s\n",
 737  tid,
 738  thread.state == FetchHalted,
 739  (numInFlightFetches() == 0 ? "" : "inFlightFetches "),
 740  ((*out.inputWire).isBubble() ? "" : "outputtingLine"));
 741 
 742  drained = drained && (thread.state != FetchRunning);
 743  }
 744 
 745  return drained;
 746 }
747 
// Fetch1::FetchRequest::reportData(std::ostream &os) — "Report interface"
// (name/brief per the cross-reference: Definition fetch1.cc:749; the
// signature line was dropped by the extraction).  Emits the request's
// InstId for MinorTrace-style reporting.
 748 void
 750 {
 751  os << id;
 752 }
753 
// Fetch1::FetchRequest::isDiscardable — "Is this line out of date with
// the current stream/prediction sequence and can it be discarded..."
// (name/brief per the cross-reference: Definition fetch1.cc:754; the
// signature line was dropped by the extraction).  A request can only be
// discarded once it is out of the TLB/memory system, and only if its
// stream or prediction sequence number no longer matches the thread's.
 755 {
 756  Fetch1ThreadInfo &thread = fetch.fetchInfo[id.threadId];
 757 
 758  /* Can't discard lines in TLB/memory */
 759  return state != InTranslation && state != RequestIssuing &&
 760  (id.streamSeqNum != thread.streamSeqNum ||
 761  id.predictionSeqNum != thread.predictionSeqNum);
 762 }
763 
// Presumably Fetch1::minorTrace — the signature line was dropped by the
// extraction; TODO confirm against the gem5 tree.  Emits a MinorTrace
// summary line for the stage; currently reports thread 0 only (see the
// TODO below).
// NOTE(review): original lines 780 and 782-783 are missing — in the full
// source they supply the in-TLB/in-memory counters to the format string
// and trace the transfers queue; the visible format string has more
// conversions (%s/%d) than visible arguments.
 764 void
 766 {
 767  // TODO: Un-bork minorTrace for THREADS
 768  // bork bork bork
 769  const Fetch1ThreadInfo &thread = fetchInfo[0];
 770 
 771  std::ostringstream data;
 772 
// Report 'B' when blocked, otherwise let the output line report itself.
 773  if (thread.blocked)
 774  data << 'B';
 775  else
 776  (*out.inputWire).reportData(data);
 777 
 778  minor::minorTrace("state=%s icacheState=%s in_tlb_mem=%s/%s"
 779  " streamSeqNum=%d lines=%s\n", thread.state, icacheState,
 781  thread.streamSeqNum, data.str());
 784 }
785 
786 } // namespace minor
787 } // namespace gem5
gem5::minor::ForwardLineData
Line fetch data in the forward direction.
Definition: pipe_data.hh:186
fatal
#define fatal(...)
This implements a cprintf based fatal() function.
Definition: logging.hh:200
gem5::minor::Queue::front
ElemType & front()
Head value.
Definition: buffers.hh:500
gem5::MinorCPU::randomPriority
std::vector< ThreadID > randomPriority()
Definition: cpu.hh:181
gem5::minor::Fetch1::numFetchesInITLB
unsigned int numFetchesInITLB
Number of requests inside the ITLB rather than in the queues.
Definition: fetch1.hh:319
gem5::NoFault
constexpr decltype(nullptr) NoFault
Definition: types.hh:253
gem5::ThreadContext::Active
@ Active
Running.
Definition: thread_context.hh:103
gem5::minor::BranchData::target
std::unique_ptr< PCStateBase > target
Starting PC of that stream.
Definition: pipe_data.hh:122
gem5::RequestPort::sendTimingReq
bool sendTimingReq(PacketPtr pkt)
Attempt to send a timing request to the responder port by calling its corresponding receive function.
Definition: port.hh:587
gem5::minor::Fetch1::fetchLine
void fetchLine(ThreadID tid)
Insert a line fetch into the requests.
Definition: fetch1.cc:155
gem5::minor::Fetch1::out
Latch< ForwardLineData >::Input out
Output port carrying read lines to Fetch2.
Definition: fetch1.hh:202
gem5::minor::Fetch1::operator<<
friend std::ostream & operator<<(std::ostream &os, Fetch1::FetchState state)
Definition: fetch1.cc:469
gem5::minor::Fetch1::threadPriority
ThreadID threadPriority
Definition: fetch1.hh:285
gem5::minor::ForwardLineData::id
InstId id
Thread, stream, prediction ...
Definition: pipe_data.hh:214
data
const char data[]
Definition: circlebuf.test.cc:48
gem5::minor::InstId
Id for lines and instructions.
Definition: dyn_inst.hh:75
gem5::minor::Fetch1::FetchRequest::reportData
void reportData(std::ostream &os) const
Report interface.
Definition: fetch1.cc:749
gem5::minor::Fetch1::FetchRequest::id
InstId id
Identity of the line that this request will generate.
Definition: fetch1.hh:129
gem5::auxv::Random
@ Random
Definition: aux_vector.hh:87
gem5::minor::Fetch1::minorTraceResponseLine
void minorTraceResponseLine(const std::string &name, FetchRequestPtr response) const
Print the appropriate MinorLine line for a fetch response.
Definition: fetch1.cc:398
gem5::minor::ForwardLineData::isBubble
bool isBubble() const
Definition: pipe_data.hh:271
gem5::minor::Fetch1::lineSeqNum
InstSeqNum lineSeqNum
Sequence number for line fetch used for ordering lines to flush.
Definition: fetch1.hh:309
gem5::Packet::pushSenderState
void pushSenderState(SenderState *sender_state)
Push a new sender state to the packet and make the current sender state the predecessor of the new on...
Definition: packet.cc:334
gem5::BaseMMU::Mode
Mode
Definition: mmu.hh:56
gem5::minor::Fetch1::FetchState
FetchState
Cycle-by-cycle state.
Definition: fetch1.hh:231
gem5::minor::Fetch1::Fetch1ThreadInfo::predictionSeqNum
InstSeqNum predictionSeqNum
Prediction sequence number.
Definition: fetch1.hh:275
gem5::BaseCPU::cacheLineSize
unsigned int cacheLineSize() const
Get the cache line size of the system.
Definition: base.hh:397
gem5::minor::BranchData::reason
Reason reason
Explanation for this branch.
Definition: pipe_data.hh:112
gem5::ThreadContext::pcState
virtual const PCStateBase & pcState() const =0
gem5::ArmISA::set
Bitfield< 12, 11 > set
Definition: misc_types.hh:760
gem5::minor::Fetch1::FetchRequest
Memory access queuing.
Definition: fetch1.hh:106
gem5::minor::Fetch1::popAndDiscard
void popAndDiscard(FetchQueue &queue)
Pop a request from the given queue and correctly deallocate and discard it.
Definition: fetch1.cc:381
gem5::minor::Queue< FetchRequestPtr, ReportTraitsPtrAdaptor< FetchRequestPtr >, NoBubbleTraits< FetchRequestPtr > >
gem5::minor::Fetch1::numFetchesInMemorySystem
unsigned int numFetchesInMemorySystem
Count of the number fetches which have left the transfers queue and are in the 'wild' in the memory s...
Definition: fetch1.hh:315
minor
gem5::minor::Fetch1::updateExpectedSeqNums
void updateExpectedSeqNums(const BranchData &branch)
Update streamSeqNum and predictionSeqNum from the given branch (and assume these have changed and dis...
Definition: fetch1.cc:522
gem5::MinorCPU
MinorCPU is an in-order CPU model with four fixed pipeline stages:
Definition: cpu.hh:84
cast.hh
gem5::minor::Fetch1::FetchRequest::InTranslation
@ InTranslation
Definition: fetch1.hh:120
gem5::minor::ForwardLineData::fetchAddr
Addr fetchAddr
Address of this line of data.
Definition: pipe_data.hh:203
std::vector
STL vector class.
Definition: stl.hh:37
gem5::minor::Fetch1::prediction
Latch< BranchData >::Output prediction
Input port carrying branch predictions from Fetch2.
Definition: fetch1.hh:204
gem5::minor::Queue::minorTrace
void minorTrace() const
Definition: buffers.hh:511
gem5::minor::Fetch1::transfers
FetchQueue transfers
Queue of in-memory system requests and responses.
Definition: fetch1.hh:303
gem5::minor::Queue::empty
bool empty() const
Is the queue empty?
Definition: buffers.hh:508
gem5::minor::Fetch1::lineSnap
unsigned int lineSnap
Line snap size in bytes.
Definition: fetch1.hh:216
gem5::minor::BranchData::isStreamChange
static bool isStreamChange(const BranchData::Reason reason)
Is a request with this reason actually a request to change the PC rather than a bubble or branch pred...
Definition: pipe_data.cc:84
gem5::MinorCPU::wakeupOnEvent
void wakeupOnEvent(unsigned int stage_id)
Interface for stages to signal that they have become active after a callback or eventq event where th...
Definition: cpu.cc:291
gem5::BaseMMU::Execute
@ Execute
Definition: mmu.hh:56
gem5::ThreadContext::status
virtual Status status() const =0
gem5::MinorCPU::threads
std::vector< minor::MinorThread * > threads
These are thread state-representing objects for this CPU.
Definition: cpu.hh:100
gem5::minor::InputBuffer
Like a Queue but with a restricted interface and a setTail function which, when the queue is empty,...
Definition: buffers.hh:571
gem5::Named
Interface for things with names.
Definition: named.hh:38
gem5::minor::Pipeline::Fetch1StageId
@ Fetch1StageId
Definition: pipeline.hh:103
gem5::BaseCPU::numThreads
ThreadID numThreads
Number of threads we're actually simulating (<= SMT_MAX_THREADS).
Definition: base.hh:384
gem5::minor::Fetch1::icachePort
IcachePort icachePort
IcachePort to pass to the CPU.
Definition: fetch1.hh:211
gem5::minor::Fetch1::handleTLBResponse
void handleTLBResponse(FetchRequestPtr response)
Handle pushing a TLB response onto the right queue.
Definition: fetch1.cc:253
gem5::minor::ForwardLineData::setFault
void setFault(Fault fault_)
Set fault and possible clear the bubble flag.
Definition: pipe_data.cc:166
gem5::minor::Fetch1::FetchRequest::isDiscardable
bool isDiscardable() const
Is this line out of date with the current stream/prediction sequence and can it be discarded without ...
Definition: fetch1.cc:754
gem5::minor::Latch::Output
Definition: buffers.hh:262
gem5::minor::Fetch1::getScheduledThread
ThreadID getScheduledThread()
Use the current threading policy to determine the next thread to fetch from.
Definition: fetch1.cc:123
gem5::minor::Fetch1::nextStageReserve
std::vector< InputBuffer< ForwardLineData > > & nextStageReserve
Interface to reserve space in the next stage.
Definition: fetch1.hh:207
decoder.hh
gem5::ThreadContext
ThreadContext is the external interface to all thread state for anything outside of the CPU.
Definition: thread_context.hh:88
gem5::minor::Fetch1::maxLineWidth
unsigned int maxLineWidth
Maximum fetch width in bytes.
Definition: fetch1.hh:222
gem5::Named::name
virtual std::string name() const
Definition: named.hh:47
gem5::Fault
std::shared_ptr< FaultBase > Fault
Definition: types.hh:248
gem5::minor::ForwardLineData::pc
std::unique_ptr< PCStateBase > pc
PC of the first inst within this sequence.
Definition: pipe_data.hh:200
DPRINTF
#define DPRINTF(x,...)
Definition: trace.hh:210
gem5::minor::Fetch1::FetchRequest::fault
Fault fault
Fill in a fault if one happens during fetch, check this by picking apart the response packet.
Definition: fetch1.hh:145
gem5::Packet
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition: packet.hh:294
gem5::minor::Latch::Input
Encapsulate wires on either input or output of the latch.
Definition: buffers.hh:251
gem5::minor::Fetch1::FetchRequest::isComplete
bool isComplete() const
Is this a complete read line or fault.
Definition: fetch1.hh:159
gem5::minor::Fetch1::FetchRequest::~FetchRequest
~FetchRequest()
Definition: fetch1.cc:276
gem5::minor::Fetch1::FetchWaitingForPC
@ FetchWaitingForPC
Definition: fetch1.hh:235
gem5::probing::Packet
ProbePointArg< PacketInfo > Packet
Packet probe point.
Definition: mem.hh:108
gem5::minor::Fetch1::Fetch1ThreadInfo::state
FetchState state
Definition: fetch1.hh:256
gem5::minor::minorLine
void minorLine(const Named &named, const char *fmt, Args ...args)
DPRINTFN for MinorTrace MinorLine line reporting.
Definition: trace.hh:83
gem5::minor::Fetch1::moveFromRequestsToTransfers
void moveFromRequestsToTransfers(FetchRequestPtr request)
Move a request between queues.
Definition: fetch1.cc:322
gem5::minor::Fetch1::tryToSendToTransfers
void tryToSendToTransfers(FetchRequestPtr request)
Try and issue a fetch for a translated request at the head of the requests queue.
Definition: fetch1.cc:283
gem5::BaseCPU::instRequestorId
RequestorID instRequestorId() const
Reads this CPU's unique instruction requestor ID.
Definition: base.hh:195
pipeline.hh
gem5::RequestPtr
std::shared_ptr< Request > RequestPtr
Definition: request.hh:92
gem5::MemCmd::ReadReq
@ ReadReq
Definition: packet.hh:87
gem5::minor::Queue::push
void push(ElemType &data)
Push an element into the buffer if it isn't a bubble.
Definition: buffers.hh:432
gem5::minor::Fetch1::Fetch1ThreadInfo::blocked
bool blocked
Blocked indication for report.
Definition: fetch1.hh:278
gem5::InvalidThreadID
const ThreadID InvalidThreadID
Definition: types.hh:236
gem5::minor::Fetch1::FetchRequest::Complete
@ Complete
Definition: fetch1.hh:123
gem5::minor::Fetch1::FetchRequest::RequestIssuing
@ RequestIssuing
Definition: fetch1.hh:122
compiler.hh
gem5::minor::BranchData
Forward data between Execute and Fetch1 carrying change-of-address/stream information.
Definition: pipe_data.hh:65
gem5::minor::Queue::occupiedSpace
unsigned int occupiedSpace() const
Number of slots already occupied in this buffer.
Definition: buffers.hh:475
gem5::BaseCPU::getContext
virtual ThreadContext * getContext(int tn)
Given a thread num get the thread context for it.
Definition: base.hh:288
gem5::minor::Fetch1::processResponse
void processResponse(FetchRequestPtr response, ForwardLineData &line)
Convert a response to a ForwardLineData.
Definition: fetch1.cc:540
gem5::Packet::needsResponse
bool needsResponse() const
Definition: packet.hh:608
gem5::minor::Fetch1::IcacheState
IcacheState
State of memory access for head instruction fetch.
Definition: fetch1.hh:288
gem5::minor::Fetch1::fetchLimit
unsigned int fetchLimit
Maximum number of fetches allowed in flight (in queues or memory)
Definition: fetch1.hh:225
gem5::Addr
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:147
gem5::minor::Fetch1::FetchHalted
@ FetchHalted
Definition: fetch1.hh:233
gem5::Packet::isError
bool isError() const
Definition: packet.hh:622
gem5::minor::BranchData::isBubble
bool isBubble() const
Definition: pipe_data.hh:163
gem5::minor::Fetch1::changeStream
void changeStream(const BranchData &branch)
Start fetching from a new address.
Definition: fetch1.cc:489
gem5::minor::Fetch1::FetchRequest::state
FetchRequestState state
Definition: fetch1.hh:126
gem5::minor::minorTrace
void minorTrace(const char *fmt, Args ...args)
DPRINTFN for MinorTrace reporting.
Definition: trace.hh:66
gem5::minor::Fetch1::FetchRequest::Translated
@ Translated
Definition: fetch1.hh:121
gem5::minor::Fetch1::FetchRunning
@ FetchRunning
Definition: fetch1.hh:238
gem5::minor::Fetch1::FetchRequest::makePacket
void makePacket()
Make a packet to use with the memory transaction.
Definition: fetch1.cc:228
gem5::Packet::popSenderState
SenderState * popSenderState()
Pop the top of the state stack and return a pointer to it.
Definition: packet.cc:342
gem5::ActivityRecorder::activity
void activity()
Records that there is activity this cycle.
Definition: activity.cc:55
gem5::minor::Fetch1::recvReqRetry
virtual void recvReqRetry()
Definition: fetch1.cc:454
gem5::minor::Fetch1::Fetch1ThreadInfo::pc
std::unique_ptr< PCStateBase > pc
Fetch PC value.
Definition: fetch1.hh:261
state
atomic_var_t state
Definition: helpers.cc:188
gem5::minor::ForwardLineData::adoptPacketData
void adoptPacketData(Packet *packet)
Use the data from a packet as line instead of allocating new space.
Definition: pipe_data.cc:186
gem5::minor::Fetch1::IcacheRunning
@ IcacheRunning
Definition: fetch1.hh:290
gem5::minor::Fetch1::Fetch1
Fetch1(const std::string &name_, MinorCPU &cpu_, const BaseMinorCPUParams &params, Latch< BranchData >::Output inp_, Latch< ForwardLineData >::Input out_, Latch< BranchData >::Output prediction_, std::vector< InputBuffer< ForwardLineData >> &next_stage_input_buffer)
Definition: fetch1.cc:60
gem5::minor::Fetch1::inp
Latch< BranchData >::Output inp
Input port carrying branch requests from Execute.
Definition: fetch1.hh:200
gem5::minor::Fetch1::wakeupFetch
void wakeupFetch(ThreadID tid)
Initiate fetch1 fetching.
Definition: fetch1.cc:717
gem5::Packet::allocate
void allocate()
Allocate memory for the packet.
Definition: packet.hh:1367
gem5::minor::BranchData::threadId
ThreadID threadId
ThreadID associated with branch.
Definition: pipe_data.hh:115
gem5::Request::INST_FETCH
@ INST_FETCH
The request was an instruction fetch.
Definition: request.hh:115
gem5::X86ISA::os
Bitfield< 17 > os
Definition: misc.hh:810
gem5::minor::Fetch1::recvTimingResp
virtual bool recvTimingResp(PacketPtr pkt)
Memory interface.
Definition: fetch1.cc:417
gem5::minor::Fetch1::Fetch1ThreadInfo
Stage cycle-by-cycle state.
Definition: fetch1.hh:243
gem5::minor::operator<<
std::ostream & operator<<(std::ostream &os, const InstId &id)
Print this id in the usual slash-separated format expected by MinorTrace.
Definition: dyn_inst.cc:63
gem5::minor::Fetch1::cpu
MinorCPU & cpu
Construction-assigned data members.
Definition: fetch1.hh:197
gem5::minor::BranchData::HaltFetch
@ HaltFetch
Definition: pipe_data.hh:98
gem5::minor::Fetch1::numInFlightFetches
unsigned int numInFlightFetches()
Returns the total number of fetches in flight: those occupying the queues, in the ITLB, and in the memory system.
Definition: fetch1.cc:390
gem5::minor::Fetch1::Fetch1ThreadInfo::fetchAddr
Addr fetchAddr
The address we're currently fetching lines from.
Definition: fetch1.hh:264
gem5::minor::Fetch1::minorTrace
void minorTrace() const
Definition: fetch1.cc:765
logging.hh
gem5::minor::ForwardLineData::lineBaseAddr
Addr lineBaseAddr
First byte address in the line.
Definition: pipe_data.hh:197
gem5::ArmISA::id
Bitfield< 33 > id
Definition: misc_types.hh:305
gem5::minor::Fetch1::FetchRequest::finish
void finish(const Fault &fault_, const RequestPtr &request_, ThreadContext *tc, BaseMMU::Mode mode)
Interface for ITLB responses.
Definition: fetch1.cc:240
gem5::minor::Fetch1::requests
FetchQueue requests
Queue of address translated requests from Fetch1.
Definition: fetch1.hh:300
gem5::minor::Fetch1::stepQueues
void stepQueues()
Step requests along between requests and transfers queues.
Definition: fetch1.cc:359
gem5::minor::Fetch1::tryToSend
bool tryToSend(FetchRequestPtr request)
Try to send (or resend) a memory request's next/only packet to the memory system.
Definition: fetch1.cc:331
gem5::MinorCPU::threadPolicy
enums::ThreadPolicy threadPolicy
Thread Scheduling Policy (RoundRobin, Random, etc)
Definition: cpu.hh:119
trace.hh
gem5::MinorCPU::activityRecorder
minor::MinorActivityRecorder * activityRecorder
Activity recording for pipeline.
Definition: cpu.hh:95
gem5::minor::Fetch1::IcacheNeedsRetry
@ IcacheNeedsRetry
Definition: fetch1.hh:291
gem5::minor::BranchData::SuspendThread
@ SuspendThread
Definition: pipe_data.hh:94
gem5::minor::Queue::pop
void pop()
Pop the head item.
Definition: buffers.hh:505
gem5::minor::Fetch1::fetchInfo
std::vector< Fetch1ThreadInfo > fetchInfo
Definition: fetch1.hh:284
gem5::minor::Fetch1::FetchRequest::pc
Addr pc
PC to fixup with line address.
Definition: fetch1.hh:141
gem5::minor::Fetch1::evaluate
void evaluate()
Pass on input/buffer data to the output if you can.
Definition: fetch1.cc:576
gem5
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
Definition: gpu_translation_state.hh:37
gem5::minor::Fetch1::FetchRequest::packet
PacketPtr packet
FetchRequests carry packets while they're in the requests and transfers responses queues.
Definition: fetch1.hh:135
gem5::minor::Fetch1::Fetch1ThreadInfo::wakeupGuard
bool wakeupGuard
Signal to guard against sleeping first cycle of wakeup.
Definition: fetch1.hh:281
gem5::minor::Fetch1::FetchRequest::request
RequestPtr request
The underlying request that this fetch represents.
Definition: fetch1.hh:138
gem5::minor::Fetch1::isDrained
bool isDrained()
Is this stage drained? For Fetch1, draining is initiated by Execute signalling a branch with the reas...
Definition: fetch1.cc:731
gem5::minor::BranchData::newPredictionSeqNum
InstSeqNum newPredictionSeqNum
Definition: pipe_data.hh:119
gem5::minor::Fetch1::icacheState
IcacheState icacheState
Retry state of icache_port.
Definition: fetch1.hh:306
gem5::minor::BranchData::newStreamSeqNum
InstSeqNum newStreamSeqNum
Sequence number of new stream/prediction to be adopted.
Definition: pipe_data.hh:118
fetch1.hh
gem5::MinorCPU::roundRobinPriority
std::vector< ThreadID > roundRobinPriority(ThreadID priority)
Thread scheduling utility functions.
Definition: cpu.hh:172
gem5::minor::Fetch1::Fetch1ThreadInfo::streamSeqNum
InstSeqNum streamSeqNum
Stream sequence number.
Definition: fetch1.hh:269
gem5::ThreadID
int16_t ThreadID
Thread index/ID type.
Definition: types.hh:235
gem5::minor::InstId::threadId
ThreadID threadId
The thread to which this line/instruction belongs.
Definition: dyn_inst.hh:88
panic
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:188
gem5::ArmISA::mode
Bitfield< 4, 0 > mode
Definition: misc_types.hh:74
gem5::minor::Queue::reserve
void reserve()
Reserve space in the queue for future pushes.
Definition: buffers.hh:460

Generated on Sun Jul 30 2023 01:56:52 for gem5 by doxygen 1.8.17