gem5 v24.0.0.0
Loading...
Searching...
No Matches
fetch1.cc
Go to the documentation of this file.
1/*
2 * Copyright (c) 2013-2014 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions are
16 * met: redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer;
18 * redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution;
21 * neither the name of the copyright holders nor the names of its
22 * contributors may be used to endorse or promote products derived from
23 * this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38#include "cpu/minor/fetch1.hh"
39
40#include <cstring>
41#include <iomanip>
42#include <sstream>
43
45#include "base/cast.hh"
46#include "base/compiler.hh"
47#include "base/logging.hh"
48#include "base/trace.hh"
49#include "cpu/minor/pipeline.hh"
50#include "debug/Drain.hh"
51#include "debug/Fetch.hh"
52#include "debug/MinorTrace.hh"
53
54namespace gem5
55{
56
57namespace minor
58{
59
// Fetch1 constructor: wires the stage to its latches, sizes the request and
// transfer queues from fetch1FetchLimit, and validates the line-width
// parameters against the decoder's instruction width.
// NOTE(review): this doxygen listing dropped original lines 63-64 (the gap
// 62 -> 65 below) -- presumably the inp_/out_ Latch parameters, given the
// inp(inp_)/out(out_) member initialisers that follow; confirm against the
// real fetch1.cc.
60Fetch1::Fetch1(const std::string &name_,
 61 MinorCPU &cpu_,
 62 const BaseMinorCPUParams &params,
 65 Latch<BranchData>::Output prediction_,
 66 std::vector<InputBuffer<ForwardLineData>> &next_stage_input_buffer) :
 67 Named(name_),
 68 cpu(cpu_),
 69 inp(inp_),
 70 out(out_),
 71 prediction(prediction_),
 72 nextStageReserve(next_stage_input_buffer),
 73 icachePort(name_ + ".icache_port", *this, cpu_),
 74 lineSnap(params.fetch1LineSnapWidth),
 75 maxLineWidth(params.fetch1LineWidth),
 76 fetchLimit(params.fetch1FetchLimit),
 77 fetchInfo(params.numThreads),
 78 threadPriority(0),
 79 requests(name_ + ".requests", "lines", params.fetch1FetchLimit),
 80 transfers(name_ + ".transfers", "lines", params.fetch1FetchLimit),
 81 icacheState(IcacheRunning),
 82 lineSeqNum(InstId::firstLineSeqNum),
 83 numFetchesInMemorySystem(0),
 84 numFetchesInITLB(0)
 85{
 // Give every hardware thread an ISA-appropriate initial PCState object.
 86 for (auto &info: fetchInfo)
 87 info.pc.reset(params.isa[0]->newPCState());
 88
 // A parameter of 0 means "use the cache line size".
 // NOTE(review): line 90 (presumably `lineSnap = cpu.cacheLineSize();`,
 // matching the DPRINTF text) was dropped by the extraction.
 89 if (lineSnap == 0) {
 91 DPRINTF(Fetch, "lineSnap set to cache line size of: %d\n",
 92 lineSnap);
 93 }
 94
 // Same default-to-cache-line-size handling for maxLineWidth; lines 96 and
 // 98 are missing from this listing.
 95 if (maxLineWidth == 0) {
 97 DPRINTF(Fetch, "maxLineWidth set to cache line size of: %d\n",
 99 }
 100
 101 size_t inst_size = cpu.threads[0]->decoder->moreBytesSize();
 102
 103 /* These assertions should be copied to the Python config. as well */
 104 if ((lineSnap % inst_size) != 0) {
 105 fatal("%s: fetch1LineSnapWidth must be a multiple "
 106 "of the inst width (%d)\n", name_,
 107 inst_size);
 108 }
 109
 // NOTE(review): the condition below looks wrong as written -- when
 // maxLineWidth < lineSnap the && yields false and `false != 0` never
 // fires the fatal, contradicting the ">= fetch1LineSnapWidth" message.
 // The intent appears to be
 //   maxLineWidth < lineSnap || (maxLineWidth % inst_size) != 0
 // -- verify against upstream before changing.
 110 if ((maxLineWidth >= lineSnap && (maxLineWidth % inst_size)) != 0) {
 111 fatal("%s: fetch1LineWidth must be a multiple of"
 112 " the inst width (%d), and >= fetch1LineSnapWidth (%d)\n",
 113 name_, inst_size, lineSnap);
 114 }
 115
 116 if (fetchLimit < 1) {
 117 fatal("%s: fetch1FetchLimit must be >= 1 (%d)\n", name_,
 118 fetchLimit);
 119 }
 120}
121
// Pick the next thread to fetch from according to the CPU's thread policy
// (per the doxygen index: "Use the current threading policy to determine the
// next thread to fetch from"). Returns InvalidThreadID if no thread is
// runnable. NOTE(review): line 123 (the `Fetch1::getScheduledThread()`
// signature) and line 143 (the first clause of the eligibility test --
// presumably an active-ThreadContext check, given the status() accessor in
// the index below) were dropped by the extraction.
122inline ThreadID
 124{
 125 /* Select thread via policy. */
 126 std::vector<ThreadID> priority_list;
 127
 // Build a candidate ordering; SingleThreaded trivially tries thread 0 only.
 128 switch (cpu.threadPolicy) {
 129 case enums::SingleThreaded:
 130 priority_list.push_back(0);
 131 break;
 132 case enums::RoundRobin:
 133 priority_list = cpu.roundRobinPriority(threadPriority);
 134 break;
 135 case enums::Random:
 136 priority_list = cpu.randomPriority();
 137 break;
 138 default:
 139 panic("Unknown fetch policy");
 140 }
 141
 // First eligible thread in priority order wins and becomes the new
 // threadPriority base for the next round-robin rotation.
 142 for (auto tid : priority_list) {
 144 !fetchInfo[tid].blocked &&
 145 fetchInfo[tid].state == FetchRunning) {
 146 threadPriority = tid;
 147 return tid;
 148 }
 149 }
 150
 151 return InvalidThreadID;
 152}
153
// Build and issue one line-fetch request for thread `tid`: allocate a
// FetchRequest tagged with the thread's current stream/prediction sequence
// numbers, push it onto `requests`, and hand it to the MMU for timing
// translation (per the doxygen index: "Insert a line fetch into the
// requests"). NOTE(review): lines 155 (signature), 186, 188 and 191 are
// missing from this listing; 186/188/191 presumably set the request state to
// InTranslation and bump numFetchesInITLB/numFetchesInMemorySystem -- confirm
// against upstream.
154void
 156{
 157 /* Reference the currently used thread state. */
 158 Fetch1ThreadInfo &thread = fetchInfo[tid];
 159
 160 /* If line_offset != 0, a request is pushed for the remainder of the
 161 * line. */
 162 /* Use a lower, sizeof(MachInst) aligned address for the fetch */
 163 Addr aligned_pc = thread.fetchAddr & ~((Addr) lineSnap - 1);
 // NOTE(review): aligned_pc was just masked down to a lineSnap boundary, so
 // this modulo looks like it is always 0 when lineSnap is a power of two
 // (power-of-two-ness is not checked anywhere visible here) -- confirm
 // the intended operand against upstream.
 164 unsigned int line_offset = aligned_pc % lineSnap;
 165 unsigned int request_size = maxLineWidth - line_offset;
 166
 167 /* Fill in the line's id */
 168 InstId request_id(tid,
 169 thread.streamSeqNum, thread.predictionSeqNum,
 170 lineSeqNum);
 171
 172 FetchRequestPtr request = new FetchRequest(*this, request_id,
 173 thread.fetchAddr);
 174
 175 DPRINTF(Fetch, "Inserting fetch into the fetch queue "
 176 "%s addr: 0x%x pc: %s line_offset: %d request_size: %d\n",
 177 request_id, aligned_pc, thread.fetchAddr, line_offset, request_size);
 178
 179 request->request->setContext(cpu.threads[tid]->getTC()->contextId());
 180 request->request->setVirt(
 181 aligned_pc, request_size, Request::INST_FETCH, cpu.instRequestorId(),
 182 /* I've no idea why we need the PC, but give it */
 183 thread.fetchAddr);
 184
 185 DPRINTF(Fetch, "Submitting ITLB request\n");
 187
 189
 190 /* Reserve space in the queues upstream of requests for results */
 192 requests.push(request);
 193
 194 /* Submit the translation request. The response will come
 195 * through finish/markDelayed on this request as it bears
 196 * the Translation interface */
 197 cpu.threads[request->id.threadId]->mmu->translateTiming(
 198 request->request,
 199 cpu.getContext(request->id.threadId),
 200 request, BaseMMU::Execute);
 201
 202 lineSeqNum++;
 203
 204 /* Step the PC for the next line onto the line aligned next address.
 205 * Note that as instructions can span lines, this PC is only a
 206 * reliable 'new' PC if the next line has a new stream sequence number. */
 207 thread.fetchAddr = aligned_pc + request_size;
 208}
209
// Stream-insertion operator pretty-printing Fetch1::IcacheState for
// DPRINTF/MinorTrace output; unknown values fall through to a numeric form.
// NOTE(review): the signature line (orig 211) and the two `case` labels
// (orig 214, 217 -- evidently IcacheRunning and IcacheNeedsRetry, from the
// strings printed) were dropped by the extraction.
210std::ostream &
 212{
 213 switch (state) {
 215 os << "IcacheRunning";
 216 break;
 218 os << "IcacheNeedsRetry";
 219 break;
 220 default:
 221 os << "IcacheState-" << static_cast<int>(state);
 222 break;
 223 }
 224 return os;
 225}
226
// Build the memory Packet used for the icache access (per the doxygen index:
// "Make a packet to use with the memory transaction"). NOTE(review): lines
// 228 (the `Fetch1::FetchRequest::makePacket()` signature) and 231
// (presumably the Packet construction call that `packet->allocate()` then
// sizes) are missing from this listing.
227void
 229{
 230 /* Make the necessary packet for a memory transaction */
 232 packet->allocate();
 233
 234 /* This FetchRequest becomes SenderState to allow the response to be
 235 * identified */
 // recvTimingResp later recovers this request via popSenderState().
 236 packet->pushSenderState(this);
 237}
238
// MMU translation-complete callback (the FetchRequest is the Translation
// object): record the fault, mark the request Translated, and let Fetch1
// route it onwards. NOTE(review): line 241 (the trailing
// `ThreadContext *tc, BaseMMU::Mode mode)` parameters, per the index entry
// for finish) was dropped by the extraction.
239void
 240Fetch1::FetchRequest::finish(const Fault &fault_, const RequestPtr &request_,
 242{
 243 fault = fault_;
 244
 245 state = Translated;
 246 fetch.handleTLBResponse(this);
 247
 248 /* Let's try and wake up the processor for the next cycle */
 249 fetch.cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
 250}
251
// Handle an ITLB response for a fetch request: log fault or success, then try
// to push the request towards the transfers queue (per the doxygen index:
// "Handle pushing a TLB response onto the right queue"). NOTE(review): lines
// 253-256 (signature and first statement) and 271 (between the log and
// tryToSendToTransfers -- presumably a numFetchesInITLB decrement) are
// missing from this listing.
252void
 254{
 256
 257 if (response->fault != NoFault) {
 258 DPRINTF(Fetch, "Fault in address ITLB translation: %s, "
 259 "paddr: 0x%x, vaddr: 0x%x\n",
 260 response->fault->name(),
 // A faulting translation may have no physical address; print 0 then.
 261 (response->request->hasPaddr() ?
 262 response->request->getPaddr() : 0),
 263 response->request->getVaddr());
 264
 265 if (debug::MinorTrace)
 266 minorTraceResponseLine(name(), response);
 267 } else {
 268 DPRINTF(Fetch, "Got ITLB response\n");
 269 }
 270
 272
 273 tryToSendToTransfers(response);
 274}
275
// FetchRequest destructor: releases the owned Packet if one is still
// attached (the packet pointer is nulled elsewhere -- see tryToSend and
// processResponse -- once ownership moves on). NOTE(review): the destructor's
// signature line (orig 276) was dropped by the extraction.
 277{
 278 if (packet)
 279 delete packet;
 280}
281
// Try to advance the request at the head of `requests`: discardable/faulting
// requests are completed without a memory access, translated ones get a
// packet and are offered to the icache port (per the doxygen index: "Try and
// issue a fetch for a translated request at the head of the requests
// queue"). NOTE(review): lines 283 (signature), 302, 306 (presumably a
// moveFromRequestsToTransfers call and a cpu.wakeupOnEvent call, per the
// surrounding comments) and 315 (the then-branch of the tryToSend test) are
// missing from this listing.
282void
 284{
 // Requests must leave in queue order; only the head may proceed.
 285 if (!requests.empty() && requests.front() != request) {
 286 DPRINTF(Fetch, "Fetch not at front of requests queue, can't"
 287 " issue to memory\n");
 288 return;
 289 }
 290
 291 if (request->state == FetchRequest::InTranslation) {
 292 DPRINTF(Fetch, "Fetch still in translation, not issuing to"
 293 " memory\n");
 294 return;
 295 }
 296
 297 if (request->isDiscardable() || request->fault != NoFault) {
 298 /* Discarded and faulting requests carry on through transfers
 299 * as Complete/packet == NULL */
 300
 301 request->state = FetchRequest::Complete;
 303
 304 /* Wake up the pipeline next cycle as there will be no event
 305 * for this queue->queue transfer */
 307 } else if (request->state == FetchRequest::Translated) {
 308 if (!request->packet)
 309 request->makePacket();
 310
 311 /* Ensure that the packet won't delete the request */
 312 assert(request->packet->needsResponse());
 313
 314 if (tryToSend(request))
 316 } else {
 317 DPRINTF(Fetch, "Not advancing line fetch\n");
 318 }
 319}
320
// Move the given request from the head of the `requests` queue onto the
// `transfers` queue (per the doxygen index: "Move a request between
// queues"); only ever legal for the current head. NOTE(review): the
// signature line (orig 322) was dropped by the extraction.
321void
 323{
 324 assert(!requests.empty() && requests.front() == request);
 325
 326 requests.pop();
 327 transfers.push(request);
 328}
329
// Attempt to send the request's packet into the memory system via the icache
// port; returns true on success (per the doxygen index: "Try to send (or
// resend) a memory request's next/only packet to the memory system").
// NOTE(review): lines 331 (signature), 340-341 (after nulling the packet --
// presumably setting the request state to RequestIssuing and bumping
// numFetchesInMemorySystem) and 349 (presumably `icacheState =
// IcacheNeedsRetry;`, matching the assert in recvReqRetry) are missing from
// this listing.
330bool
 332{
 333 bool ret = false;
 334
 335 if (icachePort.sendTimingReq(request->packet)) {
 336 /* Invalidate the fetch_requests packet so we don't
 337 * accidentally fail to deallocate it (or use it!)
 338 * later by overwriting it */
 // Ownership of the packet has passed to the memory system.
 339 request->packet = NULL;
 342
 343 ret = true;
 344
 345 DPRINTF(Fetch, "Issued fetch request to memory: %s\n",
 346 request->id);
 347 } else {
 348 /* Needs to be resent, wait for that */
 350
 351 DPRINTF(Fetch, "Line fetch needs to retry: %s\n",
 352 request->id);
 353 }
 354
 355 return ret;
 356}
357
// Per-cycle queue stepping: when the icache port is not awaiting a retry,
// try to move the head of `requests` onwards (per the doxygen index: "Step
// requests along between requests and transfers queues"). NOTE(review):
// lines 359 (signature) and 367 (the body of the !requests.empty() branch --
// presumably a tryToSendToTransfers(requests.front()) call) are missing from
// this listing.
358void
 360{
 361 IcacheState old_icache_state = icacheState;
 362
 363 switch (icacheState) {
 364 case IcacheRunning:
 365 /* Move ITLB results on to the memory system */
 366 if (!requests.empty()) {
 368 }
 369 break;
 // Nothing to do until the port's recvReqRetry callback fires.
 370 case IcacheNeedsRetry:
 371 break;
 372 }
 373
 374 if (icacheState != old_icache_state) {
 375 DPRINTF(Fetch, "Step in state %s moving to state %s\n",
 376 old_icache_state, icacheState);
 377 }
 378}
379
// Pop the head of the given queue and delete it (per the doxygen index:
// "Pop a request from the given queue and correctly deallocate and discard
// it"); a no-op on an empty queue. NOTE(review): the signature line
// (orig 381) was dropped by the extraction.
380void
 382{
 383 if (!queue.empty()) {
 // The queue holds owning raw FetchRequest pointers; delete before pop.
 384 delete queue.front();
 385 queue.pop();
 386 }
 387}
388
389unsigned int
395
// Emit the MinorTrace "line" record for a fetch response, choosing the
// format by outcome: error packet, translation fault, or successful fetch
// (per the doxygen index: "Print the appropriate MinorLine line for a fetch
// response"). NOTE(review): lines 396 and 398 (the start of the signature,
// `Fetch1::minorTraceResponseLine(const std::string &name,` per the index)
// were dropped by the extraction.
397void
 399 Fetch1::FetchRequestPtr response) const
 400{
 401 const RequestPtr &request = response->request;
 402
 403 if (response->packet && response->packet->isError()) {
 404 minorLine(*this, "id=F;%s vaddr=0x%x fault=\"error packet\"\n",
 405 response->id, request->getVaddr());
 406 } else if (response->fault != NoFault) {
 407 minorLine(*this, "id=F;%s vaddr=0x%x fault=\"%s\"\n",
 408 response->id, request->getVaddr(), response->fault->name());
 409 } else {
 410 minorLine(*this, "id=%s size=%d vaddr=0x%x paddr=0x%x\n",
 411 response->id, request->getSize(),
 412 request->getVaddr(), request->getPaddr());
 413 }
 414}
415
// Icache-port timing response: recover the originating FetchRequest from the
// packet's SenderState, attach the response packet, mark it Complete, and
// report it never-busy (true) -- queue draining happens in evaluate/step.
// NOTE(review): lines 417 (signature), 424 (presumably the safe_cast of
// popSenderState to FetchRequestPtr, given the index's safe_cast entry),
// 431 (presumably a numFetchesInMemorySystem decrement) and 447 (presumably
// cpu.wakeupOnEvent, matching the comment above it) are missing from this
// listing.
416bool
 418{
 419 DPRINTF(Fetch, "recvTimingResp %d\n", numFetchesInMemorySystem);
 420
 421 /* Only push the response if we didn't change stream? No, all responses
 422 * should hit the responses queue. It's the job of 'step' to throw them
 423 * away. */
 425 (response->popSenderState());
 426
 427 /* Fixup packet in fetch_request as this may have changed */
 // The packet pointer was nulled in tryToSend; re-attach the response here.
 428 assert(!fetch_request->packet);
 429 fetch_request->packet = response;
 430
 432 fetch_request->state = FetchRequest::Complete;
 433
 434 if (debug::MinorTrace)
 435 minorTraceResponseLine(name(), fetch_request);
 436
 437 if (response->isError()) {
 438 DPRINTF(Fetch, "Received error response packet: %s\n",
 439 fetch_request->id);
 440 }
 441
 442 /* We go to idle even if there are more things to do on the queues as
 443 * it's the job of step to actually step us on to the next transaction */
 444
 445 /* Let's try and wake up the processor for the next cycle to move on
 446 * queues */
 448
 449 /* Never busy */
 450 return true;
 451}
452
// Icache-port retry callback: re-attempt the send that previously failed
// (the head of `requests`, per the asserts) and advance it to `transfers`
// on success. NOTE(review): lines 454 (signature) and 462 (between fetching
// the head and the resend -- presumably `icacheState = IcacheRunning;`,
// clearing the state asserted above) are missing from this listing.
453void
 455{
 456 DPRINTF(Fetch, "recvRetry\n");
 // A retry only arrives after a failed send left us in IcacheNeedsRetry
 // with the failed request still queued.
 457 assert(icacheState == IcacheNeedsRetry);
 458 assert(!requests.empty());
 459
 460 FetchRequestPtr retryRequest = requests.front();
 461
 463
 464 if (tryToSend(retryRequest))
 465 moveFromRequestsToTransfers(retryRequest);
 466}
467
// Stream-insertion operator pretty-printing Fetch1::FetchState for trace
// output; unknown values fall through to a numeric form. NOTE(review): the
// signature line (orig 469) and the three `case` labels (orig 472, 475, 478
// -- evidently FetchHalted, FetchWaitingForPC and FetchRunning, from the
// strings printed) were dropped by the extraction.
468std::ostream &
 470{
 471 switch (state) {
 473 os << "FetchHalted";
 474 break;
 476 os << "FetchWaitingForPC";
 477 break;
 479 os << "FetchRunning";
 480 break;
 481 default:
 482 os << "FetchState-" << static_cast<int>(state);
 483 break;
 484 }
 485 return os;
 486}
487
// Redirect fetch for the branch's thread: adopt the branch's sequence
// numbers, adjust the thread's fetch state by branch reason, and retarget
// the PC/fetch address (per the doxygen index: "Start fetching from a new
// address"). NOTE(review): lines 489 (signature) and the two `case` labels
// at orig 497 and 508 (by the log text, evidently the suspend-thread and
// halt-fetch reasons) are missing from this listing.
488void
 490{
 491 Fetch1ThreadInfo &thread = fetchInfo[branch.threadId];
 492
 493 updateExpectedSeqNums(branch);
 494
 495 /* Start fetching again if we were stopped */
 496 switch (branch.reason) {
 498 {
 // wakeupGuard suppresses a suspend that races with a memory-system
 // wakeup in the same cycle (see the comment at the end of evaluate).
 499 if (thread.wakeupGuard) {
 500 DPRINTF(Fetch, "Not suspending fetch due to guard: %s\n",
 501 branch);
 502 } else {
 503 DPRINTF(Fetch, "Suspending fetch: %s\n", branch);
 504 thread.state = FetchWaitingForPC;
 505 }
 506 }
 507 break;
 509 DPRINTF(Fetch, "Halting fetch\n");
 510 thread.state = FetchHalted;
 511 break;
 512 default:
 513 DPRINTF(Fetch, "Changing stream on branch: %s\n", branch);
 514 thread.state = FetchRunning;
 515 break;
 516 }
 // Retarget: future fetchLine calls start from the branch target.
 517 set(thread.pc, branch.target);
 518 thread.fetchAddr = thread.pc->instAddr();
 519}
520
// Adopt the stream/prediction sequence numbers carried by a branch so that
// in-flight lines tagged with older numbers become discardable (per the
// doxygen index: "Update streamSeqNum and predictionSeqNum from the given
// branch"). NOTE(review): lines 522 (signature), 529 (the remaining DPRINTF
// arguments -- presumably thread.predictionSeqNum and
// branch.newPredictionSeqNum) and 536 (presumably the
// `thread.predictionSeqNum = branch.newPredictionSeqNum;` assignment the
// trailing comment describes) are missing from this listing.
521void
 523{
 524 Fetch1ThreadInfo &thread = fetchInfo[branch.threadId];
 525
 526 DPRINTF(Fetch, "Updating streamSeqNum from: %d to %d,"
 527 " predictionSeqNum from: %d to %d\n",
 528 thread.streamSeqNum, branch.newStreamSeqNum,
 530
 531 /* Change the stream */
 532 thread.streamSeqNum = branch.newStreamSeqNum;
 533 /* Update the prediction. Note that it's possible for this to
 534 * actually set the prediction to an *older* value if new
 535 * predictions have been discarded by execute */
 537}
538
// Turn a completed fetch response into the ForwardLineData handed to Fetch2:
// copy fault/id/PC/addresses across and, on success, transfer the packet's
// data into the line (per the doxygen index: "Convert a response to a
// ForwardLineData"). NOTE(review): lines 540 (the
// `Fetch1::processResponse(FetchRequestPtr response,` part of the signature,
// per the index) and 566 (the statement inside the fault branch --
// presumably setting the thread's state to stop fetching, as the comment
// says) are missing from this listing.
539void
 541 ForwardLineData &line)
 542{
 543 Fetch1ThreadInfo &thread = fetchInfo[response->id.threadId];
 544 PacketPtr packet = response->packet;
 545
 546 /* Pass the prefetch abort (if any) on to Fetch2 in a ForwardLineData
 547 * structure */
 548 line.setFault(response->fault);
 549 /* Make sequence numbers valid in return */
 550 line.id = response->id;
 551 /* Set the PC in case there was a sequence change */
 552 set(line.pc, thread.pc);
 553 /* Set fetch address to virtual address */
 554 line.fetchAddr = response->pc;
 555 /* Set the lineBase, which is a sizeof(MachInst) aligned address <=
 556 * pc.instAddr() */
 557 line.lineBaseAddr = response->request->getVaddr();
 558
 559 if (response->fault != NoFault) {
 560 /* Stop fetching if there was a fault */
 561 /* Should probably try to flush the queues as well, but we
 562 * can't be sure that this fault will actually reach Execute, and we
 563 * can't (currently) selectively remove this stream from the queues */
 564 DPRINTF(Fetch, "Stopping line fetch because of fault: %s\n",
 565 response->fault->name());
 567 } else {
 568 line.adoptPacketData(packet);
 569 /* Null the response's packet to prevent the response from trying to
 570 * deallocate the packet */
 // The line now owns the data; FetchRequest's destructor must not free it.
 571 response->packet = NULL;
 572 }
 573}
574
// Per-cycle stage body: (1) latch blocked status per thread from Fetch2's
// input buffers, (2) act on stream-change branches from Execute (authoritative)
// and predictions from Fetch2, (3) issue a new line fetch for the scheduled
// thread, (4) step the request/transfer queues, (5) turn the head completed
// transfer into the output line or discard it, and (6) clear the per-thread
// wakeup guards (per the doxygen index: "Pass on input/buffer data to the
// output if you can"). NOTE(review): this listing is missing lines 576
// (signature), 587, 648 (presumably the guards opening the branch-handling
// and fetch-issuing sections -- note the unmatched closing brace at orig
// 661), 671, 673 (the transfers-head Complete test and the
// `response = transfers.front()` fetch, by context), 683, 691 (presumably a
// wakeupOnEvent call and the pop of the consumed transfer) and 697
// (presumably the activity-recorder call the comment describes). Confirm all
// of these against upstream before editing.
575void
 577{
 578 const BranchData &execute_branch = *inp.outputWire;
 579 const BranchData &fetch2_branch = *prediction.outputWire;
 580 ForwardLineData &line_out = *out.inputWire;
 581
 582 assert(line_out.isBubble());
 583
 // A thread is blocked when Fetch2's input buffer has no free slot.
 584 for (ThreadID tid = 0; tid < cpu.numThreads; tid++)
 585 fetchInfo[tid].blocked = !nextStageReserve[tid].canReserve();
 586
 // Same-thread case: an Execute branch outranks a simultaneous Fetch2
 // prediction for that thread.
 588 if (execute_branch.threadId != InvalidThreadID &&
 589 execute_branch.threadId == fetch2_branch.threadId) {
 590
 591 Fetch1ThreadInfo &thread = fetchInfo[execute_branch.threadId];
 592
 593 /* Are we changing stream? Look to the Execute branches first, then
 594 * to predicted changes of stream from Fetch2 */
 595 if (execute_branch.isStreamChange()) {
 596 if (thread.state == FetchHalted) {
 597 DPRINTF(Fetch, "Halted, ignoring branch: %s\n", execute_branch);
 598 } else {
 599 changeStream(execute_branch);
 600 }
 601
 602 if (!fetch2_branch.isBubble()) {
 603 DPRINTF(Fetch, "Ignoring simultaneous prediction: %s\n",
 604 fetch2_branch);
 605 }
 606
 607 /* The streamSeqNum tagging in request/response ->req should handle
 608 * discarding those requests when we get to them. */
 609 } else if (thread.state != FetchHalted && fetch2_branch.isStreamChange()) {
 610 /* Handle branch predictions by changing the instruction source
 611 * if we're still processing the same stream (as set by streamSeqNum)
 612 * as the one of the prediction.
 613 */
 614 if (fetch2_branch.newStreamSeqNum != thread.streamSeqNum) {
 615 DPRINTF(Fetch, "Not changing stream on prediction: %s,"
 616 " streamSeqNum mismatch\n",
 617 fetch2_branch);
 618 } else {
 619 changeStream(fetch2_branch);
 620 }
 621 }
 622 } else {
 623 /* Fetch2 and Execute branches are for different threads */
 624 if (execute_branch.threadId != InvalidThreadID &&
 625 execute_branch.isStreamChange()) {
 626
 627 if (fetchInfo[execute_branch.threadId].state == FetchHalted) {
 628 DPRINTF(Fetch, "Halted, ignoring branch: %s\n", execute_branch);
 629 } else {
 630 changeStream(execute_branch);
 631 }
 632 }
 633
 634 if (fetch2_branch.threadId != InvalidThreadID &&
 635 fetch2_branch.isStreamChange()) {
 636
 637 if (fetchInfo[fetch2_branch.threadId].state == FetchHalted) {
 638 DPRINTF(Fetch, "Halted, ignoring branch: %s\n", fetch2_branch);
 639 } else if (fetch2_branch.newStreamSeqNum != fetchInfo[fetch2_branch.threadId].streamSeqNum) {
 640 DPRINTF(Fetch, "Not changing stream on prediction: %s,"
 641 " streamSeqNum mismatch\n", fetch2_branch);
 642 } else {
 643 changeStream(fetch2_branch);
 644 }
 645 }
 646 }
 647
 // Issue at most one new line fetch this cycle, to the scheduled thread.
 649 ThreadID fetch_tid = getScheduledThread();
 650
 651 if (fetch_tid != InvalidThreadID) {
 652 DPRINTF(Fetch, "Fetching from thread %d\n", fetch_tid);
 653
 654 /* Generate fetch to selected thread */
 655 fetchLine(fetch_tid);
 656 /* Take up a slot in the fetch queue */
 657 nextStageReserve[fetch_tid].reserve();
 658 } else {
 659 DPRINTF(Fetch, "No active threads available to fetch from\n");
 660 }
 661 }
 662
 663
 664 /* Halting shouldn't prevent fetches in flight from being processed */
 665 /* Step fetches through the icachePort queues and memory system */
 666 stepQueues();
 667
 668 /* As we've thrown away early lines, if there is a line, it must
 669 * be from the right stream */
 670 if (!transfers.empty() &&
 672 {
 674
 675 if (response->isDiscardable()) {
 // Release the Fetch2 slot reserved when this line was issued.
 676 nextStageReserve[response->id.threadId].freeReservation();
 677
 678 DPRINTF(Fetch, "Discarding translated fetch as it's for"
 679 " an old stream\n");
 680
 681 /* Wake up next cycle just in case there was some other
 682 * action to do */
 684 } else {
 685 DPRINTF(Fetch, "Processing fetched line: %s\n",
 686 response->id);
 687
 688 processResponse(response, line_out);
 689 }
 690
 692 }
 693
 694 /* If we generated output, and mark the stage as being active
 695 * to encourage that output on to the next stage */
 696 if (!line_out.isBubble())
 698
 699 /* Fetch1 has no inputBuffer so the only activity we can have is to
 700 * generate a line output (tested just above) or to initiate a memory
 701 * fetch which will signal activity when it returns/needs stepping
 702 * between queues */
 703
 704
 705 /* This looks hackish. And it is, but there doesn't seem to be a better
 706 * way to do this. The signal from commit to suspend fetch takes 1
 707 * clock cycle to propagate to fetch. However, a legitimate wakeup
 708 * may occur between cycles from the memory system. Thus wakeup guard
 709 * prevents us from suspending in that case. */
 710
 711 for (auto& thread : fetchInfo) {
 712 thread.wakeupGuard = false;
 713 }
 714}
715
// Restart fetching for thread `tid` from its ThreadContext's current PC and
// arm the wakeup guard so a same-cycle suspend from Execute does not undo
// the wakeup (per the doxygen index: "Initiate fetch1 fetching").
// NOTE(review): lines 717 (signature) and 727 (after the DPRINTF --
// presumably a cpu.wakeupOnEvent(Pipeline::Fetch1StageId) call, as in
// FetchRequest::finish) are missing from this listing.
716void
 718{
 719 ThreadContext *thread_ctx = cpu.getContext(tid);
 720 Fetch1ThreadInfo &thread = fetchInfo[tid];
 721 set(thread.pc, thread_ctx->pcState());
 722 thread.fetchAddr = thread.pc->instAddr();
 723 thread.state = FetchRunning;
 724 thread.wakeupGuard = true;
 725 DPRINTF(Fetch, "[tid:%d]: Changing stream wakeup %s\n", tid, *thread.pc);
 726
 728}
729
// Drain check: the stage is drained when no fetches are in flight, the
// output latch holds a bubble, and no thread is still FetchRunning (per the
// doxygen index: draining is initiated by Execute signalling a branch).
// NOTE(review): the signature line (orig 731) was dropped by the extraction.
730bool
 732{
 733 bool drained = numInFlightFetches() == 0 && (*out.inputWire).isBubble();
 734 for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
 735 Fetch1ThreadInfo &thread = fetchInfo[tid];
 736 DPRINTF(Drain, "isDrained[tid:%d]: %s %s%s\n",
 737 tid,
 738 thread.state == FetchHalted,
 739 (numInFlightFetches() == 0 ? "" : "inFlightFetches "),
 740 ((*out.inputWire).isBubble() ? "" : "outputtingLine"));
 741
 // Any thread still running keeps the whole stage un-drained.
 742 drained = drained && (thread.state != FetchRunning);
 743 }
 744
 745 return drained;
 746}
747
// MinorTrace report hook: a queued fetch request is represented simply by
// its InstId (per the doxygen index: "Report interface"). NOTE(review): the
// signature line (orig 749) was dropped by the extraction.
748void
 750{
 751 os << id;
 752}
753
// A request is discardable once it is out of the TLB/memory system
// (not InTranslation or RequestIssuing) and its stream or prediction
// sequence number no longer matches the owning thread's current values
// (per the doxygen index: "Is this line out of date with the current
// stream/prediction sequence"). NOTE(review): the signature line (orig 754)
// was dropped by the extraction.
 755{
 756 Fetch1ThreadInfo &thread = fetch.fetchInfo[id.threadId];
 757
 758 /* Can't discard lines in TLB/memory */
 759 return state != InTranslation && state != RequestIssuing &&
 760 (id.streamSeqNum != thread.streamSeqNum ||
 761 id.predictionSeqNum != thread.predictionSeqNum);
 762}
763
// Emit the per-cycle MinorTrace record for this stage; currently only
// reports thread 0's state, as the in-code TODO admits. NOTE(review): lines
// 765 (signature), 780 and 782-783 (the remaining minorTrace arguments --
// by the format string, the in_tlb_mem counters and the queue minorTrace
// calls) are missing from this listing.
764void
 766{
 767 // TODO: Un-bork minorTrace for THREADS
 768 // bork bork bork
 769 const Fetch1ThreadInfo &thread = fetchInfo[0];
 770
 771 std::ostringstream data;
 772
 // 'B' marks a blocked stage; otherwise report the pending output line.
 773 if (thread.blocked)
 774 data << 'B';
 775 else
 776 (*out.inputWire).reportData(data);
 777
 778 minor::minorTrace("state=%s icacheState=%s in_tlb_mem=%s/%s"
 779 " streamSeqNum=%d lines=%s\n", thread.state, icacheState,
 781 thread.streamSeqNum, data.str());
 784}
785
786} // namespace minor
787} // namespace gem5
#define DPRINTF(x,...)
Definition trace.hh:210
const char data[]
void activity()
Records that there is activity this cycle.
Definition activity.cc:55
RequestorID instRequestorId() const
Reads this CPU's unique instruction requestor ID.
Definition base.hh:195
Addr cacheLineSize() const
Get the cache line size of the system.
Definition base.hh:397
virtual ThreadContext * getContext(int tn)
Given a thread num get the thread context for it.
Definition base.hh:288
ThreadID numThreads
Number of threads we're actually simulating (<= SMT_MAX_THREADS).
Definition base.hh:390
MinorCPU is an in-order CPU model with four fixed pipeline stages:
Definition cpu.hh:85
minor::MinorActivityRecorder * activityRecorder
Activity recording for pipeline.
Definition cpu.hh:95
void wakeupOnEvent(unsigned int stage_id)
Interface for stages to signal that they have become active after a callback or eventq event where th...
Definition cpu.cc:291
std::vector< ThreadID > randomPriority()
Definition cpu.hh:181
std::vector< ThreadID > roundRobinPriority(ThreadID priority)
Thread scheduling utility functions.
Definition cpu.hh:172
enums::ThreadPolicy threadPolicy
Thread Scheduling Policy (RoundRobin, Random, etc)
Definition cpu.hh:119
std::vector< minor::MinorThread * > threads
These are thread state-representing objects for this CPU.
Definition cpu.hh:100
Interface for things with names.
Definition named.hh:39
virtual std::string name() const
Definition named.hh:47
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition packet.hh:295
bool isError() const
Definition packet.hh:622
bool needsResponse() const
Definition packet.hh:608
void pushSenderState(SenderState *sender_state)
Push a new sender state to the packet and make the current sender state the predecessor of the new on...
Definition packet.cc:334
SenderState * popSenderState()
Pop the top of the state stack and return a pointer to it.
Definition packet.cc:342
void allocate()
Allocate memory for the packet.
Definition packet.hh:1367
bool sendTimingReq(PacketPtr pkt)
Attempt to send a timing request to the responder port by calling its corresponding receive function.
Definition port.hh:603
@ INST_FETCH
The request was an instruction fetch.
Definition request.hh:115
ThreadContext is the external interface to all thread state for anything outside of the CPU.
virtual const PCStateBase & pcState() const =0
virtual Status status() const =0
Forward data between Execute and Fetch1 carrying change-of-address/stream information.
Definition pipe_data.hh:66
static bool isStreamChange(const BranchData::Reason reason)
Is a request with this reason actually a request to change the PC rather than a bubble or branch pred...
Definition pipe_data.cc:84
InstSeqNum newStreamSeqNum
Sequence number of new stream/prediction to be adopted.
Definition pipe_data.hh:118
Reason reason
Explanation for this branch.
Definition pipe_data.hh:112
InstSeqNum newPredictionSeqNum
Definition pipe_data.hh:119
ThreadID threadId
ThreadID associated with branch.
Definition pipe_data.hh:115
std::unique_ptr< PCStateBase > target
Starting PC of that stream.
Definition pipe_data.hh:122
Memory access queuing.
Definition fetch1.hh:109
InstId id
Identity of the line that this request will generate.
Definition fetch1.hh:129
bool isComplete() const
Is this a complete read line or fault.
Definition fetch1.hh:159
bool isDiscardable() const
Is this line out of date with the current stream/prediction sequence and can it be discarded without ...
Definition fetch1.cc:754
void reportData(std::ostream &os) const
Report interface.
Definition fetch1.cc:749
void finish(const Fault &fault_, const RequestPtr &request_, ThreadContext *tc, BaseMMU::Mode mode)
Interface for ITLB responses.
Definition fetch1.cc:240
Addr pc
PC to fixup with line address.
Definition fetch1.hh:141
void makePacket()
Make a packet to use with the memory transaction.
Definition fetch1.cc:228
Fault fault
Fill in a fault if one happens during fetch, check this by picking apart the response packet.
Definition fetch1.hh:145
RequestPtr request
The underlying request that this fetch represents.
Definition fetch1.hh:138
PacketPtr packet
FetchRequests carry packets while they're in the requests and transfers responses queues.
Definition fetch1.hh:135
FetchState
Cycle-by-cycle state.
Definition fetch1.hh:232
ThreadID threadPriority
Definition fetch1.hh:285
unsigned int numFetchesInITLB
Number of requests inside the ITLB rather than in the queues.
Definition fetch1.hh:319
Fetch1(const std::string &name_, MinorCPU &cpu_, const BaseMinorCPUParams &params, Latch< BranchData >::Output inp_, Latch< ForwardLineData >::Input out_, Latch< BranchData >::Output prediction_, std::vector< InputBuffer< ForwardLineData > > &next_stage_input_buffer)
Definition fetch1.cc:60
virtual void recvReqRetry()
Definition fetch1.cc:454
void changeStream(const BranchData &branch)
Start fetching from a new address.
Definition fetch1.cc:489
void minorTraceResponseLine(const std::string &name, FetchRequestPtr response) const
Print the appropriate MinorLine line for a fetch response.
Definition fetch1.cc:398
unsigned int numFetchesInMemorySystem
Count of the number of fetches which have left the transfers queue and are in the 'wild' in the memory s...
Definition fetch1.hh:315
Latch< BranchData >::Output inp
Input port carrying branch requests from Execute.
Definition fetch1.hh:200
ThreadID getScheduledThread()
Use the current threading policy to determine the next thread to fetch from.
Definition fetch1.cc:123
void tryToSendToTransfers(FetchRequestPtr request)
Try and issue a fetch for a translated request at the head of the requests queue.
Definition fetch1.cc:283
void popAndDiscard(FetchQueue &queue)
Pop a request from the given queue and correctly deallocate and discard it.
Definition fetch1.cc:381
std::vector< Fetch1ThreadInfo > fetchInfo
Definition fetch1.hh:284
IcacheState icacheState
Retry state of icache_port.
Definition fetch1.hh:306
virtual bool recvTimingResp(PacketPtr pkt)
Memory interface.
Definition fetch1.cc:417
IcachePort icachePort
IcachePort to pass to the CPU.
Definition fetch1.hh:211
Addr lineSnap
Line snap size in bytes.
Definition fetch1.hh:216
std::vector< InputBuffer< ForwardLineData > > & nextStageReserve
Interface to reserve space in the next stage.
Definition fetch1.hh:207
Addr maxLineWidth
Maximum fetch width in bytes.
Definition fetch1.hh:222
void wakeupFetch(ThreadID tid)
Initiate fetch1 fetching.
Definition fetch1.cc:717
friend std::ostream & operator<<(std::ostream &os, Fetch1::FetchState state)
Definition fetch1.cc:469
bool tryToSend(FetchRequestPtr request)
Try to send (or resend) a memory request's next/only packet to the memory system.
Definition fetch1.cc:331
void handleTLBResponse(FetchRequestPtr response)
Handle pushing a TLB response onto the right queue.
Definition fetch1.cc:253
bool isDrained()
Is this stage drained? For Fetch1, draining is initiated by Execute signalling a branch with the reas...
Definition fetch1.cc:731
Latch< BranchData >::Output prediction
Input port carrying branch predictions from Fetch2.
Definition fetch1.hh:204
void evaluate()
Pass on input/buffer data to the output if you can.
Definition fetch1.cc:576
void minorTrace() const
Definition fetch1.cc:765
unsigned int fetchLimit
Maximum number of fetches allowed in flight (in queues or memory)
Definition fetch1.hh:225
void processResponse(FetchRequestPtr response, ForwardLineData &line)
Convert a response to a ForwardLineData.
Definition fetch1.cc:540
unsigned int numInFlightFetches()
Returns the total number of queue occupancy, in-ITLB and in-memory system fetches.
Definition fetch1.cc:390
MinorCPU & cpu
Construction-assigned data members.
Definition fetch1.hh:197
FetchQueue transfers
Queue of in-memory system requests and responses.
Definition fetch1.hh:303
void stepQueues()
Step requests along between requests and transfers queues.
Definition fetch1.cc:359
void updateExpectedSeqNums(const BranchData &branch)
Update streamSeqNum and predictionSeqNum from the given branch (and assume these have changed and dis...
Definition fetch1.cc:522
InstSeqNum lineSeqNum
Sequence number for line fetch used for ordering lines to flush.
Definition fetch1.hh:309
void fetchLine(ThreadID tid)
Insert a line fetch into the requests.
Definition fetch1.cc:155
Latch< ForwardLineData >::Input out
Output port carrying read lines to Fetch2.
Definition fetch1.hh:202
FetchQueue requests
Queue of address translated requests from Fetch1.
Definition fetch1.hh:300
void moveFromRequestsToTransfers(FetchRequestPtr request)
Move a request between queues.
Definition fetch1.cc:322
IcacheState
State of memory access for head instruction fetch.
Definition fetch1.hh:289
Line fetch data in the forward direction.
Definition pipe_data.hh:187
void setFault(Fault fault_)
Set fault and possible clear the bubble flag.
Definition pipe_data.cc:166
void adoptPacketData(Packet *packet)
Use the data from a packet as line instead of allocating new space.
Definition pipe_data.cc:186
Addr fetchAddr
Address of this line of data.
Definition pipe_data.hh:203
InstId id
Thread, stream, prediction ... id of this line.
Definition pipe_data.hh:214
std::unique_ptr< PCStateBase > pc
PC of the first inst within this sequence.
Definition pipe_data.hh:200
Addr lineBaseAddr
First byte address in the line.
Definition pipe_data.hh:197
Like a Queue but with a restricted interface and a setTail function which, when the queue is empty,...
Definition buffers.hh:572
Id for lines and instructions.
Definition dyn_inst.hh:76
ThreadID threadId
The thread to which this line/instruction belongs.
Definition dyn_inst.hh:88
Encapsulate wires on either input or output of the latch.
Definition buffers.hh:252
bool empty() const
Is the queue empty?
Definition buffers.hh:508
void minorTrace() const
Definition buffers.hh:511
void pop()
Pop the head item.
Definition buffers.hh:505
unsigned int occupiedSpace() const
Number of slots already occupied in this buffer.
Definition buffers.hh:475
ElemType & front()
Head value.
Definition buffers.hh:500
void reserve()
Reserve space in the queue for future pushes.
Definition buffers.hh:460
void push(ElemType &data)
Push an element into the buffer if it isn't a bubble.
Definition buffers.hh:432
STL vector class.
Definition stl.hh:37
Fetch1 is responsible for fetching "lines" from memory and passing them to Fetch2.
#define panic(...)
This implements a cprintf based panic() function.
Definition logging.hh:188
#define fatal(...)
This implements a cprintf based fatal() function.
Definition logging.hh:200
atomic_var_t state
Definition helpers.cc:211
Bitfield< 4, 0 > mode
Definition misc_types.hh:74
Bitfield< 33 > id
Bitfield< 12, 11 > set
Bitfield< 17 > os
Definition misc.hh:838
std::ostream & operator<<(std::ostream &os, const InstId &id)
Print this id in the usual slash-separated format expected by MinorTrace.
Definition dyn_inst.cc:63
void minorTrace(const char *fmt, Args ...args)
DPRINTFN for MinorTrace reporting.
Definition trace.hh:66
void minorLine(const Named &named, const char *fmt, Args ...args)
DPRINTFN for MinorTrace MinorLine line reporting.
Definition trace.hh:83
Copyright (c) 2024 - Pranith Kumar Copyright (c) 2020 Inria All rights reserved.
Definition binary32.hh:36
std::shared_ptr< FaultBase > Fault
Definition types.hh:249
int16_t ThreadID
Thread index/ID type.
Definition types.hh:235
T safe_cast(U &&ref_or_ptr)
Definition cast.hh:74
std::shared_ptr< Request > RequestPtr
Definition request.hh:94
const ThreadID InvalidThreadID
Definition types.hh:236
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition types.hh:147
constexpr decltype(nullptr) NoFault
Definition types.hh:253
Minor contains all the definitions within the MinorCPU apart from the CPU class itself.
The constructed pipeline.
Stage cycle-by-cycle state.
Definition fetch1.hh:244
std::unique_ptr< PCStateBase > pc
Fetch PC value.
Definition fetch1.hh:261
InstSeqNum streamSeqNum
Stream sequence number.
Definition fetch1.hh:269
InstSeqNum predictionSeqNum
Prediction sequence number.
Definition fetch1.hh:275
bool blocked
Blocked indication for report.
Definition fetch1.hh:278
bool wakeupGuard
Signal to guard against sleeping first cycle of wakeup.
Definition fetch1.hh:281
Addr fetchAddr
The address we're currently fetching lines from.
Definition fetch1.hh:264

Generated on Tue Jun 18 2024 16:24:01 for gem5 by doxygen 1.11.0