gem5 [DEVELOP-FOR-25.1]
Loading...
Searching...
No Matches
timing.cc
Go to the documentation of this file.
1/*
2 * Copyright 2014 Google, Inc.
3 * Copyright (c) 2010-2013,2015,2017-2018, 2020-2021 ARM Limited
4 * All rights reserved
5 *
6 * The license below extends only to copyright in the software and shall
7 * not be construed as granting a license to any other intellectual
8 * property including but not limited to intellectual property relating
9 * to a hardware implementation of the functionality of the software
10 * licensed hereunder. You may use the software subject to the license
11 * terms below provided that you ensure that this notice is replicated
12 * unmodified and in its entirety in all distributions of the software,
13 * modified or unmodified, in source code or in binary form.
14 *
15 * Copyright (c) 2002-2005 The Regents of The University of Michigan
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#include "cpu/simple/timing.hh"
43
45#include "base/compiler.hh"
46#include "cpu/exetrace.hh"
47#include "debug/Config.hh"
48#include "debug/Drain.hh"
49#include "debug/ExecFaulting.hh"
50#include "debug/HtmCpu.hh"
51#include "debug/Mwait.hh"
52#include "debug/SimpleCPU.hh"
53#include "mem/packet.hh"
54#include "mem/packet_access.hh"
55#include "params/BaseTimingSimpleCPU.hh"
56#include "sim/faults.hh"
57#include "sim/full_system.hh"
58#include "sim/system.hh"
59
60namespace gem5
61{
62
// NOTE(review): this is a doxygen scrape — the leading digits on each line are
// the original file's line numbers; gaps in the numbering (64-67, 70) are lines
// the scrape dropped. Original line 63/68 fragment is TimingSimpleCPU::init()
// with its body lost in extraction.
63void
68
// TimingCPUPort::TickEvent::schedule (signature line 70 missing): latch the
// response packet and schedule this event on the CPU's event queue at tick t.
69void
71{
72 pkt = _pkt;
73 cpu->schedule(this, t);
74}
75
// Constructor: wires up the dcache port, clears the in-flight fetch/data
// packet pointers, registers the fetch event callback, and starts Idle.
// NOTE(review): original line 77 (start of the base-class initializer list)
// was dropped by the scrape.
76TimingSimpleCPU::TimingSimpleCPU(const BaseTimingSimpleCPUParams &p)
78 dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0),
79 fetchEvent([this]{ fetch(); }, name())
80{
81 _status = Idle;
82}
83
// NOTE(review): lines 84-89 are the remnants of the destructor; its body was
// dropped by the scrape. Below is drain() (signature lines 90-91 missing):
// returns Drained immediately when idle/switched-out, otherwise requests a
// drain and (presumably) returns Draining — the return on the Draining path
// (line 113) was dropped; verify against the original source.
84
85
89
92{
93 // Deschedule any power gating event (if any)
95
96 if (switchedOut())
98
99 if (_status == Idle ||
101 DPRINTF(Drain, "No need to drain.\n");
102 activeThreads.clear();
103 return DrainState::Drained;
104 } else {
105 DPRINTF(Drain, "Requesting drain.\n");
106
107 // The fetch event can become descheduled if a drain didn't
108 // succeed on the first attempt. We need to reschedule it if
109 // the CPU is waiting for a microcode routine to complete.
110 if (_status == BaseSimpleCPU::Running && !fetchEvent.scheduled())
112
114 }
115}
116
// drainResume (signature line 118 missing): after a drain completes, rebuild
// the active-thread list from the thread contexts, mark idle-fraction stats,
// and restart fetching if any thread is Active. Lines 125/129/137/141/149
// (scheduling and status updates) were dropped by the scrape.
117void
119{
120 assert(!fetchEvent.scheduled());
121 if (switchedOut())
122 return;
123
124 DPRINTF(SimpleCPU, "Resume\n");
126
127 assert(!threadContexts.empty());
128
130
131 for (ThreadID tid = 0; tid < numThreads; tid++) {
132 if (threadInfo[tid]->thread->status() == ThreadContext::Active) {
133 threadInfo[tid]->execContextStats.notIdleFraction = 1;
134
135 activeThreads.push_back(tid);
136
138
139 // Fetch if any threads active
140 if (!fetchEvent.scheduled()) {
142 }
143 } else {
144 threadInfo[tid]->execContextStats.notIdleFraction = 0;
145 }
146 }
147
148 // Reschedule any power gating event (if any)
150}
151
// tryCompleteDrain (signature line 153 missing): returns true and signals the
// drain manager once the CPU has no in-flight work; returns false otherwise.
// Lines 155 (the drain-pending guard) and 163 (signalDrainDone, presumably)
// were dropped by the scrape — confirm against the original source.
152bool
154{
156 return false;
157
158 DPRINTF(Drain, "tryCompleteDrain.\n");
159 if (!isCpuDrained())
160 return false;
161
162 DPRINTF(Drain, "CPU done draining, processing drain event\n");
164
165 return true;
166}
167
// switchOut (signature line 169 missing): sanity-checks that the CPU is
// quiescent — not mid-HTM-transaction, no fetch event pending, not stalled at
// a PC, and at micro-PC 0 — before handing execution to another CPU model.
// Lines 171/178/181/185-186 (including the BaseSimpleCPU::switchOut() call,
// presumably) were dropped by the scrape.
168void
170{
172 [[maybe_unused]] SimpleThread* thread = t_info.thread;
173
174 // hardware transactional memory
175 // Cannot switch out the CPU in the middle of a transaction
176 assert(!t_info.inHtmTransactionalState());
177
179
180 assert(!fetchEvent.scheduled());
182 assert(!t_info.stayAtPC);
183 assert(thread->pcState().microPC() == 0);
184
187}
188
189
// takeOverFrom (lines 190-197): body entirely lost in the scrape.
190void
197
// verifyMemoryMode (signature line 199 missing): this CPU model only works
// with a timing-mode memory system; abort the simulation otherwise.
198void
200{
201 if (!system->isTimingMode()) {
202 fatal("The timing CPU requires the memory system to be in "
203 "'timing' mode.\n");
204 }
205}
206
// activateContext (signature line 208 missing): mark the thread busy, kick
// off an instruction fetch if one isn't already pending, and add the thread
// to the active list (guarding against duplicates). Lines 215-216/220
// (status update and fetch scheduling, presumably) were dropped by the scrape.
207void
209{
210 DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);
211
212 assert(thread_num < numThreads);
213
214 threadInfo[thread_num]->execContextStats.notIdleFraction = 1;
217
218 // kick things off by initiating the fetch of the next instruction
219 if (!fetchEvent.scheduled())
221
222 if (std::find(activeThreads.begin(), activeThreads.end(), thread_num)
223 == activeThreads.end()) {
224 activeThreads.push_back(thread_num);
225 }
226
227 BaseCPU::activateContext(thread_num);
228}
229
230
// suspendContext (signature line 232 missing): remove the thread from the
// active list and go Idle (descheduling the fetch event) when no threads
// remain active. Must not be called mid-HTM-transaction.
231void
233{
234 DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);
235
236 assert(thread_num < numThreads);
237 activeThreads.remove(thread_num);
238
239 // hardware transactional memory
240 // Cannot suspend context in the middle of a transaction.
241 assert(!threadInfo[curThread]->inHtmTransactionalState());
242
243 if (_status == Idle)
244 return;
245
247
248 threadInfo[thread_num]->execContextStats.notIdleFraction = 0;
249
250 if (activeThreads.empty()) {
251 _status = Idle;
252
253 if (fetchEvent.scheduled()) {
255 }
256 }
257
258 BaseCPU::suspendContext(thread_num);
259}
260
// handleReadPacket (signature line 262 missing): issue a timing read to the
// dcache (or execute a local/IPR access via a delayed IprEvent). Returns true
// iff the packet was handed off (dcache_pkt == NULL); on a rejected
// sendTimingReq the packet is parked in dcache_pkt for retry. Status-update
// lines 283/286/289 were dropped by the scrape.
261bool
263{
265 SimpleThread* thread = t_info.thread;
266
267 const RequestPtr &req = pkt->req;
268
269 // hardware transactional memory
270 // sanity check
271 if (req->isHTMCmd()) {
272 assert(!req->isLocalAccess());
273 }
274
275 // We're about the issues a locked load, so tell the monitor
276 // to start caring about this address
277 if (pkt->isRead() && pkt->req->isLLSC()) {
278 thread->getIsaPtr()->handleLockedRead(pkt->req);
279 }
280 if (req->isLocalAccess()) {
281 Cycles delay = req->localAccessor(thread->getTC(), pkt);
282 new IprEvent(pkt, this, clockEdge(delay));
284 dcache_pkt = NULL;
285 } else if (!dcachePort.sendTimingReq(pkt)) {
287 dcache_pkt = pkt;
288 } else {
290 // memory system takes ownership of packet
291 dcache_pkt = NULL;
292 }
293 return dcache_pkt == NULL;
294}
295
// sendData: build a packet for a (non-split) translated request and send it
// to the dcache. Handles NO_ACCESS requests by synthesizing a response,
// tags HTM-speculative packets, and applies LLSC/CondSwap semantics before
// a write. Lines 300/312/320/336-337/339-340 (including the actual
// handleWritePacket call on the do_access path, presumably) were dropped by
// the doxygen scrape.
296void
297TimingSimpleCPU::sendData(const RequestPtr &req, uint8_t *data, uint64_t *res,
298 bool read)
299{
301 SimpleThread* thread = t_info.thread;
302
303 PacketPtr pkt = buildPacket(req, read);
304 pkt->dataDynamic<uint8_t>(data);
305
306 // hardware transactional memory
307 // If the core is in transactional mode or if the request is HtmCMD
308 // to abort a transaction, the packet should reflect that it is
309 // transactional and also contain a HtmUid for debugging.
310 const bool is_htm_speculative = t_info.inHtmTransactionalState();
311 if (is_htm_speculative || req->isHTMAbort()) {
313 }
314 if (req->isHTMAbort())
315 DPRINTF(HtmCpu, "htmabort htmUid=%u\n", t_info.getHtmTransactionUid());
316
317 if (req->getFlags().isSet(Request::NO_ACCESS)) {
318 assert(!dcache_pkt);
319 pkt->makeResponse();
321 } else if (read) {
322 handleReadPacket(pkt);
323 } else {
324 bool do_access = true; // flag to suppress cache access
325
326 if (req->isLLSC()) {
327 do_access = thread->getIsaPtr()->handleLockedWrite(
328 req, dcachePort.cacheBlockMask);
329 } else if (req->isCondSwap()) {
330 assert(res);
331 req->setExtraData(*res);
332 }
333
334 if (do_access) {
335 dcache_pkt = pkt;
338 } else {
341 }
342 }
343}
344
// sendSplitData (signature line 346 missing): issue a cache-line-crossing
// access as two fragment packets tied together by SplitMain/SplitFragment
// sender state. For reads/writes, send fragment 1 and only proceed to
// fragment 2 if the first was accepted; clearFromParent() marks a fragment
// as no longer pending a retry.
345void
347 const RequestPtr &req, uint8_t *data, bool read)
348{
350 PacketPtr pkt1, pkt2;
351 buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
352
353 // hardware transactional memory
354 // HTM commands should never use SplitData
355 assert(!req1->isHTMCmd() && !req2->isHTMCmd());
356
357 // If the thread is executing transactionally,
358 // reflect this in the packets.
359 if (t_info.inHtmTransactionalState()) {
362 }
363
364 if (req->getFlags().isSet(Request::NO_ACCESS)) {
365 assert(!dcache_pkt);
366 pkt1->makeResponse();
367 completeDataAccess(pkt1);
368 } else if (read) {
369 SplitFragmentSenderState * send_state =
370 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
371 if (handleReadPacket(pkt1)) {
372 send_state->clearFromParent();
373 send_state = dynamic_cast<SplitFragmentSenderState *>(
374 pkt2->senderState);
375 if (handleReadPacket(pkt2)) {
376 send_state->clearFromParent();
377 }
378 }
379 } else {
380 dcache_pkt = pkt1;
381 SplitFragmentSenderState * send_state =
382 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
383 if (handleWritePacket()) {
384 send_state->clearFromParent();
385 dcache_pkt = pkt2;
386 send_state = dynamic_cast<SplitFragmentSenderState *>(
387 pkt2->senderState);
388 if (handleWritePacket()) {
389 send_state->clearFromParent();
390 }
391 }
392 }
393}
394
// translationFault (signature line 396 missing): finish an instruction whose
// data translation faulted — trace the fault if tracing, run postExecute on
// the NoFault (suppressed-fault, e.g. prefetch) path, then advance.
// Lines 400-401 were dropped by the scrape.
395void
397{
398 // fault may be NoFault in cases where a fault is suppressed,
399 // for instance prefetches.
402
403 if ((fault != NoFault) && traceData) {
404 traceFault();
405 }
406
407 if (fault == NoFault) {
408 postExecute();
409 }
410
411 advanceInst(fault);
412}
413
// buildPacket (signature lines 414-415 missing from the scrape): construct a
// read or write packet for the request depending on the access direction.
416{
417 return read ? Packet::createRead(req) : Packet::createWrite(req);
418}
419
// buildSplitPacket (first signature line 421 missing): build the two fragment
// packets plus the combined "main" packet for a split access. The main packet
// owns the data buffer (dataDynamic); the fragments alias into it (dataStatic)
// at offsets 0 and req1->getSize(). Sender-state objects link fragments back
// to the main packet; NO_ACCESS requests collapse to a single packet.
420void
422 const RequestPtr &req1, const RequestPtr &req2, const RequestPtr &req,
423 uint8_t *data, bool read)
424{
425 pkt1 = pkt2 = NULL;
426
427 assert(!req1->isLocalAccess() && !req2->isLocalAccess());
428
429 if (req->getFlags().isSet(Request::NO_ACCESS)) {
430 pkt1 = buildPacket(req, read);
431 return;
432 }
433
434 pkt1 = buildPacket(req1, read);
435 pkt2 = buildPacket(req2, read);
436
437 PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand());
438
439 pkt->dataDynamic<uint8_t>(data);
440 pkt1->dataStatic<uint8_t>(data);
441 pkt2->dataStatic<uint8_t>(data + req1->getSize());
442
443 SplitMainSenderState * main_send_state = new SplitMainSenderState;
444 pkt->senderState = main_send_state;
445 main_send_state->fragments[0] = pkt1;
446 main_send_state->fragments[1] = pkt2;
447 main_send_state->outstanding = 2;
448 pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
449 pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
450}
451
// initiateMemRead (first signature line 453 missing): start a timing read —
// build the Request, split it if it crosses a cache-line boundary, and kick
// off the (asynchronous) MMU translation; the actual access is issued from
// finishTranslation(). Always returns NoFault — translation faults are
// delivered later. Lines 486/488/496 (the trans1/trans2/translation variable
// declarations, presumably) were dropped by the scrape.
452Fault
454 Request::Flags flags,
455 const std::vector<bool>& byte_enable)
456{
458 SimpleThread* thread = t_info.thread;
459
460 Fault fault;
461 const Addr pc = thread->pcState().instAddr();
462 unsigned block_size = cacheLineSize();
464
465 if (traceData)
466 traceData->setMem(addr, size, flags);
467
468 RequestPtr req = std::make_shared<Request>(
469 addr, size, flags, dataRequestorId(), pc, thread->contextId());
470 req->setByteEnable(byte_enable);
471
472 req->taskId(taskId());
473
474 Addr split_addr = roundDown(addr + size - 1, block_size);
475 assert(split_addr <= addr || split_addr - addr < block_size);
476
478 if (split_addr > addr) {
479 RequestPtr req1, req2;
480 assert(!req->isLLSC() && !req->isSwap());
481 req->splitOnVaddr(split_addr, req1, req2);
482
483 WholeTranslationState *state =
484 new WholeTranslationState(req, req1, req2, new uint8_t[size],
485 NULL, mode);
487 new DataTranslation<TimingSimpleCPU *>(this, state, 0);
489 new DataTranslation<TimingSimpleCPU *>(this, state, 1);
490
491 thread->mmu->translateTiming(req1, thread->getTC(), trans1, mode);
492 thread->mmu->translateTiming(req2, thread->getTC(), trans2, mode);
493 } else {
494 WholeTranslationState *state =
495 new WholeTranslationState(req, new uint8_t[size], NULL, mode);
497 = new DataTranslation<TimingSimpleCPU *>(this, state);
498 thread->mmu->translateTiming(req, thread->getTC(), translation, mode);
499 }
500
501 return NoFault;
502}
503
// handleWritePacket (signature line 505 missing): send the pending write in
// dcache_pkt to the dcache (or run it as a local/IPR access via IprEvent).
// Returns true iff ownership was handed off; on a rejected sendTimingReq the
// packet stays in dcache_pkt awaiting retry. Status updates on lines
// 507/514/517/519 were dropped by the scrape.
504bool
506{
508 SimpleThread* thread = t_info.thread;
509
510 const RequestPtr &req = dcache_pkt->req;
511 if (req->isLocalAccess()) {
512 Cycles delay = req->localAccessor(thread->getTC(), dcache_pkt);
513 new IprEvent(dcache_pkt, this, clockEdge(delay));
515 dcache_pkt = NULL;
516 } else if (!dcachePort.sendTimingReq(dcache_pkt)) {
518 } else {
520 // memory system takes ownership of packet
521 dcache_pkt = NULL;
522 }
523 return dcache_pkt == NULL;
524}
525
// writeMem: start a timing write. Copies (or zero-fills, for STORE_NO_DATA
// cache-clean requests) the store data into a fresh buffer owned by the
// translation state, splits line-crossing accesses, and starts the MMU
// translation; the access itself is issued from finishTranslation().
// Lines 571/573/581 (DataTranslation variable declarations, presumably)
// were dropped by the scrape.
526Fault
527TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
528 Addr addr, Request::Flags flags, uint64_t *res,
529 const std::vector<bool>& byte_enable)
530{
532 SimpleThread* thread = t_info.thread;
533
534 uint8_t *newData = new uint8_t[size];
535 const Addr pc = thread->pcState().instAddr();
536 unsigned block_size = cacheLineSize();
538
539 if (data == NULL) {
540 assert(flags & Request::STORE_NO_DATA);
541 // This must be a cache block cleaning request
542 memset(newData, 0, size);
543 } else {
544 memcpy(newData, data, size);
545 }
546
547 if (traceData)
548 traceData->setMem(addr, size, flags);
549
550 RequestPtr req = std::make_shared<Request>(
551 addr, size, flags, dataRequestorId(), pc, thread->contextId());
552 req->setByteEnable(byte_enable);
553
554 req->taskId(taskId());
555
556 Addr split_addr = roundDown(addr + size - 1, block_size);
557 assert(split_addr <= addr || split_addr - addr < block_size);
558
560
561 // TODO: TimingSimpleCPU doesn't support arbitrarily long multi-line mem.
562 // accesses yet
563
564 if (split_addr > addr) {
565 RequestPtr req1, req2;
566 assert(!req->isLLSC() && !req->isSwap());
567 req->splitOnVaddr(split_addr, req1, req2);
568
569 WholeTranslationState *state =
570 new WholeTranslationState(req, req1, req2, newData, res, mode);
572 new DataTranslation<TimingSimpleCPU *>(this, state, 0);
574 new DataTranslation<TimingSimpleCPU *>(this, state, 1);
575
576 thread->mmu->translateTiming(req1, thread->getTC(), trans1, mode);
577 thread->mmu->translateTiming(req2, thread->getTC(), trans2, mode);
578 } else {
579 WholeTranslationState *state =
580 new WholeTranslationState(req, newData, res, mode);
582 new DataTranslation<TimingSimpleCPU *>(this, state);
583 thread->mmu->translateTiming(req, thread->getTC(), translation, mode);
584 }
585
586 // Translation faults will be returned via finishTranslation()
587 return NoFault;
588}
589
// initiateMemAMO (first signature line 591 missing): start a timing atomic
// memory operation. The AMO functor travels with the Request; line-crossing
// AMOs are rejected outright because the cache cannot lock two lines.
// Translation is asynchronous; the access is issued from finishTranslation().
590Fault
592 Request::Flags flags,
593 AtomicOpFunctorPtr amo_op)
594{
596 SimpleThread* thread = t_info.thread;
597
598 Fault fault;
599 const Addr pc = thread->pcState().instAddr();
600 unsigned block_size = cacheLineSize();
602
603 if (traceData)
604 traceData->setMem(addr, size, flags);
605
606 RequestPtr req = std::make_shared<Request>(addr, size, flags,
607 dataRequestorId(), pc, thread->contextId(),
608 std::move(amo_op));
609
610 assert(req->hasAtomicOpFunctor());
611
612 req->taskId(taskId());
613
614 Addr split_addr = roundDown(addr + size - 1, block_size);
615
616 // AMO requests that access across a cache line boundary are not
617 // allowed since the cache does not guarantee AMO ops to be executed
618 // atomically in two cache lines
619 // For ISAs such as x86 that requires AMO operations to work on
620 // accesses that cross cache-line boundaries, the cache needs to be
621 // modified to support locking both cache lines to guarantee the
622 // atomicity.
623 if (split_addr > addr) {
624 panic("AMO requests should not access across a cache line boundary\n");
625 }
626
628
629 WholeTranslationState *state =
630 new WholeTranslationState(req, new uint8_t[size], NULL, mode);
632 = new DataTranslation<TimingSimpleCPU *>(this, state);
633 thread->mmu->translateTiming(req, thread->getTC(), translation, mode);
634
635 return NoFault;
636}
637
// threadSnoop (signature line 639 missing): forward a snoop packet to every
// hardware thread except the sender — wake threads whose address monitor
// matches, and let each ISA clear LLSC reservations hit by the snoop.
638void
640{
641 for (ThreadID tid = 0; tid < numThreads; tid++) {
642 if (tid != sender) {
643 if (getCpuAddrMonitor(tid)->doMonitor(pkt)) {
644 wakeup(tid);
645 }
646 threadInfo[tid]->thread->getIsaPtr()->handleLockedSnoop(pkt,
647 dcachePort.cacheBlockMask);
648 }
649 }
650}
651
// finishTranslation (signature line 653 missing): MMU callback once address
// translation of a read/write/AMO completes. On a fault, free the data
// buffer and requests and route through translationFault(); otherwise issue
// the access via sendData()/sendSplitData(). The state object is consumed
// here in both cases.
652void
654{
656
657 if (state->getFault() != NoFault) {
658 if (state->isPrefetch()) {
659 state->setNoFault();
660 }
661 delete [] state->data;
662 state->deleteReqs();
663 translationFault(state->getFault());
664 } else {
665 if (!state->isSplit) {
666 sendData(state->mainReq, state->data, state->res,
667 state->mode == BaseMMU::Read);
668 } else {
669 sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
670 state->data, state->mode == BaseMMU::Read);
671 }
672 }
673
674 delete state;
675}
676
677
// fetch (signature line 679 missing): start fetching the next instruction.
// Skips the icache when executing from the microcode ROM or inside a macro-op
// and calls completeIfetch(NULL) directly; otherwise builds an ifetch Request
// and starts its timing translation (sendFetch is the callback, presumably —
// the translation argument on line 709 was dropped by the scrape).
678void
680{
681 // Change thread if multi-threaded
683
685 SimpleThread* thread = t_info.thread;
686
687 DPRINTF(SimpleCPU, "Fetch\n");
688
689 if (!curStaticInst || !curStaticInst->isDelayedCommit()) {
692 }
693
694 // We must have just got suspended by a PC event
695 if (_status == Idle)
696 return;
697
698 MicroPC upc = thread->pcState().microPC();
699 bool needToFetch = !isRomMicroPC(upc) && !curMacroStaticInst;
700
701 if (needToFetch) {
703 RequestPtr ifetch_req = std::make_shared<Request>();
704 ifetch_req->taskId(taskId());
705 ifetch_req->setContext(thread->contextId());
706 setupFetchRequest(ifetch_req);
707 DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr());
708 thread->mmu->translateTiming(ifetch_req, thread->getTC(),
710 } else {
712 completeIfetch(NULL);
713
716 }
717}
718
719
// sendFetch (first signature line 721 missing): callback from the ifetch
// translation. On success, point the fetch packet at the decoder's input
// buffer and send it to the icache (parking it for retry if rejected); on a
// translation fault, skip the access and advance straight to the fault
// handler. Lines 729/735/738/745/749-750 (packet creation and status
// updates) were dropped by the scrape.
720void
722 ThreadContext *tc)
723{
724 auto &decoder = threadInfo[curThread]->thread->decoder;
725
726 if (fault == NoFault) {
727 DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n",
728 req->getVaddr(), req->getPaddr());
730 ifetch_pkt->dataStatic(decoder->moreBytesPtr());
731 DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());
732
733 if (!icachePort.sendTimingReq(ifetch_pkt)) {
734 // Need to wait for retry
736 } else {
737 // Need to wait for cache to respond
739 // ownership of packet transferred to memory system
740 ifetch_pkt = NULL;
741 }
742 } else {
743 DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr());
744 // fetch fault: advance directly to next instruction (fault handler)
746 advanceInst(fault);
747 }
748
751}
752
753
// advanceInst (signature line 755 missing): move on to the next instruction
// after the current one completes. A fault inside an HTM transaction is
// replaced by an HTM abort fault; other faults invoke their handler via
// advancePC and reschedule the fetch event (with a longer stall for syscall
// retries). On the no-fault path, advance the PC, honor a pending drain,
// and re-enter fetch(). Lines 757/774/777-778/795/797/809/811 were dropped
// by the scrape.
754void
756{
758
759 if (_status == Faulting)
760 return;
761
762 if (fault != NoFault) {
763 // hardware transactional memory
764 // If a fault occurred within a transaction
765 // ensure that the transaction aborts
766 if (t_info.inHtmTransactionalState() &&
767 !std::dynamic_pointer_cast<GenericHtmFailureFault>(fault)) {
768 DPRINTF(HtmCpu, "fault (%s) occurred - "
769 "replacing with HTM abort fault htmUid=%u\n",
770 fault->name(), t_info.getHtmTransactionUid());
771
772 Fault tmfault = std::make_shared<GenericHtmFailureFault>(
773 t_info.getHtmTransactionUid(),
775
776 advancePC(tmfault);
779 return;
780 }
781
782 DPRINTF(SimpleCPU, "Fault occured. Handling the fault\n");
783
784 advancePC(fault);
785
786 // A syscall fault could suspend this CPU (e.g., futex_wait)
787 // If the _status is not Idle, schedule an event to fetch the next
788 // instruction after 'stall' ticks.
789 // If the cpu has been suspended (i.e., _status == Idle), another
790 // cpu will wake this cpu up later.
791 if (_status != Idle) {
792 DPRINTF(SimpleCPU, "Scheduling fetch event after the Fault\n");
793
794 Tick stall = std::dynamic_pointer_cast<SyscallRetryFault>(fault) ?
796 reschedule(fetchEvent, stall, true);
798 }
799
800 return;
801 }
802
803 if (!t_info.stayAtPC)
804 advancePC(fault);
805
806 if (tryCompleteDrain())
807 return;
808
810
812 // kick off fetch of next instruction... callback from icache
813 // response will cause that instruction to be executed,
814 // keeping the CPU running.
815 fetch();
816 }
817}
818
819
// completeIfetch (signature line 822 missing): run the instruction once its
// fetch completes (pkt may be NULL when fetch was skipped, e.g. microcode
// ROM / macro-op continuation). Memory-reference instructions only start
// their access here (initiateAcc) and finish in completeDataAccess(); all
// other instructions execute fully and advance immediately. Also tracks HTM
// transaction starts. Lines 823/834-837/864/896 were dropped by the scrape —
// note the brace imbalance around lines 877-878 is a scrape artifact, not
// the original code.
820void
822{
824
825 DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ?
826 pkt->getAddr() : 0);
827
828 // received a response from the icache: execute the received
829 // instruction
830 panic_if(pkt && pkt->isError(), "Instruction fetch (%s) failed: %s",
831 pkt->getAddrRange().to_string(), pkt->print());
832 assert(_status == IcacheWaitResponse);
833
835
838
839 if (pkt)
840 pkt->req->setAccessLatency();
841

843 preExecute();
844
845 // hardware transactional memory
846 if (curStaticInst && curStaticInst->isHtmStart()) {
847 // if this HtmStart is not within a transaction,
848 // then assign it a new htmTransactionUid
849 if (!t_info.inHtmTransactionalState())
850 t_info.newHtmTransactionUid();
851 SimpleThread* thread = t_info.thread;
852 thread->htmTransactionStarts++;
853 DPRINTF(HtmCpu, "htmTransactionStarts++=%u\n",
854 thread->htmTransactionStarts);
855 }
856
857 if (curStaticInst && curStaticInst->isMemRef()) {
858 // load or store: just send to dcache
859 Fault fault = curStaticInst->initiateAcc(&t_info, traceData);
860
861 // If we're not running now the instruction will complete in a dcache
862 // response callback or the instruction faulted and has started an
863 // ifetch
865 if (fault != NoFault && traceData) {
866 traceFault();
867 }
868
869 if (fault == NoFault) {
870 postExecute();
871 }
872 // @todo remove me after debugging with legion done
873 if (curStaticInst && (!curStaticInst->isMicroop() ||
874 curStaticInst->isFirstMicroop()))
875 instCnt++;
876 advanceInst(fault);
877 }
878 } else if (curStaticInst) {
879 // non-memory instruction: execute completely now
880 Fault fault = curStaticInst->execute(&t_info, traceData);
881
882 // keep an instruction count
883 if (fault == NoFault) {
884 postExecute();
885 countInst();
886 } else if (traceData) {
887 traceFault();
888 }
889
890 // @todo remove me after debugging with legion done
891 if (curStaticInst && (!curStaticInst->isMicroop() ||
892 curStaticInst->isFirstMicroop()))
893 instCnt++;
894 advanceInst(fault);
895 } else {
897 }
898
899 if (pkt) {
900 delete pkt;
901 }
902}
903
// Lines 904-909: ITickEvent::process fragment — body lost in the scrape.
904void
909
// IcachePort::recvTimingResp (signature line 911 missing): accept an icache
// response; processing is deferred to the next CPU clock edge via tickEvent.
// HTM failures on the instruction stream are unsupported and fatal.
910bool
912{
913 DPRINTF(SimpleCPU, "Received fetch response %#x\n", pkt->getAddr());
914
915 // hardware transactional memory
916 // Currently, there is no support for tracking instruction fetches
917 // in an transaction's read set.
918 if (pkt->htmTransactionFailedInCache()) {
919 panic("HTM transactional support for"
920 " instruction stream not yet supported\n");
921 }
922
923 // we should only ever see one response per cycle since we only
924 // issue a new request once this response is sunk
925 assert(!tickEvent.scheduled());
926 // delay processing of returned data until next CPU clock edge
927 tickEvent.schedule(pkt, cpu->clockEdge());
928
929 return true;
930}
931
// IcachePort::recvReqRetry (signature line 933 missing): the icache is ready
// again — resend the parked fetch packet; on success the CPU moves to
// IcacheWaitResponse and relinquishes packet ownership.
932void
934{
935 // we shouldn't get a retry unless we have a packet that we're
936 // waiting to transmit
937 assert(cpu->ifetch_pkt != NULL);
938 assert(cpu->_status == IcacheRetry);
939 PacketPtr tmp = cpu->ifetch_pkt;
940 if (sendTimingReq(tmp)) {
941 cpu->_status = IcacheWaitResponse;
942 cpu->ifetch_pkt = NULL;
943 }
944}
945
// completeDataAccess (signature line 947 missing): finish a load/store/AMO
// when the dcache response arrives. For split accesses, fold each fragment
// back into the "big" packet and return early until both fragments are in.
// Then complete the instruction (completeAcc), converting a cache-reported
// HTM failure on the read path into a GenericHtmFailureFault, and advance.
// Lines 951/959/964-965/984/990-992/1013/1032 (t_info setup, status asserts,
// HTM propagation arguments, and the getHtmTransactionFailedInCacheRC call,
// presumably) were dropped by the scrape.
946void
948{
949 // hardware transactional memory
950
952 [[maybe_unused]] const bool is_htm_speculative =
953 t_info->inHtmTransactionalState();
954
955 // received a response from the dcache: complete the load or store
956 // instruction
957 panic_if(pkt->isError(), "Data access (%s) failed: %s",
958 pkt->getAddrRange().to_string(), pkt->print());
960 pkt->req->getFlags().isSet(Request::NO_ACCESS));
961
962 pkt->req->setAccessLatency();
963
966
967 if (pkt->senderState) {
968 // hardware transactional memory
969 // There shouldn't be HtmCmds occurring in multipacket requests
970 if (pkt->req->isHTMCmd()) {
971 panic("unexpected HTM case");
972 }
973
974 SplitFragmentSenderState * send_state =
975 dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
976 assert(send_state);
977 PacketPtr big_pkt = send_state->bigPkt;
978 delete send_state;
979
980 if (pkt->isHtmTransactional()) {
981 assert(is_htm_speculative);
982
983 big_pkt->setHtmTransactional(
985 );
986 }
987
988 if (pkt->htmTransactionFailedInCache()) {
989 assert(is_htm_speculative);
992 );
993 }
994
995 delete pkt;
996
997 SplitMainSenderState * main_send_state =
998 dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
999 assert(main_send_state);
1000 // Record the fact that this packet is no longer outstanding.
1001 assert(main_send_state->outstanding != 0);
1002 main_send_state->outstanding--;
1003
1004 if (main_send_state->outstanding) {
1005 return;
1006 } else {
1007 delete main_send_state;
1008 big_pkt->senderState = NULL;
1009 pkt = big_pkt;
1010 }
1011 }
1012
1014
1015 Fault fault;
1016
1017 // hardware transactional memory
1018 // sanity checks
1019 // ensure htmTransactionUids are equivalent
1020 if (pkt->isHtmTransactional())
1021 assert (pkt->getHtmTransactionUid() ==
1022 t_info->getHtmTransactionUid());
1023
1024 // can't have a packet that fails a transaction while not in a transaction
1025 if (pkt->htmTransactionFailedInCache())
1026 assert(is_htm_speculative);
1027
1028 // shouldn't fail through stores because this would be inconsistent w/ O3
1029 // which cannot fault after the store has been sent to memory
1030 if (pkt->htmTransactionFailedInCache() && !pkt->isWrite()) {
1031 const HtmCacheFailure htm_rc =
1033 DPRINTF(HtmCpu, "HTM abortion in cache (rc=%s) detected htmUid=%u\n",
1034 htmFailureToStr(htm_rc), pkt->getHtmTransactionUid());
1035
1036 // Currently there are only two reasons why a transaction would
1037 // fail in the memory subsystem--
1038 // (1) A transactional line was evicted from the cache for
1039 // space (or replacement policy) reasons.
1040 // (2) Another core/device requested a cache line that is in this
1041 // transaction's read/write set that is incompatible with the
1042 // HTM's semantics, e.g. another core requesting exclusive access
1043 // of a line in this core's read set.
1044 if (htm_rc == HtmCacheFailure::FAIL_SELF) {
1045 fault = std::make_shared<GenericHtmFailureFault>(
1046 t_info->getHtmTransactionUid(),
1048 } else if (htm_rc == HtmCacheFailure::FAIL_REMOTE) {
1049 fault = std::make_shared<GenericHtmFailureFault>(
1050 t_info->getHtmTransactionUid(),
1052 } else {
1053 panic("HTM - unhandled rc %s", htmFailureToStr(htm_rc));
1054 }
1055 } else {
1056 fault = curStaticInst->completeAcc(pkt, t_info,
1057 traceData);
1058 }
1059
1060 // hardware transactional memory
1061 // Track HtmStop instructions,
1062 // e.g. instructions which commit a transaction.
1063 if (curStaticInst && curStaticInst->isHtmStop()) {
1064 t_info->thread->htmTransactionStops++;
1065 DPRINTF(HtmCpu, "htmTransactionStops++=%u\n",
1066 t_info->thread->htmTransactionStops);
1067 }
1068
1069 // keep an instruction count
1070 if (fault == NoFault) {
1071 postExecute();
1072 countInst();
1073 } else if (traceData) {
1074 traceFault();
1075 }
1076
1077 delete pkt;
1078
1079 advanceInst(fault);
1080}
1081
// updateCycleCounts (signature line 1083 missing): charge the cycles elapsed
// since the last update to the cycle statistics. Line 1089 (the
// previousCycle update, presumably) was dropped by the scrape.
1082void
1084{
1085 const Cycles delta(curCycle() - previousCycle);
1086
1087 baseStats.numCycles += delta;
1088
1090}
1091
// DcachePort::recvTimingSnoopReq (signature line 1093 missing): handle a
// timing snoop — wake threads whose address monitors hit, clear LLSC
// reservations on invalidations/writes, and answer TLBI_EXT_SYNC requests
// immediately with a completion message. Lines 1109/1116 (the cache block
// mask and the TLBI_EXT_SYNC_COMP command argument, presumably) were dropped
// by the scrape.
1092void
1094{
1095 for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
1096 if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
1097 cpu->wakeup(tid);
1098 }
1099 }
1100
1101 // Making it uniform across all CPUs:
1102 // The CPUs need to be woken up only on an invalidation packet
1103 // (when using caches) or on an incoming write packet (when not
1104 // using caches) It is not necessary to wake up the processor on
1105 // all incoming packets
1106 if (pkt->isInvalidate() || pkt->isWrite()) {
1107 for (auto &t_info : cpu->threadInfo) {
1108 t_info->thread->getIsaPtr()->handleLockedSnoop(pkt,
1110 }
1111 } else if (pkt->req && pkt->req->isTlbiExtSync()) {
1112 // We received a TLBI_EXT_SYNC request.
1113 // In a detailed sim we would wait for memory ops to complete,
1114 // but in our simple case we just respond immediately
1115 auto reply_req = Request::createMemManagement(
1117 cpu->dataRequestorId());
1118
1119 // Extra Data = the transaction ID of the Sync we're completing
1120 reply_req->setExtraData(pkt->req->getExtraData());
1121 PacketPtr reply_pkt = Packet::createRead(reply_req);
1122
1123 // TODO - reserve some credit for these responses?
1124 if (!sendTimingReq(reply_pkt)) {
1125 panic("Couldn't send TLBI_EXT_SYNC_COMP message");
1126 }
1127 }
1128}
1129
// DcachePort::recvFunctionalSnoop (signature line 1131 missing): functional
// snoops only poke the per-thread address monitors, waking matching threads.
1130void
1132{
1133 for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
1134 if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
1135 cpu->wakeup(tid);
1136 }
1137 }
1138}
1139
// DcachePort::recvTimingResp (signature line 1141 missing): accept a dcache
// response and defer its processing to the next CPU clock edge. If the tick
// event is already busy (two split-fragment responses in the same tick),
// reject the packet and schedule a retry one cycle later.
1140bool
1142{
1143 DPRINTF(SimpleCPU, "Received load/store response %#x\n", pkt->getAddr());
1144
1145 // The timing CPU is not really ticked, instead it relies on the
1146 // memory system (fetch and load/store) to set the pace.
1147 if (!tickEvent.scheduled()) {
1148 // Delay processing of returned data until next CPU clock edge
1149 tickEvent.schedule(pkt, cpu->clockEdge());
1150 return true;
1151 } else {
1152 // In the case of a split transaction and a cache that is
1153 // faster than a CPU we could get two responses in the
1154 // same tick, delay the second one
1155 if (!retryRespEvent.scheduled())
1156 cpu->schedule(retryRespEvent, cpu->clockEdge(Cycles(1)));
1157 return false;
1158 }
1159}
1160
// DTickEvent::process (signature line 1162 missing): deferred dcache-response
// handler — hand the latched packet to completeDataAccess.
1161void
1163{
1164 cpu->completeDataAccess(pkt);
1165}
1166
// DcachePort::recvReqRetry (signature line 1168 missing): the dcache is ready
// again — resend the parked data packet. For split accesses, clear this
// fragment's pending flag and immediately try the sibling fragment; once all
// fragments are out, move to DcacheWaitResponse and drop ownership.
1167void
1169{
1170 // we shouldn't get a retry unless we have a packet that we're
1171 // waiting to transmit
1172 assert(cpu->dcache_pkt != NULL);
1173 assert(cpu->_status == DcacheRetry);
1174 PacketPtr tmp = cpu->dcache_pkt;
1175 if (tmp->senderState) {
1176 // This is a packet from a split access.
1177 SplitFragmentSenderState * send_state =
1178 dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
1179 assert(send_state);
1180 PacketPtr big_pkt = send_state->bigPkt;
1181
1182 SplitMainSenderState * main_send_state =
1183 dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
1184 assert(main_send_state);
1185
1186 if (sendTimingReq(tmp)) {
1187 // If we were able to send without retrying, record that fact
1188 // and try sending the other fragment.
1189 send_state->clearFromParent();
1190 int other_index = main_send_state->getPendingFragment();
1191 if (other_index > 0) {
1192 tmp = main_send_state->fragments[other_index];
1193 cpu->dcache_pkt = tmp;
1194 if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
1195 (big_pkt->isWrite() && cpu->handleWritePacket())) {
1196 main_send_state->fragments[other_index] = NULL;
1197 }
1198 } else {
1199 cpu->_status = DcacheWaitResponse;
1200 // memory system takes ownership of packet
1201 cpu->dcache_pkt = NULL;
1202 }
1203 }
1204 } else if (sendTimingReq(tmp)) {
1205 cpu->_status = DcacheWaitResponse;
1206 // memory system takes ownership of packet
1207 cpu->dcache_pkt = NULL;
1208 }
1209}
1210
// IprEvent constructor (first signature line 1211 missing): latch the packet
// and CPU and self-schedule at tick t — used to model the latency of
// local/IPR accesses handled outside the memory system.
1212 Tick t)
1213 : pkt(_pkt), cpu(_cpu)
1214{
1215 cpu->schedule(this, t);
1216}

// IprEvent::process (signature line 1219 missing): deliver the completed
// local access as if it were a dcache response.
1218void
1220{
1221 cpu->completeDataAccess(pkt);
1222}

// IprEvent::description (signature line 1225 missing): human-readable event
// name for tracing.
1224const char *
1226{
1227 return "Timing Simple CPU Delay IPR event";
1228}
1229
1230
// printAddr (signature line 1232 missing): debug helper — delegate address
// printing to the dcache port.
1231void
1233{
1234 dcachePort.printAddr(a);
1235}
1236
// initiateMemMgmtCmd (signature line 1238 missing): issue a memory-management
// command (HTM start/commit/cancel or TLBI) to the memory subsystem as an
// 8-byte dummy read at address 0. The 0xdeadbeef payload is a sanity marker
// the memory subsystem is expected to overwrite. Always returns NoFault.
1237Fault
1239{
1241 SimpleThread* thread = t_info.thread;
1242
1243 const Addr addr = 0x0ul;
1244 const Addr pc = thread->pcState().instAddr();
1245 const int size = 8;
1246
1247 if (traceData)
1248 traceData->setMem(addr, size, flags);
1249
1250 RequestPtr req = std::make_shared<Request>(
1251 addr, size, flags, dataRequestorId());
1252
1253 req->setPC(pc);
1254 req->setContext(thread->contextId());
1255 req->taskId(taskId());
1256 req->setInstCount(t_info.numInst);
1257
1258 assert(req->isHTMCmd() || req->isTlbiCmd());
1259
1260 // Use the payload as a sanity check,
1261 // the memory subsystem will clear allocated data
1262 uint8_t *data = new uint8_t[size];
1263 assert(data);
1264 uint64_t rc = 0xdeadbeeflu;
1265 memcpy (data, &rc, size);
1266
1267 // debugging output
1268 if (req->isHTMCmd()) {
1269 if (req->isHTMStart())
1270 DPRINTF(HtmCpu, "HTMstart htmUid=%u\n",
1271 t_info.getHtmTransactionUid());
1272 else if (req->isHTMCommit())
1273 DPRINTF(HtmCpu, "HTMcommit htmUid=%u\n",
1274 t_info.getHtmTransactionUid());
1275 else if (req->isHTMCancel())
1276 DPRINTF(HtmCpu, "HTMcancel htmUid=%u\n",
1277 t_info.getHtmTransactionUid());
1278 else
1279 panic("initiateMemMgmtCmd: unknown HTM CMD");
1280 }
1281
1282 sendData(req, data, nullptr, true);
1283
1284 return NoFault;
1285}
1286
// htmSendAbortSignal (signature lines 1288-1289 missing): notify the L1
// d-cache (Ruby) that thread `tid` has aborted its HTM transaction, by
// issuing an HTM-abort request (8-byte dummy read at address 0) carrying the
// abort cause. Line 1298 (the flag value assigned to `flags`, presumably an
// HTM_ABORT request flag) was dropped by the scrape.
1287void
1290{
1291 SimpleExecContext& t_info = *threadInfo[tid];
1292 SimpleThread* thread = t_info.thread;
1293
1294 const Addr addr = 0x0ul;
1295 const Addr pc = thread->pcState().instAddr();
1296 const int size = 8;
1297 const Request::Flags flags =
1299
1300 if (traceData)
1301 traceData->setMem(addr, size, flags);
1302
1303 // notify l1 d-cache (ruby) that core has aborted transaction
1304
1305 RequestPtr req = std::make_shared<Request>(
1306 addr, size, flags, dataRequestorId());
1307
1308 req->setPC(pc);
1309 req->setContext(thread->contextId());
1310 req->taskId(taskId());
1311 req->setInstCount(t_info.numInst);
1312 req->setHtmAbortCause(cause);
1313
1314 assert(req->isHTMAbort());
1315
1316 uint8_t *data = new uint8_t[size];
1317 assert(data);
1318 uint64_t rc = 0lu;
1319 memcpy (data, &rc, size);
1320
1321 sendData(req, data, nullptr, true);
1322}
1323
1324} // namespace gem5
#define DPRINTF(x,...)
Definition trace.hh:209
const char data[]
RequestorID dataRequestorId() const
Reads this CPU's unique data requestor ID.
Definition base.hh:219
Cycles syscallRetryLatency
Definition base.hh:686
Tick instCnt
Instruction count used for SPARC misc register.
Definition base.hh:112
void init() override
init() is called after all C++ SimObjects have been created and all ports are connected.
Definition base.cc:330
Addr cacheLineSize() const
Get the cache line size of the system.
Definition base.hh:421
AddressMonitor * getCpuAddrMonitor(ThreadID tid)
Definition base.hh:680
void updateCycleCounters(CPUState state)
base method keeping track of cycle progression
Definition base.hh:585
void schedulePowerGatingEvent()
Definition base.cc:524
uint32_t taskId() const
Get cpu task id.
Definition base.hh:237
virtual void suspendContext(ThreadID thread_num)
Notify the CPU that the indicated context is now suspended.
Definition base.cc:577
gem5::BaseCPU::BaseCPUStats baseStats
ThreadID numThreads
Number of threads we're actually simulating (<= SMT_MAX_THREADS).
Definition base.hh:414
void deschedulePowerGatingEvent()
Definition base.cc:516
virtual void switchOut()
Prepare for another CPU to take over execution.
Definition base.cc:620
virtual void takeOverFrom(BaseCPU *cpu)
Load the state of a CPU from the previous CPU object, invoked on all new CPUs that are about to be sw...
Definition base.cc:634
std::vector< ThreadContext * > threadContexts
Definition base.hh:286
bool switchedOut() const
Determine if the CPU is switched out.
Definition base.hh:397
virtual void activateContext(ThreadID thread_num)
Notify the CPU that the indicated context is now active.
Definition base.cc:552
virtual void handleLockedRead(const RequestPtr &req)
Definition isa.hh:97
virtual bool handleLockedWrite(const RequestPtr &req, Addr cacheBlockMask)
Definition isa.hh:104
virtual void translateTiming(const RequestPtr &req, ThreadContext *tc, Translation *translation, Mode mode)
Definition mmu.cc:118
void preExecute()
Definition base.cc:347
void checkPcEventQueue()
Definition base.cc:123
BaseSimpleCPU(const BaseSimpleCPUParams &params)
Definition base.cc:83
ThreadID curThread
Definition base.hh:86
void wakeup(ThreadID tid) override
Definition base.cc:270
StaticInstPtr curMacroStaticInst
Definition base.hh:105
void checkForInterrupts()
Definition base.cc:292
void traceFault()
Handler used when encountering a fault; its purpose is to tear down the InstRecord.
Definition base.cc:281
void advancePC(const Fault &fault)
Definition base.cc:524
void swapActiveThread()
Definition base.cc:135
void setupFetchRequest(const RequestPtr &req)
Definition base.cc:323
std::list< ThreadID > activeThreads
Definition base.hh:101
std::vector< SimpleExecContext * > threadInfo
Definition base.hh:100
StaticInstPtr curStaticInst
Current instruction.
Definition base.hh:104
void postExecute()
Definition base.cc:431
trace::InstRecord * traceData
Definition base.hh:97
void serviceInstCountEvents()
Definition base.cc:340
Cycles curCycle() const
Determine the current cycle, corresponding to a tick aligned to a clock edge.
Tick clockEdge(Cycles cycles=Cycles(0)) const
Determine the tick when a cycle begins, by default the current one, but the argument also enables the...
Tick nextCycle() const
Based on the clock of the object, determine the start tick of the first cycle that is at least one cy...
Cycles is a wrapper class for representing cycle counts, i.e.
Definition types.hh:79
This class represents part of a data address translation.
Command responseCommand() const
Definition packet.hh:270
MicroPC microPC() const
Returns the current micropc.
Definition pcstate.hh:119
Addr instAddr() const
Returns the memory address of the instruction this PC points to.
Definition pcstate.hh:108
A Packet is used to encapsulate a transfer between two objects in the memory system (e....
Definition packet.hh:295
bool isRead() const
Definition packet.hh:593
Addr getAddr() const
Definition packet.hh:807
bool isError() const
Definition packet.hh:622
void setHtmTransactionFailedInCache(const HtmCacheFailure ret_code)
Stipulates that this packet/request has returned from the cache hierarchy in a failed transaction.
Definition packet.cc:493
AddrRange getAddrRange() const
Get address range to which this packet belongs.
Definition packet.cc:243
void print(std::ostream &o, int verbosity=0, const std::string &prefix="") const
Definition packet.cc:368
static PacketPtr createWrite(const RequestPtr &req)
Definition packet.hh:1044
void dataStatic(T *p)
Set the data pointer to the following value that should not be freed.
Definition packet.hh:1175
SenderState * senderState
This packet's sender state.
Definition packet.hh:545
void makeResponse()
Take a request packet and modify it in place to be suitable for returning as a response to that reque...
Definition packet.hh:1062
HtmCacheFailure getHtmTransactionFailedInCacheRC() const
If a packet/request has returned from the cache hierarchy in a failed transaction,...
Definition packet.cc:509
bool isHtmTransactional() const
Returns whether or not this packet/request originates in the CPU executing in transactional mode,...
Definition packet.cc:523
bool isWrite() const
Definition packet.hh:594
uint64_t getHtmTransactionUid() const
If a packet/request originates in a CPU executing in transactional mode, i.e.
Definition packet.cc:529
static PacketPtr createRead(const RequestPtr &req)
Constructor-like methods that return Packets based on Request objects.
Definition packet.hh:1038
RequestPtr req
A pointer to the original request.
Definition packet.hh:377
void dataDynamic(T *p)
Set the data pointer to a value that should have delete [] called on it.
Definition packet.hh:1213
void setHtmTransactional(uint64_t val)
Stipulates that this packet/request originates in the CPU executing in transactional mode,...
Definition packet.cc:516
MemCmd cmd
The command field of the packet.
Definition packet.hh:372
bool isInvalidate() const
Definition packet.hh:609
bool htmTransactionFailedInCache() const
Returns whether or not this packet/request has returned from the cache hierarchy in a failed transact...
Definition packet.cc:503
bool sendTimingReq(PacketPtr pkt)
Attempt to send a timing request to the responder port by calling its corresponding receive function.
Definition port.hh:603
static RequestPtr createMemManagement(Flags flags, RequestorID id)
Factory method for creating memory management requests, with unspecified addr and size.
Definition request.hh:538
@ STRICT_ORDER
The request is required to be strictly ordered by CPU models and is non-speculative.
Definition request.hh:135
@ PHYSICAL
The virtual address is also the physical address.
Definition request.hh:117
@ HTM_ABORT
The request aborts a HTM transaction.
Definition request.hh:216
@ TLBI_EXT_SYNC_COMP
The Request tells the interconnect that a remote TLB Sync request has completed.
Definition request.hh:252
@ NO_ACCESS
The request should not cause a memory access.
Definition request.hh:146
static const FlagsType STORE_NO_DATA
Definition request.hh:263
gem5::Flags< FlagsType > Flags
Definition request.hh:102
Counter numInst
PER-THREAD STATS.
uint64_t newHtmTransactionUid() const override
bool inHtmTransactionalState() const override
uint64_t getHtmTransactionUid() const override
The SimpleThread object provides a combination of the ThreadState object and the ThreadContext interf...
const PCStateBase & pcState() const override
ContextID contextId() const override
ThreadContext * getTC()
Returns the pointer to this SimpleThread's ThreadContext.
BaseISA * getIsaPtr() const override
ThreadContext is the external interface to all thread state for anything outside of the CPU.
virtual void recvReqRetry()
Called by the peer if sendTimingReq was called on this peer (causing recvTimingReq to be called on th...
Definition timing.cc:1168
virtual void recvTimingSnoopReq(PacketPtr pkt)
Snoop a coherence request, we need to check if this causes a wakeup event on a cpu that is monitoring...
Definition timing.cc:1093
virtual bool recvTimingResp(PacketPtr pkt)
Receive a timing response from the peer.
Definition timing.cc:1141
virtual void recvFunctionalSnoop(PacketPtr pkt)
Receive a functional snoop request packet from the peer.
Definition timing.cc:1131
virtual void recvReqRetry()
Called by the peer if sendTimingReq was called on this peer (causing recvTimingReq to be called on th...
Definition timing.cc:933
virtual bool recvTimingResp(PacketPtr pkt)
Receive a timing response from the peer.
Definition timing.cc:911
EventFunctionWrapper retryRespEvent
Definition timing.hh:185
bool tryCompleteDrain()
Try to complete a drain request.
Definition timing.cc:153
Fault initiateMemMgmtCmd(Request::Flags flags) override
hardware transactional memory & TLBI operations
Definition timing.cc:1238
bool isCpuDrained() const
Check if a system is in a drained state.
Definition timing.hh:362
void advanceInst(const Fault &fault)
Definition timing.cc:755
void switchOut() override
Prepare for another CPU to take over execution.
Definition timing.cc:169
PacketPtr ifetch_pkt
Definition timing.hh:261
EventFunctionWrapper fetchEvent
Definition timing.hh:335
DrainState drain() override
Provide a default implementation of the drain interface for objects that don't need draining.
Definition timing.cc:91
void suspendContext(ThreadID thread_num) override
Notify the CPU that the indicated context is now suspended.
Definition timing.cc:232
void threadSnoop(PacketPtr pkt, ThreadID sender)
Definition timing.cc:639
void drainResume() override
Resume execution after a successful drain.
Definition timing.cc:118
Fault initiateMemRead(Addr addr, unsigned size, Request::Flags flags, const std::vector< bool > &byte_enable=std::vector< bool >()) override
Definition timing.cc:453
void htmSendAbortSignal(ThreadID tid, uint64_t htm_uid, HtmFailureFaultCause) override
This function is used to instruct the memory subsystem that a transaction should be aborted and the s...
Definition timing.cc:1288
bool handleReadPacket(PacketPtr pkt)
Definition timing.cc:262
PacketPtr buildPacket(const RequestPtr &req, bool read)
Definition timing.cc:415
Fault writeMem(uint8_t *data, unsigned size, Addr addr, Request::Flags flags, uint64_t *res, const std::vector< bool > &byte_enable=std::vector< bool >()) override
Definition timing.cc:527
void sendSplitData(const RequestPtr &req1, const RequestPtr &req2, const RequestPtr &req, uint8_t *data, bool read)
Definition timing.cc:346
void translationFault(const Fault &fault)
Definition timing.cc:396
void sendData(const RequestPtr &req, uint8_t *data, uint64_t *res, bool read)
Definition timing.cc:297
FetchTranslation fetchTranslation
Definition timing.hh:135
void printAddr(Addr a)
Print state of address in memory system via PrintReq (for debugging).
Definition timing.cc:1232
PacketPtr dcache_pkt
Definition timing.hh:262
void verifyMemoryMode() const override
Verify that the system is in a memory mode supported by the CPU.
Definition timing.cc:199
void finishTranslation(WholeTranslationState *state)
Finish a DTB translation.
Definition timing.cc:653
void activateContext(ThreadID thread_num) override
Notify the CPU that the indicated context is now active.
Definition timing.cc:208
void completeIfetch(PacketPtr)
Definition timing.cc:821
TimingSimpleCPU(const BaseTimingSimpleCPUParams &params)
Definition timing.cc:76
void completeDataAccess(PacketPtr pkt)
Definition timing.cc:947
void init() override
init() is called after all C++ SimObjects have been created and all ports are connected.
Definition timing.cc:64
Fault initiateMemAMO(Addr addr, unsigned size, Request::Flags flags, AtomicOpFunctorPtr amo_op) override
Definition timing.cc:591
DcachePort dcachePort
Definition timing.hh:259
void buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2, const RequestPtr &req1, const RequestPtr &req2, const RequestPtr &req, uint8_t *data, bool read)
Definition timing.cc:421
IcachePort icachePort
Definition timing.hh:258
virtual ~TimingSimpleCPU()
Definition timing.cc:86
void takeOverFrom(BaseCPU *oldCPU) override
Load the state of a CPU from the previous CPU object, invoked on all new CPUs that are about to be sw...
Definition timing.cc:191
void sendFetch(const Fault &fault, const RequestPtr &req, ThreadContext *tc)
Definition timing.cc:721
This class captures the state of an address translation.
bool isPrefetch() const
Check if this request is a prefetch.
void deleteReqs()
Delete all requests that make up this translation.
void setNoFault()
Remove all faults from the translation.
Fault getFault() const
Determine whether this translation produced a fault.
STL vector class.
Definition stl.hh:37
std::string to_string() const
Get a string representation of the range.
std::unique_ptr< AtomicOpFunctor > AtomicOpFunctorPtr
Definition amo.hh:269
static constexpr T roundDown(const T &val, const U &align)
This function is used to align addresses in memory.
Definition intmath.hh:279
void signalDrainDone() const
Signal that an object is drained.
Definition drain.hh:310
DrainState drainState() const
Return the current drain state of an object.
Definition drain.hh:329
DrainState
Object drain/handover states.
Definition drain.hh:76
@ Draining
Draining buffers pending serialization/handover.
Definition drain.hh:78
@ Drained
Buffers drained, ready for serialization/handover.
Definition drain.hh:79
void deschedule(Event &event)
Definition eventq.hh:1021
void schedule(Event &event, Tick when)
Definition eventq.hh:1012
void reschedule(Event &event, Tick when, bool always=false)
Definition eventq.hh:1030
#define panic(...)
This implements a cprintf based panic() function.
Definition logging.hh:220
#define fatal(...)
This implements a cprintf based fatal() function.
Definition logging.hh:232
#define panic_if(cond,...)
Conditional panic macro that checks the supplied condition and only panics if the condition is true a...
Definition logging.hh:246
Bitfield< 4, 0 > mode
Definition misc_types.hh:74
Bitfield< 5 > t
Definition misc_types.hh:71
Bitfield< 8 > a
Definition misc_types.hh:66
Bitfield< 4 > pc
Bitfield< 0 > p
Bitfield< 0 > rc
Definition types.hh:87
Bitfield< 15 > system
Definition misc.hh:1032
Bitfield< 3 > addr
Definition types.hh:84
Copyright (c) 2024 Arm Limited All rights reserved.
Definition binary32.hh:36
std::shared_ptr< FaultBase > Fault
Definition types.hh:249
int16_t ThreadID
Thread index/ID type.
Definition types.hh:235
std::shared_ptr< Request > RequestPtr
Definition request.hh:94
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition types.hh:147
std::string htmFailureToStr(HtmFailureFaultCause cause)
Convert enum into string to be used for debug purposes.
Definition htm.cc:44
uint64_t Tick
Tick count type.
Definition types.hh:58
Packet * PacketPtr
HtmCacheFailure
Definition htm.hh:60
uint16_t MicroPC
Definition types.hh:149
static bool isRomMicroPC(MicroPC upc)
Definition types.hh:166
constexpr decltype(nullptr) NoFault
Definition types.hh:253
HtmFailureFaultCause
Definition htm.hh:48
output decoder
Definition nop.cc:61
Declaration of the Packet class.
virtual const char * description() const
Return a C string describing the event.
Definition timing.cc:1225
IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu, Tick t)
Definition timing.cc:1211
void schedule(PacketPtr _pkt, Tick t)
Definition timing.cc:70
const std::string & name()
Definition trace.cc:48

Generated on Mon Oct 27 2025 04:13:01 for gem5 by doxygen 1.14.0