gem5 v24.0.0.0
timing.cc
1/*
2 * Copyright 2014 Google, Inc.
3 * Copyright (c) 2010-2013,2015,2017-2018, 2020-2021 ARM Limited
4 * All rights reserved
5 *
6 * The license below extends only to copyright in the software and shall
7 * not be construed as granting a license to any other intellectual
8 * property including but not limited to intellectual property relating
9 * to a hardware implementation of the functionality of the software
10 * licensed hereunder. You may use the software subject to the license
11 * terms below provided that you ensure that this notice is replicated
12 * unmodified and in its entirety in all distributions of the software,
13 * modified or unmodified, in source code or in binary form.
14 *
15 * Copyright (c) 2002-2005 The Regents of The University of Michigan
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#include "cpu/simple/timing.hh"
43
45#include "base/compiler.hh"
46#include "cpu/exetrace.hh"
47#include "debug/Config.hh"
48#include "debug/Drain.hh"
49#include "debug/ExecFaulting.hh"
50#include "debug/HtmCpu.hh"
51#include "debug/Mwait.hh"
52#include "debug/SimpleCPU.hh"
53#include "mem/packet.hh"
54#include "mem/packet_access.hh"
55#include "params/BaseTimingSimpleCPU.hh"
56#include "sim/faults.hh"
57#include "sim/full_system.hh"
58#include "sim/system.hh"
59
60namespace gem5
61{
62
63void
68
69void
75
76TimingSimpleCPU::TimingSimpleCPU(const BaseTimingSimpleCPUParams &p)
78 dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0),
79 fetchEvent([this]{ fetch(); }, name())
80{
81 _status = Idle;
82}
83
84
85
89
92{
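    // Draining completes immediately only if the CPU is idle or stopped at an
    // instruction boundary; otherwise we return Draining and signal completion
    // later from tryCompleteDrain().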
93 // Deschedule any power gating event (if any)
95
96 if (switchedOut())
98
99 if (_status == Idle ||
101 DPRINTF(Drain, "No need to drain.\n");
102 activeThreads.clear();
103 return DrainState::Drained;
104 } else {
105 DPRINTF(Drain, "Requesting drain.\n");
106
107 // The fetch event can become descheduled if a drain didn't
108 // succeed on the first attempt. We need to reschedule it if
109 // the CPU is waiting for a microcode routine to complete.
112
114 }
115}
116
117void
119{
120 assert(!fetchEvent.scheduled());
121 if (switchedOut())
122 return;
123
124 DPRINTF(SimpleCPU, "Resume\n");
126
127 assert(!threadContexts.empty());
128
130
131 for (ThreadID tid = 0; tid < numThreads; tid++) {
132 if (threadInfo[tid]->thread->status() == ThreadContext::Active) {
133 threadInfo[tid]->execContextStats.notIdleFraction = 1;
134
135 activeThreads.push_back(tid);
136
138
139 // Fetch if any threads active
140 if (!fetchEvent.scheduled()) {
142 }
143 } else {
144 threadInfo[tid]->execContextStats.notIdleFraction = 0;
145 }
146 }
147
148 // Reschedule any power gating event (if any)
150}
151
152bool
154{
156 return false;
157
158 DPRINTF(Drain, "tryCompleteDrain.\n");
159 if (!isCpuDrained())
160 return false;
161
162 DPRINTF(Drain, "CPU done draining, processing drain event\n");
164
165 return true;
166}
167
168void
170{
172 [[maybe_unused]] SimpleThread* thread = t_info.thread;
173
174 // hardware transactional memory
175 // Cannot switch out the CPU in the middle of a transaction
176 assert(!t_info.inHtmTransactionalState());
177
179
180 assert(!fetchEvent.scheduled());
182 assert(!t_info.stayAtPC);
183 assert(thread->pcState().microPC() == 0);
184
187}
188
189
190void
197
198void
200{
201 if (!system->isTimingMode()) {
202 fatal("The timing CPU requires the memory system to be in "
203 "'timing' mode.\n");
204 }
205}
206
207void
209{
210 DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);
211
212 assert(thread_num < numThreads);
213
214 threadInfo[thread_num]->execContextStats.notIdleFraction = 1;
217
218 // kick things off by initiating the fetch of the next instruction
219 if (!fetchEvent.scheduled())
221
222 if (std::find(activeThreads.begin(), activeThreads.end(), thread_num)
223 == activeThreads.end()) {
224 activeThreads.push_back(thread_num);
225 }
226
227 BaseCPU::activateContext(thread_num);
228}
229
230
231void
233{
234 DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);
235
236 assert(thread_num < numThreads);
237 activeThreads.remove(thread_num);
238
239 // hardware transactional memory
240 // Cannot suspend context in the middle of a transaction.
241 assert(!threadInfo[curThread]->inHtmTransactionalState());
242
243 if (_status == Idle)
244 return;
245
247
248 threadInfo[thread_num]->execContextStats.notIdleFraction = 0;
249
250 if (activeThreads.empty()) {
251 _status = Idle;
252
253 if (fetchEvent.scheduled()) {
255 }
256 }
257
258 BaseCPU::suspendContext(thread_num);
259}
260
261bool
263{
265 SimpleThread* thread = t_info.thread;
266
267 const RequestPtr &req = pkt->req;
268
269 // hardware transactional memory
270 // sanity check
271 if (req->isHTMCmd()) {
272 assert(!req->isLocalAccess());
273 }
274
275 // We're about to issue a locked load, so tell the monitor
276 // to start caring about this address
277 if (pkt->isRead() && pkt->req->isLLSC()) {
278 thread->getIsaPtr()->handleLockedRead(pkt->req);
279 }
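    // Dispatch the request: local accesses complete through a scheduled
    // IprEvent; otherwise try the dcache port, and if it is busy keep the
    // packet in dcache_pkt so it can be resent from recvReqRetry().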
280 if (req->isLocalAccess()) {
281 Cycles delay = req->localAccessor(thread->getTC(), pkt);
282 new IprEvent(pkt, this, clockEdge(delay));
284 dcache_pkt = NULL;
285 } else if (!dcachePort.sendTimingReq(pkt)) {
287 dcache_pkt = pkt;
288 } else {
290 // memory system takes ownership of packet
291 dcache_pkt = NULL;
292 }
293 return dcache_pkt == NULL;
294}
295
296void
297TimingSimpleCPU::sendData(const RequestPtr &req, uint8_t *data, uint64_t *res,
298 bool read)
299{
301 SimpleThread* thread = t_info.thread;
302
303 PacketPtr pkt = buildPacket(req, read);
304 pkt->dataDynamic<uint8_t>(data);
305
306 // hardware transactional memory
307 // If the core is in transactional mode or if the request is HtmCMD
308 // to abort a transaction, the packet should reflect that it is
309 // transactional and also contain a HtmUid for debugging.
310 const bool is_htm_speculative = t_info.inHtmTransactionalState();
311 if (is_htm_speculative || req->isHTMAbort()) {
313 }
314 if (req->isHTMAbort())
315 DPRINTF(HtmCpu, "htmabort htmUid=%u\n", t_info.getHtmTransactionUid());
316
317 if (req->getFlags().isSet(Request::NO_ACCESS)) {
318 assert(!dcache_pkt);
319 pkt->makeResponse();
321 } else if (read) {
322 handleReadPacket(pkt);
323 } else {
324 bool do_access = true; // if false, the cache access is suppressed
325
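    // For a store-conditional, handleLockedWrite() checks the lock flag and
    // returns false when the store must fail; the access is then completed
    // locally instead of being sent to the cache.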
326 if (req->isLLSC()) {
327 do_access = thread->getIsaPtr()->handleLockedWrite(
329 } else if (req->isCondSwap()) {
330 assert(res);
331 req->setExtraData(*res);
332 }
333
334 if (do_access) {
335 dcache_pkt = pkt;
338 } else {
341 }
342 }
343}
344
345void
347 const RequestPtr &req, uint8_t *data, bool read)
348{
350 PacketPtr pkt1, pkt2;
351 buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
352
353 // hardware transactional memory
354 // HTM commands should never use SplitData
355 assert(!req1->isHTMCmd() && !req2->isHTMCmd());
356
357 // If the thread is executing transactionally,
358 // reflect this in the packets.
359 if (t_info.inHtmTransactionalState()) {
362 }
363
364 if (req->getFlags().isSet(Request::NO_ACCESS)) {
365 assert(!dcache_pkt);
366 pkt1->makeResponse();
367 completeDataAccess(pkt1);
368 } else if (read) {
369 SplitFragmentSenderState * send_state =
370 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
371 if (handleReadPacket(pkt1)) {
372 send_state->clearFromParent();
373 send_state = dynamic_cast<SplitFragmentSenderState *>(
374 pkt2->senderState);
375 if (handleReadPacket(pkt2)) {
376 send_state->clearFromParent();
377 }
378 }
379 } else {
380 dcache_pkt = pkt1;
381 SplitFragmentSenderState * send_state =
382 dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
383 if (handleWritePacket()) {
384 send_state->clearFromParent();
385 dcache_pkt = pkt2;
386 send_state = dynamic_cast<SplitFragmentSenderState *>(
387 pkt2->senderState);
388 if (handleWritePacket()) {
389 send_state->clearFromParent();
390 }
391 }
392 }
393}
394
395void
397{
398 // fault may be NoFault in cases where a fault is suppressed,
399 // for instance prefetches.
402
403 if ((fault != NoFault) && traceData) {
404 traceFault();
405 }
406
407 postExecute();
408
409 advanceInst(fault);
410}
411
414{
415 return read ? Packet::createRead(req) : Packet::createWrite(req);
416}
417
418void
420 const RequestPtr &req1, const RequestPtr &req2, const RequestPtr &req,
421 uint8_t *data, bool read)
422{
423 pkt1 = pkt2 = NULL;
424
425 assert(!req1->isLocalAccess() && !req2->isLocalAccess());
426
427 if (req->getFlags().isSet(Request::NO_ACCESS)) {
428 pkt1 = buildPacket(req, read);
429 return;
430 }
431
432 pkt1 = buildPacket(req1, read);
433 pkt2 = buildPacket(req2, read);
434
435 PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand());
436
437 pkt->dataDynamic<uint8_t>(data);
438 pkt1->dataStatic<uint8_t>(data);
439 pkt2->dataStatic<uint8_t>(data + req1->getSize());
440
441 SplitMainSenderState * main_send_state = new SplitMainSenderState;
442 pkt->senderState = main_send_state;
443 main_send_state->fragments[0] = pkt1;
444 main_send_state->fragments[1] = pkt2;
445 main_send_state->outstanding = 2;
446 pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
447 pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
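    // Each fragment packet carries a SplitFragmentSenderState pointing back at
    // the main packet; completeDataAccess() decrements the outstanding count to
    // know when both halves of the split access have returned.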
448}
449
450Fault
453 const std::vector<bool>& byte_enable)
454{
456 SimpleThread* thread = t_info.thread;
457
458 Fault fault;
459 const Addr pc = thread->pcState().instAddr();
460 unsigned block_size = cacheLineSize();
462
463 if (traceData)
464 traceData->setMem(addr, size, flags);
465
466 RequestPtr req = std::make_shared<Request>(
467 addr, size, flags, dataRequestorId(), pc, thread->contextId());
468 req->setByteEnable(byte_enable);
469
470 req->taskId(taskId());
471
472 Addr split_addr = roundDown(addr + size - 1, block_size);
473 assert(split_addr <= addr || split_addr - addr < block_size);
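    // split_addr is the cache-line-aligned address of the last byte touched;
    // if it lies above addr, the access crosses a line boundary and is broken
    // into two fragment requests that are translated independently.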
474
476 if (split_addr > addr) {
477 RequestPtr req1, req2;
478 assert(!req->isLLSC() && !req->isSwap());
479 req->splitOnVaddr(split_addr, req1, req2);
480
482 new WholeTranslationState(req, req1, req2, new uint8_t[size],
483 NULL, mode);
488
489 thread->mmu->translateTiming(req1, thread->getTC(), trans1, mode);
490 thread->mmu->translateTiming(req2, thread->getTC(), trans2, mode);
491 } else {
493 new WholeTranslationState(req, new uint8_t[size], NULL, mode);
496 thread->mmu->translateTiming(req, thread->getTC(), translation, mode);
497 }
498
499 return NoFault;
500}
501
502bool
504{
506 SimpleThread* thread = t_info.thread;
507
508 const RequestPtr &req = dcache_pkt->req;
509 if (req->isLocalAccess()) {
510 Cycles delay = req->localAccessor(thread->getTC(), dcache_pkt);
511 new IprEvent(dcache_pkt, this, clockEdge(delay));
513 dcache_pkt = NULL;
514 } else if (!dcachePort.sendTimingReq(dcache_pkt)) {
516 } else {
518 // memory system takes ownership of packet
519 dcache_pkt = NULL;
520 }
521 return dcache_pkt == NULL;
522}
523
524Fault
525TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
526 Addr addr, Request::Flags flags, uint64_t *res,
527 const std::vector<bool>& byte_enable)
528{
530 SimpleThread* thread = t_info.thread;
531
532 uint8_t *newData = new uint8_t[size];
533 const Addr pc = thread->pcState().instAddr();
534 unsigned block_size = cacheLineSize();
536
537 if (data == NULL) {
539 // This must be a cache block cleaning request
540 memset(newData, 0, size);
541 } else {
542 memcpy(newData, data, size);
543 }
544
545 if (traceData)
546 traceData->setMem(addr, size, flags);
547
548 RequestPtr req = std::make_shared<Request>(
549 addr, size, flags, dataRequestorId(), pc, thread->contextId());
550 req->setByteEnable(byte_enable);
551
552 req->taskId(taskId());
553
554 Addr split_addr = roundDown(addr + size - 1, block_size);
555 assert(split_addr <= addr || split_addr - addr < block_size);
556
558
559 // TODO: TimingSimpleCPU doesn't support arbitrarily long multi-line mem
560 // accesses yet.
561
562 if (split_addr > addr) {
563 RequestPtr req1, req2;
564 assert(!req->isLLSC() && !req->isSwap());
565 req->splitOnVaddr(split_addr, req1, req2);
566
568 new WholeTranslationState(req, req1, req2, newData, res, mode);
573
574 thread->mmu->translateTiming(req1, thread->getTC(), trans1, mode);
575 thread->mmu->translateTiming(req2, thread->getTC(), trans2, mode);
576 } else {
578 new WholeTranslationState(req, newData, res, mode);
581 thread->mmu->translateTiming(req, thread->getTC(), translation, mode);
582 }
583
584 // Translation faults will be returned via finishTranslation()
585 return NoFault;
586}
587
588Fault
591 AtomicOpFunctorPtr amo_op)
592{
594 SimpleThread* thread = t_info.thread;
595
596 Fault fault;
597 const Addr pc = thread->pcState().instAddr();
598 unsigned block_size = cacheLineSize();
600
601 if (traceData)
602 traceData->setMem(addr, size, flags);
603
604 RequestPtr req = std::make_shared<Request>(addr, size, flags,
605 dataRequestorId(), pc, thread->contextId(),
606 std::move(amo_op));
607
608 assert(req->hasAtomicOpFunctor());
609
610 req->taskId(taskId());
611
612 Addr split_addr = roundDown(addr + size - 1, block_size);
613
614 // AMO requests that access across a cache line boundary are not
615 // allowed since the cache does not guarantee AMO ops to be executed
616 // atomically in two cache lines.
617 // For ISAs such as x86 that require AMO operations to work on
618 // accesses that cross cache-line boundaries, the cache needs to be
619 // modified to support locking both cache lines to guarantee the
620 // atomicity.
621 if (split_addr > addr) {
622 panic("AMO requests should not access across a cache line boundary\n");
623 }
624
626
628 new WholeTranslationState(req, new uint8_t[size], NULL, mode);
631 thread->mmu->translateTiming(req, thread->getTC(), translation, mode);
632
633 return NoFault;
634}
635
636void
638{
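    // Propagate a memory access issued by one SMT thread to its siblings on
    // this CPU: wake any address monitors and update their LL/SC snoop state.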
639 for (ThreadID tid = 0; tid < numThreads; tid++) {
640 if (tid != sender) {
641 if (getCpuAddrMonitor(tid)->doMonitor(pkt)) {
642 wakeup(tid);
643 }
644 threadInfo[tid]->thread->getIsaPtr()->handleLockedSnoop(pkt,
646 }
647 }
648}
649
650void
652{
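    // Called back by the MMU once a data translation finishes: on a fault the
    // buffered data is discarded and the fault is taken, otherwise the
    // (possibly split) access is sent on to the dcache.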
654
655 if (state->getFault() != NoFault) {
656 if (state->isPrefetch()) {
657 state->setNoFault();
658 }
659 delete [] state->data;
660 state->deleteReqs();
661 translationFault(state->getFault());
662 } else {
663 if (!state->isSplit) {
664 sendData(state->mainReq, state->data, state->res,
665 state->mode == BaseMMU::Read);
666 } else {
667 sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
668 state->data, state->mode == BaseMMU::Read);
669 }
670 }
671
672 delete state;
673}
674
675
676void
678{
679 // Change thread if multi-threaded
681
683 SimpleThread* thread = t_info.thread;
684
685 DPRINTF(SimpleCPU, "Fetch\n");
686
690 }
691
692 // We must have just got suspended by a PC event
693 if (_status == Idle)
694 return;
695
696 MicroPC upc = thread->pcState().microPC();
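    // A fetch is only needed when we are not executing from the microcode ROM
    // and not in the middle of a macro-op; otherwise the next micro-op comes
    // from the current macro instruction and no icache access is required.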
697 bool needToFetch = !isRomMicroPC(upc) && !curMacroStaticInst;
698
699 if (needToFetch) {
701 RequestPtr ifetch_req = std::make_shared<Request>();
702 ifetch_req->taskId(taskId());
703 ifetch_req->setContext(thread->contextId());
704 setupFetchRequest(ifetch_req);
705 DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr());
706 thread->mmu->translateTiming(ifetch_req, thread->getTC(),
708 } else {
710 completeIfetch(NULL);
711
714 }
715}
716
717
718void
720 ThreadContext *tc)
721{
722 auto &decoder = threadInfo[curThread]->thread->decoder;
723
724 if (fault == NoFault) {
725 DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n",
726 req->getVaddr(), req->getPaddr());
728 ifetch_pkt->dataStatic(decoder->moreBytesPtr());
729 DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());
730
732 // Need to wait for retry
734 } else {
735 // Need to wait for cache to respond
737 // ownership of packet transferred to memory system
738 ifetch_pkt = NULL;
739 }
740 } else {
741 DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr());
742 // fetch fault: advance directly to next instruction (fault handler)
744 advanceInst(fault);
745 }
746
749}
750
751
752void
754{
756
757 if (_status == Faulting)
758 return;
759
760 if (fault != NoFault) {
761 // hardware transactional memory
762 // If a fault occurred within a transaction
763 // ensure that the transaction aborts
764 if (t_info.inHtmTransactionalState() &&
765 !std::dynamic_pointer_cast<GenericHtmFailureFault>(fault)) {
766 DPRINTF(HtmCpu, "fault (%s) occurred - "
767 "replacing with HTM abort fault htmUid=%u\n",
768 fault->name(), t_info.getHtmTransactionUid());
769
770 Fault tmfault = std::make_shared<GenericHtmFailureFault>(
771 t_info.getHtmTransactionUid(),
773
774 advancePC(tmfault);
777 return;
778 }
779
780 DPRINTF(SimpleCPU, "Fault occured. Handling the fault\n");
781
782 advancePC(fault);
783
784 // A syscall fault could suspend this CPU (e.g., futex_wait)
785 // If the _status is not Idle, schedule an event to fetch the next
786 // instruction after 'stall' ticks.
787 // If the cpu has been suspended (i.e., _status == Idle), another
788 // cpu will wake this cpu up later.
789 if (_status != Idle) {
790 DPRINTF(SimpleCPU, "Scheduling fetch event after the Fault\n");
791
792 Tick stall = std::dynamic_pointer_cast<SyscallRetryFault>(fault) ?
794 reschedule(fetchEvent, stall, true);
796 }
797
798 return;
799 }
800
801 if (!t_info.stayAtPC)
802 advancePC(fault);
803
804 if (tryCompleteDrain())
805 return;
806
808
810 // kick off fetch of next instruction... callback from icache
811 // response will cause that instruction to be executed,
812 // keeping the CPU running.
813 fetch();
814 }
815}
816
817
818void
820{
822
823 DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ?
824 pkt->getAddr() : 0);
825
826 // received a response from the icache: execute the received
827 // instruction
828 panic_if(pkt && pkt->isError(), "Instruction fetch (%s) failed: %s",
829 pkt->getAddrRange().to_string(), pkt->print());
830 assert(_status == IcacheWaitResponse);
831
833
836
837 if (pkt)
838 pkt->req->setAccessLatency();
839
840
841 preExecute();
842
843 // hardware transactional memory
845 // if this HtmStart is not within a transaction,
846 // then assign it a new htmTransactionUid
847 if (!t_info.inHtmTransactionalState())
848 t_info.newHtmTransactionUid();
849 SimpleThread* thread = t_info.thread;
850 thread->htmTransactionStarts++;
851 DPRINTF(HtmCpu, "htmTransactionStarts++=%u\n",
852 thread->htmTransactionStarts);
853 }
854
856 // load or store: just send to dcache
857 Fault fault = curStaticInst->initiateAcc(&t_info, traceData);
858
859 // If we're not running now the instruction will complete in a dcache
860 // response callback or the instruction faulted and has started an
861 // ifetch
863 if (fault != NoFault && traceData) {
864 traceFault();
865 }
866
867 postExecute();
868 // @todo remove me after debugging with legion done
871 instCnt++;
872 advanceInst(fault);
873 }
874 } else if (curStaticInst) {
875 // non-memory instruction: execute completely now
876 Fault fault = curStaticInst->execute(&t_info, traceData);
877
878 // keep an instruction count
879 if (fault == NoFault)
880 countInst();
881 else if (traceData) {
882 traceFault();
883 }
884
885 postExecute();
886 // @todo remove me after debugging with legion done
889 instCnt++;
890 advanceInst(fault);
891 } else {
893 }
894
895 if (pkt) {
896 delete pkt;
897 }
898}
899
900void
905
906bool
908{
909 DPRINTF(SimpleCPU, "Received fetch response %#x\n", pkt->getAddr());
910
911 // hardware transactional memory
912 // Currently, there is no support for tracking instruction fetches
913 // in a transaction's read set.
914 if (pkt->htmTransactionFailedInCache()) {
915 panic("HTM transactional support for"
916 " instruction stream not yet supported\n");
917 }
918
919 // we should only ever see one response per cycle since we only
920 // issue a new request once this response is sunk
921 assert(!tickEvent.scheduled());
922 // delay processing of returned data until next CPU clock edge
924
925 return true;
926}
927
928void
930{
931 // we shouldn't get a retry unless we have a packet that we're
932 // waiting to transmit
933 assert(cpu->ifetch_pkt != NULL);
934 assert(cpu->_status == IcacheRetry);
935 PacketPtr tmp = cpu->ifetch_pkt;
936 if (sendTimingReq(tmp)) {
938 cpu->ifetch_pkt = NULL;
939 }
940}
941
942void
944{
945 // hardware transactional memory
946
948 [[maybe_unused]] const bool is_htm_speculative =
949 t_info->inHtmTransactionalState();
950
951 // received a response from the dcache: complete the load or store
952 // instruction
953 panic_if(pkt->isError(), "Data access (%s) failed: %s",
954 pkt->getAddrRange().to_string(), pkt->print());
956 pkt->req->getFlags().isSet(Request::NO_ACCESS));
957
958 pkt->req->setAccessLatency();
959
962
963 if (pkt->senderState) {
964 // hardware transactional memory
965 // There shouldn't be HtmCmds occurring in multipacket requests
966 if (pkt->req->isHTMCmd()) {
967 panic("unexpected HTM case");
968 }
969
970 SplitFragmentSenderState * send_state =
971 dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
972 assert(send_state);
973 PacketPtr big_pkt = send_state->bigPkt;
974 delete send_state;
975
976 if (pkt->isHtmTransactional()) {
977 assert(is_htm_speculative);
978
979 big_pkt->setHtmTransactional(
981 );
982 }
983
984 if (pkt->htmTransactionFailedInCache()) {
985 assert(is_htm_speculative);
988 );
989 }
990
991 delete pkt;
992
993 SplitMainSenderState * main_send_state =
994 dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
995 assert(main_send_state);
996 // Record the fact that this packet is no longer outstanding.
997 assert(main_send_state->outstanding != 0);
998 main_send_state->outstanding--;
999
1000 if (main_send_state->outstanding) {
1001 return;
1002 } else {
1003 delete main_send_state;
1004 big_pkt->senderState = NULL;
1005 pkt = big_pkt;
1006 }
1007 }
1008
1010
1011 Fault fault;
1012
1013 // hardware transactional memory
1014 // sanity checks
1015 // ensure htmTransactionUids are equivalent
1016 if (pkt->isHtmTransactional())
1017 assert (pkt->getHtmTransactionUid() ==
1018 t_info->getHtmTransactionUid());
1019
1020 // can't have a packet that fails a transaction while not in a transaction
1021 if (pkt->htmTransactionFailedInCache())
1022 assert(is_htm_speculative);
1023
1024 // shouldn't fail through stores because this would be inconsistent w/ O3
1025 // which cannot fault after the store has been sent to memory
1026 if (pkt->htmTransactionFailedInCache() && !pkt->isWrite()) {
1027 const HtmCacheFailure htm_rc =
1029 DPRINTF(HtmCpu, "HTM abortion in cache (rc=%s) detected htmUid=%u\n",
1030 htmFailureToStr(htm_rc), pkt->getHtmTransactionUid());
1031
1032 // Currently there are only two reasons why a transaction would
1033 // fail in the memory subsystem--
1034 // (1) A transactional line was evicted from the cache for
1035 // space (or replacement policy) reasons.
1036 // (2) Another core/device requested a cache line that is in this
1037 // transaction's read/write set that is incompatible with the
1038 // HTM's semantics, e.g. another core requesting exclusive access
1039 // of a line in this core's read set.
1040 if (htm_rc == HtmCacheFailure::FAIL_SELF) {
1041 fault = std::make_shared<GenericHtmFailureFault>(
1042 t_info->getHtmTransactionUid(),
1044 } else if (htm_rc == HtmCacheFailure::FAIL_REMOTE) {
1045 fault = std::make_shared<GenericHtmFailureFault>(
1046 t_info->getHtmTransactionUid(),
1048 } else {
1049 panic("HTM - unhandled rc %s", htmFailureToStr(htm_rc));
1050 }
1051 } else {
1052 fault = curStaticInst->completeAcc(pkt, t_info,
1053 traceData);
1054 }
1055
1056 // hardware transactional memory
1057 // Track HtmStop instructions,
1058 // e.g. instructions which commit a transaction.
1060 t_info->thread->htmTransactionStops++;
1061 DPRINTF(HtmCpu, "htmTransactionStops++=%u\n",
1062 t_info->thread->htmTransactionStops);
1063 }
1064
1065 // keep an instruction count
1066 if (fault == NoFault)
1067 countInst();
1068 else if (traceData) {
1069 traceFault();
1070 }
1071
1072 delete pkt;
1073
1074 postExecute();
1075
1076 advanceInst(fault);
1077}
1078
1079void
1081{
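    // The timing CPU only advances when memory responses arrive, so all cycles
    // elapsed since the last update are accounted for in one step.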
1082 const Cycles delta(curCycle() - previousCycle);
1083
1084 baseStats.numCycles += delta;
1085
1087}
1088
1089void
1091{
1092 for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
1093 if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
1094 cpu->wakeup(tid);
1095 }
1096 }
1097
1098 // Making it uniform across all CPUs:
1099 // The CPUs need to be woken up only on an invalidation packet
1100 // (when using caches) or on an incoming write packet (when not
1101 // using caches). It is not necessary to wake up the processor on
1102 // all incoming packets.
1103 if (pkt->isInvalidate() || pkt->isWrite()) {
1104 for (auto &t_info : cpu->threadInfo) {
1105 t_info->thread->getIsaPtr()->handleLockedSnoop(pkt,
1106 cacheBlockMask);
1107 }
1108 } else if (pkt->req && pkt->req->isTlbiExtSync()) {
1109 // We received a TLBI_EXT_SYNC request.
1110 // In a detailed sim we would wait for memory ops to complete,
1111 // but in our simple case we just respond immediately
1112 auto reply_req = Request::createMemManagement(
1114 cpu->dataRequestorId());
1115
1116 // Extra Data = the transaction ID of the Sync we're completing
1117 reply_req->setExtraData(pkt->req->getExtraData());
1118 PacketPtr reply_pkt = Packet::createRead(reply_req);
1119
1120 // TODO - reserve some credit for these responses?
1121 if (!sendTimingReq(reply_pkt)) {
1122 panic("Couldn't send TLBI_EXT_SYNC_COMP message");
1123 }
1124 }
1125}
1126
1127void
1129{
1130 for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
1131 if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
1132 cpu->wakeup(tid);
1133 }
1134 }
1135}
1136
1137bool
1139{
1140 DPRINTF(SimpleCPU, "Received load/store response %#x\n", pkt->getAddr());
1141
1142 // The timing CPU is not really ticked, instead it relies on the
1143 // memory system (fetch and load/store) to set the pace.
1144 if (!tickEvent.scheduled()) {
1145 // Delay processing of returned data until next CPU clock edge
1147 return true;
1148 } else {
1149 // In the case of a split transaction and a cache that is
1150 // faster than a CPU we could get two responses in the
1151 // same tick, delay the second one
1154 return false;
1155 }
1156}
1157
1158void
1163
1164void
1166{
1167 // we shouldn't get a retry unless we have a packet that we're
1168 // waiting to transmit
1169 assert(cpu->dcache_pkt != NULL);
1170 assert(cpu->_status == DcacheRetry);
1171 PacketPtr tmp = cpu->dcache_pkt;
1172 if (tmp->senderState) {
1173 // This is a packet from a split access.
1174 SplitFragmentSenderState * send_state =
1175 dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
1176 assert(send_state);
1177 PacketPtr big_pkt = send_state->bigPkt;
1178
1179 SplitMainSenderState * main_send_state =
1180 dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
1181 assert(main_send_state);
1182
1183 if (sendTimingReq(tmp)) {
1184 // If we were able to send without retrying, record that fact
1185 // and try sending the other fragment.
1186 send_state->clearFromParent();
1187 int other_index = main_send_state->getPendingFragment();
1188 if (other_index > 0) {
1189 tmp = main_send_state->fragments[other_index];
1190 cpu->dcache_pkt = tmp;
1191 if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
1192 (big_pkt->isWrite() && cpu->handleWritePacket())) {
1193 main_send_state->fragments[other_index] = NULL;
1194 }
1195 } else {
1197 // memory system takes ownership of packet
1198 cpu->dcache_pkt = NULL;
1199 }
1200 }
1201 } else if (sendTimingReq(tmp)) {
1203 // memory system takes ownership of packet
1204 cpu->dcache_pkt = NULL;
1205 }
1206}
1207
1209 Tick t)
1210 : pkt(_pkt), cpu(_cpu)
1211{
1212 cpu->schedule(this, t);
1213}
1214
1215void
1217{
1218 cpu->completeDataAccess(pkt);
1219}
1220
1221const char *
1223{
1224 return "Timing Simple CPU Delay IPR event";
1225}
1226
1227
1228void
1233
1234Fault
1236{
1238 SimpleThread* thread = t_info.thread;
1239
1240 const Addr addr = 0x0ul;
1241 const Addr pc = thread->pcState().instAddr();
1242 const int size = 8;
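    // Memory-management commands (HTM start/commit/cancel, TLBI) are issued
    // through the data port as dummy 8-byte reads to address 0; the request
    // flags identify the actual command.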
1243
1244 if (traceData)
1245 traceData->setMem(addr, size, flags);
1246
1247 RequestPtr req = std::make_shared<Request>(
1248 addr, size, flags, dataRequestorId());
1249
1250 req->setPC(pc);
1251 req->setContext(thread->contextId());
1252 req->taskId(taskId());
1253 req->setInstCount(t_info.numInst);
1254
1255 assert(req->isHTMCmd() || req->isTlbiCmd());
1256
1257 // Use the payload as a sanity check,
1258 // the memory subsystem will clear allocated data
1259 uint8_t *data = new uint8_t[size];
1260 assert(data);
1261 uint64_t rc = 0xdeadbeeflu;
1262 memcpy (data, &rc, size);
1263
1264 // debugging output
1265 if (req->isHTMCmd()) {
1266 if (req->isHTMStart())
1267 DPRINTF(HtmCpu, "HTMstart htmUid=%u\n",
1268 t_info.getHtmTransactionUid());
1269 else if (req->isHTMCommit())
1270 DPRINTF(HtmCpu, "HTMcommit htmUid=%u\n",
1271 t_info.getHtmTransactionUid());
1272 else if (req->isHTMCancel())
1273 DPRINTF(HtmCpu, "HTMcancel htmUid=%u\n",
1274 t_info.getHtmTransactionUid());
1275 else
1276 panic("initiateMemMgmtCmd: unknown HTM CMD");
1277 }
1278
1279 sendData(req, data, nullptr, true);
1280
1281 return NoFault;
1282}
1283
1284void
1287{
1288 SimpleExecContext& t_info = *threadInfo[tid];
1289 SimpleThread* thread = t_info.thread;
1290
1291 const Addr addr = 0x0ul;
1292 const Addr pc = thread->pcState().instAddr();
1293 const int size = 8;
1294 const Request::Flags flags =
1296
1297 if (traceData)
1298 traceData->setMem(addr, size, flags);
1299
1300 // notify l1 d-cache (ruby) that core has aborted transaction
1301
1302 RequestPtr req = std::make_shared<Request>(
1303 addr, size, flags, dataRequestorId());
1304
1305 req->setPC(pc);
1306 req->setContext(thread->contextId());
1307 req->taskId(taskId());
1308 req->setInstCount(t_info.numInst);
1309 req->setHtmAbortCause(cause);
1310
1311 assert(req->isHTMAbort());
1312
1313 uint8_t *data = new uint8_t[size];
1314 assert(data);
1315 uint64_t rc = 0lu;
1316 memcpy (data, &rc, size);
1317
1318 sendData(req, data, nullptr, true);
1319}
1320
1321} // namespace gem5