gem5 v24.0.0.0
atomic.cc
/*
 * Copyright 2014 Google, Inc.
 * Copyright (c) 2012-2013,2015,2017-2020 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "cpu/simple/atomic.hh"

#include "arch/generic/decoder.hh"
#include "base/output.hh"
#include "cpu/exetrace.hh"
#include "cpu/utils.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "mem/physical.hh"
#include "params/BaseAtomicSimpleCPU.hh"
#include "sim/faults.hh"
#include "sim/full_system.hh"
#include "sim/system.hh"

namespace gem5
{

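// init() is called after all C++ SimObjects have been created and all
// ports are connected; the atomic CPU uses it to bind its pre-allocated
// memory requests to the first thread's context.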
void
AtomicSimpleCPU::init()
{
    BaseCPU::init();

    int cid = threadContexts[0]->contextId();
    ifetch_req->setContext(cid);
    data_read_req->setContext(cid);
    data_write_req->setContext(cid);
    data_amo_req->setContext(cid);
}

AtomicSimpleCPU::AtomicSimpleCPU(const BaseAtomicSimpleCPUParams &p)
    : BaseSimpleCPU(p),
      tickEvent([this]{ tick(); }, "AtomicSimpleCPU tick",
                false, Event::CPU_Tick_Pri),
      width(p.width), locked(false),
      simulate_data_stalls(p.simulate_data_stalls),
      simulate_inst_stalls(p.simulate_inst_stalls),
      icachePort(name() + ".icache_port"),
      dcachePort(name() + ".dcache_port", this),
      dcache_access(false), dcache_latency(0),
      ppCommit(nullptr)
{
    _status = Idle;
    ifetch_req = std::make_shared<Request>();
    data_read_req = std::make_shared<Request>();
    data_write_req = std::make_shared<Request>();
    data_amo_req = std::make_shared<Request>();
}


AtomicSimpleCPU::~AtomicSimpleCPU()
{
    if (tickEvent.scheduled()) {
        deschedule(tickEvent);
    }
}

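// Draining clears out the CPU's in-flight state so the SimObject can be
// serialized or switched out.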
DrainState
AtomicSimpleCPU::drain()
{
    // Deschedule any power gating event (if any)
    deschedulePowerGatingEvent();

    if (switchedOut())
        return DrainState::Drained;

    if (!isCpuDrained()) {
        DPRINTF(Drain, "Requesting drain.\n");
        return DrainState::Draining;
    } else {
        if (tickEvent.scheduled())
            deschedule(tickEvent);

        activeThreads.clear();
        DPRINTF(Drain, "Not executing microcode, no need to drain.\n");
        return DrainState::Drained;
    }
}

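// Perform snoop for other cpu-local thread contexts.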
void
AtomicSimpleCPU::threadSnoop(PacketPtr pkt, ThreadID sender)
{
    DPRINTF(SimpleCPU, "%s received snoop pkt for addr:%#x %s\n",
            __func__, pkt->getAddr(), pkt->cmdString());

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (tid != sender) {
            if (getCpuAddrMonitor(tid)->doMonitor(pkt)) {
                wakeup(tid);
            }

            threadInfo[tid]->thread->getIsaPtr()->handleLockedSnoop(pkt,
                    dcachePort.cacheBlockMask);
        }
    }
}

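// Resume execution after a successful drain.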
void
AtomicSimpleCPU::drainResume()
{
    assert(!tickEvent.scheduled());
    if (switchedOut())
        return;

    DPRINTF(SimpleCPU, "Resume\n");
    verifyMemoryMode();

    assert(!threadContexts.empty());

    _status = BaseSimpleCPU::Idle;

    for (ThreadID tid = 0; tid < numThreads; tid++) {
        if (threadInfo[tid]->thread->status() == ThreadContext::Active) {
            threadInfo[tid]->execContextStats.notIdleFraction = 1;
            activeThreads.push_back(tid);
            _status = BaseSimpleCPU::Running;

            // Tick if any threads active
            if (!tickEvent.scheduled()) {
                schedule(tickEvent, nextCycle());
            }
        } else {
            threadInfo[tid]->execContextStats.notIdleFraction = 0;
        }
    }

    // Reschedule any power gating event (if any)
    schedulePowerGatingEvent();
}

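// Try to complete a drain request; returns true if the CPU is drained.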
bool
AtomicSimpleCPU::tryCompleteDrain()
{
    if (drainState() != DrainState::Draining)
        return false;

    DPRINTF(Drain, "tryCompleteDrain.\n");
    if (!isCpuDrained())
        return false;

    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    signalDrainDone();

    return true;
}


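// Prepare for another CPU to take over execution.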
void
AtomicSimpleCPU::switchOut()
{
    BaseCPU::switchOut();

    assert(!tickEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(isCpuDrained());
}


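// Load the state of a CPU from the previous CPU object, invoked on all new
// CPUs that are about to be switched in.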
void
AtomicSimpleCPU::takeOverFrom(BaseCPU *old_cpu)
{
    BaseCPU::takeOverFrom(old_cpu);

    // The tick event should have been descheduled by drain()
    assert(!tickEvent.scheduled());
}

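// Verify that the system is in a memory mode supported by this CPU.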
void
AtomicSimpleCPU::verifyMemoryMode() const
{
    fatal_if(!system->isAtomicMode(),
             "The atomic CPU requires the memory system to be in "
             "'atomic' mode.");
}

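// Notify the CPU that the indicated context is now active.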
void
AtomicSimpleCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);

    assert(thread_num < numThreads);

    threadInfo[thread_num]->execContextStats.notIdleFraction = 1;
    Cycles delta = ticksToCycles(threadInfo[thread_num]->thread->lastActivate -
                                 threadInfo[thread_num]->thread->lastSuspend);
    baseStats.numCycles += delta;

    if (!tickEvent.scheduled()) {
        //Make sure ticks are still on multiples of cycles
        schedule(tickEvent, clockEdge(Cycles(0)));
    }
    _status = BaseSimpleCPU::Running;
    if (std::find(activeThreads.begin(), activeThreads.end(), thread_num) ==
        activeThreads.end()) {
        activeThreads.push_back(thread_num);
    }

    BaseCPU::activateContext(thread_num);
}


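// Notify the CPU that the indicated context is now suspended.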
void
AtomicSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num < numThreads);
    activeThreads.remove(thread_num);

    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    threadInfo[thread_num]->execContextStats.notIdleFraction = 0;

    if (activeThreads.empty()) {
        _status = Idle;

        if (tickEvent.scheduled()) {
            deschedule(tickEvent);
        }
    }

    BaseCPU::suspendContext(thread_num);
}

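// Send a packet using the atomic protocol and return the access latency.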
Tick
AtomicSimpleCPU::sendPacket(RequestPort &port, const PacketPtr &pkt)
{
    return port.sendAtomic(pkt);
}

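// Receive an atomic snoop request packet from our peer.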
Tick
AtomicSimpleCPU::AtomicCPUDPort::recvAtomicSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "%s received atomic snoop pkt for addr:%#x %s\n",
            __func__, pkt->getAddr(), pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // If the snoop invalidates, release any associated locks.
    // When run without caches, invalidation packets will not be received;
    // hence we must check if the incoming packets are writes and wake up
    // the processor accordingly.
    if (pkt->isInvalidate() || pkt->isWrite()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            t_info->thread->getIsaPtr()->handleLockedSnoop(pkt,
                    cacheBlockMask);
        }
    }

    return 0;
}

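// Receive a functional snoop request packet from the peer.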
void
AtomicSimpleCPU::AtomicCPUDPort::recvFunctionalSnoop(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "%s received functional snoop pkt for addr:%#x %s\n",
            __func__, pkt->getAddr(), pkt->cmdString());

    // X86 ISA: Snooping an invalidation for monitor/mwait
    for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
        if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
            cpu->wakeup(tid);
        }
    }

    // If the snoop invalidates, release any associated locks.
    if (pkt->isInvalidate()) {
        DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n",
                pkt->getAddr());
        for (auto &t_info : cpu->threadInfo) {
            t_info->thread->getIsaPtr()->handleLockedSnoop(pkt,
                    cacheBlockMask);
        }
    }
}

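// Helper function used to set up the request for a single fragment of a
// memory access; returns false if the fragment is fully predicated off.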
bool
AtomicSimpleCPU::genMemFragmentRequest(const RequestPtr &req, Addr frag_addr,
                                       int size, Request::Flags flags,
                                       const std::vector<bool> &byte_enable,
                                       int &frag_size, int &size_left) const
{
    bool predicate = true;
    Addr inst_addr = threadInfo[curThread]->thread->pcState().instAddr();

    frag_size = std::min(
        cacheLineSize() - addrBlockOffset(frag_addr, cacheLineSize()),
        (Addr)size_left);
    size_left -= frag_size;

    // Set up byte-enable mask for the current fragment
    auto it_start = byte_enable.begin() + (size - (frag_size + size_left));
    auto it_end = byte_enable.begin() + (size - size_left);
    if (isAnyActiveElement(it_start, it_end)) {
        req->setVirt(frag_addr, frag_size, flags, dataRequestorId(),
                     inst_addr);
        req->setByteEnable(std::vector<bool>(it_start, it_end));
    } else {
        predicate = false;
    }

    return predicate;
}

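// Atomic read: translate and access each cache-line-sized fragment of the
// request in turn, accumulating the latency in dcache_latency.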
Fault
AtomicSimpleCPU::readMem(Addr addr, uint8_t *data, unsigned size,
                         Request::Flags flags,
                         const std::vector<bool> &byte_enable)
{
    SimpleExecContext &t_info = *threadInfo[curThread];
    SimpleThread *thread = t_info.thread;

    // use the CPU's statically allocated read request and packet objects
    const RequestPtr &req = data_read_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    dcache_latency = 0;

    req->taskId(taskId());

    Addr frag_addr = addr;
    int frag_size = 0;
    int size_left = size;
    bool predicate;
    Fault fault = NoFault;

    while (1) {
        predicate = genMemFragmentRequest(req, frag_addr, size, flags,
                                          byte_enable, frag_size, size_left);

        // translate to physical address
        if (predicate) {
            fault = thread->mmu->translateAtomic(req, thread->getTC(),
                                                 BaseMMU::Read);
        }

        // Now do the access.
        if (predicate && fault == NoFault &&
            !req->getFlags().isSet(Request::NO_ACCESS)) {
            Packet pkt(req, Packet::makeReadCmd(req));
            pkt.dataStatic(data);

            if (req->isLocalAccess()) {
                dcache_latency += req->localAccessor(thread->getTC(), &pkt);
            } else {
                dcache_latency += sendPacket(dcachePort, &pkt);
            }
            dcache_access = true;

            panic_if(pkt.isError(), "Data fetch (%s) failed: %s",
                     pkt.getAddrRange().to_string(), pkt.print());

            if (req->isLLSC()) {
                thread->getIsaPtr()->handleLockedRead(req);
            }
        }

        //If there's a fault, return it
        if (fault != NoFault)
            return req->isPrefetch() ? NoFault : fault;

        // If we don't need to access further cache lines, stop now.
        if (size_left == 0) {
            if (req->isLockedRMW() && fault == NoFault) {
                assert(!locked);
                locked = true;
            }
            return fault;
        }

        /*
         * Set up for accessing the next cache line.
         */
        frag_addr += frag_size;

        //Move the pointer we're reading into to the correct location.
        data += frag_size;
    }
}

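// Atomic write: like readMem, but additionally handles LLSC, swap and
// locked RMW semantics, and snoops the CPU's other thread contexts.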
Fault
AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size, Addr addr,
                          Request::Flags flags, uint64_t *res,
                          const std::vector<bool>& byte_enable)
{
    SimpleExecContext &t_info = *threadInfo[curThread];
    SimpleThread *thread = t_info.thread;
    static uint8_t zero_array[64] = {};

    if (data == NULL) {
        assert(size <= 64);
        assert(flags & Request::STORE_NO_DATA);
        // This must be a cache block cleaning request
        data = zero_array;
    }

    // use the CPU's statically allocated write request and packet objects
    const RequestPtr &req = data_write_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    dcache_latency = 0;

    req->taskId(taskId());

    Addr frag_addr = addr;
    int frag_size = 0;
    int size_left = size;
    [[maybe_unused]] int curr_frag_id = 0;
    bool predicate;
    Fault fault = NoFault;

    while (1) {
        predicate = genMemFragmentRequest(req, frag_addr, size, flags,
                                          byte_enable, frag_size, size_left);

        // translate to physical address
        if (predicate)
            fault = thread->mmu->translateAtomic(req, thread->getTC(),
                                                 BaseMMU::Write);

        // Now do the access.
        if (predicate && fault == NoFault) {
            bool do_access = true;  // flag to suppress cache access

            if (req->isLLSC()) {
                assert(curr_frag_id == 0);
                do_access = thread->getIsaPtr()->handleLockedWrite(req,
                        dcachePort.cacheBlockMask);
            } else if (req->isSwap()) {
                assert(curr_frag_id == 0);
                if (req->isCondSwap()) {
                    assert(res);
                    req->setExtraData(*res);
                }
            }

            if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
                Packet pkt(req, Packet::makeWriteCmd(req));
                pkt.dataStatic(data);

                if (req->isLocalAccess()) {
                    dcache_latency +=
                        req->localAccessor(thread->getTC(), &pkt);
                } else {
                    dcache_latency += sendPacket(dcachePort, &pkt);

                    // Notify other threads on this CPU of write
                    threadSnoop(&pkt, curThread);
                }
                dcache_access = true;
                panic_if(pkt.isError(), "Data write (%s) failed: %s",
                         pkt.getAddrRange().to_string(), pkt.print());
                if (req->isSwap()) {
                    assert(res && curr_frag_id == 0);
                    memcpy(res, pkt.getConstPtr<uint8_t>(), size);
                }
            }

            if (res && !req->isSwap()) {
                *res = req->getExtraData();
            }
        }

        //If there's a fault or we don't need to access a second cache line,
        //stop now.
        if (fault != NoFault || size_left == 0) {
            if (req->isLockedRMW() && fault == NoFault) {
                assert(!req->isMasked());
                locked = false;
            }

            //Suppress faults from prefetches.
            return req->isPrefetch() ? NoFault : fault;
        }

        /*
         * Set up for accessing the next cache line.
         */
        frag_addr += frag_size;

        //Move the pointer we're reading into to the correct location.
        data += frag_size;

        curr_frag_id++;
    }
}

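// Atomic memory operation: issued as a write access carrying the AMO
// functor; must not cross a cache line boundary.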
Fault
AtomicSimpleCPU::amoMem(Addr addr, uint8_t* data, unsigned size,
                        Request::Flags flags, AtomicOpFunctorPtr amo_op)
{
    SimpleExecContext &t_info = *threadInfo[curThread];
    SimpleThread *thread = t_info.thread;

    // use the CPU's statically allocated amo request and packet objects
    const RequestPtr &req = data_amo_req;

    if (traceData)
        traceData->setMem(addr, size, flags);

    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());

    // AMO requests that access across a cache line boundary are not
    // allowed since the cache does not guarantee AMO ops to be executed
    // atomically in two cache lines.
    // For ISAs such as x86 that require AMO operations to work on
    // accesses that cross cache-line boundaries, the cache needs to be
    // modified to support locking both cache lines to guarantee the
    // atomicity.
    panic_if(secondAddr > addr,
             "AMO request should not access across a cache line boundary.");

    dcache_latency = 0;

    req->taskId(taskId());
    req->setVirt(addr, size, flags, dataRequestorId(),
                 thread->pcState().instAddr(), std::move(amo_op));

    // translate to physical address
    Fault fault = thread->mmu->translateAtomic(
        req, thread->getTC(), BaseMMU::Write);

    // Now do the access.
    if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
        // We treat AMO accesses as Write accesses with SwapReq command;
        // data will hold the return data of the AMO access.
        Packet pkt(req, Packet::makeWriteCmd(req));
        pkt.dataStatic(data);

        if (req->isLocalAccess()) {
            dcache_latency += req->localAccessor(thread->getTC(), &pkt);
        } else {
            dcache_latency += sendPacket(dcachePort, &pkt);
        }

        dcache_access = true;

        panic_if(pkt.isError(), "Atomic access (%s) failed: %s",
                 pkt.getAddrRange().to_string(), pkt.print());
        assert(!req->isLLSC());
    }

    if (fault != NoFault && req->isPrefetch()) {
        return NoFault;
    }

    //If there's a fault and we're not doing prefetch, return it
    return fault;
}

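// Main simulation loop: fetch, decode and execute up to 'width'
// instructions per cycle, charging memory latencies as stall ticks.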
void
AtomicSimpleCPU::tick()
{
    DPRINTF(SimpleCPU, "Tick\n");

    // Change thread if multi-threaded
    swapActiveThread();

    // Set memory request ids to current thread
    if (numThreads > 1) {
        ContextID cid = threadContexts[curThread]->contextId();

        ifetch_req->setContext(cid);
        data_read_req->setContext(cid);
        data_write_req->setContext(cid);
        data_amo_req->setContext(cid);
    }

    SimpleExecContext &t_info = *threadInfo[curThread];
    SimpleThread *thread = t_info.thread;

    Tick latency = 0;

    for (int i = 0; i < width || locked; ++i) {
        baseStats.numCycles++;
        updateCycleCounters(BaseCPU::CPU_STATE_ON);

        if (!curStaticInst || !curStaticInst->isDelayedCommit()) {
            checkForInterrupts();
            checkPcEventQueue();
        }

        // We must have just got suspended by a PC event
        if (_status == Idle) {
            tryCompleteDrain();
            return;
        }

        serviceInstCountEvents();

        Fault fault = NoFault;

        const PCStateBase &pc = thread->pcState();

        bool needToFetch = !isRomMicroPC(pc.microPC()) && !curMacroStaticInst;
        if (needToFetch) {
            ifetch_req->taskId(taskId());
            setupFetchRequest(ifetch_req);
            fault = thread->mmu->translateAtomic(ifetch_req, thread->getTC(),
                                                 BaseMMU::Execute);
        }

        if (fault == NoFault) {
            Tick icache_latency = 0;
            bool icache_access = false;
            dcache_access = false; // assume no dcache access

            if (needToFetch) {
                // This is commented out because the decoder would act like
                // a tiny cache otherwise. It wouldn't be flushed when needed
                // like the I cache. It should be flushed, and when that works
                // this code should be uncommented.
                //Fetch more instruction memory if necessary
                //if (decoder.needMoreBytes())
                //{
                icache_access = true;
                icache_latency = fetchInstMem();
                //}
            }

            preExecute();

            Tick stall_ticks = 0;
            if (curStaticInst) {
                fault = curStaticInst->execute(&t_info, traceData);

                // keep an instruction count
                if (fault == NoFault) {
                    countInst();
                    ppCommit->notify(std::make_pair(thread, curStaticInst));
                } else if (traceData) {
                    traceFault();
                }

                if (fault != NoFault &&
                    std::dynamic_pointer_cast<SyscallRetryFault>(fault)) {
                    // Retry execution of system calls after a delay.
                    // Prevents immediate re-execution since conditions which
                    // caused the retry are unlikely to change every tick.
                    stall_ticks += clockEdge(syscallRetryLatency) - curTick();
                }

                postExecute();
            }

            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop())) {
                instCnt++;
            }

            if (simulate_inst_stalls && icache_access)
                stall_ticks += icache_latency;

            if (simulate_data_stalls && dcache_access)
                stall_ticks += dcache_latency;

            if (stall_ticks) {
                // the atomic cpu does its accounting in ticks, so
                // keep counting in ticks but round to the clock
                // period
                latency += divCeil(stall_ticks, clockPeriod()) *
                    clockPeriod();
            }

        }
        if (fault != NoFault || !t_info.stayAtPC)
            advancePC(fault);
    }

    if (tryCompleteDrain())
        return;

    // instruction takes at least one cycle
    if (latency < clockPeriod())
        latency = clockPeriod();

    if (_status != Idle)
        reschedule(tickEvent, curTick() + latency, true);
}

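// Fetch instruction bytes atomically through the icache port, directly
// into the decoder's buffer.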
Tick
AtomicSimpleCPU::fetchInstMem()
{
    auto &decoder = threadInfo[curThread]->thread->decoder;

    Packet pkt = Packet(ifetch_req, MemCmd::ReadReq);

    // ifetch_req is initialized to read the instruction
    // directly into the CPU object's inst field.
    pkt.dataStatic(decoder->moreBytesPtr());

    Tick latency = sendPacket(icachePort, &pkt);
    panic_if(pkt.isError(), "Instruction fetch (%s) failed: %s",
             pkt.getAddrRange().to_string(), pkt.print());

    return latency;
}

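// Register probe points for this object.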
void
AtomicSimpleCPU::regProbePoints()
{
    BaseCPU::regProbePoints();

    ppCommit = new ProbePointArg<std::pair<SimpleThread*, const StaticInstPtr>>
                                (getProbeManager(), "Commit");
}

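// Print the state of an address in the memory system via PrintReq
// (for debugging).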
void
AtomicSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}

} // namespace gem5

Generated on Tue Jun 18 2024 16:24:02 for gem5 by doxygen 1.11.0