#include "arch/locked_mem.hh"
#include "arch/utility.hh"
#include "config/the_isa.hh"
#include "debug/Drain.hh"
#include "debug/ExecFaulting.hh"
#include "debug/SimpleCPU.hh"
#include "params/AtomicSimpleCPU.hh"
// AtomicSimpleCPU::init(): bind the fetch and data requests to the primary
// thread's context ID.
int cid = threadContexts[0]->contextId();
ifetch_req->setContext(cid);
data_read_req->setContext(cid);
data_write_req->setContext(cid);
data_amo_req->setContext(cid);
AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
    : BaseSimpleCPU(p),
      tickEvent([this]{ tick(); }, "AtomicSimpleCPU tick",
                false, Event::CPU_Tick_Pri),
      width(p->width), locked(false),
      simulate_data_stalls(p->simulate_data_stalls),
      simulate_inst_stalls(p->simulate_inst_stalls),
      icachePort(name() + ".icache_port", this),
      dcachePort(name() + ".dcache_port", this),
      dcache_access(false), dcache_latency(0),
      ppCommit(nullptr)
{
    _status = Idle;

    ifetch_req = std::make_shared<Request>();
    data_read_req = std::make_shared<Request>();
    data_write_req = std::make_shared<Request>();
    data_amo_req = std::make_shared<Request>();
}
// AtomicSimpleCPU::drain()
DPRINTF(Drain, "Requesting drain.\n");
DPRINTF(Drain, "Not executing microcode, no need to drain.\n");

// AtomicSimpleCPU::threadSnoop()
DPRINTF(SimpleCPU, "%s received snoop pkt for addr:%#x %s\n",
        __func__, pkt->getAddr(), pkt->cmdString());

// AtomicSimpleCPU::drainResume()
DPRINTF(SimpleCPU, "Resume\n");

// AtomicSimpleCPU::tryCompleteDrain()
DPRINTF(Drain, "tryCompleteDrain.\n");
DPRINTF(Drain, "CPU done draining, processing drain event\n");

// AtomicSimpleCPU::verifyMemoryMode()
fatal("The atomic CPU requires the memory system to be in "
      "'atomic' mode.\n");

// AtomicSimpleCPU::activateContext() / suspendContext()
DPRINTF(SimpleCPU, "ActivateContext %d\n", thread_num);
DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

// AtomicCPUDPort::recvAtomicSnoop()
DPRINTF(SimpleCPU, "%s received atomic snoop pkt for addr:%#x %s\n",
        __func__, pkt->getAddr(), pkt->cmdString());
DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n", pkt->getAddr());

// AtomicCPUDPort::recvFunctionalSnoop()
DPRINTF(SimpleCPU, "%s received functional snoop pkt for addr:%#x %s\n",
        __func__, pkt->getAddr(), pkt->cmdString());
DPRINTF(SimpleCPU, "received invalidation for addr:%#x\n", pkt->getAddr());
// AtomicSimpleCPU::genMemFragmentRequest(): carve one cache-line-bounded fragment out of a (possibly byte-masked) access.
bool
AtomicSimpleCPU::genMemFragmentRequest(const RequestPtr &req, Addr frag_addr,
                                       int size, Request::Flags flags,
                                       const std::vector<bool> &byte_enable,
                                       int &frag_size, int &size_left) const
{
    bool predicate = true;

    // Clamp the fragment to the end of the current cache line or to the bytes still outstanding.
    frag_size = std::min(
        cacheLineSize() - addrBlockOffset(frag_addr, cacheLineSize()),
        (Addr) size_left);
    size_left -= frag_size;

    if (!byte_enable.empty()) {
        // Slice the byte-enable mask down to the current fragment.
        auto it_start = byte_enable.begin() + (size - (frag_size + size_left));
        auto it_end = byte_enable.begin() + (size - size_left);
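The fragment arithmetic above is easier to follow with concrete numbers. The following stand-alone sketch (not part of atomic.cc; the 64-byte line size and the addresses are assumed purely for illustration) applies the same clamping rule to a 16-byte access that straddles a cache-line boundary:

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main()
{
    const uint64_t line_size = 64;  // assumed cache line size
    uint64_t frag_addr = 0x38;      // starts 8 bytes below the 0x40 boundary
    int size_left = 16;             // 16-byte access -> two fragments

    while (size_left > 0) {
        // Same rule as genMemFragmentRequest(): stop at the end of the line
        // or when the remaining bytes run out, whichever comes first.
        uint64_t offset = frag_addr % line_size;
        int frag_size = std::min<uint64_t>(line_size - offset, size_left);
        size_left -= frag_size;
        std::printf("fragment at %#llx, size %d\n",
                    (unsigned long long)frag_addr, frag_size);
        frag_addr += frag_size;
    }
    return 0;
}

It prints one 8-byte fragment ending at the line boundary and one 8-byte fragment starting on the next line, which is the loop that readMem() and writeMem() drive one iteration at a time.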
// AtomicSimpleCPU::readMem(): walk the access one fragment at a time.
int size_left = size;
predicate = genMemFragmentRequest(req, frag_addr, size, flags,
                                  byte_enable, frag_size, size_left);

// Issue the fragment only if it is enabled and translation succeeded.
if (predicate && fault == NoFault &&
    !req->getFlags().isSet(Request::NO_ACCESS)) { /* ... */ }
// Local accesses are serviced directly rather than through the dcache port.
if (req->isLocalAccess()) { /* ... */ }
// Faulting prefetches are squashed rather than reported.
if (req->isPrefetch()) { /* ... */ }

// Last fragment: record locked-RMW state, then return.
if (size_left == 0) {
    if (req->isLockedRMW() && fault == NoFault) { /* ... */ }
}
// Otherwise advance to the next cache line.
frag_addr += frag_size;
// AtomicSimpleCPU::writeMem(): cache-block cleaning requests carry no data,
// so they are fed from a static block of zeroes.
static uint8_t zero_array[64] = {};

int size_left = size;
int curr_frag_id = 0;

predicate = genMemFragmentRequest(req, frag_addr, size, flags,
                                  byte_enable, frag_size, size_left);

if (predicate && fault == NoFault) {
    bool do_access = true;  // flag to suppress cache access

    // LL/SC and swap requests must fit in the first (and only) fragment.
    if (req->isLLSC()) {
        assert(curr_frag_id == 0);
        /* ... */
    } else if (req->isSwap()) {
        assert(curr_frag_id == 0);
        if (req->isCondSwap()) {
            // A conditional swap carries its compare value as extra data.
            req->setExtraData(*res);
        }
    }

    // Local accesses are serviced directly rather than through the dcache port.
    if (req->isLocalAccess()) {
        dcache_latency += req->localAccessor(thread->getTC(), &pkt);
    }

    if (req->isSwap()) {
        // The old memory value is handed back through res.
        assert(res && curr_frag_id == 0);
        /* ... */
    }
    if (res && !req->isSwap()) {
        *res = req->getExtraData();
    }
}

// A fault, or no further cache lines needed: unwind and return.
if (fault != NoFault || size_left == 0) {
    if (req->isLockedRMW() && fault == NoFault) {
        assert(!req->isMasked());
        /* ... */
    }
    // Faulting prefetches are squashed rather than reported.
    if (fault != NoFault && req->isPrefetch()) { /* ... */ }
}
// Otherwise advance to the next cache line.
frag_addr += frag_size;
// AtomicSimpleCPU::amoMem(): atomic memory operations must fit in a single
// cache line.
if (secondAddr > addr) {
    panic("AMO request should not access across a cache line boundary\n");
}

req->setVirt(addr, size, flags, dataRequestorId(),
             thread->pcState().instAddr(), std::move(amo_op));

// AMOs are issued like writes; local accesses bypass the dcache port.
if (req->isLocalAccess())
    dcache_latency += req->localAccessor(thread->getTC(), &pkt);

// Load-linked/store-conditional requests never reach this path.
assert(!req->isLLSC());

// Faulting prefetches are squashed rather than reported.
if (fault != NoFault && req->isPrefetch()) {
    return NoFault;
}
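To see when that panic fires, here is a small stand-alone sketch (not gem5 code; the 64-byte line size, the addresses and the round_down helper are assumed for illustration, with round_down standing in for the roundDown helper listed further down). An AMO crosses a line exactly when the rounded-down address of its last byte lies above its first byte:

#include <cstdint>
#include <cstdio>
#include <initializer_list>

// Local stand-in for roundDown(): align val down to a multiple of align.
static uint64_t round_down(uint64_t val, uint64_t align)
{
    return val - (val % align);
}

int main()
{
    const uint64_t line_size = 64;  // assumed cache line size
    const uint64_t size = 8;        // 8-byte AMO

    // 0x40 stays inside one line; 0x3c spills into the next line and would
    // hit the panic in amoMem().
    for (uint64_t addr : {0x40ull, 0x3cull}) {
        uint64_t second_addr = round_down(addr + size - 1, line_size);
        std::printf("addr %#llx: %s\n", (unsigned long long)addr,
                    second_addr > addr ? "crosses a cache line boundary"
                                       : "fits in one cache line");
    }
    return 0;
}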
// AtomicSimpleCPU::tick(): charge fetch and data stalls in whole cycles.
Tick icache_latency = 0;
bool icache_access = false;

// A fetch that goes out through the icache port counts as an icache access.
icache_access = true;

Tick stall_ticks = 0;

// System calls that must be retried stall the CPU for a while before the
// instruction is re-executed.
if (fault != NoFault &&
    dynamic_pointer_cast<SyscallRetryFault>(fault)) { /* ... */ }

if (simulate_inst_stalls && icache_access)
    stall_ticks += icache_latency;

// The atomic CPU accounts in ticks but rounds stalls up to the clock period.
latency += divCeil(stall_ticks, clockPeriod()) * clockPeriod();

// An instruction takes at least one cycle.
if (latency < clockPeriod())
    latency = clockPeriod();
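The rounding in the last few lines is easiest to check with made-up numbers. Below is a stand-alone sketch (not gem5 code; the 500-tick clock period and the 1250-tick stall are assumed, and div_ceil is a local stand-in for the divCeil helper listed further down):

#include <cstdint>
#include <cstdio>

// Local stand-in for divCeil(): integer division that rounds up.
static uint64_t div_ceil(uint64_t a, uint64_t b)
{
    return (a + b - 1) / b;
}

int main()
{
    const uint64_t clock_period = 500;  // assumed ticks per cycle
    uint64_t latency = 0;               // accumulated in ticks, as in tick()
    uint64_t stall_ticks = 1250;        // e.g. icache plus dcache latency

    // Same rounding as tick(): stalls are charged in whole clock periods.
    latency += div_ceil(stall_ticks, clock_period) * clock_period;

    // And an instruction never takes less than one cycle.
    if (latency < clock_period)
        latency = clock_period;

    std::printf("latency = %llu ticks = %llu cycles\n",
                (unsigned long long)latency,
                (unsigned long long)(latency / clock_period));
    return 0;
}

With these numbers the 1250-tick stall is charged as three whole cycles (1500 ticks); with no stall at all, the final clamp still charges one clock period.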
// AtomicSimpleCPU::regProbePoints(): register the per-instruction Commit probe.
ppCommit = new ProbePointArg<std::pair<SimpleThread *, const StaticInstPtr>>
               (getProbeManager(), "Commit");

// AtomicSimpleCPUParams::create(): factory hook that builds the CPU object.
AtomicSimpleCPU *AtomicSimpleCPUParams::create() { return new AtomicSimpleCPU(this); }
#define fatal(...)
This implements a cprintf based fatal() function.
std::unique_ptr< AtomicOpFunctor > AtomicOpFunctorPtr
bool scheduled() const
Determine if the current event is scheduled.
void activateContext(ThreadID thread_num) override
Notify the CPU that the indicated context is now active.
bool handleLockedWrite(XC *xc, const RequestPtr &req, Addr cacheBlockMask)
T roundDown(const T &val, const U &align)
This function is used to align addresses in memory.
TheISA::PCState pcState() const override
ProbePointArg< std::pair< SimpleThread *, const StaticInstPtr > > * ppCommit
Probe Points.
Trace::InstRecord * traceData
StaticInstPtr curMacroStaticInst
static const Priority CPU_Tick_Pri
CPU ticks must come after other associated CPU events (such as writebacks).
virtual Fault translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode)=0
void handleLockedSnoop(XC *xc, PacketPtr pkt, Addr cacheBlockMask)
int16_t ThreadID
Thread index/ID type.
virtual Tick sendPacket(RequestPort &port, const PacketPtr &pkt)
@ NO_ACCESS
The request should not cause a memory access.
void drainResume() override
int ContextID
Globally unique thread context ID.
bool doMonitor(PacketPtr pkt)
unsigned int cacheLineSize() const
Get the cache line size of the system.
ProbePointArg generates a point for the class of Arg.
uint64_t Tick
Tick count type.
static MemCmd makeReadCmd(const RequestPtr &req)
Generate the appropriate read MemCmd based on the Request flags.
Cycles syscallRetryLatency
void setMem(Addr a, Addr s, unsigned f)
void setupFetchRequest(const RequestPtr &req)
bool isInvalidate() const
std::shared_ptr< Request > RequestPtr
EventFunctionWrapper tickEvent
void regProbePoints() override
void deschedulePowerGatingEvent()
std::vector< SimpleExecContext * > threadInfo
void traceFault()
Handler used when encountering a fault; its purpose is to tear down the InstRecord.
Fault readMem(Addr addr, uint8_t *data, unsigned size, Request::Flags flags, const std::vector< bool > &byte_enable=std::vector< bool >()) override
virtual void recvFunctionalSnoop(PacketPtr pkt)
Receive a functional snoop request packet from the peer.
AtomicSimpleCPU(AtomicSimpleCPUParams *params)
const bool simulate_inst_stalls
void updateCycleCounters(CPUState state)
base method keeping track of cycle progression
The SimpleThread object provides a combination of the ThreadState object and the ThreadContext interface.
bool isAnyActiveElement(const std::vector< bool >::const_iterator &it_start, const std::vector< bool >::const_iterator &it_end)
Test if there is any active element in an enablement range.
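In practice this check is just a scan of a slice of the byte-enable vector. A minimal stand-alone equivalent (a sketch, not the gem5 implementation itself) could look like:

#include <algorithm>
#include <vector>

// True if any element in the [it_start, it_end) slice of a byte-enable
// mask is set.
static bool any_active(std::vector<bool>::const_iterator it_start,
                       std::vector<bool>::const_iterator it_end)
{
    return std::find(it_start, it_end, true) != it_end;
}

This is what lets a fragment whose byte-enable slice is entirely false come back with predicate cleared, so the readMem()/writeMem() excerpts above never issue an access for a fully masked-off cache line.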
@ Drained
Buffers drained, ready for serialization/handover.
std::list< ThreadID > activeThreads
DrainState
Object drain/handover states.
void takeOverFrom(BaseCPU *oldCPU) override
Load the state of a CPU from the previous CPU object, invoked on all new CPUs that are about to be switched in.
bool isCpuDrained() const
Check if a system is in a drained state.
bool isFirstMicroop() const
T divCeil(const T &a, const U &b)
bool isDelayedCommit() const
void threadSnoop(PacketPtr pkt, ThreadID sender)
Perform snoop for other cpu-local thread contexts.
ThreadContext * getTC()
Returns the pointer to this SimpleThread's ThreadContext.
Tick instCnt
Instruction count used for SPARC misc register.
const std::string & cmdString() const
Return the string name of the cmd field (for debugging and tracing).
std::shared_ptr< FaultBase > Fault
TheISA::MachInst inst
Current instruction.
RequestPtr data_write_req
std::vector< ThreadContext * > threadContexts
void suspendContext(ThreadID thread_num) override
Notify the CPU that the indicated context is now suspended.
virtual void activateContext(ThreadID thread_num)
Notify the CPU that the indicated context is now active.
void printAddr(Addr a)
Print state of address in memory system via PrintReq (for debugging).
void checkForInterrupts()
Fault amoMem(Addr addr, uint8_t *data, unsigned size, Request::Flags flags, AtomicOpFunctorPtr amo_op) override
AddressMonitor * getCpuAddrMonitor(ThreadID tid)
Addr addrBlockOffset(Addr addr, Addr block_size)
Calculates the offset of a given address wrt aligned fixed-size blocks.
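A quick worked example of this block arithmetic (a stand-alone sketch; the 64-byte block size and the address are assumed, and the expressions are local equivalents of addrBlockOffset and roundDown rather than the gem5 helpers themselves):

#include <cassert>
#include <cstdint>

int main()
{
    using Addr = uint64_t;
    const Addr block_size = 64;        // assumed cache-line-sized block
    const Addr addr = 0x1234;

    Addr offset = addr % block_size;   // what addrBlockOffset() yields: 0x34
    Addr base = addr - offset;         // what roundDown() yields: 0x1200
    assert(offset == 0x34 && base == 0x1200);

    // genMemFragmentRequest() uses block_size - offset (here 12 bytes) as the
    // largest fragment that still ends on the block boundary.
    assert(block_size - offset == 12);
    return 0;
}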
static MemCmd makeWriteCmd(const RequestPtr &req)
Generate the appropriate write MemCmd based on the Request flags.
A RequestPort is a specialisation of a Port, which implements the default protocol for the three different levels of transport functions.
constexpr decltype(nullptr) NoFault
void switchOut() override
Prepare for another CPU to take over execution.
ProbePointArg< PacketInfo > Packet
Packet probe point.
uint64_t Addr
Address type. This will probably be moved somewhere else in the near future.
const std::string & name()
static bool isRomMicroPC(MicroPC upc)
virtual Tick recvAtomicSnoop(PacketPtr pkt)
Receive an atomic snoop request packet from our peer.
uint32_t taskId() const
Get cpu task id.
void wakeup(ThreadID tid) override
StaticInstPtr curStaticInst
void advancePC(const Fault &fault)
void handleLockedRead(XC *xc, const RequestPtr &req)
GenericISA::DelaySlotPCState< MachInst > PCState
void schedulePowerGatingEvent()
void dataStatic(T *p)
Set the data pointer to the following value that should not be freed.
virtual void takeOverFrom(BaseCPU *cpu)
Load the state of a CPU from the previous CPU object, invoked on all new CPUs that are about to be switched in.
A Packet is used to encapsulate a transfer between two objects in the memory system (e.g. the L1 and L2 cache).
virtual void switchOut()
Prepare for another CPU to take over execution.
void printAddr(Addr a)
Inject a PrintReq for the given address to print the state of that address throughout the memory system.
ThreadID numThreads
Number of threads we're actually simulating (<= SMT_MAX_THREADS).
RequestorID dataRequestorId() const
Reads this CPU's unique data requestor ID.
Cycles is a wrapper class for representing cycle counts, i.e. a relative difference between two points in time, expressed in a number of clock cycles.
void regProbePoints() override
virtual void suspendContext(ThreadID thread_num)
Notify the CPU that the indicated context is now suspended.
const bool simulate_data_stalls
virtual Fault execute(ExecContext *xc, Trace::InstRecord *traceData) const =0
Tick sendAtomic(PacketPtr pkt)
Send an atomic request packet, where the data is moved and the state is updated in zero time,...
bool tryCompleteDrain()
Try to complete a drain request.
static const FlagsType STORE_NO_DATA
bool switchedOut() const
Determine if the CPU is switched out.
bool genMemFragmentRequest(const RequestPtr &req, Addr frag_addr, int size, Request::Flags flags, const std::vector< bool > &byte_enable, int &frag_size, int &size_left) const
Helper function used to set up the request for a single fragment of a memory access.
bool isAtomicMode() const
Is the system in atomic mode?
const T * getConstPtr() const
void verifyMemoryMode() const override
Verify that the system is in a memory mode supported by the CPU.
Fault writeMem(uint8_t *data, unsigned size, Addr addr, Request::Flags flags, uint64_t *res, const std::vector< bool > &byte_enable=std::vector< bool >()) override
virtual ~AtomicSimpleCPU()
@ Draining
Draining buffers pending serialization/handover.
#define panic(...)
This implements a cprintf based panic() function.
DrainState drain() override
Tick curTick()
The current simulated tick.
AtomicCPUDPort dcachePort