#include "debug/GPUCoalescer.hh"
#include "debug/MemoryAccess.hh"
#include "debug/ProtocolTrace.hh"
#include "params/VIPERCoalescer.hh"

      m_cache_inv_pkt(nullptr),

    // Only one cache invalidation request may be outstanding at a time.
    if (m_cache_inv_pkt) {
        return RequestStatus_Aliased;
    }
    if (pkt->req->isInvL1()) {
        m_cache_inv_pkt = pkt;
        invTCP();
    }
    if (pkt->req->isInvL2()) {
        invTCC(pkt);
    }
    return RequestStatus_Issued;
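
// issueRequest() converts the coalesced packets into a single RubyRequest
// and hands it to the controller's mandatory queue.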
    if (pkt != NULL && pkt->req->hasContextId()) {
        proc_id = pkt->req->contextId();
    }
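    // If valid, copy the pc to the ruby request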
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }
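
    // Create a write mask recording which bytes of the line are written
    // and which carry atomic operations, so partial reads of those writes
    // can be serviced.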
    uint32_t tableSize = crequest->getPackets().size();
    for (int i = 0; i < tableSize; i++) {
        PacketPtr tmpPkt = crequest->getPackets()[i];
        uint32_t tmpOffset = (tmpPkt->getAddr()) - line_addr;
        uint32_t tmpSize = tmpPkt->getSize();
        if (tmpPkt->isAtomicOp()) {
            std::pair<int, AtomicOpFunctor *> tmpAtomicOp(tmpOffset,
                                                          tmpPkt->getAtomicOp());
            atomicOps.push_back(tmpAtomicOp);
        } else if (tmpPkt->isWrite()) {
            dataBlock.setData(tmpPkt->getPtr<uint8_t>(),
                              tmpOffset, tmpSize);
        }
        for (int j = 0; j < tmpSize; j++) {
            accessMask[tmpOffset + j] = true;
        }
    }
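
    // Build one Ruby request for the whole line; atomic requests also
    // carry the collected atomic operations.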
    std::shared_ptr<RubyRequest> msg;
    if (pkt->isAtomicOp()) {
        msg = std::make_shared<RubyRequest>(clockEdge(), blockSize,
                              pkt->getAddr(), pkt->getSize(), pc,
                              crequest->getRubyType(),
                              RubyAccessMode_Supervisor, pkt,
                              PrefetchBit_No, proc_id, 100,
                              blockSize, accessMask,
                              dataBlock, atomicOps, crequest->getSeqNum());
    } else {
        msg = std::make_shared<RubyRequest>(clockEdge(), blockSize,
                              pkt->getAddr(), pkt->getSize(), pc,
                              crequest->getRubyType(),
                              RubyAccessMode_Supervisor, pkt,
                              PrefetchBit_No, proc_id, 100,
                              blockSize, accessMask,
                              dataBlock, crequest->getSeqNum());
    }

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %s\n",
             curTick(), m_version, "Coal", "Begin", "", "",
             printAddress(msg->getPhysicalAddress()),
             RubyRequestType_to_string(crequest->getRubyType()));
189 "there should not be any I-Fetch requests in the GPU Coalescer");

    for (auto pkt : req_pkts) {
        PacketPtr writeCompletePkt =
            new Packet(pkt->req, MemCmd::WriteCompleteResp);
        writeCompletePkt->setAddr(pkt->getAddr());

    auto key = instSeqNum;
        RubyPort::SenderState *ss =
            safe_cast<RubyPort::SenderState *>(writeCompletePkt->senderState);
        MemResponsePort *port = ss->port;
        assert(port != NULL);

        writeCompletePkt->senderState = ss->predecessor;
        delete ss;
        port->hitCallback(writeCompletePkt);
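
// invTCP() walks every block in the TCP (L1) data cache and issues a
// REPLACEMENT request for it, invalidating the whole cache.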
295 "There are %d Invalidations outstanding before Cache Walk\n",
    for (int i = 0; i < size; i++) {
        Addr addr = m_dataCache_ptr->getAddressAtIdx(i);
        // Evict read-only data
        RubyRequestType request_type = RubyRequestType_REPLACEMENT;
        std::shared_ptr<RubyRequest> msg = std::make_shared<RubyRequest>(
            clockEdge(), addr, 0, 0, request_type, RubyAccessMode_Supervisor,
            nullptr);
315 "There are %d Invalidatons outstanding after Cache Walk\n",
    RubyPort::SenderState *ss =
        safe_cast<RubyPort::SenderState *>(pkt->senderState);
    MemResponsePort *port = ss->port;
    assert(port != nullptr);

    pkt->senderState = ss->predecessor;
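
// invTCC() issues an InvL2 request to invalidate the TCC; the triggering
// packet is tracked in m_pending_invl2s until invTCCCallback() completes it.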
    RubyRequestType request_type = RubyRequestType_InvL2;
    std::shared_ptr<RubyRequest> msg = std::make_shared<RubyRequest>(
        clockEdge(), addr, 0, 0, request_type, RubyAccessMode_Supervisor,
        nullptr);