37#include "debug/GPUCoalescer.hh"
38#include "debug/MemoryAccess.hh"
39#include "debug/ProtocolTrace.hh"
49#include "params/VIPERCoalescer.hh"
59 m_cache_inv_pkt(nullptr),
95 return RequestStatus_Aliased;
100 if (pkt->
req->isInvL1()) {
110 if (pkt->
req->isInvL2()) {
114 return RequestStatus_Issued;
123 if (pkt != NULL && pkt->
req->hasContextId()) {
124 proc_id = pkt->
req->contextId();
129 if (pkt->
req->hasPC()) {
130 pc = pkt->
req->getPC();
143 uint32_t tableSize = crequest->
getPackets().size();
144 for (
int i = 0;
i < tableSize;
i++) {
146 uint32_t tmpOffset = (tmpPkt->
getAddr()) - line_addr;
147 uint32_t tmpSize = tmpPkt->
getSize();
151 atomicOps.push_back(tmpAtomicOp);
152 }
else if (tmpPkt->
isWrite()) {
156 for (
int j = 0; j < tmpSize; j++) {
157 accessMask[tmpOffset + j] =
true;
160 std::shared_ptr<RubyRequest> msg;
164 RubyAccessMode_Supervisor, pkt,
165 PrefetchBit_No, proc_id, 100,
166 blockSize, accessMask,
167 dataBlock, atomicOps, crequest->
getSeqNum());
171 RubyAccessMode_Supervisor, pkt,
172 PrefetchBit_No, proc_id, 100,
173 blockSize, accessMask,
181 DPRINTFR(ProtocolTrace,
"%15s %3s %10s%20s %6s>%-6s %s %s\n",
184 RubyRequestType_to_string(crequest->
getRubyType()));
187 "there should not be any I-Fetch requests in the GPU Coalescer");
217 for (
auto pkt : req_pkts) {
224 writeCompletePkt->
setAddr(pkt->getAddr());
236 auto key = instSeqNum;
246 RubyPort::SenderState *ss =
247 safe_cast<RubyPort::SenderState *>
248 (writeCompletePkt->senderState);
249 MemResponsePort *port = ss->port;
250 assert(port != NULL);
252 writeCompletePkt->senderState = ss->predecessor;
254 port->hitCallback(writeCompletePkt);
291 "There are %d Invalidations outstanding before Cache Walk\n",
294 for (
int i = 0;
i < size;
i++) {
297 RubyRequestType request_type = RubyRequestType_REPLACEMENT;
298 std::shared_ptr<RubyRequest> msg = std::make_shared<RubyRequest>(
300 request_type, RubyAccessMode_Supervisor,
310 "There are %d Invalidatons outstanding after Cache Walk\n",
321 assert(port !=
nullptr);
326 pkt->senderState =
ss->predecessor;
343 RubyRequestType request_type = RubyRequestType_InvL2;
345 std::shared_ptr<RubyRequest> msg = std::make_shared<RubyRequest>(
347 request_type, RubyAccessMode_Supervisor,
Tick clockEdge(Cycles cycles=Cycles(0)) const
Determine the tick when a cycle begins, by default the current one, but the argument also enables the calculation of a future cycle.
Tick cyclesToTicks(Cycles c) const
A Packet is used to encapsulate a transfer between two objects in the memory system (e.g., the L1 and L2 cache).
void setAddr(Addr _addr)
Update the address of this packet mid-transaction.
SenderState * senderState
This packet's sender state.
T * getPtr()
get a pointer to the data ptr.
RequestPtr req
A pointer to the original request.
AtomicOpFunctor * getAtomicOp() const
Accessor function to atomic op.
MemCmd cmd
The command field of the packet.
virtual Cycles mandatoryQueueLatency(const RubyRequestType ¶m_type)
Addr getAddressAtIdx(int idx) const
uint64_t getSeqNum() const
PacketPtr getFirstPkt() const
RubyRequestType getRubyType() const
std::vector< PacketPtr > & getPackets()
void setData(const uint8_t *data, int offset, int len)
Cycles m_deadlock_threshold
RequestStatus makeRequest(PacketPtr pkt) override
void completeHitCallback(std::vector< PacketPtr > &mylist)
CacheMemory * m_dataCache_ptr
EventFunctionWrapper deadlockCheckEvent
void enqueue(MsgPtr message, Tick curTime, Tick delta, bool bypassStrictFIFO=false)
void hitCallback(PacketPtr pkt)
AbstractController * m_controller
MessageBuffer * m_mandatory_q_ptr
static uint32_t getBlockSizeBytes()
void makeWriteCompletePkts(CoalescedRequest *crequest)
void invTCP()
Invalidate TCP.
PacketPtr m_cache_inv_pkt
void invTCC(PacketPtr pkt)
std::unordered_map< uint64_t, std::vector< PacketPtr > > m_writeCompletePktMap
RequestStatus makeRequest(PacketPtr pkt) override
void issueRequest(CoalescedRequest *crequest) override
VIPERCoalescer(const Params &)
std::unordered_map< Addr, std::vector< PacketPtr > > m_pending_invl2s
void invTCCCallback(Addr address)
void invTCPCallback(Addr address)
VIPERCoalescerParams Params
void writeCompleteCallback(Addr address, uint64_t instSeqNum)
bool scheduled() const
Determine if the current event is scheduled.
void schedule(Event &event, Tick when)
#define fatal_if(cond,...)
Conditional fatal macro that checks the supplied condition and only causes a fatal error if the condition is true.
Addr makeLineAddress(Addr addr)
std::string printAddress(Addr addr)
Copyright (c) 2024 - Pranith Kumar Copyright (c) 2020 Inria All rights reserved.
T safe_cast(U &&ref_or_ptr)
Tick curTick()
The universal simulation clock.
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
uint64_t Tick
Tick count type.
Declaration of the Packet class.