#ifndef __MEM_RUBY_SYSTEM_GPU_COALESCER_HH__
#define __MEM_RUBY_SYSTEM_GPU_COALESCER_HH__

#include <unordered_map>

#include "mem/ruby/protocol/PrefetchBit.hh"
#include "mem/ruby/protocol/RubyAccessMode.hh"
#include "mem/ruby/protocol/RubyRequestType.hh"
#include "mem/ruby/protocol/SequencerRequestType.hh"

struct RubyGPUCoalescerParams;

// ... member declarations excerpted from the class bodies; the full
// signatures also appear in the member index below.

void addPendingReq(RubyPort::MemResponsePort *port, GPUDynInstPtr inst,
                   bool usingRubyTester);
void ackWriteCompletion(bool usingRubyTester);

// Callback overloads carrying the per-phase timestamps that feed the
// miss-latency histograms.
void writeCallback(Addr address, MachineType mach, DataBlock &data,
                   Cycles initialRequestTime,
                   Cycles forwardRequestTime,
                   Cycles firstResponseTime);

void readCallback(Addr address, MachineType mach, DataBlock &data,
                  Cycles initialRequestTime,
                  Cycles forwardRequestTime,
                  Cycles firstResponseTime);

void print(std::ostream &out) const;

bool tryCacheAccess(Addr addr, RubyRequestType type,
                    Addr pc, RubyAccessMode access_mode,
                    int size, DataBlock *&data_ptr);

void hitCallback(CoalescedRequest *crequest, MachineType mach,
                 DataBlock &data, bool success,
                 Cycles initialRequestTime,
                 Cycles forwardRequestTime,
                 Cycles firstResponseTime,
                 bool isRegion);

void recordMissLatency(CoalescedRequest *crequest, MachineType mach,
                       Cycles initialRequestTime,
                       Cycles forwardRequestTime,
                       Cycles firstResponseTime,
                       bool success, bool isRegion);

#endif // __MEM_RUBY_SYSTEM_GPU_COALESCER_HH__
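GPUCoalescer leaves issueRequest() pure virtual (see the member index below), so a concrete coalescer supplies the protocol-specific issue path and may refine getRequestType(). The following is a minimal sketch of such a subclass; it assumes a gem5 source tree and omits the SimObject/parameter plumbing a real coalescer needs, and the class name and the simple load/store mapping are illustrative only:

#include "mem/ruby/system/GPUCoalescer.hh"

namespace gem5
{
namespace ruby
{

// Hypothetical subclass; only the pure-virtual hook must be provided.
class MyCoalescer : public GPUCoalescer
{
  public:
    using GPUCoalescer::GPUCoalescer;   // inherit GPUCoalescer(const Params &)

    void
    issueRequest(CoalescedRequest *crequest) override
    {
        // Protocol-specific: build a request from crequest and hand it to
        // the cache controller. Omitted here because the message format
        // depends on the protocol in use.
    }

    RubyRequestType
    getRequestType(PacketPtr pkt) override
    {
        // Illustrative mapping: loads vs. stores; real coalescers also
        // handle atomics and memory fences.
        return pkt->isWrite() ? RubyRequestType_ST : RubyRequestType_LD;
    }
};

} // namespace ruby
} // namespace gem5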
virtual RubyRequestType getRequestType(PacketPtr pkt)
void printRequestTable(std::stringstream &ss)
std::unordered_map< int, PacketPtr > kernelEndList
int m_load_waiting_on_load_cycles
int16_t PortID
Port index/ID type, and a symbolic name for an invalid port id.
void setIssueTime(Cycles _issueTime)
std::unordered_map< uint64_t, PendingWriteInst > pendingWriteInsts
statistics::Histogram & getTypeLatencyHist(uint32_t t)
GMTokenPort(const std::string &name, PortID id=InvalidPortID)
const std::string name() const
Return port name (for DPRINTF).
Tick recvAtomic(PacketPtr)
Receive an atomic request packet from the peer.
void ackWriteCompletion(bool usingRubyTester)
std::vector< statistics::Histogram * > m_typeLatencyHist
statistics::Histogram & getOutstandReqHist()
void descheduleDeadlockEvent() override
std::map< Addr, std::deque< CoalescedRequest * > > coalescedTable
int getNumPendingStores()
GPUCoalescer & operator=(const GPUCoalescer &obj)
std::vector< statistics::Histogram * > m_missTypeLatencyHist
std::ostream & operator<<(std::ostream &os, const BoolVec &myvector)
virtual void issueMemSyncRequest(PacketPtr pkt)
AddrRangeList getAddrRanges() const
Get a list of the non-overlapping address ranges the owner is responsible for.
void setPacketsRemaining(InstSeqNum seqNum, int count)
const PortID id
A numeric identifier to distinguish ports in a vector, and set to InvalidPortID in case this port is not connected to any other port.
bool coalescePacket(PacketPtr pkt)
std::vector< int > newKernelEnds
std::map< InstSeqNum, PerInstPackets > instMap
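instMap keys the still-uncoalesced packets of each memory instruction by its sequence number (PerInstPackets is a std::list<PacketPtr>). Below is a standalone sketch of that bookkeeping only; FakePacket and the insertPacket lambda are simplified stand-ins, not gem5 code:

#include <cstdint>
#include <iostream>
#include <list>
#include <map>

using InstSeqNum = uint64_t;
using FakePacket = int;                       // stand-in for PacketPtr
using PerInstPackets = std::list<FakePacket>;

int main()
{
    // Sequence numbers grow in program order, so std::map keeps the oldest
    // instruction at begin(); the coalescer drains from the front.
    std::map<InstSeqNum, PerInstPackets> instMap;

    auto insertPacket = [&](InstSeqNum seq, FakePacket pkt) {
        instMap[seq].push_back(pkt);          // creates the list on first use
    };

    insertPacket(42, 1);
    insertPacket(42, 2);                      // second lane of the same instruction
    insertPacket(43, 3);

    // Pop the oldest instruction once all of its packets have been issued.
    auto &oldest = instMap.begin()->second;
    std::cout << "inst " << instMap.begin()->first
              << " has " << oldest.size() << " pending packets\n";
    oldest.clear();
    if (oldest.empty())
        instMap.erase(instMap.begin());

    return 0;
}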
GPUDynInstPtr getDynInst(PacketPtr pkt) const
bool recvTimingReq(PacketPtr)
Receive a timing request from the peer.
void writeCallback(Addr address, DataBlock &data)
bool receiveWriteCompleteAck()
void setRubyType(RubyRequestType type)
int m_load_waiting_on_store_cycles
CoalescedRequest(uint64_t _seqNum)
const PortID InvalidPortID
void recvFunctional(PacketPtr)
Receive a functional request packet from the peer.
int outstandingCount() const override
std::vector< statistics::Histogram * > m_IssueToInitialDelayHist
Histograms for recording the breakdown of miss latency.
void evictionCallback(Addr address)
bool m_deadlock_check_scheduled
std::map< InstSeqNum, int > instPktsRemaining
PacketPtr getFirstPkt() const
void printProgress(std::ostream &out) const
Cycles is a wrapper class for representing cycle counts, i.e. a relative difference between two points in time, expressed in a number of clock cycles.
Cycles getIssueTime() const
GMTokenPort & getGMTokenPort()
statistics::Histogram & getMissTypeMachLatencyHist(uint32_t r, uint32_t t) const
statistics::Histogram m_latencyHist
Histogram for holding latency profile of all requests.
virtual void issueRequest(CoalescedRequest *crequest)=0
PerInstPackets * getInstPackets(int offset)
bool tryCacheAccess(Addr addr, RubyRequestType type, Addr pc, RubyAccessMode access_mode, int size, DataBlock *&data_ptr)
statistics::Histogram & getMissMachLatencyHist(uint32_t t) const
void recordMissLatency(CoalescedRequest *crequest, MachineType mach, Cycles initialRequestTime, Cycles forwardRequestTime, Cycles firstResponseTime, bool success, bool isRegion)
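recordMissLatency() splits a miss's round trip into the per-phase delays that feed m_IssueToInitialDelayHist, m_InitialToForwardDelayHist, m_ForwardToFirstResponseDelayHist and m_FirstResponseToCompletionDelayHist. The standalone sketch below works through that arithmetic; the timestamp values are made up and plain integers stand in for Cycles, with printouts in place of the histograms:

#include <cstdint>
#include <iostream>

int main()
{
    // Timestamps (in cycles) a request picks up on its way through Ruby.
    uint64_t issued_time          = 100;  // coalescer issued the request
    uint64_t initial_request_time = 104;  // first controller saw it
    uint64_t forward_request_time = 120;  // request forwarded onward
    uint64_t first_response_time  = 180;  // first response observed
    uint64_t completion_time      = 190;  // data returned to the coalescer

    // Per-phase breakdown recorded by the corresponding histograms.
    std::cout << "issue -> initial:        " << initial_request_time - issued_time << "\n";
    std::cout << "initial -> forward:      " << forward_request_time - initial_request_time << "\n";
    std::cout << "forward -> 1st response: " << first_response_time - forward_request_time << "\n";
    std::cout << "1st response -> done:    " << completion_time - first_response_time << "\n";

    // Total miss latency, as sampled by m_missLatencyHist.
    std::cout << "total:                   " << completion_time - issued_time << "\n";
    return 0;
}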
void addPendingReq(RubyPort::MemResponsePort *port, GPUDynInstPtr inst, bool usingRubyTester)
RubyGPUCoalescerParams Params
std::vector< PacketPtr > pkts
A Packet is used to encapsulate a transfer between two objects in the memory system (e.g. the L1 and L2 cache).
bool isDeadlockEventScheduled() const override
statistics::Histogram & getForwardRequestToFirstResponseHist(const MachineType t) const
statistics::Histogram & getMissLatencyHist()
ProbePointArg< PacketInfo > Packet
Packet probe point.
uint64_t Tick
Tick count type.
std::vector< PacketPtr > & getPackets()
std::list< PacketPtr > PerInstPackets
statistics::Histogram & getMissTypeLatencyHist(uint32_t t)
Cycles m_deadlock_threshold
statistics::Histogram & getFirstResponseToCompletionDelayHist(const MachineType t) const
bool sendTimingResp(PacketPtr pkt)
Attempt to send a timing response to the request port by calling its corresponding receive function.
RubyPort::MemResponsePort * originalPort
std::vector< statistics::Histogram * > m_InitialToForwardDelayHist
Port & getPort(const std::string &if_name, PortID idx=InvalidPortID) override
Get a port with a given name and index.
std::unordered_map< uint64_t, std::deque< CoalescedRequest * > > coalescedReqs
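coalescedTable and coalescedReqs group packets whose accesses fall on the same cache line behind a single CoalescedRequest. The standalone sketch below shows only that line-granularity grouping; the 64-byte line size, FakeCoalescedRequest and makeLineAddress are simplified stand-ins:

#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

using Addr = uint64_t;

// Simplified stand-in for CoalescedRequest: the line address plus the
// lane accesses folded into one downstream request.
struct FakeCoalescedRequest
{
    Addr lineAddr;
    std::vector<Addr> pktAddrs;
};

constexpr Addr kLineBytes = 64;

Addr makeLineAddress(Addr a) { return a & ~(kLineBytes - 1); }

int main()
{
    // Byte addresses touched by the lanes of one wavefront instruction.
    std::vector<Addr> lanes = {0x1000, 0x1008, 0x1040, 0x1010, 0x1048};

    // Group by cache line: lanes on the same line share one request.
    std::map<Addr, FakeCoalescedRequest> table;
    for (Addr a : lanes) {
        Addr line = makeLineAddress(a);
        auto &req = table[line];
        req.lineAddr = line;
        req.pktAddrs.push_back(a);
    }

    for (const auto &[line, req] : table)
        std::cout << std::hex << "line 0x" << line << ": "
                  << std::dec << req.pktAddrs.size() << " packet(s)\n";
    return 0;
}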
void checkDeadlock(Tick threshold)
UncoalescedTable(GPUCoalescer *gc)
void completeHitCallback(std::vector< PacketPtr > &mylist)
bool assumingRfOCoherence
void insertPacket(PacketPtr pkt)
EventFunctionWrapper issueEvent
void setSeqNum(uint64_t _seqNum)
bool m_runningGarnetStandalone
uint64_t Addr
Address type. This will probably be moved somewhere else in the near future.
void writeCompleteCallback(Addr address, uint64_t instSeqNum, MachineType mach)
SenderState * senderState
This packet's sender state.
std::vector< statistics::Histogram * > m_ForwardToFirstResponseDelayHist
std::shared_ptr< GPUDynInst > GPUDynInstPtr
void deschedule(Event &event)
GPUCoalescer(const Params &)
EventFunctionWrapper deadlockCheckEvent
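deadlockCheckEvent periodically walks the outstanding requests and reports a likely deadlock if any packet has been waiting longer than the configured threshold (see checkDeadlock(Tick threshold) and m_deadlock_threshold). A standalone sketch of that watchdog logic follows; the tick values and packet ids are made up, and a plain counter replaces gem5's event queue:

#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <map>

using Tick = uint64_t;

int main()
{
    const Tick threshold = 500000;      // stand-in for the deadlock threshold
    Tick curTick = 900000;              // pretend "now"

    // Outstanding packets and the tick at which each was issued.
    std::map<int, Tick> issueTick = { {1, 100000}, {2, 850000} };

    // The periodic check: any packet older than the threshold means the
    // memory system is likely wedged, so bail out loudly.
    for (const auto &[pktId, issued] : issueTick) {
        if (curTick - issued > threshold) {
            std::cerr << "Possible deadlock: packet " << pktId
                      << " outstanding for " << (curTick - issued)
                      << " ticks\n";
            return EXIT_FAILURE;        // gem5 would panic() here
        }
    }
    std::cout << "No request exceeded the deadlock threshold\n";
    return 0;
}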
void resetStats() override
Callback to reset stats.
void hitCallback(CoalescedRequest *crequest, MachineType mach, DataBlock &data, bool success, Cycles initialRequestTime, Cycles forwardRequestTime, Cycles firstResponseTime, bool isRegion)
void readCallback(Addr address, DataBlock &data)
void initPacketsRemaining(InstSeqNum seqNum, int count)
CacheMemory * m_dataCache_ptr
uint64_t getSeqNum() const
Ports are used to interface objects to each other.
statistics::Histogram & getLatencyHist()
std::vector< std::vector< statistics::Histogram * > > m_missTypeMachLatencyHist
statistics::Histogram m_outstandReqHist
Histogram for number of outstanding requests per cycle.
void printRequestTable(std::stringstream &ss)
UncoalescedTable uncoalescedTable
bool areRequestsDone(const InstSeqNum instSeqNum)
RequestStatus makeRequest(PacketPtr pkt) override
void kernelCallback(int wavefront_id)
std::vector< statistics::Histogram * > m_missMachLatencyHist
Histograms for profiling the latencies for requests that required external messages.
statistics::Histogram m_missLatencyHist
Histogram for holding latency profile of all requests that miss in the controller connected to this sequencer.
GPUDynInstPtr gpuDynInstPtr
int m_store_waiting_on_store_cycles
virtual void atomicCallback(Addr address, MachineType mach, const DataBlock &data)
void insertKernel(int wavefront_id, PacketPtr pkt)
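insertKernel() records the kernel-end packet of each wavefront in kernelEndList; kernelCallback() later responds and erases the entry once that wavefront's requests have drained. The standalone sketch below shows only this bookkeeping; the lambdas and the integer stand-in for PacketPtr are illustrative:

#include <iostream>
#include <unordered_map>

using FakePacket = int;   // stand-in for PacketPtr

int main()
{
    // wavefront id -> kernel-end packet waiting for completion
    std::unordered_map<int, FakePacket> kernelEndList;

    auto insertKernel = [&](int wavefrontId, FakePacket pkt) {
        // At most one kernel-end packet pending per wavefront.
        if (kernelEndList.count(wavefrontId) == 0)
            kernelEndList[wavefrontId] = pkt;
    };

    auto kernelCallback = [&](int wavefrontId) {
        auto it = kernelEndList.find(wavefrontId);
        if (it != kernelEndList.end()) {
            std::cout << "responding to kernel-end packet " << it->second
                      << " of wavefront " << wavefrontId << "\n";
            kernelEndList.erase(it);
        }
    };

    insertKernel(0, 7);
    insertKernel(1, 9);
    kernelCallback(0);     // wavefront 0 has drained its requests
    std::cout << kernelEndList.size() << " kernel-end packet(s) still pending\n";
    return 0;
}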
statistics::Histogram & getIssueToInitialDelayHist(uint32_t t) const
int getPacketsRemaining(InstSeqNum seqNum)
int m_max_outstanding_requests
void insertPacket(PacketPtr pkt)
RubyRequestType getRubyType() const
int m_store_waiting_on_load_cycles
std::vector< statistics::Histogram * > m_FirstResponseToCompletionDelayHist
void print(std::ostream &out) const
CacheMemory * m_instCache_ptr
statistics::Histogram & getInitialToForwardDelayHist(const MachineType t) const
bool scheduled() const
Determine if the current event is scheduled.
RubyTester::SenderState SenderState