gem5 v24.0.0.0
VIPERCoalescer.cc
/*
 * Copyright (c) 2013-2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/system/VIPERCoalescer.hh"

#include "base/logging.hh"
#include "base/str.hh"
#include "debug/GPUCoalescer.hh"
#include "debug/MemoryAccess.hh"
#include "debug/ProtocolTrace.hh"
#include "mem/packet.hh"
#include "mem/ruby/network/MessageBuffer.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/structures/CacheMemory.hh"
#include "mem/ruby/system/GPUCoalescer.hh"
#include "mem/ruby/system/RubySystem.hh"
#include "params/VIPERCoalescer.hh"

namespace gem5
{

namespace ruby
{

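// Note: m_cache_inv_pkt holds the MemSyncReq/INV_L1 packet currently being
// serviced (the coalescer handles at most one TCP invalidation at a time),
// and m_num_pending_invs counts the per-block invalidations that are still
// outstanding from the cache walk started by invTCP().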
VIPERCoalescer::VIPERCoalescer(const Params &p)
    : GPUCoalescer(p),
      m_cache_inv_pkt(nullptr),
      m_num_pending_invs(0)
{
}

VIPERCoalescer::~VIPERCoalescer()
{
}

// Places an uncoalesced packet in uncoalescedTable. If the packet is a
// special type (MemFence, scoping, etc), it is issued immediately.
RequestStatus
VIPERCoalescer::makeRequest(PacketPtr pkt)
{
    // VIPER only supports the following memory request types:
    //    MemSyncReq & INV_L1 : TCP cache invalidation
    //    MemSyncReq & INV_L2 : TCC cache invalidation
    //    ReadReq : cache read
    //    WriteReq : cache write
    //    AtomicOp : cache atomic
    //    Flush : flush and invalidate cache
    //
    // VIPER does not expect MemSyncReq & Release since the compute unit
    // does not specify an equivalent type of memory request.
    assert((pkt->cmd == MemCmd::MemSyncReq && pkt->req->isInvL1()) ||
           (pkt->cmd == MemCmd::MemSyncReq && pkt->req->isInvL2()) ||
            pkt->cmd == MemCmd::ReadReq ||
            pkt->cmd == MemCmd::WriteReq ||
            pkt->cmd == MemCmd::FlushReq ||
            pkt->isAtomicOp());

    if (pkt->req->isInvL1() && m_cache_inv_pkt) {
        // In VIPER protocol, the coalescer is not able to handle two or
        // more cache invalidation requests at a time. Cache invalidation
        // requests must be serialized to ensure that all stale data in
        // TCP are invalidated correctly. If there's already a pending
        // cache invalidation request, we must retry this request later.
        return RequestStatus_Aliased;
    }

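    // Hand the packet to the base class first: GPUCoalescer::makeRequest()
    // is what places it in the uncoalescedTable for coalescing. The code
    // below adds only the VIPER-specific handling of cache invalidation
    // sync requests.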
    GPUCoalescer::makeRequest(pkt);

    if (pkt->req->isInvL1()) {
        // In VIPER protocol, a compute unit sends a MemSyncReq with INV_L1
        // flag to invalidate TCP. Upon receiving a request of this type,
        // VIPERCoalescer starts a cache walk to invalidate all valid entries
        // in TCP. The request is completed once all entries are invalidated.
        assert(!m_cache_inv_pkt);
        m_cache_inv_pkt = pkt;
        invTCP();
    }

    if (pkt->req->isInvL2()) {
        invTCC(pkt);
    }

    return RequestStatus_Issued;
}

void
VIPERCoalescer::issueRequest(CoalescedRequest *crequest)
{
    PacketPtr pkt = crequest->getFirstPkt();

    int proc_id = -1;
    if (pkt != NULL && pkt->req->hasContextId()) {
        proc_id = pkt->req->contextId();
    }

    // If valid, copy the pc to the ruby request
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    Addr line_addr = makeLineAddress(pkt->getAddr());

    // Creating WriteMask that records written bytes
    // and atomic operations. This enables partial writes
    // and partial reads of those writes
    DataBlock dataBlock;
    dataBlock.clear();
    uint32_t blockSize = RubySystem::getBlockSizeBytes();
    std::vector<bool> accessMask(blockSize, false);
    std::vector<std::pair<int, AtomicOpFunctor*>> atomicOps;
    uint32_t tableSize = crequest->getPackets().size();
    for (int i = 0; i < tableSize; i++) {
        PacketPtr tmpPkt = crequest->getPackets()[i];
        uint32_t tmpOffset = (tmpPkt->getAddr()) - line_addr;
        uint32_t tmpSize = tmpPkt->getSize();
        if (tmpPkt->isAtomicOp()) {
            std::pair<int, AtomicOpFunctor *> tmpAtomicOp(tmpOffset,
                                                          tmpPkt->getAtomicOp());
            atomicOps.push_back(tmpAtomicOp);
        } else if (tmpPkt->isWrite()) {
            dataBlock.setData(tmpPkt->getPtr<uint8_t>(),
                              tmpOffset, tmpSize);
        }
        for (int j = 0; j < tmpSize; j++) {
            accessMask[tmpOffset + j] = true;
        }
    }
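    // All packets coalesced into this request target the same cache line,
    // so a single RubyRequest is issued for the whole line. It carries the
    // per-byte accessMask plus either the merged write data or, for atomics,
    // the list of (offset, AtomicOpFunctor) pairs gathered above.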
    std::shared_ptr<RubyRequest> msg;
    if (pkt->isAtomicOp()) {
        msg = std::make_shared<RubyRequest>(clockEdge(), pkt->getAddr(),
                              pkt->getSize(), pc, crequest->getRubyType(),
                              RubyAccessMode_Supervisor, pkt,
                              PrefetchBit_No, proc_id, 100,
                              blockSize, accessMask,
                              dataBlock, atomicOps, crequest->getSeqNum());
    } else {
        msg = std::make_shared<RubyRequest>(clockEdge(), pkt->getAddr(),
                              pkt->getSize(), pc, crequest->getRubyType(),
                              RubyAccessMode_Supervisor, pkt,
                              PrefetchBit_No, proc_id, 100,
                              blockSize, accessMask,
                              dataBlock, crequest->getSeqNum());
    }

    if (pkt->cmd == MemCmd::WriteReq) {
        makeWriteCompletePkts(crequest);
    }

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %s\n",
             curTick(), m_version, "Coal", "Begin", "", "",
             printAddress(msg->getPhysicalAddress()),
             RubyRequestType_to_string(crequest->getRubyType()));

    fatal_if(crequest->getRubyType() == RubyRequestType_IFETCH,
             "there should not be any I-Fetch requests in the GPU Coalescer");

    if (!deadlockCheckEvent.scheduled()) {
        schedule(deadlockCheckEvent,
                 m_deadlock_threshold * clockPeriod() +
                 curTick());
    }

    assert(m_mandatory_q_ptr);
    Tick latency = cyclesToTicks(
        m_controller->mandatoryQueueLatency(crequest->getRubyType()));
    m_mandatory_q_ptr->enqueue(msg, clockEdge(), latency);
}

void
VIPERCoalescer::makeWriteCompletePkts(CoalescedRequest *crequest)
{
    // In VIPER protocol, for each write request, down-stream caches
    // return two responses: writeCallback and writeCompleteCallback.
    // We need to prepare a writeCompletePkt for each write request so
    // that when writeCompleteCallback is called, we can respond to the
    // requesting wavefront right away.
    // writeCompletePkt inherits request and senderState of the original
    // write request packet so that we can find the original requestor
    // later. This assumes that request and senderState are not deleted
    // before writeCompleteCallback is called.

    auto key = crequest->getSeqNum();
    std::vector<PacketPtr>& req_pkts = crequest->getPackets();

    for (auto pkt : req_pkts) {
        DPRINTF(GPUCoalescer, "makeWriteCompletePkts: instSeqNum %d\n",
                key);
        assert(pkt->cmd == MemCmd::WriteReq);

        PacketPtr writeCompletePkt = new Packet(pkt->req,
            MemCmd::WriteCompleteResp);
        writeCompletePkt->setAddr(pkt->getAddr());
        writeCompletePkt->senderState = pkt->senderState;
        m_writeCompletePktMap[key].push_back(writeCompletePkt);
    }
}

void
VIPERCoalescer::writeCompleteCallback(Addr addr, uint64_t instSeqNum)
{
    DPRINTF(GPUCoalescer, "writeCompleteCallback: instSeqNum %d addr 0x%x\n",
            instSeqNum, addr);

    auto key = instSeqNum;
    assert(m_writeCompletePktMap.count(key) == 1 &&
           !m_writeCompletePktMap[key].empty());

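    // Erase-remove idiom: every writeCompletePkt whose line address matches
    // the completed write is turned into a response via hitCallback() and
    // then removed from the per-instruction list; non-matching packets stay
    // pending until their own completion arrives.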
    m_writeCompletePktMap[key].erase(
        std::remove_if(
            m_writeCompletePktMap[key].begin(),
            m_writeCompletePktMap[key].end(),
            [addr](PacketPtr writeCompletePkt) -> bool {
                if (makeLineAddress(writeCompletePkt->getAddr()) == addr) {
                    RubyPort::SenderState *ss =
                        safe_cast<RubyPort::SenderState *>
                            (writeCompletePkt->senderState);
                    MemResponsePort *port = ss->port;
                    assert(port != NULL);

                    writeCompletePkt->senderState = ss->predecessor;
                    delete ss;
                    port->hitCallback(writeCompletePkt);
                    return true;
                }
                return false;
            }
        ),
        m_writeCompletePktMap[key].end()
    );

    if (m_writeCompletePktMap[key].empty())
        m_writeCompletePktMap.erase(key);
}

void
VIPERCoalescer::invTCPCallback(Addr addr)
{
    assert(m_cache_inv_pkt && m_num_pending_invs > 0);

    // One more block from the TCP cache walk has been invalidated
    m_num_pending_invs--;

    if (m_num_pending_invs == 0) {
        std::vector<PacketPtr> pkt_list { m_cache_inv_pkt };
        m_cache_inv_pkt = nullptr;
        completeHitCallback(pkt_list);
    }
}

/**
 * Invalidate TCP
 */
void
VIPERCoalescer::invTCP()
{
    int size = m_dataCache_ptr->getNumBlocks();
    DPRINTF(GPUCoalescer,
            "There are %d Invalidations outstanding before Cache Walk\n",
            m_num_pending_invs);
    // Walk the cache
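    // Each block index in the TCP data cache gets its own REPLACEMENT
    // request on the mandatory queue; m_num_pending_invs counts these, and
    // invTCPCallback() decrements it as each block finishes, so the original
    // MemSyncReq completes only after the whole walk is done.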
    for (int i = 0; i < size; i++) {
        Addr addr = m_dataCache_ptr->getAddressAtIdx(i);
        // Evict Read-only data
        RubyRequestType request_type = RubyRequestType_REPLACEMENT;
        std::shared_ptr<RubyRequest> msg = std::make_shared<RubyRequest>(
            clockEdge(), addr, 0, 0,
            request_type, RubyAccessMode_Supervisor,
            nullptr);
        DPRINTF(GPUCoalescer, "Evicting addr 0x%x\n", addr);
        assert(m_mandatory_q_ptr != NULL);
        Tick latency = cyclesToTicks(
            m_controller->mandatoryQueueLatency(request_type));
        m_mandatory_q_ptr->enqueue(msg, clockEdge(), latency);
        m_num_pending_invs++;
    }
    DPRINTF(GPUCoalescer,
            "There are %d Invalidations outstanding after Cache Walk\n",
            m_num_pending_invs);
}

void
VIPERCoalescer::invTCCCallback(Addr addr)
{
    for (auto& pkt : m_pending_invl2s[addr]) {
        RubyPort::SenderState *ss =
            safe_cast<RubyPort::SenderState *>(pkt->senderState);
        MemResponsePort *port = ss->port;
        assert(port != nullptr);

        // Now convert to MemSyncResp
        pkt->makeResponse();

        pkt->senderState = ss->predecessor;
        delete ss;
        port->hitCallback(pkt);
    }
    m_pending_invl2s.erase(addr);
}

/*
 * Send an invalidate to a specific address in the TCC.
 */
void
VIPERCoalescer::invTCC(PacketPtr pkt)
{
    assert(pkt);
    assert(pkt->req);

    Addr addr = pkt->req->getPaddr();
    RubyRequestType request_type = RubyRequestType_InvL2;

    std::shared_ptr<RubyRequest> msg = std::make_shared<RubyRequest>(
        clockEdge(), addr, 0, 0,
        request_type, RubyAccessMode_Supervisor,
        nullptr);

    DPRINTF(GPUCoalescer, "Sending L2 invalidate to 0x%x\n", addr);

    assert(m_mandatory_q_ptr);
    Tick latency = cyclesToTicks(
        m_controller->mandatoryQueueLatency(request_type));
    m_mandatory_q_ptr->enqueue(msg, clockEdge(), latency);

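    // Park the sync packet until the controller reports the L2 invalidation
    // done; invTCCCallback() will look it up by address, convert it to a
    // MemSyncResp, and return it to the requesting port.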
    m_pending_invl2s[addr].push_back(pkt);
}

} // namespace ruby
} // namespace gem5