gem5  v19.0.0.0
global_memory_pipeline.hh
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2014-2015 Advanced Micro Devices, Inc.
3  * All rights reserved.
4  *
5  * For use for simulation and test purposes only
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions are met:
9  *
10  * 1. Redistributions of source code must retain the above copyright notice,
11  * this list of conditions and the following disclaimer.
12  *
13  * 2. Redistributions in binary form must reproduce the above copyright notice,
14  * this list of conditions and the following disclaimer in the documentation
15  * and/or other materials provided with the distribution.
16  *
17  * 3. Neither the name of the copyright holder nor the names of its
18  * contributors may be used to endorse or promote products derived from this
19  * software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
25  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Authors: John Kalamatianos,
34  * Sooraj Puthoor
35  */
36 
37 #ifndef __GLOBAL_MEMORY_PIPELINE_HH__
38 #define __GLOBAL_MEMORY_PIPELINE_HH__
39 
40 #include <queue>
41 #include <string>
42 
43 #include "gpu-compute/misc.hh"
44 #include "params/ComputeUnit.hh"
45 #include "sim/stats.hh"
46 
47 /*
48  * @file global_memory_pipeline.hh
49  *
50  * The global memory pipeline issues newly created global memory packets
51  from the pipeline to DTLB. The exec() method of this pipeline stage issues
52  * the packet to the DTLB if there is space available in the return fifo.
53  * This stage also retires previously issued loads and stores that have
54  * returned from the memory sub-system.
55  */
56 
57 class ComputeUnit;
58 
60 {
61  public:
62  GlobalMemPipeline(const ComputeUnitParams *params);
63  void init(ComputeUnit *cu);
64  void exec();
65 
66  std::queue<GPUDynInstPtr> &getGMStRespFIFO() { return gmReturnedStores; }
67  std::queue<GPUDynInstPtr> &getGMLdRespFIFO() { return gmReturnedLoads; }
68 
77 
83  void completeRequest(GPUDynInstPtr gpuDynInst);
84 
89  void issueRequest(GPUDynInstPtr gpuDynInst);
90 
98  void handleResponse(GPUDynInstPtr gpuDynInst);
99 
 // NOTE(review): the declaration line for this predicate was dropped by the
 // extraction; the body tests whether the load-return FIFO has room
 // (gmReturnedLoads vs. gmQueueSize).
100  bool
102  {
103  return gmReturnedLoads.size() < gmQueueSize;
104  }
105 
 // NOTE(review): declaration line dropped here as well; the body tests
 // whether the store-return FIFO has room (gmReturnedStores vs. gmQueueSize).
106  bool
108  {
109  return gmReturnedStores.size() < gmQueueSize;
110  }
111 
 // True when the issued-request FIFO can absorb pendReqs additional entries.
112  bool
113  isGMReqFIFOWrRdy(uint32_t pendReqs=0) const
114  {
115  return (gmIssuedRequests.size() + pendReqs) < gmQueueSize;
116  }
117 
118  const std::string &name() const { return _name; }
119  void regStats();
120 
 // Adds num_cycles to the loadVrfBankConflictCycles statistic.
 // NOTE(review): the declaration line (incLoadVRFBankConflictCycles) was
 // dropped by the extraction.
121  void
123  {
124  loadVrfBankConflictCycles += num_cycles;
125  }
126 
127  private:
129  std::string _name;
132 
133  // number of cycles of delaying the update of a VGPR that is the
134  // target of a load instruction (or the load component of an atomic)
135  // The delay is due to VRF bank conflicts
137  // Counters to track the inflight loads and stores
138  // so that we can provide the proper backpressure
139  // on the number of inflight memory operations.
142 
143  // The size of global memory.
145 
146  /*
147  * this buffer holds the memory responses when in-order data
148  * delivery is used - the responses are ordered by their unique
149  * sequence number, which is monotonically increasing. when a
150  * memory request returns its "done" flag is set to true. during
151  * each tick the GM pipeline will check if the oldest request
152  * is finished, and if so it will be removed from the queue.
153  *
154  * key: memory instruction's sequence ID
155  *
156  * value: pair holding the instruction pointer and a bool that
157  * is used to indicate whether or not the request has
158  * completed
159  */
160  std::map<uint64_t, std::pair<GPUDynInstPtr, bool>> gmOrderedRespBuffer;
161 
162  // Global Memory Request FIFO: all global memory requests
163  // are issued to this FIFO from the memory pipelines
164  std::queue<GPUDynInstPtr> gmIssuedRequests;
165 
166  // Global Store Response FIFO: all responses of global memory
167  // stores are sent to this FIFO from TCP
168  std::queue<GPUDynInstPtr> gmReturnedStores;
169 
170  // Global Load Response FIFO: all responses of global memory
171  // loads are sent to this FIFO from TCP
172  std::queue<GPUDynInstPtr> gmReturnedLoads;
173 };
174 
175 #endif // __GLOBAL_MEMORY_PIPELINE_HH__
bool isGMReqFIFOWrRdy(uint32_t pendReqs=0) const
std::queue< GPUDynInstPtr > gmIssuedRequests
void handleResponse(GPUDynInstPtr gpuDynInst)
this method handles responses sent to this GM pipeline by the CU.
void incLoadVRFBankConflictCycles(int num_cycles)
void completeRequest(GPUDynInstPtr gpuDynInst)
once a memory request is finished we remove it from the buffer.
GPUDynInstPtr getNextReadyResp()
find the next ready response to service.
std::map< uint64_t, std::pair< GPUDynInstPtr, bool > > gmOrderedRespBuffer
GlobalMemPipeline(const ComputeUnitParams *params)
std::queue< GPUDynInstPtr > & getGMLdRespFIFO()
std::queue< GPUDynInstPtr > gmReturnedStores
std::queue< GPUDynInstPtr > & getGMStRespFIFO()
This is a simple scalar statistic, like a counter.
Definition: statistics.hh:2508
std::shared_ptr< GPUDynInst > GPUDynInstPtr
Definition: misc.hh:48
Stats::Scalar loadVrfBankConflictCycles
const std::string & name() const
void issueRequest(GPUDynInstPtr gpuDynInst)
issues a request to the pipeline - i.e., enqueue it in the request buffer.
std::queue< GPUDynInstPtr > gmReturnedLoads
void init(ComputeUnit *cu)

Generated on Fri Feb 28 2020 16:27:01 for gem5 by doxygen 1.8.13