scalar_memory_pipeline.hh
/*
 * Copyright (c) 2016-2017 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __GPU_COMPUTE_SCALAR_MEMORY_PIPELINE_HH__
#define __GPU_COMPUTE_SCALAR_MEMORY_PIPELINE_HH__

#include <queue>
#include <string>

#include "gpu-compute/misc.hh"
#include "mem/request.hh"
#include "params/ComputeUnit.hh"
#include "sim/stats.hh"

/*
 * @file scalar_memory_pipeline.hh
 *
 * The scalar memory pipeline issues global memory packets
 * from the scalar ALU to the DTLB and L1 Scalar Data Cache.
 * The pipeline's exec() method issues a packet to the DTLB
 * if there is space available in the corresponding return FIFO,
 * and also retires previously issued loads and stores that have
 * returned from the memory sub-system.
 */
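
/*
 * Illustrative usage sketch (a hedged example, not the actual ComputeUnit
 * code): a simplified per-cycle driver that feeds the pipeline and ticks it.
 * The names scalarMemPipe, inst, and pendingScalarReqs are hypothetical
 * placeholders for the caller's state.
 *
 * @code
 * // Hand a new scalar memory instruction to the pipeline only if the
 * // request FIFO can absorb it on top of any requests already pending.
 * if (inst && scalarMemPipe.isGMReqFIFOWrRdy(pendingScalarReqs)) {
 *     scalarMemPipe.issueRequest(inst);
 * }
 *
 * // Every cycle, exec() sends queued requests to the DTLB when the return
 * // FIFOs have space and retires loads/stores whose responses have come
 * // back from the L1 Scalar Data Cache.
 * scalarMemPipe.exec();
 * @endcode
 */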

namespace gem5
{

class ComputeUnit;

class ScalarMemPipeline
{
  public:
    ScalarMemPipeline(const ComputeUnitParams &p, ComputeUnit &cu);
    void exec();

    std::queue<GPUDynInstPtr> &getGMReqFIFO() { return issuedRequests; }
    std::queue<GPUDynInstPtr> &getGMStRespFIFO() { return returnedStores; }
    std::queue<GPUDynInstPtr> &getGMLdRespFIFO() { return returnedLoads; }

    void issueRequest(GPUDynInstPtr gpuDynInst);

    void injectScalarMemFence(
        GPUDynInstPtr gpuDynInst, bool kernelMemSync, RequestPtr req);

    bool
    isGMLdRespFIFOWrRdy() const
    {
        return returnedLoads.size() < queueSize;
    }

    bool
    isGMStRespFIFOWrRdy() const
    {
        return returnedStores.size() < queueSize;
    }

    bool
    isGMReqFIFOWrRdy(uint32_t pendReqs=0) const
    {
        return (issuedRequests.size() + pendReqs) < queueSize;
    }

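    /*
     * The three checks above provide back-pressure on the request and
     * response FIFOs. A hedged example for the response path (the handler
     * below is hypothetical, not the actual scalar data port code): push a
     * completed load into the load return FIFO only when it has room, and
     * let exec() retire it on a later cycle.
     *
     * @code
     * if (inst->isLoad() && scalarMemPipe.isGMLdRespFIFOWrRdy()) {
     *     scalarMemPipe.getGMLdRespFIFO().push(inst);
     * }
     * @endcode
     */
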
    const std::string& name() const { return _name; }

  private:
    ComputeUnit &computeUnit;
    const std::string _name;
    int queueSize;

    // Counters to track and limit the inflight scalar loads and stores
    // generated by this memory pipeline.
    int inflightStores;
    int inflightLoads;

    // Scalar Memory Request FIFO: all global memory scalar requests
    // are issued to this FIFO from the scalar memory pipelines
    std::queue<GPUDynInstPtr> issuedRequests;

    // Scalar Store Response FIFO: all responses of global memory
    // scalar stores are sent to this FIFO from the L1 Scalar Data Cache
    std::queue<GPUDynInstPtr> returnedStores;

    // Scalar Load Response FIFO: all responses of global memory
    // scalar loads are sent to this FIFO from the L1 Scalar Data Cache
    std::queue<GPUDynInstPtr> returnedLoads;
};

} // namespace gem5

#endif // __GPU_COMPUTE_SCALAR_MEMORY_PIPELINE_HH__