scalar_memory_pipeline.hh
/*
 * Copyright (c) 2016-2017 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __GPU_COMPUTE_SCALAR_MEMORY_PIPELINE_HH__
#define __GPU_COMPUTE_SCALAR_MEMORY_PIPELINE_HH__

#include <queue>
#include <string>

#include "gpu-compute/misc.hh"
#include "params/ComputeUnit.hh"
#include "sim/stats.hh"

/**
 * @file scalar_memory_pipeline.hh
 *
 * The scalar memory pipeline issues global memory packets
 * from the scalar ALU to the DTLB and the L1 Scalar Data Cache.
 * The pipeline's exec() method issues a packet to the DTLB only when there
 * is space available in the corresponding return FIFO, and it also retires
 * previously issued loads and stores that have returned from the memory
 * sub-system. (A simplified, illustrative sketch of this flow follows the
 * listing below.)
 */

namespace gem5
{

class ComputeUnit;

class ScalarMemPipeline
{
  public:
    ScalarMemPipeline(const ComputeUnitParams &p, ComputeUnit &cu);
    void exec();

    std::queue<GPUDynInstPtr> &getGMReqFIFO() { return issuedRequests; }
    std::queue<GPUDynInstPtr> &getGMStRespFIFO() { return returnedStores; }
    std::queue<GPUDynInstPtr> &getGMLdRespFIFO() { return returnedLoads; }

    void issueRequest(GPUDynInstPtr gpuDynInst);

    bool
    isGMLdRespFIFOWrRdy() const
    {
        return returnedLoads.size() < queueSize;
    }

    bool
    isGMStRespFIFOWrRdy() const
    {
        return returnedStores.size() < queueSize;
    }

    bool
    isGMReqFIFOWrRdy(uint32_t pendReqs=0) const
    {
        return (issuedRequests.size() + pendReqs) < queueSize;
    }

    const std::string& name() const { return _name; }

  private:
    ComputeUnit &computeUnit;
    const std::string _name;
    int queueSize;

    // Counters to track and limit the inflight scalar loads and stores
    // generated by this memory pipeline.
    int inflightStores;
    int inflightLoads;

    // Scalar Memory Request FIFO: all global memory scalar requests
    // are issued to this FIFO from the scalar memory pipelines
    std::queue<GPUDynInstPtr> issuedRequests;

    // Scalar Store Response FIFO: all responses of global memory
    // scalar stores are sent to this FIFO from L1 Scalar Data Cache
    std::queue<GPUDynInstPtr> returnedStores;

    // Scalar Load Response FIFO: all responses of global memory
    // scalar loads are sent to this FIFO from L1 Scalar Data Cache
    std::queue<GPUDynInstPtr> returnedLoads;
};

} // namespace gem5

#endif // __GPU_COMPUTE_SCALAR_MEMORY_PIPELINE_HH__
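
The header above only declares the pipeline's interface; the retire-then-issue behaviour described in the file comment is implemented in gpu-compute/scalar_memory_pipeline.cc. The standalone sketch below is a rough model of that flow under simplifying assumptions (responses "return" immediately rather than after a memory latency, and at most one load or store retires per call). Every name in it (ToyRequest, ToyScalarMemPipeline, ldRespFifoWrRdy, and so on) is hypothetical and is not part of the gem5 API.

// Illustrative only -- not gem5 code. A toy model of the flow described in
// the file comment: each exec() call first retires at most one returned
// load or store, then forwards the oldest issued request to a stubbed
// memory side, but only if the matching response FIFO has a free slot.

#include <cstddef>
#include <cstdio>
#include <queue>

struct ToyRequest
{
    int id;
    bool isLoad;
};

class ToyScalarMemPipeline
{
  public:
    explicit ToyScalarMemPipeline(std::size_t qSize) : queueSize(qSize) {}

    // Write-readiness checks mirroring the ones declared in the header:
    // a request is only forwarded if its eventual response can be buffered.
    bool ldRespFifoWrRdy() const { return returnedLoads.size() < queueSize; }
    bool stRespFifoWrRdy() const { return returnedStores.size() < queueSize; }
    bool reqFifoWrRdy(std::size_t pend = 0) const
    {
        return issuedRequests.size() + pend < queueSize;
    }

    void issueRequest(const ToyRequest &req) { issuedRequests.push(req); }

    void
    exec()
    {
        // Retire at most one previously returned load or store.
        if (!returnedLoads.empty()) {
            std::printf("retire load  %d\n", returnedLoads.front().id);
            returnedLoads.pop();
        } else if (!returnedStores.empty()) {
            std::printf("retire store %d\n", returnedStores.front().id);
            returnedStores.pop();
        }

        // Forward the oldest outstanding request if its response FIFO has
        // room. The real pipeline would hand the packet to the DTLB/L1
        // scalar cache; here the "response" arrives instantly.
        if (!issuedRequests.empty()) {
            const ToyRequest req = issuedRequests.front();
            bool rdy = req.isLoad ? ldRespFifoWrRdy() : stRespFifoWrRdy();
            if (rdy) {
                issuedRequests.pop();
                (req.isLoad ? returnedLoads : returnedStores).push(req);
            }
        }
    }

  private:
    std::size_t queueSize;
    std::queue<ToyRequest> issuedRequests;
    std::queue<ToyRequest> returnedLoads;
    std::queue<ToyRequest> returnedStores;
};

int
main()
{
    ToyScalarMemPipeline pipe(2);

    // Alternate loads and stores, respecting the request FIFO's capacity,
    // then run enough cycles to drain everything in flight.
    for (int i = 0; i < 4; ++i) {
        if (pipe.reqFifoWrRdy())
            pipe.issueRequest({i, /* isLoad */ i % 2 == 0});
        pipe.exec();
    }
    for (int c = 0; c < 6; ++c)
        pipe.exec();

    return 0;
}

Bounding the response FIFOs before a request is forwarded is what gives the pipeline its back-pressure; that is the role the isGMLdRespFIFOWrRdy(), isGMStRespFIFOWrRdy(), and isGMReqFIFOWrRdy() checks play in the header above.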