sopc.cc
/*
 * Copyright (c) 2024 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "arch/amdgpu/vega/insts/instructions.hh"

namespace gem5
{

namespace VegaISA
{
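    // Most of the SOPC-format instructions below follow the same pattern:
    // read two scalar sources (SSRC0, SSRC1), evaluate a comparison or bit
    // test, and write the 1-bit result to the scalar condition code (SCC).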
    // --- Inst_SOPC__S_CMP_EQ_I32 class methods ---

    Inst_SOPC__S_CMP_EQ_I32::Inst_SOPC__S_CMP_EQ_I32(InFmt_SOPC *iFmt)
        : Inst_SOPC(iFmt, "s_cmp_eq_i32")
    {
        setFlag(ALU);
    } // Inst_SOPC__S_CMP_EQ_I32

    Inst_SOPC__S_CMP_EQ_I32::~Inst_SOPC__S_CMP_EQ_I32()
    {
    } // ~Inst_SOPC__S_CMP_EQ_I32

    // --- description from .arch file ---
    // SCC = (S0.i == S1.i).
    void
    Inst_SOPC__S_CMP_EQ_I32::execute(GPUDynInstPtr gpuDynInst)
    {
        ConstScalarOperandI32 src0(gpuDynInst, instData.SSRC0);
        ConstScalarOperandI32 src1(gpuDynInst, instData.SSRC1);
        ScalarOperandU32 scc(gpuDynInst, REG_SCC);

        src0.read();
        src1.read();

        scc = (src0.rawData() == src1.rawData()) ? 1 : 0;

        scc.write();
    } // execute
    // --- Inst_SOPC__S_CMP_LG_I32 class methods ---

    Inst_SOPC__S_CMP_LG_I32::Inst_SOPC__S_CMP_LG_I32(InFmt_SOPC *iFmt)
        : Inst_SOPC(iFmt, "s_cmp_lg_i32")
    {
        setFlag(ALU);
    } // Inst_SOPC__S_CMP_LG_I32

    Inst_SOPC__S_CMP_LG_I32::~Inst_SOPC__S_CMP_LG_I32()
    {
    } // ~Inst_SOPC__S_CMP_LG_I32

    // --- description from .arch file ---
    // SCC = (S0.i != S1.i).
    void
    Inst_SOPC__S_CMP_LG_I32::execute(GPUDynInstPtr gpuDynInst)
    {
        ConstScalarOperandI32 src0(gpuDynInst, instData.SSRC0);
        ConstScalarOperandI32 src1(gpuDynInst, instData.SSRC1);
        ScalarOperandU32 scc(gpuDynInst, REG_SCC);

        src0.read();
        src1.read();

        scc = (src0.rawData() != src1.rawData()) ? 1 : 0;

        scc.write();
    } // execute
    // --- Inst_SOPC__S_CMP_GT_I32 class methods ---

    Inst_SOPC__S_CMP_GT_I32::Inst_SOPC__S_CMP_GT_I32(InFmt_SOPC *iFmt)
        : Inst_SOPC(iFmt, "s_cmp_gt_i32")
    {
        setFlag(ALU);
    } // Inst_SOPC__S_CMP_GT_I32

    Inst_SOPC__S_CMP_GT_I32::~Inst_SOPC__S_CMP_GT_I32()
    {
    } // ~Inst_SOPC__S_CMP_GT_I32

    // --- description from .arch file ---
    // SCC = (S0.i > S1.i).
    void
    Inst_SOPC__S_CMP_GT_I32::execute(GPUDynInstPtr gpuDynInst)
    {
        ConstScalarOperandI32 src0(gpuDynInst, instData.SSRC0);
        ConstScalarOperandI32 src1(gpuDynInst, instData.SSRC1);
        ScalarOperandU32 scc(gpuDynInst, REG_SCC);

        src0.read();
        src1.read();

        scc = (src0.rawData() > src1.rawData()) ? 1 : 0;

        scc.write();
    } // execute
    // --- Inst_SOPC__S_CMP_GE_I32 class methods ---

    Inst_SOPC__S_CMP_GE_I32::Inst_SOPC__S_CMP_GE_I32(InFmt_SOPC *iFmt)
        : Inst_SOPC(iFmt, "s_cmp_ge_i32")
    {
        setFlag(ALU);
    } // Inst_SOPC__S_CMP_GE_I32

    Inst_SOPC__S_CMP_GE_I32::~Inst_SOPC__S_CMP_GE_I32()
    {
    } // ~Inst_SOPC__S_CMP_GE_I32

    // --- description from .arch file ---
    // SCC = (S0.i >= S1.i).
    void
    Inst_SOPC__S_CMP_GE_I32::execute(GPUDynInstPtr gpuDynInst)
    {
        ConstScalarOperandI32 src0(gpuDynInst, instData.SSRC0);
        ConstScalarOperandI32 src1(gpuDynInst, instData.SSRC1);
        ScalarOperandU32 scc(gpuDynInst, REG_SCC);

        src0.read();
        src1.read();

        scc = (src0.rawData() >= src1.rawData()) ? 1 : 0;

        scc.write();
    } // execute
    // --- Inst_SOPC__S_CMP_LT_I32 class methods ---

    Inst_SOPC__S_CMP_LT_I32::Inst_SOPC__S_CMP_LT_I32(InFmt_SOPC *iFmt)
        : Inst_SOPC(iFmt, "s_cmp_lt_i32")
    {
        setFlag(ALU);
    } // Inst_SOPC__S_CMP_LT_I32

    Inst_SOPC__S_CMP_LT_I32::~Inst_SOPC__S_CMP_LT_I32()
    {
    } // ~Inst_SOPC__S_CMP_LT_I32

    // --- description from .arch file ---
    // SCC = (S0.i < S1.i).
    void
    Inst_SOPC__S_CMP_LT_I32::execute(GPUDynInstPtr gpuDynInst)
    {
        ConstScalarOperandI32 src0(gpuDynInst, instData.SSRC0);
        ConstScalarOperandI32 src1(gpuDynInst, instData.SSRC1);
        ScalarOperandU32 scc(gpuDynInst, REG_SCC);

        src0.read();
        src1.read();

        scc = (src0.rawData() < src1.rawData()) ? 1 : 0;

        scc.write();
    } // execute
    // --- Inst_SOPC__S_CMP_LE_I32 class methods ---

    Inst_SOPC__S_CMP_LE_I32::Inst_SOPC__S_CMP_LE_I32(InFmt_SOPC *iFmt)
        : Inst_SOPC(iFmt, "s_cmp_le_i32")
    {
        setFlag(ALU);
    } // Inst_SOPC__S_CMP_LE_I32

    Inst_SOPC__S_CMP_LE_I32::~Inst_SOPC__S_CMP_LE_I32()
    {
    } // ~Inst_SOPC__S_CMP_LE_I32

    // --- description from .arch file ---
    // SCC = (S0.i <= S1.i).
    void
    Inst_SOPC__S_CMP_LE_I32::execute(GPUDynInstPtr gpuDynInst)
    {
        ConstScalarOperandI32 src0(gpuDynInst, instData.SSRC0);
        ConstScalarOperandI32 src1(gpuDynInst, instData.SSRC1);
        ScalarOperandU32 scc(gpuDynInst, REG_SCC);

        src0.read();
        src1.read();

        scc = (src0.rawData() <= src1.rawData()) ? 1 : 0;

        scc.write();
    } // execute
    // --- Inst_SOPC__S_CMP_EQ_U32 class methods ---

    Inst_SOPC__S_CMP_EQ_U32::Inst_SOPC__S_CMP_EQ_U32(InFmt_SOPC *iFmt)
        : Inst_SOPC(iFmt, "s_cmp_eq_u32")
    {
        setFlag(ALU);
    } // Inst_SOPC__S_CMP_EQ_U32

    Inst_SOPC__S_CMP_EQ_U32::~Inst_SOPC__S_CMP_EQ_U32()
    {
    } // ~Inst_SOPC__S_CMP_EQ_U32

    // --- description from .arch file ---
    // SCC = (S0.u == S1.u).
    void
    Inst_SOPC__S_CMP_EQ_U32::execute(GPUDynInstPtr gpuDynInst)
    {
        ConstScalarOperandU32 src0(gpuDynInst, instData.SSRC0);
        ConstScalarOperandU32 src1(gpuDynInst, instData.SSRC1);
        ScalarOperandU32 scc(gpuDynInst, REG_SCC);

        src0.read();
        src1.read();

        scc = (src0.rawData() == src1.rawData()) ? 1 : 0;

        scc.write();
    } // execute
    // --- Inst_SOPC__S_CMP_LG_U32 class methods ---

    Inst_SOPC__S_CMP_LG_U32::Inst_SOPC__S_CMP_LG_U32(InFmt_SOPC *iFmt)
        : Inst_SOPC(iFmt, "s_cmp_lg_u32")
    {
        setFlag(ALU);
    } // Inst_SOPC__S_CMP_LG_U32

    Inst_SOPC__S_CMP_LG_U32::~Inst_SOPC__S_CMP_LG_U32()
    {
    } // ~Inst_SOPC__S_CMP_LG_U32

    // --- description from .arch file ---
    // SCC = (S0.u != S1.u).
    void
    Inst_SOPC__S_CMP_LG_U32::execute(GPUDynInstPtr gpuDynInst)
    {
        ConstScalarOperandU32 src0(gpuDynInst, instData.SSRC0);
        ConstScalarOperandU32 src1(gpuDynInst, instData.SSRC1);
        ScalarOperandU32 scc(gpuDynInst, REG_SCC);

        src0.read();
        src1.read();

        scc = (src0.rawData() != src1.rawData()) ? 1 : 0;

        scc.write();
    } // execute
    // --- Inst_SOPC__S_CMP_GT_U32 class methods ---

    Inst_SOPC__S_CMP_GT_U32::Inst_SOPC__S_CMP_GT_U32(InFmt_SOPC *iFmt)
        : Inst_SOPC(iFmt, "s_cmp_gt_u32")
    {
        setFlag(ALU);
    } // Inst_SOPC__S_CMP_GT_U32

    Inst_SOPC__S_CMP_GT_U32::~Inst_SOPC__S_CMP_GT_U32()
    {
    } // ~Inst_SOPC__S_CMP_GT_U32

    // --- description from .arch file ---
    // SCC = (S0.u > S1.u).
    void
    Inst_SOPC__S_CMP_GT_U32::execute(GPUDynInstPtr gpuDynInst)
    {
        ConstScalarOperandU32 src0(gpuDynInst, instData.SSRC0);
        ConstScalarOperandU32 src1(gpuDynInst, instData.SSRC1);
        ScalarOperandU32 scc(gpuDynInst, REG_SCC);

        src0.read();
        src1.read();

        scc = (src0.rawData() > src1.rawData()) ? 1 : 0;

        scc.write();
    } // execute
    // --- Inst_SOPC__S_CMP_GE_U32 class methods ---

    Inst_SOPC__S_CMP_GE_U32::Inst_SOPC__S_CMP_GE_U32(InFmt_SOPC *iFmt)
        : Inst_SOPC(iFmt, "s_cmp_ge_u32")
    {
        setFlag(ALU);
    } // Inst_SOPC__S_CMP_GE_U32

    Inst_SOPC__S_CMP_GE_U32::~Inst_SOPC__S_CMP_GE_U32()
    {
    } // ~Inst_SOPC__S_CMP_GE_U32

    // --- description from .arch file ---
    // SCC = (S0.u >= S1.u).
    void
    Inst_SOPC__S_CMP_GE_U32::execute(GPUDynInstPtr gpuDynInst)
    {
        ConstScalarOperandU32 src0(gpuDynInst, instData.SSRC0);
        ConstScalarOperandU32 src1(gpuDynInst, instData.SSRC1);
        ScalarOperandU32 scc(gpuDynInst, REG_SCC);

        src0.read();
        src1.read();

        scc = (src0.rawData() >= src1.rawData()) ? 1 : 0;

        scc.write();
    } // execute
    // --- Inst_SOPC__S_CMP_LT_U32 class methods ---

    Inst_SOPC__S_CMP_LT_U32::Inst_SOPC__S_CMP_LT_U32(InFmt_SOPC *iFmt)
        : Inst_SOPC(iFmt, "s_cmp_lt_u32")
    {
        setFlag(ALU);
    } // Inst_SOPC__S_CMP_LT_U32

    Inst_SOPC__S_CMP_LT_U32::~Inst_SOPC__S_CMP_LT_U32()
    {
    } // ~Inst_SOPC__S_CMP_LT_U32

    // --- description from .arch file ---
    // SCC = (S0.u < S1.u).
    void
    Inst_SOPC__S_CMP_LT_U32::execute(GPUDynInstPtr gpuDynInst)
    {
        ConstScalarOperandU32 src0(gpuDynInst, instData.SSRC0);
        ConstScalarOperandU32 src1(gpuDynInst, instData.SSRC1);
        ScalarOperandU32 scc(gpuDynInst, REG_SCC);

        src0.read();
        src1.read();

        scc = (src0.rawData() < src1.rawData()) ? 1 : 0;

        scc.write();
    } // execute
    // --- Inst_SOPC__S_CMP_LE_U32 class methods ---

    Inst_SOPC__S_CMP_LE_U32::Inst_SOPC__S_CMP_LE_U32(InFmt_SOPC *iFmt)
        : Inst_SOPC(iFmt, "s_cmp_le_u32")
    {
        setFlag(ALU);
    } // Inst_SOPC__S_CMP_LE_U32

    Inst_SOPC__S_CMP_LE_U32::~Inst_SOPC__S_CMP_LE_U32()
    {
    } // ~Inst_SOPC__S_CMP_LE_U32

    // --- description from .arch file ---
    // SCC = (S0.u <= S1.u).
    void
    Inst_SOPC__S_CMP_LE_U32::execute(GPUDynInstPtr gpuDynInst)
    {
        ConstScalarOperandU32 src0(gpuDynInst, instData.SSRC0);
        ConstScalarOperandU32 src1(gpuDynInst, instData.SSRC1);
        ScalarOperandU32 scc(gpuDynInst, REG_SCC);

        src0.read();
        src1.read();

        scc = (src0.rawData() <= src1.rawData()) ? 1 : 0;

        scc.write();
    } // execute
    // --- Inst_SOPC__S_BITCMP0_B32 class methods ---

    Inst_SOPC__S_BITCMP0_B32::Inst_SOPC__S_BITCMP0_B32(InFmt_SOPC *iFmt)
        : Inst_SOPC(iFmt, "s_bitcmp0_b32")
    {
        setFlag(ALU);
    } // Inst_SOPC__S_BITCMP0_B32

    Inst_SOPC__S_BITCMP0_B32::~Inst_SOPC__S_BITCMP0_B32()
    {
    } // ~Inst_SOPC__S_BITCMP0_B32

    // --- description from .arch file ---
    // SCC = (S0.u[S1.u[4:0]] == 0).
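    // In the implementation below, the inner bits() call extracts the 5-bit
    // bit index from S1, and the outer call selects that single bit of S0.
    // For example, if S1 = 35 then S1[4:0] = 3, and with S0 = 0x8 bit 3 of
    // S0 is 1, so SCC is set to 0.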
    void
    Inst_SOPC__S_BITCMP0_B32::execute(GPUDynInstPtr gpuDynInst)
    {
        ConstScalarOperandU32 src0(gpuDynInst, instData.SSRC0);
        ConstScalarOperandU32 src1(gpuDynInst, instData.SSRC1);
        ScalarOperandU32 scc(gpuDynInst, REG_SCC);

        src0.read();
        src1.read();

        scc = !bits(src0.rawData(), bits(src1.rawData(), 4, 0)) ? 1 : 0;

        scc.write();
    } // execute
    // --- Inst_SOPC__S_BITCMP1_B32 class methods ---

    Inst_SOPC__S_BITCMP1_B32::Inst_SOPC__S_BITCMP1_B32(InFmt_SOPC *iFmt)
        : Inst_SOPC(iFmt, "s_bitcmp1_b32")
    {
        setFlag(ALU);
    } // Inst_SOPC__S_BITCMP1_B32

    Inst_SOPC__S_BITCMP1_B32::~Inst_SOPC__S_BITCMP1_B32()
    {
    } // ~Inst_SOPC__S_BITCMP1_B32

    // --- description from .arch file ---
    // SCC = (S0.u[S1.u[4:0]] == 1).
    void
    Inst_SOPC__S_BITCMP1_B32::execute(GPUDynInstPtr gpuDynInst)
    {
        ConstScalarOperandU32 src0(gpuDynInst, instData.SSRC0);
        ConstScalarOperandU32 src1(gpuDynInst, instData.SSRC1);
        ScalarOperandU32 scc(gpuDynInst, REG_SCC);

        src0.read();
        src1.read();

        scc = bits(src0.rawData(), bits(src1.rawData(), 4, 0)) ? 1 : 0;

        scc.write();
    } // execute
    // --- Inst_SOPC__S_BITCMP0_B64 class methods ---

    Inst_SOPC__S_BITCMP0_B64::Inst_SOPC__S_BITCMP0_B64(InFmt_SOPC *iFmt)
        : Inst_SOPC(iFmt, "s_bitcmp0_b64")
    {
        setFlag(ALU);
    } // Inst_SOPC__S_BITCMP0_B64

    Inst_SOPC__S_BITCMP0_B64::~Inst_SOPC__S_BITCMP0_B64()
    {
    } // ~Inst_SOPC__S_BITCMP0_B64

    // --- description from .arch file ---
    // SCC = (S0.u64[S1.u[5:0]] == 0).
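    // Same bit test as s_bitcmp0_b32, except that S0 is 64 bits wide and the
    // bit index comes from the low six bits of S1 (0-63).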
    void
    Inst_SOPC__S_BITCMP0_B64::execute(GPUDynInstPtr gpuDynInst)
    {
        ConstScalarOperandU64 src0(gpuDynInst, instData.SSRC0);
        ConstScalarOperandU32 src1(gpuDynInst, instData.SSRC1);
        ScalarOperandU32 scc(gpuDynInst, REG_SCC);

        src0.read();
        src1.read();

        scc = !bits(src0.rawData(), bits(src1.rawData(), 5, 0)) ? 1 : 0;

        scc.write();
    } // execute
    // --- Inst_SOPC__S_BITCMP1_B64 class methods ---

    Inst_SOPC__S_BITCMP1_B64::Inst_SOPC__S_BITCMP1_B64(InFmt_SOPC *iFmt)
        : Inst_SOPC(iFmt, "s_bitcmp1_b64")
    {
        setFlag(ALU);
    } // Inst_SOPC__S_BITCMP1_B64

    Inst_SOPC__S_BITCMP1_B64::~Inst_SOPC__S_BITCMP1_B64()
    {
    } // ~Inst_SOPC__S_BITCMP1_B64

    // --- description from .arch file ---
    // SCC = (S0.u64[S1.u[5:0]] == 1).
    void
    Inst_SOPC__S_BITCMP1_B64::execute(GPUDynInstPtr gpuDynInst)
    {
        ConstScalarOperandU64 src0(gpuDynInst, instData.SSRC0);
        ConstScalarOperandU32 src1(gpuDynInst, instData.SSRC1);
        ScalarOperandU32 scc(gpuDynInst, REG_SCC);

        src0.read();
        src1.read();

        scc = bits(src0.rawData(), bits(src1.rawData(), 5, 0)) ? 1 : 0;

        scc.write();
    } // execute
    // --- Inst_SOPC__S_SETVSKIP class methods ---

    Inst_SOPC__S_SETVSKIP::Inst_SOPC__S_SETVSKIP(InFmt_SOPC *iFmt)
        : Inst_SOPC(iFmt, "s_setvskip")
    {
    } // Inst_SOPC__S_SETVSKIP

    Inst_SOPC__S_SETVSKIP::~Inst_SOPC__S_SETVSKIP()
    {
    } // ~Inst_SOPC__S_SETVSKIP

    // --- description from .arch file ---
    // VSKIP = S0.u[S1.u[4:0]].
    // Enables and disables VSKIP mode.
    // When VSKIP is enabled, no VOP*/M*BUF/MIMG/DS/FLAT/EXP instructions are
    // issued.
    // If any vector operations are outstanding, S_WAITCNT must be issued
    // before executing.
    // This instruction requires one waitstate after executing (e.g. S_NOP 0).
    // Example:
    // s_waitcnt 0
    // s_setvskip 1, 0 // Enable vskip mode.
    // s_nop 1
    void
    Inst_SOPC__S_SETVSKIP::execute(GPUDynInstPtr gpuDynInst)
    {
        panicUnimplemented();
    } // execute
    // --- Inst_SOPC__S_SET_GPR_IDX_ON class methods ---

    Inst_SOPC__S_SET_GPR_IDX_ON::Inst_SOPC__S_SET_GPR_IDX_ON(InFmt_SOPC *iFmt)
        : Inst_SOPC(iFmt, "s_set_gpr_idx_on")
    {
    } // Inst_SOPC__S_SET_GPR_IDX_ON

    Inst_SOPC__S_SET_GPR_IDX_ON::~Inst_SOPC__S_SET_GPR_IDX_ON()
    {
    } // ~Inst_SOPC__S_SET_GPR_IDX_ON

    // --- description from .arch file ---
    // MODE.gpr_idx_en = 1;
    // M0[7:0] = S0.u[7:0];
    // M0[15:12] = SIMM4 (direct contents of S1 field);
    // // Remaining bits of M0 are unmodified.
    // Enable GPR indexing mode. Vector operations after this will perform
    // relative GPR addressing based on the contents of M0. The structure
    // SQ_M0_GPR_IDX_WORD may be used to decode M0.
    // The raw contents of the S1 field are read and used to set the enable
    // bits. S1[0] = VSRC0_REL, S1[1] = VSRC1_REL, S1[2] = VSRC2_REL and
    // S1[3] = VDST_REL.
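    // For example, with S0.u[7:0] = 0x12 and an S1 field of 0x9
    // (VDST_REL | VSRC0_REL), M0[7:0] becomes 0x12 and M0[15:12] becomes 0x9.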
    void
    Inst_SOPC__S_SET_GPR_IDX_ON::execute(GPUDynInstPtr gpuDynInst)
    {
        panicUnimplemented();
    } // execute
    // --- Inst_SOPC__S_CMP_EQ_U64 class methods ---

    Inst_SOPC__S_CMP_EQ_U64::Inst_SOPC__S_CMP_EQ_U64(InFmt_SOPC *iFmt)
        : Inst_SOPC(iFmt, "s_cmp_eq_u64")
    {
        setFlag(ALU);
    } // Inst_SOPC__S_CMP_EQ_U64

    Inst_SOPC__S_CMP_EQ_U64::~Inst_SOPC__S_CMP_EQ_U64()
    {
    } // ~Inst_SOPC__S_CMP_EQ_U64

    // --- description from .arch file ---
    // SCC = (S0.i64 == S1.i64).
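    // Note that equality is a bit-for-bit comparison, so reading the
    // operands as signed 64-bit values below gives the same result an
    // unsigned comparison would.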
    void
    Inst_SOPC__S_CMP_EQ_U64::execute(GPUDynInstPtr gpuDynInst)
    {
        ConstScalarOperandI64 src0(gpuDynInst, instData.SSRC0);
        ConstScalarOperandI64 src1(gpuDynInst, instData.SSRC1);
        ScalarOperandU32 scc(gpuDynInst, REG_SCC);

        src0.read();
        src1.read();

        scc = (src0.rawData() == src1.rawData()) ? 1 : 0;

        scc.write();
    } // execute
    // --- Inst_SOPC__S_CMP_LG_U64 class methods ---

    Inst_SOPC__S_CMP_LG_U64::Inst_SOPC__S_CMP_LG_U64(InFmt_SOPC *iFmt)
        : Inst_SOPC(iFmt, "s_cmp_lg_u64")
    {
        setFlag(ALU);
    } // Inst_SOPC__S_CMP_LG_U64

    Inst_SOPC__S_CMP_LG_U64::~Inst_SOPC__S_CMP_LG_U64()
    {
    } // ~Inst_SOPC__S_CMP_LG_U64

    // --- description from .arch file ---
    // SCC = (S0.i64 != S1.i64).
    void
    Inst_SOPC__S_CMP_LG_U64::execute(GPUDynInstPtr gpuDynInst)
    {
        ConstScalarOperandI64 src0(gpuDynInst, instData.SSRC0);
        ConstScalarOperandI64 src1(gpuDynInst, instData.SSRC1);
        ScalarOperandU32 scc(gpuDynInst, REG_SCC);

        src0.read();
        src1.read();

        scc = (src0.rawData() != src1.rawData()) ? 1 : 0;

        scc.write();
    } // execute
} // namespace VegaISA
} // namespace gem5