gem5  v21.2.1.1
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
macromem.cc
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2010-2014, 2020 ARM Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Copyright (c) 2007-2008 The Florida State University
15  * All rights reserved.
16  *
17  * Redistribution and use in source and binary forms, with or without
18  * modification, are permitted provided that the following conditions are
19  * met: redistributions of source code must retain the above copyright
20  * notice, this list of conditions and the following disclaimer;
21  * redistributions in binary form must reproduce the above copyright
22  * notice, this list of conditions and the following disclaimer in the
23  * documentation and/or other materials provided with the distribution;
24  * neither the name of the copyright holders nor the names of its
25  * contributors may be used to endorse or promote products derived from
26  * this software without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39  */
40 
42 
43 #include <sstream>
44 
45 #include "arch/arm/generated/decoder.hh"
47 #include "base/compiler.hh"
48 
49 namespace gem5
50 {
51 
52 using namespace ArmISAInst;
53 
54 namespace ArmISA
55 {
56 
// Decompose an ARM A32/T32 load/store-multiple (LDM/STM) instruction into
// micro-ops: an optional base-register copy, paired or single load/store
// micro-ops, an optional base writeback, and an optional final PC write for
// loads that both write back and load the PC.
//
// Parameters beyond the usual PredMacroOp arguments:
//   rn        - base address register
//   index     - P bit: adjust the start offset before the first transfer
//   up        - U bit: addresses ascend from rn when true, descend otherwise
//   user      - S bit: user-mode registers / exception return
//   writeback - W bit: write the updated address back to rn
//   load      - load (LDM) when true, store (STM) when false
//   reglist   - bitmask of the registers to transfer
MacroMemOp::MacroMemOp(const char *mnem, ExtMachInst machInst,
                       OpClass __opClass, IntRegIndex rn,
                       bool index, bool up, bool user, bool writeback,
                       bool load, uint32_t reglist) :
    PredMacroOp(mnem, machInst, __opClass)
{
    uint32_t regs = reglist;            // working copy; bits cleared as used
    uint32_t ones = number_of_ones(reglist);
    uint32_t mem_ops = ones;            // memory transfers still to emit

    // Copy the base address register if we overwrite it, or if this
    // instruction is basically a no-op (we have to do something)
    bool copy_base = (bits(reglist, rn) && load) || !ones;
    // S bit set without PC in the list: transfer user-mode registers.
    bool force_user = user & !bits(reglist, 15);
    // S bit set with PC in the list: exception return.
    bool exception_ret = user & bits(reglist, 15);
    // Loading the PC with writeback: load the PC value into a temp register
    // first so the actual branch can be the last micro-op.
    bool pc_temp = load && writeback && bits(reglist, 15);

    if (!ones) {
        // Empty register list: the single copy_base micro-op is the no-op.
        numMicroops = 1;
    } else if (load) {
        // Loads go out in pairs where possible, hence (ones + 1) / 2; an
        // even-count exception return needs one extra uop for the PC.
        numMicroops = ((ones + 1) / 2)
                    + ((ones % 2 == 0 && exception_ret) ? 1 : 0)
                    + (copy_base ? 1 : 0)
                    + (writeback? 1 : 0)
                    + (pc_temp ? 1 : 0);
    } else {
        numMicroops = ones + (writeback ? 1 : 0);
    }

    // NOTE(review): upstream gem5 allocates the micro-op array here
    // (microOps = new StaticInstPtr[numMicroops];). That line appears to
    // have been dropped from this rendered view -- confirm against the
    // original file.

    // Byte offset of the next transfer relative to the base register.
    uint32_t addr = 0;

    // Descending addressing: start at the highest word and walk down.
    if (!up)
        addr = (ones << 2) - 4;

    // Adjust the starting offset per the P bit (before/after increment).
    if (!index)
        addr += 4;

    StaticInstPtr *uop = microOps;

    // Add 0 to Rn and stick it in ureg0.
    // This is equivalent to a move.
    if (copy_base)
        *uop++ = new MicroAddiUop(machInst, INTREG_UREG0, rn, 0);

    unsigned reg = 0;   // scan position within the register list
    while (mem_ops != 0) {
        // Do load operations in pairs if possible
        if (load && mem_ops >= 2 &&
            !(mem_ops == 2 && bits(regs,INTREG_PC) && exception_ret)) {
            // 64-bit memory operation
            // Find 2 set register bits (clear them after finding)
            unsigned reg_idx1;
            unsigned reg_idx2;

            // Find the first register
            while (!bits(regs, reg)) reg++;
            replaceBits(regs, reg, 0);
            reg_idx1 = force_user ? intRegInMode(MODE_USER, reg) : reg;

            // Find the second register
            while (!bits(regs, reg)) reg++;
            replaceBits(regs, reg, 0);
            reg_idx2 = force_user ? intRegInMode(MODE_USER, reg) : reg;

            // Load into temp reg if necessary
            if (reg_idx2 == INTREG_PC && pc_temp)
                reg_idx2 = INTREG_UREG1;

            // Actually load both registers from memory
            *uop = new MicroLdr2Uop(machInst, reg_idx1, reg_idx2,
                    copy_base ? INTREG_UREG0 : rn, up, addr);

            if (!writeback && reg_idx2 == INTREG_PC) {
                // No writeback if idx==pc, set appropriate flags
                (*uop)->setFlag(StaticInst::IsControl);
                (*uop)->setFlag(StaticInst::IsIndirectControl);

                if (!(condCode == COND_AL || condCode == COND_UC))
                    (*uop)->setFlag(StaticInst::IsCondControl);
                else
                    (*uop)->setFlag(StaticInst::IsUncondControl);
            }

            if (up) addr += 8;
            else addr -= 8;
            mem_ops -= 2;
        } else {
            // 32-bit memory operation
            // Find register for operation
            unsigned reg_idx;
            while (!bits(regs, reg)) reg++;
            replaceBits(regs, reg, 0);
            reg_idx = force_user ? intRegInMode(MODE_USER, reg) : reg;

            if (load) {
                if (writeback && reg_idx == INTREG_PC) {
                    // If this instruction changes the PC and performs a
                    // writeback, ensure the pc load/branch is the last uop.
                    // Load into a temp reg here.
                    *uop = new MicroLdrUop(machInst, INTREG_UREG1,
                            copy_base ? INTREG_UREG0 : rn, up, addr);
                } else if (reg_idx == INTREG_PC && exception_ret) {
                    // Special handling for exception return
                    *uop = new MicroLdrRetUop(machInst, reg_idx,
                            copy_base ? INTREG_UREG0 : rn, up, addr);
                } else {
                    // standard single load uop
                    *uop = new MicroLdrUop(machInst, reg_idx,
                            copy_base ? INTREG_UREG0 : rn, up, addr);
                }

                // Loading pc as last operation? Set appropriate flags.
                if (!writeback && reg_idx == INTREG_PC) {
                    (*uop)->setFlag(StaticInst::IsControl);
                    (*uop)->setFlag(StaticInst::IsIndirectControl);

                    if (!(condCode == COND_AL || condCode == COND_UC))
                        (*uop)->setFlag(StaticInst::IsCondControl);
                    else
                        (*uop)->setFlag(StaticInst::IsUncondControl);
                }
            } else {
                *uop = new MicroStrUop(machInst, reg_idx, rn, up, addr);
            }

            if (up) addr += 4;
            else addr -= 4;
            --mem_ops;
        }

        // Load/store micro-op generated, go to next uop
        ++uop;
    }

    if (writeback && ones) {
        // Perform writeback uop operation
        if (up)
            *uop++ = new MicroAddiUop(machInst, rn, rn, ones * 4);
        else
            *uop++ = new MicroSubiUop(machInst, rn, rn, ones * 4);

        // Write PC after address writeback?
        if (pc_temp) {
            if (exception_ret) {
                *uop = new MicroUopRegMovRet(machInst, 0, INTREG_UREG1);
            } else {
                *uop = new MicroUopRegMov(machInst, INTREG_PC, INTREG_UREG1);
            }
            (*uop)->setFlag(StaticInst::IsControl);
            (*uop)->setFlag(StaticInst::IsIndirectControl);

            if (!(condCode == COND_AL || condCode == COND_UC))
                (*uop)->setFlag(StaticInst::IsCondControl);
            else
                (*uop)->setFlag(StaticInst::IsUncondControl);

            // Popping the PC through SP looks like a function return.
            if (rn == INTREG_SP)
                (*uop)->setFlag(StaticInst::IsReturn);

            ++uop;
        }
    }

    // Step back to the last micro-op actually emitted and mark it.
    --uop;
    (*uop)->setLastMicroop();
    // NOTE(review): upstream also calls microOps[0]->setFirstMicroop()
    // here; that line appears dropped from this rendered view.

    /* Take the control flags from the last microop for the macroop */
    if ((*uop)->isControl())
        setFlag(StaticInst::IsControl);
    if ((*uop)->isCondCtrl())
        setFlag(StaticInst::IsCondControl);
    if ((*uop)->isUncondCtrl())
        setFlag(StaticInst::IsUncondControl);
    if ((*uop)->isIndirectCtrl())
        setFlag(StaticInst::IsIndirectControl);
    if ((*uop)->isReturn())
        setFlag(StaticInst::IsReturn);

    // Every micro-op but the last commits together with the macro-op.
    for (StaticInstPtr *uop = microOps; !(*uop)->isLastMicroop(); uop++) {
        (*uop)->setDelayedCommit();
    }
}
242 
// Decompose an AArch64 load/store-pair instruction (LDP/STP family) into
// micro-ops: an optional address-setup uop (non-post-indexed forms), the
// data-transfer uops for rt and rt2, and an optional writeback uop.
//
//   size      - bytes per transferred register (4, 8, or 16)
//   fp        - FP/SIMD register pair when true, integer pair when false
//   load      - load when true, store when false
//   noAlloc, signExt, exclusive, acrel - memory attribute flags forwarded
//               unchanged to the micro-ops
//   imm       - address offset immediate
//   mode      - addressing mode (offset / pre-index / post-index)
//   rn        - base register (31 is remapped to SP); rt/rt2 - data pair
PairMemOp::PairMemOp(const char *mnem, ExtMachInst machInst, OpClass __opClass,
                     uint32_t size, bool fp, bool load, bool noAlloc,
                     bool signExt, bool exclusive, bool acrel,
                     int64_t imm, AddrMode mode,
                     IntRegIndex rn, IntRegIndex rt, IntRegIndex rt2) :
    PredMacroOp(mnem, machInst, __opClass)
{
    bool post = (mode == AddrMd_PostIndex);
    bool writeback = (mode != AddrMd_Offset);

    if (load) {
        // Use integer rounding to round up loads of size 4
        numMicroops = (post ? 0 : 1) + ((size + 4) / 8) + (writeback ? 1 : 0);
    } else {
        numMicroops = (post ? 0 : 1) + (size / 4) + (writeback ? 1 : 0);
    }

    // NOTE(review): upstream gem5 allocates the micro-op array here
    // (microOps = new StaticInstPtr[numMicroops];). That line appears to
    // have been dropped from this rendered view.

    StaticInstPtr *uop = microOps;

    // Register index 31 means the stack pointer for the base register.
    rn = makeSP(rn);

    // Non-post-indexed forms compute the effective address into ureg0 first.
    if (!post) {
        *uop++ = new MicroAddXiSpAlignUop(machInst, INTREG_UREG0, rn,
                post ? 0 : imm);
    }

    if (fp) {
        if (size == 16) {
            if (load) {
                *uop++ = new MicroLdFp16Uop(machInst, rt,
                        post ? rn : INTREG_UREG0, 0, noAlloc, exclusive, acrel);
                *uop++ = new MicroLdFp16Uop(machInst, rt2,
                        post ? rn : INTREG_UREG0, 16, noAlloc, exclusive, acrel);
            } else {
                // 128-bit stores are split into bottom/top 64-bit halves.
                *uop++ = new MicroStrQBFpXImmUop(machInst, rt,
                        post ? rn : INTREG_UREG0, 0, noAlloc, exclusive, acrel);
                *uop++ = new MicroStrQTFpXImmUop(machInst, rt,
                        post ? rn : INTREG_UREG0, 0, noAlloc, exclusive, acrel);
                *uop++ = new MicroStrQBFpXImmUop(machInst, rt2,
                        post ? rn : INTREG_UREG0, 16, noAlloc, exclusive, acrel);
                *uop++ = new MicroStrQTFpXImmUop(machInst, rt2,
                        post ? rn : INTREG_UREG0, 16, noAlloc, exclusive, acrel);
            }
        } else if (size == 8) {
            if (load) {
                *uop++ = new MicroLdPairFp8Uop(machInst, rt, rt2,
                        post ? rn : INTREG_UREG0, 0, noAlloc, exclusive, acrel);
            } else {
                *uop++ = new MicroStrFpXImmUop(machInst, rt,
                        post ? rn : INTREG_UREG0, 0, noAlloc, exclusive, acrel);
                *uop++ = new MicroStrFpXImmUop(machInst, rt2,
                        post ? rn : INTREG_UREG0, 8, noAlloc, exclusive, acrel);
            }
        } else if (size == 4) {
            if (load) {
                *uop++ = new MicroLdrDFpXImmUop(machInst, rt, rt2,
                        post ? rn : INTREG_UREG0, 0, noAlloc, exclusive, acrel);
            } else {
                *uop++ = new MicroStrDFpXImmUop(machInst, rt, rt2,
                        post ? rn : INTREG_UREG0, 0, noAlloc, exclusive, acrel);
            }
        }
    } else {
        if (size == 8) {
            if (load) {
                *uop++ = new MicroLdPairUop(machInst, rt, rt2,
                        post ? rn : INTREG_UREG0, 0, noAlloc, exclusive, acrel);
            } else {
                *uop++ = new MicroStrXImmUop(machInst, rt, post ? rn : INTREG_UREG0,
                        0, noAlloc, exclusive, acrel);
                *uop++ = new MicroStrXImmUop(machInst, rt2, post ? rn : INTREG_UREG0,
                        size, noAlloc, exclusive, acrel);
            }
        } else if (size == 4) {
            if (load) {
                // LDPSW sign-extends each 32-bit word.
                if (signExt) {
                    *uop++ = new MicroLdrDSXImmUop(machInst, rt, rt2,
                            post ? rn : INTREG_UREG0, 0, noAlloc, exclusive, acrel);
                } else {
                    *uop++ = new MicroLdrDUXImmUop(machInst, rt, rt2,
                            post ? rn : INTREG_UREG0, 0, noAlloc, exclusive, acrel);
                }
            } else {
                *uop++ = new MicroStrDXImmUop(machInst, rt, rt2,
                        post ? rn : INTREG_UREG0, 0, noAlloc, exclusive, acrel);
            }
        }
    }

    // Base update: post-index adds imm to rn; pre-index copies the already
    // offset ureg0 back into rn.
    if (writeback) {
        *uop++ = new MicroAddXiUop(machInst, rn, post ? rn : INTREG_UREG0,
                                   post ? imm : 0);
    }

    assert(uop == &microOps[numMicroops]);
    (*--uop)->setLastMicroop();
    // NOTE(review): upstream also calls microOps[0]->setFirstMicroop()
    // here; that line appears dropped from this rendered view.

    for (StaticInstPtr *curUop = microOps;
         !(*curUop)->isLastMicroop(); curUop++) {
        (*curUop)->setDelayedCommit();
    }
}
347 
// 128-bit FP load/store with an immediate offset. A load is a single
// micro-op; a store is split into bottom- and top-half 64-bit stores.
//
//   load - load when true, store when false
//   dest - FP register transferred; base - address register; imm - offset
BigFpMemImmOp::BigFpMemImmOp(const char *mnem, ExtMachInst machInst,
                             OpClass __opClass, bool load, IntRegIndex dest,
                             IntRegIndex base, int64_t imm) :
    PredMacroOp(mnem, machInst, __opClass)
{
    numMicroops = load ? 1 : 2;
    // NOTE(review): upstream gem5 allocates the micro-op array here
    // (microOps = new StaticInstPtr[numMicroops];). That line appears to
    // have been dropped from this rendered view.

    StaticInstPtr *uop = microOps;

    if (load) {
        *uop = new MicroLdFp16Uop(machInst, dest, base, imm);
    } else {
        // Store bottom half first; only the final uop commits immediately.
        *uop = new MicroStrQBFpXImmUop(machInst, dest, base, imm);
        (*uop)->setDelayedCommit();
        *++uop = new MicroStrQTFpXImmUop(machInst, dest, base, imm);
    }
    (*uop)->setLastMicroop();
    // NOTE(review): upstream also calls microOps[0]->setFirstMicroop()
    // here; that line appears dropped from this rendered view.
}
368 
// 128-bit FP load/store with post-indexed addressing: transfer at offset 0,
// then add imm to the base register as the final micro-op.
//
//   load - load when true, store when false
//   dest - FP register transferred; base - address register; imm - post-inc
BigFpMemPostOp::BigFpMemPostOp(const char *mnem, ExtMachInst machInst,
                               OpClass __opClass, bool load, IntRegIndex dest,
                               IntRegIndex base, int64_t imm) :
    PredMacroOp(mnem, machInst, __opClass)
{
    // Load: transfer + writeback. Store: two half-stores + writeback.
    numMicroops = load ? 2 : 3;
    // NOTE(review): upstream gem5 allocates the micro-op array here
    // (microOps = new StaticInstPtr[numMicroops];). That line appears to
    // have been dropped from this rendered view.

    StaticInstPtr *uop = microOps;

    if (load) {
        *uop++ = new MicroLdFp16Uop(machInst, dest, base, 0);
    } else {
        *uop++= new MicroStrQBFpXImmUop(machInst, dest, base, 0);
        *uop++ = new MicroStrQTFpXImmUop(machInst, dest, base, 0);
    }
    // Base writeback is last so the transfer sees the original address.
    *uop = new MicroAddXiUop(machInst, base, base, imm);
    (*uop)->setLastMicroop();
    // NOTE(review): upstream also calls microOps[0]->setFirstMicroop()
    // here; that line appears dropped from this rendered view.

    for (StaticInstPtr *curUop = microOps;
         !(*curUop)->isLastMicroop(); curUop++) {
        (*curUop)->setDelayedCommit();
    }
}
394 
// 128-bit FP load/store with pre-indexed addressing: transfer at offset imm,
// then write base + imm back to the base register.
//
//   load - load when true, store when false
//   dest - FP register transferred; base - address register; imm - offset
BigFpMemPreOp::BigFpMemPreOp(const char *mnem, ExtMachInst machInst,
                             OpClass __opClass, bool load, IntRegIndex dest,
                             IntRegIndex base, int64_t imm) :
    PredMacroOp(mnem, machInst, __opClass)
{
    // Load: transfer + writeback. Store: two half-stores + writeback.
    numMicroops = load ? 2 : 3;
    // NOTE(review): upstream gem5 allocates the micro-op array here
    // (microOps = new StaticInstPtr[numMicroops];). That line appears to
    // have been dropped from this rendered view.

    StaticInstPtr *uop = microOps;

    if (load) {
        *uop++ = new MicroLdFp16Uop(machInst, dest, base, imm);
    } else {
        *uop++ = new MicroStrQBFpXImmUop(machInst, dest, base, imm);
        *uop++ = new MicroStrQTFpXImmUop(machInst, dest, base, imm);
    }
    // Pre-index writeback: base <- base + imm after the transfer.
    *uop = new MicroAddXiUop(machInst, base, base, imm);
    (*uop)->setLastMicroop();
    // NOTE(review): upstream also calls microOps[0]->setFirstMicroop()
    // here; that line appears dropped from this rendered view.

    for (StaticInstPtr *curUop = microOps;
         !(*curUop)->isLastMicroop(); curUop++) {
        (*curUop)->setDelayedCommit();
    }
}
420 
// 128-bit FP load/store with a register offset (extended/shifted per `type`
// and `imm`). Loads are one micro-op; stores split into bottom/top halves.
//
//   load   - load when true, store when false
//   dest   - FP register transferred
//   base   - address base register; offset - index register
//   type   - extend/shift applied to the offset register; imm - shift amount
BigFpMemRegOp::BigFpMemRegOp(const char *mnem, ExtMachInst machInst,
                             OpClass __opClass, bool load, IntRegIndex dest,
                             IntRegIndex base, IntRegIndex offset,
                             ArmExtendType type, int64_t imm) :
    PredMacroOp(mnem, machInst, __opClass)
{
    numMicroops = load ? 1 : 2;
    // NOTE(review): upstream gem5 allocates the micro-op array here
    // (microOps = new StaticInstPtr[numMicroops];). That line appears to
    // have been dropped from this rendered view.

    StaticInstPtr *uop = microOps;

    if (load) {
        *uop = new MicroLdFp16RegUop(machInst, dest, base,
                offset, type, imm);
    } else {
        // Bottom half first; only the final uop commits immediately.
        *uop = new MicroStrQBFpXRegUop(machInst, dest, base,
                offset, type, imm);
        (*uop)->setDelayedCommit();
        *++uop = new MicroStrQTFpXRegUop(machInst, dest, base,
                offset, type, imm);
    }

    (*uop)->setLastMicroop();
    // NOTE(review): upstream also calls microOps[0]->setFirstMicroop()
    // here; that line appears dropped from this rendered view.
}
446 
// 128-bit FP literal load (PC-relative): a single micro-op.
//
//   dest - FP register loaded; imm - literal offset
BigFpMemLitOp::BigFpMemLitOp(const char *mnem, ExtMachInst machInst,
                             OpClass __opClass, IntRegIndex dest,
                             int64_t imm) :
    PredMacroOp(mnem, machInst, __opClass)
{
    numMicroops = 1;
    // NOTE(review): upstream gem5 allocates the micro-op array here
    // (microOps = new StaticInstPtr[numMicroops];). That line appears to
    // have been dropped from this rendered view.

    microOps[0] = new MicroLdFp16LitUop(machInst, dest, imm);
    microOps[0]->setLastMicroop();
    // NOTE(review): upstream also calls microOps[0]->setFirstMicroop()
    // here; that line appears dropped from this rendered view.
}
459 
// NEON multi-register structure load (VLD1..VLD4, multiple structures):
// bulk-load the data into either the destination registers or a special
// temp area, optionally write back the base, then deinterleave the temp
// data into the destination registers when elems > 1.
//
//   elems - structure elements per access (1-4); regs - registers loaded
//   rn    - base register; vd - first destination vector register
//   inc   - destination register stride; size/align - element size/alignment
//   rm    - post-index register (15 = no writeback, 13 = immediate update)
VldMultOp::VldMultOp(const char *mnem, ExtMachInst machInst, OpClass __opClass,
                     unsigned elems, RegIndex rn, RegIndex vd, unsigned regs,
                     unsigned inc, uint32_t size, uint32_t align, RegIndex rm) :
    PredMacroOp(mnem, machInst, __opClass)
{
    assert(regs > 0 && regs <= 4);
    assert(regs % elems == 0);

    // More than two registers need two memory micro-ops (16B + 8/16B).
    numMicroops = (regs > 2) ? 2 : 1;
    bool wb = (rm != 15);
    bool deinterleave = (elems > 1);

    if (wb) numMicroops++;
    if (deinterleave) numMicroops += (regs / elems);
    // NOTE(review): upstream gem5 allocates the micro-op array here
    // (microOps = new StaticInstPtr[numMicroops];). That line appears to
    // have been dropped from this rendered view.

    // Deinterleaved data goes through the special temp element area first.
    RegIndex rMid = deinterleave ? VecSpecialElem : vd * 2;

    uint32_t noAlign = 0;

    unsigned uopIdx = 0;
    switch (regs) {
      case 4:
        microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon16Uop>(
                size, machInst, rMid, rn, 0, align);
        microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon16Uop>(
                size, machInst, rMid + 4, rn, 16, noAlign);
        break;
      case 3:
        microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon16Uop>(
                size, machInst, rMid, rn, 0, align);
        microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon8Uop>(
                size, machInst, rMid + 4, rn, 16, noAlign);
        break;
      case 2:
        microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon16Uop>(
                size, machInst, rMid, rn, 0, align);
        break;
      case 1:
        microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon8Uop>(
                size, machInst, rMid, rn, 0, align);
        break;
      default:
        // Unknown number of registers
        microOps[uopIdx++] = new Unknown(machInst);
    }
    if (wb) {
        // rm other than 13/15 is a register post-index; otherwise advance
        // the base by the transfer size (8 bytes per register).
        if (rm != 15 && rm != 13) {
            microOps[uopIdx++] =
                new MicroAddUop(machInst, rn, rn, rm, 0, ArmISA::LSL);
        } else {
            microOps[uopIdx++] =
                new MicroAddiUop(machInst, rn, rn, regs * 8);
        }
    }
    if (deinterleave) {
        switch (elems) {
          case 4:
            assert(regs == 4);
            microOps[uopIdx++] = newNeonMixInst<MicroDeintNeon8Uop>(
                    size, machInst, vd * 2, rMid, inc * 2);
            break;
          case 3:
            assert(regs == 3);
            microOps[uopIdx++] = newNeonMixInst<MicroDeintNeon6Uop>(
                    size, machInst, vd * 2, rMid, inc * 2);
            break;
          case 2:
            assert(regs == 4 || regs == 2);
            if (regs == 4) {
                microOps[uopIdx++] = newNeonMixInst<MicroDeintNeon4Uop>(
                        size, machInst, vd * 2, rMid, inc * 2);
                microOps[uopIdx++] = newNeonMixInst<MicroDeintNeon4Uop>(
                        size, machInst, vd * 2 + 2, rMid + 4, inc * 2);
            } else {
                microOps[uopIdx++] = newNeonMixInst<MicroDeintNeon4Uop>(
                        size, machInst, vd * 2, rMid, inc * 2);
            }
            break;
          default:
            // Bad number of elements to deinterleave
            microOps[uopIdx++] = new Unknown(machInst);
        }
    }
    assert(uopIdx == numMicroops);

    for (unsigned i = 0; i < numMicroops - 1; i++) {
        MicroOp * uopPtr = dynamic_cast<MicroOp *>(microOps[i].get());
        assert(uopPtr);
        uopPtr->setDelayedCommit();
    }
    // NOTE(review): upstream also calls microOps[0]->setFirstMicroop() and
    // microOps[numMicroops - 1]->setLastMicroop() here; those lines appear
    // dropped from this rendered view.
}
554 
// NEON single-structure load (VLD1..VLD4, single/all lanes): load the packed
// structure into the special temp element area, optionally write back the
// base, then unpack into the destination register lane(s).
//
//   all   - load-to-all-lanes form when true, single-lane when false
//   elems - structure elements per access (1-4); regs - registers written
//   rn    - base register; vd - first destination vector register
//   inc   - destination register stride; size - log2 of the element bytes
//   align - alignment encoding; rm - post-index register (15 = none,
//           13 = immediate update); lane - destination lane index
VldSingleOp::VldSingleOp(const char *mnem, ExtMachInst machInst,
                         OpClass __opClass, bool all, unsigned elems,
                         RegIndex rn, RegIndex vd, unsigned regs,
                         unsigned inc, uint32_t size, uint32_t align,
                         RegIndex rm, unsigned lane) :
    PredMacroOp(mnem, machInst, __opClass)
{
    assert(regs > 0 && regs <= 4);
    assert(regs % elems == 0);

    unsigned eBytes = (1 << size);
    unsigned loadSize = eBytes * elems;
    [[maybe_unused]] unsigned loadRegs =
        (loadSize + sizeof(uint32_t) - 1) / sizeof(uint32_t);

    assert(loadRegs > 0 && loadRegs <= 4);

    // One memory uop, one unpack uop per element group, plus writeback.
    numMicroops = 1;
    bool wb = (rm != 15);

    if (wb) numMicroops++;
    numMicroops += (regs / elems);
    // NOTE(review): upstream gem5 allocates the micro-op array here
    // (microOps = new StaticInstPtr[numMicroops];). That line appears to
    // have been dropped from this rendered view.

    RegIndex ufp0 = VecSpecialElem;

    unsigned uopIdx = 0;
    // Pick the memory micro-op by total transfer size / element width.
    switch (loadSize) {
      case 1:
        microOps[uopIdx++] = new MicroLdrNeon1Uop<uint8_t>(
                machInst, ufp0, rn, 0, align);
        break;
      case 2:
        if (eBytes == 2) {
            microOps[uopIdx++] = new MicroLdrNeon2Uop<uint16_t>(
                    machInst, ufp0, rn, 0, align);
        } else {
            microOps[uopIdx++] = new MicroLdrNeon2Uop<uint8_t>(
                    machInst, ufp0, rn, 0, align);
        }
        break;
      case 3:
        microOps[uopIdx++] = new MicroLdrNeon3Uop<uint8_t>(
                machInst, ufp0, rn, 0, align);
        break;
      case 4:
        switch (eBytes) {
          case 1:
            microOps[uopIdx++] = new MicroLdrNeon4Uop<uint8_t>(
                    machInst, ufp0, rn, 0, align);
            break;
          case 2:
            microOps[uopIdx++] = new MicroLdrNeon4Uop<uint16_t>(
                    machInst, ufp0, rn, 0, align);
            break;
          case 4:
            microOps[uopIdx++] = new MicroLdrNeon4Uop<uint32_t>(
                    machInst, ufp0, rn, 0, align);
            break;
        }
        break;
      case 6:
        microOps[uopIdx++] = new MicroLdrNeon6Uop<uint16_t>(
                machInst, ufp0, rn, 0, align);
        break;
      case 8:
        switch (eBytes) {
          case 2:
            microOps[uopIdx++] = new MicroLdrNeon8Uop<uint16_t>(
                    machInst, ufp0, rn, 0, align);
            break;
          case 4:
            microOps[uopIdx++] = new MicroLdrNeon8Uop<uint32_t>(
                    machInst, ufp0, rn, 0, align);
            break;
        }
        break;
      case 12:
        microOps[uopIdx++] = new MicroLdrNeon12Uop<uint32_t>(
                machInst, ufp0, rn, 0, align);
        break;
      case 16:
        microOps[uopIdx++] = new MicroLdrNeon16Uop<uint32_t>(
                machInst, ufp0, rn, 0, align);
        break;
      default:
        // Unrecognized load size
        microOps[uopIdx++] = new Unknown(machInst);
    }
    if (wb) {
        // rm other than 13/15 is a register post-index; otherwise advance
        // the base by the transfer size.
        if (rm != 15 && rm != 13) {
            microOps[uopIdx++] =
                new MicroAddUop(machInst, rn, rn, rm, 0, ArmISA::LSL);
        } else {
            microOps[uopIdx++] =
                new MicroAddiUop(machInst, rn, rn, loadSize);
        }
    }
    // Unpack from the temp area into the destination lane(s); the template
    // parameter is the element type, chosen from `size`.
    switch (elems) {
      case 4:
        assert(regs == 4);
        switch (size) {
          case 0:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon2to8Uop<uint8_t>(
                        machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon2to8Uop<uint8_t>(
                        machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          case 1:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon2to8Uop<uint16_t>(
                        machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon2to8Uop<uint16_t>(
                        machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          case 2:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon4to8Uop<uint32_t>(
                        machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon4to8Uop<uint32_t>(
                        machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          default:
            // Bad size
            microOps[uopIdx++] = new Unknown(machInst);
            break;
        }
        break;
      case 3:
        assert(regs == 3);
        switch (size) {
          case 0:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon2to6Uop<uint8_t>(
                        machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon2to6Uop<uint8_t>(
                        machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          case 1:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon2to6Uop<uint16_t>(
                        machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon2to6Uop<uint16_t>(
                        machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          case 2:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon4to6Uop<uint32_t>(
                        machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon4to6Uop<uint32_t>(
                        machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          default:
            // Bad size
            microOps[uopIdx++] = new Unknown(machInst);
            break;
        }
        break;
      case 2:
        assert(regs == 2);
        assert(loadRegs <= 2);
        switch (size) {
          case 0:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon2to4Uop<uint8_t>(
                        machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon2to4Uop<uint8_t>(
                        machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          case 1:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon2to4Uop<uint16_t>(
                        machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon2to4Uop<uint16_t>(
                        machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          case 2:
            if (all) {
                microOps[uopIdx++] = new MicroUnpackAllNeon2to4Uop<uint32_t>(
                        machInst, vd * 2, ufp0, inc * 2);
            } else {
                microOps[uopIdx++] = new MicroUnpackNeon2to4Uop<uint32_t>(
                        machInst, vd * 2, ufp0, inc * 2, lane);
            }
            break;
          default:
            // Bad size
            microOps[uopIdx++] = new Unknown(machInst);
            break;
        }
        break;
      case 1:
        assert(regs == 1 || (all && regs == 2));
        assert(loadRegs <= 2);
        // VLD1 "all lanes" may broadcast into up to two registers.
        for (unsigned offset = 0; offset < regs; offset++) {
            switch (size) {
              case 0:
                if (all) {
                    microOps[uopIdx++] =
                        new MicroUnpackAllNeon2to2Uop<uint8_t>(
                            machInst, (vd + offset) * 2, ufp0, inc * 2);
                } else {
                    microOps[uopIdx++] =
                        new MicroUnpackNeon2to2Uop<uint8_t>(
                            machInst, (vd + offset) * 2, ufp0, inc * 2, lane);
                }
                break;
              case 1:
                if (all) {
                    microOps[uopIdx++] =
                        new MicroUnpackAllNeon2to2Uop<uint16_t>(
                            machInst, (vd + offset) * 2, ufp0, inc * 2);
                } else {
                    microOps[uopIdx++] =
                        new MicroUnpackNeon2to2Uop<uint16_t>(
                            machInst, (vd + offset) * 2, ufp0, inc * 2, lane);
                }
                break;
              case 2:
                if (all) {
                    microOps[uopIdx++] =
                        new MicroUnpackAllNeon2to2Uop<uint32_t>(
                            machInst, (vd + offset) * 2, ufp0, inc * 2);
                } else {
                    microOps[uopIdx++] =
                        new MicroUnpackNeon2to2Uop<uint32_t>(
                            machInst, (vd + offset) * 2, ufp0, inc * 2, lane);
                }
                break;
              default:
                // Bad size
                microOps[uopIdx++] = new Unknown(machInst);
                break;
            }
        }
        break;
      default:
        // Bad number of elements to unpack
        microOps[uopIdx++] = new Unknown(machInst);
    }
    assert(uopIdx == numMicroops);

    for (unsigned i = 0; i < numMicroops - 1; i++) {
        MicroOp * uopPtr = dynamic_cast<MicroOp *>(microOps[i].get());
        assert(uopPtr);
        uopPtr->setDelayedCommit();
    }
    // NOTE(review): upstream also calls microOps[0]->setFirstMicroop() and
    // microOps[numMicroops - 1]->setLastMicroop() here; those lines appear
    // dropped from this rendered view.
}
822 
// NEON multi-register structure store (VST1..VST4, multiple structures):
// interleave the source registers into the special temp area when elems > 1,
// store the data to memory, then optionally write back the base.
//
//   elems - structure elements per access (1-4); regs - registers stored
//   rn    - base register; vd - first source vector register
//   inc   - source register stride; size/align - element size/alignment
//   rm    - post-index register (15 = no writeback, 13 = immediate update)
VstMultOp::VstMultOp(const char *mnem, ExtMachInst machInst, OpClass __opClass,
                     unsigned elems, RegIndex rn, RegIndex vd, unsigned regs,
                     unsigned inc, uint32_t size, uint32_t align, RegIndex rm) :
    PredMacroOp(mnem, machInst, __opClass)
{
    assert(regs > 0 && regs <= 4);
    assert(regs % elems == 0);

    // More than two registers need two memory micro-ops (16B + 8/16B).
    numMicroops = (regs > 2) ? 2 : 1;
    bool wb = (rm != 15);
    bool interleave = (elems > 1);

    if (wb) numMicroops++;
    if (interleave) numMicroops += (regs / elems);
    // NOTE(review): upstream gem5 allocates the micro-op array here
    // (microOps = new StaticInstPtr[numMicroops];). That line appears to
    // have been dropped from this rendered view.

    uint32_t noAlign = 0;

    // Interleaved data is staged through the special temp element area.
    RegIndex rMid = interleave ? VecSpecialElem : vd * 2;

    unsigned uopIdx = 0;
    if (interleave) {
        switch (elems) {
          case 4:
            assert(regs == 4);
            microOps[uopIdx++] = newNeonMixInst<MicroInterNeon8Uop>(
                    size, machInst, rMid, vd * 2, inc * 2);
            break;
          case 3:
            assert(regs == 3);
            microOps[uopIdx++] = newNeonMixInst<MicroInterNeon6Uop>(
                    size, machInst, rMid, vd * 2, inc * 2);
            break;
          case 2:
            assert(regs == 4 || regs == 2);
            if (regs == 4) {
                microOps[uopIdx++] = newNeonMixInst<MicroInterNeon4Uop>(
                        size, machInst, rMid, vd * 2, inc * 2);
                microOps[uopIdx++] = newNeonMixInst<MicroInterNeon4Uop>(
                        size, machInst, rMid + 4, vd * 2 + 2, inc * 2);
            } else {
                microOps[uopIdx++] = newNeonMixInst<MicroInterNeon4Uop>(
                        size, machInst, rMid, vd * 2, inc * 2);
            }
            break;
          default:
            // Bad number of elements to interleave
            microOps[uopIdx++] = new Unknown(machInst);
        }
    }
    switch (regs) {
      case 4:
        microOps[uopIdx++] = newNeonMemInst<MicroStrNeon16Uop>(
                size, machInst, rMid, rn, 0, align);
        microOps[uopIdx++] = newNeonMemInst<MicroStrNeon16Uop>(
                size, machInst, rMid + 4, rn, 16, noAlign);
        break;
      case 3:
        microOps[uopIdx++] = newNeonMemInst<MicroStrNeon16Uop>(
                size, machInst, rMid, rn, 0, align);
        microOps[uopIdx++] = newNeonMemInst<MicroStrNeon8Uop>(
                size, machInst, rMid + 4, rn, 16, noAlign);
        break;
      case 2:
        microOps[uopIdx++] = newNeonMemInst<MicroStrNeon16Uop>(
                size, machInst, rMid, rn, 0, align);
        break;
      case 1:
        microOps[uopIdx++] = newNeonMemInst<MicroStrNeon8Uop>(
                size, machInst, rMid, rn, 0, align);
        break;
      default:
        // Unknown number of registers
        microOps[uopIdx++] = new Unknown(machInst);
    }
    if (wb) {
        // rm other than 13/15 is a register post-index; otherwise advance
        // the base by the transfer size (8 bytes per register).
        if (rm != 15 && rm != 13) {
            microOps[uopIdx++] =
                new MicroAddUop(machInst, rn, rn, rm, 0, ArmISA::LSL);
        } else {
            microOps[uopIdx++] =
                new MicroAddiUop(machInst, rn, rn, regs * 8);
        }
    }
    assert(uopIdx == numMicroops);

    for (unsigned i = 0; i < numMicroops - 1; i++) {
        MicroOp * uopPtr = dynamic_cast<MicroOp *>(microOps[i].get());
        assert(uopPtr);
        uopPtr->setDelayedCommit();
    }
    // NOTE(review): upstream also calls microOps[0]->setFirstMicroop() and
    // microOps[numMicroops - 1]->setLastMicroop() here; those lines appear
    // dropped from this rendered view.
}
917 
// NEON single-structure store (VST1..VST4, single lane): pack the selected
// lane(s) from the source registers into the special temp element area,
// store the packed data, then optionally write back the base.
//
//   all   - asserted false here: store-all-lanes does not exist for VSTn
//   elems - structure elements per access (1-4); regs - registers read
//   rn    - base register; vd - first source vector register
//   inc   - source register stride; size - log2 of the element bytes
//   align - alignment encoding; rm - post-index register (15 = none,
//           13 = immediate update); lane - source lane index
VstSingleOp::VstSingleOp(const char *mnem, ExtMachInst machInst,
                         OpClass __opClass, bool all, unsigned elems,
                         RegIndex rn, RegIndex vd, unsigned regs,
                         unsigned inc, uint32_t size, uint32_t align,
                         RegIndex rm, unsigned lane) :
    PredMacroOp(mnem, machInst, __opClass)
{
    assert(!all);
    assert(regs > 0 && regs <= 4);
    assert(regs % elems == 0);

    unsigned eBytes = (1 << size);
    unsigned storeSize = eBytes * elems;
    [[maybe_unused]] unsigned storeRegs =
        (storeSize + sizeof(uint32_t) - 1) / sizeof(uint32_t);

    assert(storeRegs > 0 && storeRegs <= 4);

    // One memory uop, one pack uop per element group, plus writeback.
    numMicroops = 1;
    bool wb = (rm != 15);

    if (wb) numMicroops++;
    numMicroops += (regs / elems);
    // NOTE(review): upstream gem5 allocates the micro-op array here
    // (microOps = new StaticInstPtr[numMicroops];). That line appears to
    // have been dropped from this rendered view.

    RegIndex ufp0 = VecSpecialElem;

    unsigned uopIdx = 0;
    // Pack the lane data into the temp area; the template parameter is the
    // element type, chosen from `size`.
    switch (elems) {
      case 4:
        assert(regs == 4);
        switch (size) {
          case 0:
            microOps[uopIdx++] = new MicroPackNeon8to2Uop<uint8_t>(
                    machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          case 1:
            microOps[uopIdx++] = new MicroPackNeon8to2Uop<uint16_t>(
                    machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          case 2:
            microOps[uopIdx++] = new MicroPackNeon8to4Uop<uint32_t>(
                    machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          default:
            // Bad size
            microOps[uopIdx++] = new Unknown(machInst);
            break;
        }
        break;
      case 3:
        assert(regs == 3);
        switch (size) {
          case 0:
            microOps[uopIdx++] = new MicroPackNeon6to2Uop<uint8_t>(
                    machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          case 1:
            microOps[uopIdx++] = new MicroPackNeon6to2Uop<uint16_t>(
                    machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          case 2:
            microOps[uopIdx++] = new MicroPackNeon6to4Uop<uint32_t>(
                    machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          default:
            // Bad size
            microOps[uopIdx++] = new Unknown(machInst);
            break;
        }
        break;
      case 2:
        assert(regs == 2);
        assert(storeRegs <= 2);
        switch (size) {
          case 0:
            microOps[uopIdx++] = new MicroPackNeon4to2Uop<uint8_t>(
                    machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          case 1:
            microOps[uopIdx++] = new MicroPackNeon4to2Uop<uint16_t>(
                    machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          case 2:
            microOps[uopIdx++] = new MicroPackNeon4to2Uop<uint32_t>(
                    machInst, ufp0, vd * 2, inc * 2, lane);
            break;
          default:
            // Bad size
            microOps[uopIdx++] = new Unknown(machInst);
            break;
        }
        break;
      case 1:
        assert(regs == 1 || (all && regs == 2));
        assert(storeRegs <= 2);
        for (unsigned offset = 0; offset < regs; offset++) {
            switch (size) {
              case 0:
                microOps[uopIdx++] = new MicroPackNeon2to2Uop<uint8_t>(
                        machInst, ufp0, (vd + offset) * 2, inc * 2, lane);
                break;
              case 1:
                microOps[uopIdx++] = new MicroPackNeon2to2Uop<uint16_t>(
                        machInst, ufp0, (vd + offset) * 2, inc * 2, lane);
                break;
              case 2:
                microOps[uopIdx++] = new MicroPackNeon2to2Uop<uint32_t>(
                        machInst, ufp0, (vd + offset) * 2, inc * 2, lane);
                break;
              default:
                // Bad size
                microOps[uopIdx++] = new Unknown(machInst);
                break;
            }
        }
        break;
      default:
        // Bad number of elements to unpack
        microOps[uopIdx++] = new Unknown(machInst);
    }
    // Store the packed temp data; micro-op chosen by total transfer size.
    switch (storeSize) {
      case 1:
        microOps[uopIdx++] = new MicroStrNeon1Uop<uint8_t>(
                machInst, ufp0, rn, 0, align);
        break;
      case 2:
        if (eBytes == 2) {
            microOps[uopIdx++] = new MicroStrNeon2Uop<uint16_t>(
                    machInst, ufp0, rn, 0, align);
        } else {
            microOps[uopIdx++] = new MicroStrNeon2Uop<uint8_t>(
                    machInst, ufp0, rn, 0, align);
        }
        break;
      case 3:
        microOps[uopIdx++] = new MicroStrNeon3Uop<uint8_t>(
                machInst, ufp0, rn, 0, align);
        break;
      case 4:
        switch (eBytes) {
          case 1:
            microOps[uopIdx++] = new MicroStrNeon4Uop<uint8_t>(
                    machInst, ufp0, rn, 0, align);
            break;
          case 2:
            microOps[uopIdx++] = new MicroStrNeon4Uop<uint16_t>(
                    machInst, ufp0, rn, 0, align);
            break;
          case 4:
            microOps[uopIdx++] = new MicroStrNeon4Uop<uint32_t>(
                    machInst, ufp0, rn, 0, align);
            break;
        }
        break;
      case 6:
        microOps[uopIdx++] = new MicroStrNeon6Uop<uint16_t>(
                machInst, ufp0, rn, 0, align);
        break;
      case 8:
        switch (eBytes) {
          case 2:
            microOps[uopIdx++] = new MicroStrNeon8Uop<uint16_t>(
                    machInst, ufp0, rn, 0, align);
            break;
          case 4:
            microOps[uopIdx++] = new MicroStrNeon8Uop<uint32_t>(
                    machInst, ufp0, rn, 0, align);
            break;
        }
        break;
      case 12:
        microOps[uopIdx++] = new MicroStrNeon12Uop<uint32_t>(
                machInst, ufp0, rn, 0, align);
        break;
      case 16:
        microOps[uopIdx++] = new MicroStrNeon16Uop<uint32_t>(
                machInst, ufp0, rn, 0, align);
        break;
      default:
        // Bad store size
        microOps[uopIdx++] = new Unknown(machInst);
    }
    if (wb) {
        // rm other than 13/15 is a register post-index; otherwise advance
        // the base by the transfer size.
        if (rm != 15 && rm != 13) {
            microOps[uopIdx++] =
                new MicroAddUop(machInst, rn, rn, rm, 0, ArmISA::LSL);
        } else {
            microOps[uopIdx++] =
                new MicroAddiUop(machInst, rn, rn, storeSize);
        }
    }
    assert(uopIdx == numMicroops);

    for (unsigned i = 0; i < numMicroops - 1; i++) {
        MicroOp * uopPtr = dynamic_cast<MicroOp *>(microOps[i].get());
        assert(uopPtr);
        uopPtr->setDelayedCommit();
    }
    microOps[0]->setFirstMicroop();
    // NOTE(review): upstream also calls
    // microOps[numMicroops - 1]->setLastMicroop() here; that line appears
    // dropped from this rendered view.
}
1120 
/*
 * Macro-op for the AArch64 LD1/LD2/LD3/LD4 (multiple structures)
 * instructions.  Decomposes the load into: (1) <=16-byte memory microops
 * that fill consecutive scratch vector registers starting at 'vx',
 * (2) an optional base-register writeback microop, and (3) "deinterleave"
 * marshalling microops that scatter the loaded bytes into the
 * architectural destination registers starting at 'vd'.
 *
 * @param rn             base (address) register; 31 is treated as SP
 * @param vd             first destination vector register
 * @param rm             post-index register; X31 selects an immediate
 *                       increment of the total transfer size
 * @param eSize          log2 of the element size in bytes
 * @param dataSize       per-register data size in bits
 * @param numStructElems number of structure elements (interleave factor)
 * @param numRegs        number of architectural registers transferred
 * @param wb             true if base-register writeback is performed
 */
VldMultOp64::VldMultOp64(const char *mnem, ExtMachInst machInst,
                         OpClass __opClass, RegIndex rn, RegIndex vd,
                         RegIndex rm, uint8_t eSize, uint8_t dataSize,
                         uint8_t numStructElems, uint8_t numRegs, bool wb) :
    PredMacroOp(mnem, machInst, __opClass)
{
    // NOTE(review): a line appears to have been dropped here during
    // extraction -- 'vx' (the first scratch vector register index) is used
    // below but never declared in this view; confirm against upstream
    // macromem.cc.
    RegIndex rnsp = (RegIndex) makeSP((IntRegIndex) rn);
    bool baseIsSP = isSP((IntRegIndex) rnsp);

    // One extra microop if the base register is written back.
    numMicroops = wb ? 1 : 0;

    int totNumBytes = numRegs * dataSize / 8;
    assert(totNumBytes <= 64);

    // The guiding principle here is that no more than 16 bytes can be
    // transferred at a time
    int numMemMicroops = totNumBytes / 16;
    int residuum = totNumBytes % 16;
    if (residuum)
        ++numMemMicroops;
    numMicroops += numMemMicroops;

    // One deinterleave microop per pair of destination registers
    // (rounded up for an odd register count).
    int numMarshalMicroops = numRegs / 2 + (numRegs % 2 ? 1 : 0);
    numMicroops += numMarshalMicroops;

    // NOTE(review): the allocation of the microOps array (present in the
    // sibling MacroVFPMemOp-style constructors) appears to have been
    // dropped here by the extraction; confirm against upstream.
    unsigned uopIdx = 0;
    uint32_t memaccessFlags = (MMU::ArmFlags)eSize | MMU::AllowUnaligned;

    // Full 16-byte loads for every chunk but the last...
    int i = 0;
    for (; i < numMemMicroops - 1; ++i) {
        microOps[uopIdx++] = new MicroNeonLoad64(
            machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags,
            baseIsSP, 16 /* accSize */, eSize);
    }
    // ...and a final, possibly partial, load for the remainder.
    microOps[uopIdx++] = new MicroNeonLoad64(
        machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags, baseIsSP,
        residuum ? residuum : 16 /* accSize */, eSize);

    // Writeback microop: the post-increment amount is encoded in "Rm": a
    // 64-bit general register OR as '11111' for an immediate value equal to
    // the total number of bytes transferred (i.e. 8, 16, 24, 32, 48 or 64)
    if (wb) {
        if (rm != ((RegIndex) INTREG_X31)) {
            microOps[uopIdx++] = new MicroAddXERegUop(machInst, rnsp, rnsp, rm,
                                                      UXTX, 0);
        } else {
            microOps[uopIdx++] = new MicroAddXiUop(machInst, rnsp, rnsp,
                                                   totNumBytes);
        }
    }

    // Deinterleave the scratch registers into the destinations; the
    // microop variant is selected by the register count.
    for (int i = 0; i < numMarshalMicroops; ++i) {
        switch(numRegs) {
        case 1: microOps[uopIdx++] = new MicroDeintNeon64_1Reg(
                    machInst, vd + (RegIndex) (2 * i), vx, eSize, dataSize,
                    numStructElems, 1, i /* step */);
            break;
        case 2: microOps[uopIdx++] = new MicroDeintNeon64_2Reg(
                    machInst, vd + (RegIndex) (2 * i), vx, eSize, dataSize,
                    numStructElems, 2, i /* step */);
            break;
        case 3: microOps[uopIdx++] = new MicroDeintNeon64_3Reg(
                    machInst, vd + (RegIndex) (2 * i), vx, eSize, dataSize,
                    numStructElems, 3, i /* step */);
            break;
        case 4: microOps[uopIdx++] = new MicroDeintNeon64_4Reg(
                    machInst, vd + (RegIndex) (2 * i), vx, eSize, dataSize,
                    numStructElems, 4, i /* step */);
            break;
        default: panic("Invalid number of registers");
        }

    }

    assert(uopIdx == numMicroops);

    // NOTE(review): this loop's body (presumably
    // microOps[i]->setDelayedCommit(); as in the loop at the top of this
    // view) and a trailing microOps[numMicroops - 1]->setLastMicroop();
    // appear to have been dropped by the extraction; confirm upstream.
    for (int i = 0; i < numMicroops - 1; ++i) {
    }
    microOps[0]->setFirstMicroop();
}
1205 
/*
 * Macro-op for the AArch64 ST1/ST2/ST3/ST4 (multiple structures)
 * instructions.  Mirror image of VldMultOp64: (1) "interleave"
 * marshalling microops gather the source registers starting at 'vd' into
 * scratch registers starting at 'vx', (2) <=16-byte store microops write
 * them to memory, and (3) an optional base-register writeback microop.
 *
 * @param rn             base (address) register; 31 is treated as SP
 * @param vd             first source vector register
 * @param rm             post-index register; X31 selects an immediate
 *                       increment of the total transfer size
 * @param eSize          log2 of the element size in bytes
 * @param dataSize       per-register data size in bits
 * @param numStructElems number of structure elements (interleave factor)
 * @param numRegs        number of architectural registers transferred
 * @param wb             true if base-register writeback is performed
 */
VstMultOp64::VstMultOp64(const char *mnem, ExtMachInst machInst,
                         OpClass __opClass, RegIndex rn, RegIndex vd,
                         RegIndex rm, uint8_t eSize, uint8_t dataSize,
                         uint8_t numStructElems, uint8_t numRegs, bool wb) :
    PredMacroOp(mnem, machInst, __opClass)
{
    // NOTE(review): the declaration of 'vx' (first scratch vector
    // register) appears to have been dropped by the extraction; it is
    // used below. Confirm against upstream macromem.cc.
    RegIndex rnsp = (RegIndex) makeSP((IntRegIndex) rn);
    bool baseIsSP = isSP((IntRegIndex) rnsp);

    // One extra microop if the base register is written back.
    numMicroops = wb ? 1 : 0;

    int totNumBytes = numRegs * dataSize / 8;
    assert(totNumBytes <= 64);

    // The guiding principle here is that no more than 16 bytes can be
    // transferred at a time
    int numMemMicroops = totNumBytes / 16;
    int residuum = totNumBytes % 16;
    if (residuum)
        ++numMemMicroops;
    numMicroops += numMemMicroops;

    // Each interleave microop handles up to 32 bytes, so two are needed
    // only for transfers larger than that.
    int numMarshalMicroops = totNumBytes > 32 ? 2 : 1;
    numMicroops += numMarshalMicroops;

    // NOTE(review): the microOps array allocation appears to have been
    // dropped here by the extraction; confirm upstream.
    unsigned uopIdx = 0;

    // Interleave the source registers into the scratch registers first;
    // the microop variant is selected by the register count.
    for (int i = 0; i < numMarshalMicroops; ++i) {
        switch (numRegs) {
        case 1: microOps[uopIdx++] = new MicroIntNeon64_1Reg(
                    machInst, vx + (RegIndex) (2 * i), vd, eSize, dataSize,
                    numStructElems, 1, i /* step */);
            break;
        case 2: microOps[uopIdx++] = new MicroIntNeon64_2Reg(
                    machInst, vx + (RegIndex) (2 * i), vd, eSize, dataSize,
                    numStructElems, 2, i /* step */);
            break;
        case 3: microOps[uopIdx++] = new MicroIntNeon64_3Reg(
                    machInst, vx + (RegIndex) (2 * i), vd, eSize, dataSize,
                    numStructElems, 3, i /* step */);
            break;
        case 4: microOps[uopIdx++] = new MicroIntNeon64_4Reg(
                    machInst, vx + (RegIndex) (2 * i), vd, eSize, dataSize,
                    numStructElems, 4, i /* step */);
            break;
        default: panic("Invalid number of registers");
        }
    }

    uint32_t memaccessFlags = (MMU::ArmFlags)eSize | MMU::AllowUnaligned;

    // Full 16-byte stores for every chunk but the last...
    int i = 0;
    for (; i < numMemMicroops - 1; ++i) {
        microOps[uopIdx++] = new MicroNeonStore64(
            machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags,
            baseIsSP, 16 /* accSize */, eSize);
    }
    // ...and a final, possibly partial, store for the remainder.
    microOps[uopIdx++] = new MicroNeonStore64(
        machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags, baseIsSP,
        residuum ? residuum : 16 /* accSize */, eSize);

    // Writeback microop: the post-increment amount is encoded in "Rm": a
    // 64-bit general register OR as '11111' for an immediate value equal to
    // the total number of bytes transferred (i.e. 8, 16, 24, 32, 48 or 64)
    if (wb) {
        if (rm != ((RegIndex) INTREG_X31)) {
            microOps[uopIdx++] = new MicroAddXERegUop(machInst, rnsp, rnsp, rm,
                                                      UXTX, 0);
        } else {
            microOps[uopIdx++] = new MicroAddXiUop(machInst, rnsp, rnsp,
                                                   totNumBytes);
        }
    }

    assert(uopIdx == numMicroops);

    // NOTE(review): this loop's body (presumably setDelayedCommit on each
    // microop) and a trailing setLastMicroop() call appear to have been
    // dropped by the extraction; confirm upstream.
    for (int i = 0; i < numMicroops - 1; i++) {
    }
    microOps[0]->setFirstMicroop();
}
1290 
/*
 * Macro-op for the AArch64 LD1/LD2/LD3/LD4 (single structure) and
 * LD1R..LD4R (replicate) instructions: <=16-byte memory microops fill
 * scratch registers starting at 'vx', then unpack microops move the
 * selected element (or replicate it across all lanes) into the
 * destination registers starting at 'vd'.
 *
 * NOTE: the member initializers below zero the members eSize, dataSize,
 * numStructElems, index, wb and replicate; the same-named constructor
 * parameters shadow them throughout the body, so the members remain at
 * their zero/false defaults.
 *
 * @param rn             base (address) register; 31 is treated as SP
 * @param vd             first destination vector register
 * @param rm             post-index register; X31 selects an immediate
 *                       increment of the total transfer size
 * @param eSize          log2 of the element size in bytes
 * @param dataSize       per-register data size in bits
 * @param numStructElems number of structure elements
 * @param index          element index within the structure
 * @param wb             true if base-register writeback is performed
 * @param replicate      true for the LDxR replicate-to-all-lanes forms
 */
VldSingleOp64::VldSingleOp64(const char *mnem, ExtMachInst machInst,
                             OpClass __opClass, RegIndex rn, RegIndex vd,
                             RegIndex rm, uint8_t eSize, uint8_t dataSize,
                             uint8_t numStructElems, uint8_t index, bool wb,
                             bool replicate) :
    PredMacroOp(mnem, machInst, __opClass),
    eSize(0), dataSize(0), numStructElems(0), index(0),
    wb(false), replicate(false)

{
    // NOTE(review): the declaration of 'vx' (first scratch vector
    // register) appears to have been dropped by the extraction; it is
    // used below. Confirm against upstream macromem.cc.
    RegIndex rnsp = (RegIndex) makeSP((IntRegIndex) rn);
    bool baseIsSP = isSP((IntRegIndex) rnsp);

    // One extra microop if the base register is written back.
    numMicroops = wb ? 1 : 0;

    int eSizeBytes = 1 << eSize;
    int totNumBytes = numStructElems * eSizeBytes;
    assert(totNumBytes <= 64);

    // The guiding principle here is that no more than 16 bytes can be
    // transferred at a time
    int numMemMicroops = totNumBytes / 16;
    int residuum = totNumBytes % 16;
    if (residuum)
        ++numMemMicroops;
    numMicroops += numMemMicroops;

    // One unpack microop per pair of structure elements (rounded up).
    int numMarshalMicroops = numStructElems / 2 + (numStructElems % 2 ? 1 : 0);
    numMicroops += numMarshalMicroops;

    // NOTE(review): the microOps array allocation appears to have been
    // dropped here by the extraction; confirm upstream.
    unsigned uopIdx = 0;

    uint32_t memaccessFlags = (MMU::ArmFlags)eSize | MMU::AllowUnaligned;

    // Full 16-byte loads for every chunk but the last...
    int i = 0;
    for (; i < numMemMicroops - 1; ++i) {
        microOps[uopIdx++] = new MicroNeonLoad64(
            machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags,
            baseIsSP, 16 /* accSize */, eSize);
    }
    // ...and a final, possibly partial, load for the remainder.
    microOps[uopIdx++] = new MicroNeonLoad64(
        machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags, baseIsSP,
        residuum ? residuum : 16 /* accSize */, eSize);

    // Writeback microop: the post-increment amount is encoded in "Rm": a
    // 64-bit general register OR as '11111' for an immediate value equal to
    // the total number of bytes transferred (i.e. 8, 16, 24, 32, 48 or 64)
    if (wb) {
        if (rm != ((RegIndex) INTREG_X31)) {
            microOps[uopIdx++] = new MicroAddXERegUop(machInst, rnsp, rnsp, rm,
                                                      UXTX, 0);
        } else {
            microOps[uopIdx++] = new MicroAddXiUop(machInst, rnsp, rnsp,
                                                   totNumBytes);
        }
    }

    // Unpack (or replicate) the loaded element into the destinations.
    for (int i = 0; i < numMarshalMicroops; ++i) {
        microOps[uopIdx++] = new MicroUnpackNeon64(
            machInst, vd + (RegIndex) (2 * i), vx, eSize, dataSize,
            numStructElems, index, i /* step */, replicate);
    }

    assert(uopIdx == numMicroops);

    // NOTE(review): this loop's body (presumably setDelayedCommit on each
    // microop) and a trailing setLastMicroop() call appear to have been
    // dropped by the extraction; confirm upstream.
    for (int i = 0; i < numMicroops - 1; i++) {
    }
    microOps[0]->setFirstMicroop();
}
1364 
/*
 * Macro-op for the AArch64 ST1/ST2/ST3/ST4 (single structure)
 * instructions.  Mirror image of VldSingleOp64: pack microops gather the
 * selected element of the source registers (starting at 'vd') into
 * scratch registers starting at 'vx', then <=16-byte store microops
 * write them to memory, followed by optional base-register writeback.
 *
 * NOTE: as in VldSingleOp64, the member initializers zero the members
 * eSize, dataSize, numStructElems, index, wb and replicate; the
 * same-named parameters shadow them in the body, so the members keep
 * their zero/false defaults.
 *
 * @param rn             base (address) register; 31 is treated as SP
 * @param vd             first source vector register
 * @param rm             post-index register; X31 selects an immediate
 *                       increment of the total transfer size
 * @param eSize          log2 of the element size in bytes
 * @param dataSize       per-register data size in bits
 * @param numStructElems number of structure elements
 * @param index          element index within the structure
 * @param wb             true if base-register writeback is performed
 * @param replicate      passed through to the pack microop
 */
VstSingleOp64::VstSingleOp64(const char *mnem, ExtMachInst machInst,
                             OpClass __opClass, RegIndex rn, RegIndex vd,
                             RegIndex rm, uint8_t eSize, uint8_t dataSize,
                             uint8_t numStructElems, uint8_t index, bool wb,
                             bool replicate) :
    PredMacroOp(mnem, machInst, __opClass),
    eSize(0), dataSize(0), numStructElems(0), index(0),
    wb(false), replicate(false)
{
    // NOTE(review): the declaration of 'vx' (first scratch vector
    // register) appears to have been dropped by the extraction; it is
    // used below. Confirm against upstream macromem.cc.
    RegIndex rnsp = (RegIndex) makeSP((IntRegIndex) rn);
    bool baseIsSP = isSP((IntRegIndex) rnsp);

    // One extra microop if the base register is written back.
    numMicroops = wb ? 1 : 0;

    int eSizeBytes = 1 << eSize;
    int totNumBytes = numStructElems * eSizeBytes;
    assert(totNumBytes <= 64);

    // The guiding principle here is that no more than 16 bytes can be
    // transferred at a time
    int numMemMicroops = totNumBytes / 16;
    int residuum = totNumBytes % 16;
    if (residuum)
        ++numMemMicroops;
    numMicroops += numMemMicroops;

    // Each pack microop handles up to 32 bytes, so two are needed only
    // for transfers larger than that.
    int numMarshalMicroops = totNumBytes > 32 ? 2 : 1;
    numMicroops += numMarshalMicroops;

    // NOTE(review): the microOps array allocation appears to have been
    // dropped here by the extraction; confirm upstream.
    unsigned uopIdx = 0;

    // Pack the selected element(s) into the scratch registers first.
    for (int i = 0; i < numMarshalMicroops; ++i) {
        microOps[uopIdx++] = new MicroPackNeon64(
            machInst, vx + (RegIndex) (2 * i), vd, eSize, dataSize,
            numStructElems, index, i /* step */, replicate);
    }

    uint32_t memaccessFlags = (MMU::ArmFlags)eSize | MMU::AllowUnaligned;

    // Full 16-byte stores for every chunk but the last...
    int i = 0;
    for (; i < numMemMicroops - 1; ++i) {
        microOps[uopIdx++] = new MicroNeonStore64(
            machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags,
            baseIsSP, 16 /* accsize */, eSize);
    }
    // ...and a final, possibly partial, store for the remainder.
    microOps[uopIdx++] = new MicroNeonStore64(
        machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags, baseIsSP,
        residuum ? residuum : 16 /* accSize */, eSize);

    // Writeback microop: the post-increment amount is encoded in "Rm": a
    // 64-bit general register OR as '11111' for an immediate value equal to
    // the total number of bytes transferred (i.e. 8, 16, 24, 32, 48 or 64)
    if (wb) {
        if (rm != ((RegIndex) INTREG_X31)) {
            microOps[uopIdx++] = new MicroAddXERegUop(machInst, rnsp, rnsp, rm,
                                                      UXTX, 0);
        } else {
            microOps[uopIdx++] = new MicroAddXiUop(machInst, rnsp, rnsp,
                                                   totNumBytes);
        }
    }

    assert(uopIdx == numMicroops);

    // NOTE(review): this loop's body (presumably setDelayedCommit on each
    // microop) and a trailing setLastMicroop() call appear to have been
    // dropped by the extraction; confirm upstream.
    for (int i = 0; i < numMicroops - 1; i++) {
    }
    microOps[0]->setFirstMicroop();
}
1437 
/*
 * Macro-op for the AArch32 VFP load/store-multiple instructions
 * (VLDM/VSTM and friends).  Emits one microop per single-precision
 * register, or a bottom/top pair of microops per double-precision
 * register, followed by an optional base-register writeback.
 *
 * @param rn        base (address) register
 * @param vd        first FP register; incremented as microops are built
 * @param single    true for single-precision (one 4-byte microop per
 *                  register), false for double (two microops per register)
 * @param up        true to walk addresses upwards from the base
 * @param writeback true to adjust rn by the total transfer size at the end
 * @param load      true for a load, false for a store
 * @param offset    transfer size in words (see fldmx note below)
 */
MacroVFPMemOp::MacroVFPMemOp(const char *mnem, ExtMachInst machInst,
                             OpClass __opClass, IntRegIndex rn,
                             RegIndex vd, bool single, bool up,
                             bool writeback, bool load, uint32_t offset) :
    PredMacroOp(mnem, machInst, __opClass)
{
    int i = 0;

    // The lowest order bit selects fldmx (set) or fldmd (clear). These seem
    // to be functionally identical except that fldmx is deprecated. For now
    // we'll assume they're otherwise interchangeable.
    int count = (single ? offset : (offset / 2));
    numMicroops = count * (single ? 1 : 2) + (writeback ? 1 : 0);
    // NOTE(review): the microOps array allocation appears to have been
    // dropped here by the extraction; confirm upstream.

    int64_t addr = 0;

    // For a descending transfer, start at the top of the block and walk
    // down towards the base address.
    if (!up)
        addr = 4 * offset;

    bool tempUp = up;
    for (int j = 0; j < count; j++) {
        if (load) {
            if (single) {
                microOps[i++] = new MicroLdrFpUop(machInst, vd++, rn,
                                                  tempUp, addr);
            } else {
                // Doubles are split into a bottom-half and top-half load.
                microOps[i++] = new MicroLdrDBFpUop(machInst, vd++, rn,
                                                    tempUp, addr);
                microOps[i++] = new MicroLdrDTFpUop(machInst, vd++, rn, tempUp,
                                                    addr + (up ? 4 : -4));
            }
        } else {
            if (single) {
                microOps[i++] = new MicroStrFpUop(machInst, vd++, rn,
                                                  tempUp, addr);
            } else {
                // Doubles are split into a bottom-half and top-half store.
                microOps[i++] = new MicroStrDBFpUop(machInst, vd++, rn,
                                                    tempUp, addr);
                microOps[i++] = new MicroStrDTFpUop(machInst, vd++, rn, tempUp,
                                                    addr + (up ? 4 : -4));
            }
        }
        if (!tempUp) {
            addr -= (single ? 4 : 8);
            // The microops don't handle negative displacement, so when we
            // hit zero, flip polarity and start adding.
            if (addr <= 0) {
                tempUp = true;
                addr = -addr;
            }
        } else {
            addr += (single ? 4 : 8);
        }
    }

    // Writeback adjusts the base by the total transfer size, in the
    // direction given by 'up'.
    if (writeback) {
        if (up) {
            microOps[i++] =
                new MicroAddiUop(machInst, rn, rn, 4 * offset);
        } else {
            microOps[i++] =
                new MicroSubiUop(machInst, rn, rn, 4 * offset);
        }
    }

    assert(numMicroops == i);
    microOps[0]->setFirstMicroop();
    // NOTE(review): a setLastMicroop() call on the final microop appears
    // to have been dropped here by the extraction; the loop below relies
    // on the last microop being marked. Confirm upstream.

    // All but the last microop commit together with the macro-op.
    for (StaticInstPtr *curUop = microOps;
         !(*curUop)->isLastMicroop(); curUop++) {
        MicroOp * uopPtr = dynamic_cast<MicroOp *>(curUop->get());
        assert(uopPtr);
        uopPtr->setDelayedCommit();
    }
}
1515 
1516 std::string
1518  Addr pc, const loader::SymbolTable *symtab) const
1519 {
1520  std::stringstream ss;
1521  printMnemonic(ss);
1522  printIntReg(ss, ura);
1523  ss << ", ";
1524  printIntReg(ss, urb);
1525  ss << ", ";
1526  ccprintf(ss, "#%d", imm);
1527  return ss.str();
1528 }
1529 
1530 std::string
1532  Addr pc, const loader::SymbolTable *symtab) const
1533 {
1534  std::stringstream ss;
1535  printMnemonic(ss);
1536  printIntReg(ss, ura);
1537  ss << ", ";
1538  printIntReg(ss, urb);
1539  ss << ", ";
1540  ccprintf(ss, "#%d", imm);
1541  return ss.str();
1542 }
1543 
1544 std::string
1546  Addr pc, const loader::SymbolTable *symtab) const
1547 {
1548  std::stringstream ss;
1549  printMnemonic(ss);
1550  ss << "[PC,CPSR]";
1551  return ss.str();
1552 }
1553 
1554 std::string
1556  Addr pc, const loader::SymbolTable *symtab) const
1557 {
1558  std::stringstream ss;
1559  printMnemonic(ss);
1560  printIntReg(ss, ura);
1561  ccprintf(ss, ", ");
1562  printIntReg(ss, urb);
1563  printExtendOperand(false, ss, (IntRegIndex)urc, type, shiftAmt);
1564  return ss.str();
1565 }
1566 
1567 std::string
1569  Addr pc, const loader::SymbolTable *symtab) const
1570 {
1571  std::stringstream ss;
1572  printMnemonic(ss);
1573  printIntReg(ss, ura);
1574  ss << ", ";
1575  printIntReg(ss, urb);
1576  return ss.str();
1577 }
1578 
1579 std::string
1581  Addr pc, const loader::SymbolTable *symtab) const
1582 {
1583  std::stringstream ss;
1584  printMnemonic(ss);
1585  printIntReg(ss, ura);
1586  ss << ", ";
1587  printIntReg(ss, urb);
1588  ss << ", ";
1589  printIntReg(ss, urc);
1590  return ss.str();
1591 }
1592 
1593 std::string
1595  Addr pc, const loader::SymbolTable *symtab) const
1596 {
1597  std::stringstream ss;
1598  printMnemonic(ss);
1599  if (isFloating())
1600  printFloatReg(ss, ura);
1601  else
1602  printIntReg(ss, ura);
1603  ss << ", [";
1604  printIntReg(ss, urb);
1605  ss << ", ";
1606  ccprintf(ss, "#%d", imm);
1607  ss << "]";
1608  return ss.str();
1609 }
1610 
1611 std::string
1613  Addr pc, const loader::SymbolTable *symtab) const
1614 {
1615  std::stringstream ss;
1616  printMnemonic(ss);
1617  printIntReg(ss, dest);
1618  ss << ",";
1619  printIntReg(ss, dest2);
1620  ss << ", [";
1621  printIntReg(ss, urb);
1622  ss << ", ";
1623  ccprintf(ss, "#%d", imm);
1624  ss << "]";
1625  return ss.str();
1626 }
1627 
1628 } // namespace ArmISA
1629 } // namespace gem5
gem5::ArmISA::VldMultOp64::numStructElems
uint8_t numStructElems
Definition: macromem.hh:237
gem5::ArmISA::VldSingleOp64::VldSingleOp64
VldSingleOp64(const char *mnem, ExtMachInst machInst, OpClass __opClass, RegIndex rn, RegIndex vd, RegIndex rm, uint8_t eSize, uint8_t dataSize, uint8_t numStructElems, uint8_t index, bool wb, bool replicate=false)
Definition: macromem.cc:1291
gem5::ArmISA::MicroIntOp::ura
RegIndex ura
Definition: macromem.hh:365
gem5::ArmISA::number_of_ones
static unsigned int number_of_ones(int32_t val)
Definition: macromem.hh:56
gem5::ArmISA::intRegInMode
static int intRegInMode(OperatingMode mode, int reg)
Definition: int.hh:475
gem5::ArmISA::BigFpMemLitOp::BigFpMemLitOp
BigFpMemLitOp(const char *mnem, ExtMachInst machInst, OpClass __opClass, IntRegIndex dest, int64_t imm)
Definition: macromem.cc:447
gem5::ArmISA::MicroIntImmOp::ura
RegIndex ura
Definition: macromem.hh:328
gem5::ArmISA::VstMultOp64::eSize
uint8_t eSize
Definition: macromem.hh:249
gem5::ArmISA::VldSingleOp64::replicate
bool replicate
Definition: macromem.hh:262
gem5::ArmISA::VstSingleOp64::dataSize
uint8_t dataSize
Definition: macromem.hh:273
gem5::ArmISA::isSP
static bool isSP(IntRegIndex reg)
Definition: int.hh:528
gem5::ArmISA::BigFpMemPostOp::BigFpMemPostOp
BigFpMemPostOp(const char *mnem, ExtMachInst machInst, OpClass __opClass, bool load, IntRegIndex dest, IntRegIndex base, int64_t imm)
Definition: macromem.cc:369
gem5::ArmISA::MicroIntRegXOp::urb
RegIndex urb
Definition: macromem.hh:381
gem5::MipsISA::index
Bitfield< 30, 0 > index
Definition: pra_constants.hh:47
gem5::ArmISA::up
Bitfield< 23 > up
Definition: types.hh:124
gem5::ArmISA::writeback
Bitfield< 21 > writeback
Definition: types.hh:126
gem5::ArmISA::MicroOp
Base class for Memory microops.
Definition: macromem.hh:70
gem5::ArmISA::PredOp::condCode
ConditionCode condCode
Definition: pred_inst.hh:220
gem5::replaceBits
constexpr void replaceBits(T &val, unsigned first, unsigned last, B bit_val)
A convenience function to replace bits first to last of val with bit_val in place.
Definition: bitfield.hh:197
gem5::ArmISA::ArmStaticInst::printMnemonic
void printMnemonic(std::ostream &os, const std::string &suffix="", bool withPred=true, bool withCond64=false, ConditionCode cond64=COND_UC) const
Definition: static_inst.cc:377
gem5::ArmISA::PredMacroOp::numMicroops
uint32_t numMicroops
Definition: pred_inst.hh:346
gem5::ArmISA::VldMultOp64::numRegs
uint8_t numRegs
Definition: macromem.hh:237
gem5::PowerISA::vx
Bitfield< 29 > vx
Definition: misc.hh:70
gem5::ArmISA::MicroIntImmOp::urb
RegIndex urb
Definition: macromem.hh:328
gem5::ArmISA::MicroIntRegXOp::urc
RegIndex urc
Definition: macromem.hh:381
gem5::ArmISA::ArmExtendType
ArmExtendType
Definition: types.hh:215
gem5::ArmISA::PairMemOp::AddrMode
AddrMode
Definition: macromem.hh:476
gem5::loader::SymbolTable
Definition: symtab.hh:65
gem5::ArmISA::VldMultOp64::wb
bool wb
Definition: macromem.hh:238
gem5::ArmISA::MicroSetPCCPSR::generateDisassembly
std::string generateDisassembly(Addr pc, const loader::SymbolTable *symtab) const override
Internal function to generate disassembly string.
Definition: macromem.cc:1545
gem5::ArmISA::MicroMemPairOp::urb
RegIndex urb
Definition: macromem.hh:441
gem5::ArmISA::MMU::AllowUnaligned
@ AllowUnaligned
Definition: mmu.hh:108
gem5::ArmISA::MMU::ArmFlags
ArmFlags
Definition: mmu.hh:97
gem5::RefCountingPtr::get
T * get() const
Directly access the pointer itself without taking a reference.
Definition: refcnt.hh:227
gem5::ArmISA::VstMultOp64::dataSize
uint8_t dataSize
Definition: macromem.hh:249
gem5::X86ISA::base
Bitfield< 51, 12 > base
Definition: pagetable.hh:141
gem5::ArmISA::MicroMemPairOp::dest
RegIndex dest
Definition: macromem.hh:441
gem5::ArmISA::MicroMemPairOp::generateDisassembly
std::string generateDisassembly(Addr pc, const loader::SymbolTable *symtab) const override
Internal function to generate disassembly string.
Definition: macromem.cc:1612
gem5::ArmISA::i
Bitfield< 7 > i
Definition: misc_types.hh:67
gem5::ArmISA::fp
Bitfield< 19, 16 > fp
Definition: misc_types.hh:177
gem5::ArmISA::VstMultOp::VstMultOp
VstMultOp(const char *mnem, ExtMachInst machInst, OpClass __opClass, unsigned width, RegIndex rn, RegIndex vd, unsigned regs, unsigned inc, uint32_t size, uint32_t align, RegIndex rm)
Definition: macromem.cc:823
gem5::ArmISA::MicroIntRegXOp::shiftAmt
uint32_t shiftAmt
Definition: macromem.hh:383
gem5::ArmISA::ArmStaticInst::machInst
ExtMachInst machInst
Definition: static_inst.hh:151
gem5::ArmISA::MicroIntRegXOp::generateDisassembly
std::string generateDisassembly(Addr pc, const loader::SymbolTable *symtab) const override
Internal function to generate disassembly string.
Definition: macromem.cc:1555
gem5::ccprintf
void ccprintf(cp::Print &print)
Definition: cprintf.hh:130
macromem.hh
gem5::ArmISA::VldSingleOp64::wb
bool wb
Definition: macromem.hh:262
sc_dt::align
void align(const scfx_rep &lhs, const scfx_rep &rhs, int &new_wp, int &len_mant, scfx_mant_ref &lhs_mant, scfx_mant_ref &rhs_mant)
Definition: scfx_rep.cc:2083
sc_dt::inc
void inc(scfx_mant &mant)
Definition: scfx_mant.hh:341
gem5::RefCountingPtr< StaticInst >
gem5::ArmISA::MicroIntImmXOp::ura
RegIndex ura
Definition: macromem.hh:345
gem5::ArmISA::VstSingleOp::VstSingleOp
VstSingleOp(const char *mnem, ExtMachInst machInst, OpClass __opClass, bool all, unsigned elems, RegIndex rn, RegIndex vd, unsigned regs, unsigned inc, uint32_t size, uint32_t align, RegIndex rm, unsigned lane)
Definition: macromem.cc:918
gem5::ArmISA::VstSingleOp64::VstSingleOp64
VstSingleOp64(const char *mnem, ExtMachInst machInst, OpClass __opClass, RegIndex rn, RegIndex vd, RegIndex rm, uint8_t eSize, uint8_t dataSize, uint8_t numStructElems, uint8_t index, bool wb, bool replicate=false)
Definition: macromem.cc:1365
gem5::ArmISA::VstSingleOp64::wb
bool wb
Definition: macromem.hh:274
gem5::ArmISA::MicroMemPairOp::imm
int32_t imm
Definition: macromem.hh:443
gem5::SparcISA::INTREG_UREG0
@ INTREG_UREG0
Definition: int.hh:57
gem5::StaticInst::isFloating
bool isFloating() const
Definition: static_inst.hh:178
gem5::ArmISA::MicroIntImmXOp::generateDisassembly
std::string generateDisassembly(Addr pc, const loader::SymbolTable *symtab) const override
Internal function to generate disassembly string.
Definition: macromem.cc:1531
gem5::ArmISA::ArmStaticInst::printExtendOperand
void printExtendOperand(bool firstOperand, std::ostream &os, IntRegIndex rm, ArmExtendType type, int64_t shiftAmt) const
Definition: static_inst.cc:562
gem5::ArmISA::j
Bitfield< 24 > j
Definition: misc_types.hh:57
gem5::ArmISA::MicroIntRegXOp::ura
RegIndex ura
Definition: macromem.hh:381
gem5::ArmISA::MacroMemOp::MacroMemOp
MacroMemOp(const char *mnem, ExtMachInst machInst, OpClass __opClass, IntRegIndex rn, bool index, bool up, bool user, bool writeback, bool load, uint32_t reglist)
Definition: macromem.cc:57
gem5::ArmISA::BigFpMemImmOp::BigFpMemImmOp
BigFpMemImmOp(const char *mnem, ExtMachInst machInst, OpClass __opClass, bool load, IntRegIndex dest, IntRegIndex base, int64_t imm)
Definition: macromem.cc:348
gem5::ArmISA::MicroIntMov::urb
RegIndex urb
Definition: macromem.hh:309
gem5::ArmISA::MicroMemOp::generateDisassembly
std::string generateDisassembly(Addr pc, const loader::SymbolTable *symtab) const override
Internal function to generate disassembly string.
Definition: macromem.cc:1594
gem5::ArmISA::VstSingleOp64::eSize
uint8_t eSize
Definition: macromem.hh:273
gem5::ArmISA::MicroIntImmXOp::imm
int64_t imm
Definition: macromem.hh:346
gem5::ArmISA::VstSingleOp64::index
uint8_t index
Definition: macromem.hh:273
gem5::ArmISA::rt
Bitfield< 15, 12 > rt
Definition: types.hh:115
gem5::ArmISA::COND_UC
@ COND_UC
Definition: cc.hh:84
gem5::ArmISA::rm
Bitfield< 3, 0 > rm
Definition: types.hh:118
gem5::X86ISA::type
type
Definition: misc.hh:733
gem5::ArmISA::VldMultOp64::VldMultOp64
VldMultOp64(const char *mnem, ExtMachInst machInst, OpClass __opClass, RegIndex rn, RegIndex vd, RegIndex rm, uint8_t eSize, uint8_t dataSize, uint8_t numStructElems, uint8_t numRegs, bool wb)
Definition: macromem.cc:1121
gem5::ArmISA::PredMacroOp::microOps
StaticInstPtr * microOps
Definition: pred_inst.hh:347
gem5::RefCounted::count
int count
Definition: refcnt.hh:67
gem5::ArmISA::VstMultOp64::numStructElems
uint8_t numStructElems
Definition: macromem.hh:249
gem5::ArmISA::VldSingleOp64::eSize
uint8_t eSize
Definition: macromem.hh:261
gem5::ArmISA::VldSingleOp64::dataSize
uint8_t dataSize
Definition: macromem.hh:261
gem5::ArmISA::offset
Bitfield< 23, 0 > offset
Definition: types.hh:144
gem5::bits
constexpr T bits(T val, unsigned first, unsigned last)
Extract the bitfield from position 'first' to 'last' (inclusive) from 'val' and right justify it.
Definition: bitfield.hh:76
compiler.hh
gem5::ArmISA::MicroIntOp::generateDisassembly
std::string generateDisassembly(Addr pc, const loader::SymbolTable *symtab) const override
Internal function to generate disassembly string.
Definition: macromem.cc:1580
gem5::ArmISA::MODE_USER
@ MODE_USER
Definition: types.hh:281
gem5::ArmISA::VldMultOp64::eSize
uint8_t eSize
Definition: macromem.hh:237
gem5::ArmISA::MicroIntRegXOp::type
ArmExtendType type
Definition: macromem.hh:382
gem5::ArmISA::COND_AL
@ COND_AL
Definition: cc.hh:83
gem5::Addr
uint64_t Addr
Address type This will probably be moved somewhere else in the near future.
Definition: types.hh:147
gem5::ArmISA::VstSingleOp64::replicate
bool replicate
Definition: macromem.hh:274
gem5::ArmISA::BigFpMemRegOp::BigFpMemRegOp
BigFpMemRegOp(const char *mnem, ExtMachInst machInst, OpClass __opClass, bool load, IntRegIndex dest, IntRegIndex base, IntRegIndex offset, ArmExtendType type, int64_t imm)
Definition: macromem.cc:421
gem5::StaticInst::isLastMicroop
bool isLastMicroop() const
Definition: static_inst.hh:209
gem5::StaticInst::setLastMicroop
void setLastMicroop()
Definition: static_inst.hh:226
neon64_mem.hh
gem5::ArmISA::VecSpecialElem
const int VecSpecialElem
Definition: vec.hh:84
gem5::ArmISA::PredMacroOp
Base class for predicated macro-operations.
Definition: pred_inst.hh:342
gem5::ArmISA::MicroIntOp::urb
RegIndex urb
Definition: macromem.hh:365
gem5::StaticInst::setDelayedCommit
void setDelayedCommit()
Definition: static_inst.hh:227
gem5::X86ISA::reg
Bitfield< 5, 3 > reg
Definition: types.hh:92
gem5::ArmISA::makeSP
static IntRegIndex makeSP(IntRegIndex reg)
Definition: int.hh:512
gem5::X86ISA::ExtMachInst
Definition: types.hh:206
gem5::ArmISA::MacroVFPMemOp::MacroVFPMemOp
MacroVFPMemOp(const char *mnem, ExtMachInst machInst, OpClass __opClass, IntRegIndex rn, RegIndex vd, bool single, bool up, bool writeback, bool load, uint32_t offset)
Definition: macromem.cc:1438
gem5::ArmISA::VstMultOp64::wb
bool wb
Definition: macromem.hh:250
gem5::context_switch_task_id::Unknown
@ Unknown
Definition: request.hh:85
gem5::ArmISA::MicroIntOp::urc
RegIndex urc
Definition: macromem.hh:365
gem5::ArmISA::imm
Bitfield< 7, 0 > imm
Definition: types.hh:132
gem5::ArmISA::PairMemOp::PairMemOp
PairMemOp(const char *mnem, ExtMachInst machInst, OpClass __opClass, uint32_t size, bool fp, bool load, bool noAlloc, bool signExt, bool exclusive, bool acrel, int64_t imm, AddrMode mode, IntRegIndex rn, IntRegIndex rt, IntRegIndex rt2)
Definition: macromem.cc:243
gem5::ArmISA::ArmStaticInst::printFloatReg
void printFloatReg(std::ostream &os, RegIndex reg_idx) const
Definition: static_inst.cc:345
gem5::ArmISA::UXTX
@ UXTX
Definition: types.hh:220
gem5::ArmISA::VldMultOp::VldMultOp
VldMultOp(const char *mnem, ExtMachInst machInst, OpClass __opClass, unsigned elems, RegIndex rn, RegIndex vd, unsigned regs, unsigned inc, uint32_t size, uint32_t align, RegIndex rm)
Definition: macromem.cc:460
gem5::ArmISA::ss
Bitfield< 21 > ss
Definition: misc_types.hh:60
gem5::ArmISA::PairMemOp::AddrMd_PostIndex
@ AddrMd_PostIndex
Definition: macromem.hh:480
gem5::StaticInst::setFirstMicroop
void setFirstMicroop()
Definition: static_inst.hh:225
gem5::ArmISA::VldSingleOp::VldSingleOp
VldSingleOp(const char *mnem, ExtMachInst machInst, OpClass __opClass, bool all, unsigned elems, RegIndex rn, RegIndex vd, unsigned regs, unsigned inc, uint32_t size, uint32_t align, RegIndex rm, unsigned lane)
Definition: macromem.cc:555
gem5::ArmISA::VstMultOp64::VstMultOp64
VstMultOp64(const char *mnem, ExtMachInst machInst, OpClass __opClass, RegIndex rn, RegIndex vd, RegIndex rm, uint8_t eSize, uint8_t dataSize, uint8_t numStructElems, uint8_t numRegs, bool wb)
Definition: macromem.cc:1206
gem5::ArmISA::BigFpMemPreOp::BigFpMemPreOp
BigFpMemPreOp(const char *mnem, ExtMachInst machInst, OpClass __opClass, bool load, IntRegIndex dest, IntRegIndex base, int64_t imm)
Definition: macromem.cc:395
gem5::MipsISA::pc
Bitfield< 4 > pc
Definition: pra_constants.hh:243
gem5::ArmISA::NumVecV8ArchRegs
const int NumVecV8ArchRegs
Definition: vec.hh:77
gem5::ArmISA::VstSingleOp64::numStructElems
uint8_t numStructElems
Definition: macromem.hh:273
gem5::ArmISA::MicroIntImmXOp::urb
RegIndex urb
Definition: macromem.hh:345
gem5::ArmISA::MicroIntImmOp::generateDisassembly
std::string generateDisassembly(Addr pc, const loader::SymbolTable *symtab) const override
Internal function to generate disassembly string.
Definition: macromem.cc:1517
gem5::ArmISA::VldSingleOp64::index
uint8_t index
Definition: macromem.hh:261
gem5::ArmISA::MicroIntMov::generateDisassembly
std::string generateDisassembly(Addr pc, const loader::SymbolTable *symtab) const override
Internal function to generate disassembly string.
Definition: macromem.cc:1568
gem5::ArmISA::MicroMemPairOp::dest2
RegIndex dest2
Definition: macromem.hh:441
gem5::RegIndex
uint16_t RegIndex
Definition: types.hh:176
gem5::StaticInst::setFlag
void setFlag(Flags f)
Definition: static_inst.hh:228
gem5::ArmISA::PairMemOp::AddrMd_Offset
@ AddrMd_Offset
Definition: macromem.hh:478
gem5::ArmISA::ArmStaticInst::printIntReg
void printIntReg(std::ostream &os, RegIndex reg_idx, uint8_t opWidth=0) const
Print a register name for disassembly given the unique dependence tag number (FP or int).
Definition: static_inst.cc:299
gem5
Reference material can be found at the JEDEC website: UFS standard http://www.jedec....
Definition: tlb.cc:60
gem5::ArmISA::VldMultOp64::dataSize
uint8_t dataSize
Definition: macromem.hh:237
gem5::ArmISA::MicroIntImmOp::imm
int32_t imm
Definition: macromem.hh:329
gem5::ArmISA::rn
Bitfield< 19, 16 > rn
Definition: types.hh:113
gem5::ArmISA::VstMultOp64::numRegs
uint8_t numRegs
Definition: macromem.hh:249
gem5::ArmISA::MicroIntMov::ura
RegIndex ura
Definition: macromem.hh:309
panic
#define panic(...)
This implements a cprintf based panic() function.
Definition: logging.hh:178
gem5::ArmISA::VldSingleOp64::numStructElems
uint8_t numStructElems
Definition: macromem.hh:261
gem5::ArmISA::mode
Bitfield< 4, 0 > mode
Definition: misc_types.hh:74
gem5::X86ISA::addr
Bitfield< 3 > addr
Definition: types.hh:84

Generated on Wed May 4 2022 12:13:47 for gem5 by doxygen 1.8.17