gem5  v21.0.0.0
macromem.cc
1 /*
2  * Copyright (c) 2010-2014, 2020 ARM Limited
3  * All rights reserved
4  *
5  * The license below extends only to copyright in the software and shall
6  * not be construed as granting a license to any other intellectual
7  * property including but not limited to intellectual property relating
8  * to a hardware implementation of the functionality of the software
9  * licensed hereunder. You may use the software subject to the license
10  * terms below provided that you ensure that this notice is replicated
11  * unmodified and in its entirety in all distributions of the software,
12  * modified or unmodified, in source code or in binary form.
13  *
14  * Copyright (c) 2007-2008 The Florida State University
15  * All rights reserved.
16  *
17  * Redistribution and use in source and binary forms, with or without
18  * modification, are permitted provided that the following conditions are
19  * met: redistributions of source code must retain the above copyright
20  * notice, this list of conditions and the following disclaimer;
21  * redistributions in binary form must reproduce the above copyright
22  * notice, this list of conditions and the following disclaimer in the
23  * documentation and/or other materials provided with the distribution;
24  * neither the name of the copyright holders nor the names of its
25  * contributors may be used to endorse or promote products derived from
26  * this software without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39  */
40 
41 #include "arch/arm/insts/macromem.hh"
42 
43 #include <sstream>
44 
45 #include "arch/arm/generated/decoder.hh"
46 #include "arch/arm/insts/neon64_mem.hh"
47 
48 using namespace ArmISAInst;
49 
50 namespace ArmISA
51 {
52 
53 MacroMemOp::MacroMemOp(const char *mnem, ExtMachInst machInst,
54  OpClass __opClass, IntRegIndex rn,
55  bool index, bool up, bool user, bool writeback,
56  bool load, uint32_t reglist) :
57  PredMacroOp(mnem, machInst, __opClass)
58 {
59  uint32_t regs = reglist;
60  uint32_t ones = number_of_ones(reglist);
61  uint32_t mem_ops = ones;
62 
63  // Copy the base address register if we overwrite it, or if this instruction
64  // is basically a no-op (we have to do something)
65  bool copy_base = (bits(reglist, rn) && load) || !ones;
66  bool force_user = user & !bits(reglist, 15);
67  bool exception_ret = user & bits(reglist, 15);
68  bool pc_temp = load && writeback && bits(reglist, 15);
69 
70  if (!ones) {
71  numMicroops = 1;
72  } else if (load) {
73  numMicroops = ((ones + 1) / 2)
74  + ((ones % 2 == 0 && exception_ret) ? 1 : 0)
75  + (copy_base ? 1 : 0)
76  + (writeback? 1 : 0)
77  + (pc_temp ? 1 : 0);
78  } else {
79  numMicroops = ones + (writeback ? 1 : 0);
80  }
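 // Illustrative example (derived from the formula above): for a load of five
 // registers with writeback and no PC in the list, e.g. LDMIA r0!, {r1-r5},
 // ones == 5 and copy_base, exception_ret and pc_temp are all false, giving
 // (5 + 1) / 2 = 3 load microops plus 1 writeback microop, so numMicroops == 4.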
81 
82  microOps = new StaticInstPtr[numMicroops];
83 
84  uint32_t addr = 0;
85 
86  if (!up)
87  addr = (ones << 2) - 4;
88 
89  if (!index)
90  addr += 4;
91 
92  StaticInstPtr *uop = microOps;
93 
94  // Add 0 to Rn and stick it in ureg0.
95  // This is equivalent to a move.
96  if (copy_base)
97  *uop++ = new MicroAddiUop(machInst, INTREG_UREG0, rn, 0);
98 
99  unsigned reg = 0;
100  while (mem_ops != 0) {
101  // Do load operations in pairs if possible
102  if (load && mem_ops >= 2 &&
103  !(mem_ops == 2 && bits(regs,INTREG_PC) && exception_ret)) {
104  // 64-bit memory operation
105  // Find 2 set register bits (clear them after finding)
106  unsigned reg_idx1;
107  unsigned reg_idx2;
108 
109  // Find the first register
110  while (!bits(regs, reg)) reg++;
111  replaceBits(regs, reg, 0);
112  reg_idx1 = force_user ? intRegInMode(MODE_USER, reg) : reg;
113 
114  // Find the second register
115  while (!bits(regs, reg)) reg++;
116  replaceBits(regs, reg, 0);
117  reg_idx2 = force_user ? intRegInMode(MODE_USER, reg) : reg;
118 
119  // Load into temp reg if necessary
120  if (reg_idx2 == INTREG_PC && pc_temp)
121  reg_idx2 = INTREG_UREG1;
122 
123  // Actually load both registers from memory
124  *uop = new MicroLdr2Uop(machInst, reg_idx1, reg_idx2,
125  copy_base ? INTREG_UREG0 : rn, up, addr);
126 
127  if (!writeback && reg_idx2 == INTREG_PC) {
128  // No writeback if idx==pc, set appropriate flags
129  (*uop)->setFlag(StaticInst::IsControl);
130  (*uop)->setFlag(StaticInst::IsIndirectControl);
131 
132  if (!(condCode == COND_AL || condCode == COND_UC))
133  (*uop)->setFlag(StaticInst::IsCondControl);
134  else
135  (*uop)->setFlag(StaticInst::IsUncondControl);
136  }
137 
138  if (up) addr += 8;
139  else addr -= 8;
140  mem_ops -= 2;
141  } else {
142  // 32-bit memory operation
143  // Find register for operation
144  unsigned reg_idx;
145  while (!bits(regs, reg)) reg++;
146  replaceBits(regs, reg, 0);
147  reg_idx = force_user ? intRegInMode(MODE_USER, reg) : reg;
148 
149  if (load) {
150  if (writeback && reg_idx == INTREG_PC) {
151  // If this instruction changes the PC and performs a
152  // writeback, ensure the pc load/branch is the last uop.
153  // Load into a temp reg here.
154  *uop = new MicroLdrUop(machInst, INTREG_UREG1,
155  copy_base ? INTREG_UREG0 : rn, up, addr);
156  } else if (reg_idx == INTREG_PC && exception_ret) {
157  // Special handling for exception return
158  *uop = new MicroLdrRetUop(machInst, reg_idx,
159  copy_base ? INTREG_UREG0 : rn, up, addr);
160  } else {
161  // standard single load uop
162  *uop = new MicroLdrUop(machInst, reg_idx,
163  copy_base ? INTREG_UREG0 : rn, up, addr);
164  }
165 
166  // Loading pc as last operation? Set appropriate flags.
167  if (!writeback && reg_idx == INTREG_PC) {
168  (*uop)->setFlag(StaticInst::IsControl);
169  (*uop)->setFlag(StaticInst::IsIndirectControl);
170 
171  if (!(condCode == COND_AL || condCode == COND_UC))
172  (*uop)->setFlag(StaticInst::IsCondControl);
173  else
174  (*uop)->setFlag(StaticInst::IsUncondControl);
175  }
176  } else {
177  *uop = new MicroStrUop(machInst, reg_idx, rn, up, addr);
178  }
179 
180  if (up) addr += 4;
181  else addr -= 4;
182  --mem_ops;
183  }
184 
185  // Load/store micro-op generated, go to next uop
186  ++uop;
187  }
188 
189  if (writeback && ones) {
190  // Perform writeback uop operation
191  if (up)
192  *uop++ = new MicroAddiUop(machInst, rn, rn, ones * 4);
193  else
194  *uop++ = new MicroSubiUop(machInst, rn, rn, ones * 4);
195 
196  // Write PC after address writeback?
197  if (pc_temp) {
198  if (exception_ret) {
199  *uop = new MicroUopRegMovRet(machInst, 0, INTREG_UREG1);
200  } else {
201  *uop = new MicroUopRegMov(machInst, INTREG_PC, INTREG_UREG1);
202  }
203  (*uop)->setFlag(StaticInst::IsControl);
204  (*uop)->setFlag(StaticInst::IsIndirectControl);
205 
206  if (!(condCode == COND_AL || condCode == COND_UC))
207  (*uop)->setFlag(StaticInst::IsCondControl);
208  else
209  (*uop)->setFlag(StaticInst::IsUncondControl);
210 
211  if (rn == INTREG_SP)
212  (*uop)->setFlag(StaticInst::IsReturn);
213 
214  ++uop;
215  }
216  }
217 
218  --uop;
219  (*uop)->setLastMicroop();
221 
222  /* Take the control flags from the last microop for the macroop */
223  if ((*uop)->isControl())
224  setFlag(StaticInst::IsControl);
225  if ((*uop)->isCondCtrl())
226  setFlag(StaticInst::IsCondControl);
227  if ((*uop)->isUncondCtrl())
228  setFlag(StaticInst::IsUncondControl);
229  if ((*uop)->isIndirectCtrl())
230  setFlag(StaticInst::IsIndirectControl);
231  if ((*uop)->isReturn())
232  setFlag(StaticInst::IsReturn);
233 
234  for (StaticInstPtr *uop = microOps; !(*uop)->isLastMicroop(); uop++) {
235  (*uop)->setDelayedCommit();
236  }
237 }
238 
239 PairMemOp::PairMemOp(const char *mnem, ExtMachInst machInst, OpClass __opClass,
240  uint32_t size, bool fp, bool load, bool noAlloc,
241  bool signExt, bool exclusive, bool acrel,
242  int64_t imm, AddrMode mode,
243  IntRegIndex rn, IntRegIndex rt, IntRegIndex rt2) :
244  PredMacroOp(mnem, machInst, __opClass)
245 {
246  bool post = (mode == AddrMd_PostIndex);
247  bool writeback = (mode != AddrMd_Offset);
248 
249  if (load) {
250  // Use integer rounding to round up loads of size 4
251  numMicroops = (post ? 0 : 1) + ((size + 4) / 8) + (writeback ? 1 : 0);
252  } else {
253  numMicroops = (post ? 0 : 1) + (size / 4) + (writeback ? 1 : 0);
254  }
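 // Illustrative example: a 128-bit load pair (size == 16) needs
 // (16 + 4) / 8 = 2 load microops, while the corresponding store pair is
 // split into 16 / 4 = 4 store microops (a bottom and a top half per
 // register), plus the optional address and writeback microops.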
255  microOps = new StaticInstPtr[numMicroops];
256 
257  StaticInstPtr *uop = microOps;
258 
259  rn = makeSP(rn);
260 
261  if (!post) {
262  *uop++ = new MicroAddXiSpAlignUop(machInst, INTREG_UREG0, rn,
263  post ? 0 : imm);
264  }
265 
266  if (fp) {
267  if (size == 16) {
268  if (load) {
269  *uop++ = new MicroLdFp16Uop(machInst, rt,
270  post ? rn : INTREG_UREG0, 0, noAlloc, exclusive, acrel);
271  *uop++ = new MicroLdFp16Uop(machInst, rt2,
272  post ? rn : INTREG_UREG0, 16, noAlloc, exclusive, acrel);
273  } else {
274  *uop++ = new MicroStrQBFpXImmUop(machInst, rt,
275  post ? rn : INTREG_UREG0, 0, noAlloc, exclusive, acrel);
276  *uop++ = new MicroStrQTFpXImmUop(machInst, rt,
277  post ? rn : INTREG_UREG0, 0, noAlloc, exclusive, acrel);
278  *uop++ = new MicroStrQBFpXImmUop(machInst, rt2,
279  post ? rn : INTREG_UREG0, 16, noAlloc, exclusive, acrel);
280  *uop++ = new MicroStrQTFpXImmUop(machInst, rt2,
281  post ? rn : INTREG_UREG0, 16, noAlloc, exclusive, acrel);
282  }
283  } else if (size == 8) {
284  if (load) {
285  *uop++ = new MicroLdPairFp8Uop(machInst, rt, rt2,
286  post ? rn : INTREG_UREG0, 0, noAlloc, exclusive, acrel);
287  } else {
288  *uop++ = new MicroStrFpXImmUop(machInst, rt,
289  post ? rn : INTREG_UREG0, 0, noAlloc, exclusive, acrel);
290  *uop++ = new MicroStrFpXImmUop(machInst, rt2,
291  post ? rn : INTREG_UREG0, 8, noAlloc, exclusive, acrel);
292  }
293  } else if (size == 4) {
294  if (load) {
295  *uop++ = new MicroLdrDFpXImmUop(machInst, rt, rt2,
296  post ? rn : INTREG_UREG0, 0, noAlloc, exclusive, acrel);
297  } else {
298  *uop++ = new MicroStrDFpXImmUop(machInst, rt, rt2,
299  post ? rn : INTREG_UREG0, 0, noAlloc, exclusive, acrel);
300  }
301  }
302  } else {
303  if (size == 8) {
304  if (load) {
305  *uop++ = new MicroLdPairUop(machInst, rt, rt2,
306  post ? rn : INTREG_UREG0, 0, noAlloc, exclusive, acrel);
307  } else {
308  *uop++ = new MicroStrXImmUop(machInst, rt, post ? rn : INTREG_UREG0,
309  0, noAlloc, exclusive, acrel);
310  *uop++ = new MicroStrXImmUop(machInst, rt2, post ? rn : INTREG_UREG0,
311  size, noAlloc, exclusive, acrel);
312  }
313  } else if (size == 4) {
314  if (load) {
315  if (signExt) {
316  *uop++ = new MicroLdrDSXImmUop(machInst, rt, rt2,
317  post ? rn : INTREG_UREG0, 0, noAlloc, exclusive, acrel);
318  } else {
319  *uop++ = new MicroLdrDUXImmUop(machInst, rt, rt2,
320  post ? rn : INTREG_UREG0, 0, noAlloc, exclusive, acrel);
321  }
322  } else {
323  *uop++ = new MicroStrDXImmUop(machInst, rt, rt2,
324  post ? rn : INTREG_UREG0, 0, noAlloc, exclusive, acrel);
325  }
326  }
327  }
328 
329  if (writeback) {
330  *uop++ = new MicroAddXiUop(machInst, rn, post ? rn : INTREG_UREG0,
331  post ? imm : 0);
332  }
333 
334  assert(uop == &microOps[numMicroops]);
335  (*--uop)->setLastMicroop();
337 
338  for (StaticInstPtr *curUop = microOps;
339  !(*curUop)->isLastMicroop(); curUop++) {
340  (*curUop)->setDelayedCommit();
341  }
342 }
343 
344 BigFpMemImmOp::BigFpMemImmOp(const char *mnem, ExtMachInst machInst,
345  OpClass __opClass, bool load, IntRegIndex dest,
346  IntRegIndex base, int64_t imm) :
347  PredMacroOp(mnem, machInst, __opClass)
348 {
349  numMicroops = load ? 1 : 2;
350  microOps = new StaticInstPtr[numMicroops];
351 
352  StaticInstPtr *uop = microOps;
353 
354  if (load) {
355  *uop = new MicroLdFp16Uop(machInst, dest, base, imm);
356  } else {
357  *uop = new MicroStrQBFpXImmUop(machInst, dest, base, imm);
358  (*uop)->setDelayedCommit();
359  *++uop = new MicroStrQTFpXImmUop(machInst, dest, base, imm);
360  }
361  (*uop)->setLastMicroop();
363 }
364 
365 BigFpMemPostOp::BigFpMemPostOp(const char *mnem, ExtMachInst machInst,
366  OpClass __opClass, bool load, IntRegIndex dest,
367  IntRegIndex base, int64_t imm) :
368  PredMacroOp(mnem, machInst, __opClass)
369 {
370  numMicroops = load ? 2 : 3;
371  microOps = new StaticInstPtr[numMicroops];
372 
373  StaticInstPtr *uop = microOps;
374 
375  if (load) {
376  *uop++ = new MicroLdFp16Uop(machInst, dest, base, 0);
377  } else {
378  *uop++= new MicroStrQBFpXImmUop(machInst, dest, base, 0);
379  *uop++ = new MicroStrQTFpXImmUop(machInst, dest, base, 0);
380  }
381  *uop = new MicroAddXiUop(machInst, base, base, imm);
382  (*uop)->setLastMicroop();
384 
385  for (StaticInstPtr *curUop = microOps;
386  !(*curUop)->isLastMicroop(); curUop++) {
387  (*curUop)->setDelayedCommit();
388  }
389 }
390 
391 BigFpMemPreOp::BigFpMemPreOp(const char *mnem, ExtMachInst machInst,
392  OpClass __opClass, bool load, IntRegIndex dest,
393  IntRegIndex base, int64_t imm) :
394  PredMacroOp(mnem, machInst, __opClass)
395 {
396  numMicroops = load ? 2 : 3;
397  microOps = new StaticInstPtr[numMicroops];
398 
399  StaticInstPtr *uop = microOps;
400 
401  if (load) {
402  *uop++ = new MicroLdFp16Uop(machInst, dest, base, imm);
403  } else {
404  *uop++ = new MicroStrQBFpXImmUop(machInst, dest, base, imm);
405  *uop++ = new MicroStrQTFpXImmUop(machInst, dest, base, imm);
406  }
407  *uop = new MicroAddXiUop(machInst, base, base, imm);
408  (*uop)->setLastMicroop();
410 
411  for (StaticInstPtr *curUop = microOps;
412  !(*curUop)->isLastMicroop(); curUop++) {
413  (*curUop)->setDelayedCommit();
414  }
415 }
416 
417 BigFpMemRegOp::BigFpMemRegOp(const char *mnem, ExtMachInst machInst,
418  OpClass __opClass, bool load, IntRegIndex dest,
419  IntRegIndex base, IntRegIndex offset,
420  ArmExtendType type, int64_t imm) :
421  PredMacroOp(mnem, machInst, __opClass)
422 {
423  numMicroops = load ? 1 : 2;
424  microOps = new StaticInstPtr[numMicroops];
425 
426  StaticInstPtr *uop = microOps;
427 
428  if (load) {
429  *uop = new MicroLdFp16RegUop(machInst, dest, base,
430  offset, type, imm);
431  } else {
432  *uop = new MicroStrQBFpXRegUop(machInst, dest, base,
433  offset, type, imm);
434  (*uop)->setDelayedCommit();
435  *++uop = new MicroStrQTFpXRegUop(machInst, dest, base,
436  offset, type, imm);
437  }
438 
439  (*uop)->setLastMicroop();
441 }
442 
443 BigFpMemLitOp::BigFpMemLitOp(const char *mnem, ExtMachInst machInst,
444  OpClass __opClass, IntRegIndex dest,
445  int64_t imm) :
446  PredMacroOp(mnem, machInst, __opClass)
447 {
448  numMicroops = 1;
449  microOps = new StaticInstPtr[numMicroops];
450 
451  microOps[0] = new MicroLdFp16LitUop(machInst, dest, imm);
452  microOps[0]->setLastMicroop();
454 }
455 
456 VldMultOp::VldMultOp(const char *mnem, ExtMachInst machInst, OpClass __opClass,
457  unsigned elems, RegIndex rn, RegIndex vd, unsigned regs,
458  unsigned inc, uint32_t size, uint32_t align, RegIndex rm) :
459  PredMacroOp(mnem, machInst, __opClass)
460 {
461  assert(regs > 0 && regs <= 4);
462  assert(regs % elems == 0);
463 
464  numMicroops = (regs > 2) ? 2 : 1;
465  bool wb = (rm != 15);
466  bool deinterleave = (elems > 1);
467 
468  if (wb) numMicroops++;
469  if (deinterleave) numMicroops += (regs / elems);
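 // Illustrative example: a VLD2 of four registers (elems == 2, regs == 4)
 // with register writeback uses 2 load microops + 1 writeback microop +
 // 4 / 2 = 2 deinterleave microops, so numMicroops == 5.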
470  microOps = new StaticInstPtr[numMicroops];
471 
472  RegIndex rMid = deinterleave ? VecSpecialElem : vd * 2;
473 
474  uint32_t noAlign = 0;
475 
476  unsigned uopIdx = 0;
477  switch (regs) {
478  case 4:
479  microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon16Uop>(
480  size, machInst, rMid, rn, 0, align);
481  microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon16Uop>(
482  size, machInst, rMid + 4, rn, 16, noAlign);
483  break;
484  case 3:
485  microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon16Uop>(
486  size, machInst, rMid, rn, 0, align);
487  microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon8Uop>(
488  size, machInst, rMid + 4, rn, 16, noAlign);
489  break;
490  case 2:
491  microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon16Uop>(
492  size, machInst, rMid, rn, 0, align);
493  break;
494  case 1:
495  microOps[uopIdx++] = newNeonMemInst<MicroLdrNeon8Uop>(
496  size, machInst, rMid, rn, 0, align);
497  break;
498  default:
499  // Unknown number of registers
500  microOps[uopIdx++] = new Unknown(machInst);
501  }
502  if (wb) {
503  if (rm != 15 && rm != 13) {
504  microOps[uopIdx++] =
505  new MicroAddUop(machInst, rn, rn, rm, 0, ArmISA::LSL);
506  } else {
507  microOps[uopIdx++] =
508  new MicroAddiUop(machInst, rn, rn, regs * 8);
509  }
510  }
511  if (deinterleave) {
512  switch (elems) {
513  case 4:
514  assert(regs == 4);
515  microOps[uopIdx++] = newNeonMixInst<MicroDeintNeon8Uop>(
516  size, machInst, vd * 2, rMid, inc * 2);
517  break;
518  case 3:
519  assert(regs == 3);
520  microOps[uopIdx++] = newNeonMixInst<MicroDeintNeon6Uop>(
521  size, machInst, vd * 2, rMid, inc * 2);
522  break;
523  case 2:
524  assert(regs == 4 || regs == 2);
525  if (regs == 4) {
526  microOps[uopIdx++] = newNeonMixInst<MicroDeintNeon4Uop>(
527  size, machInst, vd * 2, rMid, inc * 2);
528  microOps[uopIdx++] = newNeonMixInst<MicroDeintNeon4Uop>(
529  size, machInst, vd * 2 + 2, rMid + 4, inc * 2);
530  } else {
531  microOps[uopIdx++] = newNeonMixInst<MicroDeintNeon4Uop>(
532  size, machInst, vd * 2, rMid, inc * 2);
533  }
534  break;
535  default:
536  // Bad number of elements to deinterleave
537  microOps[uopIdx++] = new Unknown(machInst);
538  }
539  }
540  assert(uopIdx == numMicroops);
541 
542  for (unsigned i = 0; i < numMicroops - 1; i++) {
543  MicroOp * uopPtr = dynamic_cast<MicroOp *>(microOps[i].get());
544  assert(uopPtr);
545  uopPtr->setDelayedCommit();
546  }
547  microOps[0]->setFirstMicroop();
548  microOps[numMicroops - 1]->setLastMicroop();
549 }
550 
551 VldSingleOp::VldSingleOp(const char *mnem, ExtMachInst machInst,
552  OpClass __opClass, bool all, unsigned elems,
553  RegIndex rn, RegIndex vd, unsigned regs,
554  unsigned inc, uint32_t size, uint32_t align,
555  RegIndex rm, unsigned lane) :
556  PredMacroOp(mnem, machInst, __opClass)
557 {
558  assert(regs > 0 && regs <= 4);
559  assert(regs % elems == 0);
560 
561  unsigned eBytes = (1 << size);
562  unsigned loadSize = eBytes * elems;
563  M5_VAR_USED unsigned loadRegs =
564  (loadSize + sizeof(uint32_t) - 1) / sizeof(uint32_t);
565 
566  assert(loadRegs > 0 && loadRegs <= 4);
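 // Illustrative example: a single-lane VLD3 of 16-bit elements has
 // eBytes == 2 and loadSize == 6, which selects the MicroLdrNeon6Uop case
 // below and needs loadRegs == 2 temporary 32-bit registers.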
567 
568  numMicroops = 1;
569  bool wb = (rm != 15);
570 
571  if (wb) numMicroops++;
572  numMicroops += (regs / elems);
573  microOps = new StaticInstPtr[numMicroops];
574 
575  RegIndex ufp0 = VecSpecialElem;
576 
577  unsigned uopIdx = 0;
578  switch (loadSize) {
579  case 1:
580  microOps[uopIdx++] = new MicroLdrNeon1Uop<uint8_t>(
581  machInst, ufp0, rn, 0, align);
582  break;
583  case 2:
584  if (eBytes == 2) {
585  microOps[uopIdx++] = new MicroLdrNeon2Uop<uint16_t>(
586  machInst, ufp0, rn, 0, align);
587  } else {
588  microOps[uopIdx++] = new MicroLdrNeon2Uop<uint8_t>(
589  machInst, ufp0, rn, 0, align);
590  }
591  break;
592  case 3:
593  microOps[uopIdx++] = new MicroLdrNeon3Uop<uint8_t>(
594  machInst, ufp0, rn, 0, align);
595  break;
596  case 4:
597  switch (eBytes) {
598  case 1:
599  microOps[uopIdx++] = new MicroLdrNeon4Uop<uint8_t>(
600  machInst, ufp0, rn, 0, align);
601  break;
602  case 2:
603  microOps[uopIdx++] = new MicroLdrNeon4Uop<uint16_t>(
604  machInst, ufp0, rn, 0, align);
605  break;
606  case 4:
607  microOps[uopIdx++] = new MicroLdrNeon4Uop<uint32_t>(
608  machInst, ufp0, rn, 0, align);
609  break;
610  }
611  break;
612  case 6:
613  microOps[uopIdx++] = new MicroLdrNeon6Uop<uint16_t>(
614  machInst, ufp0, rn, 0, align);
615  break;
616  case 8:
617  switch (eBytes) {
618  case 2:
619  microOps[uopIdx++] = new MicroLdrNeon8Uop<uint16_t>(
620  machInst, ufp0, rn, 0, align);
621  break;
622  case 4:
623  microOps[uopIdx++] = new MicroLdrNeon8Uop<uint32_t>(
624  machInst, ufp0, rn, 0, align);
625  break;
626  }
627  break;
628  case 12:
629  microOps[uopIdx++] = new MicroLdrNeon12Uop<uint32_t>(
630  machInst, ufp0, rn, 0, align);
631  break;
632  case 16:
633  microOps[uopIdx++] = new MicroLdrNeon16Uop<uint32_t>(
634  machInst, ufp0, rn, 0, align);
635  break;
636  default:
637  // Unrecognized load size
638  microOps[uopIdx++] = new Unknown(machInst);
639  }
640  if (wb) {
641  if (rm != 15 && rm != 13) {
642  microOps[uopIdx++] =
643  new MicroAddUop(machInst, rn, rn, rm, 0, ArmISA::LSL);
644  } else {
645  microOps[uopIdx++] =
646  new MicroAddiUop(machInst, rn, rn, loadSize);
647  }
648  }
649  switch (elems) {
650  case 4:
651  assert(regs == 4);
652  switch (size) {
653  case 0:
654  if (all) {
655  microOps[uopIdx++] = new MicroUnpackAllNeon2to8Uop<uint8_t>(
656  machInst, vd * 2, ufp0, inc * 2);
657  } else {
658  microOps[uopIdx++] = new MicroUnpackNeon2to8Uop<uint8_t>(
659  machInst, vd * 2, ufp0, inc * 2, lane);
660  }
661  break;
662  case 1:
663  if (all) {
664  microOps[uopIdx++] = new MicroUnpackAllNeon2to8Uop<uint16_t>(
665  machInst, vd * 2, ufp0, inc * 2);
666  } else {
667  microOps[uopIdx++] = new MicroUnpackNeon2to8Uop<uint16_t>(
668  machInst, vd * 2, ufp0, inc * 2, lane);
669  }
670  break;
671  case 2:
672  if (all) {
673  microOps[uopIdx++] = new MicroUnpackAllNeon4to8Uop<uint32_t>(
674  machInst, vd * 2, ufp0, inc * 2);
675  } else {
676  microOps[uopIdx++] = new MicroUnpackNeon4to8Uop<uint32_t>(
677  machInst, vd * 2, ufp0, inc * 2, lane);
678  }
679  break;
680  default:
681  // Bad size
682  microOps[uopIdx++] = new Unknown(machInst);
683  break;
684  }
685  break;
686  case 3:
687  assert(regs == 3);
688  switch (size) {
689  case 0:
690  if (all) {
691  microOps[uopIdx++] = new MicroUnpackAllNeon2to6Uop<uint8_t>(
692  machInst, vd * 2, ufp0, inc * 2);
693  } else {
694  microOps[uopIdx++] = new MicroUnpackNeon2to6Uop<uint8_t>(
695  machInst, vd * 2, ufp0, inc * 2, lane);
696  }
697  break;
698  case 1:
699  if (all) {
700  microOps[uopIdx++] = new MicroUnpackAllNeon2to6Uop<uint16_t>(
701  machInst, vd * 2, ufp0, inc * 2);
702  } else {
703  microOps[uopIdx++] = new MicroUnpackNeon2to6Uop<uint16_t>(
704  machInst, vd * 2, ufp0, inc * 2, lane);
705  }
706  break;
707  case 2:
708  if (all) {
709  microOps[uopIdx++] = new MicroUnpackAllNeon4to6Uop<uint32_t>(
710  machInst, vd * 2, ufp0, inc * 2);
711  } else {
712  microOps[uopIdx++] = new MicroUnpackNeon4to6Uop<uint32_t>(
713  machInst, vd * 2, ufp0, inc * 2, lane);
714  }
715  break;
716  default:
717  // Bad size
718  microOps[uopIdx++] = new Unknown(machInst);
719  break;
720  }
721  break;
722  case 2:
723  assert(regs == 2);
724  assert(loadRegs <= 2);
725  switch (size) {
726  case 0:
727  if (all) {
728  microOps[uopIdx++] = new MicroUnpackAllNeon2to4Uop<uint8_t>(
729  machInst, vd * 2, ufp0, inc * 2);
730  } else {
731  microOps[uopIdx++] = new MicroUnpackNeon2to4Uop<uint8_t>(
732  machInst, vd * 2, ufp0, inc * 2, lane);
733  }
734  break;
735  case 1:
736  if (all) {
737  microOps[uopIdx++] = new MicroUnpackAllNeon2to4Uop<uint16_t>(
738  machInst, vd * 2, ufp0, inc * 2);
739  } else {
740  microOps[uopIdx++] = new MicroUnpackNeon2to4Uop<uint16_t>(
741  machInst, vd * 2, ufp0, inc * 2, lane);
742  }
743  break;
744  case 2:
745  if (all) {
746  microOps[uopIdx++] = new MicroUnpackAllNeon2to4Uop<uint32_t>(
747  machInst, vd * 2, ufp0, inc * 2);
748  } else {
749  microOps[uopIdx++] = new MicroUnpackNeon2to4Uop<uint32_t>(
750  machInst, vd * 2, ufp0, inc * 2, lane);
751  }
752  break;
753  default:
754  // Bad size
755  microOps[uopIdx++] = new Unknown(machInst);
756  break;
757  }
758  break;
759  case 1:
760  assert(regs == 1 || (all && regs == 2));
761  assert(loadRegs <= 2);
762  for (unsigned offset = 0; offset < regs; offset++) {
763  switch (size) {
764  case 0:
765  if (all) {
766  microOps[uopIdx++] =
767  new MicroUnpackAllNeon2to2Uop<uint8_t>(
768  machInst, (vd + offset) * 2, ufp0, inc * 2);
769  } else {
770  microOps[uopIdx++] =
771  new MicroUnpackNeon2to2Uop<uint8_t>(
772  machInst, (vd + offset) * 2, ufp0, inc * 2, lane);
773  }
774  break;
775  case 1:
776  if (all) {
777  microOps[uopIdx++] =
778  new MicroUnpackAllNeon2to2Uop<uint16_t>(
779  machInst, (vd + offset) * 2, ufp0, inc * 2);
780  } else {
781  microOps[uopIdx++] =
782  new MicroUnpackNeon2to2Uop<uint16_t>(
783  machInst, (vd + offset) * 2, ufp0, inc * 2, lane);
784  }
785  break;
786  case 2:
787  if (all) {
788  microOps[uopIdx++] =
789  new MicroUnpackAllNeon2to2Uop<uint32_t>(
790  machInst, (vd + offset) * 2, ufp0, inc * 2);
791  } else {
792  microOps[uopIdx++] =
793  new MicroUnpackNeon2to2Uop<uint32_t>(
794  machInst, (vd + offset) * 2, ufp0, inc * 2, lane);
795  }
796  break;
797  default:
798  // Bad size
799  microOps[uopIdx++] = new Unknown(machInst);
800  break;
801  }
802  }
803  break;
804  default:
805  // Bad number of elements to unpack
806  microOps[uopIdx++] = new Unknown(machInst);
807  }
808  assert(uopIdx == numMicroops);
809 
810  for (unsigned i = 0; i < numMicroops - 1; i++) {
811  MicroOp * uopPtr = dynamic_cast<MicroOp *>(microOps[i].get());
812  assert(uopPtr);
813  uopPtr->setDelayedCommit();
814  }
815  microOps[0]->setFirstMicroop();
816  microOps[numMicroops - 1]->setLastMicroop();
817 }
818 
819 VstMultOp::VstMultOp(const char *mnem, ExtMachInst machInst, OpClass __opClass,
820  unsigned elems, RegIndex rn, RegIndex vd, unsigned regs,
821  unsigned inc, uint32_t size, uint32_t align, RegIndex rm) :
822  PredMacroOp(mnem, machInst, __opClass)
823 {
824  assert(regs > 0 && regs <= 4);
825  assert(regs % elems == 0);
826 
827  numMicroops = (regs > 2) ? 2 : 1;
828  bool wb = (rm != 15);
829  bool interleave = (elems > 1);
830 
831  if (wb) numMicroops++;
832  if (interleave) numMicroops += (regs / elems);
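 // Illustrative example (mirroring the load case): a VST2 of four registers
 // (elems == 2, regs == 4) with writeback needs 4 / 2 = 2 interleave
 // microops + 2 store microops + 1 writeback microop, so numMicroops == 5.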
833  microOps = new StaticInstPtr[numMicroops];
834 
835  uint32_t noAlign = 0;
836 
837  RegIndex rMid = interleave ? VecSpecialElem : vd * 2;
838 
839  unsigned uopIdx = 0;
840  if (interleave) {
841  switch (elems) {
842  case 4:
843  assert(regs == 4);
844  microOps[uopIdx++] = newNeonMixInst<MicroInterNeon8Uop>(
845  size, machInst, rMid, vd * 2, inc * 2);
846  break;
847  case 3:
848  assert(regs == 3);
849  microOps[uopIdx++] = newNeonMixInst<MicroInterNeon6Uop>(
850  size, machInst, rMid, vd * 2, inc * 2);
851  break;
852  case 2:
853  assert(regs == 4 || regs == 2);
854  if (regs == 4) {
855  microOps[uopIdx++] = newNeonMixInst<MicroInterNeon4Uop>(
856  size, machInst, rMid, vd * 2, inc * 2);
857  microOps[uopIdx++] = newNeonMixInst<MicroInterNeon4Uop>(
858  size, machInst, rMid + 4, vd * 2 + 2, inc * 2);
859  } else {
860  microOps[uopIdx++] = newNeonMixInst<MicroInterNeon4Uop>(
861  size, machInst, rMid, vd * 2, inc * 2);
862  }
863  break;
864  default:
865  // Bad number of elements to interleave
866  microOps[uopIdx++] = new Unknown(machInst);
867  }
868  }
869  switch (regs) {
870  case 4:
871  microOps[uopIdx++] = newNeonMemInst<MicroStrNeon16Uop>(
872  size, machInst, rMid, rn, 0, align);
873  microOps[uopIdx++] = newNeonMemInst<MicroStrNeon16Uop>(
874  size, machInst, rMid + 4, rn, 16, noAlign);
875  break;
876  case 3:
877  microOps[uopIdx++] = newNeonMemInst<MicroStrNeon16Uop>(
878  size, machInst, rMid, rn, 0, align);
879  microOps[uopIdx++] = newNeonMemInst<MicroStrNeon8Uop>(
880  size, machInst, rMid + 4, rn, 16, noAlign);
881  break;
882  case 2:
883  microOps[uopIdx++] = newNeonMemInst<MicroStrNeon16Uop>(
884  size, machInst, rMid, rn, 0, align);
885  break;
886  case 1:
887  microOps[uopIdx++] = newNeonMemInst<MicroStrNeon8Uop>(
888  size, machInst, rMid, rn, 0, align);
889  break;
890  default:
891  // Unknown number of registers
892  microOps[uopIdx++] = new Unknown(machInst);
893  }
894  if (wb) {
895  if (rm != 15 && rm != 13) {
896  microOps[uopIdx++] =
897  new MicroAddUop(machInst, rn, rn, rm, 0, ArmISA::LSL);
898  } else {
899  microOps[uopIdx++] =
900  new MicroAddiUop(machInst, rn, rn, regs * 8);
901  }
902  }
903  assert(uopIdx == numMicroops);
904 
905  for (unsigned i = 0; i < numMicroops - 1; i++) {
906  MicroOp * uopPtr = dynamic_cast<MicroOp *>(microOps[i].get());
907  assert(uopPtr);
908  uopPtr->setDelayedCommit();
909  }
910  microOps[0]->setFirstMicroop();
911  microOps[numMicroops - 1]->setLastMicroop();
912 }
913 
914 VstSingleOp::VstSingleOp(const char *mnem, ExtMachInst machInst,
915  OpClass __opClass, bool all, unsigned elems,
916  RegIndex rn, RegIndex vd, unsigned regs,
917  unsigned inc, uint32_t size, uint32_t align,
918  RegIndex rm, unsigned lane) :
919  PredMacroOp(mnem, machInst, __opClass)
920 {
921  assert(!all);
922  assert(regs > 0 && regs <= 4);
923  assert(regs % elems == 0);
924 
925  unsigned eBytes = (1 << size);
926  unsigned storeSize = eBytes * elems;
927  M5_VAR_USED unsigned storeRegs =
928  (storeSize + sizeof(uint32_t) - 1) / sizeof(uint32_t);
929 
930  assert(storeRegs > 0 && storeRegs <= 4);
931 
932  numMicroops = 1;
933  bool wb = (rm != 15);
934 
935  if (wb) numMicroops++;
936  numMicroops += (regs / elems);
937  microOps = new StaticInstPtr[numMicroops];
938 
939  RegIndex ufp0 = VecSpecialElem;
940 
941  unsigned uopIdx = 0;
942  switch (elems) {
943  case 4:
944  assert(regs == 4);
945  switch (size) {
946  case 0:
947  microOps[uopIdx++] = new MicroPackNeon8to2Uop<uint8_t>(
948  machInst, ufp0, vd * 2, inc * 2, lane);
949  break;
950  case 1:
951  microOps[uopIdx++] = new MicroPackNeon8to2Uop<uint16_t>(
952  machInst, ufp0, vd * 2, inc * 2, lane);
953  break;
954  case 2:
955  microOps[uopIdx++] = new MicroPackNeon8to4Uop<uint32_t>(
956  machInst, ufp0, vd * 2, inc * 2, lane);
957  break;
958  default:
959  // Bad size
960  microOps[uopIdx++] = new Unknown(machInst);
961  break;
962  }
963  break;
964  case 3:
965  assert(regs == 3);
966  switch (size) {
967  case 0:
968  microOps[uopIdx++] = new MicroPackNeon6to2Uop<uint8_t>(
969  machInst, ufp0, vd * 2, inc * 2, lane);
970  break;
971  case 1:
972  microOps[uopIdx++] = new MicroPackNeon6to2Uop<uint16_t>(
973  machInst, ufp0, vd * 2, inc * 2, lane);
974  break;
975  case 2:
976  microOps[uopIdx++] = new MicroPackNeon6to4Uop<uint32_t>(
977  machInst, ufp0, vd * 2, inc * 2, lane);
978  break;
979  default:
980  // Bad size
981  microOps[uopIdx++] = new Unknown(machInst);
982  break;
983  }
984  break;
985  case 2:
986  assert(regs == 2);
987  assert(storeRegs <= 2);
988  switch (size) {
989  case 0:
990  microOps[uopIdx++] = new MicroPackNeon4to2Uop<uint8_t>(
991  machInst, ufp0, vd * 2, inc * 2, lane);
992  break;
993  case 1:
994  microOps[uopIdx++] = new MicroPackNeon4to2Uop<uint16_t>(
995  machInst, ufp0, vd * 2, inc * 2, lane);
996  break;
997  case 2:
998  microOps[uopIdx++] = new MicroPackNeon4to2Uop<uint32_t>(
999  machInst, ufp0, vd * 2, inc * 2, lane);
1000  break;
1001  default:
1002  // Bad size
1003  microOps[uopIdx++] = new Unknown(machInst);
1004  break;
1005  }
1006  break;
1007  case 1:
1008  assert(regs == 1 || (all && regs == 2));
1009  assert(storeRegs <= 2);
1010  for (unsigned offset = 0; offset < regs; offset++) {
1011  switch (size) {
1012  case 0:
1013  microOps[uopIdx++] = new MicroPackNeon2to2Uop<uint8_t>(
1014  machInst, ufp0, (vd + offset) * 2, inc * 2, lane);
1015  break;
1016  case 1:
1017  microOps[uopIdx++] = new MicroPackNeon2to2Uop<uint16_t>(
1018  machInst, ufp0, (vd + offset) * 2, inc * 2, lane);
1019  break;
1020  case 2:
1021  microOps[uopIdx++] = new MicroPackNeon2to2Uop<uint32_t>(
1022  machInst, ufp0, (vd + offset) * 2, inc * 2, lane);
1023  break;
1024  default:
1025  // Bad size
1026  microOps[uopIdx++] = new Unknown(machInst);
1027  break;
1028  }
1029  }
1030  break;
1031  default:
1032  // Bad number of elements to unpack
1033  microOps[uopIdx++] = new Unknown(machInst);
1034  }
1035  switch (storeSize) {
1036  case 1:
1037  microOps[uopIdx++] = new MicroStrNeon1Uop<uint8_t>(
1038  machInst, ufp0, rn, 0, align);
1039  break;
1040  case 2:
1041  if (eBytes == 2) {
1042  microOps[uopIdx++] = new MicroStrNeon2Uop<uint16_t>(
1043  machInst, ufp0, rn, 0, align);
1044  } else {
1045  microOps[uopIdx++] = new MicroStrNeon2Uop<uint8_t>(
1046  machInst, ufp0, rn, 0, align);
1047  }
1048  break;
1049  case 3:
1050  microOps[uopIdx++] = new MicroStrNeon3Uop<uint8_t>(
1051  machInst, ufp0, rn, 0, align);
1052  break;
1053  case 4:
1054  switch (eBytes) {
1055  case 1:
1056  microOps[uopIdx++] = new MicroStrNeon4Uop<uint8_t>(
1057  machInst, ufp0, rn, 0, align);
1058  break;
1059  case 2:
1060  microOps[uopIdx++] = new MicroStrNeon4Uop<uint16_t>(
1061  machInst, ufp0, rn, 0, align);
1062  break;
1063  case 4:
1064  microOps[uopIdx++] = new MicroStrNeon4Uop<uint32_t>(
1065  machInst, ufp0, rn, 0, align);
1066  break;
1067  }
1068  break;
1069  case 6:
1070  microOps[uopIdx++] = new MicroStrNeon6Uop<uint16_t>(
1071  machInst, ufp0, rn, 0, align);
1072  break;
1073  case 8:
1074  switch (eBytes) {
1075  case 2:
1076  microOps[uopIdx++] = new MicroStrNeon8Uop<uint16_t>(
1077  machInst, ufp0, rn, 0, align);
1078  break;
1079  case 4:
1080  microOps[uopIdx++] = new MicroStrNeon8Uop<uint32_t>(
1081  machInst, ufp0, rn, 0, align);
1082  break;
1083  }
1084  break;
1085  case 12:
1086  microOps[uopIdx++] = new MicroStrNeon12Uop<uint32_t>(
1087  machInst, ufp0, rn, 0, align);
1088  break;
1089  case 16:
1090  microOps[uopIdx++] = new MicroStrNeon16Uop<uint32_t>(
1091  machInst, ufp0, rn, 0, align);
1092  break;
1093  default:
1094  // Bad store size
1095  microOps[uopIdx++] = new Unknown(machInst);
1096  }
1097  if (wb) {
1098  if (rm != 15 && rm != 13) {
1099  microOps[uopIdx++] =
1100  new MicroAddUop(machInst, rn, rn, rm, 0, ArmISA::LSL);
1101  } else {
1102  microOps[uopIdx++] =
1103  new MicroAddiUop(machInst, rn, rn, storeSize);
1104  }
1105  }
1106  assert(uopIdx == numMicroops);
1107 
1108  for (unsigned i = 0; i < numMicroops - 1; i++) {
1109  MicroOp * uopPtr = dynamic_cast<MicroOp *>(microOps[i].get());
1110  assert(uopPtr);
1111  uopPtr->setDelayedCommit();
1112  }
1113  microOps[0]->setFirstMicroop();
1114  microOps[numMicroops - 1]->setLastMicroop();
1115 }
1116 
1117 VldMultOp64::VldMultOp64(const char *mnem, ExtMachInst machInst,
1118  OpClass __opClass, RegIndex rn, RegIndex vd,
1119  RegIndex rm, uint8_t eSize, uint8_t dataSize,
1120  uint8_t numStructElems, uint8_t numRegs, bool wb) :
1121  PredMacroOp(mnem, machInst, __opClass)
1122 {
1123  RegIndex vx = NumVecV8ArchRegs;
1124  RegIndex rnsp = (RegIndex) makeSP((IntRegIndex) rn);
1125  bool baseIsSP = isSP((IntRegIndex) rnsp);
1126 
1127  numMicroops = wb ? 1 : 0;
1128 
1129  int totNumBytes = numRegs * dataSize / 8;
1130  assert(totNumBytes <= 64);
1131 
1132  // The guiding principle here is that no more than 16 bytes can be
1133  // transferred at a time
1134  int numMemMicroops = totNumBytes / 16;
1135  int residuum = totNumBytes % 16;
1136  if (residuum)
1137  ++numMemMicroops;
1138  numMicroops += numMemMicroops;
1139 
1140  int numMarshalMicroops = numRegs / 2 + (numRegs % 2 ? 1 : 0);
1141  numMicroops += numMarshalMicroops;
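 // Illustrative example: an LD4 of four 128-bit registers transfers
 // 4 * 128 / 8 = 64 bytes, i.e. 64 / 16 = 4 memory microops with no
 // residuum, plus 4 / 2 = 2 deinterleave (marshalling) microops and an
 // optional writeback microop.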
1142 
1143  microOps = new StaticInstPtr[numMicroops];
1144  unsigned uopIdx = 0;
1145  uint32_t memaccessFlags = (TLB::ArmFlags)eSize | TLB::AllowUnaligned;
1146 
1147  int i = 0;
1148  for (; i < numMemMicroops - 1; ++i) {
1149  microOps[uopIdx++] = new MicroNeonLoad64(
1150  machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags,
1151  baseIsSP, 16 /* accSize */, eSize);
1152  }
1153  microOps[uopIdx++] = new MicroNeonLoad64(
1154  machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags, baseIsSP,
1155  residuum ? residuum : 16 /* accSize */, eSize);
1156 
1157  // Writeback microop: the post-increment amount is encoded in "Rm": a
1158  // 64-bit general register OR as '11111' for an immediate value equal to
1159  // the total number of bytes transferred (i.e. 8, 16, 24, 32, 48 or 64)
1160  if (wb) {
1161  if (rm != ((RegIndex) INTREG_X31)) {
1162  microOps[uopIdx++] = new MicroAddXERegUop(machInst, rnsp, rnsp, rm,
1163  UXTX, 0);
1164  } else {
1165  microOps[uopIdx++] = new MicroAddXiUop(machInst, rnsp, rnsp,
1166  totNumBytes);
1167  }
1168  }
1169 
1170  for (int i = 0; i < numMarshalMicroops; ++i) {
1171  switch(numRegs) {
1172  case 1: microOps[uopIdx++] = new MicroDeintNeon64_1Reg(
1173  machInst, vd + (RegIndex) (2 * i), vx, eSize, dataSize,
1174  numStructElems, 1, i /* step */);
1175  break;
1176  case 2: microOps[uopIdx++] = new MicroDeintNeon64_2Reg(
1177  machInst, vd + (RegIndex) (2 * i), vx, eSize, dataSize,
1178  numStructElems, 2, i /* step */);
1179  break;
1180  case 3: microOps[uopIdx++] = new MicroDeintNeon64_3Reg(
1181  machInst, vd + (RegIndex) (2 * i), vx, eSize, dataSize,
1182  numStructElems, 3, i /* step */);
1183  break;
1184  case 4: microOps[uopIdx++] = new MicroDeintNeon64_4Reg(
1185  machInst, vd + (RegIndex) (2 * i), vx, eSize, dataSize,
1186  numStructElems, 4, i /* step */);
1187  break;
1188  default: panic("Invalid number of registers");
1189  }
1190 
1191  }
1192 
1193  assert(uopIdx == numMicroops);
1194 
1195  for (int i = 0; i < numMicroops - 1; ++i) {
1196  microOps[i]->setDelayedCommit();
1197  }
1198  microOps[0]->setFirstMicroop();
1199  microOps[numMicroops - 1]->setLastMicroop();
1200 }
1201 
1202 VstMultOp64::VstMultOp64(const char *mnem, ExtMachInst machInst,
1203  OpClass __opClass, RegIndex rn, RegIndex vd,
1204  RegIndex rm, uint8_t eSize, uint8_t dataSize,
1205  uint8_t numStructElems, uint8_t numRegs, bool wb) :
1206  PredMacroOp(mnem, machInst, __opClass)
1207 {
1208  RegIndex vx = NumVecV8ArchRegs;
1209  RegIndex rnsp = (RegIndex) makeSP((IntRegIndex) rn);
1210  bool baseIsSP = isSP((IntRegIndex) rnsp);
1211 
1212  numMicroops = wb ? 1 : 0;
1213 
1214  int totNumBytes = numRegs * dataSize / 8;
1215  assert(totNumBytes <= 64);
1216 
1217  // The guiding principle here is that no more than 16 bytes can be
1218  // transferred at a time
1219  int numMemMicroops = totNumBytes / 16;
1220  int residuum = totNumBytes % 16;
1221  if (residuum)
1222  ++numMemMicroops;
1223  numMicroops += numMemMicroops;
1224 
1225  int numMarshalMicroops = totNumBytes > 32 ? 2 : 1;
1226  numMicroops += numMarshalMicroops;
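 // Illustrative example: each marshalling (interleave) microop covers at
 // most 32 bytes, so a 64-byte ST4 of four 128-bit registers needs 2 of
 // them, followed by 64 / 16 = 4 store microops.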
1227 
1228  microOps = new StaticInstPtr[numMicroops];
1229  unsigned uopIdx = 0;
1230 
1231  for (int i = 0; i < numMarshalMicroops; ++i) {
1232  switch (numRegs) {
1233  case 1: microOps[uopIdx++] = new MicroIntNeon64_1Reg(
1234  machInst, vx + (RegIndex) (2 * i), vd, eSize, dataSize,
1235  numStructElems, 1, i /* step */);
1236  break;
1237  case 2: microOps[uopIdx++] = new MicroIntNeon64_2Reg(
1238  machInst, vx + (RegIndex) (2 * i), vd, eSize, dataSize,
1239  numStructElems, 2, i /* step */);
1240  break;
1241  case 3: microOps[uopIdx++] = new MicroIntNeon64_3Reg(
1242  machInst, vx + (RegIndex) (2 * i), vd, eSize, dataSize,
1243  numStructElems, 3, i /* step */);
1244  break;
1245  case 4: microOps[uopIdx++] = new MicroIntNeon64_4Reg(
1246  machInst, vx + (RegIndex) (2 * i), vd, eSize, dataSize,
1247  numStructElems, 4, i /* step */);
1248  break;
1249  default: panic("Invalid number of registers");
1250  }
1251  }
1252 
1253  uint32_t memaccessFlags = (TLB::ArmFlags)eSize | TLB::AllowUnaligned;
1254 
1255  int i = 0;
1256  for (; i < numMemMicroops - 1; ++i) {
1257  microOps[uopIdx++] = new MicroNeonStore64(
1258  machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags,
1259  baseIsSP, 16 /* accSize */, eSize);
1260  }
1261  microOps[uopIdx++] = new MicroNeonStore64(
1262  machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags, baseIsSP,
1263  residuum ? residuum : 16 /* accSize */, eSize);
1264 
1265  // Writeback microop: the post-increment amount is encoded in "Rm": a
1266  // 64-bit general register OR as '11111' for an immediate value equal to
1267  // the total number of bytes transferred (i.e. 8, 16, 24, 32, 48 or 64)
1268  if (wb) {
1269  if (rm != ((RegIndex) INTREG_X31)) {
1270  microOps[uopIdx++] = new MicroAddXERegUop(machInst, rnsp, rnsp, rm,
1271  UXTX, 0);
1272  } else {
1273  microOps[uopIdx++] = new MicroAddXiUop(machInst, rnsp, rnsp,
1274  totNumBytes);
1275  }
1276  }
1277 
1278  assert(uopIdx == numMicroops);
1279 
1280  for (int i = 0; i < numMicroops - 1; i++) {
1281  microOps[i]->setDelayedCommit();
1282  }
1283  microOps[0]->setFirstMicroop();
1284  microOps[numMicroops - 1]->setLastMicroop();
1285 }
1286 
1287 VldSingleOp64::VldSingleOp64(const char *mnem, ExtMachInst machInst,
1288  OpClass __opClass, RegIndex rn, RegIndex vd,
1289  RegIndex rm, uint8_t eSize, uint8_t dataSize,
1290  uint8_t numStructElems, uint8_t index, bool wb,
1291  bool replicate) :
1292  PredMacroOp(mnem, machInst, __opClass),
1293  eSize(0), dataSize(0), numStructElems(0), index(0),
1294  wb(false), replicate(false)
1295 
1296 {
1297  RegIndex vx = NumVecV8ArchRegs;
1298  RegIndex rnsp = (RegIndex) makeSP((IntRegIndex) rn);
1299  bool baseIsSP = isSP((IntRegIndex) rnsp);
1300 
1301  numMicroops = wb ? 1 : 0;
1302 
1303  int eSizeBytes = 1 << eSize;
1304  int totNumBytes = numStructElems * eSizeBytes;
1305  assert(totNumBytes <= 64);
1306 
1307  // The guiding principle here is that no more than 16 bytes can be
1308  // transferred at a time
1309  int numMemMicroops = totNumBytes / 16;
1310  int residuum = totNumBytes % 16;
1311  if (residuum)
1312  ++numMemMicroops;
1313  numMicroops += numMemMicroops;
1314 
1315  int numMarshalMicroops = numStructElems / 2 + (numStructElems % 2 ? 1 : 0);
1316  numMicroops += numMarshalMicroops;
1317 
1318  microOps = new StaticInstPtr[numMicroops];
1319  unsigned uopIdx = 0;
1320 
1321  uint32_t memaccessFlags = (TLB::ArmFlags)eSize | TLB::AllowUnaligned;
1322 
1323  int i = 0;
1324  for (; i < numMemMicroops - 1; ++i) {
1325  microOps[uopIdx++] = new MicroNeonLoad64(
1326  machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags,
1327  baseIsSP, 16 /* accSize */, eSize);
1328  }
1329  microOps[uopIdx++] = new MicroNeonLoad64(
1330  machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags, baseIsSP,
1331  residuum ? residuum : 16 /* accSize */, eSize);
1332 
1333  // Writeback microop: the post-increment amount is encoded in "Rm": a
1334  // 64-bit general register OR as '11111' for an immediate value equal to
1335  // the total number of bytes transferred (i.e. 8, 16, 24, 32, 48 or 64)
1336  if (wb) {
1337  if (rm != ((RegIndex) INTREG_X31)) {
1338  microOps[uopIdx++] = new MicroAddXERegUop(machInst, rnsp, rnsp, rm,
1339  UXTX, 0);
1340  } else {
1341  microOps[uopIdx++] = new MicroAddXiUop(machInst, rnsp, rnsp,
1342  totNumBytes);
1343  }
1344  }
1345 
1346  for (int i = 0; i < numMarshalMicroops; ++i) {
1347  microOps[uopIdx++] = new MicroUnpackNeon64(
1348  machInst, vd + (RegIndex) (2 * i), vx, eSize, dataSize,
1349  numStructElems, index, i /* step */, replicate);
1350  }
1351 
1352  assert(uopIdx == numMicroops);
1353 
1354  for (int i = 0; i < numMicroops - 1; i++) {
1355  microOps[i]->setDelayedCommit();
1356  }
1357  microOps[0]->setFirstMicroop();
1358  microOps[numMicroops - 1]->setLastMicroop();
1359 }
1360 
1361 VstSingleOp64::VstSingleOp64(const char *mnem, ExtMachInst machInst,
1362  OpClass __opClass, RegIndex rn, RegIndex vd,
1363  RegIndex rm, uint8_t eSize, uint8_t dataSize,
1364  uint8_t numStructElems, uint8_t index, bool wb,
1365  bool replicate) :
1366  PredMacroOp(mnem, machInst, __opClass),
1367  eSize(0), dataSize(0), numStructElems(0), index(0),
1368  wb(false), replicate(false)
1369 {
1370  RegIndex vx = NumVecV8ArchRegs;
1371  RegIndex rnsp = (RegIndex) makeSP((IntRegIndex) rn);
1372  bool baseIsSP = isSP((IntRegIndex) rnsp);
1373 
1374  numMicroops = wb ? 1 : 0;
1375 
1376  int eSizeBytes = 1 << eSize;
1377  int totNumBytes = numStructElems * eSizeBytes;
1378  assert(totNumBytes <= 64);
1379 
1380  // The guiding principle here is that no more than 16 bytes can be
1381  // transferred at a time
1382  int numMemMicroops = totNumBytes / 16;
1383  int residuum = totNumBytes % 16;
1384  if (residuum)
1385  ++numMemMicroops;
1386  numMicroops += numMemMicroops;
1387 
1388  int numMarshalMicroops = totNumBytes > 32 ? 2 : 1;
1389  numMicroops += numMarshalMicroops;
1390 
1391  microOps = new StaticInstPtr[numMicroops];
1392  unsigned uopIdx = 0;
1393 
1394  for (int i = 0; i < numMarshalMicroops; ++i) {
1395  microOps[uopIdx++] = new MicroPackNeon64(
1396  machInst, vx + (RegIndex) (2 * i), vd, eSize, dataSize,
1397  numStructElems, index, i /* step */, replicate);
1398  }
1399 
1400  uint32_t memaccessFlags = (TLB::ArmFlags)eSize | TLB::AllowUnaligned;
1401 
1402  int i = 0;
1403  for (; i < numMemMicroops - 1; ++i) {
1404  microOps[uopIdx++] = new MicroNeonStore64(
1405  machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags,
1406  baseIsSP, 16 /* accsize */, eSize);
1407  }
1408  microOps[uopIdx++] = new MicroNeonStore64(
1409  machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags, baseIsSP,
1410  residuum ? residuum : 16 /* accSize */, eSize);
1411 
1412  // Writeback microop: the post-increment amount is encoded in "Rm": a
1413  // 64-bit general register OR as '11111' for an immediate value equal to
1414  // the total number of bytes transferred (i.e. 8, 16, 24, 32, 48 or 64)
1415  if (wb) {
1416  if (rm != ((RegIndex) INTREG_X31)) {
1417  microOps[uopIdx++] = new MicroAddXERegUop(machInst, rnsp, rnsp, rm,
1418  UXTX, 0);
1419  } else {
1420  microOps[uopIdx++] = new MicroAddXiUop(machInst, rnsp, rnsp,
1421  totNumBytes);
1422  }
1423  }
1424 
1425  assert(uopIdx == numMicroops);
1426 
1427  for (int i = 0; i < numMicroops - 1; i++) {
1428  microOps[i]->setDelayedCommit();
1429  }
1430  microOps[0]->setFirstMicroop();
1431  microOps[numMicroops - 1]->setLastMicroop();
1432 }
1433 
1434 MacroVFPMemOp::MacroVFPMemOp(const char *mnem, ExtMachInst machInst,
1435  OpClass __opClass, IntRegIndex rn,
1436  RegIndex vd, bool single, bool up,
1437  bool writeback, bool load, uint32_t offset) :
1438  PredMacroOp(mnem, machInst, __opClass)
1439 {
1440  int i = 0;
1441 
1442  // The lowest order bit selects fldmx (set) or fldmd (clear). These seem
1443  // to be functionally identical except that fldmx is deprecated. For now
1444  // we'll assume they're otherwise interchangeable.
1445  int count = (single ? offset : (offset / 2));
1446  numMicroops = count * (single ? 1 : 2) + (writeback ? 1 : 0);
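 // Illustrative example: a VLDMIA of four double registers has offset == 8
 // words, so count == 8 / 2 == 4 and the transfer becomes 4 * 2 = 8
 // single-word microops, plus one more if writeback is set.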
1447  microOps = new StaticInstPtr[numMicroops];
1448 
1449  int64_t addr = 0;
1450 
1451  if (!up)
1452  addr = 4 * offset;
1453 
1454  bool tempUp = up;
1455  for (int j = 0; j < count; j++) {
1456  if (load) {
1457  if (single) {
1458  microOps[i++] = new MicroLdrFpUop(machInst, vd++, rn,
1459  tempUp, addr);
1460  } else {
1461  microOps[i++] = new MicroLdrDBFpUop(machInst, vd++, rn,
1462  tempUp, addr);
1463  microOps[i++] = new MicroLdrDTFpUop(machInst, vd++, rn, tempUp,
1464  addr + (up ? 4 : -4));
1465  }
1466  } else {
1467  if (single) {
1468  microOps[i++] = new MicroStrFpUop(machInst, vd++, rn,
1469  tempUp, addr);
1470  } else {
1471  microOps[i++] = new MicroStrDBFpUop(machInst, vd++, rn,
1472  tempUp, addr);
1473  microOps[i++] = new MicroStrDTFpUop(machInst, vd++, rn, tempUp,
1474  addr + (up ? 4 : -4));
1475  }
1476  }
1477  if (!tempUp) {
1478  addr -= (single ? 4 : 8);
1479  // The microops don't handle negative displacement, so once we
1480  // hit zero, flip the polarity and start adding.
1481  if (addr <= 0) {
1482  tempUp = true;
1483  addr = -addr;
1484  }
1485  } else {
1486  addr += (single ? 4 : 8);
1487  }
1488  }
1489 
1490  if (writeback) {
1491  if (up) {
1492  microOps[i++] =
1493  new MicroAddiUop(machInst, rn, rn, 4 * offset);
1494  } else {
1495  microOps[i++] =
1496  new MicroSubiUop(machInst, rn, rn, 4 * offset);
1497  }
1498  }
1499 
1500  assert(numMicroops == i);
1501  microOps[0]->setFirstMicroop();
1502  microOps[numMicroops - 1]->setLastMicroop();
1503 
1504  for (StaticInstPtr *curUop = microOps;
1505  !(*curUop)->isLastMicroop(); curUop++) {
1506  MicroOp * uopPtr = dynamic_cast<MicroOp *>(curUop->get());
1507  assert(uopPtr);
1508  uopPtr->setDelayedCommit();
1509  }
1510 }
1511 
1512 std::string
1513 MicroIntImmOp::generateDisassembly(
1514  Addr pc, const Loader::SymbolTable *symtab) const
1515 {
1516  std::stringstream ss;
1517  printMnemonic(ss);
1518  printIntReg(ss, ura);
1519  ss << ", ";
1520  printIntReg(ss, urb);
1521  ss << ", ";
1522  ccprintf(ss, "#%d", imm);
1523  return ss.str();
1524 }
1525 
1526 std::string
1527 MicroIntImmXOp::generateDisassembly(
1528  Addr pc, const Loader::SymbolTable *symtab) const
1529 {
1530  std::stringstream ss;
1531  printMnemonic(ss);
1532  printIntReg(ss, ura);
1533  ss << ", ";
1534  printIntReg(ss, urb);
1535  ss << ", ";
1536  ccprintf(ss, "#%d", imm);
1537  return ss.str();
1538 }
1539 
1540 std::string
1541 MicroSetPCCPSR::generateDisassembly(
1542  Addr pc, const Loader::SymbolTable *symtab) const
1543 {
1544  std::stringstream ss;
1545  printMnemonic(ss);
1546  ss << "[PC,CPSR]";
1547  return ss.str();
1548 }
1549 
1550 std::string
1551 MicroIntRegXOp::generateDisassembly(
1552  Addr pc, const Loader::SymbolTable *symtab) const
1553 {
1554  std::stringstream ss;
1555  printMnemonic(ss);
1556  printIntReg(ss, ura);
1557  ccprintf(ss, ", ");
1558  printIntReg(ss, urb);
1559  printExtendOperand(false, ss, (IntRegIndex)urc, type, shiftAmt);
1560  return ss.str();
1561 }
1562 
1563 std::string
1564 MicroIntMov::generateDisassembly(
1565  Addr pc, const Loader::SymbolTable *symtab) const
1566 {
1567  std::stringstream ss;
1568  printMnemonic(ss);
1569  printIntReg(ss, ura);
1570  ss << ", ";
1571  printIntReg(ss, urb);
1572  return ss.str();
1573 }
1574 
1575 std::string
1576 MicroIntOp::generateDisassembly(
1577  Addr pc, const Loader::SymbolTable *symtab) const
1578 {
1579  std::stringstream ss;
1580  printMnemonic(ss);
1581  printIntReg(ss, ura);
1582  ss << ", ";
1583  printIntReg(ss, urb);
1584  ss << ", ";
1585  printIntReg(ss, urc);
1586  return ss.str();
1587 }
1588 
1589 std::string
1590 MicroMemOp::generateDisassembly(
1591  Addr pc, const Loader::SymbolTable *symtab) const
1592 {
1593  std::stringstream ss;
1594  printMnemonic(ss);
1595  if (isFloating())
1596  printFloatReg(ss, ura);
1597  else
1598  printIntReg(ss, ura);
1599  ss << ", [";
1600  printIntReg(ss, urb);
1601  ss << ", ";
1602  ccprintf(ss, "#%d", imm);
1603  ss << "]";
1604  return ss.str();
1605 }
1606 
1607 std::string
1608 MicroMemPairOp::generateDisassembly(
1609  Addr pc, const Loader::SymbolTable *symtab) const
1610 {
1611  std::stringstream ss;
1612  printMnemonic(ss);
1613  printIntReg(ss, dest);
1614  ss << ",";
1615  printIntReg(ss, dest2);
1616  ss << ", [";
1617  printIntReg(ss, urb);
1618  ss << ", ";
1619  ccprintf(ss, "#%d", imm);
1620  ss << "]";
1621  return ss.str();
1622 }
1623 
1624 }