gem5  v22.1.0.0
aapcs32.hh
/*
 * Copyright 2019 Google Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __ARCH_ARM_AAPCS32_HH__
#define __ARCH_ARM_AAPCS32_HH__

#include <algorithm>
#include <array>
#include <type_traits>
#include <utility>

#include "arch/arm/regs/int.hh"
#include "arch/arm/regs/vec.hh"
#include "arch/arm/utility.hh"
#include "base/intmath.hh"
#include "cpu/thread_context.hh"
#include "mem/port_proxy.hh"
#include "mem/se_translating_port_proxy.hh"
#include "mem/translating_port_proxy.hh"
#include "sim/full_system.hh"
#include "sim/guest_abi.hh"
#include "sim/proxy_ptr.hh"

namespace gem5
{

class ThreadContext;

struct Aapcs32
{
    struct State
    {
        bool stackUsed=false; // Whether anything has been put on the stack.

        int ncrn=0; // Next general purpose register number.
        Addr nsaa; // Next stacked argument address.

        // The maximum allowed general purpose register number.
        static const int MAX_CRN = 3;

        Addr retAddr=0; // Address to use for a result returned in memory.

        explicit State(const ThreadContext *tc) :
            nsaa(tc->getReg(ArmISA::int_reg::Spx))
        {}
    };
};

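/*
 * For illustration only (a sketch, not something defined in this header): a
 * simulator-side function can be called with guest arguments laid out by this
 * ABI through gem5's invokeSimcall helper from sim/guest_abi.hh, roughly:
 *
 *     int doAdd(ThreadContext *tc, int a, int b) { return a + b; }
 *     ...
 *     int ret = invokeSimcall<Aapcs32>(tc, doAdd);
 *
 * The Argument and Result specializations below then pull a and b out of
 * r0/r1 (or the stack) and store the return value back into r0.
 */
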
GEM5_DEPRECATED_NAMESPACE(GuestABI, guest_abi);
namespace guest_abi
{

/*
 * Composite Types
 */

template <typename T, typename Enabled=void>
struct IsAapcs32Composite : public std::false_type {};

template <typename T>
struct IsAapcs32Composite<T, typename std::enable_if_t<
    (std::is_array_v<T> || std::is_class_v<T> || std::is_union_v<T>) &&
    // VarArgs is technically a composite type, but it's not a normal argument.
    !IsVarArgsV<T>
    >> : public std::true_type
{};

template <typename T>
constexpr bool IsAapcs32CompositeV = IsAapcs32Composite<T>::value;

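// For example, "struct { int x; char y; };" and "int[2]" both count as
// composite types here, while a bare "int" or "float" does not and is handled
// by the scalar specializations further down.
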
// Homogeneous Aggregates
// These *should* be any aggregate type which has only one type of member, but
// we can't actually detect that or manipulate that with templates. Instead,
// we approximate that by detecting only arrays with that property.

template <typename T, std::size_t count, typename Enabled=void>
using Aapcs32HomogeneousAggregate = T[count];

template <typename T>
struct IsAapcs32HomogeneousAggregate : public std::false_type {};

template <typename E, size_t N>
struct IsAapcs32HomogeneousAggregate<E[N]> : public std::true_type {};

template <typename T>
constexpr bool IsAapcs32HomogeneousAggregateV =
    IsAapcs32HomogeneousAggregate<T>::value;

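// For example, "float[4]" is detected as a homogeneous aggregate, while a
// struct holding four floats is not (even though the AAPCS treats it as one);
// such a struct falls back to the generic composite handling instead.
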
struct Aapcs32ArgumentBase
{
    template <typename T>
    static T
    loadFromStack(ThreadContext *tc, Aapcs32::State &state)
    {
        state.stackUsed = true;

        // The alignment is the larger of 4 or the natural alignment of T.
        size_t align = std::max<size_t>(4, alignof(T));
        // Increase the size to the next multiple of 4.
        size_t size = roundUp(sizeof(T), 4);

        // Align the stack.
        state.nsaa = roundUp(state.nsaa, align);

        // Extract the value from it.
        ConstVPtr<T> val(state.nsaa, tc);

        // Move the nsaa past this argument.
        state.nsaa += size;

        // Return the value we extracted.
        return gtoh(*val, ArmISA::byteOrder(tc));
    }
};

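/*
 * As a worked example of the stack math above: loading a uint64_t when the
 * NSAA is currently 0x1006 first rounds the NSAA up to 0x1008 (the alignment
 * is max(4, alignof(uint64_t)) == 8), reads eight bytes from there, and
 * leaves the NSAA at 0x1010 for the next stacked argument.
 */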

/*
 * Integer arguments and return values.
 */

template <typename Integer>
struct Result<Aapcs32, Integer, typename std::enable_if_t<
    std::is_integral_v<Integer> && (sizeof(Integer) < sizeof(uint32_t))>>
{
    static void
    store(ThreadContext *tc, const Integer &i)
    {
        uint32_t val = std::is_signed_v<Integer> ?
            sext<sizeof(Integer) * 8>(i) : i;
        tc->setReg(ArmISA::int_reg::R0, val);
    }
};

template <typename Integer>
struct Result<Aapcs32, Integer, typename std::enable_if_t<
    std::is_integral_v<Integer> && (sizeof(Integer) == sizeof(uint32_t))>>
{
    static void
    store(ThreadContext *tc, const Integer &i)
    {
        tc->setReg(ArmISA::int_reg::R0, (uint32_t)i);
    }
};

template <typename Integer>
struct Result<Aapcs32, Integer, typename std::enable_if_t<
    std::is_integral_v<Integer> && (sizeof(Integer) == sizeof(uint64_t))>>
{
    static void
    store(ThreadContext *tc, const Integer &i)
    {
        if (ArmISA::byteOrder(tc) == ByteOrder::little) {
            tc->setReg(ArmISA::int_reg::R0, (uint32_t)(i >> 0));
            tc->setReg(ArmISA::int_reg::R1, (uint32_t)(i >> 32));
        } else {
            tc->setReg(ArmISA::int_reg::R0, (uint32_t)(i >> 32));
            tc->setReg(ArmISA::int_reg::R1, (uint32_t)(i >> 0));
        }
    }
};

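/*
 * For example, returning the 64-bit value 0x1122334455667788 on a
 * little-endian guest leaves 0x55667788 in r0 and 0x11223344 in r1; a
 * big-endian guest gets the two halves the other way around.
 */
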
template <typename Integer>
struct Argument<Aapcs32, Integer, typename std::enable_if_t<
    std::is_integral_v<Integer> && (sizeof(Integer) <= sizeof(uint32_t))
    >> : public Aapcs32ArgumentBase
{
    static Integer
    get(ThreadContext *tc, Aapcs32::State &state)
    {
        if (state.ncrn <= state.MAX_CRN) {
            return tc->getReg(ArmISA::intRegClass[state.ncrn++]);
        }

        // Max out the ncrn since we effectively exhausted it.
        state.ncrn = state.MAX_CRN + 1;

        return loadFromStack<Integer>(tc, state);
    }
};

template <typename Integer>
struct Argument<Aapcs32, Integer, typename std::enable_if_t<
    std::is_integral_v<Integer> && (sizeof(Integer) > sizeof(uint32_t))
    >> : public Aapcs32ArgumentBase
{
    static Integer
    get(ThreadContext *tc, Aapcs32::State &state)
    {
        if (alignof(Integer) == 8 && (state.ncrn % 2))
            state.ncrn++;

        if (sizeof(Integer) == sizeof(uint64_t) &&
                state.ncrn + 1 <= state.MAX_CRN) {
            Integer low, high;
            if (ArmISA::byteOrder(tc) == ByteOrder::little) {
                low = tc->getReg(ArmISA::intRegClass[state.ncrn++]) & mask(32);
                high = tc->getReg(ArmISA::intRegClass[state.ncrn++]) &
                    mask(32);
            } else {
                high = tc->getReg(ArmISA::intRegClass[state.ncrn++]) &
                    mask(32);
                low = tc->getReg(ArmISA::intRegClass[state.ncrn++]) & mask(32);
            }
            return low | (high << 32);
        }

        // Max out the ncrn since we effectively exhausted it.
        state.ncrn = state.MAX_CRN + 1;

        return loadFromStack<Integer>(tc, state);
    }
};

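/*
 * For example, with a call such as f(int a, uint64_t b), "a" arrives in r0;
 * "b" has eight byte alignment, so the NCRN is bumped past r1 and "b" is
 * assembled from the r2/r3 pair. A further 64-bit argument no longer fits in
 * the core registers and is loaded from the stack instead.
 */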

/*
 * Floating point and Short-Vector arguments and return values.
 */

template <typename Float>
struct Result<Aapcs32, Float, typename std::enable_if_t<
    std::is_floating_point_v<Float>>>
{
    static void
    store(ThreadContext *tc, const Float &f, Aapcs32::State &state)
    {
        auto i = floatToBits(f);
        storeResult<Aapcs32, decltype(i)>(tc, i, state);
    };
};

template <typename Float>
struct Argument<Aapcs32, Float, typename std::enable_if_t<
    std::is_floating_point_v<Float>>> : public Aapcs32ArgumentBase
{
    static Float
    get(ThreadContext *tc, Aapcs32::State &state)
    {
        if (sizeof(Float) == sizeof(uint32_t)) {
            return bitsToFloat32(
                    getArgument<Aapcs32, uint32_t>(tc, state));
        } else {
            return bitsToFloat64(
                    getArgument<Aapcs32, uint64_t>(tc, state));
        }
    }
};


/*
 * Composite arguments and return values.
 */

template <typename Composite>
struct Result<Aapcs32, Composite, typename std::enable_if_t<
    IsAapcs32CompositeV<Composite>>>
{
    static void
    store(ThreadContext *tc, const Composite &composite,
          Aapcs32::State &state)
    {
        if (sizeof(Composite) <= sizeof(uint32_t)) {
            Composite cp = htog(composite, ArmISA::byteOrder(tc));
            uint32_t val;
            memcpy((void *)&val, (void *)&cp, sizeof(Composite));
            val = gtoh(val, ArmISA::byteOrder(tc));
            tc->setReg(ArmISA::int_reg::R0, val);
        } else {
            VPtr<Composite> cp(state.retAddr, tc);
            *cp = htog(composite, ArmISA::byteOrder(tc));
        }
    }

    static void
    prepare(ThreadContext *tc, Aapcs32::State &state)
    {
        if (sizeof(Composite) > sizeof(uint32_t))
            state.retAddr = tc->getReg(ArmISA::intRegClass[state.ncrn++]);
    }
};

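/*
 * In other words, a composite of up to four bytes comes back in r0, while a
 * larger one is written to guest memory at the address the caller supplied in
 * the first available core register, which prepare() records in
 * state.retAddr before the arguments are read.
 */
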
template <typename Composite>
struct Argument<Aapcs32, Composite, typename std::enable_if_t<
    IsAapcs32CompositeV<Composite>>> :
    public Aapcs32ArgumentBase
{
    static Composite
    get(ThreadContext *tc, Aapcs32::State &state)
    {
        size_t bytes = sizeof(Composite);
        using Chunk = uint32_t;

        const int chunk_size = sizeof(Chunk);
        const int regs = (bytes + chunk_size - 1) / chunk_size;

        if (bytes <= chunk_size) {
            if (state.ncrn++ <= state.MAX_CRN) {
                alignas(alignof(Composite)) uint32_t val =
                    tc->getReg(ArmISA::intRegClass[state.ncrn++]);
                val = htog(val, ArmISA::byteOrder(tc));
                return gtoh(*(Composite *)&val, ArmISA::byteOrder(tc));
            }
        }

        if (alignof(Composite) == 8 && (state.ncrn % 2))
            state.ncrn++;

        if (state.ncrn + regs - 1 <= state.MAX_CRN) {
            alignas(alignof(Composite)) uint8_t buf[bytes];
            for (int i = 0; i < regs; i++) {
                Chunk val = tc->getReg(ArmISA::intRegClass[state.ncrn++]);
                val = htog(val, ArmISA::byteOrder(tc));
                size_t to_copy = std::min<size_t>(bytes, chunk_size);
                memcpy(buf + i * chunk_size, &val, to_copy);
                bytes -= to_copy;
            }
            return gtoh(*(Composite *)buf, ArmISA::byteOrder(tc));
        }

        if (!state.stackUsed && state.ncrn <= state.MAX_CRN) {
            alignas(alignof(Composite)) uint8_t buf[bytes];

            int offset = 0;
            while (state.ncrn <= state.MAX_CRN) {
                Chunk val = tc->getReg(ArmISA::intRegClass[state.ncrn++]);
                val = htog(val, ArmISA::byteOrder(tc));
                size_t to_copy = std::min<size_t>(bytes, chunk_size);
                memcpy(buf + offset, &val, to_copy);
                offset += to_copy;
                bytes -= to_copy;
            }

            if (bytes) {
                TranslatingPortProxy fs_proxy(tc);
                SETranslatingPortProxy se_proxy(tc);
                PortProxy &virt_proxy = FullSystem ? fs_proxy : se_proxy;

                virt_proxy.readBlob(
                        state.nsaa, buf, bytes);

                state.stackUsed = true;
                state.nsaa += roundUp(bytes, 4);
                state.ncrn = state.MAX_CRN + 1;
            }

            return gtoh(*(Composite *)buf, ArmISA::byteOrder(tc));
        }

        state.ncrn = state.MAX_CRN + 1;

        return loadFromStack<Composite>(tc, state);
    }
};

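/*
 * For example, an eight byte struct passed as the first argument is rebuilt
 * from r0 and r1 one 32-bit chunk at a time; a twelve byte struct that
 * arrives when only r2 and r3 are left takes its first eight bytes from those
 * registers and the rest from the stacked-argument area at the NSAA.
 */
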
} // namespace guest_abi


/*
 * VFP ABI variant.
 */

struct Aapcs32Vfp : public Aapcs32
{
    struct State : public Aapcs32::State
    {
        bool variadic=false; // Whether this function is variadic.

        // Whether the various single and double precision registers have
        // been allocated.
        std::array<bool, 16> s;
        std::array<bool, 8> d;

        explicit State(const ThreadContext *tc) : Aapcs32::State(tc)
        {
            s.fill(false);
            d.fill(false);
        }

        int
        allocate(float, int count)
        {
            int last = 0;
            for (int i = 0; i <= s.size() - count; i++) {
                if (s[i]) {
                    last = i + 1;
                    continue;
                }
                if (i - last + 1 == count) {
                    for (int j = 0; j < count; j++) {
                        s[last + j] = true;
                        d[(last + j) / 2] = true;
                    }
                    return last;
                }
            }
            s.fill(true);
            d.fill(true);
            return -1;
        }

        int
        allocate(double, int count)
        {
            int last = 0;
            for (int i = 0; i <= d.size() - count; i++) {
                if (d[i]) {
                    last = i + 1;
                    continue;
                }
                if (i - last + 1 == count) {
                    for (int j = 0; j < count; j++) {
                        d[last + j] = true;
                        s[(last + j) * 2] = true;
                        s[(last + j) * 2 + 1] = true;
                    }
                    return last;
                }
            }
            s.fill(true);
            d.fill(true);
            return -1;
        }
    };
};

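/*
 * The allocate() helpers above model the VFP back-filling rules: the s and d
 * arrays shadow the same physical registers, so allocating one double marks
 * d0 along with s0 and s1, and a later float can still back-fill any single s
 * register that remains free. For example, for the argument list (float,
 * double, float) the allocator hands out s0, then d1, then s1.
 */
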
GEM5_DEPRECATED_NAMESPACE(GuestABI, guest_abi);
namespace guest_abi
{

/*
 * Integer arguments and return values.
 */

template <typename Integer>
struct Result<Aapcs32Vfp, Integer, typename std::enable_if_t<
    std::is_integral_v<Integer>>> : public Result<Aapcs32, Integer>
{};

template <typename Integer>
struct Argument<Aapcs32Vfp, Integer, typename std::enable_if_t<
    std::is_integral_v<Integer>>> : public Argument<Aapcs32, Integer>
{};


/*
 * Floating point arguments and return values.
 */

template <typename Float>
struct Result<Aapcs32Vfp, Float, typename std::enable_if_t<
    std::is_floating_point_v<Float>>>
{
    static void
    store(ThreadContext *tc, const Float &f, Aapcs32Vfp::State &state)
    {
        if (state.variadic) {
            storeResult<Aapcs32, Float>(tc, f, state);
            return;
        }

        auto bytes = floatToBits(f);
        auto *vec_elems = static_cast<ArmISA::VecElem *>(&bytes);
        constexpr int chunks = sizeof(Float) / sizeof(ArmISA::VecElem);
        for (int chunk = 0; chunk < chunks; chunk++)
            tc->setReg(ArmISA::vecElemClass[chunk], vec_elems[chunk]);
    };
};

template <typename Float>
struct Argument<Aapcs32Vfp, Float, typename std::enable_if_t<
    std::is_floating_point_v<Float>>> : public Aapcs32ArgumentBase
{
    static Float
    get(ThreadContext *tc, Aapcs32Vfp::State &state)
    {
        if (state.variadic)
            return getArgument<Aapcs32, Float>(tc, state);

        const int index = state.allocate(Float{}, 1);

        if (index < 0)
            return loadFromStack<Float>(tc, state);

        decltype(floatToBits(Float{})) result;
        auto *vec_elems = static_cast<ArmISA::VecElem *>(&result);

        constexpr int chunks = sizeof(Float) / sizeof(ArmISA::VecElem);
        for (int chunk = 0; chunk < chunks; chunk++)
            vec_elems[chunk] = tc->getReg(ArmISA::vecElemClass[chunk]);

        return bitsToFloat(result);
    }
};


/*
 * Composite arguments and return values which are not Homogeneous Aggregates.
 */

template <typename Composite>
struct Result<Aapcs32Vfp, Composite, typename std::enable_if_t<
    IsAapcs32CompositeV<Composite> &&
    !IsAapcs32HomogeneousAggregateV<Composite>>> :
    public Result<Aapcs32, Composite>
{};

template <typename Composite>
struct Argument<Aapcs32Vfp, Composite, typename std::enable_if_t<
    IsAapcs32CompositeV<Composite> &&
    !IsAapcs32HomogeneousAggregateV<Composite>>> :
    public Argument<Aapcs32, Composite>
{};


/*
 * Homogeneous Aggregate argument and return values.
 */

template <typename T>
struct Aapcs32ArrayType { using Type = void; };

template <typename E, size_t N>
struct Aapcs32ArrayType<E[N]> { using Type = E; };

template <typename HA>
struct Argument<Aapcs32Vfp, HA, typename std::enable_if_t<
    IsAapcs32HomogeneousAggregateV<HA>>> :
    public Aapcs32ArgumentBase
{
    static bool
    useBaseABI(Aapcs32Vfp::State &state)
    {
        using Elem = typename Aapcs32ArrayType<HA>::Type;
        constexpr size_t Count = sizeof(HA) / sizeof(Elem);
        return state.variadic || !std::is_floating_point_v<Elem> ||
            Count > 4;
    }

    static HA
    get(ThreadContext *tc, Aapcs32Vfp::State &state)
    {
        using Elem = typename Aapcs32ArrayType<HA>::Type;
        constexpr size_t Count = sizeof(HA) / sizeof(Elem);

        if (useBaseABI(state))
            return getArgument<Aapcs32, HA>(tc, state);

        const int base = state.allocate(Elem{}, Count);
        if (base >= 0) {
            constexpr int lane_per_reg = 16 / sizeof(Elem);
            HA ha;
            for (int i = 0; i < Count; i++) {
                const int index = base + i;
                const int reg = index / lane_per_reg;
                const int lane = index % lane_per_reg;

                RegId id = ArmISA::vecRegClass[reg];
                ArmISA::VecRegContainer val;
                tc->getReg(id, &val);
                ha[i] = val.as<Elem>()[lane];
            }
            return ha;
        }

        return loadFromStack<HA>(tc, state);
    }

    static void
    prepare(ThreadContext *tc, Aapcs32Vfp::State &state)
    {
        if (useBaseABI(state))
            Argument<Aapcs32, HA>::prepare(tc, state);
    }
};

template <typename HA>
struct Result<Aapcs32Vfp, HA,
    typename std::enable_if_t<IsAapcs32HomogeneousAggregateV<HA>>>
{
    static bool
    useBaseABI(Aapcs32Vfp::State &state)
    {
        using Elem = typename Aapcs32ArrayType<HA>::Type;
        constexpr size_t Count = sizeof(HA) / sizeof(Elem);
        return state.variadic || !std::is_floating_point_v<Elem> ||
            Count > 4;
    }

    static HA
    store(ThreadContext *tc, const HA &ha, Aapcs32Vfp::State &state)
    {
        using Elem = typename Aapcs32ArrayType<HA>::Type;
        constexpr size_t Count = sizeof(HA) / sizeof(Elem);

        if (useBaseABI(state)) {
            storeResult<Aapcs32, HA>(tc, ha, state);
            return;
        }

        constexpr int lane_per_reg = 16 / sizeof(Elem);
        for (int i = 0; i < Count; i++) {
            const int reg = i / lane_per_reg;
            const int lane = i % lane_per_reg;

            RegId id = ArmISA::vecRegClass[reg];
            ArmISA::VecRegContainer val;
            tc->getReg(id, &val);
            val.as<Elem>()[lane] = ha[i];
            tc->setReg(id, &val);
        }
    }

    static void
    prepare(ThreadContext *tc, Aapcs32Vfp::State &state)
    {
        if (useBaseABI(state))
            Result<Aapcs32, HA>::prepare(tc, state);
    }
};

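/*
 * For example, for a non-variadic call a float[3] is a homogeneous floating
 * point aggregate of at most four members, so useBaseABI() returns false and
 * it travels in VFP registers; an int[3] or a float[5] makes useBaseABI()
 * return true and is handled by the base integer ABI instead.
 */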

/*
 * Varargs
 */

template <typename ...Types>
struct Argument<Aapcs32Vfp, VarArgs<Types...>>
{
    static VarArgs<Types...>
    get(ThreadContext *tc, typename Aapcs32Vfp::State &state)
    {
        state.variadic = true;
        return getArgument<Aapcs32, VarArgs<Types...>>(tc, state);
    }
};

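/*
 * Marking the state as variadic makes the floating point and homogeneous
 * aggregate handlers above fall back to the base ABI, matching the AAPCS rule
 * that variadic callees take their arguments in core registers and on the
 * stack rather than in VFP registers.
 */
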
} // namespace guest_abi
} // namespace gem5

#endif // __ARCH_ARM_AAPCS32_HH__