#ifndef __ARCH_ARM_AAPCS32_HH__
#define __ARCH_ARM_AAPCS32_HH__

// Aapcs32::State constructor: the next stacked argument address (nsaa)
// starts at the current stack pointer.
State(const ThreadContext *tc) : nsaa(tc->getReg(ArmISA::int_reg::Spx)) {}
template <typename T, typename Enabled=void>
struct IsAapcs32Composite : public std::false_type {};

// An AAPCS32 composite type is any array, class, or union type. VarArgs is
// excluded since it is handled specially rather than as a normal argument.
template <typename T>
struct IsAapcs32Composite<T, std::enable_if_t<
    (std::is_array_v<T> || std::is_class_v<T> || std::is_union_v<T>) &&
    !IsVarArgsV<T>
    >> : public std::true_type
{};
template <typename T, std::size_t count, typename Enabled=void>
using Aapcs32HomogeneousAggregate = T[count];
// Arrays are the only kind of homogeneous aggregate these templates can
// detect, so an array of a single element type is treated as one.
template <typename E, size_t N>
struct IsAapcs32HomogeneousAggregate<E[N]> : public std::true_type {};
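// Illustration (a sketch, not part of this header) of how these traits
// classify some ordinary C++ types; "Coord" is a hypothetical example type.
//
//     struct Coord { int x, y; };
//     static_assert(IsAapcs32CompositeV<Coord>);    // class type
//     static_assert(IsAapcs32CompositeV<int[2]>);   // array type
//     static_assert(!IsAapcs32CompositeV<int>);     // scalars are not composite
//     static_assert(IsAapcs32HomogeneousAggregateV<float[4]>);  // HA of floats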
// Base class shared by the argument specializations below; it provides the
// common "load this argument from the stack" path.
struct Aapcs32ArgumentBase
{
    template <typename T>
    static T
    loadFromStack(ThreadContext *tc, Aapcs32::State &state)
    {
        state.stackUsed = true;

        // Stacked arguments are at least 4-byte aligned and their size is
        // rounded up to a multiple of 4.
        size_t align = std::max<size_t>(4, alignof(T));
        size_t size = roundUp(sizeof(T), 4);
        // ... (align state.nsaa, read the value from memory there, and
        // advance nsaa past it)
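        // Worked example (illustrative): a struct with sizeof 6 and alignof 2
        // gets align = max(4, 2) = 4 and size = roundUp(6, 4) = 8, so it is
        // read from a 4-byte aligned nsaa and nsaa then advances by 8 bytes.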
// Results narrower than 32 bits are widened into r0 (sign-extended if signed).
template <typename Integer>
struct Result<Aapcs32, Integer, typename std::enable_if_t<
    std::is_integral_v<Integer> && (sizeof(Integer) < sizeof(uint32_t))>>
{
    static void store(ThreadContext *tc, const Integer &i)
    {
        uint32_t val = std::is_signed_v<Integer> ?
            sext<sizeof(Integer) * 8>(i) : i;
        // ... (write val to r0)
// 32-bit integer results go directly into r0.
template <typename Integer>
struct Result<Aapcs32, Integer, typename std::enable_if_t<
    std::is_integral_v<Integer> && (sizeof(Integer) == sizeof(uint32_t))>>
{   /* ... (store writes the value to r0) */ };
// 64-bit integer results are split across the r0/r1 pair, with the halves
// ordered according to the guest byte order.
template <typename Integer>
struct Result<Aapcs32, Integer, typename std::enable_if_t<
    std::is_integral_v<Integer> && (sizeof(Integer) == sizeof(uint64_t))>>
{   /* ... (store writes the low and high halves to r0 and r1) */ };
// Arguments that fit in 32 bits take the next core register (r0-r3) if any is left.
template <typename Integer>
struct Argument<Aapcs32, Integer, typename std::enable_if_t<
    std::is_integral_v<Integer> && (sizeof(Integer) <= sizeof(uint32_t))
    >> : public Aapcs32ArgumentBase
{
    static Integer get(ThreadContext *tc, Aapcs32::State &state)
    {
        // ... (use intRegClass[state.ncrn++] while registers remain)
        return loadFromStack<Integer>(tc, state);
    }
};

// Wider integer arguments take a core register pair when one is available.
template <typename Integer>
struct Argument<Aapcs32, Integer, typename std::enable_if_t<
    std::is_integral_v<Integer> && (sizeof(Integer) > sizeof(uint32_t))
    >> : public Aapcs32ArgumentBase
{
    static Integer get(ThreadContext *tc, Aapcs32::State &state)
    {
        // 8-byte aligned values start at an even numbered register.
        if (alignof(Integer) == 8 && (state.ncrn % 2))
            state.ncrn++;

        if (sizeof(Integer) == sizeof(uint64_t) &&
                /* ... a full register pair is still free ... */) {
            // ... (read the low and high 32-bit halves from consecutive
            // core registers, in guest byte order)
            return low | (high << 32);
        }

        return loadFromStack<Integer>(tc, state);
    }
};
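// Illustration (a sketch, not part of this header): for a hypothetical call
//
//     void f(int a, uint64_t b);
//
// a goes in r0, r1 is skipped so that b starts at an even register, and b
// occupies the r2/r3 pair; a further integer argument would then have to
// come from the stack.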
// Floating point results reuse the integer rules: the value's bit pattern is
// stored as a 32- or 64-bit integer result.
template <typename Float>
struct Result<Aapcs32, Float, typename std::enable_if_t<
    std::is_floating_point_v<Float>>>
// Floating point arguments are fetched as same-sized integers and reinterpreted.
template <typename Float>
struct Argument<Aapcs32, Float, typename std::enable_if_t<
    std::is_floating_point_v<Float>>> : public Aapcs32ArgumentBase
{
    static Float get(ThreadContext *tc, Aapcs32::State &state)
    {
        if (sizeof(Float) == sizeof(uint32_t)) {
            // ... (bitsToFloat32 of a 32-bit argument; doubles use
            // bitsToFloat64 of a 64-bit argument)
// Composite results that fit in 32 bits come back in r0; larger ones are
// written to memory at an address the caller provides.
template <typename Composite>
struct Result<Aapcs32, Composite, typename std::enable_if_t<
    IsAapcs32CompositeV<Composite>>>
{
    static void
    store(ThreadContext *tc, const Composite &composite, Aapcs32::State &state)
    {
        if (sizeof(Composite) <= sizeof(uint32_t)) {
            Composite cp = htog(composite, ArmISA::byteOrder(tc));
            uint32_t val;
            memcpy((void *)&val, (void *)&cp, sizeof(Composite));
            // ... (write val to r0)
        } else {
            // ... (write the value to memory at state.retAddr)
        }
    }

    static void
    prepare(ThreadContext *tc, Aapcs32::State &state)
    {
        // Larger results are returned indirectly through an address the
        // caller passes in the first core register.
        if (sizeof(Composite) > sizeof(uint32_t)) {
            // ... (capture that address in state.retAddr)
        }
    }
};
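// Illustration (a sketch, not part of this header): a hypothetical result
// type such as
//
//     struct Stats { uint32_t hits, misses, total; };   // 12 bytes
//
// does not fit in r0, so the caller passes the address of a result buffer in
// r0 and prepare() records it in state.retAddr; a 4-byte struct would
// instead come back packed into r0.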
// Composite arguments are passed in core registers when they fit, possibly
// split between the last registers and the stack, or entirely on the stack.
template <typename Composite>
struct Argument<Aapcs32, Composite, typename std::enable_if_t<
    IsAapcs32CompositeV<Composite>>> :
    public Aapcs32ArgumentBase
{
    static Composite
    get(ThreadContext *tc, Aapcs32::State &state)
    {
        size_t bytes = sizeof(Composite);
        using Chunk = uint32_t;

        const int chunk_size = sizeof(Chunk);
        const int regs = (bytes + chunk_size - 1) / chunk_size;

        // Case 1: the whole value fits in one core register.
        if (bytes <= chunk_size) {
            // ... (if a register remains, reinterpret its contents)
            alignas(alignof(Composite)) uint32_t val =
                /* next core register */;
            // ...
        }

        // 8-byte aligned composites start in an even numbered register.
        if (alignof(Composite) == 8 && (state.ncrn % 2))
            state.ncrn++;

        // Case 2: the value fits entirely in the remaining core registers;
        // copy it out register by register.
        if (/* ... enough registers remain ... */) {
            alignas(alignof(Composite)) uint8_t buf[bytes];
            for (int i = 0; i < regs; i++) {
                // val = next core register, converted to guest byte order
                size_t to_copy = std::min<size_t>(bytes, chunk_size);
                memcpy(buf + i * chunk_size, &val, to_copy);
                bytes -= to_copy;
            }
            // ... (reinterpret buf as a Composite and return it)
        }

        // Case 3: the value is split between the last registers and the
        // stack; copy the register part, then read the rest from memory.
        if (/* ... some registers remain and the stack is untouched ... */) {
            alignas(alignof(Composite)) uint8_t buf[bytes];
            // ... (per register: to_copy = std::min<size_t>(bytes, chunk_size),
            //      memcpy into buf; then readBlob(state.nsaa, buf, bytes)
            //      for what is left)
            state.stackUsed = true;
            // ...
        }

        // Otherwise the value lives entirely on the stack.
        return loadFromStack<Composite>(tc, state);
    }
};
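// Illustration (a sketch, not part of this header): for a hypothetical call
//
//     struct Pair { int a, b; };        // 8 bytes, 4-byte aligned
//     void f(int x, struct Pair p);
//
// x goes in r0 and the two words of p in r1 and r2. If only r3 were left,
// the struct would be split between r3 and the stack, which is what the
// register/stack copy path above implements.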
// The VFP variant of the ABI. Its state additionally tracks whether the call
// is variadic and which s/d registers have been allocated to arguments.
struct Aapcs32Vfp : public Aapcs32
{
    struct State : public Aapcs32::State
    {
        bool variadic = false; // Set once a VarArgs argument is seen.

        // Which single (s0-s15) and double (d0-d7) precision registers have
        // been handed out. The two views alias each other.
        std::array<bool, 16> s;
        std::array<bool, 8> d;

        // Find `count` consecutive free registers of the requested kind,
        // mark them (and their aliases) used, and return the index of the
        // first one, or -1 if no such run exists.
        int
        allocate(float, int count)
        {
            int last = 0;
            for (int i = 0; i <= s.size() - count; i++) {
                if (s[i]) {
                    last = i + 1;
                    continue;
                }
                if (i - last + 1 == count) {
                    for (int j = 0; j < count; j++) {
                        s[last + j] = true;
                        // Mark the double register aliasing this s register.
                        d[(last + j) / 2] = true;
                    }
                    return last;
                }
            }
            return -1;
        }

        int
        allocate(double, int count)
        {
            int last = 0;
            for (int i = 0; i <= d.size() - count; i++) {
                if (d[i]) {
                    last = i + 1;
                    continue;
                }
                if (i - last + 1 == count) {
                    for (int j = 0; j < count; j++) {
                        d[last + j] = true;
                        // Mark both s registers aliasing this d register.
                        s[(last + j) * 2] = true;
                        s[(last + j) * 2 + 1] = true;
                    }
                    return last;
                }
            }
            return -1;
        }
    };
};
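// Illustration (a sketch, not part of this header) of the back-filling this
// first-fit allocation produces, for a hypothetical non-variadic call
//
//     void f(float a, double b, float c);
//
// a takes s0, b takes d1 (s2/s3, since d0 aliases the occupied s0), and c
// back-fills the still-free s1 -- matching the AAPCS VFP argument rules.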
// Integer arguments and results are handled exactly as in the base ABI.
template <typename Integer>
struct Result<Aapcs32Vfp, Integer, typename std::enable_if_t<
    std::is_integral_v<Integer>>> :
    public Result<Aapcs32, Integer>
{};

template <typename Integer>
struct Argument<Aapcs32Vfp, Integer, typename std::enable_if_t<
    std::is_integral_v<Integer>>> :
    public Argument<Aapcs32, Integer>
{};
// Floating point results: variadic calls use the base (integer) rules;
// otherwise the value is written into the VFP return register.
template <typename Float>
struct Result<Aapcs32Vfp, Float, typename std::enable_if_t<
    std::is_floating_point_v<Float>>>
{
    static void store(ThreadContext *tc, const Float &f, Aapcs32Vfp::State &state)
    {
        if (state.variadic) {
            // ... (store it the way the base ABI stores an integer result)
        }
        for (int chunk = 0; chunk < chunks; chunk++)
            // ... (copy the value into the VFP register 32 bits at a time)
    }
};

// Floating point arguments claim a free s/d register when possible; variadic
// calls and overflow arguments fall back to the base ABI or the stack.
template <typename Float>
struct Argument<Aapcs32Vfp, Float, typename std::enable_if_t<
    std::is_floating_point_v<Float>>> : public Aapcs32ArgumentBase
{
    static Float get(ThreadContext *tc, Aapcs32Vfp::State &state)
    {
        // ... (fallback path:)
        return loadFromStack<Float>(tc, state);
        // ... (register path:)
        for (int chunk = 0; chunk < chunks; chunk++)
            // ... (read the value back 32 bits at a time)
    }
};
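// Illustration (a sketch, not part of this header): for a hypothetical
// non-variadic call
//
//     double g(float a, double b);
//
// the VFP variant passes a in s0 and b in d1, and returns the result in d0;
// the base Aapcs32 variant would instead pass a in r0, b in r2/r3, and
// return the result in r0/r1.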
// Composite types that are not homogeneous aggregates are handled exactly as
// in the base ABI.
template <typename Composite>
struct Result<Aapcs32Vfp, Composite, typename std::enable_if_t<
    IsAapcs32CompositeV<Composite> &&
    !IsAapcs32HomogeneousAggregateV<Composite>>> :
    public Result<Aapcs32, Composite>
{};

template <typename Composite>
struct Argument<Aapcs32Vfp, Composite, typename std::enable_if_t<
    IsAapcs32CompositeV<Composite> &&
    !IsAapcs32HomogeneousAggregateV<Composite>>> :
    public Argument<Aapcs32, Composite>
{};
// Homogeneous aggregates: arrays E[N] of a single floating point type.
template <typename E, size_t N>
// ... (helper used to recover the element type E and count N of such a type)

// HA arguments are spread across consecutive VFP register lanes when the
// call is not variadic and enough registers are free; otherwise they follow
// the base ABI rules. (Elem below is the element type of HA, obtained via
// the helper above.)
template <typename HA>
struct Argument<Aapcs32Vfp, HA, typename std::enable_if_t<
    IsAapcs32HomogeneousAggregateV<HA>>> :
    public Aapcs32ArgumentBase
{
    static bool
    useBaseABI(Aapcs32Vfp::State &state)
    {
        constexpr size_t Count = sizeof(HA) / sizeof(Elem);
        // Variadic calls, non-floating-point elements, and aggregates with
        // too many elements are not VFP candidates.
        return state.variadic || !std::is_floating_point_v<Elem> ||
            /* ... Count is too large ... */;
    }

    static HA
    get(ThreadContext *tc, Aapcs32Vfp::State &state)
    {
        constexpr size_t Count = sizeof(HA) / sizeof(Elem);

        if (useBaseABI(state)) {
            // ... (hand off to the plain Aapcs32 composite handling)
        }

        const int base = state.allocate(Elem{}, Count);
        if (base >= 0) {
            // Each 128-bit vector register holds this many elements.
            constexpr int lane_per_reg = 16 / sizeof(Elem);
            HA ha;
            for (int i = 0; i < Count; i++) {
                // index is the allocated register lane slot for element i.
                const int reg = index / lane_per_reg;
                const int lane = index % lane_per_reg;
                // ... (read vecRegClass[reg] and pick out the right lane)
                ha[i] = val.as<Elem>()[lane];
            }
            return ha;
        }

        return loadFromStack<HA>(tc, state);
    }

    static void
    prepare(ThreadContext *tc, Aapcs32Vfp::State &state)
    {
        if (useBaseABI(state)) {
            // ... (let the base ABI's argument handling prepare instead)
        }
    }
};
// HA results mirror the argument handling: when the VFP registers are usable
// the elements are written into consecutive register lanes, otherwise the
// base ABI's composite return rules apply. (Elem is the element type of HA,
// as above.)
template <typename HA>
struct Result<Aapcs32Vfp, HA,
    typename std::enable_if_t<IsAapcs32HomogeneousAggregateV<HA>>>
{
    static bool
    useBaseABI(Aapcs32Vfp::State &state)
    {
        constexpr size_t Count = sizeof(HA) / sizeof(Elem);
        return state.variadic || !std::is_floating_point_v<Elem> ||
            /* ... Count is too large ... */;
    }

    static HA
    store(ThreadContext *tc, const HA &ha, Aapcs32Vfp::State &state)
    {
        constexpr size_t Count = sizeof(HA) / sizeof(Elem);

        if (useBaseABI(state)) {
            // ... (return it the way the base ABI returns a composite)
        }

        constexpr int lane_per_reg = 16 / sizeof(Elem);
        for (int i = 0; i < Count; i++) {
            const int reg = i / lane_per_reg;
            const int lane = i % lane_per_reg;
            // ... (read-modify-write vecRegClass[reg]:)
            val.as<Elem>()[lane] = ha[i];
            // ...
        }
        // ...
    }

    static void
    prepare(ThreadContext *tc, Aapcs32Vfp::State &state)
    {
        if (useBaseABI(state)) {
            // ... (let the base ABI's result handling prepare instead)
        }
    }
};
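// Illustration (a sketch, not part of this header): an argument or result of
// type float[4] (the only kind of HA these templates detect) is passed and
// returned in s0-s3 under the VFP variant, while an aggregate with more than
// four elements falls back to the base rules (core registers and/or stack).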
// A VarArgs argument marks the call variadic; later FP arguments then follow
// the base (core register) rules.
template <typename ...Types>
struct Argument<Aapcs32Vfp, VarArgs<Types...>>
{
    static VarArgs<Types...>
    get(ThreadContext *tc, typename Aapcs32Vfp::State &state)
    {
        state.variadic = true;
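// Illustration (a sketch, not part of this header): for a variadic call such
// as printf("%f\n", 3.14), the double is passed by the base rules -- in the
// r2/r3 core register pair, after the format string pointer in r0 -- rather
// than in a d register, which is exactly what the variadic flag arranges.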