[PATCH v5 40/45] linux-user/aarch64: Implement SME signal handling
From: Richard Henderson
Subject: [PATCH v5 40/45] linux-user/aarch64: Implement SME signal handling
Date: Wed, 6 Jul 2022 13:54:06 +0530
Set the SM bit in the SVE record on signal delivery, create the ZA record.
Restore SM and ZA state according to the records present on return.
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
linux-user/aarch64/signal.c | 167 +++++++++++++++++++++++++++++++++---
1 file changed, 154 insertions(+), 13 deletions(-)
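
As a worked illustration (not part of the patch itself): with TARGET_SVE_VQ_BYTES = 16 and the 16-byte target_za_context header added below, the ZA record sizes for a streaming vector length of 256 bits (vq = 2, vl = 32 bytes) come out as:

    TARGET_ZA_SIG_REGS_OFFSET        = 16
    TARGET_ZA_SIG_ZAV_OFFSET(2, N)   = 16 + 2 * 16 * N = 16 + 32 * N
    TARGET_ZA_SIG_CONTEXT_SIZE(2)    = 16 + 32 * (2 * 16) = 1040

That is, the payload is one vl-byte vector per horizontal slice, vl x vl = 1024 bytes in all, following the header; TARGET_ZA_SIG_CONTEXT_SIZE(0) is just the bare 16-byte header, used for the empty record that clears PSTATE.ZA.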
diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
index 22d0b8b4ec..6a2c6e06d2 100644
--- a/linux-user/aarch64/signal.c
+++ b/linux-user/aarch64/signal.c
@@ -104,6 +104,22 @@ struct target_sve_context {
#define TARGET_SVE_SIG_FLAG_SM 1
+#define TARGET_ZA_MAGIC 0x54366345
+
+struct target_za_context {
+ struct target_aarch64_ctx head;
+ uint16_t vl;
+ uint16_t reserved[3];
+ /* The actual ZA data immediately follows. */
+};
+
+#define TARGET_ZA_SIG_REGS_OFFSET \
+ QEMU_ALIGN_UP(sizeof(struct target_za_context), TARGET_SVE_VQ_BYTES)
+#define TARGET_ZA_SIG_ZAV_OFFSET(VQ, N) \
+ (TARGET_ZA_SIG_REGS_OFFSET + (VQ) * TARGET_SVE_VQ_BYTES * (N))
+#define TARGET_ZA_SIG_CONTEXT_SIZE(VQ) \
+ TARGET_ZA_SIG_ZAV_OFFSET(VQ, VQ * TARGET_SVE_VQ_BYTES)
+
struct target_rt_sigframe {
struct target_siginfo info;
struct target_ucontext uc;
@@ -176,9 +192,9 @@ static void target_setup_end_record(struct target_aarch64_ctx *end)
}
static void target_setup_sve_record(struct target_sve_context *sve,
- CPUARMState *env, int vq, int size)
+ CPUARMState *env, int size)
{
- int i, j;
+ int i, j, vq = sve_vq(env);
memset(sve, 0, sizeof(*sve));
__put_user(TARGET_SVE_MAGIC, &sve->head.magic);
@@ -207,6 +223,35 @@ static void target_setup_sve_record(struct target_sve_context *sve,
}
}
+static void target_setup_za_record(struct target_za_context *za,
+ CPUARMState *env, int size)
+{
+ int vq = sme_vq(env);
+ int vl = vq * TARGET_SVE_VQ_BYTES;
+ int i, j;
+
+ memset(za, 0, sizeof(*za));
+ __put_user(TARGET_ZA_MAGIC, &za->head.magic);
+ __put_user(size, &za->head.size);
+ __put_user(vl, &za->vl);
+
+ if (size == TARGET_ZA_SIG_CONTEXT_SIZE(0)) {
+ return;
+ }
+ assert(size == TARGET_ZA_SIG_CONTEXT_SIZE(vq));
+
+ /*
+ * Note that ZA vectors are stored as a byte stream,
+ * with each byte element at a subsequent address.
+ */
+ for (i = 0; i < vl; ++i) {
+ uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i);
+ for (j = 0; j < vq * 2; ++j) {
+ __put_user_e(env->zarray[i].d[j], z + j, le);
+ }
+ }
+}
+
static void target_restore_general_frame(CPUARMState *env,
struct target_rt_sigframe *sf)
{
@@ -252,16 +297,28 @@ static void target_restore_fpsimd_record(CPUARMState *env,
static bool target_restore_sve_record(CPUARMState *env,
struct target_sve_context *sve,
- int size)
+ int size, int *svcr)
{
- int i, j, vl, vq;
+ int i, j, vl, vq, flags;
+ bool sm;
- if (!cpu_isar_feature(aa64_sve, env_archcpu(env))) {
+ __get_user(vl, &sve->vl);
+ __get_user(flags, &sve->flags);
+
+ sm = flags & TARGET_SVE_SIG_FLAG_SM;
+
+ /* The cpu must support Streaming or Non-streaming SVE. */
+ if (sm
+ ? !cpu_isar_feature(aa64_sme, env_archcpu(env))
+ : !cpu_isar_feature(aa64_sve, env_archcpu(env))) {
return false;
}
- __get_user(vl, &sve->vl);
- vq = sve_vq(env);
+ /*
+ * Note that we cannot use sve_vq() because that depends on the
+ * current setting of PSTATE.SM, not the state to be restored.
+ */
+ vq = sve_vqm1_for_el_sm(env, 0, sm) + 1;
/* Reject mismatched VL. */
if (vl != vq * TARGET_SVE_VQ_BYTES) {
@@ -278,6 +335,8 @@ static bool target_restore_sve_record(CPUARMState *env,
return false;
}
+ *svcr = FIELD_DP64(*svcr, SVCR, SM, sm);
+
/*
* Note that SVE regs are stored as a byte stream, with each byte element
* at a subsequent address. This corresponds to a little-endian load
@@ -304,15 +363,57 @@ static bool target_restore_sve_record(CPUARMState *env,
return true;
}
+static bool target_restore_za_record(CPUARMState *env,
+ struct target_za_context *za,
+ int size, int *svcr)
+{
+ int i, j, vl, vq;
+
+ if (!cpu_isar_feature(aa64_sme, env_archcpu(env))) {
+ return false;
+ }
+
+ __get_user(vl, &za->vl);
+ vq = sme_vq(env);
+
+ /* Reject mismatched VL. */
+ if (vl != vq * TARGET_SVE_VQ_BYTES) {
+ return false;
+ }
+
+ /* Accept empty record -- used to clear PSTATE.ZA. */
+ if (size <= TARGET_ZA_SIG_CONTEXT_SIZE(0)) {
+ return true;
+ }
+
+ /* Reject non-empty but incomplete record. */
+ if (size < TARGET_ZA_SIG_CONTEXT_SIZE(vq)) {
+ return false;
+ }
+
+ *svcr = FIELD_DP64(*svcr, SVCR, ZA, 1);
+
+ for (i = 0; i < vl; ++i) {
+ uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i);
+ for (j = 0; j < vq * 2; ++j) {
+ __get_user_e(env->zarray[i].d[j], z + j, le);
+ }
+ }
+ return true;
+}
+
static int target_restore_sigframe(CPUARMState *env,
struct target_rt_sigframe *sf)
{
struct target_aarch64_ctx *ctx, *extra = NULL;
struct target_fpsimd_context *fpsimd = NULL;
struct target_sve_context *sve = NULL;
+ struct target_za_context *za = NULL;
uint64_t extra_datap = 0;
bool used_extra = false;
int sve_size = 0;
+ int za_size = 0;
+ int svcr = 0;
target_restore_general_frame(env, sf);
@@ -350,6 +451,14 @@ static int target_restore_sigframe(CPUARMState *env,
sve_size = size;
break;
+ case TARGET_ZA_MAGIC:
+ if (za || size < sizeof(struct target_za_context)) {
+ goto err;
+ }
+ za = (struct target_za_context *)ctx;
+ za_size = size;
+ break;
+
case TARGET_EXTRA_MAGIC:
if (extra || size != sizeof(struct target_extra_context)) {
goto err;
@@ -381,9 +490,16 @@ static int target_restore_sigframe(CPUARMState *env,
}
/* SVE data, if present, overwrites FPSIMD data. */
- if (sve && !target_restore_sve_record(env, sve, sve_size)) {
+ if (sve && !target_restore_sve_record(env, sve, sve_size, &svcr)) {
goto err;
}
+ if (za && !target_restore_za_record(env, za, za_size, &svcr)) {
+ goto err;
+ }
+ if (env->svcr != svcr) {
+ env->svcr = svcr;
+ arm_rebuild_hflags(env);
+ }
unlock_user(extra, extra_datap, 0);
return 0;
@@ -451,7 +567,8 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
.total_size = offsetof(struct target_rt_sigframe,
uc.tuc_mcontext.__reserved),
};
- int fpsimd_ofs, fr_ofs, sve_ofs = 0, vq = 0, sve_size = 0;
+ int fpsimd_ofs, fr_ofs, sve_ofs = 0, za_ofs = 0;
+ int sve_size = 0, za_size = 0;
struct target_rt_sigframe *frame;
struct target_rt_frame_record *fr;
abi_ulong frame_addr, return_addr;
@@ -461,11 +578,20 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
&layout);
/* SVE state needs saving only if it exists. */
- if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
- vq = sve_vq(env);
- sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
+ if (cpu_isar_feature(aa64_sve, env_archcpu(env)) ||
+ cpu_isar_feature(aa64_sme, env_archcpu(env))) {
+ sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(sve_vq(env)), 16);
sve_ofs = alloc_sigframe_space(sve_size, &layout);
}
+ if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
+ /* ZA state needs saving only if it is enabled. */
+ if (FIELD_EX64(env->svcr, SVCR, ZA)) {
+ za_size = TARGET_ZA_SIG_CONTEXT_SIZE(sme_vq(env));
+ } else {
+ za_size = TARGET_ZA_SIG_CONTEXT_SIZE(0);
+ }
+ za_ofs = alloc_sigframe_space(za_size, &layout);
+ }
if (layout.extra_ofs) {
/* Reserve space for the extra end marker. The standard end marker
@@ -512,7 +638,10 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
target_setup_end_record((void *)frame + layout.extra_end_ofs);
}
if (sve_ofs) {
- target_setup_sve_record((void *)frame + sve_ofs, env, vq, sve_size);
+ target_setup_sve_record((void *)frame + sve_ofs, env, sve_size);
+ }
+ if (za_ofs) {
+ target_setup_za_record((void *)frame + za_ofs, env, za_size);
}
/* Set up the stack frame for unwinding. */
@@ -536,6 +665,18 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
env->btype = 2;
}
+ /*
+ * Invoke the signal handler with both SM and ZA disabled.
+ * When clearing SM, ResetSVEState, per SMSTOP.
+ */
+ if (FIELD_EX64(env->svcr, SVCR, SM)) {
+ arm_reset_sve_state(env);
+ }
+ if (env->svcr) {
+ env->svcr = 0;
+ arm_rebuild_hflags(env);
+ }
+
if (info) {
tswap_siginfo(&frame->info, info);
env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
--
2.34.1
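
For completeness, here is a guest-side sketch of how the records written above can be inspected from within a signal handler. This is an assumption-laden illustration, not part of the patch: it assumes the ZA_MAGIC and struct za_context definitions from Linux's <asm/sigcontext.h> (kernel headers 5.19 or newer), ignores the EXTRA_MAGIC continuation case for brevity, and calls printf from a handler, which is not async-signal-safe. Note that, per the patch, the handler itself runs with PSTATE.SM and PSTATE.ZA cleared.

#include <signal.h>
#include <stdio.h>
#include <ucontext.h>
#include <asm/sigcontext.h>

static void handler(int sig, siginfo_t *si, void *ucontext)
{
    ucontext_t *uc = ucontext;
    struct _aarch64_ctx *ctx = (struct _aarch64_ctx *)uc->uc_mcontext.__reserved;

    /* Records are laid out back to back; a zero magic terminates the list. */
    while (ctx->magic != 0) {
        if (ctx->magic == ZA_MAGIC) {
            struct za_context *za = (struct za_context *)ctx;
            /* An empty record (header only) means PSTATE.ZA was clear. */
            printf("ZA record: vl=%u, size=%u\n", za->vl, za->head.size);
        }
        ctx = (struct _aarch64_ctx *)((char *)ctx + ctx->size);
    }
}

int main(void)
{
    struct sigaction sa = { .sa_sigaction = handler, .sa_flags = SA_SIGINFO };
    sigaction(SIGUSR1, &sa, NULL);
    raise(SIGUSR1);
    return 0;
}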