Skip to content

Commit 29e8910

Browse files
Marc Zyngier authored and Will Deacon (willdeacon) committed
KVM: arm64: Simplify handling of ARCH_WORKAROUND_2
Owing to the fact that the host kernel is always mitigated, we can drastically simplify the WA2 handling by keeping the mitigation state ON when entering the guest. This means the guest is either unaffected or not mitigated. This results in a nice simplification of the mitigation space, and the removal of a lot of code that was never really used anyway. Signed-off-by: Marc Zyngier <maz@kernel.org> Signed-off-by: Will Deacon <will@kernel.org>
1 parent c287620 commit 29e8910

14 files changed

Lines changed: 41 additions & 163 deletions

File tree

arch/arm64/include/asm/kvm_asm.h

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -9,9 +9,6 @@
99

1010
#include <asm/virt.h>
1111

12-
#define VCPU_WORKAROUND_2_FLAG_SHIFT 0
13-
#define VCPU_WORKAROUND_2_FLAG (_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
14-
1512
#define ARM_EXIT_WITH_SERROR_BIT 31
1613
#define ARM_EXCEPTION_CODE(x) ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
1714
#define ARM_EXCEPTION_IS_TRAP(x) (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 0 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -383,20 +383,6 @@ static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
383383
return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
384384
}
385385

386-
static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
387-
{
388-
return vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG;
389-
}
390-
391-
static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
392-
bool flag)
393-
{
394-
if (flag)
395-
vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
396-
else
397-
vcpu->arch.workaround_flags &= ~VCPU_WORKAROUND_2_FLAG;
398-
}
399-
400386
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
401387
{
402388
if (vcpu_mode_is_32bit(vcpu)) {

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 0 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -526,23 +526,6 @@ static inline int kvm_map_vectors(void)
526526
}
527527
#endif
528528

529-
DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
530-
531-
static inline int hyp_map_aux_data(void)
532-
{
533-
int cpu, err;
534-
535-
for_each_possible_cpu(cpu) {
536-
u64 *ptr;
537-
538-
ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
539-
err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
540-
if (err)
541-
return err;
542-
}
543-
return 0;
544-
}
545-
546529
#define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr)
547530

548531
/*

arch/arm64/include/uapi/asm/kvm.h

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -242,6 +242,15 @@ struct kvm_vcpu_events {
242242
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL 0
243243
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL 1
244244
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED 2
245+
246+
/*
247+
* Only two states can be presented by the host kernel:
248+
* - NOT_REQUIRED: the guest doesn't need to do anything
249+
* - NOT_AVAIL: the guest isn't mitigated (it can still use SSBS if available)
250+
*
251+
* All the other values are deprecated. The host still accepts all
252+
* values (they are ABI), but will narrow them to the above two.
253+
*/
245254
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 KVM_REG_ARM_FW_REG(2)
246255
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL 0
247256
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN 1

arch/arm64/kernel/cpu_errata.c

Lines changed: 0 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -108,20 +108,6 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
108108

109109
int ssbd_state __read_mostly = ARM64_SSBD_UNKNOWN;
110110

111-
void __init arm64_enable_wa2_handling(struct alt_instr *alt,
112-
__le32 *origptr, __le32 *updptr,
113-
int nr_inst)
114-
{
115-
BUG_ON(nr_inst != 1);
116-
/*
117-
* Only allow mitigation on EL1 entry/exit and guest
118-
* ARCH_WORKAROUND_2 handling if the SSBD state allows it to
119-
* be flipped.
120-
*/
121-
if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
122-
*updptr = cpu_to_le32(aarch64_insn_gen_nop());
123-
}
124-
125111
#ifdef CONFIG_ARM64_ERRATUM_1463225
126112
DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
127113

arch/arm64/kernel/image-vars.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -64,12 +64,10 @@ __efistub__ctype = _ctype;
6464
#define KVM_NVHE_ALIAS(sym) __kvm_nvhe_##sym = sym;
6565

6666
/* Alternative callbacks for init-time patching of nVHE hyp code. */
67-
KVM_NVHE_ALIAS(arm64_enable_wa2_handling);
6867
KVM_NVHE_ALIAS(kvm_patch_vector_branch);
6968
KVM_NVHE_ALIAS(kvm_update_va_mask);
7069

7170
/* Global kernel state accessed by nVHE hyp code. */
72-
KVM_NVHE_ALIAS(arm64_ssbd_callback_required);
7371
KVM_NVHE_ALIAS(kvm_host_data);
7472
KVM_NVHE_ALIAS(kvm_vgic_global_state);
7573

arch/arm64/kvm/arm.c

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1549,10 +1549,6 @@ static int init_hyp_mode(void)
15491549
}
15501550
}
15511551

1552-
err = hyp_map_aux_data();
1553-
if (err)
1554-
kvm_err("Cannot map host auxiliary data: %d\n", err);
1555-
15561552
return 0;
15571553

15581554
out_err:

arch/arm64/kvm/hyp/hyp-entry.S

Lines changed: 0 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -116,33 +116,6 @@ el1_hvc_guest:
116116
ARM_SMCCC_ARCH_WORKAROUND_2)
117117
cbnz w1, el1_trap
118118

119-
alternative_cb arm64_enable_wa2_handling
120-
b wa2_end
121-
alternative_cb_end
122-
get_vcpu_ptr x2, x0
123-
ldr x0, [x2, #VCPU_WORKAROUND_FLAGS]
124-
125-
// Sanitize the argument and update the guest flags
126-
ldr x1, [sp, #8] // Guest's x1
127-
clz w1, w1 // Murphy's device:
128-
lsr w1, w1, #5 // w1 = !!w1 without using
129-
eor w1, w1, #1 // the flags...
130-
bfi x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
131-
str x0, [x2, #VCPU_WORKAROUND_FLAGS]
132-
133-
/* Check that we actually need to perform the call */
134-
hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
135-
cbz x0, wa2_end
136-
137-
mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
138-
smc #0
139-
140-
/* Don't leak data from the SMC call */
141-
mov x3, xzr
142-
wa2_end:
143-
mov x2, xzr
144-
mov x1, xzr
145-
146119
wa_epilogue:
147120
mov x0, xzr
148121
add sp, sp, #16

arch/arm64/kvm/hyp/include/hyp/switch.h

Lines changed: 0 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -479,35 +479,6 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
479479
return false;
480480
}
481481

482-
static inline bool __needs_ssbd_off(struct kvm_vcpu *vcpu)
483-
{
484-
if (!cpus_have_final_cap(ARM64_SPECTRE_V4))
485-
return false;
486-
487-
return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
488-
}
489-
490-
static inline void __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
491-
{
492-
/*
493-
* The host runs with the workaround always present. If the
494-
* guest wants it disabled, so be it...
495-
*/
496-
if (__needs_ssbd_off(vcpu) &&
497-
__hyp_this_cpu_read(arm64_ssbd_callback_required))
498-
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
499-
}
500-
501-
static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
502-
{
503-
/*
504-
* If the guest has disabled the workaround, bring it back on.
505-
*/
506-
if (__needs_ssbd_off(vcpu) &&
507-
__hyp_this_cpu_read(arm64_ssbd_callback_required))
508-
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
509-
}
510-
511482
static inline void __kvm_unexpected_el2_exception(void)
512483
{
513484
unsigned long addr, fixup;

arch/arm64/kvm/hyp/nvhe/switch.c

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -202,17 +202,13 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
202202

203203
__debug_switch_to_guest(vcpu);
204204

205-
__set_guest_arch_workaround_state(vcpu);
206-
207205
do {
208206
/* Jump in the fire! */
209207
exit_code = __guest_enter(vcpu, host_ctxt);
210208

211209
/* And we're baaack! */
212210
} while (fixup_guest_exit(vcpu, &exit_code));
213211

214-
__set_host_arch_workaround_state(vcpu);
215-
216212
__sysreg_save_state_nvhe(guest_ctxt);
217213
__sysreg32_save_state(vcpu);
218214
__timer_disable_traps(vcpu);

0 commit comments

Comments
 (0)