Skip to content

Commit 9ef2b48

Browse files
committed
KVM: arm64: Allow patching EL2 vectors even when KASLR is not enabled
Patching the EL2 exception vectors is integral to the Spectre-v2 workaround, where it can be necessary to execute CPU-specific sequences to nobble the branch predictor before running the hypervisor text proper. Remove the dependency on CONFIG_RANDOMIZE_BASE and allow the EL2 vectors to be patched even when KASLR is not enabled. Fixes: 7a13201 ("KVM: arm64: Replace CONFIG_KVM_INDIRECT_VECTORS with CONFIG_RANDOMIZE_BASE") Reported-by: kernel test robot <lkp@intel.com> Link: https://lore.kernel.org/r/202009221053.Jv1XsQUZ%lkp@intel.com Signed-off-by: Will Deacon <will@kernel.org>
1 parent 31c84d6 commit 9ef2b48

6 files changed

Lines changed: 36 additions & 58 deletions

File tree

arch/arm64/include/asm/kvm_asm.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -99,11 +99,9 @@ DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
9999
#define __kvm_hyp_init CHOOSE_NVHE_SYM(__kvm_hyp_init)
100100
#define __kvm_hyp_vector CHOOSE_HYP_SYM(__kvm_hyp_vector)
101101

102-
#ifdef CONFIG_RANDOMIZE_BASE
103102
extern atomic_t arm64_el2_vector_last_slot;
104103
DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
105104
#define __bp_harden_hyp_vecs CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
106-
#endif
107105

108106
extern void __kvm_flush_vm_context(void);
109107
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 1 addition & 50 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99

1010
#include <asm/page.h>
1111
#include <asm/memory.h>
12+
#include <asm/mmu.h>
1213
#include <asm/cpufeature.h>
1314

1415
/*
@@ -430,7 +431,6 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
430431
return ret;
431432
}
432433

433-
#ifdef CONFIG_RANDOMIZE_BASE
434434
/*
435435
* EL2 vectors can be mapped and rerouted in a number of ways,
436436
* depending on the kernel configuration and CPU present:
@@ -451,12 +451,9 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
451451
* VHE, as we don't have hypervisor-specific mappings. If the system
452452
* is VHE and yet selects this capability, it will be ignored.
453453
*/
454-
#include <asm/mmu.h>
455-
456454
extern void *__kvm_bp_vect_base;
457455
extern int __kvm_harden_el2_vector_slot;
458456

459-
/* This is called on both VHE and !VHE systems */
460457
static inline void *kvm_get_hyp_vector(void)
461458
{
462459
struct bp_hardening_data *data = arm64_get_bp_hardening_data();
@@ -480,52 +477,6 @@ static inline void *kvm_get_hyp_vector(void)
480477
return vect;
481478
}
482479

483-
/* This is only called on a !VHE system */
484-
static inline int kvm_map_vectors(void)
485-
{
486-
/*
487-
* SV2 = ARM64_SPECTRE_V2
488-
* HEL2 = ARM64_HARDEN_EL2_VECTORS
489-
*
490-
* !SV2 + !HEL2 -> use direct vectors
491-
* SV2 + !HEL2 -> use hardened vectors in place
492-
* !SV2 + HEL2 -> allocate one vector slot and use exec mapping
493-
* SV2 + HEL2 -> use hardened vertors and use exec mapping
494-
*/
495-
if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
496-
__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
497-
__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
498-
}
499-
500-
if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
501-
phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
502-
unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
503-
504-
/*
505-
* Always allocate a spare vector slot, as we don't
506-
* know yet which CPUs have a BP hardening slot that
507-
* we can reuse.
508-
*/
509-
__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
510-
BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
511-
return create_hyp_exec_mappings(vect_pa, size,
512-
&__kvm_bp_vect_base);
513-
}
514-
515-
return 0;
516-
}
517-
#else
518-
static inline void *kvm_get_hyp_vector(void)
519-
{
520-
return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
521-
}
522-
523-
static inline int kvm_map_vectors(void)
524-
{
525-
return 0;
526-
}
527-
#endif
528-
529480
#define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr)
530481

531482
/*

arch/arm64/kernel/proton-pack.c

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -177,7 +177,6 @@ enum mitigation_state arm64_get_spectre_v2_state(void)
177177
}
178178

179179
#ifdef CONFIG_KVM
180-
#ifdef CONFIG_RANDOMIZE_BASE
181180
#include <asm/cacheflush.h>
182181
#include <asm/kvm_asm.h>
183182

@@ -235,7 +234,6 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn)
235234
{
236235
__this_cpu_write(bp_hardening_data.fn, fn);
237236
}
238-
#endif /* CONFIG_RANDOMIZE_BASE */
239237
#endif /* CONFIG_KVM */
240238

241239
static void call_smc_arch_workaround_1(void)

arch/arm64/kvm/arm.c

Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1256,6 +1256,40 @@ long kvm_arch_vm_ioctl(struct file *filp,
12561256
}
12571257
}
12581258

1259+
static int kvm_map_vectors(void)
1260+
{
1261+
/*
1262+
* SV2 = ARM64_SPECTRE_V2
1263+
* HEL2 = ARM64_HARDEN_EL2_VECTORS
1264+
*
1265+
* !SV2 + !HEL2 -> use direct vectors
1266+
* SV2 + !HEL2 -> use hardened vectors in place
1267+
* !SV2 + HEL2 -> allocate one vector slot and use exec mapping
1268+
* SV2 + HEL2 -> use hardened vectors and use exec mapping
1269+
*/
1270+
if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
1271+
__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
1272+
__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
1273+
}
1274+
1275+
if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
1276+
phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
1277+
unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
1278+
1279+
/*
1280+
* Always allocate a spare vector slot, as we don't
1281+
* know yet which CPUs have a BP hardening slot that
1282+
* we can reuse.
1283+
*/
1284+
__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
1285+
BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
1286+
return create_hyp_exec_mappings(vect_pa, size,
1287+
&__kvm_bp_vect_base);
1288+
}
1289+
1290+
return 0;
1291+
}
1292+
12591293
static void cpu_init_hyp_mode(void)
12601294
{
12611295
phys_addr_t pgd_ptr;

arch/arm64/kvm/hyp/Makefile

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10,5 +10,4 @@ subdir-ccflags-y := -I$(incdir) \
1010
-DDISABLE_BRANCH_PROFILING \
1111
$(DISABLE_STACKLEAK_PLUGIN)
1212

13-
obj-$(CONFIG_KVM) += vhe/ nvhe/
14-
obj-$(CONFIG_RANDOMIZE_BASE) += smccc_wa.o
13+
obj-$(CONFIG_KVM) += vhe/ nvhe/ smccc_wa.o

arch/arm64/kvm/hyp/hyp-entry.S

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -259,7 +259,6 @@ SYM_CODE_START(__kvm_hyp_vector)
259259
valid_vect el1_error // Error 32-bit EL1
260260
SYM_CODE_END(__kvm_hyp_vector)
261261

262-
#ifdef CONFIG_RANDOMIZE_BASE
263262
.macro hyp_ventry
264263
.align 7
265264
1: esb
@@ -309,4 +308,3 @@ SYM_CODE_START(__bp_harden_hyp_vecs)
309308
1: .org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
310309
.org 1b
311310
SYM_CODE_END(__bp_harden_hyp_vecs)
312-
#endif

0 commit comments

Comments
 (0)