Skip to content

Commit 0a21ac0

Browse files
committed
Merge branch 'for-next/ghostbusters' into for-next/core
Fix and subsequently rewrite Spectre mitigations, including the addition of support for PR_SPEC_DISABLE_NOEXEC. (Will Deacon and Marc Zyngier) * for-next/ghostbusters: (22 commits) arm64: Add support for PR_SPEC_DISABLE_NOEXEC prctl() option arm64: Pull in task_stack_page() to Spectre-v4 mitigation code KVM: arm64: Allow patching EL2 vectors even when KASLR is not enabled arm64: Get rid of arm64_ssbd_state KVM: arm64: Convert ARCH_WORKAROUND_2 to arm64_get_spectre_v4_state() KVM: arm64: Get rid of kvm_arm_have_ssbd() KVM: arm64: Simplify handling of ARCH_WORKAROUND_2 arm64: Rewrite Spectre-v4 mitigation code arm64: Move SSBD prctl() handler alongside other spectre mitigation code arm64: Rename ARM64_SSBD to ARM64_SPECTRE_V4 arm64: Treat SSBS as a non-strict system feature arm64: Group start_thread() functions together KVM: arm64: Set CSV2 for guests on hardware unaffected by Spectre-v2 arm64: Rewrite Spectre-v2 mitigation code arm64: Introduce separate file for spectre mitigations and reporting arm64: Rename ARM64_HARDEN_BRANCH_PREDICTOR to ARM64_SPECTRE_V2 KVM: arm64: Simplify install_bp_hardening_cb() KVM: arm64: Replace CONFIG_KVM_INDIRECT_VECTORS with CONFIG_RANDOMIZE_BASE arm64: Remove Spectre-related CONFIG_* options arm64: Run ARCH_WORKAROUND_2 enabling code on all CPUs ...
2 parents 57b8b1b + 780c083 commit 0a21ac0

32 files changed

Lines changed: 981 additions & 1054 deletions

arch/arm64/Kconfig

Lines changed: 0 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -1172,32 +1172,6 @@ config UNMAP_KERNEL_AT_EL0
11721172

11731173
If unsure, say Y.
11741174

1175-
config HARDEN_BRANCH_PREDICTOR
1176-
bool "Harden the branch predictor against aliasing attacks" if EXPERT
1177-
default y
1178-
help
1179-
Speculation attacks against some high-performance processors rely on
1180-
being able to manipulate the branch predictor for a victim context by
1181-
executing aliasing branches in the attacker context. Such attacks
1182-
can be partially mitigated against by clearing internal branch
1183-
predictor state and limiting the prediction logic in some situations.
1184-
1185-
This config option will take CPU-specific actions to harden the
1186-
branch predictor against aliasing attacks and may rely on specific
1187-
instruction sequences or control bits being set by the system
1188-
firmware.
1189-
1190-
If unsure, say Y.
1191-
1192-
config ARM64_SSBD
1193-
bool "Speculative Store Bypass Disable" if EXPERT
1194-
default y
1195-
help
1196-
This enables mitigation of the bypassing of previous stores
1197-
by speculative loads.
1198-
1199-
If unsure, say Y.
1200-
12011175
config RODATA_FULL_DEFAULT_ENABLED
12021176
bool "Apply r/o permissions of VM areas also to their linear aliases"
12031177
default y

arch/arm64/include/asm/cpucaps.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -31,13 +31,13 @@
3131
#define ARM64_HAS_DCPOP 21
3232
#define ARM64_SVE 22
3333
#define ARM64_UNMAP_KERNEL_AT_EL0 23
34-
#define ARM64_HARDEN_BRANCH_PREDICTOR 24
34+
#define ARM64_SPECTRE_V2 24
3535
#define ARM64_HAS_RAS_EXTN 25
3636
#define ARM64_WORKAROUND_843419 26
3737
#define ARM64_HAS_CACHE_IDC 27
3838
#define ARM64_HAS_CACHE_DIC 28
3939
#define ARM64_HW_DBM 29
40-
#define ARM64_SSBD 30
40+
#define ARM64_SPECTRE_V4 30
4141
#define ARM64_MISMATCHED_CACHE_TYPE 31
4242
#define ARM64_HAS_STAGE2_FWB 32
4343
#define ARM64_HAS_CRC32 33

arch/arm64/include/asm/cpufeature.h

Lines changed: 0 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -698,30 +698,6 @@ static inline bool system_supports_tlb_range(void)
698698
cpus_have_const_cap(ARM64_HAS_TLB_RANGE);
699699
}
700700

701-
#define ARM64_BP_HARDEN_UNKNOWN -1
702-
#define ARM64_BP_HARDEN_WA_NEEDED 0
703-
#define ARM64_BP_HARDEN_NOT_REQUIRED 1
704-
705-
int get_spectre_v2_workaround_state(void);
706-
707-
#define ARM64_SSBD_UNKNOWN -1
708-
#define ARM64_SSBD_FORCE_DISABLE 0
709-
#define ARM64_SSBD_KERNEL 1
710-
#define ARM64_SSBD_FORCE_ENABLE 2
711-
#define ARM64_SSBD_MITIGATED 3
712-
713-
static inline int arm64_get_ssbd_state(void)
714-
{
715-
#ifdef CONFIG_ARM64_SSBD
716-
extern int ssbd_state;
717-
return ssbd_state;
718-
#else
719-
return ARM64_SSBD_UNKNOWN;
720-
#endif
721-
}
722-
723-
void arm64_set_ssbd_mitigation(bool state);
724-
725701
extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
726702

727703
static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)

arch/arm64/include/asm/kvm_asm.h

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -9,9 +9,6 @@
99

1010
#include <asm/virt.h>
1111

12-
#define VCPU_WORKAROUND_2_FLAG_SHIFT 0
13-
#define VCPU_WORKAROUND_2_FLAG (_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
14-
1512
#define ARM_EXIT_WITH_SERROR_BIT 31
1613
#define ARM_EXCEPTION_CODE(x) ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
1714
#define ARM_EXCEPTION_IS_TRAP(x) (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
@@ -102,11 +99,9 @@ DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
10299
#define __kvm_hyp_init CHOOSE_NVHE_SYM(__kvm_hyp_init)
103100
#define __kvm_hyp_vector CHOOSE_HYP_SYM(__kvm_hyp_vector)
104101

105-
#ifdef CONFIG_KVM_INDIRECT_VECTORS
106102
extern atomic_t arm64_el2_vector_last_slot;
107103
DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
108104
#define __bp_harden_hyp_vecs CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
109-
#endif
110105

111106
extern void __kvm_flush_vm_context(void);
112107
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 0 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -383,20 +383,6 @@ static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
383383
return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
384384
}
385385

386-
static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
387-
{
388-
return vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG;
389-
}
390-
391-
static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
392-
bool flag)
393-
{
394-
if (flag)
395-
vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
396-
else
397-
vcpu->arch.workaround_flags &= ~VCPU_WORKAROUND_2_FLAG;
398-
}
399-
400386
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
401387
{
402388
if (vcpu_mode_is_32bit(vcpu)) {

arch/arm64/include/asm/kvm_host.h

Lines changed: 0 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -631,46 +631,6 @@ static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
631631
static inline void kvm_clr_pmu_events(u32 clr) {}
632632
#endif
633633

634-
#define KVM_BP_HARDEN_UNKNOWN -1
635-
#define KVM_BP_HARDEN_WA_NEEDED 0
636-
#define KVM_BP_HARDEN_NOT_REQUIRED 1
637-
638-
static inline int kvm_arm_harden_branch_predictor(void)
639-
{
640-
switch (get_spectre_v2_workaround_state()) {
641-
case ARM64_BP_HARDEN_WA_NEEDED:
642-
return KVM_BP_HARDEN_WA_NEEDED;
643-
case ARM64_BP_HARDEN_NOT_REQUIRED:
644-
return KVM_BP_HARDEN_NOT_REQUIRED;
645-
case ARM64_BP_HARDEN_UNKNOWN:
646-
default:
647-
return KVM_BP_HARDEN_UNKNOWN;
648-
}
649-
}
650-
651-
#define KVM_SSBD_UNKNOWN -1
652-
#define KVM_SSBD_FORCE_DISABLE 0
653-
#define KVM_SSBD_KERNEL 1
654-
#define KVM_SSBD_FORCE_ENABLE 2
655-
#define KVM_SSBD_MITIGATED 3
656-
657-
static inline int kvm_arm_have_ssbd(void)
658-
{
659-
switch (arm64_get_ssbd_state()) {
660-
case ARM64_SSBD_FORCE_DISABLE:
661-
return KVM_SSBD_FORCE_DISABLE;
662-
case ARM64_SSBD_KERNEL:
663-
return KVM_SSBD_KERNEL;
664-
case ARM64_SSBD_FORCE_ENABLE:
665-
return KVM_SSBD_FORCE_ENABLE;
666-
case ARM64_SSBD_MITIGATED:
667-
return KVM_SSBD_MITIGATED;
668-
case ARM64_SSBD_UNKNOWN:
669-
default:
670-
return KVM_SSBD_UNKNOWN;
671-
}
672-
}
673-
674634
void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
675635
void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);
676636

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 8 additions & 82 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99

1010
#include <asm/page.h>
1111
#include <asm/memory.h>
12+
#include <asm/mmu.h>
1213
#include <asm/cpufeature.h>
1314

1415
/*
@@ -430,19 +431,17 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
430431
return ret;
431432
}
432433

433-
#ifdef CONFIG_KVM_INDIRECT_VECTORS
434434
/*
435435
* EL2 vectors can be mapped and rerouted in a number of ways,
436436
* depending on the kernel configuration and CPU present:
437437
*
438-
* - If the CPU has the ARM64_HARDEN_BRANCH_PREDICTOR cap, the
439-
* hardening sequence is placed in one of the vector slots, which is
440-
* executed before jumping to the real vectors.
438+
* - If the CPU is affected by Spectre-v2, the hardening sequence is
439+
* placed in one of the vector slots, which is executed before jumping
440+
* to the real vectors.
441441
*
442-
* - If the CPU has both the ARM64_HARDEN_EL2_VECTORS cap and the
443-
* ARM64_HARDEN_BRANCH_PREDICTOR cap, the slot containing the
444-
* hardening sequence is mapped next to the idmap page, and executed
445-
* before jumping to the real vectors.
442+
* - If the CPU also has the ARM64_HARDEN_EL2_VECTORS cap, the slot
443+
* containing the hardening sequence is mapped next to the idmap page,
444+
* and executed before jumping to the real vectors.
446445
*
447446
* - If the CPU only has the ARM64_HARDEN_EL2_VECTORS cap, then an
448447
* empty slot is selected, mapped next to the idmap page, and
@@ -452,19 +451,16 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
452451
* VHE, as we don't have hypervisor-specific mappings. If the system
453452
* is VHE and yet selects this capability, it will be ignored.
454453
*/
455-
#include <asm/mmu.h>
456-
457454
extern void *__kvm_bp_vect_base;
458455
extern int __kvm_harden_el2_vector_slot;
459456

460-
/* This is called on both VHE and !VHE systems */
461457
static inline void *kvm_get_hyp_vector(void)
462458
{
463459
struct bp_hardening_data *data = arm64_get_bp_hardening_data();
464460
void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
465461
int slot = -1;
466462

467-
if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
463+
if (cpus_have_const_cap(ARM64_SPECTRE_V2) && data->fn) {
468464
vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
469465
slot = data->hyp_vectors_slot;
470466
}
@@ -481,76 +477,6 @@ static inline void *kvm_get_hyp_vector(void)
481477
return vect;
482478
}
483479

484-
/* This is only called on a !VHE system */
485-
static inline int kvm_map_vectors(void)
486-
{
487-
/*
488-
* HBP = ARM64_HARDEN_BRANCH_PREDICTOR
489-
* HEL2 = ARM64_HARDEN_EL2_VECTORS
490-
*
491-
* !HBP + !HEL2 -> use direct vectors
492-
* HBP + !HEL2 -> use hardened vectors in place
493-
* !HBP + HEL2 -> allocate one vector slot and use exec mapping
494-
HBP + HEL2 -> use hardened vectors and use exec mapping
495-
*/
496-
if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
497-
__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
498-
__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
499-
}
500-
501-
if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
502-
phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
503-
unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
504-
505-
/*
506-
* Always allocate a spare vector slot, as we don't
507-
* know yet which CPUs have a BP hardening slot that
508-
* we can reuse.
509-
*/
510-
__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
511-
BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
512-
return create_hyp_exec_mappings(vect_pa, size,
513-
&__kvm_bp_vect_base);
514-
}
515-
516-
return 0;
517-
}
518-
#else
519-
static inline void *kvm_get_hyp_vector(void)
520-
{
521-
return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
522-
}
523-
524-
static inline int kvm_map_vectors(void)
525-
{
526-
return 0;
527-
}
528-
#endif
529-
530-
#ifdef CONFIG_ARM64_SSBD
531-
DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
532-
533-
static inline int hyp_map_aux_data(void)
534-
{
535-
int cpu, err;
536-
537-
for_each_possible_cpu(cpu) {
538-
u64 *ptr;
539-
540-
ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
541-
err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
542-
if (err)
543-
return err;
544-
}
545-
return 0;
546-
}
547-
#else
548-
static inline int hyp_map_aux_data(void)
549-
{
550-
return 0;
551-
}
552-
#endif
553-
554480
#define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr)
555481

556482
/*

arch/arm64/include/asm/mmu.h

Lines changed: 1 addition & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,6 @@ struct bp_hardening_data {
4848
bp_hardening_cb_t fn;
4949
};
5050

51-
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
5251
DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
5352

5453
static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
@@ -60,21 +59,13 @@ static inline void arm64_apply_bp_hardening(void)
6059
{
6160
struct bp_hardening_data *d;
6261

63-
if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
62+
if (!cpus_have_const_cap(ARM64_SPECTRE_V2))
6463
return;
6564

6665
d = arm64_get_bp_hardening_data();
6766
if (d->fn)
6867
d->fn();
6968
}
70-
#else
71-
static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
72-
{
73-
return NULL;
74-
}
75-
76-
static inline void arm64_apply_bp_hardening(void) { }
77-
#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
7869

7970
extern void arm64_memblock_init(void);
8071
extern void paging_init(void);

0 commit comments

Comments
 (0)