Skip to content

Commit 96d389c

Browse files
robherring authored and willdeacon committed
arm64: Add workaround for Arm Cortex-A77 erratum 1508412
On Cortex-A77 r0p0 and r1p0, a sequence of a non-cacheable or device load and a store exclusive or PAR_EL1 read can cause a deadlock. The workaround requires a DMB SY before and after a PAR_EL1 register read. In addition, it's possible an interrupt (doing a device read) or KVM guest exit could be taken between the DMB and PAR read, so we also need a DMB before returning from interrupt and before returning to a guest. A deadlock is still possible with the workaround as KVM guests must also have the workaround. IOW, a malicious guest can deadlock an affected systems. This workaround also depends on a firmware counterpart to enable the h/w to insert DMB SY after load and store exclusive instructions. See the errata document SDEN-1152370 v10 [1] for more information. [1] https://static.docs.arm.com/101992/0010/Arm_Cortex_A77_MP074_Software_Developer_Errata_Notice_v10.pdf Signed-off-by: Rob Herring <robh@kernel.org> Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> Acked-by: Marc Zyngier <maz@kernel.org> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: James Morse <james.morse@arm.com> Cc: Suzuki K Poulose <suzuki.poulose@arm.com> Cc: Will Deacon <will@kernel.org> Cc: Julien Thierry <julien.thierry.kdev@gmail.com> Cc: kvmarm@lists.cs.columbia.edu Link: https://lore.kernel.org/r/20201028182839.166037-2-robh@kernel.org Signed-off-by: Will Deacon <will@kernel.org>
1 parent 8a6b88e commit 96d389c

13 files changed

Lines changed: 66 additions & 15 deletions

File tree

Documentation/arm64/silicon-errata.rst

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -90,6 +90,8 @@ stable kernels.
9090
+----------------+-----------------+-----------------+-----------------------------+
9191
| ARM | Cortex-A76 | #1463225 | ARM64_ERRATUM_1463225 |
9292
+----------------+-----------------+-----------------+-----------------------------+
93+
| ARM | Cortex-A77 | #1508412 | ARM64_ERRATUM_1508412 |
94+
+----------------+-----------------+-----------------+-----------------------------+
9395
| ARM | Neoverse-N1 | #1188873,1418040| ARM64_ERRATUM_1418040 |
9496
+----------------+-----------------+-----------------+-----------------------------+
9597
| ARM | Neoverse-N1 | #1349291 | N/A |

arch/arm64/Kconfig

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -636,6 +636,26 @@ config ARM64_ERRATUM_1542419
636636

637637
If unsure, say Y.
638638

639+
config ARM64_ERRATUM_1508412
640+
bool "Cortex-A77: 1508412: workaround deadlock on sequence of NC/Device load and store exclusive or PAR read"
641+
default y
642+
help
643+
This option adds a workaround for Arm Cortex-A77 erratum 1508412.
644+
645+
Affected Cortex-A77 cores (r0p0, r1p0) could deadlock on a sequence
646+
of a store-exclusive or read of PAR_EL1 and a load with device or
647+
non-cacheable memory attributes. The workaround depends on a firmware
648+
counterpart.
649+
650+
KVM guests must also have the workaround implemented or they can
651+
deadlock the system.
652+
653+
Work around the issue by inserting DMB SY barriers around PAR_EL1
654+
register reads and warning KVM users. The DMB barrier is sufficient
655+
to prevent a speculative PAR_EL1 read.
656+
657+
If unsure, say Y.
658+
639659
config CAVIUM_ERRATUM_22375
640660
bool "Cavium erratum 22375, 24313"
641661
default y

arch/arm64/include/asm/cpucaps.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -65,7 +65,8 @@
6565
#define ARM64_HAS_ARMv8_4_TTL 55
6666
#define ARM64_HAS_TLB_RANGE 56
6767
#define ARM64_MTE 57
68+
#define ARM64_WORKAROUND_1508412 58
6869

69-
#define ARM64_NCAPS 58
70+
#define ARM64_NCAPS 59
7071

7172
#endif /* __ASM_CPUCAPS_H */

arch/arm64/include/asm/sysreg.h

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1007,6 +1007,7 @@
10071007

10081008
#include <linux/build_bug.h>
10091009
#include <linux/types.h>
1010+
#include <asm/alternative.h>
10101011

10111012
#define __DEFINE_MRS_MSR_S_REGNUM \
10121013
" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" \
@@ -1095,6 +1096,14 @@
10951096
write_sysreg_s(__scs_new, sysreg); \
10961097
} while (0)
10971098

1099+
#define read_sysreg_par() ({ \
1100+
u64 par; \
1101+
asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412)); \
1102+
par = read_sysreg(par_el1); \
1103+
asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412)); \
1104+
par; \
1105+
})
1106+
10981107
#endif
10991108

11001109
#endif /* __ASM_SYSREG_H */

arch/arm64/kernel/cpu_errata.c

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -522,6 +522,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
522522
.matches = has_neoverse_n1_erratum_1542419,
523523
.cpu_enable = cpu_enable_trap_ctr_access,
524524
},
525+
#endif
526+
#ifdef CONFIG_ARM64_ERRATUM_1508412
527+
{
528+
/* we depend on the firmware portion for correctness */
529+
.desc = "ARM erratum 1508412 (kernel portion)",
530+
.capability = ARM64_WORKAROUND_1508412,
531+
ERRATA_MIDR_RANGE(MIDR_CORTEX_A77,
532+
0, 0,
533+
1, 0),
534+
},
525535
#endif
526536
{
527537
}

arch/arm64/kernel/entry.S

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -365,6 +365,9 @@ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
365365
br x30
366366
#endif
367367
.else
368+
/* Ensure any device/NC reads complete */
369+
alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412
370+
368371
eret
369372
.endif
370373
sb

arch/arm64/kvm/arm.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1719,7 +1719,8 @@ int kvm_arch_init(void *opaque)
17191719
return -ENODEV;
17201720
}
17211721

1722-
if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE))
1722+
if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) ||
1723+
cpus_have_final_cap(ARM64_WORKAROUND_1508412))
17231724
kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
17241725
"Only trusted guests should be used on this system.\n");
17251726

arch/arm64/kvm/hyp/include/hyp/switch.h

Lines changed: 13 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -140,9 +140,9 @@ static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
140140
* We do need to save/restore PAR_EL1 though, as we haven't
141141
* saved the guest context yet, and we may return early...
142142
*/
143-
par = read_sysreg(par_el1);
143+
par = read_sysreg_par();
144144
if (!__kvm_at("s1e1r", far))
145-
tmp = read_sysreg(par_el1);
145+
tmp = read_sysreg_par();
146146
else
147147
tmp = SYS_PAR_EL1_F; /* back to the guest */
148148
write_sysreg(par, par_el1);
@@ -421,7 +421,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
421421
if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
422422
kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
423423
handle_tx2_tvm(vcpu))
424-
return true;
424+
goto guest;
425425

426426
/*
427427
* We trap the first access to the FP/SIMD to save the host context
@@ -431,13 +431,13 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
431431
* Similarly for trapped SVE accesses.
432432
*/
433433
if (__hyp_handle_fpsimd(vcpu))
434-
return true;
434+
goto guest;
435435

436436
if (__hyp_handle_ptrauth(vcpu))
437-
return true;
437+
goto guest;
438438

439439
if (!__populate_fault_info(vcpu))
440-
return true;
440+
goto guest;
441441

442442
if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
443443
bool valid;
@@ -452,7 +452,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
452452
int ret = __vgic_v2_perform_cpuif_access(vcpu);
453453

454454
if (ret == 1)
455-
return true;
455+
goto guest;
456456

457457
/* Promote an illegal access to an SError.*/
458458
if (ret == -1)
@@ -468,12 +468,17 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
468468
int ret = __vgic_v3_perform_cpuif_access(vcpu);
469469

470470
if (ret == 1)
471-
return true;
471+
goto guest;
472472
}
473473

474474
exit:
475475
/* Return to the host kernel and handle the exit */
476476
return false;
477+
478+
guest:
479+
/* Re-enter the guest */
480+
asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412));
481+
return true;
477482
}
478483

479484
static inline void __kvm_unexpected_el2_exception(void)

arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,7 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
4343
ctxt_sys_reg(ctxt, CONTEXTIDR_EL1) = read_sysreg_el1(SYS_CONTEXTIDR);
4444
ctxt_sys_reg(ctxt, AMAIR_EL1) = read_sysreg_el1(SYS_AMAIR);
4545
ctxt_sys_reg(ctxt, CNTKCTL_EL1) = read_sysreg_el1(SYS_CNTKCTL);
46-
ctxt_sys_reg(ctxt, PAR_EL1) = read_sysreg(par_el1);
46+
ctxt_sys_reg(ctxt, PAR_EL1) = read_sysreg_par();
4747
ctxt_sys_reg(ctxt, TPIDR_EL1) = read_sysreg(tpidr_el1);
4848

4949
ctxt_sys_reg(ctxt, SP_EL1) = read_sysreg(sp_el1);

arch/arm64/kvm/hyp/nvhe/switch.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -250,7 +250,7 @@ void __noreturn hyp_panic(void)
250250
{
251251
u64 spsr = read_sysreg_el2(SYS_SPSR);
252252
u64 elr = read_sysreg_el2(SYS_ELR);
253-
u64 par = read_sysreg(par_el1);
253+
u64 par = read_sysreg_par();
254254
bool restore_host = true;
255255
struct kvm_cpu_context *host_ctxt;
256256
struct kvm_vcpu *vcpu;

0 commit comments

Comments
 (0)