Skip to content

Commit 962f8e6

Browse files
committed
Merge tag 'powerpc-cve-2020-4788' into fixes
From Daniel's cover letter: IBM Power9 processors can speculatively operate on data in the L1 cache before it has been completely validated, via a way-prediction mechanism. It is not possible for an attacker to determine the contents of impermissible memory using this method, since these systems implement a combination of hardware and software security measures to prevent scenarios where protected data could be leaked. However these measures don't address the scenario where an attacker induces the operating system to speculatively execute instructions using data that the attacker controls. This can be used for example to speculatively bypass "kernel user access prevention" techniques, as discovered by Anthony Steinhauser of Google's Safeside Project. This is not an attack by itself, but there is a possibility it could be used in conjunction with side-channels or other weaknesses in the privileged code to construct an attack. This issue can be mitigated by flushing the L1 cache between privilege boundaries of concern. This patch series flushes the L1 cache on kernel entry (patch 2) and after the kernel performs any user accesses (patch 3). It also adds a self-test and performs some related cleanups.
2 parents cd81acc + da631f7 commit 962f8e6

23 files changed

Lines changed: 693 additions & 147 deletions

File tree

Documentation/admin-guide/kernel-parameters.txt

Lines changed: 7 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -2858,6 +2858,8 @@
28582858
mds=off [X86]
28592859
tsx_async_abort=off [X86]
28602860
kvm.nx_huge_pages=off [X86]
2861+
no_entry_flush [PPC]
2862+
no_uaccess_flush [PPC]
28612863

28622864
Exceptions:
28632865
This does not have any effect on
@@ -3186,6 +3188,8 @@
31863188

31873189
noefi Disable EFI runtime services support.
31883190

3191+
no_entry_flush [PPC] Don't flush the L1-D cache when entering the kernel.
3192+
31893193
noexec [IA-64]
31903194

31913195
noexec [X86]
@@ -3235,6 +3239,9 @@
32353239
nospec_store_bypass_disable
32363240
[HW] Disable all mitigations for the Speculative Store Bypass vulnerability
32373241

3242+
no_uaccess_flush
3243+
[PPC] Don't flush the L1-D cache after accessing user data.
3244+
32383245
noxsave [BUGS=X86] Disables x86 extended register state save
32393246
and restore using xsave. The kernel will fallback to
32403247
enabling legacy floating-point and sse state.

arch/powerpc/include/asm/book3s/64/kup-radix.h

Lines changed: 42 additions & 24 deletions
Original file line number | Diff line number | Diff line change
@@ -27,6 +27,7 @@
2727
#endif
2828
.endm
2929

30+
#ifdef CONFIG_PPC_KUAP
3031
.macro kuap_check_amr gpr1, gpr2
3132
#ifdef CONFIG_PPC_KUAP_DEBUG
3233
BEGIN_MMU_FTR_SECTION_NESTED(67)
@@ -38,6 +39,7 @@
3839
END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
3940
#endif
4041
.endm
42+
#endif
4143

4244
.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
4345
#ifdef CONFIG_PPC_KUAP
@@ -61,6 +63,8 @@
6163

6264
#else /* !__ASSEMBLY__ */
6365

66+
DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
67+
6468
#ifdef CONFIG_PPC_KUAP
6569

6670
#include <asm/mmu.h>
@@ -103,8 +107,16 @@ static inline void kuap_check_amr(void)
103107

104108
static inline unsigned long get_kuap(void)
105109
{
110+
/*
111+
* We return AMR_KUAP_BLOCKED when we don't support KUAP because
112+
* prevent_user_access_return needs to return AMR_KUAP_BLOCKED to
113+
* cause restore_user_access to do a flush.
114+
*
115+
* This has no effect in terms of actually blocking things on hash,
116+
* so it doesn't break anything.
117+
*/
106118
if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP))
107-
return 0;
119+
return AMR_KUAP_BLOCKED;
108120

109121
return mfspr(SPRN_AMR);
110122
}
@@ -123,6 +135,29 @@ static inline void set_kuap(unsigned long value)
123135
isync();
124136
}
125137

138+
static inline bool
139+
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
140+
{
141+
return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) &&
142+
(regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
143+
"Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
144+
}
145+
#else /* CONFIG_PPC_KUAP */
146+
static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr) { }
147+
148+
static inline unsigned long kuap_get_and_check_amr(void)
149+
{
150+
return 0UL;
151+
}
152+
153+
static inline unsigned long get_kuap(void)
154+
{
155+
return AMR_KUAP_BLOCKED;
156+
}
157+
158+
static inline void set_kuap(unsigned long value) { }
159+
#endif /* !CONFIG_PPC_KUAP */
160+
126161
static __always_inline void allow_user_access(void __user *to, const void __user *from,
127162
unsigned long size, unsigned long dir)
128163
{
@@ -142,44 +177,27 @@ static inline void prevent_user_access(void __user *to, const void __user *from,
142177
unsigned long size, unsigned long dir)
143178
{
144179
set_kuap(AMR_KUAP_BLOCKED);
180+
if (static_branch_unlikely(&uaccess_flush_key))
181+
do_uaccess_flush();
145182
}
146183

147184
static inline unsigned long prevent_user_access_return(void)
148185
{
149186
unsigned long flags = get_kuap();
150187

151188
set_kuap(AMR_KUAP_BLOCKED);
189+
if (static_branch_unlikely(&uaccess_flush_key))
190+
do_uaccess_flush();
152191

153192
return flags;
154193
}
155194

156195
static inline void restore_user_access(unsigned long flags)
157196
{
158197
set_kuap(flags);
198+
if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
199+
do_uaccess_flush();
159200
}
160-
161-
static inline bool
162-
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
163-
{
164-
return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) &&
165-
(regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
166-
"Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
167-
}
168-
#else /* CONFIG_PPC_KUAP */
169-
static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr)
170-
{
171-
}
172-
173-
static inline void kuap_check_amr(void)
174-
{
175-
}
176-
177-
static inline unsigned long kuap_get_and_check_amr(void)
178-
{
179-
return 0;
180-
}
181-
#endif /* CONFIG_PPC_KUAP */
182-
183201
#endif /* __ASSEMBLY__ */
184202

185203
#endif /* _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H */

arch/powerpc/include/asm/exception-64s.h

Lines changed: 11 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -57,11 +57,18 @@
5757
nop; \
5858
nop
5959

60+
#define ENTRY_FLUSH_SLOT \
61+
ENTRY_FLUSH_FIXUP_SECTION; \
62+
nop; \
63+
nop; \
64+
nop;
65+
6066
/*
6167
* r10 must be free to use, r13 must be paca
6268
*/
6369
#define INTERRUPT_TO_KERNEL \
64-
STF_ENTRY_BARRIER_SLOT
70+
STF_ENTRY_BARRIER_SLOT; \
71+
ENTRY_FLUSH_SLOT
6572

6673
/*
6774
* Macros for annotating the expected destination of (h)rfid
@@ -137,6 +144,9 @@
137144
RFSCV; \
138145
b rfscv_flush_fallback
139146

147+
#else /* __ASSEMBLY__ */
148+
/* Prototype for function defined in exceptions-64s.S */
149+
void do_uaccess_flush(void);
140150
#endif /* __ASSEMBLY__ */
141151

142152
#endif /* _ASM_POWERPC_EXCEPTION_H */

arch/powerpc/include/asm/feature-fixups.h

Lines changed: 19 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -205,6 +205,22 @@ label##3: \
205205
FTR_ENTRY_OFFSET 955b-956b; \
206206
.popsection;
207207

208+
#define UACCESS_FLUSH_FIXUP_SECTION \
209+
959: \
210+
.pushsection __uaccess_flush_fixup,"a"; \
211+
.align 2; \
212+
960: \
213+
FTR_ENTRY_OFFSET 959b-960b; \
214+
.popsection;
215+
216+
#define ENTRY_FLUSH_FIXUP_SECTION \
217+
957: \
218+
.pushsection __entry_flush_fixup,"a"; \
219+
.align 2; \
220+
958: \
221+
FTR_ENTRY_OFFSET 957b-958b; \
222+
.popsection;
223+
208224
#define RFI_FLUSH_FIXUP_SECTION \
209225
951: \
210226
.pushsection __rfi_flush_fixup,"a"; \
@@ -237,8 +253,11 @@ label##3: \
237253
#include <linux/types.h>
238254

239255
extern long stf_barrier_fallback;
256+
extern long entry_flush_fallback;
240257
extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
241258
extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
259+
extern long __start___uaccess_flush_fixup, __stop___uaccess_flush_fixup;
260+
extern long __start___entry_flush_fixup, __stop___entry_flush_fixup;
242261
extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
243262
extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
244263
extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;

arch/powerpc/include/asm/kup.h

Lines changed: 20 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -14,7 +14,7 @@
1414
#define KUAP_CURRENT_WRITE 8
1515
#define KUAP_CURRENT (KUAP_CURRENT_READ | KUAP_CURRENT_WRITE)
1616

17-
#ifdef CONFIG_PPC64
17+
#ifdef CONFIG_PPC_BOOK3S_64
1818
#include <asm/book3s/64/kup-radix.h>
1919
#endif
2020
#ifdef CONFIG_PPC_8xx
@@ -35,6 +35,9 @@
3535
.macro kuap_check current, gpr
3636
.endm
3737

38+
.macro kuap_check_amr gpr1, gpr2
39+
.endm
40+
3841
#endif
3942

4043
#else /* !__ASSEMBLY__ */
@@ -53,17 +56,28 @@ static inline void setup_kuep(bool disabled) { }
5356
void setup_kuap(bool disabled);
5457
#else
5558
static inline void setup_kuap(bool disabled) { }
59+
60+
static inline bool
61+
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
62+
{
63+
return false;
64+
}
65+
66+
static inline void kuap_check_amr(void) { }
67+
68+
/*
69+
* book3s/64/kup-radix.h defines these functions for the !KUAP case to flush
70+
* the L1D cache after user accesses. Only include the empty stubs for other
71+
* platforms.
72+
*/
73+
#ifndef CONFIG_PPC_BOOK3S_64
5674
static inline void allow_user_access(void __user *to, const void __user *from,
5775
unsigned long size, unsigned long dir) { }
5876
static inline void prevent_user_access(void __user *to, const void __user *from,
5977
unsigned long size, unsigned long dir) { }
6078
static inline unsigned long prevent_user_access_return(void) { return 0UL; }
6179
static inline void restore_user_access(unsigned long flags) { }
62-
static inline bool
63-
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
64-
{
65-
return false;
66-
}
80+
#endif /* CONFIG_PPC_BOOK3S_64 */
6781
#endif /* CONFIG_PPC_KUAP */
6882

6983
static inline void allow_read_from_user(const void __user *from, unsigned long size)

arch/powerpc/include/asm/security_features.h

Lines changed: 7 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -86,12 +86,19 @@ static inline bool security_ftr_enabled(u64 feature)
8686
// Software required to flush link stack on context switch
8787
#define SEC_FTR_FLUSH_LINK_STACK 0x0000000000001000ull
8888

89+
// The L1-D cache should be flushed when entering the kernel
90+
#define SEC_FTR_L1D_FLUSH_ENTRY 0x0000000000004000ull
91+
92+
// The L1-D cache should be flushed after user accesses from the kernel
93+
#define SEC_FTR_L1D_FLUSH_UACCESS 0x0000000000008000ull
8994

9095
// Features enabled by default
9196
#define SEC_FTR_DEFAULT \
9297
(SEC_FTR_L1D_FLUSH_HV | \
9398
SEC_FTR_L1D_FLUSH_PR | \
9499
SEC_FTR_BNDS_CHK_SPEC_BAR | \
100+
SEC_FTR_L1D_FLUSH_ENTRY | \
101+
SEC_FTR_L1D_FLUSH_UACCESS | \
95102
SEC_FTR_FAVOUR_SECURITY)
96103

97104
#endif /* _ASM_POWERPC_SECURITY_FEATURES_H */

arch/powerpc/include/asm/setup.h

Lines changed: 4 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -52,12 +52,16 @@ enum l1d_flush_type {
5252
};
5353

5454
void setup_rfi_flush(enum l1d_flush_type, bool enable);
55+
void setup_entry_flush(bool enable);
56+
void setup_uaccess_flush(bool enable);
5557
void do_rfi_flush_fixups(enum l1d_flush_type types);
5658
#ifdef CONFIG_PPC_BARRIER_NOSPEC
5759
void setup_barrier_nospec(void);
5860
#else
5961
static inline void setup_barrier_nospec(void) { };
6062
#endif
63+
void do_uaccess_flush_fixups(enum l1d_flush_type types);
64+
void do_entry_flush_fixups(enum l1d_flush_type types);
6165
void do_barrier_nospec_fixups(bool enable);
6266
extern bool barrier_nospec_enabled;
6367

0 commit comments

Comments (0)