Skip to content

Commit 43c8341

Browse files
committed
Merge tag 'x86_seves_for_v5.10_rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 SEV-ES fixes from Borislav Petkov:
 "A couple of changes to the SEV-ES code to perform more stringent
  hypervisor checks before enabling encryption (Joerg Roedel)"

* tag 'x86_seves_for_v5.10_rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/sev-es: Do not support MMIO to/from encrypted memory
  x86/head/64: Check SEV encryption before switching to kernel page-table
  x86/boot/compressed/64: Check SEV encryption in 64-bit boot-path
  x86/boot/compressed/64: Sanity-check CPUID results in the early #VC handler
  x86/boot/compressed/64: Introduce sev_status
2 parents f4c7914 + 2411cd8 commit 43c8341

8 files changed

Lines changed: 167 additions & 8 deletions

File tree

arch/x86/boot/compressed/ident_map_64.c

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -164,6 +164,7 @@ void initialize_identity_maps(void *rmode)
164164
add_identity_map(cmdline, cmdline + COMMAND_LINE_SIZE);
165165

166166
/* Load the new page-table. */
167+
sev_verify_cbit(top_level_pgt);
167168
write_cr3(top_level_pgt);
168169
}
169170

arch/x86/boot/compressed/mem_encrypt.S

Lines changed: 19 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -68,6 +68,9 @@ SYM_FUNC_START(get_sev_encryption_bit)
6868
SYM_FUNC_END(get_sev_encryption_bit)
6969

7070
.code64
71+
72+
#include "../../kernel/sev_verify_cbit.S"
73+
7174
SYM_FUNC_START(set_sev_encryption_mask)
7275
#ifdef CONFIG_AMD_MEM_ENCRYPT
7376
push %rbp
@@ -81,6 +84,19 @@ SYM_FUNC_START(set_sev_encryption_mask)
8184

8285
bts %rax, sme_me_mask(%rip) /* Create the encryption mask */
8386

87+
/*
88+
* Read MSR_AMD64_SEV again and store it to sev_status. Can't do this in
89+
* get_sev_encryption_bit() because this function is 32-bit code and
90+
* shared between 64-bit and 32-bit boot path.
91+
*/
92+
movl $MSR_AMD64_SEV, %ecx /* Read the SEV MSR */
93+
rdmsr
94+
95+
/* Store MSR value in sev_status */
96+
shlq $32, %rdx
97+
orq %rdx, %rax
98+
movq %rax, sev_status(%rip)
99+
84100
.Lno_sev_mask:
85101
movq %rbp, %rsp /* Restore original stack pointer */
86102

@@ -96,5 +112,7 @@ SYM_FUNC_END(set_sev_encryption_mask)
96112

97113
#ifdef CONFIG_AMD_MEM_ENCRYPT
98114
.balign 8
99-
SYM_DATA(sme_me_mask, .quad 0)
115+
SYM_DATA(sme_me_mask, .quad 0)
116+
SYM_DATA(sev_status, .quad 0)
117+
SYM_DATA(sev_check_data, .quad 0)
100118
#endif

arch/x86/boot/compressed/misc.h

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -159,4 +159,6 @@ void boot_page_fault(void);
159159
void boot_stage1_vc(void);
160160
void boot_stage2_vc(void);
161161

162+
unsigned long sev_verify_cbit(unsigned long cr3);
163+
162164
#endif /* BOOT_COMPRESSED_MISC_H */

arch/x86/kernel/head_64.S

Lines changed: 16 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -161,6 +161,21 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
161161

162162
/* Setup early boot stage 4-/5-level pagetables. */
163163
addq phys_base(%rip), %rax
164+
165+
/*
166+
* For SEV guests: Verify that the C-bit is correct. A malicious
167+
* hypervisor could lie about the C-bit position to perform a ROP
168+
* attack on the guest by writing to the unencrypted stack and wait for
169+
* the next RET instruction.
170+
* %rsi carries pointer to realmode data and is callee-clobbered. Save
171+
* and restore it.
172+
*/
173+
pushq %rsi
174+
movq %rax, %rdi
175+
call sev_verify_cbit
176+
popq %rsi
177+
178+
/* Switch to new page-table */
164179
movq %rax, %cr3
165180

166181
/* Ensure I am executing from virtual addresses */
@@ -279,6 +294,7 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
279294
SYM_CODE_END(secondary_startup_64)
280295

281296
#include "verify_cpu.S"
297+
#include "sev_verify_cbit.S"
282298

283299
#ifdef CONFIG_HOTPLUG_CPU
284300
/*

arch/x86/kernel/sev-es-shared.c

Lines changed: 26 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -178,6 +178,32 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
178178
goto fail;
179179
regs->dx = val >> 32;
180180

181+
/*
182+
* This is a VC handler and the #VC is only raised when SEV-ES is
183+
* active, which means SEV must be active too. Do sanity checks on the
184+
* CPUID results to make sure the hypervisor does not trick the kernel
185+
* into the no-sev path. This could map sensitive data unencrypted and
186+
* make it accessible to the hypervisor.
187+
*
188+
* In particular, check for:
189+
* - Hypervisor CPUID bit
190+
* - Availability of CPUID leaf 0x8000001f
191+
* - SEV CPUID bit.
192+
*
193+
* The hypervisor might still report the wrong C-bit position, but this
194+
* can't be checked here.
195+
*/
196+
197+
if ((fn == 1 && !(regs->cx & BIT(31))))
198+
/* Hypervisor bit */
199+
goto fail;
200+
else if (fn == 0x80000000 && (regs->ax < 0x8000001f))
201+
/* SEV leaf check */
202+
goto fail;
203+
else if ((fn == 0x8000001f && !(regs->ax & BIT(1))))
204+
/* SEV bit */
205+
goto fail;
206+
181207
/* Skip over the CPUID two-byte opcode */
182208
regs->ip += 2;
183209

arch/x86/kernel/sev-es.c

Lines changed: 13 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -374,8 +374,8 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
374374
return ES_EXCEPTION;
375375
}
376376

377-
static bool vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
378-
unsigned long vaddr, phys_addr_t *paddr)
377+
static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
378+
unsigned long vaddr, phys_addr_t *paddr)
379379
{
380380
unsigned long va = (unsigned long)vaddr;
381381
unsigned int level;
@@ -394,15 +394,19 @@ static bool vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
394394
if (user_mode(ctxt->regs))
395395
ctxt->fi.error_code |= X86_PF_USER;
396396

397-
return false;
397+
return ES_EXCEPTION;
398398
}
399399

400+
if (WARN_ON_ONCE(pte_val(*pte) & _PAGE_ENC))
401+
/* Emulated MMIO to/from encrypted memory not supported */
402+
return ES_UNSUPPORTED;
403+
400404
pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
401405
pa |= va & ~page_level_mask(level);
402406

403407
*paddr = pa;
404408

405-
return true;
409+
return ES_OK;
406410
}
407411

408412
/* Include code shared with pre-decompression boot stage */
@@ -731,6 +735,7 @@ static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
731735
{
732736
u64 exit_code, exit_info_1, exit_info_2;
733737
unsigned long ghcb_pa = __pa(ghcb);
738+
enum es_result res;
734739
phys_addr_t paddr;
735740
void __user *ref;
736741

@@ -740,11 +745,12 @@ static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
740745

741746
exit_code = read ? SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE;
742747

743-
if (!vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr)) {
744-
if (!read)
748+
res = vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr);
749+
if (res != ES_OK) {
750+
if (res == ES_EXCEPTION && !read)
745751
ctxt->fi.error_code |= X86_PF_WRITE;
746752

747-
return ES_EXCEPTION;
753+
return res;
748754
}
749755

750756
exit_info_1 = paddr;

arch/x86/kernel/sev_verify_cbit.S

Lines changed: 89 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -0,0 +1,89 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * sev_verify_cbit.S - Code for verification of the C-bit position reported
 *                     by the Hypervisor when running with SEV enabled.
 *
 * Copyright (c) 2020  Joerg Roedel (jroedel@suse.de)
 *
 * sev_verify_cbit() is called before switching to a new long-mode page-table
 * at boot.
 *
 * Verify that the C-bit position is correct by writing a random value to
 * an encrypted memory location while on the current page-table. Then it
 * switches to the new page-table to verify the memory content is still the
 * same. After that it switches back to the current page-table and when the
 * check succeeded it returns. If the check failed the code invalidates the
 * stack pointer and goes into a hlt loop. The stack-pointer is invalidated to
 * make sure no interrupt or exception can get the CPU out of the hlt loop.
 *
 * New page-table pointer is expected in %rdi (first parameter).
 * Returns the page-table pointer in %rax on success; does not return on
 * failure. Clobbers %rsi, %rdx, %rcx.
 */
SYM_FUNC_START(sev_verify_cbit)
#ifdef CONFIG_AMD_MEM_ENCRYPT
	/* First check if a C-bit was detected */
	movq	sme_me_mask(%rip), %rsi
	testq	%rsi, %rsi
	jz	3f

	/* sme_me_mask != 0 could mean SME or SEV - Check also for SEV */
	movq	sev_status(%rip), %rsi
	testq	%rsi, %rsi
	jz	3f

	/* Save CR4 in %rsi */
	movq	%cr4, %rsi

	/* Disable Global Pages */
	movq	%rsi, %rdx
	andq	$(~X86_CR4_PGE), %rdx
	movq	%rdx, %cr4

	/*
	 * Verified that running under SEV - now get a random value using
	 * RDRAND. This instruction is mandatory when running as an SEV guest.
	 *
	 * Don't bail out of the loop if RDRAND returns errors. It is better to
	 * prevent forward progress than to work with a non-random value here.
	 */
1:	rdrand	%rdx
	jnc	1b

	/* Store value to memory and keep it in %rdx */
	movq	%rdx, sev_check_data(%rip)

	/* Backup current %cr3 value to restore it later */
	movq	%cr3, %rcx

	/* Switch to new %cr3 - This might unmap the stack */
	movq	%rdi, %cr3

	/*
	 * Compare value in %rdx with memory location. If C-bit is incorrect
	 * this would read the encrypted data and make the check fail.
	 */
	cmpq	%rdx, sev_check_data(%rip)

	/* Restore old %cr3 */
	movq	%rcx, %cr3

	/* Restore previous CR4 */
	movq	%rsi, %cr4

	/* Check CMPQ result */
	je	3f

	/*
	 * The check failed, prevent any forward progress to prevent ROP
	 * attacks, invalidate the stack and go into a hlt loop.
	 */
	xorq	%rsp, %rsp
	subq	$0x1000, %rsp
2:	hlt
	jmp	2b
3:
#endif
	/* Return page-table pointer */
	movq	%rdi, %rax
	ret
SYM_FUNC_END(sev_verify_cbit)

arch/x86/mm/mem_encrypt.c

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -39,6 +39,7 @@
3939
*/
4040
u64 sme_me_mask __section(".data") = 0;
4141
u64 sev_status __section(".data") = 0;
42+
u64 sev_check_data __section(".data") = 0;
4243
EXPORT_SYMBOL(sme_me_mask);
4344
DEFINE_STATIC_KEY_FALSE(sev_enable_key);
4445
EXPORT_SYMBOL_GPL(sev_enable_key);

0 commit comments

Comments
 (0)