|
21 | 21 | #include <linux/mm.h> |
22 | 22 |
|
23 | 23 | #include <asm/cpu_entry_area.h> |
| 24 | +#include <asm/stacktrace.h> |
24 | 25 | #include <asm/sev-es.h> |
25 | 26 | #include <asm/insn-eval.h> |
26 | 27 | #include <asm/fpu/internal.h> |
@@ -214,6 +215,9 @@ static __always_inline void sev_es_put_ghcb(struct ghcb_state *state) |
214 | 215 | } |
215 | 216 | } |
216 | 217 |
|
| 218 | +/* Needed in vc_early_forward_exception */ |
| 219 | +void do_early_exception(struct pt_regs *regs, int trapnr); |
| 220 | + |
217 | 221 | static inline u64 sev_es_rd_ghcb_msr(void) |
218 | 222 | { |
219 | 223 | return __rdmsr(MSR_AMD64_SEV_ES_GHCB); |
@@ -402,6 +406,71 @@ static bool vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt, |
402 | 406 | /* Include code shared with pre-decompression boot stage */ |
403 | 407 | #include "sev-es-shared.c" |
404 | 408 |
|
/*
 * Ask the hypervisor for the physical address of the AP jump table.
 *
 * Uses the GHCB VMGEXIT protocol: the shared GHCB page is invalidated,
 * the AP_JUMP_TABLE exit code with the GET sub-operation is programmed
 * into the sw_exit fields, and VMGEXIT() hands control to the hypervisor.
 * On return the address is read back from sw_exit_info_2, but only if
 * the hypervisor marked both sw_exit_info fields valid.
 *
 * Returns the jump table address, or 0 if the hypervisor did not
 * provide one (callers treat 0 as "no jump table").
 *
 * Interrupts are disabled for the whole sequence so the per-CPU GHCB
 * cannot be re-entered between sev_es_get_ghcb() and sev_es_put_ghcb().
 */
static u64 get_jump_table_addr(void)
{
	struct ghcb_state state;
	unsigned long flags;
	struct ghcb *ghcb;
	u64 ret = 0;

	local_irq_save(flags);

	ghcb = sev_es_get_ghcb(&state);

	/* Clear stale valid-bits, then program the GET_AP_JUMP_TABLE request */
	vc_ghcb_invalidate(ghcb);
	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
	ghcb_set_sw_exit_info_1(ghcb, SVM_VMGEXIT_GET_AP_JUMP_TABLE);
	ghcb_set_sw_exit_info_2(ghcb, 0);

	/* Tell the hypervisor where the GHCB lives, then exit to it */
	sev_es_wr_ghcb_msr(__pa(ghcb));
	VMGEXIT();

	/* Only trust the result if the hypervisor validated both fields */
	if (ghcb_sw_exit_info_1_is_valid(ghcb) &&
	    ghcb_sw_exit_info_2_is_valid(ghcb))
		ret = ghcb->save.sw_exit_info_2;

	sev_es_put_ghcb(&state);

	local_irq_restore(flags);

	return ret;
}
| 438 | + |
| 439 | +int sev_es_setup_ap_jump_table(struct real_mode_header *rmh) |
| 440 | +{ |
| 441 | + u16 startup_cs, startup_ip; |
| 442 | + phys_addr_t jump_table_pa; |
| 443 | + u64 jump_table_addr; |
| 444 | + u16 __iomem *jump_table; |
| 445 | + |
| 446 | + jump_table_addr = get_jump_table_addr(); |
| 447 | + |
| 448 | + /* On UP guests there is no jump table so this is not a failure */ |
| 449 | + if (!jump_table_addr) |
| 450 | + return 0; |
| 451 | + |
| 452 | + /* Check if AP Jump Table is page-aligned */ |
| 453 | + if (jump_table_addr & ~PAGE_MASK) |
| 454 | + return -EINVAL; |
| 455 | + |
| 456 | + jump_table_pa = jump_table_addr & PAGE_MASK; |
| 457 | + |
| 458 | + startup_cs = (u16)(rmh->trampoline_start >> 4); |
| 459 | + startup_ip = (u16)(rmh->sev_es_trampoline_start - |
| 460 | + rmh->trampoline_start); |
| 461 | + |
| 462 | + jump_table = ioremap_encrypted(jump_table_pa, PAGE_SIZE); |
| 463 | + if (!jump_table) |
| 464 | + return -EIO; |
| 465 | + |
| 466 | + writew(startup_ip, &jump_table[0]); |
| 467 | + writew(startup_cs, &jump_table[1]); |
| 468 | + |
| 469 | + iounmap(jump_table); |
| 470 | + |
| 471 | + return 0; |
| 472 | +} |
| 473 | + |
405 | 474 | static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt) |
406 | 475 | { |
407 | 476 | struct pt_regs *regs = ctxt->regs; |
|
0 commit comments