aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorTom Lendacky <thomas.lendacky@amd.com>2018-03-27 18:07:11 -0400
committerThomas Gleixner <tglx@linutronix.de>2018-03-28 04:42:57 -0400
commit07344b15a994acadbe439aa4e75127ed1ccca099 (patch)
tree2fe67e65764d6975111d44881fb5b513440710ff
parent547edaca247abf910e32f0cd883ba83b8fc6d0ed (diff)
x86/boot: Fix SEV boot failure from change to __PHYSICAL_MASK_SHIFT
In arch/x86/boot/compressed/kaslr_64.c, CONFIG_AMD_MEM_ENCRYPT support was initially #undef'd to support SME with minimal effort. When support for SEV was added, the #undef remained and some minimal support for setting the encryption bit was added for building identity mapped pagetable entries. Commit b83ce5ee9147 ("x86/mm/64: Make __PHYSICAL_MASK_SHIFT always 52") changed __PHYSICAL_MASK_SHIFT from 46 to 52 in support of 5-level paging. This change resulted in SEV guests failing to boot because the encryption bit was no longer being automatically masked out. The compressed boot path now requires sme_me_mask to be defined in order for the pagetable functions, such as pud_present(), to properly mask out the encryption bit (currently bit 47) when evaluating pagetable entries. Add an sme_me_mask variable in arch/x86/boot/compressed/mem_encrypt.S, which is set when SEV is active, delete the #undef CONFIG_AMD_MEM_ENCRYPT from arch/x86/boot/compressed/kaslr_64.c and use sme_me_mask when building the identity mapped pagetable entries. Fixes: b83ce5ee9147 ("x86/mm/64: Make __PHYSICAL_MASK_SHIFT always 52") Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: Borislav Petkov <bp@alien8.de> Cc: Brijesh Singh <brijesh.singh@amd.com> Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com> Cc: "H. Peter Anvin" <hpa@zytor.com> Link: https://lkml.kernel.org/r/20180327220711.8702.55842.stgit@tlendack-t1.amdoffice.net
-rw-r--r--arch/x86/boot/compressed/kaslr_64.c14
-rw-r--r--arch/x86/boot/compressed/mem_encrypt.S17
-rw-r--r--arch/x86/boot/compressed/misc.h2
3 files changed, 15 insertions, 18 deletions
diff --git a/arch/x86/boot/compressed/kaslr_64.c b/arch/x86/boot/compressed/kaslr_64.c
index b5e5e02f8cde..522d11431433 100644
--- a/arch/x86/boot/compressed/kaslr_64.c
+++ b/arch/x86/boot/compressed/kaslr_64.c
@@ -16,13 +16,6 @@
16#define __pa(x) ((unsigned long)(x)) 16#define __pa(x) ((unsigned long)(x))
17#define __va(x) ((void *)((unsigned long)(x))) 17#define __va(x) ((void *)((unsigned long)(x)))
18 18
19/*
20 * The pgtable.h and mm/ident_map.c includes make use of the SME related
21 * information which is not used in the compressed image support. Un-define
22 * the SME support to avoid any compile and link errors.
23 */
24#undef CONFIG_AMD_MEM_ENCRYPT
25
26/* No PAGE_TABLE_ISOLATION support needed either: */ 19/* No PAGE_TABLE_ISOLATION support needed either: */
27#undef CONFIG_PAGE_TABLE_ISOLATION 20#undef CONFIG_PAGE_TABLE_ISOLATION
28 21
@@ -85,13 +78,14 @@ static struct x86_mapping_info mapping_info;
85/* Locates and clears a region for a new top level page table. */ 78/* Locates and clears a region for a new top level page table. */
86void initialize_identity_maps(void) 79void initialize_identity_maps(void)
87{ 80{
88 unsigned long sev_me_mask = get_sev_encryption_mask(); 81 /* If running as an SEV guest, the encryption mask is required. */
82 set_sev_encryption_mask();
89 83
90 /* Init mapping_info with run-time function/buffer pointers. */ 84 /* Init mapping_info with run-time function/buffer pointers. */
91 mapping_info.alloc_pgt_page = alloc_pgt_page; 85 mapping_info.alloc_pgt_page = alloc_pgt_page;
92 mapping_info.context = &pgt_data; 86 mapping_info.context = &pgt_data;
93 mapping_info.page_flag = __PAGE_KERNEL_LARGE_EXEC | sev_me_mask; 87 mapping_info.page_flag = __PAGE_KERNEL_LARGE_EXEC | sme_me_mask;
94 mapping_info.kernpg_flag = _KERNPG_TABLE | sev_me_mask; 88 mapping_info.kernpg_flag = _KERNPG_TABLE;
95 89
96 /* 90 /*
97 * It should be impossible for this not to already be true, 91 * It should be impossible for this not to already be true,
diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
index 54f5f6625a73..eaa843a52907 100644
--- a/arch/x86/boot/compressed/mem_encrypt.S
+++ b/arch/x86/boot/compressed/mem_encrypt.S
@@ -88,9 +88,7 @@ ENTRY(get_sev_encryption_bit)
88ENDPROC(get_sev_encryption_bit) 88ENDPROC(get_sev_encryption_bit)
89 89
90 .code64 90 .code64
91ENTRY(get_sev_encryption_mask) 91ENTRY(set_sev_encryption_mask)
92 xor %rax, %rax
93
94#ifdef CONFIG_AMD_MEM_ENCRYPT 92#ifdef CONFIG_AMD_MEM_ENCRYPT
95 push %rbp 93 push %rbp
96 push %rdx 94 push %rdx
@@ -101,9 +99,7 @@ ENTRY(get_sev_encryption_mask)
101 testl %eax, %eax 99 testl %eax, %eax
102 jz .Lno_sev_mask 100 jz .Lno_sev_mask
103 101
104 xor %rdx, %rdx 102 bts %rax, sme_me_mask(%rip) /* Create the encryption mask */
105 bts %rax, %rdx /* Create the encryption mask */
106 mov %rdx, %rax /* ... and return it */
107 103
108.Lno_sev_mask: 104.Lno_sev_mask:
109 movq %rbp, %rsp /* Restore original stack pointer */ 105 movq %rbp, %rsp /* Restore original stack pointer */
@@ -112,9 +108,16 @@ ENTRY(get_sev_encryption_mask)
112 pop %rbp 108 pop %rbp
113#endif 109#endif
114 110
111 xor %rax, %rax
115 ret 112 ret
116ENDPROC(get_sev_encryption_mask) 113ENDPROC(set_sev_encryption_mask)
117 114
118 .data 115 .data
119enc_bit: 116enc_bit:
120 .int 0xffffffff 117 .int 0xffffffff
118
119#ifdef CONFIG_AMD_MEM_ENCRYPT
120 .balign 8
121GLOBAL(sme_me_mask)
122 .quad 0
123#endif
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index 4d369c308ed7..9e11be4cae19 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -114,6 +114,6 @@ static inline void console_init(void)
114{ } 114{ }
115#endif 115#endif
116 116
117unsigned long get_sev_encryption_mask(void); 117void set_sev_encryption_mask(void);
118 118
119#endif 119#endif