author    Tom Lendacky <thomas.lendacky@amd.com>    2017-07-17 17:10:07 -0400
committer Ingo Molnar <mingo@kernel.org>            2017-07-18 05:38:00 -0400
commit    21729f81ce8ae76a6995681d40e16f7ce8075db4 (patch)
tree      20fc96d3f1970ef516d27ed3e48305b6a24461ed
parent    fd7e315988b784509ba3f1b42f539bd0b1fca9bb (diff)
x86/mm: Provide general kernel support for memory encryption
Changes to the existing page table macros will allow the SME support to
be enabled in a simple fashion with minimal changes to files that use
these macros. Since the memory encryption mask will now be part of the
regular pagetable macros, we introduce two new macros
(_PAGE_TABLE_NOENC and _KERNPG_TABLE_NOENC) to allow for early
pagetable creation/initialization without the encryption mask before
SME becomes active. Two new pgprot() macros are defined to allow
setting or clearing the page encryption mask.

The FIXMAP_PAGE_NOCACHE define is introduced for use with MMIO. SME
does not support encryption for MMIO areas, so this define removes the
encryption mask from the page attribute.

Two new macros are introduced (__sme_pa() / __sme_pa_nodebug()) to
allow creating a physical address with the encryption mask. These are
used when working with the cr3 register so that the PGD can be
encrypted. The current __va() macro is updated so that the virtual
address is generated based on the physical address without the
encryption mask, thus allowing the same virtual address to be generated
regardless of whether encryption is enabled for that physical location
or not.

Also, an early initialization function is added for SME. If SME is
active, this function:

 - Updates early_pmd_flags so that early page faults create mappings
   with the encryption mask.

 - Updates __supported_pte_mask to include the encryption mask.

 - Updates the protection_map entries to include the encryption mask so
   that user-space allocations will automatically have the encryption
   mask applied.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: Dave Young <dyoung@redhat.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Larry Woodman <lwoodman@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matt Fleming <matt@codeblueprint.co.uk>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Toshimitsu Kani <toshi.kani@hpe.com>
Cc: kasan-dev@googlegroups.com
Cc: kvm@vger.kernel.org
Cc: linux-arch@vger.kernel.org
Cc: linux-doc@vger.kernel.org
Cc: linux-efi@vger.kernel.org
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/b36e952c4c39767ae7f0a41cf5345adf27438480.1500319216.git.thomas.lendacky@amd.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
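Illustrative usage (not part of this patch): the sketch below shows how the new helpers are intended to be consumed from other kernel code; the example_* functions are hypothetical and assume the headers touched by this patch.

/* Illustrative only -- example_* names are hypothetical. */
#include <linux/mem_encrypt.h>		/* sme_me_mask, __sme_set()/__sme_clr() */
#include <asm/mem_encrypt.h>		/* __sme_pa() */
#include <asm/pgtable.h>		/* pgprot_encrypted()/pgprot_decrypted() */
#include <asm/special_insns.h>		/* write_cr3() */

static void example_load_pgd(pgd_t *pgd)
{
	/* cr3 carries the encryption mask so the PGD itself is accessed encrypted */
	write_cr3(__sme_pa(pgd));
}

static pgprot_t example_mmio_prot(pgprot_t prot)
{
	/* MMIO must never be mapped encrypted; strip the mask explicitly */
	return pgprot_decrypted(prot);
}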
-rw-r--r--  arch/x86/boot/compressed/pagetable.c    7
-rw-r--r--  arch/x86/include/asm/fixmap.h           7
-rw-r--r--  arch/x86/include/asm/mem_encrypt.h      13
-rw-r--r--  arch/x86/include/asm/page_types.h       3
-rw-r--r--  arch/x86/include/asm/pgtable.h          9
-rw-r--r--  arch/x86/include/asm/pgtable_types.h    45
-rw-r--r--  arch/x86/include/asm/processor.h        3
-rw-r--r--  arch/x86/kernel/espfix_64.c             2
-rw-r--r--  arch/x86/kernel/head64.c                11
-rw-r--r--  arch/x86/kernel/head_64.S               20
-rw-r--r--  arch/x86/mm/kasan_init_64.c             4
-rw-r--r--  arch/x86/mm/mem_encrypt.c               17
-rw-r--r--  arch/x86/mm/pageattr.c                  3
-rw-r--r--  arch/x86/mm/tlb.c                       4
-rw-r--r--  include/asm-generic/pgtable.h           12
-rw-r--r--  include/linux/mem_encrypt.h             8
16 files changed, 133 insertions(+), 35 deletions(-)
diff --git a/arch/x86/boot/compressed/pagetable.c b/arch/x86/boot/compressed/pagetable.c
index 28029be47fbb..f1aa43854bed 100644
--- a/arch/x86/boot/compressed/pagetable.c
+++ b/arch/x86/boot/compressed/pagetable.c
@@ -15,6 +15,13 @@
 #define __pa(x) ((unsigned long)(x))
 #define __va(x) ((void *)((unsigned long)(x)))
 
+/*
+ * The pgtable.h and mm/ident_map.c includes make use of the SME related
+ * information which is not used in the compressed image support. Un-define
+ * the SME support to avoid any compile and link errors.
+ */
+#undef CONFIG_AMD_MEM_ENCRYPT
+
 #include "misc.h"
 
 /* These actually do the work of building the kernel identity maps. */
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index b65155cc3760..d9ff226cb489 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -157,6 +157,13 @@ static inline void __set_fixmap(enum fixed_addresses idx,
 }
 #endif
 
+/*
+ * FIXMAP_PAGE_NOCACHE is used for MMIO. Memory encryption is not
+ * supported for MMIO addresses, so make sure that the memory encryption
+ * mask is not part of the page attributes.
+ */
+#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_IO_NOCACHE
+
 #include <asm-generic/fixmap.h>
 
 #define __late_set_fixmap(idx, phys, flags) __set_fixmap(idx, phys, flags)
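For context, the generic fixmap code builds set_fixmap_nocache() on top of FIXMAP_PAGE_NOCACHE, so the new define keeps fixmap MMIO mappings unencrypted. A rough sketch of the effect (illustrative, not part of the patch; example_fixmap_mmio() is hypothetical):

#include <asm/fixmap.h>

/* Sketch only: map an MMIO page through the fixmap without _PAGE_ENC. */
static void example_fixmap_mmio(enum fixed_addresses idx, phys_addr_t phys)
{
	/* FIXMAP_PAGE_NOCACHE == PAGE_KERNEL_IO_NOCACHE, which carries no encryption mask */
	__set_fixmap(idx, phys, FIXMAP_PAGE_NOCACHE);
}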
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 475e34f53793..dbae7a5a347d 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -21,6 +21,8 @@
 
 extern unsigned long sme_me_mask;
 
+void __init sme_early_init(void);
+
 void __init sme_encrypt_kernel(void);
 void __init sme_enable(void);
 
@@ -28,11 +30,22 @@ void __init sme_enable(void);
 
 #define sme_me_mask 0UL
 
+static inline void __init sme_early_init(void) { }
+
 static inline void __init sme_encrypt_kernel(void) { }
 static inline void __init sme_enable(void) { }
 
 #endif /* CONFIG_AMD_MEM_ENCRYPT */
 
+/*
+ * The __sme_pa() and __sme_pa_nodebug() macros are meant for use when
+ * writing to or comparing values from the cr3 register. Having the
+ * encryption mask set in cr3 enables the PGD entry to be encrypted and
+ * avoid special case handling of PGD allocations.
+ */
+#define __sme_pa(x)		(__pa(x) | sme_me_mask)
+#define __sme_pa_nodebug(x)	(__pa_nodebug(x) | sme_me_mask)
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __X86_MEM_ENCRYPT_H__ */
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index 7bd0099384ca..b98ed9d14630 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -3,6 +3,7 @@
 
 #include <linux/const.h>
 #include <linux/types.h>
+#include <linux/mem_encrypt.h>
 
 /* PAGE_SHIFT determines the page size */
 #define PAGE_SHIFT 12
@@ -15,7 +16,7 @@
 #define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
 #define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
 
-#define __PHYSICAL_MASK ((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1))
+#define __PHYSICAL_MASK ((phys_addr_t)(__sme_clr((1ULL << __PHYSICAL_MASK_SHIFT) - 1)))
 #define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
 
 /* Cast *PAGE_MASK to a signed type so that it is sign-extended if
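Because __PHYSICAL_MASK is now built with __sme_clr(), masks derived from it (PTE_PFN_MASK and friends) drop the encryption bit automatically, which is what lets __va() and the pfn helpers return the same address whether or not a mapping is encrypted. A sketch of the effect (illustrative, not part of the patch; example_pte_phys() is hypothetical):

#include <asm/pgtable.h>

/* Sketch: the encryption bit never leaks into a physical address. */
static unsigned long example_pte_phys(pte_t pte)
{
	/* PTE_PFN_MASK is derived from __PHYSICAL_MASK, so _PAGE_ENC is already masked off */
	return pte_val(pte) & PTE_PFN_MASK;
}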
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index b64ea527edfb..c6452cb12c0b 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_X86_PGTABLE_H
 #define _ASM_X86_PGTABLE_H
 
+#include <linux/mem_encrypt.h>
 #include <asm/page.h>
 #include <asm/pgtable_types.h>
 
@@ -13,6 +14,12 @@
 	 cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS))) \
 	 : (prot))
 
+/*
+ * Macros to add or remove encryption attribute
+ */
+#define pgprot_encrypted(prot)	__pgprot(__sme_set(pgprot_val(prot)))
+#define pgprot_decrypted(prot)	__pgprot(__sme_clr(pgprot_val(prot)))
+
 #ifndef __ASSEMBLY__
 #include <asm/x86_init.h>
 
@@ -38,6 +45,8 @@ extern struct list_head pgd_list;
 
 extern struct mm_struct *pgd_page_get_mm(struct page *page);
 
+extern pmdval_t early_pmd_flags;
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else /* !CONFIG_PARAVIRT */
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index bf9638e1ee42..de32ca32928a 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -2,6 +2,8 @@
 #define _ASM_X86_PGTABLE_DEFS_H
 
 #include <linux/const.h>
+#include <linux/mem_encrypt.h>
+
 #include <asm/page_types.h>
 
 #define FIRST_USER_ADDRESS 0UL
@@ -121,10 +123,10 @@
 
 #define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
 
-#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
-			 _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \
-			 _PAGE_DIRTY)
+#define _PAGE_TABLE_NOENC	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |\
+				 _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _KERNPG_TABLE_NOENC	(_PAGE_PRESENT | _PAGE_RW | \
+				 _PAGE_ACCESSED | _PAGE_DIRTY)
 
 /*
  * Set of bits not changed in pte_modify. The pte's
@@ -191,18 +193,29 @@ enum page_cache_mode {
 #define __PAGE_KERNEL_IO (__PAGE_KERNEL)
 #define __PAGE_KERNEL_IO_NOCACHE (__PAGE_KERNEL_NOCACHE)
 
-#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
-#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
-#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
-#define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX)
-#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
-#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
-#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
-#define PAGE_KERNEL_VSYSCALL __pgprot(__PAGE_KERNEL_VSYSCALL)
-#define PAGE_KERNEL_VVAR __pgprot(__PAGE_KERNEL_VVAR)
-
-#define PAGE_KERNEL_IO __pgprot(__PAGE_KERNEL_IO)
-#define PAGE_KERNEL_IO_NOCACHE __pgprot(__PAGE_KERNEL_IO_NOCACHE)
+#ifndef __ASSEMBLY__
+
+#define _PAGE_ENC (_AT(pteval_t, sme_me_mask))
+
+#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
+		     _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_ENC)
+#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \
+		       _PAGE_DIRTY | _PAGE_ENC)
+
+#define PAGE_KERNEL __pgprot(__PAGE_KERNEL | _PAGE_ENC)
+#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC)
+#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC | _PAGE_ENC)
+#define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX | _PAGE_ENC)
+#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE | _PAGE_ENC)
+#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE | _PAGE_ENC)
+#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC | _PAGE_ENC)
+#define PAGE_KERNEL_VSYSCALL __pgprot(__PAGE_KERNEL_VSYSCALL | _PAGE_ENC)
+#define PAGE_KERNEL_VVAR __pgprot(__PAGE_KERNEL_VVAR | _PAGE_ENC)
+
+#define PAGE_KERNEL_IO __pgprot(__PAGE_KERNEL_IO)
+#define PAGE_KERNEL_IO_NOCACHE __pgprot(__PAGE_KERNEL_IO_NOCACHE)
+
+#endif /* __ASSEMBLY__ */
 
 /* xwr */
 #define __P000 PAGE_NONE
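_PAGE_ENC evaluates to sme_me_mask at run time, so anything built from _PAGE_TABLE / _KERNPG_TABLE or the PAGE_KERNEL* protections now picks up the encryption bit automatically, while the *_NOENC variants remain available for the early page tables set up before sme_me_mask is known. A sketch (illustrative, not part of the patch; example_kernel_pmd() is hypothetical):

#include <asm/pgtable_types.h>

/* Sketch: a kernel page-table entry built after SME is active is encrypted. */
static pmd_t example_kernel_pmd(unsigned long phys)
{
	return __pmd(phys | _KERNPG_TABLE);	/* _KERNPG_TABLE now includes _PAGE_ENC */
}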
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 6a79547e8ee0..a68f70c3debc 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -29,6 +29,7 @@ struct vm86;
 #include <linux/math64.h>
 #include <linux/err.h>
 #include <linux/irqflags.h>
+#include <linux/mem_encrypt.h>
 
 /*
  * We handle most unaligned accesses in hardware. On the other hand
@@ -241,7 +242,7 @@ static inline unsigned long read_cr3_pa(void)
 
 static inline void load_cr3(pgd_t *pgdir)
 {
-	write_cr3(__pa(pgdir));
+	write_cr3(__sme_pa(pgdir));
 }
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 6b91e2eb8d3f..9c4e7ba6870c 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -195,7 +195,7 @@ void init_espfix_ap(int cpu)
 
 	pte_p = pte_offset_kernel(&pmd, addr);
 	stack_page = page_address(alloc_pages_node(node, GFP_KERNEL, 0));
-	pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
+	pte = __pte(__pa(stack_page) | ((__PAGE_KERNEL_RO | _PAGE_ENC) & ptemask));
 	for (n = 0; n < ESPFIX_PTE_CLONES; n++)
 		set_pte(&pte_p[n*PTE_STRIDE], pte);
 
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 1f0ddcc9675c..5cd0b72a0283 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -102,7 +102,7 @@ unsigned long __head __startup_64(unsigned long physaddr)
 
 	pud = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
 	pmd = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
-	pgtable_flags = _KERNPG_TABLE + sme_get_me_mask();
+	pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask();
 
 	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
 		p4d = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
@@ -177,7 +177,7 @@ static void __init reset_early_page_tables(void)
 {
 	memset(early_top_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
 	next_early_pgt = 0;
-	write_cr3(__pa_nodebug(early_top_pgt));
+	write_cr3(__sme_pa_nodebug(early_top_pgt));
 }
 
 /* Create a new PMD entry */
@@ -310,6 +310,13 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
 
 	clear_page(init_top_pgt);
 
+	/*
+	 * SME support may update early_pmd_flags to include the memory
+	 * encryption mask, so it needs to be called before anything
+	 * that may generate a page fault.
+	 */
+	sme_early_init();
+
 	kasan_early_init();
 
 	for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index ec5d5e90c8f1..513cbb012ecc 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -351,9 +351,9 @@ GLOBAL(name)
 NEXT_PAGE(early_top_pgt)
 	.fill	511,8,0
 #ifdef CONFIG_X86_5LEVEL
-	.quad	level4_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
+	.quad	level4_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
 #else
-	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
+	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
 #endif
 
 NEXT_PAGE(early_dynamic_pgts)
@@ -366,15 +366,15 @@ NEXT_PAGE(init_top_pgt)
 	.fill	512,8,0
 #else
 NEXT_PAGE(init_top_pgt)
-	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
 	.org	init_top_pgt + PGD_PAGE_OFFSET*8, 0
-	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
 	.org	init_top_pgt + PGD_START_KERNEL*8, 0
 	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
-	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
+	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
 
 NEXT_PAGE(level3_ident_pgt)
-	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
 	.fill	511, 8, 0
 NEXT_PAGE(level2_ident_pgt)
 	/* Since I easily can, map the first 1G.
@@ -386,14 +386,14 @@ NEXT_PAGE(level2_ident_pgt)
 #ifdef CONFIG_X86_5LEVEL
 NEXT_PAGE(level4_kernel_pgt)
 	.fill	511,8,0
-	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
+	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
 #endif
 
 NEXT_PAGE(level3_kernel_pgt)
 	.fill	L3_START_KERNEL,8,0
 	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
-	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
-	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
+	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
+	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
 
 NEXT_PAGE(level2_kernel_pgt)
 	/*
@@ -411,7 +411,7 @@ NEXT_PAGE(level2_kernel_pgt)
 
 NEXT_PAGE(level2_fixmap_pgt)
 	.fill	506,8,0
-	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
+	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
 	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
 	.fill	5,8,0
 
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 02c9d7553409..39d4daf5e289 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -87,7 +87,7 @@ static struct notifier_block kasan_die_notifier = {
 void __init kasan_early_init(void)
 {
 	int i;
-	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
+	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL | _PAGE_ENC;
 	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
 	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
 	p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;
@@ -153,7 +153,7 @@ void __init kasan_init(void)
 	 */
 	memset(kasan_zero_page, 0, PAGE_SIZE);
 	for (i = 0; i < PTRS_PER_PTE; i++) {
-		pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO);
+		pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO | _PAGE_ENC);
 		set_pte(&kasan_zero_pte[i], pte);
 	}
 	/* Flush TLBs again to be sure that write protection applied. */
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 3ac6f99b095c..f973d3dc3802 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -12,6 +12,7 @@
 
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <linux/mm.h>
 
 /*
  * Since SME related variables are set early in the boot process they must
@@ -21,6 +22,22 @@
 unsigned long sme_me_mask __section(.data) = 0;
 EXPORT_SYMBOL_GPL(sme_me_mask);
 
+void __init sme_early_init(void)
+{
+	unsigned int i;
+
+	if (!sme_me_mask)
+		return;
+
+	early_pmd_flags = __sme_set(early_pmd_flags);
+
+	__supported_pte_mask = __sme_set(__supported_pte_mask);
+
+	/* Update the protection map with memory encryption mask */
+	for (i = 0; i < ARRAY_SIZE(protection_map); i++)
+		protection_map[i] = pgprot_encrypted(protection_map[i]);
+}
+
 void __init sme_encrypt_kernel(void)
 {
 }
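Since sme_early_init() folds the mask into protection_map[], helpers such as vm_get_page_prot() hand back protections that already carry _PAGE_ENC, so user-space mappings become encrypted without any per-caller changes. A sketch (illustrative, not part of the patch; example_user_prot() is hypothetical):

#include <linux/mm.h>

/* Sketch: user mappings inherit the encryption mask via protection_map[]. */
static pgprot_t example_user_prot(struct vm_area_struct *vma)
{
	/* already includes sme_me_mask once sme_early_init() has run */
	return vm_get_page_prot(vma->vm_flags);
}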
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 757b0bcdf712..7e2d6c0a64c4 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -2020,6 +2020,9 @@ int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
 	if (!(page_flags & _PAGE_RW))
 		cpa.mask_clr = __pgprot(_PAGE_RW);
 
+	if (!(page_flags & _PAGE_ENC))
+		cpa.mask_clr = pgprot_encrypted(cpa.mask_clr);
+
 	cpa.mask_set = __pgprot(_PAGE_PRESENT | page_flags);
 
 	retval = __change_page_attr_set_clr(&cpa, 0);
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 2c1b8881e9d3..593d2f76a54c 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -115,7 +115,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 		 */
 		this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen,
 			       next_tlb_gen);
-		write_cr3(__pa(next->pgd));
+		write_cr3(__sme_pa(next->pgd));
 		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
 				TLB_FLUSH_ALL);
 	}
@@ -157,7 +157,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 		this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, next->context.ctx_id);
 		this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, next_tlb_gen);
 		this_cpu_write(cpu_tlbstate.loaded_mm, next);
-		write_cr3(__pa(next->pgd));
+		write_cr3(__sme_pa(next->pgd));
 
 		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
 	}
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 7dfa767dc680..4d7bb98f4134 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -583,6 +583,18 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
 #endif /* CONFIG_MMU */
 
 /*
+ * No-op macros that just return the current protection value. Defined here
+ * because these macros can be used used even if CONFIG_MMU is not defined.
+ */
+#ifndef pgprot_encrypted
+#define pgprot_encrypted(prot)	(prot)
+#endif
+
+#ifndef pgprot_decrypted
+#define pgprot_decrypted(prot)	(prot)
+#endif
+
+/*
  * A facility to provide lazy MMU batching. This allows PTE updates and
  * page invalidations to be delayed until a call to leave lazy MMU mode
  * is issued. Some architectures may benefit from doing this, and it is
diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
index 570f4fcff13f..1255f09f5e42 100644
--- a/include/linux/mem_encrypt.h
+++ b/include/linux/mem_encrypt.h
@@ -35,6 +35,14 @@ static inline unsigned long sme_get_me_mask(void)
 	return sme_me_mask;
 }
 
+/*
+ * The __sme_set() and __sme_clr() macros are useful for adding or removing
+ * the encryption mask from a value (e.g. when dealing with pagetable
+ * entries).
+ */
+#define __sme_set(x)		((unsigned long)(x) | sme_me_mask)
+#define __sme_clr(x)		((unsigned long)(x) & ~sme_me_mask)
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __MEM_ENCRYPT_H__ */
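__sme_set() and __sme_clr() only OR in or mask out sme_me_mask, so with SME inactive (mask == 0) they are exact no-ops and callers do not need to be conditional on CONFIG_AMD_MEM_ENCRYPT. A sketch (illustrative, not part of the patch; example_toggle_enc() is hypothetical):

#include <linux/mem_encrypt.h>

/* Sketch: round-tripping a value through the helpers. */
static unsigned long example_toggle_enc(unsigned long val)
{
	val = __sme_set(val);	/* adds sme_me_mask (no-op when SME is off) */
	return __sme_clr(val);	/* removes it again, recovering the original value */
}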