summaryrefslogtreecommitdiffstats
path: root/arch/x86/power
diff options
context:
space:
mode:
authorKirill A. Shutemov <kirill.shutemov@linux.intel.com>2018-02-14 13:25:41 -0500
committerIngo Molnar <mingo@kernel.org>2018-02-16 04:48:49 -0500
commit91f606a8fa68264224cbc76888fa8649cdbe9990 (patch)
tree6aa21758fdeba04876a471b800c05975092bedf8 /arch/x86/power
parent98219dda2ab56ce2a967fdebf81e838d676d9ddc (diff)
x86/mm: Replace compile-time checks for 5-level paging with runtime checks
This patch converts the CONFIG_X86_5LEVEL compile-time checks to runtime checks for p4d folding. Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Cc: Andy Lutomirski <luto@kernel.org> Cc: Arjan van de Ven <arjan@linux.intel.com> Cc: Borislav Petkov <bp@suse.de> Cc: Dan Williams <dan.j.williams@intel.com> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: David Woodhouse <dwmw2@infradead.org> Cc: Josh Poimboeuf <jpoimboe@redhat.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180214182542.69302-9-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/power')
-rw-r--r--arch/x86/power/hibernate_64.c6
1 file changed, 3 insertions, 3 deletions
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index 0ef5e5204968..74a532989308 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -50,7 +50,7 @@ static int set_up_temporary_text_mapping(pgd_t *pgd)
50{ 50{
51 pmd_t *pmd; 51 pmd_t *pmd;
52 pud_t *pud; 52 pud_t *pud;
53 p4d_t *p4d; 53 p4d_t *p4d = NULL;
54 54
55 /* 55 /*
56 * The new mapping only has to cover the page containing the image 56 * The new mapping only has to cover the page containing the image
@@ -66,7 +66,7 @@ static int set_up_temporary_text_mapping(pgd_t *pgd)
66 * tables used by the image kernel. 66 * tables used by the image kernel.
67 */ 67 */
68 68
69 if (IS_ENABLED(CONFIG_X86_5LEVEL)) { 69 if (pgtable_l5_enabled) {
70 p4d = (p4d_t *)get_safe_page(GFP_ATOMIC); 70 p4d = (p4d_t *)get_safe_page(GFP_ATOMIC);
71 if (!p4d) 71 if (!p4d)
72 return -ENOMEM; 72 return -ENOMEM;
@@ -84,7 +84,7 @@ static int set_up_temporary_text_mapping(pgd_t *pgd)
84 __pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC)); 84 __pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
85 set_pud(pud + pud_index(restore_jump_address), 85 set_pud(pud + pud_index(restore_jump_address),
86 __pud(__pa(pmd) | _KERNPG_TABLE)); 86 __pud(__pa(pmd) | _KERNPG_TABLE));
87 if (IS_ENABLED(CONFIG_X86_5LEVEL)) { 87 if (p4d) {
88 set_p4d(p4d + p4d_index(restore_jump_address), __p4d(__pa(pud) | _KERNPG_TABLE)); 88 set_p4d(p4d + p4d_index(restore_jump_address), __p4d(__pa(pud) | _KERNPG_TABLE));
89 set_pgd(pgd + pgd_index(restore_jump_address), __pgd(__pa(p4d) | _KERNPG_TABLE)); 89 set_pgd(pgd + pgd_index(restore_jump_address), __pgd(__pa(p4d) | _KERNPG_TABLE));
90 } else { 90 } else {