author	Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2018-02-26 13:04:48 -0500
committer	Ingo Molnar <mingo@kernel.org>	2018-03-12 04:37:23 -0400
commit	3548e131ec6a82208f36e68d31947b0fe244c7a7 (patch)
tree	2a010eaa67695e8b7a19267b675d5a4eab69724f
parent	a403d798182f4f7be5e9bab56cfa37e9828fd92a (diff)
x86/boot/compressed/64: Find a place for 32-bit trampoline
If a bootloader enables 64-bit mode with 4-level paging, we might need to switch over to 5-level paging. The switchover requires paging to be disabled, which works fine as long as the kernel itself is loaded below 4G.

But if the bootloader puts the kernel above 4G (not sure if anybody does this), we would lose control as soon as paging is disabled, because the code becomes unreachable to the CPU.

To handle the situation, we need a trampoline in lower memory that would take care of switching on 5-level paging.

This patch finds a spot in low memory for the trampoline.

The heuristic is based on the code in reserve_bios_regions(): we find the end of low memory from the BIOS and EBDA start addresses and place the trampoline just below it. This mimics the approach taken to allocate memory for the real-mode trampoline.

Tested-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Shevchenko <andy.shevchenko@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Eric Biederman <ebiederm@xmission.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20180226180451.86788-3-kirill.shutemov@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
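[Editor's note: as an illustration only, not part of the patch, here is a minimal standalone C sketch of the placement heuristic described above. The helper name find_trampoline_start() and the sample BDA values passed in main() are assumptions made up for the example; the actual logic is the paging_prepare() change in the diff below, which reads the BDA words at 0x40e and 0x413 directly.]

#include <stdio.h>

#define PAGE_SIZE              0x1000UL
#define TRAMPOLINE_32BIT_SIZE  (2 * PAGE_SIZE)
#define BIOS_START_MIN         0x20000UL  /* 128K, less than this is insane */
#define BIOS_START_MAX         0x9f000UL  /* 640K, absolute maximum */

/*
 * Illustrative stand-in for the heuristic in paging_prepare():
 * 'ebda_seg' is the 16-bit EBDA segment normally read from BDA offset 0x40e,
 * 'base_kb' is the base memory size in KiB normally read from BDA offset 0x413.
 */
static unsigned long find_trampoline_start(unsigned short ebda_seg,
					   unsigned short base_kb)
{
	unsigned long ebda_start = (unsigned long)ebda_seg << 4;  /* segment -> physical */
	unsigned long bios_start = (unsigned long)base_kb << 10;  /* KiB -> bytes */

	/* Distrust values outside the sane 128K..640K window */
	if (bios_start < BIOS_START_MIN || bios_start > BIOS_START_MAX)
		bios_start = BIOS_START_MAX;

	/* A plausible EBDA marks an even earlier end of usable low memory */
	if (ebda_start > BIOS_START_MIN && ebda_start < bios_start)
		bios_start = ebda_start;

	/* Two pages just below the end of low memory, rounded down to 4k */
	return (bios_start - TRAMPOLINE_32BIT_SIZE) & ~(PAGE_SIZE - 1);
}

int main(void)
{
	/* Example: EBDA at segment 0x9fc0 (0x9fc00), 639 KiB base memory: prints 0x9d000 */
	printf("trampoline at %#lx\n", find_trampoline_start(0x9fc0, 639));
	return 0;
}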
-rw-r--r--	arch/x86/boot/compressed/misc.c	6
-rw-r--r--	arch/x86/boot/compressed/pgtable.h	11
-rw-r--r--	arch/x86/boot/compressed/pgtable_64.c	34
3 files changed, 51 insertions(+), 0 deletions(-)
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index b50c42455e25..8e4b55dd5df9 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -14,6 +14,7 @@
 
 #include "misc.h"
 #include "error.h"
+#include "pgtable.h"
 #include "../string.h"
 #include "../voffset.h"
 
@@ -372,6 +373,11 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
 	debug_putaddr(output_len);
 	debug_putaddr(kernel_total_size);
 
+#ifdef CONFIG_X86_64
+	/* Report address of 32-bit trampoline */
+	debug_putaddr(trampoline_32bit);
+#endif
+
 	/*
 	 * The memory hole needed for the kernel is the larger of either
 	 * the entire decompressed kernel plus relocation table, or the
diff --git a/arch/x86/boot/compressed/pgtable.h b/arch/x86/boot/compressed/pgtable.h
new file mode 100644
index 000000000000..57722a2fe2a0
--- /dev/null
+++ b/arch/x86/boot/compressed/pgtable.h
@@ -0,0 +1,11 @@
+#ifndef BOOT_COMPRESSED_PAGETABLE_H
+#define BOOT_COMPRESSED_PAGETABLE_H
+
+#define TRAMPOLINE_32BIT_SIZE	(2 * PAGE_SIZE)
+
+#ifndef __ASSEMBLER__
+
+extern unsigned long *trampoline_32bit;
+
+#endif /* __ASSEMBLER__ */
+#endif /* BOOT_COMPRESSED_PAGETABLE_H */
diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
index 45c76eff2718..21d5cc1cd5fa 100644
--- a/arch/x86/boot/compressed/pgtable_64.c
+++ b/arch/x86/boot/compressed/pgtable_64.c
@@ -1,4 +1,5 @@
 #include <asm/processor.h>
+#include "pgtable.h"
 
 /*
  * __force_order is used by special_insns.h asm code to force instruction
@@ -9,14 +10,27 @@
  */
 unsigned long __force_order;
 
+#define BIOS_START_MIN		0x20000U	/* 128K, less than this is insane */
+#define BIOS_START_MAX		0x9f000U	/* 640K, absolute maximum */
+
 struct paging_config {
 	unsigned long trampoline_start;
 	unsigned long l5_required;
 };
 
+/*
+ * Trampoline address will be printed by extract_kernel() for debugging
+ * purposes.
+ *
+ * Avoid putting the pointer into .bss as it will be cleared between
+ * paging_prepare() and extract_kernel().
+ */
+unsigned long *trampoline_32bit __section(.data);
+
 struct paging_config paging_prepare(void)
 {
 	struct paging_config paging_config = {};
+	unsigned long bios_start, ebda_start;
 
 	/*
 	 * Check if LA57 is desired and supported.
@@ -35,5 +49,25 @@ struct paging_config paging_prepare(void)
 		paging_config.l5_required = 1;
 	}
 
+	/*
+	 * Find a suitable spot for the trampoline.
+	 * This code is based on reserve_bios_regions().
+	 */
+
+	ebda_start = *(unsigned short *)0x40e << 4;
+	bios_start = *(unsigned short *)0x413 << 10;
+
+	if (bios_start < BIOS_START_MIN || bios_start > BIOS_START_MAX)
+		bios_start = BIOS_START_MAX;
+
+	if (ebda_start > BIOS_START_MIN && ebda_start < bios_start)
+		bios_start = ebda_start;
+
+	/* Place the trampoline just below the end of low memory, aligned to 4k */
+	paging_config.trampoline_start = bios_start - TRAMPOLINE_32BIT_SIZE;
+	paging_config.trampoline_start = round_down(paging_config.trampoline_start, PAGE_SIZE);
+
+	trampoline_32bit = (unsigned long *)paging_config.trampoline_start;
+
 	return paging_config;
 }