diff options
author | Andrey Ryabinin <aryabinin@virtuozzo.com> | 2018-01-10 10:36:02 -0500 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2018-01-14 18:32:35 -0500 |
commit | 0d39e2669d7b0fefd2d8f9e7868ae669b364d9ba (patch) | |
tree | c02373bb8b2457a2d2a46c1b1bac755d55c0302d | |
parent | b8b9ce4b5aec8de9e23cabb0a26b78641f9ab1d6 (diff) |
x86/kasan: Panic if there is not enough memory to boot
Currently KASAN doesn't panic in case it doesn't have enough memory
to boot. Instead, it crashes in some random place:
kernel BUG at arch/x86/mm/physaddr.c:27!
RIP: 0010:__phys_addr+0x268/0x276
Call Trace:
kasan_populate_shadow+0x3f2/0x497
kasan_init+0x12e/0x2b2
setup_arch+0x2825/0x2a2c
start_kernel+0xc8/0x15f4
x86_64_start_reservations+0x2a/0x2c
x86_64_start_kernel+0x72/0x75
secondary_startup_64+0xa5/0xb0
Use memblock_virt_alloc_try_nid() for allocations without failure
fallback. It will panic with an out of memory message.
Reported-by: kernel test robot <xiaolong.ye@intel.com>
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Dmitry Vyukov <dvyukov@google.com>
Cc: kasan-dev@googlegroups.com
Cc: Alexander Potapenko <glider@google.com>
Cc: lkp@01.org
Link: https://lkml.kernel.org/r/20180110153602.18919-1-aryabinin@virtuozzo.com
-rw-r--r-- | arch/x86/mm/kasan_init_64.c | 24 |
1 file changed, 14 insertions(+), 10 deletions(-)
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 47388f0c0e59..af6f2f9c6a26 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -21,10 +21,14 @@ extern struct range pfn_mapped[E820_MAX_ENTRIES];
21 | 21 | ||
22 | static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE); | 22 | static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE); |
23 | 23 | ||
24 | static __init void *early_alloc(size_t size, int nid) | 24 | static __init void *early_alloc(size_t size, int nid, bool panic) |
25 | { | 25 | { |
26 | return memblock_virt_alloc_try_nid_nopanic(size, size, | 26 | if (panic) |
27 | __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid); | 27 | return memblock_virt_alloc_try_nid(size, size, |
28 | __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid); | ||
29 | else | ||
30 | return memblock_virt_alloc_try_nid_nopanic(size, size, | ||
31 | __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid); | ||
28 | } | 32 | } |
29 | 33 | ||
30 | static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr, | 34 | static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr, |
@@ -38,14 +42,14 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
38 | if (boot_cpu_has(X86_FEATURE_PSE) && | 42 | if (boot_cpu_has(X86_FEATURE_PSE) && |
39 | ((end - addr) == PMD_SIZE) && | 43 | ((end - addr) == PMD_SIZE) && |
40 | IS_ALIGNED(addr, PMD_SIZE)) { | 44 | IS_ALIGNED(addr, PMD_SIZE)) { |
41 | p = early_alloc(PMD_SIZE, nid); | 45 | p = early_alloc(PMD_SIZE, nid, false); |
42 | if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL)) | 46 | if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL)) |
43 | return; | 47 | return; |
44 | else if (p) | 48 | else if (p) |
45 | memblock_free(__pa(p), PMD_SIZE); | 49 | memblock_free(__pa(p), PMD_SIZE); |
46 | } | 50 | } |
47 | 51 | ||
48 | p = early_alloc(PAGE_SIZE, nid); | 52 | p = early_alloc(PAGE_SIZE, nid, true); |
49 | pmd_populate_kernel(&init_mm, pmd, p); | 53 | pmd_populate_kernel(&init_mm, pmd, p); |
50 | } | 54 | } |
51 | 55 | ||
@@ -57,7 +61,7 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
57 | if (!pte_none(*pte)) | 61 | if (!pte_none(*pte)) |
58 | continue; | 62 | continue; |
59 | 63 | ||
60 | p = early_alloc(PAGE_SIZE, nid); | 64 | p = early_alloc(PAGE_SIZE, nid, true); |
61 | entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL); | 65 | entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL); |
62 | set_pte_at(&init_mm, addr, pte, entry); | 66 | set_pte_at(&init_mm, addr, pte, entry); |
63 | } while (pte++, addr += PAGE_SIZE, addr != end); | 67 | } while (pte++, addr += PAGE_SIZE, addr != end); |
@@ -75,14 +79,14 @@ static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
75 | if (boot_cpu_has(X86_FEATURE_GBPAGES) && | 79 | if (boot_cpu_has(X86_FEATURE_GBPAGES) && |
76 | ((end - addr) == PUD_SIZE) && | 80 | ((end - addr) == PUD_SIZE) && |
77 | IS_ALIGNED(addr, PUD_SIZE)) { | 81 | IS_ALIGNED(addr, PUD_SIZE)) { |
78 | p = early_alloc(PUD_SIZE, nid); | 82 | p = early_alloc(PUD_SIZE, nid, false); |
79 | if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL)) | 83 | if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL)) |
80 | return; | 84 | return; |
81 | else if (p) | 85 | else if (p) |
82 | memblock_free(__pa(p), PUD_SIZE); | 86 | memblock_free(__pa(p), PUD_SIZE); |
83 | } | 87 | } |
84 | 88 | ||
85 | p = early_alloc(PAGE_SIZE, nid); | 89 | p = early_alloc(PAGE_SIZE, nid, true); |
86 | pud_populate(&init_mm, pud, p); | 90 | pud_populate(&init_mm, pud, p); |
87 | } | 91 | } |
88 | 92 | ||
@@ -101,7 +105,7 @@ static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
101 | unsigned long next; | 105 | unsigned long next; |
102 | 106 | ||
103 | if (p4d_none(*p4d)) { | 107 | if (p4d_none(*p4d)) { |
104 | void *p = early_alloc(PAGE_SIZE, nid); | 108 | void *p = early_alloc(PAGE_SIZE, nid, true); |
105 | 109 | ||
106 | p4d_populate(&init_mm, p4d, p); | 110 | p4d_populate(&init_mm, p4d, p); |
107 | } | 111 | } |
@@ -122,7 +126,7 @@ static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
122 | unsigned long next; | 126 | unsigned long next; |
123 | 127 | ||
124 | if (pgd_none(*pgd)) { | 128 | if (pgd_none(*pgd)) { |
125 | p = early_alloc(PAGE_SIZE, nid); | 129 | p = early_alloc(PAGE_SIZE, nid, true); |
126 | pgd_populate(&init_mm, pgd, p); | 130 | pgd_populate(&init_mm, pgd, p); |
127 | } | 131 | } |
128 | 132 | ||