author     Jacob Shin <jacob.shin@amd.com>          2012-10-24 15:24:44 -0400
committer  H. Peter Anvin <hpa@linux.intel.com>     2012-10-24 16:37:04 -0400
commit     844ab6f993b1d32eb40512503d35ff6ad0c57030 (patch)
tree       59889d2391056d36307b4c5d587f1db6c7952d30 /arch/x86/mm/init.c
parent     1f2ff682ac951ed82cc043cf140d2851084512df (diff)
x86, mm: Find_early_table_space based on ranges that are actually being mapped
Current logic finds enough space for direct mapping page tables from 0
to end. Instead, we only need to find enough space to cover mr[0].start
to mr[nr_range - 1].end -- the range that is actually being mapped by
init_memory_mapping().
This is needed after 1bbbbe779aabe1f0768c2bf8f8c0a5583679b54a, to address
the panic reported here:
https://lkml.org/lkml/2012/10/20/160
https://lkml.org/lkml/2012/10/21/157
Signed-off-by: Jacob Shin <jacob.shin@amd.com>
Link: http://lkml.kernel.org/r/20121024195311.GB11779@jshin-Toonie
Tested-by: Tom Rini <trini@ti.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/mm/init.c')
-rw-r--r--  arch/x86/mm/init.c | 70
1 file changed, 41 insertions(+), 29 deletions(-)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 8653b3a722be..bc287d62bf1e 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -29,36 +29,54 @@ int direct_gbpages
 #endif
 ;
 
-static void __init find_early_table_space(unsigned long end, int use_pse,
-                                          int use_gbpages)
+struct map_range {
+        unsigned long start;
+        unsigned long end;
+        unsigned page_size_mask;
+};
+
+/*
+ * First calculate space needed for kernel direct mapping page tables to cover
+ * mr[0].start to mr[nr_range - 1].end, while accounting for possible 2M and 1GB
+ * pages. Then find enough contiguous space for those page tables.
+ */
+static void __init find_early_table_space(struct map_range *mr, int nr_range)
 {
-        unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
+        int i;
+        unsigned long puds = 0, pmds = 0, ptes = 0, tables;
+        unsigned long start = 0, good_end;
         phys_addr_t base;
 
-        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-        tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
+        for (i = 0; i < nr_range; i++) {
+                unsigned long range, extra;
 
-        if (use_gbpages) {
-                unsigned long extra;
+                range = mr[i].end - mr[i].start;
+                puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;
 
-                extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
-                pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
-        } else
-                pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
-
-        tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
+                if (mr[i].page_size_mask & (1 << PG_LEVEL_1G)) {
+                        extra = range - ((range >> PUD_SHIFT) << PUD_SHIFT);
+                        pmds += (extra + PMD_SIZE - 1) >> PMD_SHIFT;
+                } else {
+                        pmds += (range + PMD_SIZE - 1) >> PMD_SHIFT;
+                }
 
-        if (use_pse) {
-                unsigned long extra;
-
-                extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
+                if (mr[i].page_size_mask & (1 << PG_LEVEL_2M)) {
+                        extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT);
 #ifdef CONFIG_X86_32
-                extra += PMD_SIZE;
+                        extra += PMD_SIZE;
 #endif
-                ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
-        } else
-                ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
+                        /* The first 2/4M doesn't use large pages. */
+                        if (mr[i].start < PMD_SIZE)
+                                extra += range;
+
+                        ptes += (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
+                } else {
+                        ptes += (range + PAGE_SIZE - 1) >> PAGE_SHIFT;
+                }
+        }
 
+        tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
+        tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
         tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
 
 #ifdef CONFIG_X86_32
@@ -76,7 +94,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
         pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
 
         printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx]\n",
-                end - 1, pgt_buf_start << PAGE_SHIFT,
+                mr[nr_range - 1].end - 1, pgt_buf_start << PAGE_SHIFT,
                 (pgt_buf_top << PAGE_SHIFT) - 1);
 }
 
@@ -85,12 +103,6 @@ void __init native_pagetable_reserve(u64 start, u64 end)
         memblock_reserve(start, end - start);
 }
 
-struct map_range {
-        unsigned long start;
-        unsigned long end;
-        unsigned page_size_mask;
-};
-
 #ifdef CONFIG_X86_32
 #define NR_RANGE_MR 3
 #else /* CONFIG_X86_64 */
@@ -263,7 +275,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
          * nodes are discovered.
          */
         if (!after_bootmem)
-                find_early_table_space(end, use_pse, use_gbpages);
+                find_early_table_space(mr, nr_range);
 
         for (i = 0; i < nr_range; i++)
                 ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
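
For readers who want to see the new per-range arithmetic in isolation, below is a minimal standalone userspace sketch of the estimate that the patched find_early_table_space() performs, as described in the commit message above. It is illustrative only, not kernel code: the PAGE_SHIFT/PMD_SHIFT/PUD_SHIFT values, the 8-byte page-table entry sizes and the PG_LEVEL_2M/PG_LEVEL_1G bit positions are assumed x86_64 values, unsigned long is assumed to be 64 bits, the CONFIG_X86_32 adjustment is omitted, and the file name, the table_space() helper and the ROUNDUP macro are local to this sketch.

/*
 * space_estimate.c - standalone sketch of the per-range page table space
 * estimate introduced by this patch.  Illustrative only; nothing here is
 * kernel code.  Assumes x86_64 shift values, 8-byte table entries and a
 * 64-bit unsigned long; the CONFIG_X86_32 case is left out.
 */
#include <stdio.h>

#define PAGE_SHIFT      12
#define PMD_SHIFT       21
#define PUD_SHIFT       30
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PUD_SIZE        (1UL << PUD_SHIFT)

#define PG_LEVEL_2M     2       /* bit positions as in the kernel's enum pg_level */
#define PG_LEVEL_1G     3

/* Round x up to the next multiple of y (a power of two here). */
#define ROUNDUP(x, y)   ((((x) + (y) - 1) / (y)) * (y))

struct map_range {
        unsigned long start;
        unsigned long end;
        unsigned page_size_mask;
};

/*
 * Estimate the bytes of page tables needed to direct-map mr[0..nr_range-1],
 * the way the patched find_early_table_space() does: one PUD entry per 1G
 * covered, PMD entries only for what 1G pages do not cover, and PTE entries
 * only for what 2M pages do not cover.
 */
static unsigned long table_space(const struct map_range *mr, int nr_range)
{
        unsigned long puds = 0, pmds = 0, ptes = 0, tables;
        int i;

        for (i = 0; i < nr_range; i++) {
                unsigned long range = mr[i].end - mr[i].start;
                unsigned long extra;

                puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;

                if (mr[i].page_size_mask & (1 << PG_LEVEL_1G)) {
                        extra = range - ((range >> PUD_SHIFT) << PUD_SHIFT);
                        pmds += (extra + PMD_SIZE - 1) >> PMD_SHIFT;
                } else {
                        pmds += (range + PMD_SIZE - 1) >> PMD_SHIFT;
                }

                if (mr[i].page_size_mask & (1 << PG_LEVEL_2M)) {
                        extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT);
                        /* A range starting below PMD_SIZE gets PTE space for
                         * all of it: the first 2/4M doesn't use large pages. */
                        if (mr[i].start < PMD_SIZE)
                                extra += range;
                        ptes += (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
                } else {
                        ptes += (range + PAGE_SIZE - 1) >> PAGE_SHIFT;
                }
        }

        tables  = ROUNDUP(puds * 8, PAGE_SIZE);         /* sizeof(pud_t) == 8 */
        tables += ROUNDUP(pmds * 8, PAGE_SIZE);         /* sizeof(pmd_t) == 8 */
        tables += ROUNDUP(ptes * 8, PAGE_SIZE);         /* sizeof(pte_t) == 8 */
        return tables;
}

int main(void)
{
        /* One range, 2M..4G, mapped with 2M pages: 4 PUD entries and
         * 2047 PMD entries, no PTEs. */
        struct map_range mr[] = {
                { 0x200000UL, 0x100000000UL, 1 << PG_LEVEL_2M },
        };

        printf("estimated page table space: %lu bytes\n", table_space(mr, 1));
        return 0;
}

Run on an LP64 host this prints 20480 bytes for the single 2M..4G range mapped with 2M pages: one page of PUD entries, four pages of PMD entries and no PTE pages. The if (mr[i].start < PMD_SIZE) branch mirrors the new comment in the patch ("The first 2/4M doesn't use large pages"), budgeting enough PTE space to map such a range entirely with 4K pages.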