author		Yinghai Lu <yinghai@kernel.org>		2012-11-16 22:39:04 -0500
committer	H. Peter Anvin <hpa@linux.intel.com>	2012-11-17 14:59:27 -0500
commit		22c8ca2ac256bb681be791858b35502b5d37e73b (patch)
tree		7eab01f0ad05e228f9d9c01da85503e6fafdb4f2 /arch/x86/mm/init.c
parent		6f80b68e9e515547edbacb0c37491730bf766db5 (diff)
x86, mm: Add alloc_low_pages(num)
The 32-bit kmap mapping needs its pages to be handed out from low
addresses to high.  At this point those pages still come from the
pgt_buf_* BRK area, so the ordering is fine for now.

However, we want to move early_ioremap_page_table_range_init() out of
init_memory_mapping() and call it only once, later on; that will make
page_table_range_init()/page_table_kmap_check()/alloc_low_page() get
their pages from memblock instead.

memblock allocates pages from high addresses to low, so
page_table_kmap_check(), which has a BUG_ON that verifies the ordering,
would panic.

This patch adds alloc_low_pages(num) to make it possible to allocate
several pages up front and then hand them out one by one from low to
high.
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1353123563-3103-28-git-send-email-yinghai@kernel.org
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
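For illustration only (not part of the patch): a minimal, self-contained sketch of the hand-out order described above. All names and sizes here are made up, and a plain array stands in for memblock. The batch of pages is reserved once from the top of the pool (mimicking memblock's high-to-low behaviour), but the pages inside the batch are then consumed at strictly increasing addresses, which is the ordering the kmap BUG_ON check expects.

/* Standalone illustration only -- not kernel code.  Pool, names and
 * sizes are invented.  A batch of pages is carved once from the *top*
 * of the pool (like memblock's high-to-low allocation), but callers
 * then receive the pages one by one from low to high addresses. */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE	4096UL
#define POOL_PAGES	16UL

static unsigned char pool[POOL_PAGES * PAGE_SIZE];
static unsigned long pool_top = POOL_PAGES;	/* allocator hands out top-down */

static void *alloc_low_pages_demo(unsigned int num)
{
	unsigned int i;

	pool_top -= num;			/* one top-down reservation */
	for (i = 0; i < num; i++)		/* clear each page, low to high */
		memset(pool + (pool_top + i) * PAGE_SIZE, 0, PAGE_SIZE);

	return pool + pool_top * PAGE_SIZE;	/* lowest page of the batch */
}

int main(void)
{
	unsigned char *base = alloc_low_pages_demo(4);
	unsigned int i;

	/* Consumers walk upwards, so each page sits above the previous one. */
	for (i = 0; i < 4; i++)
		printf("page %u at pool offset %lu\n", i,
		       (unsigned long)(base - pool) + i * PAGE_SIZE);
	return 0;
}

Running this prints four strictly increasing offsets even though the batch itself was carved from the high end of the pool.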
Diffstat (limited to 'arch/x86/mm/init.c')
-rw-r--r--	arch/x86/mm/init.c	33
1 file changed, 21 insertions(+), 12 deletions(-)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 21173fcdb4a1..02cea14c6d0c 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -25,36 +25,45 @@ unsigned long __meminitdata pgt_buf_top;
 
 static unsigned long min_pfn_mapped;
 
-__ref void *alloc_low_page(void)
+__ref void *alloc_low_pages(unsigned int num)
 {
 	unsigned long pfn;
-	void *adr;
+	int i;
 
 #ifdef CONFIG_X86_64
 	if (after_bootmem) {
-		adr = (void *)get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
+		unsigned int order;
 
-		return adr;
+		order = get_order((unsigned long)num << PAGE_SHIFT);
+		return (void *)__get_free_pages(GFP_ATOMIC | __GFP_NOTRACK |
+						__GFP_ZERO, order);
 	}
 #endif
 
-	if ((pgt_buf_end + 1) >= pgt_buf_top) {
+	if ((pgt_buf_end + num) >= pgt_buf_top) {
 		unsigned long ret;
 		if (min_pfn_mapped >= max_pfn_mapped)
 			panic("alloc_low_page: ran out of memory");
 		ret = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT,
 					max_pfn_mapped << PAGE_SHIFT,
-					PAGE_SIZE, PAGE_SIZE);
+					PAGE_SIZE * num , PAGE_SIZE);
 		if (!ret)
 			panic("alloc_low_page: can not alloc memory");
-		memblock_reserve(ret, PAGE_SIZE);
+		memblock_reserve(ret, PAGE_SIZE * num);
 		pfn = ret >> PAGE_SHIFT;
-	} else
-		pfn = pgt_buf_end++;
+	} else {
+		pfn = pgt_buf_end;
+		pgt_buf_end += num;
+	}
+
+	for (i = 0; i < num; i++) {
+		void *adr;
+
+		adr = __va((pfn + i) << PAGE_SHIFT);
+		clear_page(adr);
+	}
 
-	adr = __va(pfn * PAGE_SIZE);
-	clear_page(adr);
-	return adr;
+	return __va(pfn << PAGE_SHIFT);
 }
 
 /* need 4 4k for initial PMD_SIZE, 4k for 0-ISA_END_ADDRESS */
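A usage note, as a sketch rather than part of this diff: callers that only ever need one page can keep the old alloc_low_page() interface as a thin wrapper over the new function; the x86 tree later carries a helper of essentially this shape.

/* Sketch only -- not part of this patch.  Keeps the old single-page
 * interface as a trivial wrapper around alloc_low_pages(). */
static inline void *alloc_low_page(void)
{
	return alloc_low_pages(1);
}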