author		Yinghai Lu <yinghai@kernel.org>	2011-02-18 06:30:30 -0500
committer	H. Peter Anvin <hpa@zytor.com>	2011-03-19 14:58:19 -0400
commit		e5f15b45ddf3afa2bbbb10c7ea34fb32b6de0a0e
tree		7e47d9bd25670ed0ed34bc572de42c5640454695 /arch/x86
parent		4981d01eada5354d81c8929d5b2836829ba3df7b
x86: Cleanup highmap after brk is concluded
Right now cleanup_highmap actually runs in two steps: one is early in head64.c
and only clears above _end; a second one is in init_memory_mapping() and tries
to clean from _brk_end to _end. It should check whether those boundaries are
PMD_SIZE aligned, but currently does not. Also, init_memory_mapping() is
called several times for numa or memory hotplug, so we really should not
handle initial kernel mappings there.

This patch moves cleanup_highmap() down, after _brk_end is settled, so we can
do everything in one step. Also we honor max_pfn_mapped in the implementation
of cleanup_highmap.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
LKML-Reference: <alpine.DEB.2.00.1103171739050.3382@kaball-desktop>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
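The alignment concern above is easy to see in isolation. Below is a minimal
standalone sketch of the boundary arithmetic (all addresses are hypothetical,
not the kernel's real link-time symbols; PMD_SIZE is 2 MiB on x86-64). It
shows that _brk_end and _end can land on different 2M pages, so the single
late cleanup must derive its boundary from _brk_end rounded up to PMD_SIZE
rather than assume the two coincide:

#include <stdio.h>

#define PMD_SIZE (2UL << 20)                         /* 2 MiB, as on x86-64 */
#define ROUNDUP(x, a) (((x) + (a) - 1) & ~((a) - 1)) /* a: power of two */

int main(void)
{
	/* Hypothetical link-time addresses, for illustration only. */
	unsigned long _text    = 0xffffffff81000000UL;
	unsigned long _brk_end = 0xffffffff8234a000UL;   /* mid 2M page */
	unsigned long _end     = 0xffffffff82561000UL;

	unsigned long end = ROUNDUP(_brk_end, PMD_SIZE) - 1;

	/* _brk_end and _end sit on different 2M pages here: */
	printf("mappings kept:         [%#lx, %#lx]\n", _text, end);
	printf("_end's 2M page ends at %#lx\n", ROUNDUP(_end, PMD_SIZE) - 1);
	return 0;
}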
Diffstat (limited to 'arch/x86')
 arch/x86/kernel/head64.c |  3 ---
 arch/x86/kernel/setup.c  | 25 +++----------------------
 arch/x86/mm/init_64.c    | 11 ++++++-----
 3 files changed, 9 insertions(+), 30 deletions(-)
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 2d2673c28aff..5655c2272adb 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -77,9 +77,6 @@ void __init x86_64_start_kernel(char * real_mode_data)
 	/* Make NULL pointers segfault */
 	zap_identity_mappings();
 
-	/* Cleanup the over mapped high alias */
-	cleanup_highmap();
-
 	max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT;
 
 	for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) {
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index b176f2b1f45d..4a52a5f9afcb 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -294,30 +294,11 @@ static void __init init_gbpages(void)
 	else
 		direct_gbpages = 0;
 }
-
-static void __init cleanup_highmap_brk_end(void)
-{
-	pud_t *pud;
-	pmd_t *pmd;
-
-	mmu_cr4_features = read_cr4();
-
-	/*
-	 * _brk_end cannot change anymore, but it and _end may be
-	 * located on different 2M pages. cleanup_highmap(), however,
-	 * can only consider _end when it runs, so destroy any
-	 * mappings beyond _brk_end here.
-	 */
-	pud = pud_offset(pgd_offset_k(_brk_end), _brk_end);
-	pmd = pmd_offset(pud, _brk_end - 1);
-	while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1))
-		pmd_clear(pmd);
-}
 #else
 static inline void init_gbpages(void)
 {
 }
-static inline void cleanup_highmap_brk_end(void)
+static void __init cleanup_highmap(void)
 {
 }
 #endif
@@ -330,8 +311,6 @@ static void __init reserve_brk(void)
 	/* Mark brk area as locked down and no longer taking any
 	   new allocations */
 	_brk_start = 0;
-
-	cleanup_highmap_brk_end();
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -950,6 +929,8 @@ void __init setup_arch(char **cmdline_p)
 	 */
 	reserve_brk();
 
+	cleanup_highmap();
+
 	memblock.current_limit = get_max_mapped();
 	memblock_x86_fill();
 
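For contrast with the helper deleted above, here is a hedged userspace model
of what cleanup_highmap_brk_end() was doing: find the PMD slot holding
_brk_end - 1, then clear every slot from there up to the one holding
_end - 1. The constants mirror x86-64's page-table geometry, but the
addresses and the pmd_index() helper are made up for illustration (the
kernel uses the pud_offset()/pmd_offset() accessors instead):

#include <stdio.h>

#define PMD_SHIFT    21   /* a PMD entry maps 2 MiB on x86-64 */
#define PTRS_PER_PMD 512

/* Illustrative stand-in for pmd_offset()'s index computation. */
static unsigned long pmd_index(unsigned long addr)
{
	return (addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

int main(void)
{
	unsigned long _brk_end = 0xffffffff8234a000UL;   /* hypothetical */
	unsigned long _end     = 0xffffffff82561000UL;   /* hypothetical */

	unsigned long slot = pmd_index(_brk_end - 1);
	unsigned long last = pmd_index(_end - 1);

	/* Mirrors the removed "while (++pmd <= ...) pmd_clear(pmd);" walk. */
	while (++slot <= last)
		printf("would clear pmd slot %lu\n", slot);
	return 0;
}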
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index a08a62cb136e..7026505a33ba 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -51,6 +51,7 @@
 #include <asm/numa.h>
 #include <asm/cacheflush.h>
 #include <asm/init.h>
+#include <asm/setup.h>
 
 static int __init parse_direct_gbpages_off(char *arg)
 {
@@ -293,18 +294,18 @@ void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
  * to the compile time generated pmds. This results in invalid pmds up
  * to the point where we hit the physaddr 0 mapping.
  *
- * We limit the mappings to the region from _text to _end.  _end is
- * rounded up to the 2MB boundary. This catches the invalid pmds as
+ * We limit the mappings to the region from _text to _brk_end.  _brk_end
+ * is rounded up to the 2MB boundary. This catches the invalid pmds as
  * well, as they are located before _text:
  */
 void __init cleanup_highmap(void)
 {
 	unsigned long vaddr = __START_KERNEL_map;
-	unsigned long end = roundup((unsigned long)_end, PMD_SIZE) - 1;
+	unsigned long vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);
+	unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
 	pmd_t *pmd = level2_kernel_pgt;
-	pmd_t *last_pmd = pmd + PTRS_PER_PMD;
 
-	for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) {
+	for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
 		if (pmd_none(*pmd))
 			continue;
 		if (vaddr < (unsigned long) _text || vaddr > end)
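Taken together, the new cleanup_highmap() walks level2_kernel_pgt one 2M
entry at a time, stops at the max_pfn_mapped boundary, and clears every
present entry outside [_text, end]. A hedged userspace simulation of that
loop follows (an array stands in for level2_kernel_pgt, and all addresses
and sizes are invented for the example; the extra i < PTRS_PER_PMD bound
just keeps the toy array in range):

#include <stdio.h>
#include <string.h>

#define PMD_SIZE     (2UL << 20)
#define PTRS_PER_PMD 512
#define PAGE_SHIFT   12

int main(void)
{
	unsigned long pmd[PTRS_PER_PMD];                 /* toy level2_kernel_pgt */
	unsigned long start_map = 0xffffffff80000000UL;  /* __START_KERNEL_map */
	unsigned long text      = 0xffffffff81000000UL;  /* hypothetical _text */
	unsigned long brk_end   = 0xffffffff8234a000UL;  /* hypothetical _brk_end */
	unsigned long max_pfn_mapped = 0x20000;          /* 512 MiB >> PAGE_SHIFT */

	unsigned long vaddr_end = start_map + (max_pfn_mapped << PAGE_SHIFT);
	unsigned long end = ((brk_end + PMD_SIZE - 1) & ~(PMD_SIZE - 1)) - 1;
	unsigned long vaddr = start_map;
	int i, cleared = 0;

	memset(pmd, 0xff, sizeof(pmd));                  /* pretend all slots map */

	for (i = 0; vaddr + PMD_SIZE - 1 < vaddr_end && i < PTRS_PER_PMD;
	     i++, vaddr += PMD_SIZE) {
		if (!pmd[i])                             /* pmd_none() */
			continue;
		if (vaddr < text || vaddr > end) {       /* outside [_text, end] */
			pmd[i] = 0;                      /* the kernel clears the pmd here */
			cleared++;
		}
	}
	printf("cleared %d of %d slots\n", cleared, PTRS_PER_PMD);
	return 0;
}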