aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kernel/setup.c
diff options
context:
space:
mode:
authorYinghai Lu <yinghai@kernel.org>2011-02-18 06:30:30 -0500
committerH. Peter Anvin <hpa@zytor.com>2011-03-19 14:58:19 -0400
commite5f15b45ddf3afa2bbbb10c7ea34fb32b6de0a0e (patch)
tree7e47d9bd25670ed0ed34bc572de42c5640454695 /arch/x86/kernel/setup.c
parent4981d01eada5354d81c8929d5b2836829ba3df7b (diff)
x86: Cleanup highmap after brk is concluded
Now cleanup_highmap actually is in two steps: one is early in head64.c and only clears above _end; a second one is in init_memory_mapping() and tries to clean from _brk_end to _end. It should check if those boundaries are PMD_SIZE aligned but currently does not. Also init_memory_mapping() is called several times for numa or memory hotplug, so we really should not handle initial kernel mappings there. This patch moves cleanup_highmap() down after _brk_end is settled so we can do everything in one step. Also we honor max_pfn_mapped in the implementation of cleanup_highmap. Signed-off-by: Yinghai Lu <yinghai@kernel.org> Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> LKML-Reference: <alpine.DEB.2.00.1103171739050.3382@kaball-desktop> Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch/x86/kernel/setup.c')
-rw-r--r--arch/x86/kernel/setup.c25
1 file changed, 3 insertions, 22 deletions
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index b176f2b1f45d..4a52a5f9afcb 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -294,30 +294,11 @@ static void __init init_gbpages(void)
294 else 294 else
295 direct_gbpages = 0; 295 direct_gbpages = 0;
296} 296}
297
298static void __init cleanup_highmap_brk_end(void)
299{
300 pud_t *pud;
301 pmd_t *pmd;
302
303 mmu_cr4_features = read_cr4();
304
305 /*
306 * _brk_end cannot change anymore, but it and _end may be
307 * located on different 2M pages. cleanup_highmap(), however,
308 * can only consider _end when it runs, so destroy any
309 * mappings beyond _brk_end here.
310 */
311 pud = pud_offset(pgd_offset_k(_brk_end), _brk_end);
312 pmd = pmd_offset(pud, _brk_end - 1);
313 while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1))
314 pmd_clear(pmd);
315}
316#else 297#else
317static inline void init_gbpages(void) 298static inline void init_gbpages(void)
318{ 299{
319} 300}
320static inline void cleanup_highmap_brk_end(void) 301static void __init cleanup_highmap(void)
321{ 302{
322} 303}
323#endif 304#endif
@@ -330,8 +311,6 @@ static void __init reserve_brk(void)
330 /* Mark brk area as locked down and no longer taking any 311 /* Mark brk area as locked down and no longer taking any
331 new allocations */ 312 new allocations */
332 _brk_start = 0; 313 _brk_start = 0;
333
334 cleanup_highmap_brk_end();
335} 314}
336 315
337#ifdef CONFIG_BLK_DEV_INITRD 316#ifdef CONFIG_BLK_DEV_INITRD
@@ -950,6 +929,8 @@ void __init setup_arch(char **cmdline_p)
950 */ 929 */
951 reserve_brk(); 930 reserve_brk();
952 931
932 cleanup_highmap();
933
953 memblock.current_limit = get_max_mapped(); 934 memblock.current_limit = get_max_mapped();
954 memblock_x86_fill(); 935 memblock_x86_fill();
955 936