about summary refs log tree commit diff stats
path: root/arch/x86
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2008-01-30 07:33:58 -0500
committerIngo Molnar <mingo@elte.hu>2008-01-30 07:33:58 -0500
commit12d6f21eacc21d84a809829543f2fe45c7e37319 (patch)
tree6985f2370ad238fb2a568547a5049751d7c95a69 /arch/x86
parent9a3dc7804e9856668caef41efc54179e61ffccc0 (diff)
x86: do not PSE on CONFIG_DEBUG_PAGEALLOC=y
Get more testing of the c_p_a() code done by not turning off PSE on DEBUG_PAGEALLOC. This simplifies the early pagetable setup code, and tests the largepage-splitup code quite heavily. In the end, all the largepages will be split up pretty quickly, so there's no difference to how DEBUG_PAGEALLOC worked before. Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/kernel/cpu/common.c7
-rw-r--r--arch/x86/mm/pageattr_32.c12
2 files changed, 11 insertions, 8 deletions
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index bba850b05d0e..db28aa9e2f69 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -641,13 +641,6 @@ void __init early_cpu_init(void)
641 nexgen_init_cpu(); 641 nexgen_init_cpu();
642 umc_init_cpu(); 642 umc_init_cpu();
643 early_cpu_detect(); 643 early_cpu_detect();
644
645#ifdef CONFIG_DEBUG_PAGEALLOC
646 /* pse is not compatible with on-the-fly unmapping,
647 * disable it even if the cpus claim to support it.
648 */
649 setup_clear_cpu_cap(X86_FEATURE_PSE);
650#endif
651} 644}
652 645
653/* Make sure %fs is initialized properly in idle threads */ 646/* Make sure %fs is initialized properly in idle threads */
diff --git a/arch/x86/mm/pageattr_32.c b/arch/x86/mm/pageattr_32.c
index 9cf2fea54eb5..dd49b16b3a0e 100644
--- a/arch/x86/mm/pageattr_32.c
+++ b/arch/x86/mm/pageattr_32.c
@@ -61,13 +61,17 @@ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
61static int split_large_page(pte_t *kpte, unsigned long address) 61static int split_large_page(pte_t *kpte, unsigned long address)
62{ 62{
63 pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte)); 63 pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
64 gfp_t gfp_flags = GFP_KERNEL;
64 unsigned long flags; 65 unsigned long flags;
65 unsigned long addr; 66 unsigned long addr;
66 pte_t *pbase, *tmp; 67 pte_t *pbase, *tmp;
67 struct page *base; 68 struct page *base;
68 int i, level; 69 int i, level;
69 70
70 base = alloc_pages(GFP_KERNEL, 0); 71#ifdef CONFIG_DEBUG_PAGEALLOC
72 gfp_flags = GFP_ATOMIC;
73#endif
74 base = alloc_pages(gfp_flags, 0);
71 if (!base) 75 if (!base)
72 return -ENOMEM; 76 return -ENOMEM;
73 77
@@ -219,6 +223,12 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
219 } 223 }
220 224
221 /* 225 /*
226 * If page allocator is not up yet then do not call c_p_a():
227 */
228 if (!debug_pagealloc_enabled)
229 return;
230
231 /*
222 * the return value is ignored - the calls cannot fail, 232 * the return value is ignored - the calls cannot fail,
223 * large pages are disabled at boot time. 233 * large pages are disabled at boot time.
224 */ 234 */