author     Joonsoo Kim <iamjoonsoo.kim@lge.com>            2016-03-17 17:17:59 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-03-17 18:09:34 -0400
commit     e7df0d88c455c915376397b4bd72a83b9ed656f7
tree       20fedca055774cf883d8d5fd74c427c78dc8ea50
parent     505f6d22dbc63f333d1178dc80264e40b5c35268
powerpc: query dynamic DEBUG_PAGEALLOC setting
We can disable debug_pagealloc processing even if the code is compiled
with CONFIG_DEBUG_PAGEALLOC.  This patch changes the code to query
whether it is enabled or not at runtime.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
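Every hunk below applies the same pattern: code that used to be compiled in or out under CONFIG_DEBUG_PAGEALLOC now stays built (the #ifdef is kept only where the surrounding data structures still depend on the config option) but only executes when debug_pagealloc_enabled() reports that the feature was actually requested at boot. A minimal, self-contained userspace sketch of that compile-time-to-runtime switch follows; debug_pagealloc_enabled() here is a stand-in for the kernel helper, and the fake boot_command_line string is purely illustrative.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for the kernel's boot command line. */
static const char *boot_command_line = "root=/dev/sda1 debug_pagealloc=on";

/*
 * Stand-in for the kernel helper of the same name: the real one reports
 * whether debug_pagealloc was requested at boot, even on kernels built
 * with CONFIG_DEBUG_PAGEALLOC.
 */
static bool debug_pagealloc_enabled(void)
{
	return strstr(boot_command_line, "debug_pagealloc=on") != NULL;
}

int main(void)
{
	/*
	 * Before: a block like this sat inside #ifdef CONFIG_DEBUG_PAGEALLOC
	 * and always ran on kernels built with the option.
	 * After: it is compiled in but skipped unless requested at boot.
	 */
	if (debug_pagealloc_enabled())
		printf("DEBUG_PAGEALLOC ");
	printf("NUMA\n");
	return 0;
}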
-rw-r--r--  arch/powerpc/kernel/traps.c      5
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c  36
-rw-r--r--  arch/powerpc/mm/init_32.c        8
3 files changed, 26 insertions, 23 deletions
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index b6becc795bb5..33c47fcc455a 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -203,9 +203,8 @@ static int __kprobes __die(const char *str, struct pt_regs *regs, long err)
 #ifdef CONFIG_SMP
 	printk("SMP NR_CPUS=%d ", NR_CPUS);
 #endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
-	printk("DEBUG_PAGEALLOC ");
-#endif
+	if (debug_pagealloc_enabled())
+		printk("DEBUG_PAGEALLOC ");
 #ifdef CONFIG_NUMA
 	printk("NUMA ");
 #endif
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index ba59d5977f34..1005281be9a6 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -255,8 +255,10 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 
 		if (ret < 0)
 			break;
+
 #ifdef CONFIG_DEBUG_PAGEALLOC
-		if ((paddr >> PAGE_SHIFT) < linear_map_hash_count)
+		if (debug_pagealloc_enabled() &&
+			(paddr >> PAGE_SHIFT) < linear_map_hash_count)
 			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 	}
@@ -512,17 +514,17 @@ static void __init htab_init_page_sizes(void)
 	if (mmu_has_feature(MMU_FTR_16M_PAGE))
 		memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
 		       sizeof(mmu_psize_defaults_gp));
- found:
-#ifndef CONFIG_DEBUG_PAGEALLOC
-	/*
-	 * Pick a size for the linear mapping. Currently, we only support
-	 * 16M, 1M and 4K which is the default
-	 */
-	if (mmu_psize_defs[MMU_PAGE_16M].shift)
-		mmu_linear_psize = MMU_PAGE_16M;
-	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
-		mmu_linear_psize = MMU_PAGE_1M;
-#endif /* CONFIG_DEBUG_PAGEALLOC */
+found:
+	if (!debug_pagealloc_enabled()) {
+		/*
+		 * Pick a size for the linear mapping. Currently, we only
+		 * support 16M, 1M and 4K which is the default
+		 */
+		if (mmu_psize_defs[MMU_PAGE_16M].shift)
+			mmu_linear_psize = MMU_PAGE_16M;
+		else if (mmu_psize_defs[MMU_PAGE_1M].shift)
+			mmu_linear_psize = MMU_PAGE_1M;
+	}
 
 #ifdef CONFIG_PPC_64K_PAGES
 	/*
@@ -721,10 +723,12 @@ static void __init htab_initialize(void)
 	prot = pgprot_val(PAGE_KERNEL);
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
-	linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
-	linear_map_hash_slots = __va(memblock_alloc_base(linear_map_hash_count,
-						    1, ppc64_rma_size));
-	memset(linear_map_hash_slots, 0, linear_map_hash_count);
+	if (debug_pagealloc_enabled()) {
+		linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
+		linear_map_hash_slots = __va(memblock_alloc_base(
+				linear_map_hash_count, 1, ppc64_rma_size));
+		memset(linear_map_hash_slots, 0, linear_map_hash_count);
+	}
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
 	/* On U3 based machines, we need to reserve the DART area and
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index a10be665b645..c2b771614d4f 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -112,10 +112,10 @@ void __init MMU_setup(void)
 	if (strstr(boot_command_line, "noltlbs")) {
 		__map_without_ltlbs = 1;
 	}
-#ifdef CONFIG_DEBUG_PAGEALLOC
-	__map_without_bats = 1;
-	__map_without_ltlbs = 1;
-#endif
+	if (debug_pagealloc_enabled()) {
+		__map_without_bats = 1;
+		__map_without_ltlbs = 1;
+	}
 }
 
 /*