author     Joonsoo Kim <iamjoonsoo.kim@lge.com>            2014-12-12 19:55:49 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-12-13 15:42:48 -0500
commit     e30825f1869a75b29a69dc8e0aaaaccc492092cf (patch)
tree       33adc902e098cf23d8a8b9d97ad01b1d06e28e93
parent     eefa864b701d78dc9753c70a3540a2e9ae192595 (diff)
mm/debug-pagealloc: prepare boottime configurable on/off
Until now, debug-pagealloc has needed extra flags in struct page, so we had to recompile the whole source tree whenever we decided to use it. This is really painful, because recompiling takes time and sometimes a rebuild is not possible at all because a third-party module depends on the layout of struct page. So we could not use this good feature in many cases.

Now we have the page extension feature, which allows us to keep extra flags outside of struct page. This gets rid of the third-party module issue mentioned above, and it also lets us decide at boot time whether we need the extra memory for the page extension. With this property, a kernel built with CONFIG_DEBUG_PAGEALLOC can leave debug-pagealloc disabled at boot time with low computational overhead. This will help our development process greatly.

This patch is the preparation step towards that goal. debug-pagealloc originally used an extra field of struct page; after this patch it uses a field of struct page_ext instead. Because memory for page_ext is allocated later than the page allocator is initialized under CONFIG_SPARSEMEM, the debug-pagealloc feature must be disabled temporarily until page_ext has been initialized. This patch implements that.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Dave Hansen <dave@sr71.net>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Jungsoo Son <jungsoo.son@lge.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
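As background for the diff below, here is a minimal, self-contained userspace sketch of the page_ext pattern the commit message describes; every name in it (page_desc, ext_flags, lookup_ext, page_ext_init and so on) is a hypothetical illustration, not a kernel API. It demonstrates the property the patch relies on: the debug flag is kept in a separately allocated side array rather than in the page descriptor, a boot-time step decides whether that array exists at all, and every accessor bails out while the feature is disabled.

/*
 * Minimal userspace model of the page_ext approach (hypothetical names,
 * not kernel code): the per-page debug flag lives in a side array instead
 * of inside the page descriptor, and the side array is only allocated
 * when the feature is requested at "boot" time.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_PAGES	16
#define EXT_DEBUG_GUARD	(1UL << 0)

struct page_desc {
	int refcount;			/* stays small: no debug_flags field */
};

static unsigned long *ext_flags;	/* side array, may stay NULL */
static bool guard_enabled;

/* "boot time": allocate the extension only if the feature is wanted */
static void page_ext_init(bool want_guard)
{
	if (!want_guard)
		return;			/* zero memory cost when off */
	ext_flags = calloc(NR_PAGES, sizeof(*ext_flags));
	guard_enabled = (ext_flags != NULL);
}

/* analogue of lookup_page_ext(): map a page to its out-of-band flags */
static unsigned long *lookup_ext(int pfn)
{
	return &ext_flags[pfn];
}

static void set_guard(int pfn)
{
	if (!guard_enabled)
		return;			/* mirrors the "disabled until init" bail-out */
	*lookup_ext(pfn) |= EXT_DEBUG_GUARD;
}

static bool page_is_guard(int pfn)
{
	return guard_enabled && (*lookup_ext(pfn) & EXT_DEBUG_GUARD);
}

int main(void)
{
	page_ext_init(true);
	set_guard(3);
	printf("page 3 guard: %d, page 4 guard: %d\n",
	       page_is_guard(3), page_is_guard(4));
	return 0;
}

This is why the patch can keep struct page unchanged and why debug-pagealloc must stay disabled until page_ext is initialized.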
-rw-r--r--  include/linux/mm.h                19
-rw-r--r--  include/linux/mm_types.h           4
-rw-r--r--  include/linux/page-debug-flags.h  32
-rw-r--r--  include/linux/page_ext.h          15
-rw-r--r--  mm/Kconfig.debug                   1
-rw-r--r--  mm/debug-pagealloc.c              37
-rw-r--r--  mm/page_alloc.c                   38
-rw-r--r--  mm/page_ext.c                      4
8 files changed, 106 insertions(+), 44 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3b337efbe533..66560f1a0564 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -19,6 +19,7 @@
 #include <linux/bit_spinlock.h>
 #include <linux/shrinker.h>
 #include <linux/resource.h>
+#include <linux/page_ext.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -2155,20 +2156,36 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
 				unsigned int pages_per_huge_page);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
+extern struct page_ext_operations debug_guardpage_ops;
+extern struct page_ext_operations page_poisoning_ops;
+
 #ifdef CONFIG_DEBUG_PAGEALLOC
 extern unsigned int _debug_guardpage_minorder;
+extern bool _debug_guardpage_enabled;
 
 static inline unsigned int debug_guardpage_minorder(void)
 {
 	return _debug_guardpage_minorder;
 }
 
+static inline bool debug_guardpage_enabled(void)
+{
+	return _debug_guardpage_enabled;
+}
+
 static inline bool page_is_guard(struct page *page)
 {
-	return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+	struct page_ext *page_ext;
+
+	if (!debug_guardpage_enabled())
+		return false;
+
+	page_ext = lookup_page_ext(page);
+	return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 }
 #else
 static inline unsigned int debug_guardpage_minorder(void) { return 0; }
+static inline bool debug_guardpage_enabled(void) { return false; }
 static inline bool page_is_guard(struct page *page) { return false; }
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index fc2daffa9db1..6d34aa266a8c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -10,7 +10,6 @@
 #include <linux/rwsem.h>
 #include <linux/completion.h>
 #include <linux/cpumask.h>
-#include <linux/page-debug-flags.h>
 #include <linux/uprobes.h>
 #include <linux/page-flags-layout.h>
 #include <asm/page.h>
@@ -186,9 +185,6 @@ struct page {
 	void *virtual;			/* Kernel virtual address (NULL if
 					   not kmapped, ie. highmem) */
 #endif /* WANT_PAGE_VIRTUAL */
-#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
-	unsigned long debug_flags;	/* Use atomic bitops on this */
-#endif
 
 #ifdef CONFIG_KMEMCHECK
 	/*
diff --git a/include/linux/page-debug-flags.h b/include/linux/page-debug-flags.h
deleted file mode 100644
index 22691f614043..000000000000
--- a/include/linux/page-debug-flags.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef LINUX_PAGE_DEBUG_FLAGS_H
-#define LINUX_PAGE_DEBUG_FLAGS_H
-
-/*
- * page->debug_flags bits:
- *
- * PAGE_DEBUG_FLAG_POISON is set for poisoned pages. This is used to
- * implement generic debug pagealloc feature. The pages are filled with
- * poison patterns and set this flag after free_pages(). The poisoned
- * pages are verified whether the patterns are not corrupted and clear
- * the flag before alloc_pages().
- */
-
-enum page_debug_flags {
-	PAGE_DEBUG_FLAG_POISON,		/* Page is poisoned */
-	PAGE_DEBUG_FLAG_GUARD,
-};
-
-/*
- * Ensure that CONFIG_WANT_PAGE_DEBUG_FLAGS reliably
- * gets turned off when no debug features are enabling it!
- */
-
-#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
-#if !defined(CONFIG_PAGE_POISONING) && \
-	!defined(CONFIG_PAGE_GUARD) \
-/* && !defined(CONFIG_PAGE_DEBUG_SOMETHING_ELSE) && ... */
-#error WANT_PAGE_DEBUG_FLAGS is turned on with no debug features!
-#endif
-#endif /* CONFIG_WANT_PAGE_DEBUG_FLAGS */
-
-#endif /* LINUX_PAGE_DEBUG_FLAGS_H */
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index 2ccc8b414e5c..61c0f05f9069 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -10,6 +10,21 @@ struct page_ext_operations {
 #ifdef CONFIG_PAGE_EXTENSION
 
 /*
+ * page_ext->flags bits:
+ *
+ * PAGE_EXT_DEBUG_POISON is set for poisoned pages. This is used to
+ * implement generic debug pagealloc feature. The pages are filled with
+ * poison patterns and set this flag after free_pages(). The poisoned
+ * pages are verified whether the patterns are not corrupted and clear
+ * the flag before alloc_pages().
+ */
+
+enum page_ext_flags {
+	PAGE_EXT_DEBUG_POISON,		/* Page is poisoned */
+	PAGE_EXT_DEBUG_GUARD,
+};
+
+/*
  * Page Extension can be considered as an extended mem_map.
  * A page_ext page is associated with every page descriptor. The
  * page_ext helps us add more information about the page.
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index 1ba81c7769f7..56badfc4810a 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -12,6 +12,7 @@ config DEBUG_PAGEALLOC
 	depends on DEBUG_KERNEL
 	depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC
 	depends on !KMEMCHECK
+	select PAGE_EXTENSION
 	select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	select PAGE_GUARD if ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	---help---
diff --git a/mm/debug-pagealloc.c b/mm/debug-pagealloc.c
index 789ff70c8a4a..0072f2c53331 100644
--- a/mm/debug-pagealloc.c
+++ b/mm/debug-pagealloc.c
@@ -2,23 +2,49 @@
 #include <linux/string.h>
 #include <linux/mm.h>
 #include <linux/highmem.h>
-#include <linux/page-debug-flags.h>
+#include <linux/page_ext.h>
 #include <linux/poison.h>
 #include <linux/ratelimit.h>
 
+static bool page_poisoning_enabled __read_mostly;
+
+static bool need_page_poisoning(void)
+{
+	return true;
+}
+
+static void init_page_poisoning(void)
+{
+	page_poisoning_enabled = true;
+}
+
+struct page_ext_operations page_poisoning_ops = {
+	.need = need_page_poisoning,
+	.init = init_page_poisoning,
+};
+
 static inline void set_page_poison(struct page *page)
 {
-	__set_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
+	struct page_ext *page_ext;
+
+	page_ext = lookup_page_ext(page);
+	__set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
 }
 
 static inline void clear_page_poison(struct page *page)
 {
-	__clear_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
+	struct page_ext *page_ext;
+
+	page_ext = lookup_page_ext(page);
+	__clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
 }
 
 static inline bool page_poison(struct page *page)
 {
-	return test_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
+	struct page_ext *page_ext;
+
+	page_ext = lookup_page_ext(page);
+	return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
 }
 
 static void poison_page(struct page *page)
@@ -95,6 +121,9 @@ static void unpoison_pages(struct page *page, int n)
 
 void kernel_map_pages(struct page *page, int numpages, int enable)
 {
+	if (!page_poisoning_enabled)
+		return;
+
 	if (enable)
 		unpoison_pages(page, numpages);
 	else
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b64666cf5865..e0a39d328ca1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -56,7 +56,7 @@
 #include <linux/prefetch.h>
 #include <linux/mm_inline.h>
 #include <linux/migrate.h>
-#include <linux/page-debug-flags.h>
+#include <linux/page_ext.h>
 #include <linux/hugetlb.h>
 #include <linux/sched/rt.h>
 
@@ -425,6 +425,22 @@ static inline void prep_zero_page(struct page *page, unsigned int order,
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 unsigned int _debug_guardpage_minorder;
+bool _debug_guardpage_enabled __read_mostly;
+
+static bool need_debug_guardpage(void)
+{
+	return true;
+}
+
+static void init_debug_guardpage(void)
+{
+	_debug_guardpage_enabled = true;
+}
+
+struct page_ext_operations debug_guardpage_ops = {
+	.need = need_debug_guardpage,
+	.init = init_debug_guardpage,
+};
 
 static int __init debug_guardpage_minorder_setup(char *buf)
 {
@@ -443,7 +459,14 @@ __setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
 static inline void set_page_guard(struct zone *zone, struct page *page,
 				unsigned int order, int migratetype)
 {
-	__set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+	struct page_ext *page_ext;
+
+	if (!debug_guardpage_enabled())
+		return;
+
+	page_ext = lookup_page_ext(page);
+	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
+
 	INIT_LIST_HEAD(&page->lru);
 	set_page_private(page, order);
 	/* Guard pages are not available for any usage */
@@ -453,12 +476,20 @@ static inline void set_page_guard(struct zone *zone, struct page *page,
 static inline void clear_page_guard(struct zone *zone, struct page *page,
 				unsigned int order, int migratetype)
 {
-	__clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
+	struct page_ext *page_ext;
+
+	if (!debug_guardpage_enabled())
+		return;
+
+	page_ext = lookup_page_ext(page);
+	__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
+
 	set_page_private(page, 0);
 	if (!is_migrate_isolate(migratetype))
 		__mod_zone_freepage_state(zone, (1 << order), migratetype);
 }
 #else
+struct page_ext_operations debug_guardpage_ops = { NULL, };
 static inline void set_page_guard(struct zone *zone, struct page *page,
 				unsigned int order, int migratetype) {}
 static inline void clear_page_guard(struct zone *zone, struct page *page,
@@ -869,6 +900,7 @@ static inline void expand(struct zone *zone, struct page *page,
 		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
 
 		if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
+			debug_guardpage_enabled() &&
 			high < debug_guardpage_minorder()) {
 			/*
 			 * Mark as guard pages (or page), that will allow to
diff --git a/mm/page_ext.c b/mm/page_ext.c
index 514a3bccd63f..c2cd7b15f0de 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -51,6 +51,10 @@
  */
 
 static struct page_ext_operations *page_ext_ops[] = {
+	&debug_guardpage_ops,
+#ifdef CONFIG_PAGE_POISONING
+	&page_poisoning_ops,
+#endif
 };
 
 static unsigned long total_usage;