author     Vinayak Menon <vinmenon@codeaurora.org>  2017-05-03 17:54:42 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-05-03 18:52:10 -0400
commit     bd33ef3681359343863f2290aded182b0441edee
tree       e4575468fe495b08fe95c4aa8272895f843183b3
parent     2872bb2d0a4952ffb721e703555cb73d40b2c2f0
mm: enable page poisoning early at boot
On SPARSEMEM systems page poisoning is enabled after buddy is up,
because of the dependency on page extension init.  This causes the
pages released by free_all_bootmem not to be poisoned.  This either
delays or misses the identification of some issues because the pages
have to undergo another cycle of alloc-free-alloc for any corruption
to be detected.

Enable page poisoning early by getting rid of the PAGE_EXT_DEBUG_POISON
flag.  Since all the free pages will now be poisoned, the flag need not
be verified before checking the poison during an alloc.

[vinmenon@codeaurora.org: fix Kconfig]
  Link: http://lkml.kernel.org/r/1490878002-14423-1-git-send-email-vinmenon@codeaurora.org
Link: http://lkml.kernel.org/r/1490358246-11001-1-git-send-email-vinmenon@codeaurora.org
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
Acked-by: Laura Abbott <labbott@redhat.com>
Tested-by: Laura Abbott <labbott@redhat.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Akinobu Mita <akinobu.mita@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
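The gist of the change: instead of latching a private __page_poisoning_enabled flag from a page_ext init callback (which only runs once the buddy allocator is up), page_poisoning_enabled() now computes its answer on demand from state that is already valid at early boot. A minimal user-space sketch of the new decision logic, with the Kconfig option and debug_pagealloc state reduced to plain booleans (the stubs and names here are illustrative, not kernel API):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for kernel state; in the kernel these come
 * from the page_poison= early param, Kconfig, and debug_pagealloc. */
static bool want_page_poisoning;      /* page_poison=on */
static bool arch_has_debug_pagealloc; /* CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC */
static bool debug_pagealloc;          /* debug_pagealloc_enabled() */

/* Mirrors the patched page_poisoning_enabled(): no stored flag, so it
 * is safe to call before the buddy allocator (and page_ext) is up. */
static bool page_poisoning_enabled(void)
{
        return want_page_poisoning ||
               (!arch_has_debug_pagealloc && debug_pagealloc);
}

int main(void)
{
        want_page_poisoning = true; /* as if booted with page_poison=on */
        printf("poisoning enabled: %d\n", page_poisoning_enabled());
        return 0;
}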
-rw-r--r--  include/linux/mm.h  |  1
-rw-r--r--  mm/Kconfig.debug    |  1
-rw-r--r--  mm/page_alloc.c     | 13
-rw-r--r--  mm/page_ext.c       | 13
-rw-r--r--  mm/page_poison.c    | 77
5 files changed, 17 insertions(+), 88 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 695da2a19b4c..5d22e69f51ea 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2487,7 +2487,6 @@ extern long copy_huge_page_from_user(struct page *dst_page,
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
 extern struct page_ext_operations debug_guardpage_ops;
-extern struct page_ext_operations page_poisoning_ops;
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 extern unsigned int _debug_guardpage_minorder;
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index 79d0fd13b5b3..5b0adf1435de 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -42,7 +42,6 @@ config DEBUG_PAGEALLOC_ENABLE_DEFAULT
 
 config PAGE_POISONING
         bool "Poison pages after freeing"
-        select PAGE_EXTENSION
         select PAGE_POISONING_NO_SANITY if HIBERNATION
         ---help---
           Fill the pages with poison patterns after free_pages() and verify
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 465391811c2e..f1f225608413 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1689,10 +1689,10 @@ static inline int check_new_page(struct page *page)
         return 1;
 }
 
-static inline bool free_pages_prezeroed(bool poisoned)
+static inline bool free_pages_prezeroed(void)
 {
         return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
-                page_poisoning_enabled() && poisoned;
+                page_poisoning_enabled();
 }
 
 #ifdef CONFIG_DEBUG_VM
@@ -1746,17 +1746,10 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
                         unsigned int alloc_flags)
 {
         int i;
-        bool poisoned = true;
-
-        for (i = 0; i < (1 << order); i++) {
-                struct page *p = page + i;
-                if (poisoned)
-                        poisoned &= page_is_poisoned(p);
-        }
 
         post_alloc_hook(page, order, gfp_flags);
 
-        if (!free_pages_prezeroed(poisoned) && (gfp_flags & __GFP_ZERO))
+        if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
                 for (i = 0; i < (1 << order); i++)
                         clear_highpage(page + i);
 
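With per-page poison tracking gone, free_pages_prezeroed() no longer needs a poisoned result threaded through prep_new_page(): when CONFIG_PAGE_POISONING_ZERO is in effect, every free page is poisoned with zeroes, so a __GFP_ZERO allocation can always skip the clearing pass. A hedged sketch of that decision in plain C, with the kernel helpers stubbed out (PAGE_SIZE and the names below are illustrative):

#include <stdbool.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Stub: true when CONFIG_PAGE_POISONING_ZERO and poisoning are on,
 * i.e. free pages are already zero-filled. */
static bool free_pages_prezeroed(void)
{
        return true; /* assume zero-poisoning for this sketch */
}

/* Sketch of prep_new_page()'s zeroing step after this patch: the
 * decision depends only on global config, not on a per-page flag. */
static void prep_page(unsigned char *page, bool gfp_zero)
{
        if (!free_pages_prezeroed() && gfp_zero)
                memset(page, 0, PAGE_SIZE); /* clear_highpage() stand-in */
}

int main(void)
{
        unsigned char page[PAGE_SIZE];

        prep_page(page, true); /* skips the memset: page is pre-zeroed */
        return 0;
}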
diff --git a/mm/page_ext.c b/mm/page_ext.c
index 121dcffc4ec1..88ccc044b09a 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -59,9 +59,6 @@
 
 static struct page_ext_operations *page_ext_ops[] = {
         &debug_guardpage_ops,
-#ifdef CONFIG_PAGE_POISONING
-        &page_poisoning_ops,
-#endif
 #ifdef CONFIG_PAGE_OWNER
         &page_owner_ops,
 #endif
@@ -127,15 +124,12 @@ struct page_ext *lookup_page_ext(struct page *page)
         struct page_ext *base;
 
         base = NODE_DATA(page_to_nid(page))->node_page_ext;
-#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAGE_POISONING)
+#if defined(CONFIG_DEBUG_VM)
         /*
          * The sanity checks the page allocator does upon freeing a
          * page can reach here before the page_ext arrays are
          * allocated when feeding a range of pages to the allocator
          * for the first time during bootup or memory hotplug.
-         *
-         * This check is also necessary for ensuring page poisoning
-         * works as expected when enabled
          */
         if (unlikely(!base))
                 return NULL;
@@ -204,15 +198,12 @@ struct page_ext *lookup_page_ext(struct page *page)
 {
         unsigned long pfn = page_to_pfn(page);
         struct mem_section *section = __pfn_to_section(pfn);
-#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAGE_POISONING)
+#if defined(CONFIG_DEBUG_VM)
         /*
          * The sanity checks the page allocator does upon freeing a
          * page can reach here before the page_ext arrays are
          * allocated when feeding a range of pages to the allocator
          * for the first time during bootup or memory hotplug.
-         *
-         * This check is also necessary for ensuring page poisoning
-         * works as expected when enabled
          */
         if (!section->page_ext)
                 return NULL;
diff --git a/mm/page_poison.c b/mm/page_poison.c
index 2e647c65916b..be19e989ccff 100644
--- a/mm/page_poison.c
+++ b/mm/page_poison.c
@@ -6,7 +6,6 @@
 #include <linux/poison.h>
 #include <linux/ratelimit.h>
 
-static bool __page_poisoning_enabled __read_mostly;
 static bool want_page_poisoning __read_mostly;
 
 static int early_page_poison_param(char *buf)
@@ -19,74 +18,21 @@ early_param("page_poison", early_page_poison_param);
 
 bool page_poisoning_enabled(void)
 {
-        return __page_poisoning_enabled;
-}
-
-static bool need_page_poisoning(void)
-{
-        return want_page_poisoning;
-}
-
-static void init_page_poisoning(void)
-{
         /*
-         * page poisoning is debug page alloc for some arches. If either
-         * of those options are enabled, enable poisoning
+         * Assumes that debug_pagealloc_enabled is set before
+         * free_all_bootmem.
+         * Page poisoning is debug page alloc for some arches. If
+         * either of those options are enabled, enable poisoning.
          */
-        if (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC)) {
-                if (!want_page_poisoning && !debug_pagealloc_enabled())
-                        return;
-        } else {
-                if (!want_page_poisoning)
-                        return;
-        }
-
-        __page_poisoning_enabled = true;
-}
-
-struct page_ext_operations page_poisoning_ops = {
-        .need = need_page_poisoning,
-        .init = init_page_poisoning,
-};
-
-static inline void set_page_poison(struct page *page)
-{
-        struct page_ext *page_ext;
-
-        page_ext = lookup_page_ext(page);
-        if (unlikely(!page_ext))
-                return;
-
-        __set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
-}
-
-static inline void clear_page_poison(struct page *page)
-{
-        struct page_ext *page_ext;
-
-        page_ext = lookup_page_ext(page);
-        if (unlikely(!page_ext))
-                return;
-
-        __clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
-}
-
-bool page_is_poisoned(struct page *page)
-{
-        struct page_ext *page_ext;
-
-        page_ext = lookup_page_ext(page);
-        if (unlikely(!page_ext))
-                return false;
-
-        return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
-}
+        return (want_page_poisoning ||
+                (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
+                debug_pagealloc_enabled()));
 }
 
 static void poison_page(struct page *page)
 {
         void *addr = kmap_atomic(page);
 
-        set_page_poison(page);
         memset(addr, PAGE_POISON, PAGE_SIZE);
         kunmap_atomic(addr);
 }
@@ -140,12 +86,13 @@ static void unpoison_page(struct page *page)
 {
         void *addr;
 
-        if (!page_is_poisoned(page))
-                return;
-
         addr = kmap_atomic(page);
+        /*
+         * Page poisoning when enabled poisons each and every page
+         * that is freed to buddy. Thus no extra check is done to
+         * see if a page was poisoned.
+         */
         check_poison_mem(addr, PAGE_SIZE);
-        clear_page_poison(page);
         kunmap_atomic(addr);
 }
 
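For context on what poison_page()/unpoison_page() do around these hooks: at free time the page is filled with the PAGE_POISON pattern (0xaa, per include/linux/poison.h), and at allocation time check_poison_mem() verifies the pattern, reporting any byte that changed in between as corruption of a free page. A user-space approximation of that round trip, with assert() standing in for the kernel's ratelimited report:

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE   4096
#define PAGE_POISON 0xaa /* pattern from include/linux/poison.h */

/* Free path: saturate the page with the poison pattern. */
static void poison_page(unsigned char *page)
{
        memset(page, PAGE_POISON, PAGE_SIZE);
}

/* Alloc path: after this patch every freed page is poisoned, so the
 * check runs unconditionally instead of testing a page_ext bit. */
static void unpoison_page(const unsigned char *page)
{
        size_t i;

        for (i = 0; i < PAGE_SIZE; i++)
                assert(page[i] == PAGE_POISON); /* mismatch = corruption */
}

int main(void)
{
        unsigned char *page = malloc(PAGE_SIZE);

        poison_page(page);   /* page freed to buddy */
        unpoison_page(page); /* page handed back out */
        free(page);
        return 0;
}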