path: root/mm/page_poison.c
Diffstat (limited to 'mm/page_poison.c')
-rw-r--r--	mm/page_poison.c	77
1 file changed, 12 insertions(+), 65 deletions(-)
diff --git a/mm/page_poison.c b/mm/page_poison.c
index 2e647c65916b..be19e989ccff 100644
--- a/mm/page_poison.c
+++ b/mm/page_poison.c
@@ -6,7 +6,6 @@
 #include <linux/poison.h>
 #include <linux/ratelimit.h>
 
-static bool __page_poisoning_enabled __read_mostly;
 static bool want_page_poisoning __read_mostly;
 
 static int early_page_poison_param(char *buf)
@@ -19,74 +18,21 @@ early_param("page_poison", early_page_poison_param);
 
 bool page_poisoning_enabled(void)
 {
-	return __page_poisoning_enabled;
-}
-
-static bool need_page_poisoning(void)
-{
-	return want_page_poisoning;
-}
-
-static void init_page_poisoning(void)
-{
 	/*
-	 * page poisoning is debug page alloc for some arches. If either
-	 * of those options are enabled, enable poisoning
+	 * Assumes that debug_pagealloc_enabled is set before
+	 * free_all_bootmem.
+	 * Page poisoning is debug page alloc for some arches. If
+	 * either of those options are enabled, enable poisoning.
 	 */
-	if (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC)) {
-		if (!want_page_poisoning && !debug_pagealloc_enabled())
-			return;
-	} else {
-		if (!want_page_poisoning)
-			return;
-	}
-
-	__page_poisoning_enabled = true;
-}
-
-struct page_ext_operations page_poisoning_ops = {
-	.need = need_page_poisoning,
-	.init = init_page_poisoning,
-};
-
-static inline void set_page_poison(struct page *page)
-{
-	struct page_ext *page_ext;
-
-	page_ext = lookup_page_ext(page);
-	if (unlikely(!page_ext))
-		return;
-
-	__set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
-}
-
-static inline void clear_page_poison(struct page *page)
-{
-	struct page_ext *page_ext;
-
-	page_ext = lookup_page_ext(page);
-	if (unlikely(!page_ext))
-		return;
-
-	__clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
-}
-
-bool page_is_poisoned(struct page *page)
-{
-	struct page_ext *page_ext;
-
-	page_ext = lookup_page_ext(page);
-	if (unlikely(!page_ext))
-		return false;
-
-	return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
+	return (want_page_poisoning ||
+		(!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
+		debug_pagealloc_enabled()));
 }
 
 static void poison_page(struct page *page)
 {
 	void *addr = kmap_atomic(page);
 
-	set_page_poison(page);
 	memset(addr, PAGE_POISON, PAGE_SIZE);
 	kunmap_atomic(addr);
 }
@@ -140,12 +86,13 @@ static void unpoison_page(struct page *page)
 {
 	void *addr;
 
-	if (!page_is_poisoned(page))
-		return;
-
 	addr = kmap_atomic(page);
+	/*
+	 * Page poisoning when enabled poisons each and every page
+	 * that is freed to buddy. Thus no extra check is done to
+	 * see if a page was poisoned.
+	 */
 	check_poison_mem(addr, PAGE_SIZE);
-	clear_page_poison(page);
 	kunmap_atomic(addr);
 }
 
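
For reference, this is page_poisoning_enabled() as it reads after the patch, reassembled from the context and "+" lines of the second hunk above (a convenience view only; the diff itself is authoritative):

bool page_poisoning_enabled(void)
{
	/*
	 * Assumes that debug_pagealloc_enabled is set before
	 * free_all_bootmem.
	 * Page poisoning is debug page alloc for some arches. If
	 * either of those options are enabled, enable poisoning.
	 */
	return (want_page_poisoning ||
		(!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
		debug_pagealloc_enabled()));
}

With the page_ext-backed set_page_poison()/clear_page_poison()/page_is_poisoned() helpers removed, whether to poison or verify a page is decided by this predicate alone; callers outside these hunks (for example the kernel_poison_pages() entry point later in this file, not shown in this diff) are presumably where page_poisoning_enabled() is checked before poison_page()/unpoison_page() are invoked.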