author:    Laura Abbott <labbott@fedoraproject.org>  2016-03-15 17:56:30 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>  2016-03-15 19:55:16 -0400
commit:    1414c7f4f7d72d138fff35f00151d15749b5beda
tree:      e742e525f02ae4095e2a907cf8bc5fc29bfbc3dc /mm
parent:    8823b1dbc05fab1a8bec275eeae4709257c2661d
mm/page_poisoning.c: allow for zero poisoning
By default, page poisoning uses a poison value (0xaa) on free.  If this
is changed to 0, the page is not only sanitized but zeroing on alloc
with __GFP_ZERO can be skipped as well.  The tradeoff is that corruption
from the poisoning is harder to detect.  This feature also cannot be
used with hibernation since pages are not guaranteed to be zeroed after
hibernation.

Credit to the Grsecurity/PaX team for inspiring this work.

Signed-off-by: Laura Abbott <labbott@fedoraproject.org>
Acked-by: Rafael J. Wysocki <rjw@rjwysocki.net>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Mathias Krause <minipli@googlemail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Jianyu Zhan <nasa4836@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
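In short, free-time poisoning doubles as pre-zeroing: pages are poisoned
with 0x00 on free, the allocator checks that every page in a new
allocation still carries that poison, and if so the __GFP_ZERO clearing
loop is skipped.  A condensed sketch of the resulting alloc-path
decision, using the helpers the hunks below introduce:

	/*
	 * Sketch of the gating logic (see the mm/page_alloc.c hunk below
	 * for the literal change): skip clearing only when zero poisoning
	 * is compiled in, enabled at runtime, and every page still holds
	 * its poison pattern.
	 */
	if (!free_pages_prezeroed(poisoned) && (gfp_flags & __GFP_ZERO))
		for (i = 0; i < (1 << order); i++)
			clear_highpage(page + i);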
Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig.debug   14
-rw-r--r--  mm/page_alloc.c    11
-rw-r--r--  mm/page_ext.c      10
-rw-r--r--  mm/page_poison.c    7
4 files changed, 37 insertions(+), 5 deletions(-)
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index 1f99f9a0deae..5c50b238b770 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -65,3 +65,17 @@ config PAGE_POISONING_NO_SANITY
 
	   If you are only interested in sanitization, say Y. Otherwise
	   say N.
+
+config PAGE_POISONING_ZERO
+	bool "Use zero for poisoning instead of random data"
+	depends on PAGE_POISONING
+	---help---
+	   Instead of using the existing poison value, fill the pages with
+	   zeros. This makes it harder to detect when errors are occurring
+	   due to sanitization but the zeroing at free means that it is
+	   no longer necessary to write zeros when GFP_ZERO is used on
+	   allocation.
+
+	   Enabling page poisoning with this option will disable hibernation
+
+	   If unsure, say N
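As a usage sketch (not part of the patch): a kernel built for zero
poisoning would carry a .config fragment along these lines, and,
assuming the page_poison= boot parameter from the parent commit
(8823b1dbc05f) still governs runtime enablement, poisoning must also be
switched on at boot:

	CONFIG_PAGE_POISONING=y
	CONFIG_PAGE_POISONING_ZERO=y

plus "page_poison=on" on the kernel command line.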
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2a08349fbab2..50897dcaefdb 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1405,15 +1405,24 @@ static inline int check_new_page(struct page *page)
 	return 0;
 }
 
+static inline bool free_pages_prezeroed(bool poisoned)
+{
+	return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
+		page_poisoning_enabled() && poisoned;
+}
+
 static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
 								int alloc_flags)
 {
 	int i;
+	bool poisoned = true;
 
 	for (i = 0; i < (1 << order); i++) {
 		struct page *p = page + i;
 		if (unlikely(check_new_page(p)))
 			return 1;
+		if (poisoned)
+			poisoned &= page_is_poisoned(p);
 	}
 
 	set_page_private(page, 0);
@@ -1424,7 +1433,7 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
 	kernel_poison_pages(page, 1 << order, 1);
 	kasan_alloc_pages(page, order);
 
-	if (gfp_flags & __GFP_ZERO)
+	if (!free_pages_prezeroed(poisoned) && (gfp_flags & __GFP_ZERO))
 		for (i = 0; i < (1 << order); i++)
 			clear_highpage(page + i);
 
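A note on the design: IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) is a
compile-time constant, so with the option disabled free_pages_prezeroed()
folds to a constant false and the __GFP_ZERO behaviour is exactly what
it was before, roughly as if the helper read (illustration only, not in
the patch):

	static inline bool free_pages_prezeroed(bool poisoned)
	{
		return false;	/* CONFIG_PAGE_POISONING_ZERO=n: always clear */
	}

The `poisoned` flag starts out true and is ANDed across every
constituent page, so a single unpoisoned page in a high-order
allocation is enough to force the clear_highpage() loop.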
diff --git a/mm/page_ext.c b/mm/page_ext.c
index 292ca7b8debd..2d864e64f7fe 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -106,12 +106,15 @@ struct page_ext *lookup_page_ext(struct page *page)
 	struct page_ext *base;
 
 	base = NODE_DATA(page_to_nid(page))->node_page_ext;
-#ifdef CONFIG_DEBUG_VM
+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAGE_POISONING)
 	/*
 	 * The sanity checks the page allocator does upon freeing a
 	 * page can reach here before the page_ext arrays are
 	 * allocated when feeding a range of pages to the allocator
 	 * for the first time during bootup or memory hotplug.
+	 *
+	 * This check is also necessary for ensuring page poisoning
+	 * works as expected when enabled
 	 */
 	if (unlikely(!base))
 		return NULL;
@@ -180,12 +183,15 @@ struct page_ext *lookup_page_ext(struct page *page)
 {
 	unsigned long pfn = page_to_pfn(page);
 	struct mem_section *section = __pfn_to_section(pfn);
-#ifdef CONFIG_DEBUG_VM
+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAGE_POISONING)
 	/*
 	 * The sanity checks the page allocator does upon freeing a
 	 * page can reach here before the page_ext arrays are
 	 * allocated when feeding a range of pages to the allocator
 	 * for the first time during bootup or memory hotplug.
+	 *
+	 * This check is also necessary for ensuring page poisoning
+	 * works as expected when enabled
 	 */
 	if (!section->page_ext)
 		return NULL;
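The widened guard matters because the poisoning path now calls
lookup_page_ext() on allocation hot paths that can run before the
page_ext arrays are set up, so callers under CONFIG_PAGE_POISONING must
tolerate a NULL result.  A sketch of the defensive pattern this
establishes (the mm/page_poison.c hunk below is the concrete instance):

	struct page_ext *page_ext = lookup_page_ext(page);
	if (!page_ext)		/* page_ext not set up for this pfn yet */
		return false;	/* conservatively report "not poisoned" */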
diff --git a/mm/page_poison.c b/mm/page_poison.c
index 89d3bc773633..479e7ea2bea6 100644
--- a/mm/page_poison.c
+++ b/mm/page_poison.c
@@ -71,11 +71,14 @@ static inline void clear_page_poison(struct page *page)
 	__clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
 }
 
-static inline bool page_poison(struct page *page)
+bool page_is_poisoned(struct page *page)
 {
 	struct page_ext *page_ext;
 
 	page_ext = lookup_page_ext(page);
+	if (!page_ext)
+		return false;
+
 	return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
 }
 
@@ -137,7 +140,7 @@ static void unpoison_page(struct page *page)
 {
 	void *addr;
 
-	if (!page_poison(page))
+	if (!page_is_poisoned(page))
 		return;
 
 	addr = kmap_atomic(page);
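page_poison() loses its static linkage and becomes page_is_poisoned()
so mm/page_alloc.c can call it.  The matching declaration, and
presumably a no-op stub for CONFIG_PAGE_POISONING=n builds, live in a
header outside mm/ and are therefore not visible in this 'mm'-limited
diffstat; a conventional sketch of such a header pairing:

	#ifdef CONFIG_PAGE_POISONING
	bool page_is_poisoned(struct page *page);
	#else
	static inline bool page_is_poisoned(struct page *page)
	{
		return false;	/* poisoning compiled out */
	}
	#endif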