diff options
author | Stanislaw Gruszka <sgruszka@redhat.com> | 2012-01-10 18:07:28 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-01-10 19:30:42 -0500 |
commit | c0a32fc5a2e470d0b02597b23ad79a317735253e (patch) | |
tree | 2d164edae0062918ca2088772c00b0615781353b /mm | |
parent | 1399ff86f2a2bbacbbe68fa00c5f8c752b344723 (diff) |
mm: more intensive memory corruption debugging
With CONFIG_DEBUG_PAGEALLOC configured, the CPU will generate an exception
on access (read, write) to an unallocated page, which permits us to catch
code that corrupts memory. However, the kernel tries to maximise
memory usage, hence there are usually few free pages in the system and
buggy code usually corrupts some crucial data.
This patch changes the buddy allocator to keep more free/protected pages
and to interlace free/protected and allocated pages to increase the
probability of catching corruption.
When the kernel is compiled with CONFIG_DEBUG_PAGEALLOC,
debug_guardpage_minorder defines the minimum order used by the page
allocator to grant a request. The requested pages will be returned from
such a block, with the remaining pages in it used as guard pages.
The default value of debug_guardpage_minorder is zero: no change from
current behaviour.
[akpm@linux-foundation.org: tweak documentation, s/flg/flag/]
Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/Kconfig.debug | 5 | ||||
-rw-r--r-- | mm/page_alloc.c | 75 |
2 files changed, 74 insertions, 6 deletions
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug index 8b1a477162dc..4b2443254de2 100644 --- a/mm/Kconfig.debug +++ b/mm/Kconfig.debug | |||
@@ -4,6 +4,7 @@ config DEBUG_PAGEALLOC | |||
4 | depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC | 4 | depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC |
5 | depends on !KMEMCHECK | 5 | depends on !KMEMCHECK |
6 | select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC | 6 | select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC |
7 | select PAGE_GUARD if ARCH_SUPPORTS_DEBUG_PAGEALLOC | ||
7 | ---help--- | 8 | ---help--- |
8 | Unmap pages from the kernel linear mapping after free_pages(). | 9 | Unmap pages from the kernel linear mapping after free_pages(). |
9 | This results in a large slowdown, but helps to find certain types | 10 | This results in a large slowdown, but helps to find certain types |
@@ -22,3 +23,7 @@ config WANT_PAGE_DEBUG_FLAGS | |||
22 | config PAGE_POISONING | 23 | config PAGE_POISONING |
23 | bool | 24 | bool |
24 | select WANT_PAGE_DEBUG_FLAGS | 25 | select WANT_PAGE_DEBUG_FLAGS |
26 | |||
27 | config PAGE_GUARD | ||
28 | bool | ||
29 | select WANT_PAGE_DEBUG_FLAGS | ||
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 3cba4b67203f..93baebcc06f3 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -57,6 +57,7 @@ | |||
57 | #include <linux/ftrace_event.h> | 57 | #include <linux/ftrace_event.h> |
58 | #include <linux/memcontrol.h> | 58 | #include <linux/memcontrol.h> |
59 | #include <linux/prefetch.h> | 59 | #include <linux/prefetch.h> |
60 | #include <linux/page-debug-flags.h> | ||
60 | 61 | ||
61 | #include <asm/tlbflush.h> | 62 | #include <asm/tlbflush.h> |
62 | #include <asm/div64.h> | 63 | #include <asm/div64.h> |
@@ -388,6 +389,37 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags) | |||
388 | clear_highpage(page + i); | 389 | clear_highpage(page + i); |
389 | } | 390 | } |
390 | 391 | ||
392 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
393 | unsigned int _debug_guardpage_minorder; | ||
394 | |||
395 | static int __init debug_guardpage_minorder_setup(char *buf) | ||
396 | { | ||
397 | unsigned long res; | ||
398 | |||
399 | if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) { | ||
400 | printk(KERN_ERR "Bad debug_guardpage_minorder value\n"); | ||
401 | return 0; | ||
402 | } | ||
403 | _debug_guardpage_minorder = res; | ||
404 | printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res); | ||
405 | return 0; | ||
406 | } | ||
407 | __setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup); | ||
408 | |||
409 | static inline void set_page_guard_flag(struct page *page) | ||
410 | { | ||
411 | __set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags); | ||
412 | } | ||
413 | |||
414 | static inline void clear_page_guard_flag(struct page *page) | ||
415 | { | ||
416 | __clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags); | ||
417 | } | ||
418 | #else | ||
419 | static inline void set_page_guard_flag(struct page *page) { } | ||
420 | static inline void clear_page_guard_flag(struct page *page) { } | ||
421 | #endif | ||
422 | |||
391 | static inline void set_page_order(struct page *page, int order) | 423 | static inline void set_page_order(struct page *page, int order) |
392 | { | 424 | { |
393 | set_page_private(page, order); | 425 | set_page_private(page, order); |
@@ -445,6 +477,11 @@ static inline int page_is_buddy(struct page *page, struct page *buddy, | |||
445 | if (page_zone_id(page) != page_zone_id(buddy)) | 477 | if (page_zone_id(page) != page_zone_id(buddy)) |
446 | return 0; | 478 | return 0; |
447 | 479 | ||
480 | if (page_is_guard(buddy) && page_order(buddy) == order) { | ||
481 | VM_BUG_ON(page_count(buddy) != 0); | ||
482 | return 1; | ||
483 | } | ||
484 | |||
448 | if (PageBuddy(buddy) && page_order(buddy) == order) { | 485 | if (PageBuddy(buddy) && page_order(buddy) == order) { |
449 | VM_BUG_ON(page_count(buddy) != 0); | 486 | VM_BUG_ON(page_count(buddy) != 0); |
450 | return 1; | 487 | return 1; |
@@ -501,11 +538,19 @@ static inline void __free_one_page(struct page *page, | |||
501 | buddy = page + (buddy_idx - page_idx); | 538 | buddy = page + (buddy_idx - page_idx); |
502 | if (!page_is_buddy(page, buddy, order)) | 539 | if (!page_is_buddy(page, buddy, order)) |
503 | break; | 540 | break; |
504 | 541 | /* | |
505 | /* Our buddy is free, merge with it and move up one order. */ | 542 | * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page, |
506 | list_del(&buddy->lru); | 543 | * merge with it and move up one order. |
507 | zone->free_area[order].nr_free--; | 544 | */ |
508 | rmv_page_order(buddy); | 545 | if (page_is_guard(buddy)) { |
546 | clear_page_guard_flag(buddy); | ||
547 | set_page_private(page, 0); | ||
548 | __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); | ||
549 | } else { | ||
550 | list_del(&buddy->lru); | ||
551 | zone->free_area[order].nr_free--; | ||
552 | rmv_page_order(buddy); | ||
553 | } | ||
509 | combined_idx = buddy_idx & page_idx; | 554 | combined_idx = buddy_idx & page_idx; |
510 | page = page + (combined_idx - page_idx); | 555 | page = page + (combined_idx - page_idx); |
511 | page_idx = combined_idx; | 556 | page_idx = combined_idx; |
@@ -731,6 +776,23 @@ static inline void expand(struct zone *zone, struct page *page, | |||
731 | high--; | 776 | high--; |
732 | size >>= 1; | 777 | size >>= 1; |
733 | VM_BUG_ON(bad_range(zone, &page[size])); | 778 | VM_BUG_ON(bad_range(zone, &page[size])); |
779 | |||
780 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
781 | if (high < debug_guardpage_minorder()) { | ||
782 | /* | ||
783 | * Mark as guard pages (or page), that will allow to | ||
784 | * merge back to allocator when buddy will be freed. | ||
785 | * Corresponding page table entries will not be touched, | ||
786 | * pages will stay not present in virtual address space | ||
787 | */ | ||
788 | INIT_LIST_HEAD(&page[size].lru); | ||
789 | set_page_guard_flag(&page[size]); | ||
790 | set_page_private(&page[size], high); | ||
791 | /* Guard pages are not available for any usage */ | ||
792 | __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << high)); | ||
793 | continue; | ||
794 | } | ||
795 | #endif | ||
734 | list_add(&page[size].lru, &area->free_list[migratetype]); | 796 | list_add(&page[size].lru, &area->free_list[migratetype]); |
735 | area->nr_free++; | 797 | area->nr_free++; |
736 | set_page_order(&page[size], high); | 798 | set_page_order(&page[size], high); |
@@ -1754,7 +1816,8 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...) | |||
1754 | { | 1816 | { |
1755 | unsigned int filter = SHOW_MEM_FILTER_NODES; | 1817 | unsigned int filter = SHOW_MEM_FILTER_NODES; |
1756 | 1818 | ||
1757 | if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs)) | 1819 | if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) || |
1820 | debug_guardpage_minorder() > 0) | ||
1758 | return; | 1821 | return; |
1759 | 1822 | ||
1760 | /* | 1823 | /* |