author    Wu Fengguang <fengguang.wu@intel.com>  2009-09-16 05:50:12 -0400
committer Andi Kleen <ak@linux.intel.com>        2009-09-16 05:50:12 -0400
commit    2a7684a23e9c263c2a1e8b2c0027ad1836a0f9df
tree      b9769d2f391d76d9c84c687aa771d36cc539025e
parent    888b9f7c58ebe8303bad817cd554df887a683957
HWPOISON: check and isolate corrupted free pages v2
If memory corruption hits free buddy pages, we can safely ignore them: no one will access them until page allocation time, at which point prep_new_page() will automatically check and isolate PG_hwpoison pages for us (for 0-order allocations).

This patch expands prep_new_page() to check every component page in a high order page allocation, in order to completely stop PG_hwpoison pages from being recirculated. Note that the common case of allocating a single page does no more work than before; allocating at order > 0 does a bit more work, but that is relatively uncommon.

This simple implementation may drop some innocent neighbor pages; hopefully that is not a big problem, because the event should be rare enough. The patch does add some runtime cost to high order page users.

[AK: Improved description]

v2: Andi Kleen:
- Port to -mm code
- Move check into separate function.
- Don't dump stack in bad_page() for hwpoisoned pages.

Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
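As a quick illustration of the new allocation-time check, here is a minimal userspace C sketch (not kernel code: the struct page stub and main() are invented scaffolding; only the check_new_page()/prep_new_page() split and the loop over the 1 << order component pages mirror this patch):

#include <stdio.h>
#include <stdbool.h>

struct page { bool hwpoison; };	/* stand-in for the real struct page */

/* Reject one candidate page, as check_new_page() does for bad pages. */
static int check_new_page(struct page *p)
{
	return p->hwpoison ? 1 : 0;	/* 1 = bad, discard the whole block */
}

/* Check all 1 << order component pages, as the patched prep_new_page(). */
static int prep_new_page(struct page *page, int order)
{
	int i;

	for (i = 0; i < (1 << order); i++)
		if (check_new_page(page + i))
			return 1;
	return 0;
}

int main(void)
{
	struct page block[4] = { {0}, {0}, {1}, {0} };	/* page 2 poisoned */

	/* order 0: only block[0] is checked, no more work than before */
	printf("order 0: %s\n", prep_new_page(block, 0) ? "rejected" : "ok");
	/* order 2: one poisoned component page taints the whole block */
	printf("order 2: %s\n", prep_new_page(block, 2) ? "rejected" : "ok");
	return 0;
}

The order-2 rejection also shows why innocent neighbor pages can be dropped along with the poisoned one.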
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 20 +++++++++++++++++++-
1 file changed, 19 insertions(+), 1 deletion(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a0de15f46987..9faa7ad95ac5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -234,6 +234,12 @@ static void bad_page(struct page *page)
 	static unsigned long nr_shown;
 	static unsigned long nr_unshown;
 
+	/* Don't complain about poisoned pages */
+	if (PageHWPoison(page)) {
+		__ClearPageBuddy(page);
+		return;
+	}
+
 	/*
 	 * Allow a burst of 60 reports, then keep quiet for that minute;
 	 * or allow a steady drip of one report per second.
@@ -646,7 +652,7 @@ static inline void expand(struct zone *zone, struct page *page,
 /*
  * This page is about to be returned from the page allocator
  */
-static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
+static inline int check_new_page(struct page *page)
 {
 	if (unlikely(page_mapcount(page) |
 		(page->mapping != NULL) |
@@ -655,6 +661,18 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
 		bad_page(page);
 		return 1;
 	}
+	return 0;
+}
+
+static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
+{
+	int i;
+
+	for (i = 0; i < (1 << order); i++) {
+		struct page *p = page + i;
+		if (unlikely(check_new_page(p)))
+			return 1;
+	}
 
 	set_page_private(page, 0);
 	set_page_refcounted(page);
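Context, not part of this diff: the "isolate" half of the title works because prep_new_page()'s caller in the allocator simply retries with another free block whenever it returns 1, and never links the rejected block back onto the free lists. A toy userspace sketch of that behavior, with the pool and take_block() helper invented for illustration:

#include <stdio.h>

struct page { int hwpoison; };

static struct page pool[3] = { {1}, {0}, {0} };	/* pool[0] is poisoned */
static int next_free;				/* toy free "list" cursor */

static struct page *take_block(void)		/* invented helper */
{
	return next_free < 3 ? &pool[next_free++] : NULL;
}

static int prep_new_page(struct page *p)	/* order-0 check only */
{
	return p->hwpoison;
}

static struct page *alloc_page_sim(void)
{
	struct page *page;
again:
	page = take_block();
	if (!page)
		return NULL;			/* toy out-of-memory */
	if (prep_new_page(page))
		goto again;	/* bad page: never freed back, so it is isolated */
	return page;
}

int main(void)
{
	struct page *page = alloc_page_sim();

	printf("allocated pool[%d]; the poisoned pool[0] stays out of circulation\n",
	       (int)(page - pool));
	return 0;
}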