Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 52 ++++++++++++++++++++++++----------------------------
 1 file changed, 24 insertions(+), 28 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b90a74d28485..bd330252fc77 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -231,9 +231,9 @@ static void bad_page(struct page *page)
 	printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
 		KERN_EMERG "Backtrace:\n");
 	dump_stack();
-	set_page_count(page, 0);
-	reset_page_mapcount(page);
-	page->mapping = NULL;
+
+	/* Leave bad fields for debug, except PageBuddy could make trouble */
+	__ClearPageBuddy(page);
 	add_taint(TAINT_BAD_PAGE);
 }
 
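
Note on the hunk above: bad_page() used to scrub the offending page (zeroing its count, resetting its mapcount, clearing page->mapping); it now leaves those fields intact for post-mortem inspection and clears only the buddy flag, since a stale PageBuddy bit could let the allocator merge the bad page back into the free lists. Reconstructed from the + and context lines, the tail of bad_page() after this patch reads (lines outside the hunk are not shown):

	dump_stack();

	/* Leave bad fields for debug, except PageBuddy could make trouble */
	__ClearPageBuddy(page);
	add_taint(TAINT_BAD_PAGE);
}
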
@@ -290,25 +290,31 @@ void prep_compound_gigantic_page(struct page *page, unsigned long order)
 }
 #endif
 
-static void destroy_compound_page(struct page *page, unsigned long order)
+static int destroy_compound_page(struct page *page, unsigned long order)
 {
 	int i;
 	int nr_pages = 1 << order;
+	int bad = 0;
 
-	if (unlikely(compound_order(page) != order))
+	if (unlikely(compound_order(page) != order) ||
+	    unlikely(!PageHead(page))) {
 		bad_page(page);
+		bad++;
+	}
 
-	if (unlikely(!PageHead(page)))
-		bad_page(page);
 	__ClearPageHead(page);
+
 	for (i = 1; i < nr_pages; i++) {
 		struct page *p = page + i;
 
-		if (unlikely(!PageTail(p) |
-				(p->first_page != page)))
+		if (unlikely(!PageTail(p) | (p->first_page != page))) {
 			bad_page(page);
+			bad++;
+		}
 		__ClearPageTail(p);
 	}
+
+	return bad;
 }
 
 static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
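
Since this hunk interleaves several changes, here is destroy_compound_page() as it reads with the patch applied, reconstructed from the + and context lines above (indentation approximated to kernel style; not re-verified against the tree). The function now counts how many pages failed validation instead of returning void, and the head/tail flags are still cleared even on the bad path, so the caller sees a consistent, if condemned, block:

static int destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	int bad = 0;

	/* A compound page of the wrong order or without a head is bad */
	if (unlikely(compound_order(page) != order) ||
	    unlikely(!PageHead(page))) {
		bad_page(page);
		bad++;
	}

	__ClearPageHead(page);

	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		/* Every trailing page must be a tail pointing back at the head */
		if (unlikely(!PageTail(p) | (p->first_page != page))) {
			bad_page(page);
			bad++;
		}
		__ClearPageTail(p);
	}

	return bad;
}
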
@@ -428,7 +434,8 @@ static inline void __free_one_page(struct page *page,
 	int migratetype = get_pageblock_migratetype(page);
 
 	if (unlikely(PageCompound(page)))
-		destroy_compound_page(page, order);
+		if (unlikely(destroy_compound_page(page, order)))
+			return;
 
 	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
 
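
The caller now honours that return value: if destroy_compound_page() found anything wrong, __free_one_page() bails out and the block is left unfreed, kept out of circulation rather than entered into the buddy free lists. Reconstructed from the + and context lines, the call site reads:

	if (unlikely(PageCompound(page)))
		if (unlikely(destroy_compound_page(page, order)))
			return;
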
@@ -465,15 +472,10 @@ static inline int free_pages_check(struct page *page)
 	if (unlikely(page_mapcount(page) |
 		(page->mapping != NULL) |
 		(page_count(page) != 0) |
-		(page->flags & PAGE_FLAGS_CHECK_AT_FREE)))
+		(page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
 		bad_page(page);
-	/*
-	 * For now, we report if PG_reserved was found set, but do not
-	 * clear it, and do not free the page. But we shall soon need
-	 * to do more, for when the ZERO_PAGE count wraps negative.
-	 */
-	if (PageReserved(page))
 		return 1;
+	}
 	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
 		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
 	return 0;
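
With the PG_reserved special case gone, free_pages_check() treats every failed sanity check uniformly: diagnose via bad_page() and return 1 so the caller keeps the page out of circulation. Reconstructed from the + and context lines, the post-patch body reads:

	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL) |
		(page_count(page) != 0) |
		(page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
		bad_page(page);
		return 1;
	}
	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	return 0;
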
@@ -521,11 +523,11 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 {
 	unsigned long flags;
 	int i;
-	int reserved = 0;
+	int bad = 0;
 
 	for (i = 0 ; i < (1 << order) ; ++i)
-		reserved += free_pages_check(page + i);
-	if (reserved)
+		bad += free_pages_check(page + i);
+	if (bad)
 		return;
 
 	if (!PageHighMem(page)) {
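
The rename from "reserved" to "bad" reflects the new meaning: the counter now accumulates every kind of check failure, not just PG_reserved hits, and a nonzero total still makes __free_pages_ok() refuse to free the whole block:

	for (i = 0 ; i < (1 << order) ; ++i)
		bad += free_pages_check(page + i);
	if (bad)
		return;
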
@@ -610,17 +612,11 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
 	if (unlikely(page_mapcount(page) |
 		(page->mapping != NULL) |
 		(page_count(page) != 0) |
-		(page->flags & PAGE_FLAGS_CHECK_AT_PREP)))
+		(page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
 		bad_page(page);
-
-	/*
-	 * For now, we report if PG_reserved was found set, but do not
-	 * clear it, and do not allocate the page: as a safety net.
-	 */
-	if (PageReserved(page))
 		return 1;
+	}
 
-	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
 	set_page_private(page, 0);
 	set_page_refcounted(page);
 
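
prep_new_page() gets the matching treatment on the allocation side: any bad page found at prep time is reported and the function returns 1, letting the allocator discard the candidate block and try again, just as its callers already handled the old PageReserved case. The unconditional clearing of PAGE_FLAGS_CHECK_AT_PREP appears to have become dead code: a page with any of those flags set is now rejected by the check just above, so on the success path they are already clear. Reconstructed from the + and context lines, the check reads:

	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL) |
		(page_count(page) != 0) |
		(page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
		bad_page(page);
		return 1;
	}

	set_page_private(page, 0);
	set_page_refcounted(page);
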