author	Hugh Dickins <hugh@veritas.com>	2005-11-22 00:32:20 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-11-22 12:13:42 -0500
commit	689bcebfda16d7bace742740bfb3137fff30b529 (patch)
tree	7a7c1f583ed2c4838244650d83b53557ec1a8efa
parent	f57e88a8d83de8d844b57e16b84d2f762fe9f092 (diff)
[PATCH] unpaged: PG_reserved bad_page
It used to be the case that PG_reserved pages were silently never freed, but
in 2.6.15-rc1 they may be freed with a "Bad page state" message.  We should
work through such cases as they appear, fixing the code; but for now it's
safer to issue the message without freeing the page, leaving PG_reserved set.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--	mm/page_alloc.c	46
1 files changed, 34 insertions, 12 deletions
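For readers skimming the change, here is a minimal userspace sketch of the idea (not part of the commit): the check routine reports bad page state and returns whether the page is reserved, and the free path bails out without freeing when any page in the block was reserved, leaving PG_reserved set. All names prefixed with demo_/DEMO_ are hypothetical stand-ins; the real functions are free_pages_check(), __free_pages_ok(), free_hot_cold_page() and prep_new_page() in the diff below.

#include <stdio.h>

/* Hypothetical stand-ins for two of the kernel's page flag bits (illustration only). */
#define DEMO_PG_DIRTY    (1u << 0)
#define DEMO_PG_RESERVED (1u << 1)

struct demo_page {
	unsigned int flags;
	int mapcount;
};

/*
 * Models free_pages_check() after this patch: report bad state, clear the
 * dirty bit, and return nonzero when the page is reserved so the caller
 * can refuse to free it (the reserved bit itself is left set).
 */
static int demo_free_pages_check(struct demo_page *page)
{
	if (page->mapcount)
		fprintf(stderr, "Bad page state\n");
	if (page->flags & DEMO_PG_DIRTY)
		page->flags &= ~DEMO_PG_DIRTY;
	return (page->flags & DEMO_PG_RESERVED) != 0;
}

/* Models __free_pages_ok(): bail out before freeing if any page was reserved. */
static void demo_free_pages(struct demo_page *pages, int count)
{
	int reserved = 0;
	int i;

	for (i = 0; i < count; i++)
		reserved += demo_free_pages_check(&pages[i]);
	if (reserved)
		return;	/* leave the block alone, as the patch's safety net does */

	printf("freed %d page(s)\n", count);	/* the real code hands pages back to the buddy lists */
}

int main(void)
{
	struct demo_page pages[2] = {
		{ .flags = DEMO_PG_DIRTY,    .mapcount = 0 },
		{ .flags = DEMO_PG_RESERVED, .mapcount = 0 },
	};

	demo_free_pages(pages, 2);	/* nothing freed: one page is reserved */
	pages[1].flags = 0;
	demo_free_pages(pages, 2);	/* now prints "freed 2 page(s)" */
	return 0;
}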
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 23b84c4e1a57..1731236dec35 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -140,8 +140,7 @@ static void bad_page(const char *function, struct page *page)
 			1 << PG_reclaim |
 			1 << PG_slab	|
 			1 << PG_swapcache |
-			1 << PG_writeback |
-			1 << PG_reserved );
+			1 << PG_writeback );
 	set_page_count(page, 0);
 	reset_page_mapcount(page);
 	page->mapping = NULL;
@@ -335,7 +334,7 @@ static inline void __free_pages_bulk (struct page *page,
 	zone->free_area[order].nr_free++;
 }
 
-static inline void free_pages_check(const char *function, struct page *page)
+static inline int free_pages_check(const char *function, struct page *page)
 {
 	if (	page_mapcount(page) ||
 		page->mapping != NULL ||
@@ -353,6 +352,12 @@ static inline void free_pages_check(const char *function, struct page *page)
 		bad_page(function, page);
 	if (PageDirty(page))
 		__ClearPageDirty(page);
+	/*
+	 * For now, we report if PG_reserved was found set, but do not
+	 * clear it, and do not free the page.  But we shall soon need
+	 * to do more, for when the ZERO_PAGE count wraps negative.
+	 */
+	return PageReserved(page);
 }
 
 /*
@@ -392,11 +397,10 @@ void __free_pages_ok(struct page *page, unsigned int order)
 {
 	LIST_HEAD(list);
 	int i;
+	int reserved = 0;
 
 	arch_free_page(page, order);
 
-	mod_page_state(pgfree, 1 << order);
-
 #ifndef CONFIG_MMU
 	if (order > 0)
 		for (i = 1 ; i < (1 << order) ; ++i)
@@ -404,8 +408,12 @@ void __free_pages_ok(struct page *page, unsigned int order)
 #endif
 
 	for (i = 0 ; i < (1 << order) ; ++i)
-		free_pages_check(__FUNCTION__, page + i);
+		reserved += free_pages_check(__FUNCTION__, page + i);
+	if (reserved)
+		return;
+
 	list_add(&page->lru, &list);
+	mod_page_state(pgfree, 1 << order);
 	kernel_map_pages(page, 1<<order, 0);
 	free_pages_bulk(page_zone(page), 1, &list, order);
 }
@@ -463,7 +471,7 @@ void set_page_refs(struct page *page, int order)
 /*
  * This page is about to be returned from the page allocator
  */
-static void prep_new_page(struct page *page, int order)
+static int prep_new_page(struct page *page, int order)
 {
 	if (	page_mapcount(page) ||
 		page->mapping != NULL ||
@@ -481,12 +489,20 @@ static void prep_new_page(struct page *page, int order)
 			1 << PG_reserved )))
 		bad_page(__FUNCTION__, page);
 
+	/*
+	 * For now, we report if PG_reserved was found set, but do not
+	 * clear it, and do not allocate the page: as a safety net.
+	 */
+	if (PageReserved(page))
+		return 1;
+
 	page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
 			1 << PG_referenced | 1 << PG_arch_1 |
 			1 << PG_checked | 1 << PG_mappedtodisk);
 	set_page_private(page, 0);
 	set_page_refs(page, order);
 	kernel_map_pages(page, 1 << order, 1);
+	return 0;
 }
 
 /*
@@ -669,11 +685,14 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
 
 	arch_free_page(page, 0);
 
-	kernel_map_pages(page, 1, 0);
-	inc_page_state(pgfree);
 	if (PageAnon(page))
 		page->mapping = NULL;
-	free_pages_check(__FUNCTION__, page);
+	if (free_pages_check(__FUNCTION__, page))
+		return;
+
+	inc_page_state(pgfree);
+	kernel_map_pages(page, 1, 0);
+
 	pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
 	local_irq_save(flags);
 	list_add(&page->lru, &pcp->list);
@@ -712,12 +731,14 @@ static struct page *
 buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags)
 {
 	unsigned long flags;
-	struct page *page = NULL;
+	struct page *page;
 	int cold = !!(gfp_flags & __GFP_COLD);
 
+again:
 	if (order == 0) {
 		struct per_cpu_pages *pcp;
 
+		page = NULL;
 		pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
 		local_irq_save(flags);
 		if (pcp->count <= pcp->low)
@@ -739,7 +760,8 @@ buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags)
 	if (page != NULL) {
 		BUG_ON(bad_range(zone, page));
 		mod_page_state_zone(zone, pgalloc, 1 << order);
-		prep_new_page(page, order);
+		if (prep_new_page(page, order))
+			goto again;
 
 		if (gfp_flags & __GFP_ZERO)
 			prep_zero_page(page, order, gfp_flags);