Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  75
1 file changed, 51 insertions(+), 24 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bd4de592dc23..b257720edfc8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -140,18 +140,13 @@ static void bad_page(const char *function, struct page *page)
                        1 << PG_reclaim |
                        1 << PG_slab    |
                        1 << PG_swapcache |
-                       1 << PG_writeback |
-                       1 << PG_reserved );
+                       1 << PG_writeback );
        set_page_count(page, 0);
        reset_page_mapcount(page);
        page->mapping = NULL;
        add_taint(TAINT_BAD_PAGE);
 }
 
-#ifndef CONFIG_HUGETLB_PAGE
-#define prep_compound_page(page, order) do { } while (0)
-#define destroy_compound_page(page, order) do { } while (0)
-#else
 /*
  * Higher-order pages are called "compound pages". They are structured thusly:
  *
@@ -205,7 +200,6 @@ static void destroy_compound_page(struct page *page, unsigned long order)
                ClearPageCompound(p);
        }
 }
-#endif /* CONFIG_HUGETLB_PAGE */
 
 /*
  * function for dealing with page's order in buddy system.
@@ -340,7 +334,7 @@ static inline void __free_pages_bulk (struct page *page,
        zone->free_area[order].nr_free++;
 }
 
-static inline void free_pages_check(const char *function, struct page *page)
+static inline int free_pages_check(const char *function, struct page *page)
 {
        if (    page_mapcount(page) ||
                page->mapping != NULL ||
@@ -358,6 +352,12 @@ static inline void free_pages_check(const char *function, struct page *page)
                bad_page(function, page);
        if (PageDirty(page))
                __ClearPageDirty(page);
+       /*
+        * For now, we report if PG_reserved was found set, but do not
+        * clear it, and do not free the page. But we shall soon need
+        * to do more, for when the ZERO_PAGE count wraps negative.
+        */
+       return PageReserved(page);
 }
 
 /*
@@ -397,11 +397,10 @@ void __free_pages_ok(struct page *page, unsigned int order)
 {
        LIST_HEAD(list);
        int i;
+       int reserved = 0;
 
        arch_free_page(page, order);
 
-       mod_page_state(pgfree, 1 << order);
-
 #ifndef CONFIG_MMU
        if (order > 0)
                for (i = 1 ; i < (1 << order) ; ++i)
@@ -409,8 +408,12 @@ void __free_pages_ok(struct page *page, unsigned int order)
 #endif
 
        for (i = 0 ; i < (1 << order) ; ++i)
-               free_pages_check(__FUNCTION__, page + i);
+               reserved += free_pages_check(__FUNCTION__, page + i);
+       if (reserved)
+               return;
+
        list_add(&page->lru, &list);
+       mod_page_state(pgfree, 1 << order);
        kernel_map_pages(page, 1<<order, 0);
        free_pages_bulk(page_zone(page), 1, &list, order);
 }
@@ -468,7 +471,7 @@ void set_page_refs(struct page *page, int order)
 /*
  * This page is about to be returned from the page allocator
  */
-static void prep_new_page(struct page *page, int order)
+static int prep_new_page(struct page *page, int order)
 {
        if (    page_mapcount(page) ||
                page->mapping != NULL ||
@@ -486,12 +489,20 @@ static void prep_new_page(struct page *page, int order)
                        1 << PG_reserved )))
                bad_page(__FUNCTION__, page);
 
+       /*
+        * For now, we report if PG_reserved was found set, but do not
+        * clear it, and do not allocate the page: as a safety net.
+        */
+       if (PageReserved(page))
+               return 1;
+
        page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
                        1 << PG_referenced | 1 << PG_arch_1 |
                        1 << PG_checked | 1 << PG_mappedtodisk);
        set_page_private(page, 0);
        set_page_refs(page, order);
        kernel_map_pages(page, 1 << order, 1);
+       return 0;
 }
 
 /*
@@ -674,11 +685,14 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
 
        arch_free_page(page, 0);
 
-       kernel_map_pages(page, 1, 0);
-       inc_page_state(pgfree);
        if (PageAnon(page))
                page->mapping = NULL;
-       free_pages_check(__FUNCTION__, page);
+       if (free_pages_check(__FUNCTION__, page))
+               return;
+
+       inc_page_state(pgfree);
+       kernel_map_pages(page, 1, 0);
+
        pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
        local_irq_save(flags);
        list_add(&page->lru, &pcp->list);
@@ -717,12 +731,14 @@ static struct page *
 buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags)
 {
        unsigned long flags;
-       struct page *page = NULL;
+       struct page *page;
        int cold = !!(gfp_flags & __GFP_COLD);
 
+again:
        if (order == 0) {
                struct per_cpu_pages *pcp;
 
+               page = NULL;
                pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
                local_irq_save(flags);
                if (pcp->count <= pcp->low)
@@ -744,7 +760,8 @@ buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags)
        if (page != NULL) {
                BUG_ON(bad_range(zone, page));
                mod_page_state_zone(zone, pgalloc, 1 << order);
-               prep_new_page(page, order);
+               if (prep_new_page(page, order))
+                       goto again;
 
                if (gfp_flags & __GFP_ZERO)
                        prep_zero_page(page, order, gfp_flags);
@@ -756,9 +773,12 @@ buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags)
 }
 
 #define ALLOC_NO_WATERMARKS     0x01 /* don't check watermarks at all */
-#define ALLOC_HARDER            0x02 /* try to alloc harder */
-#define ALLOC_HIGH              0x04 /* __GFP_HIGH set */
-#define ALLOC_CPUSET            0x08 /* check for correct cpuset */
+#define ALLOC_WMARK_MIN         0x02 /* use pages_min watermark */
+#define ALLOC_WMARK_LOW         0x04 /* use pages_low watermark */
+#define ALLOC_WMARK_HIGH        0x08 /* use pages_high watermark */
+#define ALLOC_HARDER            0x10 /* try to alloc harder */
+#define ALLOC_HIGH              0x20 /* __GFP_HIGH set */
+#define ALLOC_CPUSET            0x40 /* check for correct cpuset */
 
 /*
  * Return 1 if free pages are above 'mark'. This takes into account the order
@@ -813,7 +833,14 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
                        continue;
 
                if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
-                       if (!zone_watermark_ok(*z, order, (*z)->pages_low,
+                       unsigned long mark;
+                       if (alloc_flags & ALLOC_WMARK_MIN)
+                               mark = (*z)->pages_min;
+                       else if (alloc_flags & ALLOC_WMARK_LOW)
+                               mark = (*z)->pages_low;
+                       else
+                               mark = (*z)->pages_high;
+                       if (!zone_watermark_ok(*z, order, mark,
                                classzone_idx, alloc_flags))
                                continue;
                }
@@ -854,7 +881,7 @@ restart:
        }
 
        page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
-                               zonelist, ALLOC_CPUSET);
+                               zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET);
        if (page)
                goto got_pg;
 
@@ -871,7 +898,7 @@ restart:
         * cannot run direct reclaim, or if the caller has realtime scheduling
         * policy.
         */
-       alloc_flags = 0;
+       alloc_flags = ALLOC_WMARK_MIN;
        if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
                alloc_flags |= ALLOC_HARDER;
        if (gfp_mask & __GFP_HIGH)
@@ -942,7 +969,7 @@ rebalance:
         * under heavy pressure.
         */
        page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
-                               zonelist, ALLOC_CPUSET);
+                               zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET);
        if (page)
                goto got_pg;
 