diff options
author:    Ingo Molnar <mingo@elte.hu>  2010-10-08 03:14:51 -0400
committer: Ingo Molnar <mingo@elte.hu>  2010-10-08 03:15:00 -0400
commit:    153db80f8cf74e8700cac96305b6c0b92918f17c (patch)
tree:      c2afb28e7b3f4fbf0aacd9edd39d7f895321ca0c /mm/page_alloc.c
parent:    5fd03ddab7fdbc44bfb2d183a4531c26a8dbca5a (diff)
parent:    cb655d0f3d57c23db51b981648e452988c0223f9 (diff)
Merge commit 'v2.6.36-rc7' into core/memblock
Merge reason: Update from -rc3 to -rc7.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  33
1 file changed, 23 insertions(+), 10 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 768ea486df58..9536017108ec 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -589,13 +589,13 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 {
 	int migratetype = 0;
 	int batch_free = 0;
+	int to_free = count;
 
 	spin_lock(&zone->lock);
 	zone->all_unreclaimable = 0;
 	zone->pages_scanned = 0;
 
-	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
-	while (count) {
+	while (to_free) {
 		struct page *page;
 		struct list_head *list;
 
@@ -620,8 +620,9 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
 			__free_one_page(page, zone, 0, page_private(page));
 			trace_mm_page_pcpu_drain(page, 0, page_private(page));
-		} while (--count && --batch_free && !list_empty(list));
+		} while (--to_free && --batch_free && !list_empty(list));
 	}
+	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
 	spin_unlock(&zone->lock);
 }
 
@@ -632,8 +633,8 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
 	zone->all_unreclaimable = 0;
 	zone->pages_scanned = 0;
 
-	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
 	__free_one_page(page, zone, order, migratetype);
+	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
 	spin_unlock(&zone->lock);
 }
 
@@ -1462,7 +1463,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 {
 	/* free_pages my go negative - that's OK */
 	long min = mark;
-	long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
+	long free_pages = zone_nr_free_pages(z) - (1 << order) + 1;
 	int o;
 
 	if (alloc_flags & ALLOC_HIGH)
@@ -1847,6 +1848,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 	struct page *page = NULL;
 	struct reclaim_state reclaim_state;
 	struct task_struct *p = current;
+	bool drained = false;
 
 	cond_resched();
 
@@ -1865,14 +1867,25 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 
 	cond_resched();
 
-	if (order != 0)
-		drain_all_pages();
+	if (unlikely(!(*did_some_progress)))
+		return NULL;
 
-	if (likely(*did_some_progress))
-		page = get_page_from_freelist(gfp_mask, nodemask, order,
+retry:
+	page = get_page_from_freelist(gfp_mask, nodemask, order,
 					zonelist, high_zoneidx,
 					alloc_flags, preferred_zone,
 					migratetype);
+
+	/*
+	 * If an allocation failed after direct reclaim, it could be because
+	 * pages are pinned on the per-cpu lists. Drain them and try again
+	 */
+	if (!page && !drained) {
+		drain_all_pages();
+		drained = true;
+		goto retry;
+	}
+
 	return page;
 }
 
@@ -2424,7 +2437,7 @@ void show_free_areas(void)
 			" all_unreclaimable? %s"
 			"\n",
 			zone->name,
-			K(zone_page_state(zone, NR_FREE_PAGES)),
+			K(zone_nr_free_pages(zone)),
 			K(min_wmark_pages(zone)),
 			K(low_wmark_pages(zone)),
 			K(high_wmark_pages(zone)),