diff options
| author | Ingo Molnar <mingo@elte.hu> | 2010-10-07 03:43:11 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2010-10-07 03:43:11 -0400 |
| commit | d4f8f217b8a5d5bd02af979650418dca4caec472 (patch) | |
| tree | af047bfa9729c975e24cb7624107574e884d3a57 /mm/page_alloc.c | |
| parent | 2dfbf4dfbe47a484bae20456c12b40763b9b6af7 (diff) | |
| parent | 773e3f93577ffb493fb7c39b1a6ecf39b5748e87 (diff) | |
Merge branch 'rcu/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu into core/rcu
Diffstat (limited to 'mm/page_alloc.c')
| -rw-r--r-- | mm/page_alloc.c | 33 |
1 file changed, 23 insertions(+), 10 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index a9649f4b261e..a8cfa9cc6e86 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
| @@ -588,13 +588,13 @@ static void free_pcppages_bulk(struct zone *zone, int count, | |||
| 588 | { | 588 | { |
| 589 | int migratetype = 0; | 589 | int migratetype = 0; |
| 590 | int batch_free = 0; | 590 | int batch_free = 0; |
| 591 | int to_free = count; | ||
| 591 | 592 | ||
| 592 | spin_lock(&zone->lock); | 593 | spin_lock(&zone->lock); |
| 593 | zone->all_unreclaimable = 0; | 594 | zone->all_unreclaimable = 0; |
| 594 | zone->pages_scanned = 0; | 595 | zone->pages_scanned = 0; |
| 595 | 596 | ||
| 596 | __mod_zone_page_state(zone, NR_FREE_PAGES, count); | 597 | while (to_free) { |
| 597 | while (count) { | ||
| 598 | struct page *page; | 598 | struct page *page; |
| 599 | struct list_head *list; | 599 | struct list_head *list; |
| 600 | 600 | ||
| @@ -619,8 +619,9 @@ static void free_pcppages_bulk(struct zone *zone, int count, | |||
| 619 | /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ | 619 | /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ |
| 620 | __free_one_page(page, zone, 0, page_private(page)); | 620 | __free_one_page(page, zone, 0, page_private(page)); |
| 621 | trace_mm_page_pcpu_drain(page, 0, page_private(page)); | 621 | trace_mm_page_pcpu_drain(page, 0, page_private(page)); |
| 622 | } while (--count && --batch_free && !list_empty(list)); | 622 | } while (--to_free && --batch_free && !list_empty(list)); |
| 623 | } | 623 | } |
| 624 | __mod_zone_page_state(zone, NR_FREE_PAGES, count); | ||
| 624 | spin_unlock(&zone->lock); | 625 | spin_unlock(&zone->lock); |
| 625 | } | 626 | } |
| 626 | 627 | ||
| @@ -631,8 +632,8 @@ static void free_one_page(struct zone *zone, struct page *page, int order, | |||
| 631 | zone->all_unreclaimable = 0; | 632 | zone->all_unreclaimable = 0; |
| 632 | zone->pages_scanned = 0; | 633 | zone->pages_scanned = 0; |
| 633 | 634 | ||
| 634 | __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); | ||
| 635 | __free_one_page(page, zone, order, migratetype); | 635 | __free_one_page(page, zone, order, migratetype); |
| 636 | __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); | ||
| 636 | spin_unlock(&zone->lock); | 637 | spin_unlock(&zone->lock); |
| 637 | } | 638 | } |
| 638 | 639 | ||
| @@ -1461,7 +1462,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark, | |||
| 1461 | { | 1462 | { |
| 1462 | /* free_pages my go negative - that's OK */ | 1463 | /* free_pages my go negative - that's OK */ |
| 1463 | long min = mark; | 1464 | long min = mark; |
| 1464 | long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1; | 1465 | long free_pages = zone_nr_free_pages(z) - (1 << order) + 1; |
| 1465 | int o; | 1466 | int o; |
| 1466 | 1467 | ||
| 1467 | if (alloc_flags & ALLOC_HIGH) | 1468 | if (alloc_flags & ALLOC_HIGH) |
| @@ -1846,6 +1847,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, | |||
| 1846 | struct page *page = NULL; | 1847 | struct page *page = NULL; |
| 1847 | struct reclaim_state reclaim_state; | 1848 | struct reclaim_state reclaim_state; |
| 1848 | struct task_struct *p = current; | 1849 | struct task_struct *p = current; |
| 1850 | bool drained = false; | ||
| 1849 | 1851 | ||
| 1850 | cond_resched(); | 1852 | cond_resched(); |
| 1851 | 1853 | ||
| @@ -1864,14 +1866,25 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, | |||
| 1864 | 1866 | ||
| 1865 | cond_resched(); | 1867 | cond_resched(); |
| 1866 | 1868 | ||
| 1867 | if (order != 0) | 1869 | if (unlikely(!(*did_some_progress))) |
| 1868 | drain_all_pages(); | 1870 | return NULL; |
| 1869 | 1871 | ||
| 1870 | if (likely(*did_some_progress)) | 1872 | retry: |
| 1871 | page = get_page_from_freelist(gfp_mask, nodemask, order, | 1873 | page = get_page_from_freelist(gfp_mask, nodemask, order, |
| 1872 | zonelist, high_zoneidx, | 1874 | zonelist, high_zoneidx, |
| 1873 | alloc_flags, preferred_zone, | 1875 | alloc_flags, preferred_zone, |
| 1874 | migratetype); | 1876 | migratetype); |
| 1877 | |||
| 1878 | /* | ||
| 1879 | * If an allocation failed after direct reclaim, it could be because | ||
| 1880 | * pages are pinned on the per-cpu lists. Drain them and try again | ||
| 1881 | */ | ||
| 1882 | if (!page && !drained) { | ||
| 1883 | drain_all_pages(); | ||
| 1884 | drained = true; | ||
| 1885 | goto retry; | ||
| 1886 | } | ||
| 1887 | |||
| 1875 | return page; | 1888 | return page; |
| 1876 | } | 1889 | } |
| 1877 | 1890 | ||
| @@ -2423,7 +2436,7 @@ void show_free_areas(void) | |||
| 2423 | " all_unreclaimable? %s" | 2436 | " all_unreclaimable? %s" |
| 2424 | "\n", | 2437 | "\n", |
| 2425 | zone->name, | 2438 | zone->name, |
| 2426 | K(zone_page_state(zone, NR_FREE_PAGES)), | 2439 | K(zone_nr_free_pages(zone)), |
| 2427 | K(min_wmark_pages(zone)), | 2440 | K(min_wmark_pages(zone)), |
| 2428 | K(low_wmark_pages(zone)), | 2441 | K(low_wmark_pages(zone)), |
| 2429 | K(high_wmark_pages(zone)), | 2442 | K(high_wmark_pages(zone)), |
