Diffstat (limited to 'mm/page_alloc.c'):

 mm/page_alloc.c | 66 ++++++++++++++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 44 insertions(+), 22 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9bd339eb04c6..a8cfa9cc6e86 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -588,13 +588,13 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 {
 	int migratetype = 0;
 	int batch_free = 0;
+	int to_free = count;
 
 	spin_lock(&zone->lock);
 	zone->all_unreclaimable = 0;
 	zone->pages_scanned = 0;
 
-	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
-	while (count) {
+	while (to_free) {
 		struct page *page;
 		struct list_head *list;
 
@@ -619,8 +619,9 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
 			__free_one_page(page, zone, 0, page_private(page));
 			trace_mm_page_pcpu_drain(page, 0, page_private(page));
-		} while (--count && --batch_free && !list_empty(list));
+		} while (--to_free && --batch_free && !list_empty(list));
 	}
+	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
 	spin_unlock(&zone->lock);
 }
 
@@ -631,8 +632,8 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
 	zone->all_unreclaimable = 0;
 	zone->pages_scanned = 0;
 
-	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
 	__free_one_page(page, zone, order, migratetype);
+	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
 	spin_unlock(&zone->lock);
 }
 
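
Note on the two hunks above: both the bulk and the single-page free paths now credit NR_FREE_PAGES only after the pages have actually been merged back into the buddy freelists, and still under zone->lock. A condensed sketch of the resulting ordering (not a literal excerpt; order-0 case shown):

	spin_lock(&zone->lock);
	/* the page goes back onto the buddy freelist first ... */
	__free_one_page(page, zone, 0, page_private(page));
	/* ... and only then is the free-page counter credited */
	__mod_zone_page_state(zone, NR_FREE_PAGES, 1);
	spin_unlock(&zone->lock);

With the old ordering the counter was bumped before the loop that did the freeing, so NR_FREE_PAGES could briefly claim pages the freelists did not yet hold.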
@@ -1461,7 +1462,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 {
 	/* free_pages my go negative - that's OK */
 	long min = mark;
-	long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
+	long free_pages = zone_nr_free_pages(z) - (1 << order) + 1;
 	int o;
 
 	if (alloc_flags & ALLOC_HIGH)
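
The watermark check now reads free pages through zone_nr_free_pages() rather than the raw vmstat counter. That helper is defined outside this file; as a hedged sketch only (simplified, and ignoring the kswapd-awake gating this series describes), it plausibly looks like:

	/* sketch only - the real helper lives outside mm/page_alloc.c */
	static inline unsigned long zone_nr_free_pages(struct zone *zone)
	{
		unsigned long nr_free_pages = zone_page_state(zone, NR_FREE_PAGES);

		/*
		 * Near the watermarks, per-cpu counter drift could report
		 * free pages that do not exist; fall back to the exact,
		 * more expensive per-cpu-corrected snapshot there.
		 */
		if (nr_free_pages < zone->percpu_drift_mark)
			return zone_page_state_snapshot(zone, NR_FREE_PAGES);

		return nr_free_pages;
	}

This is why the accounting reordering in the earlier hunks matters: once watermark decisions trust the counter, it must never run ahead of the freelists.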
@@ -1738,7 +1739,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 	struct page *page;
 
 	/* Acquire the OOM killer lock for the zones in zonelist */
-	if (!try_set_zone_oom(zonelist, gfp_mask)) {
+	if (!try_set_zonelist_oom(zonelist, gfp_mask)) {
 		schedule_timeout_uninterruptible(1);
 		return NULL;
 	}
@@ -1759,6 +1760,9 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 	/* The OOM killer will not help higher order allocs */
 	if (order > PAGE_ALLOC_COSTLY_ORDER)
 		goto out;
+	/* The OOM killer does not needlessly kill tasks for lowmem */
+	if (high_zoneidx < ZONE_NORMAL)
+		goto out;
 	/*
 	 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
 	 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
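
Here high_zoneidx is the highest zone the request may use, derived from its gfp mask, so high_zoneidx < ZONE_NORMAL identifies allocations confined to lowmem, where killing a task rarely releases usable pages. An illustrative fragment (this note's framing, not kernel code):

	/*
	 * gfp_zone() maps a gfp mask to the highest usable zone.
	 * A GFP_DMA request yields ZONE_DMA, and ZONE_DMA < ZONE_NORMAL,
	 * so the new check skips the OOM killer for it; an ordinary
	 * GFP_KERNEL request is unaffected.
	 */
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);

	if (high_zoneidx < ZONE_NORMAL)
		goto out;	/* an OOM kill would not free lowmem */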
@@ -1843,6 +1847,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 	struct page *page = NULL;
 	struct reclaim_state reclaim_state;
 	struct task_struct *p = current;
+	bool drained = false;
 
 	cond_resched();
 
@@ -1861,14 +1866,25 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 
 	cond_resched();
 
-	if (order != 0)
-		drain_all_pages();
+	if (unlikely(!(*did_some_progress)))
+		return NULL;
 
-	if (likely(*did_some_progress))
-		page = get_page_from_freelist(gfp_mask, nodemask, order,
-					zonelist, high_zoneidx,
-					alloc_flags, preferred_zone,
-					migratetype);
+retry:
+	page = get_page_from_freelist(gfp_mask, nodemask, order,
+					zonelist, high_zoneidx,
+					alloc_flags, preferred_zone,
+					migratetype);
+
+	/*
+	 * If an allocation failed after direct reclaim, it could be because
+	 * pages are pinned on the per-cpu lists. Drain them and try again
+	 */
+	if (!page && !drained) {
+		drain_all_pages();
+		drained = true;
+		goto retry;
+	}
+
 	return page;
 }
 
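
The retry relies on drain_all_pages() returning every CPU's per-cpu pages to the buddy freelists so the second get_page_from_freelist() pass can see them. A hedged sketch of the drain entry points as they looked in kernels of this era (simplified):

	/* sketch - simplified from mm/page_alloc.c of this period */
	void drain_local_pages(void *arg)
	{
		/* flush this CPU's pcp lists back to the buddy allocator */
		drain_pages(smp_processor_id());
	}

	void drain_all_pages(void)
	{
		/* run the flush on every online CPU and wait for it */
		on_each_cpu(drain_local_pages, NULL, 1);
	}

The drained flag ensures the cross-CPU broadcast happens at most once per direct reclaim attempt, so the slow path cannot loop on draining.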
@@ -2052,15 +2068,23 @@ rebalance:
 		if (page)
 			goto got_pg;
 
-		/*
-		 * The OOM killer does not trigger for high-order
-		 * ~__GFP_NOFAIL allocations so if no progress is being
-		 * made, there are no other options and retrying is
-		 * unlikely to help.
-		 */
-		if (order > PAGE_ALLOC_COSTLY_ORDER &&
-						!(gfp_mask & __GFP_NOFAIL))
-			goto nopage;
+		if (!(gfp_mask & __GFP_NOFAIL)) {
+			/*
+			 * The oom killer is not called for high-order
+			 * allocations that may fail, so if no progress
+			 * is being made, there are no other options and
+			 * retrying is unlikely to help.
+			 */
+			if (order > PAGE_ALLOC_COSTLY_ORDER)
+				goto nopage;
+			/*
+			 * The oom killer is not called for lowmem
+			 * allocations to prevent needlessly killing
+			 * innocent tasks.
+			 */
+			if (high_zoneidx < ZONE_NORMAL)
+				goto nopage;
+		}
 
 		goto restart;
 	}
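
Restructuring under !(gfp_mask & __GFP_NOFAIL) keeps both early exits away from callers that cannot tolerate failure. An illustrative caller (hypothetical, for flavor only):

	/*
	 * Illustrative: a __GFP_NOFAIL caller, e.g. journalling code
	 * with no recovery path. For such a request the slow path must
	 * keep retrying (goto restart) rather than take either of the
	 * goto nopage exits above, whatever the order or zone limit.
	 */
	jh = kmalloc(sizeof(*jh), GFP_NOFS | __GFP_NOFAIL);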
@@ -2412,7 +2436,7 @@ void show_free_areas(void)
2412 " all_unreclaimable? %s" 2436 " all_unreclaimable? %s"
2413 "\n", 2437 "\n",
2414 zone->name, 2438 zone->name,
2415 K(zone_page_state(zone, NR_FREE_PAGES)), 2439 K(zone_nr_free_pages(zone)),
2416 K(min_wmark_pages(zone)), 2440 K(min_wmark_pages(zone)),
2417 K(low_wmark_pages(zone)), 2441 K(low_wmark_pages(zone)),
2418 K(high_wmark_pages(zone)), 2442 K(high_wmark_pages(zone)),
@@ -4089,8 +4113,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 		zone_seqlock_init(zone);
 		zone->zone_pgdat = pgdat;
 
-		zone->prev_priority = DEF_PRIORITY;
-
 		zone_pcp_init(zone);
 		for_each_lru(l) {
 			INIT_LIST_HEAD(&zone->lru[l].list);