diff options
author | Johannes Weiner <hannes@cmpxchg.org> | 2010-05-24 17:32:40 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-05-25 11:07:00 -0400 |
commit | 0aeb2339e54e40d0788a7017ecaeac7f5271e262 (patch) | |
tree | 66889ce248257e7e24c998a22994ccef222e4622 /mm | |
parent | 142762bd8d8c46345e79f0f68d3374564306972f (diff) |
vmscan: remove all_unreclaimable scan control
This scan control is abused to communicate a return value from
shrink_zones(). Write this idiomatically and remove the knob.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/vmscan.c | 14 |
1 file changed, 6 insertions, 8 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index cd4a5edf5be2..c55763ee8312 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -73,8 +73,6 @@ struct scan_control { | |||
73 | 73 | ||
74 | int swappiness; | 74 | int swappiness; |
75 | 75 | ||
76 | int all_unreclaimable; | ||
77 | |||
78 | int order; | 76 | int order; |
79 | 77 | ||
80 | /* | 78 | /* |
@@ -1716,14 +1714,14 @@ static void shrink_zone(int priority, struct zone *zone, | |||
1716 | * If a zone is deemed to be full of pinned pages then just give it a light | 1714 | * If a zone is deemed to be full of pinned pages then just give it a light |
1717 | * scan then give up on it. | 1715 | * scan then give up on it. |
1718 | */ | 1716 | */ |
1719 | static void shrink_zones(int priority, struct zonelist *zonelist, | 1717 | static int shrink_zones(int priority, struct zonelist *zonelist, |
1720 | struct scan_control *sc) | 1718 | struct scan_control *sc) |
1721 | { | 1719 | { |
1722 | enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask); | 1720 | enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask); |
1723 | struct zoneref *z; | 1721 | struct zoneref *z; |
1724 | struct zone *zone; | 1722 | struct zone *zone; |
1723 | int progress = 0; | ||
1725 | 1724 | ||
1726 | sc->all_unreclaimable = 1; | ||
1727 | for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx, | 1725 | for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx, |
1728 | sc->nodemask) { | 1726 | sc->nodemask) { |
1729 | if (!populated_zone(zone)) | 1727 | if (!populated_zone(zone)) |
@@ -1739,19 +1737,19 @@ static void shrink_zones(int priority, struct zonelist *zonelist, | |||
1739 | 1737 | ||
1740 | if (zone->all_unreclaimable && priority != DEF_PRIORITY) | 1738 | if (zone->all_unreclaimable && priority != DEF_PRIORITY) |
1741 | continue; /* Let kswapd poll it */ | 1739 | continue; /* Let kswapd poll it */ |
1742 | sc->all_unreclaimable = 0; | ||
1743 | } else { | 1740 | } else { |
1744 | /* | 1741 | /* |
1745 | * Ignore cpuset limitation here. We just want to reduce | 1742 | * Ignore cpuset limitation here. We just want to reduce |
1746 | * # of used pages by us regardless of memory shortage. | 1743 | * # of used pages by us regardless of memory shortage. |
1747 | */ | 1744 | */ |
1748 | sc->all_unreclaimable = 0; | ||
1749 | mem_cgroup_note_reclaim_priority(sc->mem_cgroup, | 1745 | mem_cgroup_note_reclaim_priority(sc->mem_cgroup, |
1750 | priority); | 1746 | priority); |
1751 | } | 1747 | } |
1752 | 1748 | ||
1753 | shrink_zone(priority, zone, sc); | 1749 | shrink_zone(priority, zone, sc); |
1750 | progress = 1; | ||
1754 | } | 1751 | } |
1752 | return progress; | ||
1755 | } | 1753 | } |
1756 | 1754 | ||
1757 | /* | 1755 | /* |
@@ -1805,7 +1803,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, | |||
1805 | sc->nr_scanned = 0; | 1803 | sc->nr_scanned = 0; |
1806 | if (!priority) | 1804 | if (!priority) |
1807 | disable_swap_token(); | 1805 | disable_swap_token(); |
1808 | shrink_zones(priority, zonelist, sc); | 1806 | ret = shrink_zones(priority, zonelist, sc); |
1809 | /* | 1807 | /* |
1810 | * Don't shrink slabs when reclaiming memory from | 1808 | * Don't shrink slabs when reclaiming memory from |
1811 | * over limit cgroups | 1809 | * over limit cgroups |
@@ -1842,7 +1840,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, | |||
1842 | congestion_wait(BLK_RW_ASYNC, HZ/10); | 1840 | congestion_wait(BLK_RW_ASYNC, HZ/10); |
1843 | } | 1841 | } |
1844 | /* top priority shrink_zones still had more to do? don't OOM, then */ | 1842 | /* top priority shrink_zones still had more to do? don't OOM, then */ |
1845 | if (!sc->all_unreclaimable && scanning_global_lru(sc)) | 1843 | if (ret && scanning_global_lru(sc)) |
1846 | ret = sc->nr_reclaimed; | 1844 | ret = sc->nr_reclaimed; |
1847 | out: | 1845 | out: |
1848 | /* | 1846 | /* |