diff options
author | MinChan Kim <minchan.kim@gmail.com> | 2009-03-31 18:19:34 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-04-01 11:59:12 -0400 |
commit | d979677c4c02f0a72db5a03ecd8184bd9d6695c8 (patch) | |
tree | 370c936bb17210db0db80df0e9b488c6ee5c11a1 /mm/vmscan.c | |
parent | 0a0dd05dd7e1a800241888cbf515bf8d3dc2e59c (diff) |
mm: shrink_all_memory(): use sc.nr_reclaimed
Commit a79311c14eae4bb946a97af25f3e1b17d625985d "vmscan: bail out of
direct reclaim after swap_cluster_max pages" moved the nr_reclaimed
counter into the scan control to accumulate the number of all reclaimed
pages in a reclaim invocation.
shrink_all_memory() can use the same mechanism. It increases code
consistency and readability.
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: MinChan Kim <minchan.kim@gmail.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r-- | mm/vmscan.c | 46 |
1 files changed, 24 insertions, 22 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c index 301f057fd115..b15dcbb9e174 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -2050,16 +2050,15 @@ unsigned long global_lru_pages(void) | |||
2050 | #ifdef CONFIG_PM | 2050 | #ifdef CONFIG_PM |
2051 | /* | 2051 | /* |
2052 | * Helper function for shrink_all_memory(). Tries to reclaim 'nr_pages' pages | 2052 | * Helper function for shrink_all_memory(). Tries to reclaim 'nr_pages' pages |
2053 | * from LRU lists system-wide, for given pass and priority, and returns the | 2053 | * from LRU lists system-wide, for given pass and priority. |
2054 | * number of reclaimed pages | ||
2055 | * | 2054 | * |
2056 | * For pass > 3 we also try to shrink the LRU lists that contain a few pages | 2055 | * For pass > 3 we also try to shrink the LRU lists that contain a few pages |
2057 | */ | 2056 | */ |
2058 | static unsigned long shrink_all_zones(unsigned long nr_pages, int prio, | 2057 | static void shrink_all_zones(unsigned long nr_pages, int prio, |
2059 | int pass, struct scan_control *sc) | 2058 | int pass, struct scan_control *sc) |
2060 | { | 2059 | { |
2061 | struct zone *zone; | 2060 | struct zone *zone; |
2062 | unsigned long ret = 0; | 2061 | unsigned long nr_reclaimed = 0; |
2063 | 2062 | ||
2064 | for_each_populated_zone(zone) { | 2063 | for_each_populated_zone(zone) { |
2065 | enum lru_list l; | 2064 | enum lru_list l; |
@@ -2082,14 +2081,16 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio, | |||
2082 | 2081 | ||
2083 | zone->lru[l].nr_scan = 0; | 2082 | zone->lru[l].nr_scan = 0; |
2084 | nr_to_scan = min(nr_pages, lru_pages); | 2083 | nr_to_scan = min(nr_pages, lru_pages); |
2085 | ret += shrink_list(l, nr_to_scan, zone, | 2084 | nr_reclaimed += shrink_list(l, nr_to_scan, zone, |
2086 | sc, prio); | 2085 | sc, prio); |
2087 | if (ret >= nr_pages) | 2086 | if (nr_reclaimed >= nr_pages) { |
2088 | return ret; | 2087 | sc->nr_reclaimed = nr_reclaimed; |
2088 | return; | ||
2089 | } | ||
2089 | } | 2090 | } |
2090 | } | 2091 | } |
2091 | } | 2092 | } |
2092 | return ret; | 2093 | sc->nr_reclaimed = nr_reclaimed; |
2093 | } | 2094 | } |
2094 | 2095 | ||
2095 | /* | 2096 | /* |
@@ -2103,7 +2104,6 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio, | |||
2103 | unsigned long shrink_all_memory(unsigned long nr_pages) | 2104 | unsigned long shrink_all_memory(unsigned long nr_pages) |
2104 | { | 2105 | { |
2105 | unsigned long lru_pages, nr_slab; | 2106 | unsigned long lru_pages, nr_slab; |
2106 | unsigned long ret = 0; | ||
2107 | int pass; | 2107 | int pass; |
2108 | struct reclaim_state reclaim_state; | 2108 | struct reclaim_state reclaim_state; |
2109 | struct scan_control sc = { | 2109 | struct scan_control sc = { |
@@ -2125,8 +2125,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages) | |||
2125 | if (!reclaim_state.reclaimed_slab) | 2125 | if (!reclaim_state.reclaimed_slab) |
2126 | break; | 2126 | break; |
2127 | 2127 | ||
2128 | ret += reclaim_state.reclaimed_slab; | 2128 | sc.nr_reclaimed += reclaim_state.reclaimed_slab; |
2129 | if (ret >= nr_pages) | 2129 | if (sc.nr_reclaimed >= nr_pages) |
2130 | goto out; | 2130 | goto out; |
2131 | 2131 | ||
2132 | nr_slab -= reclaim_state.reclaimed_slab; | 2132 | nr_slab -= reclaim_state.reclaimed_slab; |
@@ -2148,18 +2148,18 @@ unsigned long shrink_all_memory(unsigned long nr_pages) | |||
2148 | sc.may_unmap = 1; | 2148 | sc.may_unmap = 1; |
2149 | 2149 | ||
2150 | for (prio = DEF_PRIORITY; prio >= 0; prio--) { | 2150 | for (prio = DEF_PRIORITY; prio >= 0; prio--) { |
2151 | unsigned long nr_to_scan = nr_pages - ret; | 2151 | unsigned long nr_to_scan = nr_pages - sc.nr_reclaimed; |
2152 | 2152 | ||
2153 | sc.nr_scanned = 0; | 2153 | sc.nr_scanned = 0; |
2154 | ret += shrink_all_zones(nr_to_scan, prio, pass, &sc); | 2154 | shrink_all_zones(nr_to_scan, prio, pass, &sc); |
2155 | if (ret >= nr_pages) | 2155 | if (sc.nr_reclaimed >= nr_pages) |
2156 | goto out; | 2156 | goto out; |
2157 | 2157 | ||
2158 | reclaim_state.reclaimed_slab = 0; | 2158 | reclaim_state.reclaimed_slab = 0; |
2159 | shrink_slab(sc.nr_scanned, sc.gfp_mask, | 2159 | shrink_slab(sc.nr_scanned, sc.gfp_mask, |
2160 | global_lru_pages()); | 2160 | global_lru_pages()); |
2161 | ret += reclaim_state.reclaimed_slab; | 2161 | sc.nr_reclaimed += reclaim_state.reclaimed_slab; |
2162 | if (ret >= nr_pages) | 2162 | if (sc.nr_reclaimed >= nr_pages) |
2163 | goto out; | 2163 | goto out; |
2164 | 2164 | ||
2165 | if (sc.nr_scanned && prio < DEF_PRIORITY - 2) | 2165 | if (sc.nr_scanned && prio < DEF_PRIORITY - 2) |
@@ -2168,21 +2168,23 @@ unsigned long shrink_all_memory(unsigned long nr_pages) | |||
2168 | } | 2168 | } |
2169 | 2169 | ||
2170 | /* | 2170 | /* |
2171 | * If ret = 0, we could not shrink LRUs, but there may be something | 2171 | * If sc.nr_reclaimed = 0, we could not shrink LRUs, but there may be |
2172 | * in slab caches | 2172 | * something in slab caches |
2173 | */ | 2173 | */ |
2174 | if (!ret) { | 2174 | if (!sc.nr_reclaimed) { |
2175 | do { | 2175 | do { |
2176 | reclaim_state.reclaimed_slab = 0; | 2176 | reclaim_state.reclaimed_slab = 0; |
2177 | shrink_slab(nr_pages, sc.gfp_mask, global_lru_pages()); | 2177 | shrink_slab(nr_pages, sc.gfp_mask, global_lru_pages()); |
2178 | ret += reclaim_state.reclaimed_slab; | 2178 | sc.nr_reclaimed += reclaim_state.reclaimed_slab; |
2179 | } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0); | 2179 | } while (sc.nr_reclaimed < nr_pages && |
2180 | reclaim_state.reclaimed_slab > 0); | ||
2180 | } | 2181 | } |
2181 | 2182 | ||
2183 | |||
2182 | out: | 2184 | out: |
2183 | current->reclaim_state = NULL; | 2185 | current->reclaim_state = NULL; |
2184 | 2186 | ||
2185 | return ret; | 2187 | return sc.nr_reclaimed; |
2186 | } | 2188 | } |
2187 | #endif | 2189 | #endif |
2188 | 2190 | ||