Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	50
1 file changed, 39 insertions(+), 11 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f90b76086ffa..208071c48bf2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1734,7 +1734,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 				continue;
 
-			lru_pages += zone_lru_pages(zone);
+			lru_pages += zone_reclaimable_pages(zone);
 		}
 	}
 
@@ -1951,7 +1951,7 @@ loop_again:
 		for (i = 0; i <= end_zone; i++) {
 			struct zone *zone = pgdat->node_zones + i;
 
-			lru_pages += zone_lru_pages(zone);
+			lru_pages += zone_reclaimable_pages(zone);
 		}
 
 		/*
@@ -1995,7 +1995,7 @@ loop_again:
 			if (zone_is_all_unreclaimable(zone))
 				continue;
 			if (nr_slab == 0 && zone->pages_scanned >=
-						(zone_lru_pages(zone) * 6))
+					(zone_reclaimable_pages(zone) * 6))
 					zone_set_flag(zone,
 						      ZONE_ALL_UNRECLAIMABLE);
 			/*
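Note on the hunk above: a zone is flagged ZONE_ALL_UNRECLAIMABLE once it has been scanned six times over without slab progress (nr_slab == 0). With the old zone_lru_pages() count, unswappable anon pages inflated that threshold on swapless systems, so a mostly-anonymous zone could be scanned far longer before reclaim gave up. A minimal standalone sketch of the predicate, with made-up numbers (a swapless zone holding 90000 anon and 10000 file LRU pages), assuming only the arithmetic shown in the hunk:

/* Sketch of the all-unreclaimable test, illustrative numbers only. */
#include <stdio.h>

int main(void)
{
	long pages_scanned = 90000;
	long lru_pages = 100000;	/* old zone_lru_pages(): anon + file */
	long reclaimable = 10000;	/* new count: file only, no swap */

	/* Old check: 600000 pages must be scanned before giving up. */
	printf("old: %s\n",
	       pages_scanned >= lru_pages * 6 ? "flag" : "keep scanning");
	/* New check: gives up after 60000, six passes over what
	 * reclaim can actually free. */
	printf("new: %s\n",
	       pages_scanned >= reclaimable * 6 ? "flag" : "keep scanning");
	return 0;
}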
@@ -2162,12 +2162,39 @@ void wakeup_kswapd(struct zone *zone, int order)
 	wake_up_interruptible(&pgdat->kswapd_wait);
 }
 
-unsigned long global_lru_pages(void)
+/*
+ * The reclaimable count would be mostly accurate.
+ * The less reclaimable pages may be
+ * - mlocked pages, which will be moved to unevictable list when encountered
+ * - mapped pages, which may require several travels to be reclaimed
+ * - dirty pages, which is not "instantly" reclaimable
+ */
+unsigned long global_reclaimable_pages(void)
 {
-	return global_page_state(NR_ACTIVE_ANON)
-		+ global_page_state(NR_ACTIVE_FILE)
-		+ global_page_state(NR_INACTIVE_ANON)
-		+ global_page_state(NR_INACTIVE_FILE);
+	int nr;
+
+	nr = global_page_state(NR_ACTIVE_FILE) +
+	     global_page_state(NR_INACTIVE_FILE);
+
+	if (nr_swap_pages > 0)
+		nr += global_page_state(NR_ACTIVE_ANON) +
+		      global_page_state(NR_INACTIVE_ANON);
+
+	return nr;
+}
+
+unsigned long zone_reclaimable_pages(struct zone *zone)
+{
+	int nr;
+
+	nr = zone_page_state(zone, NR_ACTIVE_FILE) +
+	     zone_page_state(zone, NR_INACTIVE_FILE);
+
+	if (nr_swap_pages > 0)
+		nr += zone_page_state(zone, NR_ACTIVE_ANON) +
+		      zone_page_state(zone, NR_INACTIVE_ANON);
+
+	return nr;
 }
 
 #ifdef CONFIG_HIBERNATION
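The two helpers introduced above count only pages that reclaim can actually make progress on: file-backed LRU pages always, anon LRU pages only while swap space remains (nr_swap_pages > 0). A minimal userspace sketch of the same accounting; the counters here are hypothetical stand-ins for the kernel's vmstat state:

/* Sketch only: mirrors the swap-aware accounting of
 * global_reclaimable_pages(); variables stand in for vmstat counters.
 */
#include <stdio.h>

static long active_file, inactive_file;	/* file LRU sizes */
static long active_anon, inactive_anon;	/* anon LRU sizes */
static long nr_swap_pages;		/* free swap slots */

static long reclaimable_pages(void)
{
	long nr = active_file + inactive_file;

	/* Anon pages can only be reclaimed by swapping them out,
	 * so they count only while swap space is available. */
	if (nr_swap_pages > 0)
		nr += active_anon + inactive_anon;

	return nr;
}

int main(void)
{
	active_file = 1000; inactive_file = 3000;
	active_anon = 2000; inactive_anon = 500;

	nr_swap_pages = 0;
	printf("no swap:   %ld\n", reclaimable_pages());	/* 4000 */

	nr_swap_pages = 8192;
	printf("with swap: %ld\n", reclaimable_pages());	/* 6500 */
	return 0;
}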
@@ -2239,7 +2266,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 
 	current->reclaim_state = &reclaim_state;
 
-	lru_pages = global_lru_pages();
+	lru_pages = global_reclaimable_pages();
 	nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
 	/* If slab caches are huge, it's better to hit them first */
 	while (nr_slab >= lru_pages) {
@@ -2281,7 +2308,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 
 		reclaim_state.reclaimed_slab = 0;
 		shrink_slab(sc.nr_scanned, sc.gfp_mask,
-				global_lru_pages());
+				global_reclaimable_pages());
 		sc.nr_reclaimed += reclaim_state.reclaimed_slab;
 		if (sc.nr_reclaimed >= nr_pages)
 			goto out;
@@ -2298,7 +2325,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 	if (!sc.nr_reclaimed) {
 		do {
 			reclaim_state.reclaimed_slab = 0;
-			shrink_slab(nr_pages, sc.gfp_mask, global_lru_pages());
+			shrink_slab(nr_pages, sc.gfp_mask,
+				    global_reclaimable_pages());
 			sc.nr_reclaimed += reclaim_state.reclaimed_slab;
 		} while (sc.nr_reclaimed < nr_pages &&
 			 reclaim_state.reclaimed_slab > 0);
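In the shrink_all_memory() hunks, the changed value is the lru_pages argument to shrink_slab(), which acts as the denominator for slab pressure: each cache is asked to scan roughly in proportion to nr_scanned / lru_pages of its objects (the real kernel also applies each shrinker's seeks factor and batches the work). Passing the reclaimable count instead of the raw LRU size means a swapless machine with a large anon LRU no longer under-shrinks its slab caches. A rough sketch of that proportionality, with illustrative numbers and a hypothetical cache:

/* Rough sketch of shrink_slab()'s scanned/lru_pages scaling.
 * Numbers are illustrative, not taken from the patch. */
#include <stdio.h>

int main(void)
{
	long nr_scanned = 1000;		/* LRU pages scanned this pass */
	long cache_objects = 50000;	/* objects in one slab cache */
	long lru_raw = 200000;		/* old: all LRU pages */
	long lru_reclaimable = 20000;	/* new: swapless, file pages only */

	/* pressure ~ cache_objects * nr_scanned / lru_pages */
	printf("old scan target: %ld objects\n",
	       cache_objects * nr_scanned / lru_raw);		/* 250 */
	printf("new scan target: %ld objects\n",
	       cache_objects * nr_scanned / lru_reclaimable);	/* 2500 */
	return 0;
}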