diff options
author | Michal Hocko <mhocko@suse.com> | 2016-03-15 17:57:01 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-03-15 19:55:16 -0400 |
commit | 0db2cb8da89d991762ec2aece45e55ceaee34664 (patch) | |
tree | b8821119e212ee45266cc73ba36a04a84247b6f6 | |
parent | d7206a70af5c094446927b5dea8704f0f96303e3 (diff) |
mm, vmscan: make zone_reclaimable_pages more precise
zone_reclaimable_pages() is used in should_reclaim_retry() which uses it
to calculate the target for the watermark check. This means that
precise numbers are important for the correct decision.
zone_reclaimable_pages uses zone_page_state which can contain stale data
with per-cpu diffs not synced yet (the last vmstat_update might have run
1s in the past).
Use zone_page_state_snapshot() in zone_reclaimable_pages() instead.
None of the current callers is in a hot path where getting the precise
value (which involves per-cpu iteration) would cause an unreasonable
overhead.
Signed-off-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Suggested-by: David Rientjes <rientjes@google.com>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | mm/vmscan.c | 14 |
1 files changed, 7 insertions, 7 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index db5722a07d4f..039f08d369a5 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -195,21 +195,21 @@ static unsigned long zone_reclaimable_pages(struct zone *zone)
 {
 	unsigned long nr;
 
-	nr = zone_page_state(zone, NR_ACTIVE_FILE) +
-	     zone_page_state(zone, NR_INACTIVE_FILE) +
-	     zone_page_state(zone, NR_ISOLATED_FILE);
+	nr = zone_page_state_snapshot(zone, NR_ACTIVE_FILE) +
+	     zone_page_state_snapshot(zone, NR_INACTIVE_FILE) +
+	     zone_page_state_snapshot(zone, NR_ISOLATED_FILE);
 
 	if (get_nr_swap_pages() > 0)
-		nr += zone_page_state(zone, NR_ACTIVE_ANON) +
-		      zone_page_state(zone, NR_INACTIVE_ANON) +
-		      zone_page_state(zone, NR_ISOLATED_ANON);
+		nr += zone_page_state_snapshot(zone, NR_ACTIVE_ANON) +
+		      zone_page_state_snapshot(zone, NR_INACTIVE_ANON) +
+		      zone_page_state_snapshot(zone, NR_ISOLATED_ANON);
 
 	return nr;
 }
 
 bool zone_reclaimable(struct zone *zone)
 {
-	return zone_page_state(zone, NR_PAGES_SCANNED) <
+	return zone_page_state_snapshot(zone, NR_PAGES_SCANNED) <
 	       zone_reclaimable_pages(zone) * 6;
 }
 