author | Mel Gorman <mgorman@techsingularity.net> | 2016-07-28 18:47:29 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-07-28 19:07:41 -0400
commit | bb4cc2bea6df7854d629bff114ca03237cc718d6 (patch)
tree | 5f164e808a79a8e84029a7d2a2772cf8c02219b2 /mm/page-writeback.c
parent | 71c799f4982d340fff86e751898841322f07f235 (diff)
mm, vmscan: remove highmem_file_pages
With the reintroduction of per-zone LRU stats, highmem_file_pages is
redundant, so remove it.
[mgorman@techsingularity.net: wrong stat is being accumulated in highmem_dirtyable_memory]
Link: http://lkml.kernel.org/r/20160725092324.GM10438@techsingularity.net
Link: http://lkml.kernel.org/r/1469110261-7365-3-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r-- | mm/page-writeback.c | 12
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 573d138fa7a5..7b5920a3500f 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -299,17 +299,13 @@ static unsigned long node_dirtyable_memory(struct pglist_data *pgdat)
 
 	return nr_pages;
 }
-#ifdef CONFIG_HIGHMEM
-atomic_t highmem_file_pages;
-#endif
 
 static unsigned long highmem_dirtyable_memory(unsigned long total)
 {
 #ifdef CONFIG_HIGHMEM
 	int node;
-	unsigned long x;
+	unsigned long x = 0;
 	int i;
-	unsigned long dirtyable = 0;
 
 	for_each_node_state(node, N_HIGH_MEMORY) {
 		for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
@@ -326,12 +322,12 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
 			nr_pages = zone_page_state(z, NR_FREE_PAGES);
 			/* watch for underflows */
 			nr_pages -= min(nr_pages, high_wmark_pages(z));
-			dirtyable += nr_pages;
+			nr_pages += zone_page_state(z, NR_ZONE_INACTIVE_FILE);
+			nr_pages += zone_page_state(z, NR_ZONE_ACTIVE_FILE);
+			x += nr_pages;
 		}
 	}
 
-	x = dirtyable + atomic_read(&highmem_file_pages);
-
 	/*
 	 * Unreclaimable memory (kernel memory or anonymous memory
 	 * without swap) can bring down the dirtyable pages below
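For reference, a sketch of how highmem_dirtyable_memory() looks after this change, reconstructed from the two hunks above. The lines that fall between the hunks (the zone lookup and the is_highmem_idx()/populated_zone() checks) and the #else branch are not part of this diff and are assumptions based on the surrounding kernel code, not part of the change itself; the tail of the function is omitted because the patch does not touch it.

/*
 * Sketch only, reconstructed from the hunks above. Code outside the two
 * hunks is assumed, not taken from this diff.
 */
static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
	int node;
	unsigned long x = 0;
	int i;

	for_each_node_state(node, N_HIGH_MEMORY) {
		for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
			struct zone *z;
			unsigned long nr_pages;

			/* assumed context: only look at highmem zones */
			if (!is_highmem_idx(i))
				continue;

			z = &NODE_DATA(node)->node_zones[i];
			if (!populated_zone(z))
				continue;

			nr_pages = zone_page_state(z, NR_FREE_PAGES);
			/* watch for underflows */
			nr_pages -= min(nr_pages, high_wmark_pages(z));

			/*
			 * With per-zone LRU stats back, the highmem file
			 * pages are read directly from each zone's counters
			 * instead of from the global highmem_file_pages
			 * atomic that this patch removes.
			 */
			nr_pages += zone_page_state(z, NR_ZONE_INACTIVE_FILE);
			nr_pages += zone_page_state(z, NR_ZONE_ACTIVE_FILE);
			x += nr_pages;
		}
	}

	/* remainder of the function (clamping x against "total") is
	 * unchanged by this patch and omitted here */
#else
	/* assumed: without CONFIG_HIGHMEM there is no highmem to count */
	return 0;
#endif
}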