author		Mel Gorman <mgorman@suse.de>	2013-07-03 18:01:50 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-07-03 19:07:28 -0400
commit		d43006d503ac921c7df4f94d13c17db6f13c9d26 (patch)
tree		3adf95869d1821cd5360c81dca790b3203608555 /include/linux
parent		9aa41348a8d11427feec350b21dcdd4330fd20c4 (diff)
mm: vmscan: have kswapd writeback pages based on dirty pages encountered, not priority
Currently kswapd queues dirty pages for writeback if it is scanning at an elevated priority, but the priority kswapd scans at is not related to the number of unqueued dirty pages encountered. Since commit "mm: vmscan: Flatten kswapd priority loop", the priority is related to the size of the LRU and the zone watermark, which is no indication of whether kswapd should write pages or not.

This patch tracks whether an excessive number of unqueued dirty pages are being encountered at the end of the LRU. If so, it indicates that dirty pages are being recycled before flusher threads can clean them, and it flags the zone so that kswapd will start writing pages until the zone is balanced.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Jiri Slaby <jslaby@suse.cz>
Cc: Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
Tested-by: Zlatko Calusic <zcalusic@bitsync.net>
Cc: dormando <dormando@rydia.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
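To make the heuristic concrete, below is a minimal user-space sketch of the flagging logic described above. Only ZONE_TAIL_LRU_DIRTY and zone_is_reclaim_dirty() come from this patch; the simplified struct zone, the zone_set_flag() body, the note_lru_tail_scan() helper and the nr_unqueued_dirty == nr_taken test are illustrative assumptions, since the mm/vmscan.c side is not part of this include/linux diff.

#include <stdio.h>

enum zone_flags {
	ZONE_CONGESTED,		/* existing flag: many dirty pages on a congested BDI */
	ZONE_TAIL_LRU_DIRTY,	/* added by this patch: many dirty pages at the LRU tail */
};

struct zone {
	unsigned long flags;	/* simplified stand-in for the kernel's struct zone */
};

static void zone_set_flag(struct zone *zone, enum zone_flags flag)
{
	zone->flags |= 1UL << flag;
}

static int zone_is_reclaim_dirty(const struct zone *zone)
{
	return !!(zone->flags & (1UL << ZONE_TAIL_LRU_DIRTY));
}

/*
 * Heuristic described in the changelog (modeled, not quoted): if every page
 * taken off the tail of the LRU in a scan batch was dirty but not yet queued
 * for writeback, flag the zone so kswapd starts writing pages itself.
 */
static void note_lru_tail_scan(struct zone *zone, unsigned long nr_taken,
			       unsigned long nr_unqueued_dirty)
{
	if (nr_taken && nr_unqueued_dirty == nr_taken)
		zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
}

int main(void)
{
	struct zone z = { .flags = 0 };

	note_lru_tail_scan(&z, 32, 32);	/* whole batch was unqueued dirty */
	printf("kswapd should write pages: %s\n",
	       zone_is_reclaim_dirty(&z) ? "yes" : "no");
	return 0;
}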
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/mmzone.h	9
1 file changed, 9 insertions, 0 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 5c76737d836b..2aaf72f7e345 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -495,6 +495,10 @@ typedef enum {
 	ZONE_CONGESTED,			/* zone has many dirty pages backed by
 					 * a congested BDI
 					 */
+	ZONE_TAIL_LRU_DIRTY,		/* reclaim scanning has recently found
+					 * many dirty file pages at the tail
+					 * of the LRU.
+					 */
 } zone_flags_t;
 
 static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
@@ -517,6 +521,11 @@ static inline int zone_is_reclaim_congested(const struct zone *zone)
 	return test_bit(ZONE_CONGESTED, &zone->flags);
 }
 
+static inline int zone_is_reclaim_dirty(const struct zone *zone)
+{
+	return test_bit(ZONE_TAIL_LRU_DIRTY, &zone->flags);
+}
+
 static inline int zone_is_reclaim_locked(const struct zone *zone)
 {
 	return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
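
For completeness, here is a hedged sketch of how the new zone_is_reclaim_dirty() helper might be consumed on the kswapd side. The kswapd_should_write_page() function, the flag bit position and the simplified struct zone are stand-ins, since the actual mm/vmscan.c changes fall outside this include/linux diffstat; per the changelog, the policy is that kswapd writes pages once the zone is flagged, until the zone is balanced.

#include <stdio.h>

#define ZONE_TAIL_LRU_DIRTY_FLAG	(1UL << 1)	/* illustrative bit position */

struct zone {
	unsigned long flags;	/* simplified stand-in for the kernel's struct zone */
};

static int zone_is_reclaim_dirty(const struct zone *zone)
{
	return !!(zone->flags & ZONE_TAIL_LRU_DIRTY_FLAG);
}

/*
 * Modeled policy: kswapd only writes back dirty pages it finds at the LRU
 * tail once the zone has been flagged, rather than basing the decision on
 * scan priority.
 */
static int kswapd_should_write_page(const struct zone *zone)
{
	return zone_is_reclaim_dirty(zone);
}

int main(void)
{
	struct zone clean  = { .flags = 0 };
	struct zone dirty  = { .flags = ZONE_TAIL_LRU_DIRTY_FLAG };

	printf("clean zone:   write? %d\n", kswapd_should_write_page(&clean));
	printf("flagged zone: write? %d\n", kswapd_should_write_page(&dirty));
	return 0;
}

Keeping the signal per-zone ties the writeback decision to the dirty pages actually encountered at the tail of the LRU rather than to the priority kswapd happens to be scanning at, which is the point of the changelog above.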