aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMinchan Kim <minchan.kim@gmail.com>2011-10-31 20:06:51 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2011-10-31 20:30:44 -0400
commit39deaf8585152f1a35c1676d3d7dc6ae0fb65967 (patch)
treea7509ea61c2f1028ed7ef961aa1abd16d50905f9
parent4356f21d09283dc6d39a6f7287a65ddab61e2808 (diff)
mm: compaction: make isolate_lru_page() filter-aware
In async mode, compaction doesn't migrate dirty or writeback pages. So, it's meaningless to pick such a page and re-add it to the LRU list. Of course, when we isolate the page in compaction, the page might be dirty or under writeback, but by the time we try to migrate it, it may no longer be dirty or under writeback, so it could be migrated. But that is very unlikely, as the isolate-and-migrate cycle is much faster than writeout. So, this patch reduces CPU overhead and prevents unnecessary LRU churning. Signed-off-by: Minchan Kim <minchan.kim@gmail.com> Acked-by: Johannes Weiner <hannes@cmpxchg.org> Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Acked-by: Mel Gorman <mgorman@suse.de> Acked-by: Rik van Riel <riel@redhat.com> Reviewed-by: Michal Hocko <mhocko@suse.cz> Cc: Andrea Arcangeli <aarcange@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--include/linux/mmzone.h2
-rw-r--r--mm/compaction.c7
-rw-r--r--mm/vmscan.c3
3 files changed, 10 insertions, 2 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 436ce6e7a446..80da968798ea 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -168,6 +168,8 @@ static inline int is_unevictable_lru(enum lru_list l)
168#define ISOLATE_INACTIVE ((__force isolate_mode_t)0x1) 168#define ISOLATE_INACTIVE ((__force isolate_mode_t)0x1)
169/* Isolate active pages */ 169/* Isolate active pages */
170#define ISOLATE_ACTIVE ((__force isolate_mode_t)0x2) 170#define ISOLATE_ACTIVE ((__force isolate_mode_t)0x2)
171/* Isolate clean file */
172#define ISOLATE_CLEAN ((__force isolate_mode_t)0x4)
171 173
172/* LRU Isolation modes. */ 174/* LRU Isolation modes. */
173typedef unsigned __bitwise__ isolate_mode_t; 175typedef unsigned __bitwise__ isolate_mode_t;
diff --git a/mm/compaction.c b/mm/compaction.c
index 47f717fa4233..a0e420207ebf 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -261,6 +261,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
261 unsigned long last_pageblock_nr = 0, pageblock_nr; 261 unsigned long last_pageblock_nr = 0, pageblock_nr;
262 unsigned long nr_scanned = 0, nr_isolated = 0; 262 unsigned long nr_scanned = 0, nr_isolated = 0;
263 struct list_head *migratelist = &cc->migratepages; 263 struct list_head *migratelist = &cc->migratepages;
264 isolate_mode_t mode = ISOLATE_ACTIVE|ISOLATE_INACTIVE;
264 265
265 /* Do not scan outside zone boundaries */ 266 /* Do not scan outside zone boundaries */
266 low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn); 267 low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
@@ -348,9 +349,11 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
348 continue; 349 continue;
349 } 350 }
350 351
352 if (!cc->sync)
353 mode |= ISOLATE_CLEAN;
354
351 /* Try isolate the page */ 355 /* Try isolate the page */
352 if (__isolate_lru_page(page, 356 if (__isolate_lru_page(page, mode, 0) != 0)
353 ISOLATE_ACTIVE|ISOLATE_INACTIVE, 0) != 0)
354 continue; 357 continue;
355 358
356 VM_BUG_ON(PageTransCompound(page)); 359 VM_BUG_ON(PageTransCompound(page));
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ec6dbcb976d1..c007e78d7078 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1045,6 +1045,9 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
1045 1045
1046 ret = -EBUSY; 1046 ret = -EBUSY;
1047 1047
1048 if ((mode & ISOLATE_CLEAN) && (PageDirty(page) || PageWriteback(page)))
1049 return ret;
1050
1048 if (likely(get_page_unless_zero(page))) { 1051 if (likely(get_page_unless_zero(page))) {
1049 /* 1052 /*
1050 * Be careful not to clear PageLRU until after we're 1053 * Be careful not to clear PageLRU until after we're