about | summary | refs | log | tree | commit | diff | stats
path: root/mm
diff options
context:
space:
mode:
authorHugh Dickins <hughd@google.com>2012-05-29 18:07:09 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-05-29 19:22:28 -0400
commit75b00af77ed5b5a3d55549f9e0c33f3969b9330c (patch)
tree17b59700daed9f536f50c375243ba6ce86c85945 /mm
parent4d7dcca213921fbaf08ee05359d28e4aaf2245f1 (diff)
mm: trivial cleanups in vmscan.c
Utter trivia in mm/vmscan.c, mostly just reducing the linecount slightly; most exciting change being get_scan_count() calling vmscan_swappiness() once instead of twice.

Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/vmscan.c31
1 file changed, 10 insertions(+), 21 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8b941f303cea..05d439dc1af9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1025,12 +1025,9 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1025 unsigned long *nr_scanned, struct scan_control *sc, 1025 unsigned long *nr_scanned, struct scan_control *sc,
1026 isolate_mode_t mode, enum lru_list lru) 1026 isolate_mode_t mode, enum lru_list lru)
1027{ 1027{
1028 struct list_head *src; 1028 struct list_head *src = &lruvec->lists[lru];
1029 unsigned long nr_taken = 0; 1029 unsigned long nr_taken = 0;
1030 unsigned long scan; 1030 unsigned long scan;
1031 int file = is_file_lru(lru);
1032
1033 src = &lruvec->lists[lru];
1034 1031
1035 for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) { 1032 for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
1036 struct page *page; 1033 struct page *page;
@@ -1058,11 +1055,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1058 } 1055 }
1059 1056
1060 *nr_scanned = scan; 1057 *nr_scanned = scan;
1061 1058 trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan,
1062 trace_mm_vmscan_lru_isolate(sc->order, 1059 nr_taken, mode, is_file_lru(lru));
1063 nr_to_scan, scan,
1064 nr_taken,
1065 mode, file);
1066 return nr_taken; 1060 return nr_taken;
1067} 1061}
1068 1062
@@ -1140,8 +1134,7 @@ static int too_many_isolated(struct zone *zone, int file,
1140} 1134}
1141 1135
1142static noinline_for_stack void 1136static noinline_for_stack void
1143putback_inactive_pages(struct lruvec *lruvec, 1137putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
1144 struct list_head *page_list)
1145{ 1138{
1146 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat; 1139 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1147 struct zone *zone = lruvec_zone(lruvec); 1140 struct zone *zone = lruvec_zone(lruvec);
@@ -1235,11 +1228,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
1235 if (global_reclaim(sc)) { 1228 if (global_reclaim(sc)) {
1236 zone->pages_scanned += nr_scanned; 1229 zone->pages_scanned += nr_scanned;
1237 if (current_is_kswapd()) 1230 if (current_is_kswapd())
1238 __count_zone_vm_events(PGSCAN_KSWAPD, zone, 1231 __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
1239 nr_scanned);
1240 else 1232 else
1241 __count_zone_vm_events(PGSCAN_DIRECT, zone, 1233 __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned);
1242 nr_scanned);
1243 } 1234 }
1244 spin_unlock_irq(&zone->lru_lock); 1235 spin_unlock_irq(&zone->lru_lock);
1245 1236
@@ -1534,9 +1525,9 @@ static int inactive_file_is_low(struct lruvec *lruvec)
1534 return inactive_file_is_low_global(lruvec_zone(lruvec)); 1525 return inactive_file_is_low_global(lruvec_zone(lruvec));
1535} 1526}
1536 1527
1537static int inactive_list_is_low(struct lruvec *lruvec, int file) 1528static int inactive_list_is_low(struct lruvec *lruvec, enum lru_list lru)
1538{ 1529{
1539 if (file) 1530 if (is_file_lru(lru))
1540 return inactive_file_is_low(lruvec); 1531 return inactive_file_is_low(lruvec);
1541 else 1532 else
1542 return inactive_anon_is_low(lruvec); 1533 return inactive_anon_is_low(lruvec);
@@ -1545,10 +1536,8 @@ static int inactive_list_is_low(struct lruvec *lruvec, int file)
1545static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, 1536static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
1546 struct lruvec *lruvec, struct scan_control *sc) 1537 struct lruvec *lruvec, struct scan_control *sc)
1547{ 1538{
1548 int file = is_file_lru(lru);
1549
1550 if (is_active_lru(lru)) { 1539 if (is_active_lru(lru)) {
1551 if (inactive_list_is_low(lruvec, file)) 1540 if (inactive_list_is_low(lruvec, lru))
1552 shrink_active_list(nr_to_scan, lruvec, sc, lru); 1541 shrink_active_list(nr_to_scan, lruvec, sc, lru);
1553 return 0; 1542 return 0;
1554 } 1543 }
@@ -1630,7 +1619,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
1630 * This scanning priority is essentially the inverse of IO cost. 1619 * This scanning priority is essentially the inverse of IO cost.
1631 */ 1620 */
1632 anon_prio = vmscan_swappiness(sc); 1621 anon_prio = vmscan_swappiness(sc);
1633 file_prio = 200 - vmscan_swappiness(sc); 1622 file_prio = 200 - anon_prio;
1634 1623
1635 /* 1624 /*
1636 * OK, so we have swap space and a fair amount of page cache 1625 * OK, so we have swap space and a fair amount of page cache