author    Johannes Weiner <hannes@cmpxchg.org>    2010-05-24 17:32:40 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2010-05-25 11:07:00 -0400
commit    8b25c6d2231b978ccce9c401e771932bde79aa9f (patch)
tree      13845799e14e49465de1529680df7def59dcfeb8 /mm/vmscan.c
parent    0aeb2339e54e40d0788a7017ecaeac7f5271e262 (diff)
vmscan: remove isolate_pages callback scan control
For now we have only global isolation and memory control group isolation;
do not allow the reclaim entry functions to set an arbitrary page isolation
callback, as we do not need that flexibility.

Since we already pass around the group descriptor for the memory control
group isolation case, just use it to decide which of the two isolator
functions to call.

The decisions can be merged into nearby branches, so there is no extra cost
there; in fact, we save the indirect calls.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
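[Editor's illustration] The shape of the change can be shown outside the
kernel. Below is a minimal, self-contained sketch of the pattern, not the
actual vmscan code; the names scan_ctl, isolate_global and isolate_cgroup
are invented stand-ins for scan_control, isolate_pages_global and
mem_cgroup_isolate_pages:

#include <stdio.h>

/* Stand-in for the kernel's memory control group descriptor. */
struct mem_cgroup { int id; };

/* Pared-down scan control: the pluggable callback field is gone; the
 * group descriptor the callers already carry is all we keep. */
struct scan_ctl {
	struct mem_cgroup *mem_cgroup;	/* NULL means global reclaim */
};

static unsigned long isolate_global(unsigned long nr)
{
	return nr;	/* placeholder for global LRU isolation */
}

static unsigned long isolate_cgroup(unsigned long nr, struct mem_cgroup *mem)
{
	(void)mem;
	return nr;	/* placeholder for per-group LRU isolation */
}

/* Before the commit: nr_taken = sc->isolate_pages(...), an indirect
 * call through a pluggable pointer. After: a direct branch on the
 * descriptor, which can be folded into the callers' existing
 * global-vs-group checks. */
static unsigned long isolate(struct scan_ctl *sc, unsigned long nr)
{
	if (!sc->mem_cgroup)
		return isolate_global(nr);
	return isolate_cgroup(nr, sc->mem_cgroup);
}

int main(void)
{
	struct mem_cgroup memcg = { .id = 1 };
	struct scan_ctl global = { .mem_cgroup = NULL };
	struct scan_ctl grouped = { .mem_cgroup = &memcg };

	printf("global: %lu pages, group: %lu pages\n",
	       isolate(&global, 32), isolate(&grouped, 32));
	return 0;
}

The design point is that the compiler can see both callees through the
branch and the callers were already testing scanning_global_lru(), whereas
the function pointer forced an opaque indirect call at every isolation.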
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--    mm/vmscan.c    52
1 file changed, 28 insertions(+), 24 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c55763ee8312..915dceb487c1 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -89,12 +89,6 @@ struct scan_control {
 	 * are scanned.
 	 */
 	nodemask_t *nodemask;
-
-	/* Pluggable isolate pages callback */
-	unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
-			unsigned long *scanned, int order, int mode,
-			struct zone *z, struct mem_cgroup *mem_cont,
-			int active, int file);
 };
 
 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
@@ -1010,7 +1004,6 @@ static unsigned long isolate_pages_global(unsigned long nr,
 					struct list_head *dst,
 					unsigned long *scanned, int order,
 					int mode, struct zone *z,
-					struct mem_cgroup *mem_cont,
 					int active, int file)
 {
 	int lru = LRU_BASE;
@@ -1154,11 +1147,11 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		unsigned long nr_anon;
 		unsigned long nr_file;
 
-		nr_taken = sc->isolate_pages(SWAP_CLUSTER_MAX,
-			&page_list, &nr_scan, sc->order, mode,
-			zone, sc->mem_cgroup, 0, file);
-
 		if (scanning_global_lru(sc)) {
+			nr_taken = isolate_pages_global(SWAP_CLUSTER_MAX,
+							&page_list, &nr_scan,
+							sc->order, mode,
+							zone, 0, file);
 			zone->pages_scanned += nr_scan;
 			if (current_is_kswapd())
 				__count_zone_vm_events(PGSCAN_KSWAPD, zone,
@@ -1166,6 +1159,16 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 			else
 				__count_zone_vm_events(PGSCAN_DIRECT, zone,
 						       nr_scan);
+		} else {
+			nr_taken = mem_cgroup_isolate_pages(SWAP_CLUSTER_MAX,
+							&page_list, &nr_scan,
+							sc->order, mode,
+							zone, sc->mem_cgroup,
+							0, file);
+			/*
+			 * mem_cgroup_isolate_pages() keeps track of
+			 * scanned pages on its own.
+			 */
 		}
 
 		if (nr_taken == 0)
@@ -1343,16 +1346,23 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 
 	lru_add_drain();
 	spin_lock_irq(&zone->lru_lock);
-	nr_taken = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
-					ISOLATE_ACTIVE, zone,
-					sc->mem_cgroup, 1, file);
-	/*
-	 * zone->pages_scanned is used for detect zone's oom
-	 * mem_cgroup remembers nr_scan by itself.
-	 */
 	if (scanning_global_lru(sc)) {
+		nr_taken = isolate_pages_global(nr_pages, &l_hold,
+						&pgscanned, sc->order,
+						ISOLATE_ACTIVE, zone,
+						1, file);
 		zone->pages_scanned += pgscanned;
+	} else {
+		nr_taken = mem_cgroup_isolate_pages(nr_pages, &l_hold,
+						&pgscanned, sc->order,
+						ISOLATE_ACTIVE, zone,
+						sc->mem_cgroup, 1, file);
+		/*
+		 * mem_cgroup_isolate_pages() keeps track of
+		 * scanned pages on its own.
+		 */
 	}
+
 	reclaim_stat->recent_scanned[file] += nr_taken;
 
 	__count_zone_vm_events(PGREFILL, zone, pgscanned);
@@ -1882,7 +1892,6 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 		.swappiness = vm_swappiness,
 		.order = order,
 		.mem_cgroup = NULL,
-		.isolate_pages = isolate_pages_global,
 		.nodemask = nodemask,
 	};
 
@@ -1903,7 +1912,6 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
 		.swappiness = swappiness,
 		.order = 0,
 		.mem_cgroup = mem,
-		.isolate_pages = mem_cgroup_isolate_pages,
 	};
 	nodemask_t nm = nodemask_of_node(nid);
 
@@ -1937,7 +1945,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 		.swappiness = swappiness,
 		.order = 0,
 		.mem_cgroup = mem_cont,
-		.isolate_pages = mem_cgroup_isolate_pages,
 		.nodemask = NULL, /* we don't care the placement */
 	};
 
@@ -2015,7 +2022,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
 		.swappiness = vm_swappiness,
 		.order = order,
 		.mem_cgroup = NULL,
-		.isolate_pages = isolate_pages_global,
 	};
 	/*
 	 * temp_priority is used to remember the scanning priority at which
@@ -2394,7 +2400,6 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
 		.hibernation_mode = 1,
 		.swappiness = vm_swappiness,
 		.order = 0,
-		.isolate_pages = isolate_pages_global,
 	};
 	struct zonelist * zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
 	struct task_struct *p = current;
@@ -2579,7 +2584,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		.gfp_mask = gfp_mask,
 		.swappiness = vm_swappiness,
 		.order = order,
-		.isolate_pages = isolate_pages_global,
 	};
 	unsigned long slab_reclaimable;
 