author    Konstantin Khlebnikov <khlebnikov@openvz.org>   2012-05-29 18:06:59 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-05-29 19:22:26 -0400
commit    95d918fc009072c2f88ce2e8b5db2e5abfad7c3e (patch)
tree      a20527a9a9276f7814d4fdad780d56a7d6339d86 /mm
parent    6a18adb35c27848195c938b0779ce882d63d3ed1 (diff)
mm/vmscan: remove update_isolated_counts()
update_isolated_counts() is no longer required, because lumpy-reclaim was
removed.  Insanity is over, now there is only one kind of inactive page.

Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
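[Editor's note] With lumpy reclaim gone, isolate_lru_pages() only ever takes
pages from the single LRU list it was asked to scan, so the removed per-list
count[NR_LRU_LISTS] walk collapses into plain enum arithmetic:
NR_LRU_BASE + lru names the zone counter of the scanned list, and
NR_ISOLATED_ANON + file picks the matching isolated counter. Below is a
minimal userspace sketch of that indexing, not kernel code: the enums are
simplified stand-ins that mirror only the ordering the kernel relies on,
mod_zone_page_state() is a toy model of __mod_zone_page_state(), and the
values are made up.

#include <stdio.h>

/* Simplified stand-ins: the NR_* stat items for the LRU lists are declared
 * in the same order as enum lru_list, and NR_ISOLATED_FILE directly follows
 * NR_ISOLATED_ANON.  The patch leans on exactly this layout. */
enum lru_list {
        LRU_INACTIVE_ANON, LRU_ACTIVE_ANON,
        LRU_INACTIVE_FILE, LRU_ACTIVE_FILE,
        NR_LRU_LISTS
};

enum zone_stat_item {
        NR_INACTIVE_ANON, NR_ACTIVE_ANON,
        NR_INACTIVE_FILE, NR_ACTIVE_FILE,
        NR_ISOLATED_ANON, NR_ISOLATED_FILE,
        NR_VM_ZONE_STAT_ITEMS
};

#define NR_LRU_BASE NR_INACTIVE_ANON

static long zone_stat[NR_VM_ZONE_STAT_ITEMS];

/* Toy model of __mod_zone_page_state(): adjust one per-zone counter. */
static void mod_zone_page_state(int item, long delta)
{
        zone_stat[item] += delta;
}

int main(void)
{
        int lru = LRU_INACTIVE_FILE;  /* the one list being scanned */
        int file = 1;                 /* 1 for file LRUs, 0 for anon */
        long nr_taken = 32;           /* illustrative batch size */

        /* The two updates that replace update_isolated_counts(): all
         * nr_taken pages came off one list, so one subtraction and one
         * isolated-counter increment cover everything. */
        mod_zone_page_state(NR_LRU_BASE + lru, -nr_taken);
        mod_zone_page_state(NR_ISOLATED_ANON + file, nr_taken);

        printf("NR_INACTIVE_FILE = %ld, NR_ISOLATED_FILE = %ld\n",
               zone_stat[NR_INACTIVE_FILE], zone_stat[NR_ISOLATED_FILE]);
        return 0;
}

The design choice here is to trade four explicit counter updates per batch
for two index computations that are valid by construction of the enums.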
Diffstat (limited to 'mm')
-rw-r--r--  mm/vmscan.c  60
1 file changed, 6 insertions(+), 54 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index eaa154bd1f84..53fa8671eabd 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1205,52 +1205,6 @@ putback_inactive_pages(struct mem_cgroup_zone *mz,
         list_splice(&pages_to_free, page_list);
 }
 
-static noinline_for_stack void
-update_isolated_counts(struct mem_cgroup_zone *mz,
-                       struct list_head *page_list,
-                       unsigned long *nr_anon,
-                       unsigned long *nr_file)
-{
-        struct zone *zone = mz->zone;
-        unsigned int count[NR_LRU_LISTS] = { 0, };
-        unsigned long nr_active = 0;
-        struct page *page;
-        int lru;
-
-        /*
-         * Count pages and clear active flags
-         */
-        list_for_each_entry(page, page_list, lru) {
-                int numpages = hpage_nr_pages(page);
-                lru = page_lru_base_type(page);
-                if (PageActive(page)) {
-                        lru += LRU_ACTIVE;
-                        ClearPageActive(page);
-                        nr_active += numpages;
-                }
-                count[lru] += numpages;
-        }
-
-        preempt_disable();
-        __count_vm_events(PGDEACTIVATE, nr_active);
-
-        __mod_zone_page_state(zone, NR_ACTIVE_FILE,
-                              -count[LRU_ACTIVE_FILE]);
-        __mod_zone_page_state(zone, NR_INACTIVE_FILE,
-                              -count[LRU_INACTIVE_FILE]);
-        __mod_zone_page_state(zone, NR_ACTIVE_ANON,
-                              -count[LRU_ACTIVE_ANON]);
-        __mod_zone_page_state(zone, NR_INACTIVE_ANON,
-                              -count[LRU_INACTIVE_ANON]);
-
-        *nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
-        *nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
-
-        __mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon);
-        __mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file);
-        preempt_enable();
-}
-
 /*
  * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
  * of reclaimed pages
@@ -1263,8 +1217,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
         unsigned long nr_scanned;
         unsigned long nr_reclaimed = 0;
         unsigned long nr_taken;
-        unsigned long nr_anon;
-        unsigned long nr_file;
         unsigned long nr_dirty = 0;
         unsigned long nr_writeback = 0;
         isolate_mode_t isolate_mode = 0;
@@ -1292,6 +1244,10 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 
         nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
                                      &nr_scanned, sc, isolate_mode, lru);
+
+        __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
+        __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
+
         if (global_reclaim(sc)) {
                 zone->pages_scanned += nr_scanned;
                 if (current_is_kswapd())
@@ -1306,15 +1262,12 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
         if (nr_taken == 0)
                 return 0;
 
-        update_isolated_counts(mz, &page_list, &nr_anon, &nr_file);
-
         nr_reclaimed = shrink_page_list(&page_list, zone, sc,
                                         &nr_dirty, &nr_writeback);
 
         spin_lock_irq(&zone->lru_lock);
 
-        reclaim_stat->recent_scanned[0] += nr_anon;
-        reclaim_stat->recent_scanned[1] += nr_file;
+        reclaim_stat->recent_scanned[file] += nr_taken;
 
         if (global_reclaim(sc)) {
                 if (current_is_kswapd())
@@ -1327,8 +1280,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 
         putback_inactive_pages(mz, &page_list);
 
-        __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
-        __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
+        __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
 
         spin_unlock_irq(&zone->lru_lock);
 
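[Editor's note] The same one-list-per-pass argument covers the reclaim
statistics: recent_scanned used to be fed from nr_anon and nr_file
separately, but a single pass now scans either the anon or the file side,
selected by the existing file flag (0 = anon, 1 = file), so nr_taken lands
in exactly one slot. A toy illustration under the same simplifying
assumptions as the sketch above; the struct is a cut-down stand-in and the
values are made up.

#include <stdio.h>

/* recent_scanned[0] tracks anon pages, recent_scanned[1] file pages,
 * matching the kernel's is_file_lru() convention. */
struct zone_reclaim_stat {
        unsigned long recent_scanned[2];
};

int main(void)
{
        struct zone_reclaim_stat stat = { { 0, 0 } };
        int file = 1;                 /* this pass scans a file LRU */
        unsigned long nr_taken = 32;  /* illustrative value */

        /* Before: recent_scanned[0] += nr_anon; recent_scanned[1] += nr_file;
         * After:  one list per pass, so one indexed update suffices. */
        stat.recent_scanned[file] += nr_taken;

        printf("anon scanned: %lu, file scanned: %lu\n",
               stat.recent_scanned[0], stat.recent_scanned[1]);
        return 0;
}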