Diffstat (limited to 'mm')
-rw-r--r--	mm/memcontrol.c	11
-rw-r--r--	mm/vmscan.c	38
2 files changed, 48 insertions(+), 1 deletion(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 78eb8552818b..70db6e0a5eec 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -570,6 +570,17 @@ int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
 	return 0;
 }
 
+int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
+{
+	unsigned long active;
+	unsigned long inactive;
+
+	inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_FILE);
+	active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_FILE);
+
+	return (active > inactive);
+}
+
 unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
 					struct zone *zone,
 					enum lru_list lru)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e5245d051647..9673437a5457 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1351,12 +1351,48 @@ static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
 	return low;
 }
 
+static int inactive_file_is_low_global(struct zone *zone)
+{
+	unsigned long active, inactive;
+
+	active = zone_page_state(zone, NR_ACTIVE_FILE);
+	inactive = zone_page_state(zone, NR_INACTIVE_FILE);
+
+	return (active > inactive);
+}
+
+/**
+ * inactive_file_is_low - check if file pages need to be deactivated
+ * @zone: zone to check
+ * @sc: scan control of this context
+ *
+ * When the system is doing streaming IO, memory pressure here
+ * ensures that active file pages get deactivated, until more
+ * than half of the file pages are on the inactive list.
+ *
+ * Once we get to that situation, protect the system's working
+ * set from being evicted by disabling active file page aging.
+ *
+ * This uses a different ratio than the anonymous pages, because
+ * the page cache uses a use-once replacement algorithm.
+ */
+static int inactive_file_is_low(struct zone *zone, struct scan_control *sc)
+{
+	int low;
+
+	if (scanning_global_lru(sc))
+		low = inactive_file_is_low_global(zone);
+	else
+		low = mem_cgroup_inactive_file_is_low(sc->mem_cgroup);
+	return low;
+}
+
 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
 	struct zone *zone, struct scan_control *sc, int priority)
 {
 	int file = is_file_lru(lru);
 
-	if (lru == LRU_ACTIVE_FILE) {
+	if (lru == LRU_ACTIVE_FILE && inactive_file_is_low(zone, sc)) {
 		shrink_active_list(nr_to_scan, zone, sc, priority, file);
 		return 0;
 	}
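
For reference, the heuristic the patch introduces can be read as: keep deactivating active file pages only while the active file list is still larger than the inactive one, as described in the inactive_file_is_low() comment above. The following standalone sketch (not kernel code; the helper name and the simulated page counts are made up for illustration) shows how repeated deactivation under streaming IO stops once no more than half of the file pages remain active:

#include <stdio.h>
#include <stdbool.h>

/*
 * Illustration only: mirrors the active > inactive check used by
 * inactive_file_is_low_global() / mem_cgroup_inactive_file_is_low().
 */
static bool file_list_needs_deactivation(unsigned long active,
					 unsigned long inactive)
{
	return active > inactive;
}

int main(void)
{
	unsigned long active = 1000, inactive = 100;

	/*
	 * Simulated streaming-IO pressure: each round moves a batch of
	 * pages from the active to the inactive file list, but only as
	 * long as the heuristic says the inactive list is too small.
	 */
	while (file_list_needs_deactivation(active, inactive)) {
		active -= 50;
		inactive += 50;
	}

	/* Aging stops once at most half of the file pages are active. */
	printf("active=%lu inactive=%lu\n", active, inactive);
	return 0;
}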