Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  11
1 file changed, 2 insertions(+), 9 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d2caf7471cf1..08bc54e80862 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -47,8 +47,6 @@ struct scan_control {
 	/* Incremented by the number of inactive pages that were scanned */
 	unsigned long nr_scanned;
 
-	unsigned long nr_mapped;	/* From page_state */
-
 	/* This context's GFP mask */
 	gfp_t gfp_mask;
 
@@ -744,7 +742,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	 * how much memory
 	 * is mapped.
 	 */
-	mapped_ratio = (sc->nr_mapped * 100) / vm_total_pages;
+	mapped_ratio = (global_page_state(NR_FILE_MAPPED) * 100) /
+				vm_total_pages;
 
 	/*
 	 * Now decide how much we really want to unmap some pages.  The
@@ -990,7 +989,6 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
 	}
 
 	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
-		sc.nr_mapped = global_page_state(NR_FILE_MAPPED);
 		sc.nr_scanned = 0;
 		if (!priority)
 			disable_swap_token();
@@ -1075,8 +1073,6 @@ loop_again:
 	total_scanned = 0;
 	nr_reclaimed = 0;
 	sc.may_writepage = !laptop_mode;
-	sc.nr_mapped = global_page_state(NR_FILE_MAPPED);
-
 	inc_page_state(pageoutrun);
 
 	for (i = 0; i < pgdat->nr_zones; i++) {
@@ -1407,9 +1403,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 	for (prio = DEF_PRIORITY; prio >= 0; prio--) {
 		unsigned long nr_to_scan = nr_pages - ret;
 
-		sc.nr_mapped = global_page_state(NR_FILE_MAPPED);
 		sc.nr_scanned = 0;
-
 		ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
 		if (ret >= nr_pages)
 			goto out;
@@ -1548,7 +1542,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	struct scan_control sc = {
 		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
 		.may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
-		.nr_mapped = global_page_state(NR_FILE_MAPPED),
 		.swap_cluster_max = max_t(unsigned long, nr_pages,
 				SWAP_CLUSTER_MAX),
 		.gfp_mask = gfp_mask,
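
For reference, a minimal userspace sketch (not part of the patch) of the mapped_ratio arithmetic that shrink_active_list() performs after this change. The counter and total-page values below are made-up stand-ins for global_page_state(NR_FILE_MAPPED) and vm_total_pages:

#include <stdio.h>

int main(void)
{
	/* Assumed values: ~200 MiB of mapped file pages out of 1 GiB of RAM */
	unsigned long nr_file_mapped = 51200;	/* stand-in for global_page_state(NR_FILE_MAPPED) */
	unsigned long vm_total_pages = 262144;	/* stand-in: 1 GiB of 4 KiB pages */
	unsigned long mapped_ratio;

	/* Same integer division as the patched shrink_active_list() */
	mapped_ratio = (nr_file_mapped * 100) / vm_total_pages;
	printf("mapped_ratio = %lu%%\n", mapped_ratio);	/* prints 19% */
	return 0;
}

As the diff shows, this ratio is now computed from the live global counter at the moment of the scan, rather than from a snapshot cached in scan_control->nr_mapped at the start of each reclaim pass.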