Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 71a02e295037..72babac71dea 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -110,7 +110,7 @@ struct shrinker {
  * From 0 .. 100. Higher means more swappy.
  */
 int vm_swappiness = 60;
-static long total_memory;
+long vm_total_pages;	/* The total number of pages which the VM controls */
 
 static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);
@@ -743,7 +743,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		 * how much memory
 		 * is mapped.
 		 */
-		mapped_ratio = (sc->nr_mapped * 100) / total_memory;
+		mapped_ratio = (sc->nr_mapped * 100) / vm_total_pages;
 
 		/*
 		 * Now decide how much we really want to unmap some pages. The
@@ -1482,7 +1482,6 @@ static int __init kswapd_init(void)
 		pgdat->kswapd = find_task_by_pid(pid);
 		read_unlock(&tasklist_lock);
 	}
-	total_memory = nr_free_pagecache_pages();
 	hotcpu_notifier(cpu_callback, 0);
 	return 0;
 }
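
Below is a minimal, self-contained C sketch of the reclaim heuristic the second hunk touches: mapped_ratio is the percentage of VM-controlled pages that are currently mapped, computed against vm_total_pages (which, after this change, is presumably initialized elsewhere in the patch rather than once in kswapd_init()). The struct name scan_control_sketch and the sample values are illustrative stand-ins, not the kernel's actual scan_control.

/*
 * Illustrative userspace sketch of the mapped_ratio calculation shown in
 * the second hunk.  The names nr_mapped and vm_total_pages mirror the diff;
 * everything else here is hypothetical.
 */
#include <stdio.h>

static unsigned long vm_total_pages;	/* total pages the VM controls */

struct scan_control_sketch {
	unsigned long nr_mapped;	/* pages currently mapped into page tables */
};

/* Percentage of VM-controlled memory that is mapped. */
static unsigned long mapped_ratio(const struct scan_control_sketch *sc)
{
	return (sc->nr_mapped * 100) / vm_total_pages;
}

int main(void)
{
	struct scan_control_sketch sc = { .nr_mapped = 30000 };

	vm_total_pages = 262144;	/* e.g. 1 GiB of 4 KiB pages */
	printf("mapped_ratio = %lu%%\n", mapped_ratio(&sc));
	return 0;
}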