aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAndrew Morton <akpm@osdl.org>2006-06-23 05:03:47 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2006-06-23 10:42:52 -0400
commitbd1e22b8e0a90f9a91e4c27db14ca15773659bf7 (patch)
tree8d32bdc39977af9dd3ba577b1fa34c0106b7f18e
parente0a42726794f71336ff4b26084d453dd597471ce (diff)
[PATCH] initialise total_memory() earlier
Initialise total_memory earlier in boot. Because if for some reason we run page reclaim early in boot, we don't want total_memory to be zero when we use it as a divisor. And rename total_memory to vm_total_pages to avoid naming clashes with architectures. Cc: Yasunori Goto <y-goto@jp.fujitsu.com> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Martin Bligh <mbligh@google.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--include/linux/swap.h1
-rw-r--r--mm/page_alloc.c6
-rw-r--r--mm/vmscan.c5
3 files changed, 6 insertions, 6 deletions
diff --git a/include/linux/swap.h b/include/linux/swap.h
index f1a827a972e0..dc3f3aa0c83e 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -185,6 +185,7 @@ extern unsigned long try_to_free_pages(struct zone **, gfp_t);
 extern unsigned long shrink_all_memory(unsigned long nr_pages);
 extern int vm_swappiness;
 extern int remove_mapping(struct address_space *mapping, struct page *page);
+extern long vm_total_pages;
 
 #ifdef CONFIG_NUMA
 extern int zone_reclaim_mode;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5af33186a25f..71a0b2a23f5b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1725,9 +1725,9 @@ void __meminit build_all_zonelists(void)
 		stop_machine_run(__build_all_zonelists, NULL, NR_CPUS);
 		/* cpuset refresh routine should be here */
 	}
-
-	printk("Built %i zonelists\n", num_online_nodes());
-
+	vm_total_pages = nr_free_pagecache_pages();
+	printk("Built %i zonelists.  Total pages: %ld\n",
+			num_online_nodes(), vm_total_pages);
 }
 
 /*
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 71a02e295037..72babac71dea 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -110,7 +110,7 @@ struct shrinker {
  * From 0 .. 100. Higher means more swappy.
  */
 int vm_swappiness = 60;
-static long total_memory;
+long vm_total_pages;	/* The total number of pages which the VM controls */
 
 static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);
@@ -743,7 +743,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	 * how much memory
 	 * is mapped.
 	 */
-	mapped_ratio = (sc->nr_mapped * 100) / total_memory;
+	mapped_ratio = (sc->nr_mapped * 100) / vm_total_pages;
 
 	/*
 	 * Now decide how much we really want to unmap some pages. The
@@ -1482,7 +1482,6 @@ static int __init kswapd_init(void)
 		pgdat->kswapd = find_task_by_pid(pid);
 		read_unlock(&tasklist_lock);
 	}
-	total_memory = nr_free_pagecache_pages();
 	hotcpu_notifier(cpu_callback, 0);
 	return 0;
 }