aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorKOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>2009-03-31 18:19:31 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2009-04-01 11:59:11 -0400
commitee99c71c59f897436ec65debb99372b3146f9985 (patch)
tree051f1c43b7c7658689d4b2c23b3d8585d6464a89 /mm
parenta6dc60f8975ad96d162915e07703a4439c80dcf0 (diff)
mm: introduce for_each_populated_zone() macro
Impact: cleanup In almost all cases, for_each_zone() is used together with populated_zone(). This is because most functions don't need memoryless-node information. Therefore, for_each_populated_zone() can help to simplify the code. This patch has no functional change. [akpm@linux-foundation.org: small cleanup] Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Mel Gorman <mel@csn.ul.ie> Reviewed-by: Johannes Weiner <hannes@cmpxchg.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/page_alloc.c26
-rw-r--r--mm/vmscan.c4
-rw-r--r--mm/vmstat.c11
3 files changed, 8 insertions, 33 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a3803ea8c27d..cbd532161f68 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -922,13 +922,10 @@ static void drain_pages(unsigned int cpu)
922 unsigned long flags; 922 unsigned long flags;
923 struct zone *zone; 923 struct zone *zone;
924 924
925 for_each_zone(zone) { 925 for_each_populated_zone(zone) {
926 struct per_cpu_pageset *pset; 926 struct per_cpu_pageset *pset;
927 struct per_cpu_pages *pcp; 927 struct per_cpu_pages *pcp;
928 928
929 if (!populated_zone(zone))
930 continue;
931
932 pset = zone_pcp(zone, cpu); 929 pset = zone_pcp(zone, cpu);
933 930
934 pcp = &pset->pcp; 931 pcp = &pset->pcp;
@@ -1879,10 +1876,7 @@ void show_free_areas(void)
1879 int cpu; 1876 int cpu;
1880 struct zone *zone; 1877 struct zone *zone;
1881 1878
1882 for_each_zone(zone) { 1879 for_each_populated_zone(zone) {
1883 if (!populated_zone(zone))
1884 continue;
1885
1886 show_node(zone); 1880 show_node(zone);
1887 printk("%s per-cpu:\n", zone->name); 1881 printk("%s per-cpu:\n", zone->name);
1888 1882
@@ -1922,12 +1916,9 @@ void show_free_areas(void)
1922 global_page_state(NR_PAGETABLE), 1916 global_page_state(NR_PAGETABLE),
1923 global_page_state(NR_BOUNCE)); 1917 global_page_state(NR_BOUNCE));
1924 1918
1925 for_each_zone(zone) { 1919 for_each_populated_zone(zone) {
1926 int i; 1920 int i;
1927 1921
1928 if (!populated_zone(zone))
1929 continue;
1930
1931 show_node(zone); 1922 show_node(zone);
1932 printk("%s" 1923 printk("%s"
1933 " free:%lukB" 1924 " free:%lukB"
@@ -1967,12 +1958,9 @@ void show_free_areas(void)
1967 printk("\n"); 1958 printk("\n");
1968 } 1959 }
1969 1960
1970 for_each_zone(zone) { 1961 for_each_populated_zone(zone) {
1971 unsigned long nr[MAX_ORDER], flags, order, total = 0; 1962 unsigned long nr[MAX_ORDER], flags, order, total = 0;
1972 1963
1973 if (!populated_zone(zone))
1974 continue;
1975
1976 show_node(zone); 1964 show_node(zone);
1977 printk("%s: ", zone->name); 1965 printk("%s: ", zone->name);
1978 1966
@@ -2784,11 +2772,7 @@ static int __cpuinit process_zones(int cpu)
2784 2772
2785 node_set_state(node, N_CPU); /* this node has a cpu */ 2773 node_set_state(node, N_CPU); /* this node has a cpu */
2786 2774
2787 for_each_zone(zone) { 2775 for_each_populated_zone(zone) {
2788
2789 if (!populated_zone(zone))
2790 continue;
2791
2792 zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset), 2776 zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
2793 GFP_KERNEL, node); 2777 GFP_KERNEL, node);
2794 if (!zone_pcp(zone, cpu)) 2778 if (!zone_pcp(zone, cpu))
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1bca60f0c527..301f057fd115 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2061,11 +2061,9 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
2061 struct zone *zone; 2061 struct zone *zone;
2062 unsigned long ret = 0; 2062 unsigned long ret = 0;
2063 2063
2064 for_each_zone(zone) { 2064 for_each_populated_zone(zone) {
2065 enum lru_list l; 2065 enum lru_list l;
2066 2066
2067 if (!populated_zone(zone))
2068 continue;
2069 if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY) 2067 if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
2070 continue; 2068 continue;
2071 2069
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 8cd81ea1ddc1..9826766f1274 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -135,11 +135,7 @@ static void refresh_zone_stat_thresholds(void)
135 int cpu; 135 int cpu;
136 int threshold; 136 int threshold;
137 137
138 for_each_zone(zone) { 138 for_each_populated_zone(zone) {
139
140 if (!zone->present_pages)
141 continue;
142
143 threshold = calculate_threshold(zone); 139 threshold = calculate_threshold(zone);
144 140
145 for_each_online_cpu(cpu) 141 for_each_online_cpu(cpu)
@@ -301,12 +297,9 @@ void refresh_cpu_vm_stats(int cpu)
301 int i; 297 int i;
302 int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, }; 298 int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
303 299
304 for_each_zone(zone) { 300 for_each_populated_zone(zone) {
305 struct per_cpu_pageset *p; 301 struct per_cpu_pageset *p;
306 302
307 if (!populated_zone(zone))
308 continue;
309
310 p = zone_pcp(zone, cpu); 303 p = zone_pcp(zone, cpu);
311 304
312 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) 305 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)