about summary refs log tree commit diff stats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
authorNick Piggin <nickpiggin@yahoo.com.au>2006-01-08 04:00:41 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2006-01-08 23:12:40 -0500
commit23316bc86fd31c5d644a71c398ec41d9fecacec4 (patch)
tree9efc5bbd9dd7c35b6b5fea8ce8215477c2ff43dd /mm/page_alloc.c
parent8ad4b1fb8205340dba16b63467bb23efc27264d6 (diff)
[PATCH] mm: cleanup zone_pcp
Use zone_pcp everywhere even though NUMA code "knows" the internal details of the zone. Stop other people trying to copy, and it looks nicer.

Also, only print the pagesets of online cpus in zoneinfo.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: "Seth, Rohit" <rohit.seth@intel.com>
Cc: Christoph Lameter <christoph@lameter.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--mm/page_alloc.c16
1 file changed, 8 insertions, 8 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2c46f697e8ff..6b92a945ae6b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -597,7 +597,7 @@ void drain_remote_pages(void)
597 if (zone->zone_pgdat->node_id == numa_node_id()) 597 if (zone->zone_pgdat->node_id == numa_node_id())
598 continue; 598 continue;
599 599
600 pset = zone->pageset[smp_processor_id()]; 600 pset = zone_pcp(zone, smp_processor_id());
601 for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) { 601 for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
602 struct per_cpu_pages *pcp; 602 struct per_cpu_pages *pcp;
603 603
@@ -1881,12 +1881,12 @@ static int __devinit process_zones(int cpu)
1881 1881
1882 for_each_zone(zone) { 1882 for_each_zone(zone) {
1883 1883
1884 zone->pageset[cpu] = kmalloc_node(sizeof(struct per_cpu_pageset), 1884 zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
1885 GFP_KERNEL, cpu_to_node(cpu)); 1885 GFP_KERNEL, cpu_to_node(cpu));
1886 if (!zone->pageset[cpu]) 1886 if (!zone_pcp(zone, cpu))
1887 goto bad; 1887 goto bad;
1888 1888
1889 setup_pageset(zone->pageset[cpu], zone_batchsize(zone)); 1889 setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
1890 1890
1891 if (percpu_pagelist_fraction) 1891 if (percpu_pagelist_fraction)
1892 setup_pagelist_highmark(zone_pcp(zone, cpu), 1892 setup_pagelist_highmark(zone_pcp(zone, cpu),
@@ -1898,8 +1898,8 @@ bad:
1898 for_each_zone(dzone) { 1898 for_each_zone(dzone) {
1899 if (dzone == zone) 1899 if (dzone == zone)
1900 break; 1900 break;
1901 kfree(dzone->pageset[cpu]); 1901 kfree(zone_pcp(dzone, cpu));
1902 dzone->pageset[cpu] = NULL; 1902 zone_pcp(dzone, cpu) = NULL;
1903 } 1903 }
1904 return -ENOMEM; 1904 return -ENOMEM;
1905} 1905}
@@ -1984,7 +1984,7 @@ static __devinit void zone_pcp_init(struct zone *zone)
1984 for (cpu = 0; cpu < NR_CPUS; cpu++) { 1984 for (cpu = 0; cpu < NR_CPUS; cpu++) {
1985#ifdef CONFIG_NUMA 1985#ifdef CONFIG_NUMA
1986 /* Early boot. Slab allocator not functional yet */ 1986 /* Early boot. Slab allocator not functional yet */
1987 zone->pageset[cpu] = &boot_pageset[cpu]; 1987 zone_pcp(zone, cpu) = &boot_pageset[cpu];
1988 setup_pageset(&boot_pageset[cpu],0); 1988 setup_pageset(&boot_pageset[cpu],0);
1989#else 1989#else
1990 setup_pageset(zone_pcp(zone,cpu), batch); 1990 setup_pageset(zone_pcp(zone,cpu), batch);
@@ -2227,7 +2227,7 @@ static int zoneinfo_show(struct seq_file *m, void *arg)
2227 seq_printf(m, 2227 seq_printf(m,
2228 ")" 2228 ")"
2229 "\n pagesets"); 2229 "\n pagesets");
2230 for (i = 0; i < ARRAY_SIZE(zone->pageset); i++) { 2230 for_each_online_cpu(i) {
2231 struct per_cpu_pageset *pageset; 2231 struct per_cpu_pageset *pageset;
2232 int j; 2232 int j;
2233 2233