path: root/include/linux/mmzone.h
author	Christoph Lameter <cl@linux-foundation.org>	2010-01-05 01:34:51 -0500
committer	Tejun Heo <tj@kernel.org>	2010-01-05 01:34:51 -0500
commit	99dcc3e5a94ed491fbef402831d8c0bbb267f995 (patch)
tree	dd4d2b9e10ab0d4502e4b2a22dfc0a02a3300d7e /include/linux/mmzone.h
parent	5917dae83cb02dfe74c9167b79e86e6d65183fa3 (diff)
this_cpu: Page allocator conversion
Use the per cpu allocator functionality to avoid per cpu arrays in struct zone.

This drastically reduces the size of struct zone for systems with large
amounts of processors and allows placement of critical variables of struct
zone in one cacheline even on very large systems.

Another effect is that the pagesets of one processor are placed near one
another. If multiple pagesets from different zones fit into one cacheline
then additional cacheline fetches can be avoided on the hot paths when
allocating memory from multiple zones.

Bootstrap becomes simpler if we use the same scheme for UP, SMP and NUMA.
#ifdefs are reduced and we can drop the zone_pcp macro.

Hotplug handling is also simplified since the percpu allocator can bring up
and shut down cpu areas for a specific cpu as a whole. So there is no need
to allocate or free individual pagesets.

V7-V8:
- Explain the chicken-and-egg dilemma with the percpu allocator.

V4-V5:
- Fix up cases where per_cpu_ptr is called before irq disable.
- Integrate the bootstrap logic that was separate before.

tj: Build failure in pageset_cpuup_callback() due to missing ret variable
fixed.

Reviewed-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Tejun Heo <tj@kernel.org>
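For illustration, a minimal sketch of the access-pattern change described
above. The names (zone_pcp, alloc_percpu, this_cpu_ptr) follow the commit
message; the actual call-site conversion happens in mm/page_alloc.c and
other files outside this header-only diffstat:

	/*
	 * Old scheme: each zone reaches its pagesets through an
	 * NR_CPUS-sized array, hidden behind the zone_pcp() macro.
	 */
	struct per_cpu_pageset *pset = zone_pcp(zone, cpu);

	/*
	 * New scheme: struct zone carries a single percpu pointer,
	 * allocated once from the percpu allocator...
	 */
	zone->pageset = alloc_percpu(struct per_cpu_pageset);

	/* ...and dereferenced for the local cpu on the hot path. */
	struct per_cpu_pageset *pset = this_cpu_ptr(zone->pageset);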
Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--	include/linux/mmzone.h	12
1 file changed, 2 insertions(+), 10 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 30fe668c254..7874201a355 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -184,13 +184,7 @@ struct per_cpu_pageset {
 	s8 stat_threshold;
 	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
 #endif
-} ____cacheline_aligned_in_smp;
-
-#ifdef CONFIG_NUMA
-#define zone_pcp(__z, __cpu) ((__z)->pageset[(__cpu)])
-#else
-#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
-#endif
+};
 
 #endif /* !__GENERATING_BOUNDS.H */
 
@@ -306,10 +300,8 @@ struct zone {
 	 */
 	unsigned long		min_unmapped_pages;
 	unsigned long		min_slab_pages;
-	struct per_cpu_pageset	*pageset[NR_CPUS];
-#else
-	struct per_cpu_pageset	pageset[NR_CPUS];
 #endif
+	struct per_cpu_pageset	*pageset;
 	/*
 	 * free areas of different sizes
 	 */
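Usage note (a sketch based on the commit message, not code from this
patch): with the NR_CPUS array gone, code that walks every cpu's pageset
for a zone goes through the generic percpu accessors instead, e.g.:

	int cpu;

	for_each_possible_cpu(cpu) {
		struct per_cpu_pageset *pset = per_cpu_ptr(zone->pageset, cpu);
		/* operate on pset, e.g. drain pset->pcp */
	}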