path: root/include/linux/percpu.h
author	Tejun Heo <tj@kernel.org>	2009-02-20 02:29:08 -0500
committer	Tejun Heo <tj@kernel.org>	2009-02-20 02:29:08 -0500
commit	fbf59bc9d74d1fb30b8e0630743aff2806eafcea (patch)
tree	3f0a7b7cf809a25e27b7a5ba0b16321fdb901801	/include/linux/percpu.h
parent	8fc48985006da4ceba24508db64ec77fc0dfe3bb (diff)
percpu: implement new dynamic percpu allocator
Impact: new scalable dynamic percpu allocator which allows dynamic percpu areas to be accessed the same way as static ones

Implement scalable dynamic percpu allocator which can be used for both static and dynamic percpu areas. This will allow static and dynamic areas to share faster direct access methods. This feature is optional and enabled only when CONFIG_HAVE_DYNAMIC_PER_CPU_AREA is defined by arch. Please read comment on top of mm/percpu.c for details.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
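[Illustration only, not part of the patch.] A minimal sketch of how a caller exercises the allocator through the existing __alloc_percpu()/per_cpu_ptr()/free_percpu() interface, which this patch keeps unchanged; with CONFIG_HAVE_DYNAMIC_PER_CPU_AREA the returned pointer is reached by adding a per-CPU offset rather than indexing a hidden pointer array. The helper name example_percpu_sum() is made up for the example.

#include <linux/percpu.h>
#include <linux/cpumask.h>

/* Hypothetical helper: allocate one counter per possible CPU,
 * touch each CPU's copy, sum them, and free the object again. */
static unsigned long example_percpu_sum(void)
{
	unsigned long *counters;
	unsigned long sum = 0;
	int cpu;

	/* one unsigned long per possible CPU, naturally aligned */
	counters = __alloc_percpu(sizeof(unsigned long),
				  __alignof__(unsigned long));
	if (!counters)
		return 0;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(counters, cpu) = cpu;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(counters, cpu);

	free_percpu(counters);
	return sum;
}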
Diffstat (limited to 'include/linux/percpu.h')
-rw-r--r--	include/linux/percpu.h	22
1 file changed, 18 insertions(+), 4 deletions(-)
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index d99e24ae1811..18080995ff3e 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -76,23 +76,37 @@
 
 #ifdef CONFIG_SMP
 
-struct percpu_data {
-	void *ptrs[1];
-};
+#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
 
-#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
+extern void *pcpu_base_addr;
 
+typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);
+
+extern size_t __init pcpu_setup_static(pcpu_populate_pte_fn_t populate_pte_fn,
+				       struct page **pages, size_t cpu_size);
 /*
  * Use this to get to a cpu's version of the per-cpu object
  * dynamically allocated. Non-atomic access to the current CPU's
  * version should probably be combined with get_cpu()/put_cpu().
  */
+#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
+
+#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
+struct percpu_data {
+	void *ptrs[1];
+};
+
+#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
+
 #define per_cpu_ptr(ptr, cpu) \
 ({	\
 	struct percpu_data *__p = __percpu_disguise(ptr); \
 	(__typeof__(ptr))__p->ptrs[(cpu)]; \
 })
 
+#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
 extern void *__alloc_percpu(size_t size, size_t align);
 extern void free_percpu(void *__pdata);
 
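[Side note, illustration only.] The hunk above keeps two implementations of per_cpu_ptr(): the legacy one chases a disguised pointer into a per-CPU pointer array, while the new one adds a per-CPU offset to the pointer itself, the same arithmetic used for static percpu variables. A stand-alone user-space mock of the offset-based scheme; all mock_* names are invented for the example and the unit size is arbitrary.

#include <stdio.h>

#define MOCK_NR_CPUS	4
#define MOCK_UNIT_SIZE	4096	/* arbitrary per-CPU unit size */

/* Every CPU's copy lives at base + cpu * unit_size, so a per-CPU
 * address is just the object's address plus a per-CPU offset. */
static char mock_pcpu_base[MOCK_NR_CPUS * MOCK_UNIT_SIZE]
	__attribute__((aligned(16)));

static unsigned long mock_per_cpu_offset(int cpu)
{
	return (unsigned long)cpu * MOCK_UNIT_SIZE;
}

#define mock_per_cpu_ptr(ptr, cpu) \
	((__typeof__(ptr))((char *)(ptr) + mock_per_cpu_offset(cpu)))

int main(void)
{
	int *counter = (int *)mock_pcpu_base;	/* "allocated" at offset 0 */
	int cpu, sum = 0;

	for (cpu = 0; cpu < MOCK_NR_CPUS; cpu++)
		*mock_per_cpu_ptr(counter, cpu) = cpu;

	for (cpu = 0; cpu < MOCK_NR_CPUS; cpu++)
		sum += *mock_per_cpu_ptr(counter, cpu);

	printf("sum over %d mock CPUs: %d\n", MOCK_NR_CPUS, sum);
	return 0;
}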