path: root/include/linux/percpu.h
author		Tejun Heo <tj@kernel.org>	2010-09-03 12:22:48 -0400
committer	Tejun Heo <tj@kernel.org>	2010-09-08 05:11:23 -0400
commit		bbddff0545878a8649c091a9dd7c43ce91516734 (patch)
tree		667714de4398d1589605555650cf7431a27e1a13 /include/linux/percpu.h
parent		6abad5acac09921f4944af77d3860f82d49f528d (diff)
percpu: use percpu allocator on UP too
On UP, percpu allocations were redirected to kmalloc. This has the following problems.

* For a certain number of allocations (determined by PERCPU_DYNAMIC_EARLY_SLOTS and PERCPU_DYNAMIC_EARLY_SIZE), the percpu allocator can be used before the usual kernel memory allocator is brought online. On SMP, this is used to initialize the kernel memory allocator.

* The percpu allocator honors alignment up to PAGE_SIZE but kmalloc() doesn't. For example, workqueue makes use of larger alignments for cpu_workqueues.

Currently, users of percpu allocators need to handle UP differently, which is somewhat fragile and ugly. Other than a small amount of memory, there isn't much to lose by enabling the percpu allocator on UP. It can simply use the kernel-memory-based chunk allocation which was added for SMP archs w/o MMUs.

This patch removes mm/percpu_up.c, builds mm/percpu.c on UP too and makes the UP build use percpu-km. As percpu addresses and kernel addresses are always identity mapped and static percpu variables don't need any special treatment, nothing is arch dependent and mm/percpu.c implements a generic setup_per_cpu_areas() for UP.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
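To illustrate the alignment point, here is a minimal sketch of a hypothetical caller (not part of this patch) that relies on the percpu allocator honoring the requested alignment; after this change the same code builds and behaves identically on UP and SMP, with no kmalloc() fallback needed:

#include <linux/percpu.h>
#include <linux/cache.h>
#include <linux/errno.h>
#include <linux/init.h>

/* hypothetical per-CPU stats structure; the cacheline alignment is the
 * kind of request a kmalloc()-backed UP implementation could not honor */
struct my_cpu_stats {
	unsigned long hits;
	unsigned long misses;
} ____cacheline_aligned;

static struct my_cpu_stats __percpu *my_stats;

static int __init my_stats_init(void)
{
	/* alignment up to PAGE_SIZE is honored on both SMP and UP */
	my_stats = __alloc_percpu(sizeof(struct my_cpu_stats),
				  __alignof__(struct my_cpu_stats));
	if (!my_stats)
		return -ENOMEM;
	return 0;
}

static void my_stats_exit(void)
{
	free_percpu(my_stats);
}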
Diffstat (limited to 'include/linux/percpu.h')
-rw-r--r--	include/linux/percpu.h	29
1 file changed, 5 insertions, 24 deletions
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index fc8130a7cac0..aeeeef1093cd 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -39,8 +39,6 @@
 	preempt_enable();				\
 } while (0)
 
-#ifdef CONFIG_SMP
-
 /* minimum unit size, also is the maximum supported allocation size */
 #define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(32 << 10)
 
@@ -137,37 +135,20 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
  * dynamically allocated.  Non-atomic access to the current CPU's
  * version should probably be combined with get_cpu()/put_cpu().
  */
+#ifdef CONFIG_SMP
 #define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
+#else
+#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
+#endif
 
 extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
 extern bool is_kernel_percpu_address(unsigned long addr);
 
-#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
+#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
 extern void __init setup_per_cpu_areas(void);
 #endif
 extern void __init percpu_init_late(void);
 
-#else /* CONFIG_SMP */
-
-#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
-
-/* can't distinguish from other static vars, always false */
-static inline bool is_kernel_percpu_address(unsigned long addr)
-{
-	return false;
-}
-
-static inline void __init setup_per_cpu_areas(void) { }
-
-static inline void __init percpu_init_late(void) { }
-
-static inline void *pcpu_lpage_remapped(void *kaddr)
-{
-	return NULL;
-}
-
-#endif /* CONFIG_SMP */
-
 extern void __percpu *__alloc_percpu(size_t size, size_t align);
 extern void free_percpu(void __percpu *__pdata);
 extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
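As a usage note, a hypothetical caller (not part of the patch) showing the per_cpu_ptr() interface the hunk above keeps uniform: on SMP it shifts the pointer by per_cpu_offset(cpu), while on UP it collapses to the verified pointer itself, so the same iteration code compiles for both configurations.

#include <linux/percpu.h>
#include <linux/cpumask.h>

/* assumed to have been allocated with __alloc_percpu() elsewhere */
static unsigned long __percpu *my_counters;

static unsigned long my_sum_counters(void)
{
	unsigned long sum = 0;
	int cpu;

	/* on UP this loop visits a single CPU and per_cpu_ptr() simply
	 * verifies and returns the pointer */
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(my_counters, cpu);

	return sum;
}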