Diffstat (limited to 'include/linux/percpu.h')
-rw-r--r--	include/linux/percpu.h	38
1 file changed, 14 insertions(+), 24 deletions(-)
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 49466b13c5c6..5095b834a6fb 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -39,10 +39,17 @@
 	preempt_enable();				\
 } while (0)
 
-#ifdef CONFIG_SMP
+#define get_cpu_ptr(var) ({				\
+	preempt_disable();				\
+	this_cpu_ptr(var); })
+
+#define put_cpu_ptr(var) do {				\
+	(void)(var);					\
+	preempt_enable();				\
+} while (0)
 
 /* minimum unit size, also is the maximum supported allocation size */
-#define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(64 << 10)
+#define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(32 << 10)
 
 /*
  * Percpu allocator can serve percpu allocations before slab is
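For context, a minimal usage sketch of the get_cpu_ptr()/put_cpu_ptr() pair defined in the hunk above, assuming a dynamically allocated per-CPU object; the hit_counter type, the counters pointer, and record_hit() are hypothetical names for illustration and are not part of this diff.

#include <linux/percpu.h>

/* hypothetical per-CPU counter, e.g. obtained from alloc_percpu() */
struct hit_counter {
	unsigned long hits;
};

static struct hit_counter __percpu *counters;

static void record_hit(void)
{
	/* get_cpu_ptr() disables preemption and returns this CPU's instance */
	struct hit_counter *hc = get_cpu_ptr(counters);

	hc->hits++;

	/* put_cpu_ptr() only type-checks its argument and re-enables preemption */
	put_cpu_ptr(counters);
}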
@@ -137,37 +144,20 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
  * dynamically allocated. Non-atomic access to the current CPU's
  * version should probably be combined with get_cpu()/put_cpu().
  */
+#ifdef CONFIG_SMP
 #define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
+#else
+#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
+#endif
 
 extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
 extern bool is_kernel_percpu_address(unsigned long addr);
 
-#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
+#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
 extern void __init setup_per_cpu_areas(void);
 #endif
 extern void __init percpu_init_late(void);
 
-#else /* CONFIG_SMP */
-
-#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
-
-/* can't distinguish from other static vars, always false */
-static inline bool is_kernel_percpu_address(unsigned long addr)
-{
-	return false;
-}
-
-static inline void __init setup_per_cpu_areas(void) { }
-
-static inline void __init percpu_init_late(void) { }
-
-static inline void *pcpu_lpage_remapped(void *kaddr)
-{
-	return NULL;
-}
-
-#endif /* CONFIG_SMP */
-
 extern void __percpu *__alloc_percpu(size_t size, size_t align);
 extern void free_percpu(void __percpu *__pdata);
 extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
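A similarly hedged sketch of the dynamic per-CPU interface declared above: __alloc_percpu()/free_percpu() (normally used through the alloc_percpu(type) wrapper, which passes sizeof(type) and __alignof__(type)) together with per_cpu_ptr() for walking every CPU's instance. The hit_counter type, total_hits(), and counters_demo() are hypothetical illustrations, not code from this patch.

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

/* same hypothetical per-CPU counter type as in the sketch above */
struct hit_counter {
	unsigned long hits;
};

static unsigned long total_hits(struct hit_counter __percpu *counters)
{
	unsigned long sum = 0;
	int cpu;

	/*
	 * per_cpu_ptr() resolves CPU 'cpu's instance; with the unified
	 * definitions above, the same loop builds on both SMP and UP.
	 */
	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(counters, cpu)->hits;

	return sum;
}

static int counters_demo(void)
{
	/* alloc_percpu(type) wraps __alloc_percpu(sizeof(type), __alignof__(type)) */
	struct hit_counter __percpu *counters = alloc_percpu(struct hit_counter);

	if (!counters)
		return -ENOMEM;

	total_hits(counters);	/* e.g. reported before teardown */
	free_percpu(counters);
	return 0;
}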