Diffstat (limited to 'include/linux/percpu.h')

 include/linux/percpu.h | 149 +++++++++++++++++++++++++++++++++-------------
 1 file changed, 103 insertions(+), 46 deletions(-)
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 9f2a3751873a..545b068bcb70 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -8,35 +8,46 @@
 
 #include <asm/percpu.h>
 
+#ifndef PER_CPU_BASE_SECTION
+#ifdef CONFIG_SMP
+#define PER_CPU_BASE_SECTION ".data.percpu"
+#else
+#define PER_CPU_BASE_SECTION ".data"
+#endif
+#endif
+
 #ifdef CONFIG_SMP
-#define DEFINE_PER_CPU(type, name) \
-	__attribute__((__section__(".data.percpu"))) \
-	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
 
 #ifdef MODULE
-#define SHARED_ALIGNED_SECTION ".data.percpu"
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
 #else
-#define SHARED_ALIGNED_SECTION ".data.percpu.shared_aligned"
+#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
 #endif
+#define PER_CPU_FIRST_SECTION ".first"
 
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
-	__attribute__((__section__(SHARED_ALIGNED_SECTION))) \
-	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name \
-	____cacheline_aligned_in_smp
+#else
+
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
+#define PER_CPU_FIRST_SECTION ""
+
+#endif
 
-#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
-	__attribute__((__section__(".data.percpu.page_aligned"))) \
+#define DEFINE_PER_CPU_SECTION(type, name, section) \
+	__attribute__((__section__(PER_CPU_BASE_SECTION section))) \
 	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
-#else
+
 #define DEFINE_PER_CPU(type, name) \
-	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
+	DEFINE_PER_CPU_SECTION(type, name, "")
 
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
-	DEFINE_PER_CPU(type, name)
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
+	____cacheline_aligned_in_smp
 
-#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
-	DEFINE_PER_CPU(type, name)
-#endif
+#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
+	DEFINE_PER_CPU_SECTION(type, name, ".page_aligned")
+
+#define DEFINE_PER_CPU_FIRST(type, name) \
+	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)
 
 #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
 #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
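For illustration only (not part of the patch; every name below is made up): after this rework, all of the definition macros funnel through DEFINE_PER_CPU_SECTION(), so a plain per-CPU variable and a cacheline-aligned one differ only in the section suffix they pass. A minimal sketch of their use, assuming the usual get_cpu()/put_cpu() and __get_cpu_var() accessors from this era of the tree:

#include <linux/percpu.h>

/* Ends up in .data.percpu on SMP, plain .data on UP, via
 * PER_CPU_BASE_SECTION with an empty suffix. */
static DEFINE_PER_CPU(unsigned long, demo_count);

/* Cacheline-aligned to avoid false sharing; placed in
 * .data.percpu.shared_aligned for built-in SMP code (the suffix
 * collapses to "" for modules and on UP). */
static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, demo_hot_count);

static void demo_tick(void)
{
	get_cpu();			/* disable preemption */
	__get_cpu_var(demo_count)++;	/* this CPU's copy */
	put_cpu();
}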
@@ -65,52 +76,98 @@
 
 #ifdef CONFIG_SMP
 
+#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+
+/* minimum unit size, also is the maximum supported allocation size */
+#define PCPU_MIN_UNIT_SIZE	(16UL << PAGE_SHIFT)
+
+/*
+ * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
+ * back on the first chunk if arch is manually allocating and mapping
+ * it for faster access (as a part of large page mapping for example).
+ * Note that dynamic percpu allocator covers both static and dynamic
+ * areas, so these values are bigger than PERCPU_MODULE_RESERVE.
+ *
+ * On typical configuration with modules, the following values leave
+ * about 8k of free space on the first chunk after boot on both x86_32
+ * and 64 when module support is enabled.  When module support is
+ * disabled, it's much tighter.
+ */
+#ifndef PERCPU_DYNAMIC_RESERVE
+#  if BITS_PER_LONG > 32
+#    ifdef CONFIG_MODULES
+#      define PERCPU_DYNAMIC_RESERVE	(6 << PAGE_SHIFT)
+#    else
+#      define PERCPU_DYNAMIC_RESERVE	(4 << PAGE_SHIFT)
+#    endif
+#  else
+#    ifdef CONFIG_MODULES
+#      define PERCPU_DYNAMIC_RESERVE	(4 << PAGE_SHIFT)
+#    else
+#      define PERCPU_DYNAMIC_RESERVE	(2 << PAGE_SHIFT)
+#    endif
+#  endif
+#endif	/* PERCPU_DYNAMIC_RESERVE */
+
+extern void *pcpu_base_addr;
+
+typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno);
+typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);
+
+extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
+					size_t static_size, size_t unit_size,
+					size_t free_size, void *base_addr,
+					pcpu_populate_pte_fn_t populate_pte_fn);
+
+/*
+ * Use this to get to a cpu's version of the per-cpu object
+ * dynamically allocated.  Non-atomic access to the current CPU's
+ * version should probably be combined with get_cpu()/put_cpu().
+ */
+#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
+
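To put concrete numbers on the constants just added (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12): PCPU_MIN_UNIT_SIZE is 16UL << 12 = 64 KiB, which is both the minimum per-CPU unit size and the largest allocation the dynamic allocator will satisfy. The PERCPU_DYNAMIC_RESERVE default works out to 6 << 12 = 24 KiB on 64-bit with modules, 4 << 12 = 16 KiB on 64-bit without modules or on 32-bit with modules, and 2 << 12 = 8 KiB on 32-bit without modules.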
+#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
 struct percpu_data {
 	void *ptrs[1];
 };
 
 #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
-/*
- * Use this to get to a cpu's version of the per-cpu object dynamically
- * allocated. Non-atomic access to the current CPU's version should
- * probably be combined with get_cpu()/put_cpu().
- */
-#define percpu_ptr(ptr, cpu) \
-({ \
-	struct percpu_data *__p = __percpu_disguise(ptr); \
-	(__typeof__(ptr))__p->ptrs[(cpu)]; \
+
+#define per_cpu_ptr(ptr, cpu) \
+({ \
+	struct percpu_data *__p = __percpu_disguise(ptr); \
+	(__typeof__(ptr))__p->ptrs[(cpu)]; \
 })
 
-extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask);
-extern void percpu_free(void *__pdata);
+#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
+extern void *__alloc_percpu(size_t size, size_t align);
+extern void free_percpu(void *__pdata);
 
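As a usage illustration of the renamed interface (hypothetical code; only alloc_percpu(), free_percpu() and per_cpu_ptr() come from this header), allocating a per-CPU object and summing it across CPUs looks like:

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

struct demo_stat {
	unsigned long packets;
};

static struct demo_stat *demo_stats;

static int demo_init(void)
{
	/* one zeroed copy per possible CPU, aligned to
	 * __alignof__(struct demo_stat) */
	demo_stats = alloc_percpu(struct demo_stat);
	if (!demo_stats)
		return -ENOMEM;
	return 0;
}

static unsigned long demo_total(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(demo_stats, cpu)->packets;
	return sum;
}

static void demo_exit(void)
{
	free_percpu(demo_stats);
}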
 #else /* CONFIG_SMP */
 
-#define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
+#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
 
-static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
+static inline void *__alloc_percpu(size_t size, size_t align)
 {
-	return kzalloc(size, gfp);
+	/*
+	 * Can't easily make larger alignment work with kmalloc.  WARN
+	 * on it.  Larger alignment should only be used for module
+	 * percpu sections on SMP for which this path isn't used.
+	 */
+	WARN_ON_ONCE(align > SMP_CACHE_BYTES);
+	return kzalloc(size, GFP_KERNEL);
 }
 
-static inline void percpu_free(void *__pdata)
+static inline void free_percpu(void *p)
 {
-	kfree(__pdata);
+	kfree(p);
 }
 
 #endif /* CONFIG_SMP */
 
-#define percpu_alloc_mask(size, gfp, mask) \
-	__percpu_alloc_mask((size), (gfp), &(mask))
-
-#define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map)
-
-/* (legacy) interface for use without CPU hotplug handling */
-
-#define __alloc_percpu(size) percpu_alloc_mask((size), GFP_KERNEL, \
-					cpu_possible_map)
-#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type))
-#define free_percpu(ptr) percpu_free((ptr))
-#define per_cpu_ptr(ptr, cpu) percpu_ptr((ptr), (cpu))
+#define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type), \
+						       __alignof__(type))
 
 #endif /* __LINUX_PERCPU_H */
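Finally, the comment carried over above per_cpu_ptr() recommends pairing non-atomic access to the current CPU's copy with get_cpu()/put_cpu(); a minimal sketch of that pattern, reusing the hypothetical demo_stat type from the sketch above:

static void demo_count_packet(struct demo_stat *stats)
{
	int cpu = get_cpu();	/* disables preemption, returns this CPU */

	per_cpu_ptr(stats, cpu)->packets++;
	put_cpu();		/* re-enables preemption */
}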