Diffstat (limited to 'include/linux/percpu.h')

 include/linux/percpu.h | 159 +++++++++++++++++++++++++++-------------
 1 file changed, 107 insertions(+), 52 deletions(-)
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 9f2a3751873a..ee5615d65211 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -5,53 +5,66 @@
 #include <linux/slab.h> /* For kmalloc() */
 #include <linux/smp.h>
 #include <linux/cpumask.h>
+#include <linux/pfn.h>
 
 #include <asm/percpu.h>
 
+#ifndef PER_CPU_BASE_SECTION
+#ifdef CONFIG_SMP
+#define PER_CPU_BASE_SECTION ".data.percpu"
+#else
+#define PER_CPU_BASE_SECTION ".data"
+#endif
+#endif
+
 #ifdef CONFIG_SMP
-#define DEFINE_PER_CPU(type, name) \
-	__attribute__((__section__(".data.percpu"))) \
-	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
 
 #ifdef MODULE
-#define SHARED_ALIGNED_SECTION ".data.percpu"
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
 #else
-#define SHARED_ALIGNED_SECTION ".data.percpu.shared_aligned"
+#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
 #endif
+#define PER_CPU_FIRST_SECTION ".first"
 
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
-	__attribute__((__section__(SHARED_ALIGNED_SECTION))) \
-	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name \
-	____cacheline_aligned_in_smp
+#else
+
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
+#define PER_CPU_FIRST_SECTION ""
 
-#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
-	__attribute__((__section__(".data.percpu.page_aligned"))) \
+#endif
+
+#define DEFINE_PER_CPU_SECTION(type, name, section) \
+	__attribute__((__section__(PER_CPU_BASE_SECTION section))) \
 	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
-#else
-#define DEFINE_PER_CPU(type, name) \
-	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
+
+#define DEFINE_PER_CPU(type, name) \
+	DEFINE_PER_CPU_SECTION(type, name, "")
 
 #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
-	DEFINE_PER_CPU(type, name)
+	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
+	____cacheline_aligned_in_smp
 
 #define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
-	DEFINE_PER_CPU(type, name)
-#endif
+	DEFINE_PER_CPU_SECTION(type, name, ".page_aligned")
+
+#define DEFINE_PER_CPU_FIRST(type, name) \
+	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)
 
 #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
 #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
 
-/* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
-#ifndef PERCPU_ENOUGH_ROOM
+/* enough to cover all DEFINE_PER_CPUs in modules */
 #ifdef CONFIG_MODULES
-#define PERCPU_MODULE_RESERVE	8192
+#define PERCPU_MODULE_RESERVE	(8 << 10)
 #else
 #define PERCPU_MODULE_RESERVE	0
 #endif
 
+#ifndef PERCPU_ENOUGH_ROOM
 #define PERCPU_ENOUGH_ROOM \
-	(__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE)
-#endif	/* PERCPU_ENOUGH_ROOM */
+	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
+	 PERCPU_MODULE_RESERVE)
+#endif
 
 /*
  * Must be an lvalue. Since @var must be a simple identifier,
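
Every static definition flavor above now funnels through DEFINE_PER_CPU_SECTION(), which concatenates PER_CPU_BASE_SECTION with a per-flavor suffix at the preprocessor level. A minimal usage sketch of that path (illustrative only; my_counter and touch_my_counter are made-up names, not part of this patch):

	#include <linux/percpu.h>

	/* Placed in .data.percpu on SMP (.data on UP) via
	 * DEFINE_PER_CPU_SECTION(). */
	DEFINE_PER_CPU(unsigned long, my_counter);

	static void touch_my_counter(void)
	{
		/* get_cpu_var() disables preemption around the access. */
		get_cpu_var(my_counter)++;
		put_cpu_var(my_counter);
	}
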
@@ -65,52 +78,94 @@
 
 #ifdef CONFIG_SMP
 
+#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+
+/* minimum unit size, also is the maximum supported allocation size */
+#define PCPU_MIN_UNIT_SIZE	PFN_ALIGN(64 << 10)
+
+/*
+ * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
+ * back on the first chunk for dynamic percpu allocation if arch is
+ * manually allocating and mapping it for faster access (as a part of
+ * large page mapping for example).
+ *
+ * The following values give between one and two pages of free space
+ * after typical minimal boot (2-way SMP, single disk and NIC) with
+ * both defconfig and a distro config on x86_64 and 32. More
+ * intelligent way to determine this would be nice.
+ */
+#if BITS_PER_LONG > 32
+#define PERCPU_DYNAMIC_RESERVE	(20 << 10)
+#else
+#define PERCPU_DYNAMIC_RESERVE	(12 << 10)
+#endif
+
+extern void *pcpu_base_addr;
+
+typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno);
+typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);
+
+extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
+				size_t static_size, size_t reserved_size,
+				ssize_t dyn_size, ssize_t unit_size,
+				void *base_addr,
+				pcpu_populate_pte_fn_t populate_pte_fn);
+
+extern ssize_t __init pcpu_embed_first_chunk(
+				size_t static_size, size_t reserved_size,
+				ssize_t dyn_size, ssize_t unit_size);
+
+/*
+ * Use this to get to a cpu's version of the per-cpu object
+ * dynamically allocated. Non-atomic access to the current CPU's
+ * version should probably be combined with get_cpu()/put_cpu().
+ */
+#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
+
+extern void *__alloc_reserved_percpu(size_t size, size_t align);
+
+#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
 struct percpu_data {
 	void *ptrs[1];
 };
 
 #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
-/*
- * Use this to get to a cpu's version of the per-cpu object dynamically
- * allocated. Non-atomic access to the current CPU's version should
- * probably be combined with get_cpu()/put_cpu().
- */
-#define percpu_ptr(ptr, cpu) \
-({ \
-	struct percpu_data *__p = __percpu_disguise(ptr); \
-	(__typeof__(ptr))__p->ptrs[(cpu)]; \
+
+#define per_cpu_ptr(ptr, cpu) \
+({ \
+	struct percpu_data *__p = __percpu_disguise(ptr); \
+	(__typeof__(ptr))__p->ptrs[(cpu)]; \
 })
 
-extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask);
-extern void percpu_free(void *__pdata);
+#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
+extern void *__alloc_percpu(size_t size, size_t align);
+extern void free_percpu(void *__pdata);
 
 #else /* CONFIG_SMP */
 
-#define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
+#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
 
-static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
+static inline void *__alloc_percpu(size_t size, size_t align)
 {
-	return kzalloc(size, gfp);
+	/*
+	 * Can't easily make larger alignment work with kmalloc. WARN
+	 * on it. Larger alignment should only be used for module
+	 * percpu sections on SMP for which this path isn't used.
+	 */
+	WARN_ON_ONCE(align > SMP_CACHE_BYTES);
+	return kzalloc(size, GFP_KERNEL);
 }
 
-static inline void percpu_free(void *__pdata)
+static inline void free_percpu(void *p)
 {
-	kfree(__pdata);
+	kfree(p);
 }
 
 #endif /* CONFIG_SMP */
 
-#define percpu_alloc_mask(size, gfp, mask) \
-	__percpu_alloc_mask((size), (gfp), &(mask))
-
-#define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map)
-
-/* (legacy) interface for use without CPU hotplug handling */
-
-#define __alloc_percpu(size)	percpu_alloc_mask((size), GFP_KERNEL, \
-						       cpu_possible_map)
-#define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type))
-#define free_percpu(ptr)	percpu_free((ptr))
-#define per_cpu_ptr(ptr, cpu)	percpu_ptr((ptr), (cpu))
+#define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type), \
+						       __alignof__(type))
 
 #endif /* __LINUX_PERCPU_H */
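
After this change __alloc_percpu() takes an explicit alignment, the legacy percpu_alloc_mask()/percpu_alloc()/percpu_free() names are gone, and alloc_percpu()/free_percpu()/per_cpu_ptr() are the real interface on both SMP and UP. A caller-side sketch under those assumptions (struct my_stats and the functions below are made-up names, not from this patch):

	#include <linux/percpu.h>
	#include <linux/errno.h>

	struct my_stats {
		unsigned long packets;
		unsigned long bytes;
	};

	static struct my_stats *my_stats;

	static int my_stats_init(void)
	{
		/* Expands to __alloc_percpu(sizeof(struct my_stats),
		 * __alignof__(struct my_stats)); the returned area is
		 * zero-filled on every CPU. */
		my_stats = alloc_percpu(struct my_stats);
		if (!my_stats)
			return -ENOMEM;
		return 0;
	}

	static void my_stats_add(int cpu, unsigned long bytes)
	{
		/* per_cpu_ptr() resolves the given CPU's copy. */
		struct my_stats *s = per_cpu_ptr(my_stats, cpu);

		s->packets++;
		s->bytes += bytes;
	}

	static void my_stats_exit(void)
	{
		free_percpu(my_stats);
	}

Per the comment in the header, non-atomic access to the local CPU's copy would pair per_cpu_ptr() with get_cpu()/put_cpu() instead of passing an arbitrary cpu.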