author     Tejun Heo <tj@kernel.org>   2009-02-20 02:29:08 -0500
committer  Tejun Heo <tj@kernel.org>   2009-02-20 02:29:08 -0500
commit     f2a8205c4ef1af917d175c36a4097ae5587791c8
tree       6c5531aa50803fda8005ea94c04b94fcd0310be3  /mm/allocpercpu.c
parent     313e458f81ec3852106c5a83830fe0d4f405a71a
percpu: kill percpu_alloc() and friends
Impact: kill unused functions

percpu_alloc() and its friends never saw much action.  The interface was
supposed to replace the cpu-mask unaware __alloc_percpu(), but that never
happened, and in fact __percpu_alloc_mask() itself never grew a proper
up/down handling interface either (no exported interface for
populate/depopulate).

percpu allocation is about to go through a major reimplementation and
there's no reason to carry this unused interface around.  Replace it
with __alloc_percpu() and free_percpu().

Signed-off-by: Tejun Heo <tj@kernel.org>
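For orientation, the surviving interface after this patch looks as follows.
This is a sketch reconstructed from the commit message and the
mm/allocpercpu.c diff below; the alloc_percpu() type-safe wrapper is assumed
to match this era's include/linux/percpu.h:

	void *__alloc_percpu(size_t size, size_t align);
	void free_percpu(void *__pdata);

	/* returns zeroed per-cpu storage, one copy per possible CPU */
	#define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type), \
							       __alignof__(type))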
Diffstat (limited to 'mm/allocpercpu.c')
-rw-r--r--  mm/allocpercpu.c | 32 +++++++++++++++++++-------------
1 file changed, 19 insertions(+), 13 deletions(-)
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index 4297bc41bfd2..3653c570232b 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -99,45 +99,51 @@ static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
 	__percpu_populate_mask((__pdata), (size), (gfp), &(mask))
 
 /**
- * percpu_alloc_mask - initial setup of per-cpu data
+ * alloc_percpu - initial setup of per-cpu data
  * @size: size of per-cpu object
- * @gfp: may sleep or not etc.
- * @mask: populate per-data for cpu's selected through mask bits
+ * @align: alignment
  *
- * Populating per-cpu data for all online cpu's would be a typical use case,
- * which is simplified by the percpu_alloc() wrapper.
- * Per-cpu objects are populated with zeroed buffers.
+ * Allocate dynamic percpu area.  Percpu objects are populated with
+ * zeroed buffers.
  */
-void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
+void *__alloc_percpu(size_t size, size_t align)
 {
 	/*
 	 * We allocate whole cache lines to avoid false sharing
 	 */
 	size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
-	void *pdata = kzalloc(sz, gfp);
+	void *pdata = kzalloc(sz, GFP_KERNEL);
 	void *__pdata = __percpu_disguise(pdata);
 
+	/*
+	 * Can't easily make larger alignment work with kmalloc.  WARN
+	 * on it.  Larger alignment should only be used for module
+	 * percpu sections on SMP for which this path isn't used.
+	 */
+	WARN_ON_ONCE(align > __alignof__(unsigned long long));
+
 	if (unlikely(!pdata))
 		return NULL;
-	if (likely(!__percpu_populate_mask(__pdata, size, gfp, mask)))
+	if (likely(!__percpu_populate_mask(__pdata, size, GFP_KERNEL,
+					   &cpu_possible_map)))
 		return __pdata;
 	kfree(pdata);
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(__percpu_alloc_mask);
+EXPORT_SYMBOL_GPL(__alloc_percpu);
 
 /**
- * percpu_free - final cleanup of per-cpu data
+ * free_percpu - final cleanup of per-cpu data
  * @__pdata: object to clean up
  *
  * We simply clean up any per-cpu object left. No need for the client to
  * track and specify through a bis mask which per-cpu objects are to free.
  */
-void percpu_free(void *__pdata)
+void free_percpu(void *__pdata)
 {
 	if (unlikely(!__pdata))
 		return;
 	__percpu_depopulate_mask(__pdata, &cpu_possible_map);
 	kfree(__percpu_disguise(__pdata));
 }
-EXPORT_SYMBOL_GPL(percpu_free);
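As a usage illustration, a hypothetical caller converting from the removed
interface to the surviving one might look like the sketch below; my_counter,
counters, and the helper functions are invented for this example and are not
part of the patch:

	#include <linux/percpu.h>
	#include <linux/smp.h>
	#include <linux/errno.h>

	struct my_counter {			/* hypothetical per-cpu object */
		unsigned long hits;
	};

	static struct my_counter *counters;

	static int my_counter_init(void)
	{
		/* was: counters = percpu_alloc(sizeof(*counters), GFP_KERNEL); */
		counters = alloc_percpu(struct my_counter);
		if (!counters)
			return -ENOMEM;
		return 0;		/* objects come back zeroed */
	}

	static void my_counter_hit(void)
	{
		int cpu = get_cpu();	/* disable preemption while touching this CPU's copy */

		per_cpu_ptr(counters, cpu)->hits++;
		put_cpu();
	}

	static void my_counter_exit(void)
	{
		/* was: percpu_free(counters); */
		free_percpu(counters);
	}

Note that allocation now always populates every CPU in cpu_possible_map, so
per_cpu_ptr() is valid for any possible CPU; the per-mask variants are gone.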