| author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-08-04 18:23:14 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-08-04 18:23:14 -0400 |
| commit | b7c8e55db7141dcbb9d5305a3260fa0ed62a1bcc (patch) | |
| tree | 59fbd52d8e80e5a83d9747961d28aaf4d400613a /kernel/padata.c | |
| parent | ffd386a9a8273dcfa61705d0b349eebc7525ef87 (diff) | |
| parent | 4015d9a865e3bcc42d88bedc8ce1551000bab664 (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (39 commits)
random: Reorder struct entropy_store to remove padding on 64bits
padata: update API documentation
padata: Remove padata_get_cpumask
crypto: pcrypt - Update pcrypt cpumask according to the padata cpumask notifier
crypto: pcrypt - Rename pcrypt_instance
padata: Pass the padata cpumasks to the cpumask_change_notifier chain
padata: Rearrange set_cpumask functions
padata: Rename padata_alloc functions
crypto: pcrypt - Dont calulate a callback cpu on empty callback cpumask
padata: Check for valid cpumasks
padata: Allocate cpumask dependend recources in any case
padata: Fix cpu index counting
crypto: geode_aes - Convert pci_table entries to PCI_VDEVICE (if PCI_ANY_ID is used)
pcrypt: Added sysfs interface to pcrypt
padata: Added sysfs primitives to padata subsystem
padata: Make two separate cpumasks
padata: update documentation
padata: simplify serialization mechanism
padata: make padata_do_parallel to return zero on success
padata: Handle empty padata cpumasks
...
Diffstat (limited to 'kernel/padata.c')
-rw-r--r-- | kernel/padata.c | 755 |
1 file changed, 558 insertions(+), 197 deletions(-)
diff --git a/kernel/padata.c b/kernel/padata.c
index fdd8ae609ce3..751019415d23 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -26,18 +26,19 @@ | |||
26 | #include <linux/mutex.h> | 26 | #include <linux/mutex.h> |
27 | #include <linux/sched.h> | 27 | #include <linux/sched.h> |
28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
29 | #include <linux/sysfs.h> | ||
29 | #include <linux/rcupdate.h> | 30 | #include <linux/rcupdate.h> |
30 | 31 | ||
31 | #define MAX_SEQ_NR INT_MAX - NR_CPUS | 32 | #define MAX_SEQ_NR (INT_MAX - NR_CPUS) |
32 | #define MAX_OBJ_NUM 1000 | 33 | #define MAX_OBJ_NUM 1000 |
33 | 34 | ||
34 | static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index) | 35 | static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index) |
35 | { | 36 | { |
36 | int cpu, target_cpu; | 37 | int cpu, target_cpu; |
37 | 38 | ||
38 | target_cpu = cpumask_first(pd->cpumask); | 39 | target_cpu = cpumask_first(pd->cpumask.pcpu); |
39 | for (cpu = 0; cpu < cpu_index; cpu++) | 40 | for (cpu = 0; cpu < cpu_index; cpu++) |
40 | target_cpu = cpumask_next(target_cpu, pd->cpumask); | 41 | target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu); |
41 | 42 | ||
42 | return target_cpu; | 43 | return target_cpu; |
43 | } | 44 | } |
@@ -53,26 +54,27 @@ static int padata_cpu_hash(struct padata_priv *padata) | |||
53 | * Hash the sequence numbers to the cpus by taking | 54 | * Hash the sequence numbers to the cpus by taking |
54 | * seq_nr mod. number of cpus in use. | 55 | * seq_nr mod. number of cpus in use. |
55 | */ | 56 | */ |
56 | cpu_index = padata->seq_nr % cpumask_weight(pd->cpumask); | 57 | cpu_index = padata->seq_nr % cpumask_weight(pd->cpumask.pcpu); |
57 | 58 | ||
58 | return padata_index_to_cpu(pd, cpu_index); | 59 | return padata_index_to_cpu(pd, cpu_index); |
59 | } | 60 | } |
60 | 61 | ||
61 | static void padata_parallel_worker(struct work_struct *work) | 62 | static void padata_parallel_worker(struct work_struct *parallel_work) |
62 | { | 63 | { |
63 | struct padata_queue *queue; | 64 | struct padata_parallel_queue *pqueue; |
64 | struct parallel_data *pd; | 65 | struct parallel_data *pd; |
65 | struct padata_instance *pinst; | 66 | struct padata_instance *pinst; |
66 | LIST_HEAD(local_list); | 67 | LIST_HEAD(local_list); |
67 | 68 | ||
68 | local_bh_disable(); | 69 | local_bh_disable(); |
69 | queue = container_of(work, struct padata_queue, pwork); | 70 | pqueue = container_of(parallel_work, |
70 | pd = queue->pd; | 71 | struct padata_parallel_queue, work); |
72 | pd = pqueue->pd; | ||
71 | pinst = pd->pinst; | 73 | pinst = pd->pinst; |
72 | 74 | ||
73 | spin_lock(&queue->parallel.lock); | 75 | spin_lock(&pqueue->parallel.lock); |
74 | list_replace_init(&queue->parallel.list, &local_list); | 76 | list_replace_init(&pqueue->parallel.list, &local_list); |
75 | spin_unlock(&queue->parallel.lock); | 77 | spin_unlock(&pqueue->parallel.lock); |
76 | 78 | ||
77 | while (!list_empty(&local_list)) { | 79 | while (!list_empty(&local_list)) { |
78 | struct padata_priv *padata; | 80 | struct padata_priv *padata; |
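With the old single cpumask split into `cpumask.pcpu` (parallel workers) and `cpumask.cbcpu` (serial callback workers), the hash above now distributes objects over the parallel mask only. A minimal sketch of the resulting mapping, not part of the patch:

```c
/*
 * Sketch: how a sequence number lands on a CPU of the parallel mask
 * after this change.  With pcpu = {2,5,7} and seq_nr = 4:
 * 4 % 3 == 1, and walking the mask from its first bit gives CPU 5.
 */
static int example_seq_to_cpu(struct parallel_data *pd, int seq_nr)
{
	int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return padata_index_to_cpu(pd, cpu_index);
}
```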
@@ -94,7 +96,7 @@ static void padata_parallel_worker(struct work_struct *work) | |||
94 | * @pinst: padata instance | 96 | * @pinst: padata instance |
95 | * @padata: object to be parallelized | 97 | * @padata: object to be parallelized |
96 | * @cb_cpu: cpu the serialization callback function will run on, | 98 | * @cb_cpu: cpu the serialization callback function will run on, |
97 | * must be in the cpumask of padata. | 99 | * must be in the serial cpumask of padata(i.e. cpumask.cbcpu). |
98 | * | 100 | * |
99 | * The parallelization callback function will run with BHs off. | 101 | * The parallelization callback function will run with BHs off. |
100 | * Note: Every object which is parallelized by padata_do_parallel | 102 | * Note: Every object which is parallelized by padata_do_parallel |
@@ -104,15 +106,18 @@ int padata_do_parallel(struct padata_instance *pinst, | |||
104 | struct padata_priv *padata, int cb_cpu) | 106 | struct padata_priv *padata, int cb_cpu) |
105 | { | 107 | { |
106 | int target_cpu, err; | 108 | int target_cpu, err; |
107 | struct padata_queue *queue; | 109 | struct padata_parallel_queue *queue; |
108 | struct parallel_data *pd; | 110 | struct parallel_data *pd; |
109 | 111 | ||
110 | rcu_read_lock_bh(); | 112 | rcu_read_lock_bh(); |
111 | 113 | ||
112 | pd = rcu_dereference(pinst->pd); | 114 | pd = rcu_dereference(pinst->pd); |
113 | 115 | ||
114 | err = 0; | 116 | err = -EINVAL; |
115 | if (!(pinst->flags & PADATA_INIT)) | 117 | if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID) |
118 | goto out; | ||
119 | |||
120 | if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu)) | ||
116 | goto out; | 121 | goto out; |
117 | 122 | ||
118 | err = -EBUSY; | 123 | err = -EBUSY; |
@@ -122,11 +127,7 @@ int padata_do_parallel(struct padata_instance *pinst, | |||
122 | if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM) | 127 | if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM) |
123 | goto out; | 128 | goto out; |
124 | 129 | ||
125 | err = -EINVAL; | 130 | err = 0; |
126 | if (!cpumask_test_cpu(cb_cpu, pd->cpumask)) | ||
127 | goto out; | ||
128 | |||
129 | err = -EINPROGRESS; | ||
130 | atomic_inc(&pd->refcnt); | 131 | atomic_inc(&pd->refcnt); |
131 | padata->pd = pd; | 132 | padata->pd = pd; |
132 | padata->cb_cpu = cb_cpu; | 133 | padata->cb_cpu = cb_cpu; |
@@ -137,13 +138,13 @@ int padata_do_parallel(struct padata_instance *pinst, | |||
137 | padata->seq_nr = atomic_inc_return(&pd->seq_nr); | 138 | padata->seq_nr = atomic_inc_return(&pd->seq_nr); |
138 | 139 | ||
139 | target_cpu = padata_cpu_hash(padata); | 140 | target_cpu = padata_cpu_hash(padata); |
140 | queue = per_cpu_ptr(pd->queue, target_cpu); | 141 | queue = per_cpu_ptr(pd->pqueue, target_cpu); |
141 | 142 | ||
142 | spin_lock(&queue->parallel.lock); | 143 | spin_lock(&queue->parallel.lock); |
143 | list_add_tail(&padata->list, &queue->parallel.list); | 144 | list_add_tail(&padata->list, &queue->parallel.list); |
144 | spin_unlock(&queue->parallel.lock); | 145 | spin_unlock(&queue->parallel.lock); |
145 | 146 | ||
146 | queue_work_on(target_cpu, pinst->wq, &queue->pwork); | 147 | queue_work_on(target_cpu, pinst->wq, &queue->work); |
147 | 148 | ||
148 | out: | 149 | out: |
149 | rcu_read_unlock_bh(); | 150 | rcu_read_unlock_bh(); |
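Taken together, the hunks above change the return contract of padata_do_parallel(): 0 now means the object was queued (the old code returned -EINPROGRESS for that case), -EINVAL covers an uninitialized or invalid instance as well as a cb_cpu outside the serial cpumask, and -EBUSY is kept for reset/overload. A hedged caller sketch; `struct my_request` and the submit helper are hypothetical:

```c
/* Hypothetical padata user illustrating the new error handling. */
struct my_request {
	struct padata_priv padata;
	/* user payload ... */
};

static int my_submit(struct padata_instance *pinst,
		     struct my_request *req, int cb_cpu)
{
	int err;

	/* cb_cpu must be in pinst's serial cpumask, else -EINVAL */
	err = padata_do_parallel(pinst, &req->padata, cb_cpu);
	if (err == -EBUSY)
		return err;	/* resetting or overloaded; retry later */

	return err;		/* 0 on success since this change */
}
```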
@@ -171,84 +172,52 @@ EXPORT_SYMBOL(padata_do_parallel); | |||
171 | */ | 172 | */ |
172 | static struct padata_priv *padata_get_next(struct parallel_data *pd) | 173 | static struct padata_priv *padata_get_next(struct parallel_data *pd) |
173 | { | 174 | { |
174 | int cpu, num_cpus, empty, calc_seq_nr; | 175 | int cpu, num_cpus; |
175 | int seq_nr, next_nr, overrun, next_overrun; | 176 | int next_nr, next_index; |
176 | struct padata_queue *queue, *next_queue; | 177 | struct padata_parallel_queue *queue, *next_queue; |
177 | struct padata_priv *padata; | 178 | struct padata_priv *padata; |
178 | struct padata_list *reorder; | 179 | struct padata_list *reorder; |
179 | 180 | ||
180 | empty = 0; | 181 | num_cpus = cpumask_weight(pd->cpumask.pcpu); |
181 | next_nr = -1; | ||
182 | next_overrun = 0; | ||
183 | next_queue = NULL; | ||
184 | |||
185 | num_cpus = cpumask_weight(pd->cpumask); | ||
186 | |||
187 | for_each_cpu(cpu, pd->cpumask) { | ||
188 | queue = per_cpu_ptr(pd->queue, cpu); | ||
189 | reorder = &queue->reorder; | ||
190 | |||
191 | /* | ||
192 | * Calculate the seq_nr of the object that should be | ||
193 | * next in this reorder queue. | ||
194 | */ | ||
195 | overrun = 0; | ||
196 | calc_seq_nr = (atomic_read(&queue->num_obj) * num_cpus) | ||
197 | + queue->cpu_index; | ||
198 | 182 | ||
199 | if (unlikely(calc_seq_nr > pd->max_seq_nr)) { | 183 | /* |
200 | calc_seq_nr = calc_seq_nr - pd->max_seq_nr - 1; | 184 | * Calculate the percpu reorder queue and the sequence |
201 | overrun = 1; | 185 | * number of the next object. |
202 | } | 186 | */ |
203 | 187 | next_nr = pd->processed; | |
204 | if (!list_empty(&reorder->list)) { | 188 | next_index = next_nr % num_cpus; |
205 | padata = list_entry(reorder->list.next, | 189 | cpu = padata_index_to_cpu(pd, next_index); |
206 | struct padata_priv, list); | 190 | next_queue = per_cpu_ptr(pd->pqueue, cpu); |
207 | 191 | ||
208 | seq_nr = padata->seq_nr; | 192 | if (unlikely(next_nr > pd->max_seq_nr)) { |
209 | BUG_ON(calc_seq_nr != seq_nr); | 193 | next_nr = next_nr - pd->max_seq_nr - 1; |
210 | } else { | 194 | next_index = next_nr % num_cpus; |
211 | seq_nr = calc_seq_nr; | 195 | cpu = padata_index_to_cpu(pd, next_index); |
212 | empty++; | 196 | next_queue = per_cpu_ptr(pd->pqueue, cpu); |
213 | } | 197 | pd->processed = 0; |
214 | |||
215 | if (next_nr < 0 || seq_nr < next_nr | ||
216 | || (next_overrun && !overrun)) { | ||
217 | next_nr = seq_nr; | ||
218 | next_overrun = overrun; | ||
219 | next_queue = queue; | ||
220 | } | ||
221 | } | 198 | } |
222 | 199 | ||
223 | padata = NULL; | 200 | padata = NULL; |
224 | 201 | ||
225 | if (empty == num_cpus) | ||
226 | goto out; | ||
227 | |||
228 | reorder = &next_queue->reorder; | 202 | reorder = &next_queue->reorder; |
229 | 203 | ||
230 | if (!list_empty(&reorder->list)) { | 204 | if (!list_empty(&reorder->list)) { |
231 | padata = list_entry(reorder->list.next, | 205 | padata = list_entry(reorder->list.next, |
232 | struct padata_priv, list); | 206 | struct padata_priv, list); |
233 | 207 | ||
234 | if (unlikely(next_overrun)) { | 208 | BUG_ON(next_nr != padata->seq_nr); |
235 | for_each_cpu(cpu, pd->cpumask) { | ||
236 | queue = per_cpu_ptr(pd->queue, cpu); | ||
237 | atomic_set(&queue->num_obj, 0); | ||
238 | } | ||
239 | } | ||
240 | 209 | ||
241 | spin_lock(&reorder->lock); | 210 | spin_lock(&reorder->lock); |
242 | list_del_init(&padata->list); | 211 | list_del_init(&padata->list); |
243 | atomic_dec(&pd->reorder_objects); | 212 | atomic_dec(&pd->reorder_objects); |
244 | spin_unlock(&reorder->lock); | 213 | spin_unlock(&reorder->lock); |
245 | 214 | ||
246 | atomic_inc(&next_queue->num_obj); | 215 | pd->processed++; |
247 | 216 | ||
248 | goto out; | 217 | goto out; |
249 | } | 218 | } |
250 | 219 | ||
251 | queue = per_cpu_ptr(pd->queue, smp_processor_id()); | 220 | queue = per_cpu_ptr(pd->pqueue, smp_processor_id()); |
252 | if (queue->cpu_index == next_queue->cpu_index) { | 221 | if (queue->cpu_index == next_queue->cpu_index) { |
253 | padata = ERR_PTR(-ENODATA); | 222 | padata = ERR_PTR(-ENODATA); |
254 | goto out; | 223 | goto out; |
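The rewrite replaces the per-queue `num_obj` counters and the full scan of all reorder queues with a single `pd->processed` counter: because sequence numbers are dealt out round-robin over the parallel mask, the queue holding the next object follows directly from how many objects have already been reordered. A worked sketch, not patch code:

```c
/*
 * With pcpu = {0,1,4} and pd->processed == 7, the next object must
 * carry seq_nr 7 and live on cpu_index 7 % 3 == 1, i.e. CPU 1 --
 * no scan over all reorder queues is needed anymore.
 */
int num_cpus   = cpumask_weight(pd->cpumask.pcpu);	/* 3 */
int next_index = pd->processed % num_cpus;		/* 1 */
int next_cpu   = padata_index_to_cpu(pd, next_index);	/* CPU 1 */
```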
@@ -262,7 +231,7 @@ out: | |||
262 | static void padata_reorder(struct parallel_data *pd) | 231 | static void padata_reorder(struct parallel_data *pd) |
263 | { | 232 | { |
264 | struct padata_priv *padata; | 233 | struct padata_priv *padata; |
265 | struct padata_queue *queue; | 234 | struct padata_serial_queue *squeue; |
266 | struct padata_instance *pinst = pd->pinst; | 235 | struct padata_instance *pinst = pd->pinst; |
267 | 236 | ||
268 | /* | 237 | /* |
@@ -301,13 +270,13 @@ static void padata_reorder(struct parallel_data *pd) | |||
301 | return; | 270 | return; |
302 | } | 271 | } |
303 | 272 | ||
304 | queue = per_cpu_ptr(pd->queue, padata->cb_cpu); | 273 | squeue = per_cpu_ptr(pd->squeue, padata->cb_cpu); |
305 | 274 | ||
306 | spin_lock(&queue->serial.lock); | 275 | spin_lock(&squeue->serial.lock); |
307 | list_add_tail(&padata->list, &queue->serial.list); | 276 | list_add_tail(&padata->list, &squeue->serial.list); |
308 | spin_unlock(&queue->serial.lock); | 277 | spin_unlock(&squeue->serial.lock); |
309 | 278 | ||
310 | queue_work_on(padata->cb_cpu, pinst->wq, &queue->swork); | 279 | queue_work_on(padata->cb_cpu, pinst->wq, &squeue->work); |
311 | } | 280 | } |
312 | 281 | ||
313 | spin_unlock_bh(&pd->lock); | 282 | spin_unlock_bh(&pd->lock); |
@@ -333,19 +302,19 @@ static void padata_reorder_timer(unsigned long arg) | |||
333 | padata_reorder(pd); | 302 | padata_reorder(pd); |
334 | } | 303 | } |
335 | 304 | ||
336 | static void padata_serial_worker(struct work_struct *work) | 305 | static void padata_serial_worker(struct work_struct *serial_work) |
337 | { | 306 | { |
338 | struct padata_queue *queue; | 307 | struct padata_serial_queue *squeue; |
339 | struct parallel_data *pd; | 308 | struct parallel_data *pd; |
340 | LIST_HEAD(local_list); | 309 | LIST_HEAD(local_list); |
341 | 310 | ||
342 | local_bh_disable(); | 311 | local_bh_disable(); |
343 | queue = container_of(work, struct padata_queue, swork); | 312 | squeue = container_of(serial_work, struct padata_serial_queue, work); |
344 | pd = queue->pd; | 313 | pd = squeue->pd; |
345 | 314 | ||
346 | spin_lock(&queue->serial.lock); | 315 | spin_lock(&squeue->serial.lock); |
347 | list_replace_init(&queue->serial.list, &local_list); | 316 | list_replace_init(&squeue->serial.list, &local_list); |
348 | spin_unlock(&queue->serial.lock); | 317 | spin_unlock(&squeue->serial.lock); |
349 | 318 | ||
350 | while (!list_empty(&local_list)) { | 319 | while (!list_empty(&local_list)) { |
351 | struct padata_priv *padata; | 320 | struct padata_priv *padata; |
@@ -372,18 +341,18 @@ static void padata_serial_worker(struct work_struct *work) | |||
372 | void padata_do_serial(struct padata_priv *padata) | 341 | void padata_do_serial(struct padata_priv *padata) |
373 | { | 342 | { |
374 | int cpu; | 343 | int cpu; |
375 | struct padata_queue *queue; | 344 | struct padata_parallel_queue *pqueue; |
376 | struct parallel_data *pd; | 345 | struct parallel_data *pd; |
377 | 346 | ||
378 | pd = padata->pd; | 347 | pd = padata->pd; |
379 | 348 | ||
380 | cpu = get_cpu(); | 349 | cpu = get_cpu(); |
381 | queue = per_cpu_ptr(pd->queue, cpu); | 350 | pqueue = per_cpu_ptr(pd->pqueue, cpu); |
382 | 351 | ||
383 | spin_lock(&queue->reorder.lock); | 352 | spin_lock(&pqueue->reorder.lock); |
384 | atomic_inc(&pd->reorder_objects); | 353 | atomic_inc(&pd->reorder_objects); |
385 | list_add_tail(&padata->list, &queue->reorder.list); | 354 | list_add_tail(&padata->list, &pqueue->reorder.list); |
386 | spin_unlock(&queue->reorder.lock); | 355 | spin_unlock(&pqueue->reorder.lock); |
387 | 356 | ||
388 | put_cpu(); | 357 | put_cpu(); |
389 | 358 | ||
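padata_do_serial() itself only queues the finished object on the local CPU's reorder list; ordering and the hand-off to the serial queue happen in padata_reorder(). A hedged sketch of the parallel callback side that feeds it, reusing the hypothetical `my_request` from the earlier sketch:

```c
/*
 * The parallel worker calls this with BHs off; when the parallel
 * part is done, the object is handed back for in-order completion.
 */
static void my_parallel(struct padata_priv *padata)
{
	struct my_request *req =
		container_of(padata, struct my_request, padata);

	/* ... perform the parallel part of the work on req ... */

	padata_do_serial(padata);	/* serial callback runs on cb_cpu */
}
```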
@@ -391,52 +360,89 @@ void padata_do_serial(struct padata_priv *padata) | |||
391 | } | 360 | } |
392 | EXPORT_SYMBOL(padata_do_serial); | 361 | EXPORT_SYMBOL(padata_do_serial); |
393 | 362 | ||
394 | /* Allocate and initialize the internal cpumask dependend resources. */ | 363 | static int padata_setup_cpumasks(struct parallel_data *pd, |
395 | static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, | 364 | const struct cpumask *pcpumask, |
396 | const struct cpumask *cpumask) | 365 | const struct cpumask *cbcpumask) |
397 | { | 366 | { |
398 | int cpu, cpu_index, num_cpus; | 367 | if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL)) |
399 | struct padata_queue *queue; | 368 | return -ENOMEM; |
400 | struct parallel_data *pd; | ||
401 | |||
402 | cpu_index = 0; | ||
403 | 369 | ||
404 | pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL); | 370 | cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_active_mask); |
405 | if (!pd) | 371 | if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) { |
406 | goto err; | 372 | free_cpumask_var(pd->cpumask.cbcpu); |
373 | return -ENOMEM; | ||
374 | } | ||
407 | 375 | ||
408 | pd->queue = alloc_percpu(struct padata_queue); | 376 | cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_active_mask); |
409 | if (!pd->queue) | 377 | return 0; |
410 | goto err_free_pd; | 378 | } |
411 | 379 | ||
412 | if (!alloc_cpumask_var(&pd->cpumask, GFP_KERNEL)) | 380 | static void __padata_list_init(struct padata_list *pd_list) |
413 | goto err_free_queue; | 381 | { |
382 | INIT_LIST_HEAD(&pd_list->list); | ||
383 | spin_lock_init(&pd_list->lock); | ||
384 | } | ||
414 | 385 | ||
415 | cpumask_and(pd->cpumask, cpumask, cpu_active_mask); | 386 | /* Initialize all percpu queues used by serial workers */ |
387 | static void padata_init_squeues(struct parallel_data *pd) | ||
388 | { | ||
389 | int cpu; | ||
390 | struct padata_serial_queue *squeue; | ||
416 | 391 | ||
417 | for_each_cpu(cpu, pd->cpumask) { | 392 | for_each_cpu(cpu, pd->cpumask.cbcpu) { |
418 | queue = per_cpu_ptr(pd->queue, cpu); | 393 | squeue = per_cpu_ptr(pd->squeue, cpu); |
394 | squeue->pd = pd; | ||
395 | __padata_list_init(&squeue->serial); | ||
396 | INIT_WORK(&squeue->work, padata_serial_worker); | ||
397 | } | ||
398 | } | ||
419 | 399 | ||
420 | queue->pd = pd; | 400 | /* Initialize all percpu queues used by parallel workers */ |
401 | static void padata_init_pqueues(struct parallel_data *pd) | ||
402 | { | ||
403 | int cpu_index, num_cpus, cpu; | ||
404 | struct padata_parallel_queue *pqueue; | ||
421 | 405 | ||
422 | queue->cpu_index = cpu_index; | 406 | cpu_index = 0; |
407 | for_each_cpu(cpu, pd->cpumask.pcpu) { | ||
408 | pqueue = per_cpu_ptr(pd->pqueue, cpu); | ||
409 | pqueue->pd = pd; | ||
410 | pqueue->cpu_index = cpu_index; | ||
423 | cpu_index++; | 411 | cpu_index++; |
424 | 412 | ||
425 | INIT_LIST_HEAD(&queue->reorder.list); | 413 | __padata_list_init(&pqueue->reorder); |
426 | INIT_LIST_HEAD(&queue->parallel.list); | 414 | __padata_list_init(&pqueue->parallel); |
427 | INIT_LIST_HEAD(&queue->serial.list); | 415 | INIT_WORK(&pqueue->work, padata_parallel_worker); |
428 | spin_lock_init(&queue->reorder.lock); | 416 | atomic_set(&pqueue->num_obj, 0); |
429 | spin_lock_init(&queue->parallel.lock); | ||
430 | spin_lock_init(&queue->serial.lock); | ||
431 | |||
432 | INIT_WORK(&queue->pwork, padata_parallel_worker); | ||
433 | INIT_WORK(&queue->swork, padata_serial_worker); | ||
434 | atomic_set(&queue->num_obj, 0); | ||
435 | } | 417 | } |
436 | 418 | ||
437 | num_cpus = cpumask_weight(pd->cpumask); | 419 | num_cpus = cpumask_weight(pd->cpumask.pcpu); |
438 | pd->max_seq_nr = (MAX_SEQ_NR / num_cpus) * num_cpus - 1; | 420 | pd->max_seq_nr = num_cpus ? (MAX_SEQ_NR / num_cpus) * num_cpus - 1 : 0; |
421 | } | ||
422 | |||
423 | /* Allocate and initialize the internal cpumask dependend resources. */ | ||
424 | static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, | ||
425 | const struct cpumask *pcpumask, | ||
426 | const struct cpumask *cbcpumask) | ||
427 | { | ||
428 | struct parallel_data *pd; | ||
439 | 429 | ||
430 | pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL); | ||
431 | if (!pd) | ||
432 | goto err; | ||
433 | |||
434 | pd->pqueue = alloc_percpu(struct padata_parallel_queue); | ||
435 | if (!pd->pqueue) | ||
436 | goto err_free_pd; | ||
437 | |||
438 | pd->squeue = alloc_percpu(struct padata_serial_queue); | ||
439 | if (!pd->squeue) | ||
440 | goto err_free_pqueue; | ||
441 | if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0) | ||
442 | goto err_free_squeue; | ||
443 | |||
444 | padata_init_pqueues(pd); | ||
445 | padata_init_squeues(pd); | ||
440 | setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd); | 446 | setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd); |
441 | atomic_set(&pd->seq_nr, -1); | 447 | atomic_set(&pd->seq_nr, -1); |
442 | atomic_set(&pd->reorder_objects, 0); | 448 | atomic_set(&pd->reorder_objects, 0); |
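One thing worth flagging in padata_setup_cpumasks() above: when allocating `cpumask.cbcpu` fails, the error path frees `cpumask.cbcpu` (whose allocation just failed) and leaks `cpumask.pcpu`. A corrected sketch, not part of this merge:

```c
static int padata_setup_cpumasks_fixed(struct parallel_data *pd,
				       const struct cpumask *pcpumask,
				       const struct cpumask *cbcpumask)
{
	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		return -ENOMEM;

	cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_active_mask);

	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pd->cpumask.pcpu);	/* free what succeeded */
		return -ENOMEM;
	}

	cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_active_mask);
	return 0;
}
```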
@@ -446,8 +452,10 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, | |||
446 | 452 | ||
447 | return pd; | 453 | return pd; |
448 | 454 | ||
449 | err_free_queue: | 455 | err_free_squeue: |
450 | free_percpu(pd->queue); | 456 | free_percpu(pd->squeue); |
457 | err_free_pqueue: | ||
458 | free_percpu(pd->pqueue); | ||
451 | err_free_pd: | 459 | err_free_pd: |
452 | kfree(pd); | 460 | kfree(pd); |
453 | err: | 461 | err: |
@@ -456,8 +464,10 @@ err: | |||
456 | 464 | ||
457 | static void padata_free_pd(struct parallel_data *pd) | 465 | static void padata_free_pd(struct parallel_data *pd) |
458 | { | 466 | { |
459 | free_cpumask_var(pd->cpumask); | 467 | free_cpumask_var(pd->cpumask.pcpu); |
460 | free_percpu(pd->queue); | 468 | free_cpumask_var(pd->cpumask.cbcpu); |
469 | free_percpu(pd->pqueue); | ||
470 | free_percpu(pd->squeue); | ||
461 | kfree(pd); | 471 | kfree(pd); |
462 | } | 472 | } |
463 | 473 | ||
@@ -465,11 +475,12 @@ static void padata_free_pd(struct parallel_data *pd) | |||
465 | static void padata_flush_queues(struct parallel_data *pd) | 475 | static void padata_flush_queues(struct parallel_data *pd) |
466 | { | 476 | { |
467 | int cpu; | 477 | int cpu; |
468 | struct padata_queue *queue; | 478 | struct padata_parallel_queue *pqueue; |
479 | struct padata_serial_queue *squeue; | ||
469 | 480 | ||
470 | for_each_cpu(cpu, pd->cpumask) { | 481 | for_each_cpu(cpu, pd->cpumask.pcpu) { |
471 | queue = per_cpu_ptr(pd->queue, cpu); | 482 | pqueue = per_cpu_ptr(pd->pqueue, cpu); |
472 | flush_work(&queue->pwork); | 483 | flush_work(&pqueue->work); |
473 | } | 484 | } |
474 | 485 | ||
475 | del_timer_sync(&pd->timer); | 486 | del_timer_sync(&pd->timer); |
@@ -477,19 +488,39 @@ static void padata_flush_queues(struct parallel_data *pd) | |||
477 | if (atomic_read(&pd->reorder_objects)) | 488 | if (atomic_read(&pd->reorder_objects)) |
478 | padata_reorder(pd); | 489 | padata_reorder(pd); |
479 | 490 | ||
480 | for_each_cpu(cpu, pd->cpumask) { | 491 | for_each_cpu(cpu, pd->cpumask.cbcpu) { |
481 | queue = per_cpu_ptr(pd->queue, cpu); | 492 | squeue = per_cpu_ptr(pd->squeue, cpu); |
482 | flush_work(&queue->swork); | 493 | flush_work(&squeue->work); |
483 | } | 494 | } |
484 | 495 | ||
485 | BUG_ON(atomic_read(&pd->refcnt) != 0); | 496 | BUG_ON(atomic_read(&pd->refcnt) != 0); |
486 | } | 497 | } |
487 | 498 | ||
499 | static void __padata_start(struct padata_instance *pinst) | ||
500 | { | ||
501 | pinst->flags |= PADATA_INIT; | ||
502 | } | ||
503 | |||
504 | static void __padata_stop(struct padata_instance *pinst) | ||
505 | { | ||
506 | if (!(pinst->flags & PADATA_INIT)) | ||
507 | return; | ||
508 | |||
509 | pinst->flags &= ~PADATA_INIT; | ||
510 | |||
511 | synchronize_rcu(); | ||
512 | |||
513 | get_online_cpus(); | ||
514 | padata_flush_queues(pinst->pd); | ||
515 | put_online_cpus(); | ||
516 | } | ||
517 | |||
488 | /* Replace the internal control stucture with a new one. */ | 518 | /* Replace the internal control stucture with a new one. */ |
489 | static void padata_replace(struct padata_instance *pinst, | 519 | static void padata_replace(struct padata_instance *pinst, |
490 | struct parallel_data *pd_new) | 520 | struct parallel_data *pd_new) |
491 | { | 521 | { |
492 | struct parallel_data *pd_old = pinst->pd; | 522 | struct parallel_data *pd_old = pinst->pd; |
523 | int notification_mask = 0; | ||
493 | 524 | ||
494 | pinst->flags |= PADATA_RESET; | 525 | pinst->flags |= PADATA_RESET; |
495 | 526 | ||
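The new __padata_stop() above encodes an ordering requirement: padata_do_parallel() tests PADATA_INIT under rcu_read_lock_bh(), so clearing the flag followed by synchronize_rcu() guarantees no submitter can still be queueing work when the queues are flushed. The sequence, annotated as a sketch:

```c
pinst->flags &= ~PADATA_INIT;	/* new submissions now fail */
synchronize_rcu();		/* wait out in-flight rcu_read_lock_bh()
				 * sections in padata_do_parallel() */
get_online_cpus();
padata_flush_queues(pinst->pd);	/* drain what was already queued */
put_online_cpus();
```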
@@ -497,41 +528,162 @@ static void padata_replace(struct padata_instance *pinst, | |||
497 | 528 | ||
498 | synchronize_rcu(); | 529 | synchronize_rcu(); |
499 | 530 | ||
531 | if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu)) | ||
532 | notification_mask |= PADATA_CPU_PARALLEL; | ||
533 | if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu)) | ||
534 | notification_mask |= PADATA_CPU_SERIAL; | ||
535 | |||
500 | padata_flush_queues(pd_old); | 536 | padata_flush_queues(pd_old); |
501 | padata_free_pd(pd_old); | 537 | padata_free_pd(pd_old); |
502 | 538 | ||
539 | if (notification_mask) | ||
540 | blocking_notifier_call_chain(&pinst->cpumask_change_notifier, | ||
541 | notification_mask, | ||
542 | &pd_new->cpumask); | ||
543 | |||
503 | pinst->flags &= ~PADATA_RESET; | 544 | pinst->flags &= ~PADATA_RESET; |
504 | } | 545 | } |
505 | 546 | ||
506 | /** | 547 | /** |
507 | * padata_set_cpumask - set the cpumask that padata should use | 548 | * padata_register_cpumask_notifier - Registers a notifier that will be called |
549 | * if either pcpu or cbcpu or both cpumasks change. | ||
508 | * | 550 | * |
509 | * @pinst: padata instance | 551 | * @pinst: A poineter to padata instance |
510 | * @cpumask: the cpumask to use | 552 | * @nblock: A pointer to notifier block. |
511 | */ | 553 | */ |
512 | int padata_set_cpumask(struct padata_instance *pinst, | 554 | int padata_register_cpumask_notifier(struct padata_instance *pinst, |
513 | cpumask_var_t cpumask) | 555 | struct notifier_block *nblock) |
514 | { | 556 | { |
557 | return blocking_notifier_chain_register(&pinst->cpumask_change_notifier, | ||
558 | nblock); | ||
559 | } | ||
560 | EXPORT_SYMBOL(padata_register_cpumask_notifier); | ||
561 | |||
562 | /** | ||
563 | * padata_unregister_cpumask_notifier - Unregisters cpumask notifier | ||
564 | * registered earlier using padata_register_cpumask_notifier | ||
565 | * | ||
566 | * @pinst: A pointer to data instance. | ||
567 | * @nlock: A pointer to notifier block. | ||
568 | */ | ||
569 | int padata_unregister_cpumask_notifier(struct padata_instance *pinst, | ||
570 | struct notifier_block *nblock) | ||
571 | { | ||
572 | return blocking_notifier_chain_unregister( | ||
573 | &pinst->cpumask_change_notifier, | ||
574 | nblock); | ||
575 | } | ||
576 | EXPORT_SYMBOL(padata_unregister_cpumask_notifier); | ||
577 | |||
578 | |||
579 | /* If cpumask contains no active cpu, we mark the instance as invalid. */ | ||
580 | static bool padata_validate_cpumask(struct padata_instance *pinst, | ||
581 | const struct cpumask *cpumask) | ||
582 | { | ||
583 | if (!cpumask_intersects(cpumask, cpu_active_mask)) { | ||
584 | pinst->flags |= PADATA_INVALID; | ||
585 | return false; | ||
586 | } | ||
587 | |||
588 | pinst->flags &= ~PADATA_INVALID; | ||
589 | return true; | ||
590 | } | ||
591 | |||
592 | static int __padata_set_cpumasks(struct padata_instance *pinst, | ||
593 | cpumask_var_t pcpumask, | ||
594 | cpumask_var_t cbcpumask) | ||
595 | { | ||
596 | int valid; | ||
515 | struct parallel_data *pd; | 597 | struct parallel_data *pd; |
516 | int err = 0; | 598 | |
599 | valid = padata_validate_cpumask(pinst, pcpumask); | ||
600 | if (!valid) { | ||
601 | __padata_stop(pinst); | ||
602 | goto out_replace; | ||
603 | } | ||
604 | |||
605 | valid = padata_validate_cpumask(pinst, cbcpumask); | ||
606 | if (!valid) | ||
607 | __padata_stop(pinst); | ||
608 | |||
609 | out_replace: | ||
610 | pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); | ||
611 | if (!pd) | ||
612 | return -ENOMEM; | ||
613 | |||
614 | cpumask_copy(pinst->cpumask.pcpu, pcpumask); | ||
615 | cpumask_copy(pinst->cpumask.cbcpu, cbcpumask); | ||
616 | |||
617 | padata_replace(pinst, pd); | ||
618 | |||
619 | if (valid) | ||
620 | __padata_start(pinst); | ||
621 | |||
622 | return 0; | ||
623 | } | ||
624 | |||
625 | /** | ||
626 | * padata_set_cpumasks - Set both parallel and serial cpumasks. The first | ||
627 | * one is used by parallel workers and the second one | ||
628 | * by the wokers doing serialization. | ||
629 | * | ||
630 | * @pinst: padata instance | ||
631 | * @pcpumask: the cpumask to use for parallel workers | ||
632 | * @cbcpumask: the cpumsak to use for serial workers | ||
633 | */ | ||
634 | int padata_set_cpumasks(struct padata_instance *pinst, cpumask_var_t pcpumask, | ||
635 | cpumask_var_t cbcpumask) | ||
636 | { | ||
637 | int err; | ||
517 | 638 | ||
518 | mutex_lock(&pinst->lock); | 639 | mutex_lock(&pinst->lock); |
640 | get_online_cpus(); | ||
519 | 641 | ||
642 | err = __padata_set_cpumasks(pinst, pcpumask, cbcpumask); | ||
643 | |||
644 | put_online_cpus(); | ||
645 | mutex_unlock(&pinst->lock); | ||
646 | |||
647 | return err; | ||
648 | |||
649 | } | ||
650 | EXPORT_SYMBOL(padata_set_cpumasks); | ||
651 | |||
652 | /** | ||
653 | * padata_set_cpumask: Sets specified by @cpumask_type cpumask to the value | ||
654 | * equivalent to @cpumask. | ||
655 | * | ||
656 | * @pinst: padata instance | ||
657 | * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL corresponding | ||
658 | * to parallel and serial cpumasks respectively. | ||
659 | * @cpumask: the cpumask to use | ||
660 | */ | ||
661 | int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, | ||
662 | cpumask_var_t cpumask) | ||
663 | { | ||
664 | struct cpumask *serial_mask, *parallel_mask; | ||
665 | int err = -EINVAL; | ||
666 | |||
667 | mutex_lock(&pinst->lock); | ||
520 | get_online_cpus(); | 668 | get_online_cpus(); |
521 | 669 | ||
522 | pd = padata_alloc_pd(pinst, cpumask); | 670 | switch (cpumask_type) { |
523 | if (!pd) { | 671 | case PADATA_CPU_PARALLEL: |
524 | err = -ENOMEM; | 672 | serial_mask = pinst->cpumask.cbcpu; |
525 | goto out; | 673 | parallel_mask = cpumask; |
674 | break; | ||
675 | case PADATA_CPU_SERIAL: | ||
676 | parallel_mask = pinst->cpumask.pcpu; | ||
677 | serial_mask = cpumask; | ||
678 | break; | ||
679 | default: | ||
680 | goto out; | ||
526 | } | 681 | } |
527 | 682 | ||
528 | cpumask_copy(pinst->cpumask, cpumask); | 683 | err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask); |
529 | |||
530 | padata_replace(pinst, pd); | ||
531 | 684 | ||
532 | out: | 685 | out: |
533 | put_online_cpus(); | 686 | put_online_cpus(); |
534 | |||
535 | mutex_unlock(&pinst->lock); | 687 | mutex_unlock(&pinst->lock); |
536 | 688 | ||
537 | return err; | 689 | return err; |
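With the masks split, users may need to react when either one changes; the new notifier chain is invoked from padata_replace() with a pointer to the new cpumask pair. A hedged sketch of a subscriber, loosely modeled on what pcrypt does in this series (`my_nblock` and the callback are hypothetical, and the pair is assumed to be the `struct padata_cpumask` passed as `&pd_new->cpumask` above):

```c
static int my_cpumask_change_notify(struct notifier_block *self,
				    unsigned long val, void *data)
{
	struct padata_cpumask *new_mask = data;

	if (!(val & PADATA_CPU_PARALLEL))
		return 0;	/* only interested in the parallel mask */

	pr_info("parallel cpumask changed, weight now %u\n",
		cpumask_weight(new_mask->pcpu));
	return 0;
}

static struct notifier_block my_nblock = {
	.notifier_call = my_cpumask_change_notify,
};

/* at init time: */
	err = padata_register_cpumask_notifier(pinst, &my_nblock);
```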
@@ -543,30 +695,48 @@ static int __padata_add_cpu(struct padata_instance *pinst, int cpu) | |||
543 | struct parallel_data *pd; | 695 | struct parallel_data *pd; |
544 | 696 | ||
545 | if (cpumask_test_cpu(cpu, cpu_active_mask)) { | 697 | if (cpumask_test_cpu(cpu, cpu_active_mask)) { |
546 | pd = padata_alloc_pd(pinst, pinst->cpumask); | 698 | pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu, |
699 | pinst->cpumask.cbcpu); | ||
547 | if (!pd) | 700 | if (!pd) |
548 | return -ENOMEM; | 701 | return -ENOMEM; |
549 | 702 | ||
550 | padata_replace(pinst, pd); | 703 | padata_replace(pinst, pd); |
704 | |||
705 | if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) && | ||
706 | padata_validate_cpumask(pinst, pinst->cpumask.cbcpu)) | ||
707 | __padata_start(pinst); | ||
551 | } | 708 | } |
552 | 709 | ||
553 | return 0; | 710 | return 0; |
554 | } | 711 | } |
555 | 712 | ||
556 | /** | 713 | /** |
557 | * padata_add_cpu - add a cpu to the padata cpumask | 714 | * padata_add_cpu - add a cpu to one or both(parallel and serial) |
715 | * padata cpumasks. | ||
558 | * | 716 | * |
559 | * @pinst: padata instance | 717 | * @pinst: padata instance |
560 | * @cpu: cpu to add | 718 | * @cpu: cpu to add |
719 | * @mask: bitmask of flags specifying to which cpumask @cpu shuld be added. | ||
720 | * The @mask may be any combination of the following flags: | ||
721 | * PADATA_CPU_SERIAL - serial cpumask | ||
722 | * PADATA_CPU_PARALLEL - parallel cpumask | ||
561 | */ | 723 | */ |
562 | int padata_add_cpu(struct padata_instance *pinst, int cpu) | 724 | |
725 | int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask) | ||
563 | { | 726 | { |
564 | int err; | 727 | int err; |
565 | 728 | ||
729 | if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL))) | ||
730 | return -EINVAL; | ||
731 | |||
566 | mutex_lock(&pinst->lock); | 732 | mutex_lock(&pinst->lock); |
567 | 733 | ||
568 | get_online_cpus(); | 734 | get_online_cpus(); |
569 | cpumask_set_cpu(cpu, pinst->cpumask); | 735 | if (mask & PADATA_CPU_SERIAL) |
736 | cpumask_set_cpu(cpu, pinst->cpumask.cbcpu); | ||
737 | if (mask & PADATA_CPU_PARALLEL) | ||
738 | cpumask_set_cpu(cpu, pinst->cpumask.pcpu); | ||
739 | |||
570 | err = __padata_add_cpu(pinst, cpu); | 740 | err = __padata_add_cpu(pinst, cpu); |
571 | put_online_cpus(); | 741 | put_online_cpus(); |
572 | 742 | ||
@@ -578,10 +748,16 @@ EXPORT_SYMBOL(padata_add_cpu); | |||
578 | 748 | ||
579 | static int __padata_remove_cpu(struct padata_instance *pinst, int cpu) | 749 | static int __padata_remove_cpu(struct padata_instance *pinst, int cpu) |
580 | { | 750 | { |
581 | struct parallel_data *pd; | 751 | struct parallel_data *pd = NULL; |
582 | 752 | ||
583 | if (cpumask_test_cpu(cpu, cpu_online_mask)) { | 753 | if (cpumask_test_cpu(cpu, cpu_online_mask)) { |
584 | pd = padata_alloc_pd(pinst, pinst->cpumask); | 754 | |
755 | if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) || | ||
756 | !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu)) | ||
757 | __padata_stop(pinst); | ||
758 | |||
759 | pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu, | ||
760 | pinst->cpumask.cbcpu); | ||
585 | if (!pd) | 761 | if (!pd) |
586 | return -ENOMEM; | 762 | return -ENOMEM; |
587 | 763 | ||
@@ -591,20 +767,32 @@ static int __padata_remove_cpu(struct padata_instance *pinst, int cpu) | |||
591 | return 0; | 767 | return 0; |
592 | } | 768 | } |
593 | 769 | ||
594 | /** | 770 | /** |
595 | * padata_remove_cpu - remove a cpu from the padata cpumask | 771 | * padata_remove_cpu - remove a cpu from the one or both(serial and paralell) |
772 | * padata cpumasks. | ||
596 | * | 773 | * |
597 | * @pinst: padata instance | 774 | * @pinst: padata instance |
598 | * @cpu: cpu to remove | 775 | * @cpu: cpu to remove |
776 | * @mask: bitmask specifying from which cpumask @cpu should be removed | ||
777 | * The @mask may be any combination of the following flags: | ||
778 | * PADATA_CPU_SERIAL - serial cpumask | ||
779 | * PADATA_CPU_PARALLEL - parallel cpumask | ||
599 | */ | 780 | */ |
600 | int padata_remove_cpu(struct padata_instance *pinst, int cpu) | 781 | int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask) |
601 | { | 782 | { |
602 | int err; | 783 | int err; |
603 | 784 | ||
785 | if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL))) | ||
786 | return -EINVAL; | ||
787 | |||
604 | mutex_lock(&pinst->lock); | 788 | mutex_lock(&pinst->lock); |
605 | 789 | ||
606 | get_online_cpus(); | 790 | get_online_cpus(); |
607 | cpumask_clear_cpu(cpu, pinst->cpumask); | 791 | if (mask & PADATA_CPU_SERIAL) |
792 | cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu); | ||
793 | if (mask & PADATA_CPU_PARALLEL) | ||
794 | cpumask_clear_cpu(cpu, pinst->cpumask.pcpu); | ||
795 | |||
608 | err = __padata_remove_cpu(pinst, cpu); | 796 | err = __padata_remove_cpu(pinst, cpu); |
609 | put_online_cpus(); | 797 | put_online_cpus(); |
610 | 798 | ||
@@ -619,11 +807,20 @@ EXPORT_SYMBOL(padata_remove_cpu); | |||
619 | * | 807 | * |
620 | * @pinst: padata instance to start | 808 | * @pinst: padata instance to start |
621 | */ | 809 | */ |
622 | void padata_start(struct padata_instance *pinst) | 810 | int padata_start(struct padata_instance *pinst) |
623 | { | 811 | { |
812 | int err = 0; | ||
813 | |||
624 | mutex_lock(&pinst->lock); | 814 | mutex_lock(&pinst->lock); |
625 | pinst->flags |= PADATA_INIT; | 815 | |
816 | if (pinst->flags & PADATA_INVALID) | ||
817 | err =-EINVAL; | ||
818 | |||
819 | __padata_start(pinst); | ||
820 | |||
626 | mutex_unlock(&pinst->lock); | 821 | mutex_unlock(&pinst->lock); |
822 | |||
823 | return err; | ||
627 | } | 824 | } |
628 | EXPORT_SYMBOL(padata_start); | 825 | EXPORT_SYMBOL(padata_start); |
629 | 826 | ||
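padata_start() now reports failure instead of returning void; note that, as committed above, it still calls __padata_start() even when the instance is flagged PADATA_INVALID, so callers should treat any nonzero return as "not started". A hedged caller sketch:

```c
err = padata_start(pinst);
if (err) {
	/* one of the cpumasks contained no active CPU */
	padata_free(pinst);	/* hypothetical caller policy */
	return err;
}
```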
@@ -635,12 +832,20 @@ EXPORT_SYMBOL(padata_start); | |||
635 | void padata_stop(struct padata_instance *pinst) | 832 | void padata_stop(struct padata_instance *pinst) |
636 | { | 833 | { |
637 | mutex_lock(&pinst->lock); | 834 | mutex_lock(&pinst->lock); |
638 | pinst->flags &= ~PADATA_INIT; | 835 | __padata_stop(pinst); |
639 | mutex_unlock(&pinst->lock); | 836 | mutex_unlock(&pinst->lock); |
640 | } | 837 | } |
641 | EXPORT_SYMBOL(padata_stop); | 838 | EXPORT_SYMBOL(padata_stop); |
642 | 839 | ||
643 | #ifdef CONFIG_HOTPLUG_CPU | 840 | #ifdef CONFIG_HOTPLUG_CPU |
841 | |||
842 | static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu) | ||
843 | { | ||
844 | return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) || | ||
845 | cpumask_test_cpu(cpu, pinst->cpumask.cbcpu); | ||
846 | } | ||
847 | |||
848 | |||
644 | static int padata_cpu_callback(struct notifier_block *nfb, | 849 | static int padata_cpu_callback(struct notifier_block *nfb, |
645 | unsigned long action, void *hcpu) | 850 | unsigned long action, void *hcpu) |
646 | { | 851 | { |
@@ -653,7 +858,7 @@ static int padata_cpu_callback(struct notifier_block *nfb, | |||
653 | switch (action) { | 858 | switch (action) { |
654 | case CPU_ONLINE: | 859 | case CPU_ONLINE: |
655 | case CPU_ONLINE_FROZEN: | 860 | case CPU_ONLINE_FROZEN: |
656 | if (!cpumask_test_cpu(cpu, pinst->cpumask)) | 861 | if (!pinst_has_cpu(pinst, cpu)) |
657 | break; | 862 | break; |
658 | mutex_lock(&pinst->lock); | 863 | mutex_lock(&pinst->lock); |
659 | err = __padata_add_cpu(pinst, cpu); | 864 | err = __padata_add_cpu(pinst, cpu); |
@@ -664,7 +869,7 @@ static int padata_cpu_callback(struct notifier_block *nfb, | |||
664 | 869 | ||
665 | case CPU_DOWN_PREPARE: | 870 | case CPU_DOWN_PREPARE: |
666 | case CPU_DOWN_PREPARE_FROZEN: | 871 | case CPU_DOWN_PREPARE_FROZEN: |
667 | if (!cpumask_test_cpu(cpu, pinst->cpumask)) | 872 | if (!pinst_has_cpu(pinst, cpu)) |
668 | break; | 873 | break; |
669 | mutex_lock(&pinst->lock); | 874 | mutex_lock(&pinst->lock); |
670 | err = __padata_remove_cpu(pinst, cpu); | 875 | err = __padata_remove_cpu(pinst, cpu); |
@@ -675,7 +880,7 @@ static int padata_cpu_callback(struct notifier_block *nfb, | |||
675 | 880 | ||
676 | case CPU_UP_CANCELED: | 881 | case CPU_UP_CANCELED: |
677 | case CPU_UP_CANCELED_FROZEN: | 882 | case CPU_UP_CANCELED_FROZEN: |
678 | if (!cpumask_test_cpu(cpu, pinst->cpumask)) | 883 | if (!pinst_has_cpu(pinst, cpu)) |
679 | break; | 884 | break; |
680 | mutex_lock(&pinst->lock); | 885 | mutex_lock(&pinst->lock); |
681 | __padata_remove_cpu(pinst, cpu); | 886 | __padata_remove_cpu(pinst, cpu); |
@@ -683,7 +888,7 @@ static int padata_cpu_callback(struct notifier_block *nfb, | |||
683 | 888 | ||
684 | case CPU_DOWN_FAILED: | 889 | case CPU_DOWN_FAILED: |
685 | case CPU_DOWN_FAILED_FROZEN: | 890 | case CPU_DOWN_FAILED_FROZEN: |
686 | if (!cpumask_test_cpu(cpu, pinst->cpumask)) | 891 | if (!pinst_has_cpu(pinst, cpu)) |
687 | break; | 892 | break; |
688 | mutex_lock(&pinst->lock); | 893 | mutex_lock(&pinst->lock); |
689 | __padata_add_cpu(pinst, cpu); | 894 | __padata_add_cpu(pinst, cpu); |
@@ -694,36 +899,202 @@ static int padata_cpu_callback(struct notifier_block *nfb, | |||
694 | } | 899 | } |
695 | #endif | 900 | #endif |
696 | 901 | ||
902 | static void __padata_free(struct padata_instance *pinst) | ||
903 | { | ||
904 | #ifdef CONFIG_HOTPLUG_CPU | ||
905 | unregister_hotcpu_notifier(&pinst->cpu_notifier); | ||
906 | #endif | ||
907 | |||
908 | padata_stop(pinst); | ||
909 | padata_free_pd(pinst->pd); | ||
910 | free_cpumask_var(pinst->cpumask.pcpu); | ||
911 | free_cpumask_var(pinst->cpumask.cbcpu); | ||
912 | kfree(pinst); | ||
913 | } | ||
914 | |||
915 | #define kobj2pinst(_kobj) \ | ||
916 | container_of(_kobj, struct padata_instance, kobj) | ||
917 | #define attr2pentry(_attr) \ | ||
918 | container_of(_attr, struct padata_sysfs_entry, attr) | ||
919 | |||
920 | static void padata_sysfs_release(struct kobject *kobj) | ||
921 | { | ||
922 | struct padata_instance *pinst = kobj2pinst(kobj); | ||
923 | __padata_free(pinst); | ||
924 | } | ||
925 | |||
926 | struct padata_sysfs_entry { | ||
927 | struct attribute attr; | ||
928 | ssize_t (*show)(struct padata_instance *, struct attribute *, char *); | ||
929 | ssize_t (*store)(struct padata_instance *, struct attribute *, | ||
930 | const char *, size_t); | ||
931 | }; | ||
932 | |||
933 | static ssize_t show_cpumask(struct padata_instance *pinst, | ||
934 | struct attribute *attr, char *buf) | ||
935 | { | ||
936 | struct cpumask *cpumask; | ||
937 | ssize_t len; | ||
938 | |||
939 | mutex_lock(&pinst->lock); | ||
940 | if (!strcmp(attr->name, "serial_cpumask")) | ||
941 | cpumask = pinst->cpumask.cbcpu; | ||
942 | else | ||
943 | cpumask = pinst->cpumask.pcpu; | ||
944 | |||
945 | len = bitmap_scnprintf(buf, PAGE_SIZE, cpumask_bits(cpumask), | ||
946 | nr_cpu_ids); | ||
947 | if (PAGE_SIZE - len < 2) | ||
948 | len = -EINVAL; | ||
949 | else | ||
950 | len += sprintf(buf + len, "\n"); | ||
951 | |||
952 | mutex_unlock(&pinst->lock); | ||
953 | return len; | ||
954 | } | ||
955 | |||
956 | static ssize_t store_cpumask(struct padata_instance *pinst, | ||
957 | struct attribute *attr, | ||
958 | const char *buf, size_t count) | ||
959 | { | ||
960 | cpumask_var_t new_cpumask; | ||
961 | ssize_t ret; | ||
962 | int mask_type; | ||
963 | |||
964 | if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL)) | ||
965 | return -ENOMEM; | ||
966 | |||
967 | ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask), | ||
968 | nr_cpumask_bits); | ||
969 | if (ret < 0) | ||
970 | goto out; | ||
971 | |||
972 | mask_type = !strcmp(attr->name, "serial_cpumask") ? | ||
973 | PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL; | ||
974 | ret = padata_set_cpumask(pinst, mask_type, new_cpumask); | ||
975 | if (!ret) | ||
976 | ret = count; | ||
977 | |||
978 | out: | ||
979 | free_cpumask_var(new_cpumask); | ||
980 | return ret; | ||
981 | } | ||
982 | |||
983 | #define PADATA_ATTR_RW(_name, _show_name, _store_name) \ | ||
984 | static struct padata_sysfs_entry _name##_attr = \ | ||
985 | __ATTR(_name, 0644, _show_name, _store_name) | ||
986 | #define PADATA_ATTR_RO(_name, _show_name) \ | ||
987 | static struct padata_sysfs_entry _name##_attr = \ | ||
988 | __ATTR(_name, 0400, _show_name, NULL) | ||
989 | |||
990 | PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask); | ||
991 | PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask); | ||
992 | |||
993 | /* | ||
994 | * Padata sysfs provides the following objects: | ||
995 | * serial_cpumask [RW] - cpumask for serial workers | ||
996 | * parallel_cpumask [RW] - cpumask for parallel workers | ||
997 | */ | ||
998 | static struct attribute *padata_default_attrs[] = { | ||
999 | &serial_cpumask_attr.attr, | ||
1000 | ¶llel_cpumask_attr.attr, | ||
1001 | NULL, | ||
1002 | }; | ||
1003 | |||
1004 | static ssize_t padata_sysfs_show(struct kobject *kobj, | ||
1005 | struct attribute *attr, char *buf) | ||
1006 | { | ||
1007 | struct padata_instance *pinst; | ||
1008 | struct padata_sysfs_entry *pentry; | ||
1009 | ssize_t ret = -EIO; | ||
1010 | |||
1011 | pinst = kobj2pinst(kobj); | ||
1012 | pentry = attr2pentry(attr); | ||
1013 | if (pentry->show) | ||
1014 | ret = pentry->show(pinst, attr, buf); | ||
1015 | |||
1016 | return ret; | ||
1017 | } | ||
1018 | |||
1019 | static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr, | ||
1020 | const char *buf, size_t count) | ||
1021 | { | ||
1022 | struct padata_instance *pinst; | ||
1023 | struct padata_sysfs_entry *pentry; | ||
1024 | ssize_t ret = -EIO; | ||
1025 | |||
1026 | pinst = kobj2pinst(kobj); | ||
1027 | pentry = attr2pentry(attr); | ||
1028 | if (pentry->show) | ||
1029 | ret = pentry->store(pinst, attr, buf, count); | ||
1030 | |||
1031 | return ret; | ||
1032 | } | ||
1033 | |||
1034 | static const struct sysfs_ops padata_sysfs_ops = { | ||
1035 | .show = padata_sysfs_show, | ||
1036 | .store = padata_sysfs_store, | ||
1037 | }; | ||
1038 | |||
1039 | static struct kobj_type padata_attr_type = { | ||
1040 | .sysfs_ops = &padata_sysfs_ops, | ||
1041 | .default_attrs = padata_default_attrs, | ||
1042 | .release = padata_sysfs_release, | ||
1043 | }; | ||
1044 | |||
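Once the instance kobject is added to sysfs, `serial_cpumask` and `parallel_cpumask` accept and print hex bitmaps via bitmap_parse()/bitmap_scnprintf(). One apparent copy-paste slip above: padata_sysfs_store() tests `pentry->show` before calling `pentry->store`; it is harmless here only because both attributes are RW and define both methods. A corrected sketch:

```c
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);
```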
697 | /** | 1045 | /** |
698 | * padata_alloc - allocate and initialize a padata instance | 1046 | * padata_alloc_possible - Allocate and initialize padata instance. |
1047 | * Use the cpu_possible_mask for serial and | ||
1048 | * parallel workers. | ||
699 | * | 1049 | * |
700 | * @cpumask: cpumask that padata uses for parallelization | ||
701 | * @wq: workqueue to use for the allocated padata instance | 1050 | * @wq: workqueue to use for the allocated padata instance |
702 | */ | 1051 | */ |
703 | struct padata_instance *padata_alloc(const struct cpumask *cpumask, | 1052 | struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq) |
704 | struct workqueue_struct *wq) | 1053 | { |
1054 | return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask); | ||
1055 | } | ||
1056 | EXPORT_SYMBOL(padata_alloc_possible); | ||
1057 | |||
1058 | /** | ||
1059 | * padata_alloc - allocate and initialize a padata instance and specify | ||
1060 | * cpumasks for serial and parallel workers. | ||
1061 | * | ||
1062 | * @wq: workqueue to use for the allocated padata instance | ||
1063 | * @pcpumask: cpumask that will be used for padata parallelization | ||
1064 | * @cbcpumask: cpumask that will be used for padata serialization | ||
1065 | */ | ||
1066 | struct padata_instance *padata_alloc(struct workqueue_struct *wq, | ||
1067 | const struct cpumask *pcpumask, | ||
1068 | const struct cpumask *cbcpumask) | ||
705 | { | 1069 | { |
706 | struct padata_instance *pinst; | 1070 | struct padata_instance *pinst; |
707 | struct parallel_data *pd; | 1071 | struct parallel_data *pd = NULL; |
708 | 1072 | ||
709 | pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL); | 1073 | pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL); |
710 | if (!pinst) | 1074 | if (!pinst) |
711 | goto err; | 1075 | goto err; |
712 | 1076 | ||
713 | get_online_cpus(); | 1077 | get_online_cpus(); |
714 | 1078 | if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL)) | |
715 | pd = padata_alloc_pd(pinst, cpumask); | ||
716 | if (!pd) | ||
717 | goto err_free_inst; | 1079 | goto err_free_inst; |
1080 | if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) { | ||
1081 | free_cpumask_var(pinst->cpumask.pcpu); | ||
1082 | goto err_free_inst; | ||
1083 | } | ||
1084 | if (!padata_validate_cpumask(pinst, pcpumask) || | ||
1085 | !padata_validate_cpumask(pinst, cbcpumask)) | ||
1086 | goto err_free_masks; | ||
718 | 1087 | ||
719 | if (!alloc_cpumask_var(&pinst->cpumask, GFP_KERNEL)) | 1088 | pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); |
720 | goto err_free_pd; | 1089 | if (!pd) |
1090 | goto err_free_masks; | ||
721 | 1091 | ||
722 | rcu_assign_pointer(pinst->pd, pd); | 1092 | rcu_assign_pointer(pinst->pd, pd); |
723 | 1093 | ||
724 | pinst->wq = wq; | 1094 | pinst->wq = wq; |
725 | 1095 | ||
726 | cpumask_copy(pinst->cpumask, cpumask); | 1096 | cpumask_copy(pinst->cpumask.pcpu, pcpumask); |
1097 | cpumask_copy(pinst->cpumask.cbcpu, cbcpumask); | ||
727 | 1098 | ||
728 | pinst->flags = 0; | 1099 | pinst->flags = 0; |
729 | 1100 | ||
@@ -735,12 +1106,15 @@ struct padata_instance *padata_alloc(const struct cpumask *cpumask, | |||
735 | 1106 | ||
736 | put_online_cpus(); | 1107 | put_online_cpus(); |
737 | 1108 | ||
1109 | BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier); | ||
1110 | kobject_init(&pinst->kobj, &padata_attr_type); | ||
738 | mutex_init(&pinst->lock); | 1111 | mutex_init(&pinst->lock); |
739 | 1112 | ||
740 | return pinst; | 1113 | return pinst; |
741 | 1114 | ||
742 | err_free_pd: | 1115 | err_free_masks: |
743 | padata_free_pd(pd); | 1116 | free_cpumask_var(pinst->cpumask.pcpu); |
1117 | free_cpumask_var(pinst->cpumask.cbcpu); | ||
744 | err_free_inst: | 1118 | err_free_inst: |
745 | kfree(pinst); | 1119 | kfree(pinst); |
746 | put_online_cpus(); | 1120 | put_online_cpus(); |
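padata_alloc() only initializes the kobject; making the sysfs files visible is left to the user of the instance (pcrypt does this elsewhere in the same series). A hedged sketch, with `parent_kobj` and the name being hypothetical:

```c
	err = kobject_add(&pinst->kobj, parent_kobj, "my_padata");
	if (err)
		kobject_put(&pinst->kobj);	/* release path runs __padata_free() */
```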
@@ -756,19 +1130,6 @@ EXPORT_SYMBOL(padata_alloc); | |||
756 | */ | 1130 | */ |
757 | void padata_free(struct padata_instance *pinst) | 1131 | void padata_free(struct padata_instance *pinst) |
758 | { | 1132 | { |
759 | padata_stop(pinst); | 1133 | kobject_put(&pinst->kobj); |
760 | |||
761 | synchronize_rcu(); | ||
762 | |||
763 | #ifdef CONFIG_HOTPLUG_CPU | ||
764 | unregister_hotcpu_notifier(&pinst->cpu_notifier); | ||
765 | #endif | ||
766 | get_online_cpus(); | ||
767 | padata_flush_queues(pinst->pd); | ||
768 | put_online_cpus(); | ||
769 | |||
770 | padata_free_pd(pinst->pd); | ||
771 | free_cpumask_var(pinst->cpumask); | ||
772 | kfree(pinst); | ||
773 | } | 1134 | } |
774 | EXPORT_SYMBOL(padata_free); | 1135 | EXPORT_SYMBOL(padata_free); |