Diffstat (limited to 'kernel/padata.c')
-rw-r--r--  kernel/padata.c | 471 +++++++++++++++++++++++++++++-----------
 1 file changed, 347 insertions(+), 124 deletions(-)
diff --git a/kernel/padata.c b/kernel/padata.c
index 450d67d394b..84d0ca9dac9 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -35,9 +35,9 @@ static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
 {
 	int cpu, target_cpu;
 
-	target_cpu = cpumask_first(pd->cpumask);
+	target_cpu = cpumask_first(pd->cpumask.pcpu);
 	for (cpu = 0; cpu < cpu_index; cpu++)
-		target_cpu = cpumask_next(target_cpu, pd->cpumask);
+		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);
 
 	return target_cpu;
 }
@@ -53,26 +53,27 @@ static int padata_cpu_hash(struct padata_priv *padata)
 	 * Hash the sequence numbers to the cpus by taking
 	 * seq_nr mod. number of cpus in use.
 	 */
-	cpu_index = padata->seq_nr % cpumask_weight(pd->cpumask);
+	cpu_index = padata->seq_nr % cpumask_weight(pd->cpumask.pcpu);
 
 	return padata_index_to_cpu(pd, cpu_index);
 }
 
-static void padata_parallel_worker(struct work_struct *work)
+static void padata_parallel_worker(struct work_struct *parallel_work)
 {
-	struct padata_queue *queue;
+	struct padata_parallel_queue *pqueue;
 	struct parallel_data *pd;
 	struct padata_instance *pinst;
 	LIST_HEAD(local_list);
 
 	local_bh_disable();
-	queue = container_of(work, struct padata_queue, pwork);
-	pd = queue->pd;
+	pqueue = container_of(parallel_work,
+			      struct padata_parallel_queue, work);
+	pd = pqueue->pd;
 	pinst = pd->pinst;
 
-	spin_lock(&queue->parallel.lock);
-	list_replace_init(&queue->parallel.list, &local_list);
-	spin_unlock(&queue->parallel.lock);
+	spin_lock(&pqueue->parallel.lock);
+	list_replace_init(&pqueue->parallel.list, &local_list);
+	spin_unlock(&pqueue->parallel.lock);
 
 	while (!list_empty(&local_list)) {
 		struct padata_priv *padata;
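A note on the hashing above: padata_index_to_cpu() walks pd->cpumask.pcpu, so cpu_index selects the cpu_index-th set bit of the parallel mask. A small user-space model of that round-robin mapping (an illustration only, not kernel code; the array stands in for the cpumask):

#include <stdio.h>

/* Stand-in for walking pd->cpumask.pcpu with cpumask_first()/cpumask_next():
 * pcpu[] lists the cpu ids that are set in the parallel mask. */
static int index_to_cpu(const int *pcpu, int cpu_index)
{
	return pcpu[cpu_index];
}

int main(void)
{
	int pcpu[] = { 1, 4, 6 };	/* a sparse parallel cpumask, weight 3 */
	int num_cpus = 3;
	unsigned int seq_nr;

	/* seq_nr % num_cpus spreads jobs round robin: cpus 1, 4, 6, 1, 4, ... */
	for (seq_nr = 0; seq_nr < 6; seq_nr++)
		printf("seq_nr %u -> cpu %d\n", seq_nr,
		       index_to_cpu(pcpu, seq_nr % num_cpus));
	return 0;
}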
@@ -94,7 +95,7 @@ static void padata_parallel_worker(struct work_struct *work)
  * @pinst: padata instance
  * @padata: object to be parallelized
  * @cb_cpu: cpu the serialization callback function will run on,
- *          must be in the cpumask of padata.
+ *          must be in the serial cpumask of padata (i.e. cpumask.cbcpu).
  *
  * The parallelization callback function will run with BHs off.
  * Note: Every object which is parallelized by padata_do_parallel
@@ -104,7 +105,7 @@ int padata_do_parallel(struct padata_instance *pinst,
 		       struct padata_priv *padata, int cb_cpu)
 {
 	int target_cpu, err;
-	struct padata_queue *queue;
+	struct padata_parallel_queue *queue;
 	struct parallel_data *pd;
 
 	rcu_read_lock_bh();
@@ -115,7 +116,7 @@ int padata_do_parallel(struct padata_instance *pinst,
 	if (!(pinst->flags & PADATA_INIT))
 		goto out;
 
-	if (!cpumask_test_cpu(cb_cpu, pd->cpumask))
+	if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu))
 		goto out;
 
 	err = -EBUSY;
@@ -136,13 +137,13 @@ int padata_do_parallel(struct padata_instance *pinst,
 	padata->seq_nr = atomic_inc_return(&pd->seq_nr);
 
 	target_cpu = padata_cpu_hash(padata);
-	queue = per_cpu_ptr(pd->queue, target_cpu);
+	queue = per_cpu_ptr(pd->pqueue, target_cpu);
 
 	spin_lock(&queue->parallel.lock);
 	list_add_tail(&padata->list, &queue->parallel.list);
 	spin_unlock(&queue->parallel.lock);
 
-	queue_work_on(target_cpu, pinst->wq, &queue->pwork);
+	queue_work_on(target_cpu, pinst->wq, &queue->work);
 
 out:
 	rcu_read_unlock_bh();
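For context, a minimal sketch of what a caller of padata_do_parallel() looks like after this change. The request structure and callback bodies are hypothetical; the embedded struct padata_priv with its parallel()/serial() hooks comes from include/linux/padata.h, and the calls follow the signatures shown in this diff:

/* Hypothetical client code, for illustration only. */
struct my_request {
	struct padata_priv padata;	/* must be embedded in the job */
	void *data;
};

static void my_serial(struct padata_priv *padata);	/* sketched below */

static void my_parallel(struct padata_priv *padata)
{
	struct my_request *req = container_of(padata, struct my_request,
					      padata);

	do_heavy_work(req->data);	/* hypothetical; runs with BHs off */
	padata_do_serial(padata);	/* hand the object to serialization */
}

static int my_submit(struct padata_instance *pinst, struct my_request *req,
		     int cb_cpu)
{
	req->padata.parallel = my_parallel;
	req->padata.serial = my_serial;

	/* cb_cpu must now be in the *serial* cpumask (cpumask.cbcpu). */
	return padata_do_parallel(pinst, &req->padata, cb_cpu);
}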
@@ -172,11 +173,11 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd)
 {
 	int cpu, num_cpus;
 	int next_nr, next_index;
-	struct padata_queue *queue, *next_queue;
+	struct padata_parallel_queue *queue, *next_queue;
 	struct padata_priv *padata;
 	struct padata_list *reorder;
 
-	num_cpus = cpumask_weight(pd->cpumask);
+	num_cpus = cpumask_weight(pd->cpumask.pcpu);
 
 	/*
 	 * Calculate the percpu reorder queue and the sequence
@@ -185,13 +186,13 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd)
 	next_nr = pd->processed;
 	next_index = next_nr % num_cpus;
 	cpu = padata_index_to_cpu(pd, next_index);
-	next_queue = per_cpu_ptr(pd->queue, cpu);
+	next_queue = per_cpu_ptr(pd->pqueue, cpu);
 
 	if (unlikely(next_nr > pd->max_seq_nr)) {
 		next_nr = next_nr - pd->max_seq_nr - 1;
 		next_index = next_nr % num_cpus;
 		cpu = padata_index_to_cpu(pd, next_index);
-		next_queue = per_cpu_ptr(pd->queue, cpu);
+		next_queue = per_cpu_ptr(pd->pqueue, cpu);
 		pd->processed = 0;
 	}
 
@@ -215,7 +216,7 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd)
 		goto out;
 	}
 
-	queue = per_cpu_ptr(pd->queue, smp_processor_id());
+	queue = per_cpu_ptr(pd->pqueue, smp_processor_id());
 	if (queue->cpu_index == next_queue->cpu_index) {
 		padata = ERR_PTR(-ENODATA);
 		goto out;
@@ -229,7 +230,7 @@ out:
 static void padata_reorder(struct parallel_data *pd)
 {
 	struct padata_priv *padata;
-	struct padata_queue *queue;
+	struct padata_serial_queue *squeue;
 	struct padata_instance *pinst = pd->pinst;
 
 	/*
@@ -268,13 +269,13 @@ static void padata_reorder(struct parallel_data *pd)
 			return;
 		}
 
-		queue = per_cpu_ptr(pd->queue, padata->cb_cpu);
+		squeue = per_cpu_ptr(pd->squeue, padata->cb_cpu);
 
-		spin_lock(&queue->serial.lock);
-		list_add_tail(&padata->list, &queue->serial.list);
-		spin_unlock(&queue->serial.lock);
+		spin_lock(&squeue->serial.lock);
+		list_add_tail(&padata->list, &squeue->serial.list);
+		spin_unlock(&squeue->serial.lock);
 
-		queue_work_on(padata->cb_cpu, pinst->wq, &queue->swork);
+		queue_work_on(padata->cb_cpu, pinst->wq, &squeue->work);
 	}
 
 	spin_unlock_bh(&pd->lock);
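The hunks above lean on one invariant: sequence numbers are dealt out round robin over the parallel cpumask, so the object that must be serialized next (sequence number pd->processed) can only be waiting in one reorder queue, the one at index pd->processed % num_cpus. A user-space illustration of that arithmetic (not kernel code):

#include <stdio.h>

int main(void)
{
	int num_cpus = 3;	/* cpumask_weight(pd->cpumask.pcpu) */
	int processed;		/* pd->processed: objects already serialized */

	/* The next object in submission order always waits in the reorder
	 * queue of the cpu its sequence number was hashed to. */
	for (processed = 0; processed < 7; processed++)
		printf("seq_nr %d waits on reorder queue of cpu index %d\n",
		       processed, processed % num_cpus);
	return 0;
}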
@@ -300,19 +301,19 @@ static void padata_reorder_timer(unsigned long arg)
 	padata_reorder(pd);
 }
 
-static void padata_serial_worker(struct work_struct *work)
+static void padata_serial_worker(struct work_struct *serial_work)
 {
-	struct padata_queue *queue;
+	struct padata_serial_queue *squeue;
 	struct parallel_data *pd;
 	LIST_HEAD(local_list);
 
 	local_bh_disable();
-	queue = container_of(work, struct padata_queue, swork);
-	pd = queue->pd;
+	squeue = container_of(serial_work, struct padata_serial_queue, work);
+	pd = squeue->pd;
 
-	spin_lock(&queue->serial.lock);
-	list_replace_init(&queue->serial.list, &local_list);
-	spin_unlock(&queue->serial.lock);
+	spin_lock(&squeue->serial.lock);
+	list_replace_init(&squeue->serial.list, &local_list);
+	spin_unlock(&squeue->serial.lock);
 
 	while (!list_empty(&local_list)) {
 		struct padata_priv *padata;
@@ -339,18 +340,18 @@ static void padata_serial_worker(struct work_struct *work)
 void padata_do_serial(struct padata_priv *padata)
 {
 	int cpu;
-	struct padata_queue *queue;
+	struct padata_parallel_queue *pqueue;
 	struct parallel_data *pd;
 
 	pd = padata->pd;
 
 	cpu = get_cpu();
-	queue = per_cpu_ptr(pd->queue, cpu);
+	pqueue = per_cpu_ptr(pd->pqueue, cpu);
 
-	spin_lock(&queue->reorder.lock);
+	spin_lock(&pqueue->reorder.lock);
 	atomic_inc(&pd->reorder_objects);
-	list_add_tail(&padata->list, &queue->reorder.list);
-	spin_unlock(&queue->reorder.lock);
+	list_add_tail(&padata->list, &pqueue->reorder.list);
+	spin_unlock(&pqueue->reorder.lock);
 
 	put_cpu();
 
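To complete the picture from the earlier sketch: once padata_do_serial() has been called for an object, the reorder machinery eventually runs its serial() callback on cb_cpu, in submission order. A hypothetical serial callback (struct my_request and its completion field are illustrative, not part of this patch):

/* Hypothetical client code, for illustration only. */
static void my_serial(struct padata_priv *padata)
{
	struct my_request *req = container_of(padata, struct my_request,
					      padata);

	/* Runs on cb_cpu, strictly in the order the jobs were submitted. */
	complete(&req->done);	/* assumes a struct completion in my_request */
}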
@@ -358,51 +359,88 @@ void padata_do_serial(struct padata_priv *padata)
 }
 EXPORT_SYMBOL(padata_do_serial);
 
-/* Allocate and initialize the internal cpumask dependend resources. */
-static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
-					     const struct cpumask *cpumask)
+static int padata_setup_cpumasks(struct parallel_data *pd,
+				 const struct cpumask *pcpumask,
+				 const struct cpumask *cbcpumask)
 {
-	int cpu, cpu_index, num_cpus;
-	struct padata_queue *queue;
-	struct parallel_data *pd;
+	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
+		return -ENOMEM;
 
-	cpu_index = 0;
+	cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_active_mask);
+	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
+		free_cpumask_var(pd->cpumask.pcpu);
+		return -ENOMEM;
+	}
 
-	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
-	if (!pd)
-		goto err;
+	cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_active_mask);
+	return 0;
+}
 
-	pd->queue = alloc_percpu(struct padata_queue);
-	if (!pd->queue)
-		goto err_free_pd;
+static void __padata_list_init(struct padata_list *pd_list)
+{
+	INIT_LIST_HEAD(&pd_list->list);
+	spin_lock_init(&pd_list->lock);
+}
 
-	if (!alloc_cpumask_var(&pd->cpumask, GFP_KERNEL))
-		goto err_free_queue;
+/* Initialize all percpu queues used by serial workers */
+static void padata_init_squeues(struct parallel_data *pd)
+{
+	int cpu;
+	struct padata_serial_queue *squeue;
 
-	cpumask_and(pd->cpumask, cpumask, cpu_active_mask);
+	for_each_cpu(cpu, pd->cpumask.cbcpu) {
+		squeue = per_cpu_ptr(pd->squeue, cpu);
+		squeue->pd = pd;
+		__padata_list_init(&squeue->serial);
+		INIT_WORK(&squeue->work, padata_serial_worker);
+	}
+}
 
-	for_each_cpu(cpu, pd->cpumask) {
-		queue = per_cpu_ptr(pd->queue, cpu);
+/* Initialize all percpu queues used by parallel workers */
+static void padata_init_pqueues(struct parallel_data *pd)
+{
+	int cpu_index, num_cpus, cpu;
+	struct padata_parallel_queue *pqueue;
 
-		queue->pd = pd;
+	cpu_index = 0;
+	for_each_cpu(cpu, pd->cpumask.pcpu) {
+		pqueue = per_cpu_ptr(pd->pqueue, cpu);
+		pqueue->pd = pd;
+		pqueue->cpu_index = cpu_index;
+
+		__padata_list_init(&pqueue->reorder);
+		__padata_list_init(&pqueue->parallel);
+		INIT_WORK(&pqueue->work, padata_parallel_worker);
+		atomic_set(&pqueue->num_obj, 0);
+	}
 
-		queue->cpu_index = cpu_index;
-		cpu_index++;
+	num_cpus = cpumask_weight(pd->cpumask.pcpu);
+	pd->max_seq_nr = (MAX_SEQ_NR / num_cpus) * num_cpus - 1;
+}
 
-		INIT_LIST_HEAD(&queue->reorder.list);
-		INIT_LIST_HEAD(&queue->parallel.list);
-		INIT_LIST_HEAD(&queue->serial.list);
-		spin_lock_init(&queue->reorder.lock);
-		spin_lock_init(&queue->parallel.lock);
-		spin_lock_init(&queue->serial.lock);
+/* Allocate and initialize the internal cpumask dependent resources. */
+static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
+					     const struct cpumask *pcpumask,
+					     const struct cpumask *cbcpumask)
+{
+	struct parallel_data *pd;
 
-		INIT_WORK(&queue->pwork, padata_parallel_worker);
-		INIT_WORK(&queue->swork, padata_serial_worker);
-	}
+	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
+	if (!pd)
+		goto err;
 
-	num_cpus = cpumask_weight(pd->cpumask);
-	pd->max_seq_nr = (MAX_SEQ_NR / num_cpus) * num_cpus - 1;
+	pd->pqueue = alloc_percpu(struct padata_parallel_queue);
+	if (!pd->pqueue)
+		goto err_free_pd;
+
+	pd->squeue = alloc_percpu(struct padata_serial_queue);
+	if (!pd->squeue)
+		goto err_free_pqueue;
+	if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0)
+		goto err_free_squeue;
 
+	padata_init_pqueues(pd);
+	padata_init_squeues(pd);
 	setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
 	atomic_set(&pd->seq_nr, -1);
 	atomic_set(&pd->reorder_objects, 0);
@@ -412,8 +450,10 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
 
 	return pd;
 
-err_free_queue:
-	free_percpu(pd->queue);
+err_free_squeue:
+	free_percpu(pd->squeue);
+err_free_pqueue:
+	free_percpu(pd->pqueue);
 err_free_pd:
 	kfree(pd);
 err:
@@ -422,8 +462,10 @@ err:
 
 static void padata_free_pd(struct parallel_data *pd)
 {
-	free_cpumask_var(pd->cpumask);
-	free_percpu(pd->queue);
+	free_cpumask_var(pd->cpumask.pcpu);
+	free_cpumask_var(pd->cpumask.cbcpu);
+	free_percpu(pd->pqueue);
+	free_percpu(pd->squeue);
 	kfree(pd);
 }
 
@@ -431,11 +473,12 @@ static void padata_free_pd(struct parallel_data *pd)
 static void padata_flush_queues(struct parallel_data *pd)
 {
 	int cpu;
-	struct padata_queue *queue;
+	struct padata_parallel_queue *pqueue;
+	struct padata_serial_queue *squeue;
 
-	for_each_cpu(cpu, pd->cpumask) {
-		queue = per_cpu_ptr(pd->queue, cpu);
-		flush_work(&queue->pwork);
+	for_each_cpu(cpu, pd->cpumask.pcpu) {
+		pqueue = per_cpu_ptr(pd->pqueue, cpu);
+		flush_work(&pqueue->work);
 	}
 
 	del_timer_sync(&pd->timer);
@@ -443,9 +486,9 @@ static void padata_flush_queues(struct parallel_data *pd)
 	if (atomic_read(&pd->reorder_objects))
 		padata_reorder(pd);
 
-	for_each_cpu(cpu, pd->cpumask) {
-		queue = per_cpu_ptr(pd->queue, cpu);
-		flush_work(&queue->swork);
+	for_each_cpu(cpu, pd->cpumask.cbcpu) {
+		squeue = per_cpu_ptr(pd->squeue, cpu);
+		flush_work(&squeue->work);
 	}
 
 	BUG_ON(atomic_read(&pd->refcnt) != 0);
@@ -475,21 +518,63 @@ static void padata_replace(struct padata_instance *pinst,
 			   struct parallel_data *pd_new)
 {
 	struct parallel_data *pd_old = pinst->pd;
+	int notification_mask = 0;
 
 	pinst->flags |= PADATA_RESET;
 
 	rcu_assign_pointer(pinst->pd, pd_new);
 
 	synchronize_rcu();
+	if (!pd_old)
+		goto out;
 
-	if (pd_old) {
-		padata_flush_queues(pd_old);
-		padata_free_pd(pd_old);
-	}
+	padata_flush_queues(pd_old);
+	if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu))
+		notification_mask |= PADATA_CPU_PARALLEL;
+	if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
+		notification_mask |= PADATA_CPU_SERIAL;
+
+	padata_free_pd(pd_old);
+	if (notification_mask)
+		blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
+					     notification_mask, pinst);
 
+out:
 	pinst->flags &= ~PADATA_RESET;
 }
 
+/**
+ * padata_register_cpumask_notifier - Registers a notifier that will be called
+ *                                    if either the pcpu or the cbcpu cpumask
+ *                                    (or both) changes.
+ *
+ * @pinst: A pointer to padata instance
+ * @nblock: A pointer to notifier block.
+ */
+int padata_register_cpumask_notifier(struct padata_instance *pinst,
+				     struct notifier_block *nblock)
+{
+	return blocking_notifier_chain_register(&pinst->cpumask_change_notifier,
+						nblock);
+}
+EXPORT_SYMBOL(padata_register_cpumask_notifier);
+
+/**
+ * padata_unregister_cpumask_notifier - Unregisters a cpumask notifier
+ *                                      registered earlier using
+ *                                      padata_register_cpumask_notifier.
+ *
+ * @pinst: A pointer to padata instance.
+ * @nblock: A pointer to notifier block.
+ */
+int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
+				       struct notifier_block *nblock)
+{
+	return blocking_notifier_chain_unregister(
+		&pinst->cpumask_change_notifier,
+		nblock);
+}
+EXPORT_SYMBOL(padata_unregister_cpumask_notifier);
+
+
 /* If cpumask contains no active cpu, we mark the instance as invalid. */
 static bool padata_validate_cpumask(struct padata_instance *pinst,
 				    const struct cpumask *cpumask)
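A sketch of how a padata user might consume the new notifier chain. The callback body is hypothetical; the registration call, the notification mask, and the PADATA_CPU_* flags are the ones introduced by this patch:

/* Hypothetical client code, for illustration only. */
static int my_cpumask_change(struct notifier_block *self,
			     unsigned long val, void *data)
{
	struct padata_instance *pinst = data;

	if (val & PADATA_CPU_PARALLEL)
		pr_info("padata %p: parallel cpumask changed\n", pinst);
	if (val & PADATA_CPU_SERIAL)
		pr_info("padata %p: serial cpumask changed\n", pinst);

	return NOTIFY_OK;
}

static struct notifier_block my_cpumask_nblock = {
	.notifier_call = my_cpumask_change,
};

/* During setup: padata_register_cpumask_notifier(pinst, &my_cpumask_nblock); */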
@@ -504,13 +589,82 @@ static bool padata_validate_cpumask(struct padata_instance *pinst,
 }
 
 /**
- * padata_set_cpumask - set the cpumask that padata should use
+ * padata_get_cpumask: Fetch the serial or parallel cpumask from the
+ *                     given padata instance and copy it to @out_mask.
+ *
+ * @pinst: A pointer to padata instance
+ * @cpumask_type: Specifies which cpumask will be copied.
+ *                Possible values are PADATA_CPU_SERIAL *or* PADATA_CPU_PARALLEL
+ *                corresponding to serial and parallel cpumask respectively.
+ * @out_mask: A pointer to cpumask structure where the selected
+ *            cpumask will be copied.
+ */
+int padata_get_cpumask(struct padata_instance *pinst,
+		       int cpumask_type, struct cpumask *out_mask)
+{
+	struct parallel_data *pd;
+	int ret = 0;
+
+	rcu_read_lock_bh();
+	pd = rcu_dereference(pinst->pd);
+	switch (cpumask_type) {
+	case PADATA_CPU_SERIAL:
+		cpumask_copy(out_mask, pd->cpumask.cbcpu);
+		break;
+	case PADATA_CPU_PARALLEL:
+		cpumask_copy(out_mask, pd->cpumask.pcpu);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	rcu_read_unlock_bh();
+	return ret;
+}
+EXPORT_SYMBOL(padata_get_cpumask);
+
+/**
+ * padata_set_cpumask: Sets the cpumask, specified by @cpumask_type, to the
+ *                     value equivalent to @cpumask.
  *
  * @pinst: padata instance
+ * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL corresponding
+ *                to serial and parallel cpumasks respectively.
  * @cpumask: the cpumask to use
  */
-int padata_set_cpumask(struct padata_instance *pinst,
+int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
 		       cpumask_var_t cpumask)
+{
+	struct cpumask *serial_mask, *parallel_mask;
+
+	switch (cpumask_type) {
+	case PADATA_CPU_PARALLEL:
+		serial_mask = pinst->cpumask.cbcpu;
+		parallel_mask = cpumask;
+		break;
+	case PADATA_CPU_SERIAL:
+		parallel_mask = pinst->cpumask.pcpu;
+		serial_mask = cpumask;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return __padata_set_cpumasks(pinst, parallel_mask, serial_mask);
+}
+EXPORT_SYMBOL(padata_set_cpumask);
+
+/**
+ * __padata_set_cpumasks - Set both parallel and serial cpumasks. The first
+ *                         one is used by parallel workers and the second one
+ *                         by the workers doing serialization.
+ *
+ * @pinst: padata instance
+ * @pcpumask: the cpumask to use for parallel workers
+ * @cbcpumask: the cpumask to use for serial workers
+ */
+int __padata_set_cpumasks(struct padata_instance *pinst,
+			  cpumask_var_t pcpumask, cpumask_var_t cbcpumask)
 {
 	int valid;
 	int err = 0;
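A hedged usage sketch for the pair of functions above. The helper name and cpu choices are hypothetical; only the padata_set_cpumask()/padata_get_cpumask() calls follow this patch:

/* Hypothetical helper: restrict parallel work to cpus 0-3. */
static int my_restrict_parallel_cpus(struct padata_instance *pinst)
{
	cpumask_var_t mask;
	int cpu, err;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(mask);
	for (cpu = 0; cpu < 4; cpu++)
		cpumask_set_cpu(cpu, mask);

	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);

	/* Read back the mask actually in use; it was ANDed against
	 * cpu_active_mask when the parallel_data was rebuilt. */
	if (!err)
		err = padata_get_cpumask(pinst, PADATA_CPU_PARALLEL, mask);

	free_cpumask_var(mask);
	return err;
}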
@@ -518,7 +672,13 @@ int padata_set_cpumask(struct padata_instance *pinst,
 
 	mutex_lock(&pinst->lock);
 
-	valid = padata_validate_cpumask(pinst, cpumask);
+	valid = padata_validate_cpumask(pinst, pcpumask);
+	if (!valid) {
+		__padata_stop(pinst);
+		goto out_replace;
+	}
+
+	valid = padata_validate_cpumask(pinst, cbcpumask);
 	if (!valid) {
 		__padata_stop(pinst);
 		goto out_replace;
@@ -526,14 +686,15 @@ int padata_set_cpumask(struct padata_instance *pinst,
 
 	get_online_cpus();
 
-	pd = padata_alloc_pd(pinst, cpumask);
+	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
 	if (!pd) {
 		err = -ENOMEM;
 		goto out;
 	}
 
 out_replace:
-	cpumask_copy(pinst->cpumask, cpumask);
+	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
+	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);
 
 	padata_replace(pinst, pd);
 
@@ -546,41 +707,57 @@ out:
 	mutex_unlock(&pinst->lock);
 
 	return err;
+
 }
-EXPORT_SYMBOL(padata_set_cpumask);
+EXPORT_SYMBOL(__padata_set_cpumasks);
 
 static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
 {
 	struct parallel_data *pd;
 
 	if (cpumask_test_cpu(cpu, cpu_active_mask)) {
-		pd = padata_alloc_pd(pinst, pinst->cpumask);
+		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
+				     pinst->cpumask.cbcpu);
 		if (!pd)
 			return -ENOMEM;
 
 		padata_replace(pinst, pd);
 
-		if (padata_validate_cpumask(pinst, pinst->cpumask))
+		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
+		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
 			__padata_start(pinst);
 	}
 
 	return 0;
 }
 
 /**
- * padata_add_cpu - add a cpu to the padata cpumask
+ * padata_add_cpu - add a cpu to one or both (parallel and serial)
+ *                  padata cpumasks.
  *
  * @pinst: padata instance
  * @cpu: cpu to add
+ * @mask: bitmask of flags specifying to which cpumask @cpu should be added.
+ *        The @mask may be any combination of the following flags:
+ *         PADATA_CPU_SERIAL - serial cpumask
+ *         PADATA_CPU_PARALLEL - parallel cpumask
  */
-int padata_add_cpu(struct padata_instance *pinst, int cpu)
+
+int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask)
 {
 	int err;
 
+	if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
+		return -EINVAL;
+
 	mutex_lock(&pinst->lock);
 
 	get_online_cpus();
-	cpumask_set_cpu(cpu, pinst->cpumask);
+	if (mask & PADATA_CPU_SERIAL)
+		cpumask_set_cpu(cpu, pinst->cpumask.cbcpu);
+	if (mask & PADATA_CPU_PARALLEL)
+		cpumask_set_cpu(cpu, pinst->cpumask.pcpu);
+
 	err = __padata_add_cpu(pinst, cpu);
 	put_online_cpus();
 
@@ -596,13 +773,15 @@ static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
 
 	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
 
-		if (!padata_validate_cpumask(pinst, pinst->cpumask)) {
+		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
+		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu)) {
 			__padata_stop(pinst);
 			padata_replace(pinst, pd);
 			goto out;
 		}
 
-		pd = padata_alloc_pd(pinst, pinst->cpumask);
+		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
+				     pinst->cpumask.cbcpu);
 		if (!pd)
 			return -ENOMEM;
 
@@ -613,20 +792,32 @@ out:
 	return 0;
 }
 
 /**
- * padata_remove_cpu - remove a cpu from the padata cpumask
+ * padata_remove_cpu - remove a cpu from one or both (serial and parallel)
+ *                     padata cpumasks.
  *
  * @pinst: padata instance
  * @cpu: cpu to remove
+ * @mask: bitmask specifying from which cpumask @cpu should be removed.
+ *        The @mask may be any combination of the following flags:
+ *         PADATA_CPU_SERIAL - serial cpumask
+ *         PADATA_CPU_PARALLEL - parallel cpumask
  */
-int padata_remove_cpu(struct padata_instance *pinst, int cpu)
+int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask)
 {
 	int err;
 
+	if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
+		return -EINVAL;
+
 	mutex_lock(&pinst->lock);
 
 	get_online_cpus();
-	cpumask_clear_cpu(cpu, pinst->cpumask);
+	if (mask & PADATA_CPU_SERIAL)
+		cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu);
+	if (mask & PADATA_CPU_PARALLEL)
+		cpumask_clear_cpu(cpu, pinst->cpumask.pcpu);
+
 	err = __padata_remove_cpu(pinst, cpu);
 	put_online_cpus();
 
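The @mask argument makes single-cpu updates symmetric with the cpumask setters. A short hypothetical sequence (the helper and the cpu number are illustrative):

/* Hypothetical helper, for illustration only. */
static int my_retune_cpu2(struct padata_instance *pinst)
{
	int err;

	/* Let cpu 2 do both kinds of work ... */
	err = padata_add_cpu(pinst, 2,
			     PADATA_CPU_PARALLEL | PADATA_CPU_SERIAL);
	if (err)
		return err;

	/* ... then retire it from parallel work only; it may still run
	 * serialization callbacks. */
	return padata_remove_cpu(pinst, 2, PADATA_CPU_PARALLEL);
}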
@@ -672,6 +863,14 @@ void padata_stop(struct padata_instance *pinst)
 EXPORT_SYMBOL(padata_stop);
 
 #ifdef CONFIG_HOTPLUG_CPU
+
+static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
+{
+	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
+	       cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
+}
+
+
 static int padata_cpu_callback(struct notifier_block *nfb,
 			       unsigned long action, void *hcpu)
 {
@@ -684,7 +883,7 @@ static int padata_cpu_callback(struct notifier_block *nfb,
 	switch (action) {
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
-		if (!cpumask_test_cpu(cpu, pinst->cpumask))
+		if (!pinst_has_cpu(pinst, cpu))
 			break;
 		mutex_lock(&pinst->lock);
 		err = __padata_add_cpu(pinst, cpu);
@@ -695,7 +894,7 @@ static int padata_cpu_callback(struct notifier_block *nfb,
 
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
-		if (!cpumask_test_cpu(cpu, pinst->cpumask))
+		if (!pinst_has_cpu(pinst, cpu))
 			break;
 		mutex_lock(&pinst->lock);
 		err = __padata_remove_cpu(pinst, cpu);
@@ -706,7 +905,7 @@ static int padata_cpu_callback(struct notifier_block *nfb,
 
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
-		if (!cpumask_test_cpu(cpu, pinst->cpumask))
+		if (!pinst_has_cpu(pinst, cpu))
 			break;
 		mutex_lock(&pinst->lock);
 		__padata_remove_cpu(pinst, cpu);
@@ -714,7 +913,7 @@ static int padata_cpu_callback(struct notifier_block *nfb,
 
 	case CPU_DOWN_FAILED:
 	case CPU_DOWN_FAILED_FROZEN:
-		if (!cpumask_test_cpu(cpu, pinst->cpumask))
+		if (!pinst_has_cpu(pinst, cpu))
 			break;
 		mutex_lock(&pinst->lock);
 		__padata_add_cpu(pinst, cpu);
@@ -726,13 +925,29 @@ static int padata_cpu_callback(struct notifier_block *nfb,
 #endif
 
 /**
- * padata_alloc - allocate and initialize a padata instance
+ * padata_alloc - Allocate and initialize padata instance.
+ *                Use the default cpumask (cpu_possible_mask)
+ *                for serial and parallel workers.
+ *
+ * @wq: workqueue to use for the allocated padata instance
+ */
+struct padata_instance *padata_alloc(struct workqueue_struct *wq)
+{
+	return __padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
+}
+EXPORT_SYMBOL(padata_alloc);
+
+/**
+ * __padata_alloc - allocate and initialize a padata instance
+ *                  and specify cpumasks for serial and parallel workers.
  *
- * @cpumask: cpumask that padata uses for parallelization
  * @wq: workqueue to use for the allocated padata instance
+ * @pcpumask: cpumask that will be used for padata parallelization
+ * @cbcpumask: cpumask that will be used for padata serialization
  */
-struct padata_instance *padata_alloc(const struct cpumask *cpumask,
-				     struct workqueue_struct *wq)
+struct padata_instance *__padata_alloc(struct workqueue_struct *wq,
+				       const struct cpumask *pcpumask,
+				       const struct cpumask *cbcpumask)
 {
 	struct padata_instance *pinst;
 	struct parallel_data *pd = NULL;
@@ -742,21 +957,26 @@ struct padata_instance *padata_alloc(const struct cpumask *cpumask,
 		goto err;
 
 	get_online_cpus();
-
-	if (!alloc_cpumask_var(&pinst->cpumask, GFP_KERNEL))
+	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
+		goto err_free_inst;
+	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
+		free_cpumask_var(pinst->cpumask.pcpu);
 		goto err_free_inst;
-
-	if (padata_validate_cpumask(pinst, cpumask)) {
-		pd = padata_alloc_pd(pinst, cpumask);
-		if (!pd)
-			goto err_free_mask;
 	}
+	if (!padata_validate_cpumask(pinst, pcpumask) ||
+	    !padata_validate_cpumask(pinst, cbcpumask))
+		goto err_free_masks;
+
+	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
+	if (!pd)
+		goto err_free_masks;
 
 	rcu_assign_pointer(pinst->pd, pd);
 
 	pinst->wq = wq;
 
-	cpumask_copy(pinst->cpumask, cpumask);
+	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
+	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);
 
 	pinst->flags = 0;
 
@@ -768,19 +988,21 @@ struct padata_instance *padata_alloc(const struct cpumask *cpumask,
 
 	put_online_cpus();
 
+	BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
 	mutex_init(&pinst->lock);
 
 	return pinst;
 
-err_free_mask:
-	free_cpumask_var(pinst->cpumask);
+err_free_masks:
+	free_cpumask_var(pinst->cpumask.pcpu);
+	free_cpumask_var(pinst->cpumask.cbcpu);
 err_free_inst:
 	kfree(pinst);
 	put_online_cpus();
 err:
 	return NULL;
 }
-EXPORT_SYMBOL(padata_alloc);
+EXPORT_SYMBOL(__padata_alloc);
 
 /**
  * padata_free - free a padata instance
@@ -795,7 +1017,8 @@ void padata_free(struct padata_instance *pinst)
 
 	padata_stop(pinst);
 	padata_free_pd(pinst->pd);
-	free_cpumask_var(pinst->cpumask);
+	free_cpumask_var(pinst->cpumask.pcpu);
+	free_cpumask_var(pinst->cpumask.cbcpu);
 	kfree(pinst);
 }
 EXPORT_SYMBOL(padata_free);
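Putting the pieces together, a sketch of an instance lifecycle under the reworked API. The workqueue name and helper are hypothetical, and padata_start() is a pre-existing padata entry point that this diff does not show; teardown would be padata_stop(), padata_free(), then destroy_workqueue():

/* Hypothetical client code, for illustration only. */
static struct padata_instance *my_padata_setup(void)
{
	struct workqueue_struct *wq;
	struct padata_instance *pinst;

	wq = create_workqueue("my_padata");
	if (!wq)
		return NULL;

	/* padata_alloc() uses cpu_possible_mask for both stages;
	 * __padata_alloc() would let us pass distinct masks instead. */
	pinst = padata_alloc(wq);
	if (!pinst) {
		destroy_workqueue(wq);
		return NULL;
	}

	padata_start(pinst);	/* existing padata call, not in this diff */
	return pinst;		/* submit jobs with padata_do_parallel() */
}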