diff options
author | Steffen Klassert <steffen.klassert@secunet.com> | 2010-05-18 23:43:46 -0400 |
---|---|---|
committer | Herbert Xu <herbert@gondor.apana.org.au> | 2010-05-18 23:43:46 -0400 |
commit | 2b73b07ab8a44ce171e07a328439f311481a7ea7 (patch) | |
tree | 0a050dbe8eca78e1a8efff9b023e192c1f11633f /kernel | |
parent | d46a5ac7a7e2045e33c6ad6ffb8cf18a7e86a15a (diff) |
padata: Flush the padata queues actively
yield() was used to wait until all references to the internal control
structure still in use were dropped before it was freed. This patch
implements padata_flush_queues, which actively flushes the padata
percpu queues in this case.
Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/padata.c | 33 |
1 files changed, 25 insertions, 8 deletions
diff --git a/kernel/padata.c b/kernel/padata.c index 6d7ea481b716..ec6b8b7cf951 100644 --- a/kernel/padata.c +++ b/kernel/padata.c | |||
@@ -417,6 +417,29 @@ static void padata_free_pd(struct parallel_data *pd) | |||
417 | kfree(pd); | 417 | kfree(pd); |
418 | } | 418 | } |
419 | 419 | ||
420 | static void padata_flush_queues(struct parallel_data *pd) | ||
421 | { | ||
422 | int cpu; | ||
423 | struct padata_queue *queue; | ||
424 | |||
425 | for_each_cpu(cpu, pd->cpumask) { | ||
426 | queue = per_cpu_ptr(pd->queue, cpu); | ||
427 | flush_work(&queue->pwork); | ||
428 | } | ||
429 | |||
430 | del_timer_sync(&pd->timer); | ||
431 | |||
432 | if (atomic_read(&pd->reorder_objects)) | ||
433 | padata_reorder(pd); | ||
434 | |||
435 | for_each_cpu(cpu, pd->cpumask) { | ||
436 | queue = per_cpu_ptr(pd->queue, cpu); | ||
437 | flush_work(&queue->swork); | ||
438 | } | ||
439 | |||
440 | BUG_ON(atomic_read(&pd->refcnt) != 0); | ||
441 | } | ||
442 | |||
420 | static void padata_replace(struct padata_instance *pinst, | 443 | static void padata_replace(struct padata_instance *pinst, |
421 | struct parallel_data *pd_new) | 444 | struct parallel_data *pd_new) |
422 | { | 445 | { |
@@ -428,11 +451,7 @@ static void padata_replace(struct padata_instance *pinst, | |||
428 | 451 | ||
429 | synchronize_rcu(); | 452 | synchronize_rcu(); |
430 | 453 | ||
431 | while (atomic_read(&pd_old->refcnt) != 0) | 454 | padata_flush_queues(pd_old); |
432 | yield(); | ||
433 | |||
434 | flush_workqueue(pinst->wq); | ||
435 | |||
436 | padata_free_pd(pd_old); | 455 | padata_free_pd(pd_old); |
437 | 456 | ||
438 | pinst->flags &= ~PADATA_RESET; | 457 | pinst->flags &= ~PADATA_RESET; |
@@ -695,12 +714,10 @@ void padata_free(struct padata_instance *pinst) | |||
695 | 714 | ||
696 | synchronize_rcu(); | 715 | synchronize_rcu(); |
697 | 716 | ||
698 | while (atomic_read(&pinst->pd->refcnt) != 0) | ||
699 | yield(); | ||
700 | |||
701 | #ifdef CONFIG_HOTPLUG_CPU | 717 | #ifdef CONFIG_HOTPLUG_CPU |
702 | unregister_hotcpu_notifier(&pinst->cpu_notifier); | 718 | unregister_hotcpu_notifier(&pinst->cpu_notifier); |
703 | #endif | 719 | #endif |
720 | padata_flush_queues(pinst->pd); | ||
704 | padata_free_pd(pinst->pd); | 721 | padata_free_pd(pinst->pd); |
705 | free_cpumask_var(pinst->cpumask); | 722 | free_cpumask_var(pinst->cpumask); |
706 | kfree(pinst); | 723 | kfree(pinst); |