author     Tejun Heo <tj@kernel.org>   2012-07-17 15:39:28 -0400
committer  Tejun Heo <tj@kernel.org>   2012-07-17 15:39:28 -0400
commit     8db25e7891a47e03db6f04344a9c92be16e391bb (patch)
tree       e093119c71e655b54b159fed76b654a437b1ff30 /kernel/workqueue.c
parent     628c78e7ea19d5b70d2b6a59030362168cdbe1ad (diff)
workqueue: simplify CPU hotplug code
With trustee gone, CPU hotplug code can be simplified.

* gcwq_claim/release_management() now grab and release gcwq lock too
  respectively and gained _and_lock and _and_unlock postfixes.

* All CPU hotplug logic was implemented in workqueue_cpu_callback()
  which was called by workqueue_cpu_up/down_callback() for the correct
  priority.  This was because up and down paths shared a lot of logic,
  which is no longer true.  Remove workqueue_cpu_callback() and move
  all hotplug logic into the two actual callbacks.

This patch doesn't make any functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: "Rafael J. Wysocki" <rjw@sisk.pl>
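To make the locking change in the first point concrete, here is a minimal,
standalone userspace sketch of the pairing the renamed helpers enforce.
Pthread mutexes stand in for the kernel's manager_mutex and gcwq->lock, and
every name (pool_group, claim_management_and_lock(), ...) is invented for
illustration; this is not code from the patch.

/*
 * Userspace sketch only: one helper claims every pool's manager mutex and
 * then takes the shared lock; its counterpart undoes both in reverse order.
 */
#include <pthread.h>
#include <stdio.h>

#define NR_POOLS 2

struct pool_group {
        pthread_mutex_t manager_mutex[NR_POOLS]; /* one per worker pool */
        pthread_mutex_t lock;                    /* stands in for gcwq->lock */
        int disassociated;                       /* stands in for GCWQ_DISASSOCIATED */
};

/* claim every pool's manager position, then take the shared lock */
static void claim_management_and_lock(struct pool_group *g)
{
        for (int i = 0; i < NR_POOLS; i++)
                pthread_mutex_lock(&g->manager_mutex[i]);
        pthread_mutex_lock(&g->lock);
}

/* drop the shared lock first, then release the manager positions */
static void release_management_and_unlock(struct pool_group *g)
{
        pthread_mutex_unlock(&g->lock);
        for (int i = 0; i < NR_POOLS; i++)
                pthread_mutex_unlock(&g->manager_mutex[i]);
}

int main(void)
{
        struct pool_group g = {
                .manager_mutex = { PTHREAD_MUTEX_INITIALIZER,
                                   PTHREAD_MUTEX_INITIALIZER },
                .lock = PTHREAD_MUTEX_INITIALIZER,
        };

        /* mirrors the shape of the CPU_DOWN_PREPARE path in the patch:
         * mark the group disassociated while both levels are held */
        claim_management_and_lock(&g);
        g.disassociated = 1;
        release_management_and_unlock(&g);

        printf("disassociated = %d\n", g.disassociated);
        return 0;
}

Because both steps live in a single helper pair, callers can no longer
interleave the manager mutexes and the spinlock in different orders, which
is what the _and_lock/_and_unlock postfixes are meant to signal.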
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c | 79
1 file changed, 25 insertions(+), 54 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index d1545daa74ad..471996a81633 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3358,19 +3358,21 @@ EXPORT_SYMBOL_GPL(work_busy);
  */
 
 /* claim manager positions of all pools */
-static void gcwq_claim_management(struct global_cwq *gcwq)
+static void gcwq_claim_management_and_lock(struct global_cwq *gcwq)
 {
        struct worker_pool *pool;
 
        for_each_worker_pool(pool, gcwq)
                mutex_lock_nested(&pool->manager_mutex, pool - gcwq->pools);
+       spin_lock_irq(&gcwq->lock);
 }
 
 /* release manager positions */
-static void gcwq_release_management(struct global_cwq *gcwq)
+static void gcwq_release_management_and_unlock(struct global_cwq *gcwq)
 {
        struct worker_pool *pool;
 
+       spin_unlock_irq(&gcwq->lock);
        for_each_worker_pool(pool, gcwq)
                mutex_unlock(&pool->manager_mutex);
 }
@@ -3385,8 +3387,7 @@ static void gcwq_unbind_fn(struct work_struct *work)
 
        BUG_ON(gcwq->cpu != smp_processor_id());
 
-       gcwq_claim_management(gcwq);
-       spin_lock_irq(&gcwq->lock);
+       gcwq_claim_management_and_lock(gcwq);
 
        /*
         * We've claimed all manager positions. Make all workers unbound
@@ -3403,8 +3404,7 @@ static void gcwq_unbind_fn(struct work_struct *work)
 
        gcwq->flags |= GCWQ_DISASSOCIATED;
 
-       spin_unlock_irq(&gcwq->lock);
-       gcwq_release_management(gcwq);
+       gcwq_release_management_and_unlock(gcwq);
 
        /*
         * Call schedule() so that we cross rq->lock and thus can guarantee
@@ -3428,26 +3428,19 @@ static void gcwq_unbind_fn(struct work_struct *work)
                atomic_set(get_pool_nr_running(pool), 0);
 }
 
-static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
-                                           unsigned long action,
-                                           void *hcpu)
+/*
+ * Workqueues should be brought up before normal priority CPU notifiers.
+ * This will be registered high priority CPU notifier.
+ */
+static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
+                                              unsigned long action,
+                                              void *hcpu)
 {
        unsigned int cpu = (unsigned long)hcpu;
        struct global_cwq *gcwq = get_gcwq(cpu);
        struct worker_pool *pool;
-       struct work_struct unbind_work;
-       unsigned long flags;
-
-       action &= ~CPU_TASKS_FROZEN;
-
-       switch (action) {
-       case CPU_DOWN_PREPARE:
-               /* unbinding should happen on the local CPU */
-               INIT_WORK_ONSTACK(&unbind_work, gcwq_unbind_fn);
-               schedule_work_on(cpu, &unbind_work);
-               flush_work(&unbind_work);
-               break;
 
+       switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_UP_PREPARE:
                for_each_worker_pool(pool, gcwq) {
                        struct worker *worker;
@@ -3463,45 +3456,16 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                        start_worker(worker);
                        spin_unlock_irq(&gcwq->lock);
                }
-       }
-
-       /* some are called w/ irq disabled, don't disturb irq status */
-       spin_lock_irqsave(&gcwq->lock, flags);
+               break;
 
-       switch (action) {
        case CPU_DOWN_FAILED:
        case CPU_ONLINE:
-               spin_unlock_irq(&gcwq->lock);
-               gcwq_claim_management(gcwq);
-               spin_lock_irq(&gcwq->lock);
-
+               gcwq_claim_management_and_lock(gcwq);
                gcwq->flags &= ~GCWQ_DISASSOCIATED;
-
                rebind_workers(gcwq);
-
-               gcwq_release_management(gcwq);
+               gcwq_release_management_and_unlock(gcwq);
                break;
        }
-
-       spin_unlock_irqrestore(&gcwq->lock, flags);
-
-       return notifier_from_errno(0);
-}
-
-/*
- * Workqueues should be brought up before normal priority CPU notifiers.
- * This will be registered high priority CPU notifier.
- */
-static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
-                                              unsigned long action,
-                                              void *hcpu)
-{
-       switch (action & ~CPU_TASKS_FROZEN) {
-       case CPU_UP_PREPARE:
-       case CPU_DOWN_FAILED:
-       case CPU_ONLINE:
-               return workqueue_cpu_callback(nfb, action, hcpu);
-       }
        return NOTIFY_OK;
 }
 
@@ -3513,9 +3477,16 @@ static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
                                                unsigned long action,
                                                void *hcpu)
 {
+       unsigned int cpu = (unsigned long)hcpu;
+       struct work_struct unbind_work;
+
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_DOWN_PREPARE:
-               return workqueue_cpu_callback(nfb, action, hcpu);
+               /* unbinding should happen on the local CPU */
+               INIT_WORK_ONSTACK(&unbind_work, gcwq_unbind_fn);
+               schedule_work_on(cpu, &unbind_work);
+               flush_work(&unbind_work);
+               break;
        }
        return NOTIFY_OK;
 }
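For context on the comment the patch adds above workqueue_cpu_up_callback()
(workqueues must be brought up before normal priority CPU notifiers), the two
callbacks are registered at different notifier priorities. A rough sketch of
what that registration looks like, assuming the cpu_notifier()/hotcpu_notifier()
helpers and the CPU_PRI_WORKQUEUE_UP/DOWN priorities of this kernel era rather
than anything shown in this diff (init_workqueues() in the tree is the
authoritative source):

        /* illustrative registration, not part of this patch */
        cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
        hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);

With the shared workqueue_cpu_callback() gone, each callback now handles only
the notifier events relevant to its own priority.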