author     Tejun Heo <tj@kernel.org>    2013-01-24 14:01:33 -0500
committer  Tejun Heo <tj@kernel.org>    2013-01-24 14:01:33 -0500
commit     7c3eed5cd60d0f736516e6ade77d90c6255860bd
tree       bfc017307b98a4db8c919ba9fb53399189ecf0ad /kernel
parent     9daf9e678d18585433a4ad90ec51a448e5fd054c
workqueue: record pool ID instead of CPU in work->data when off-queue
Currently, when a work item is off-queue, work->data records the CPU it was last on, which is used to locate the last executing instance for non-reentrance, flushing, etc.

We're in the process of removing global_cwq and making worker_pool the top level abstraction. This patch makes work->data point to the pool it was last associated with instead of the CPU.

After the previous WORK_OFFQ_POOL_CPU and worker_pool->id additions, the conversion is fairly straightforward. WORK_OFFQ constants and functions are modified to record and read back the pool ID instead. worker_pool_by_id() is added to allow looking up a pool from its ID. get_work_pool() replaces get_work_gcwq(), which is reimplemented using get_work_pool(). get_work_pool_id() replaces work_cpu().

This patch shouldn't introduce any observable behavior changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
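For readers unfamiliar with the off-queue encoding the message refers to, here is a minimal userspace sketch of the scheme the patch switches to: the pool ID lives in the bits above WORK_OFFQ_POOL_SHIFT while the low bits keep the OFFQ flags. The constant names mirror the kernel's, but the values and the offq_pack()/offq_unpack() helpers are illustrative, not the kernel implementation.

/*
 * Illustrative sketch of the off-queue work->data layout: pool ID in the
 * high bits, flags in the low bits.  Constant values are made up; the
 * real ones live in include/linux/workqueue.h.
 */
#include <assert.h>
#include <stdio.h>

#define WORK_OFFQ_CANCELING     (1UL << 1)        /* work is being canceled */
#define WORK_OFFQ_POOL_SHIFT    5                 /* pool ID starts above the flag bits */
#define WORK_OFFQ_POOL_NONE     ((1UL << 10) - 1) /* "never associated with a pool" */

/* Pack a pool ID into the high bits, keeping only the requested flags. */
static unsigned long offq_pack(int pool_id, unsigned long flags)
{
        return ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) | flags;
}

/* Recover the pool ID from an off-queue data word. */
static int offq_unpack(unsigned long data)
{
        return data >> WORK_OFFQ_POOL_SHIFT;
}

int main(void)
{
        unsigned long data = offq_pack(3, WORK_OFFQ_CANCELING);

        assert(offq_unpack(data) == 3);
        assert(data & WORK_OFFQ_CANCELING);
        assert(offq_unpack(offq_pack(WORK_OFFQ_POOL_NONE, 0)) == WORK_OFFQ_POOL_NONE);
        printf("pool id %d round-trips through the high bits\n", offq_unpack(data));
        return 0;
}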
Diffstat (limited to 'kernel')
-rw-r--r--    kernel/workqueue.c    111
1 file changed, 67 insertions(+), 44 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 9c6ad974bb9e..a4d7e3f0a874 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -451,7 +451,6 @@ static DEFINE_MUTEX(worker_pool_idr_mutex);
 static DEFINE_IDR(worker_pool_idr);
 
 static int worker_thread(void *__worker);
-static unsigned int work_cpu(struct work_struct *work);
 
 static int std_worker_pool_pri(struct worker_pool *pool)
 {
@@ -479,6 +478,15 @@ static int worker_pool_assign_id(struct worker_pool *pool)
         return ret;
 }
 
+/*
+ * Lookup worker_pool by id. The idr currently is built during boot and
+ * never modified. Don't worry about locking for now.
+ */
+static struct worker_pool *worker_pool_by_id(int pool_id)
+{
+        return idr_find(&worker_pool_idr, pool_id);
+}
+
 static atomic_t *get_pool_nr_running(struct worker_pool *pool)
 {
         int cpu = pool->gcwq->cpu;
@@ -520,17 +528,17 @@ static int work_next_color(int color)
 /*
  * While queued, %WORK_STRUCT_CWQ is set and non flag bits of a work's data
  * contain the pointer to the queued cwq. Once execution starts, the flag
- * is cleared and the high bits contain OFFQ flags and CPU number.
+ * is cleared and the high bits contain OFFQ flags and pool ID.
  *
- * set_work_cwq(), set_work_cpu_and_clear_pending(), mark_work_canceling()
- * and clear_work_data() can be used to set the cwq, cpu or clear
+ * set_work_cwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
+ * and clear_work_data() can be used to set the cwq, pool or clear
  * work->data. These functions should only be called while the work is
  * owned - ie. while the PENDING bit is set.
  *
- * get_work_[g]cwq() can be used to obtain the gcwq or cwq corresponding to
- * a work. gcwq is available once the work has been queued anywhere after
- * initialization until it is sync canceled. cwq is available only while
- * the work item is queued.
+ * get_work_pool() and get_work_cwq() can be used to obtain the pool or cwq
+ * corresponding to a work. Pool is available once the work has been
+ * queued anywhere after initialization until it is sync canceled. cwq is
+ * available only while the work item is queued.
  *
  * %WORK_OFFQ_CANCELING is used to mark a work item which is being
  * canceled. While being canceled, a work item may have its PENDING set
@@ -552,8 +560,8 @@ static void set_work_cwq(struct work_struct *work,
                       WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
 }
 
-static void set_work_cpu_and_clear_pending(struct work_struct *work,
-                                           unsigned int cpu)
+static void set_work_pool_and_clear_pending(struct work_struct *work,
+                                            int pool_id)
 {
         /*
          * The following wmb is paired with the implied mb in
@@ -562,13 +570,13 @@ static void set_work_cpu_and_clear_pending(struct work_struct *work,
          * owner.
          */
         smp_wmb();
-        set_work_data(work, (unsigned long)cpu << WORK_OFFQ_CPU_SHIFT, 0);
+        set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
 }
 
 static void clear_work_data(struct work_struct *work)
 {
-        smp_wmb();        /* see set_work_cpu_and_clear_pending() */
-        set_work_data(work, WORK_STRUCT_NO_CPU, 0);
+        smp_wmb();        /* see set_work_pool_and_clear_pending() */
+        set_work_data(work, WORK_STRUCT_NO_POOL, 0);
 }
 
 static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
@@ -581,30 +589,58 @@ static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
                 return NULL;
 }
 
-static struct global_cwq *get_work_gcwq(struct work_struct *work)
+/**
+ * get_work_pool - return the worker_pool a given work was associated with
+ * @work: the work item of interest
+ *
+ * Return the worker_pool @work was last associated with. %NULL if none.
+ */
+static struct worker_pool *get_work_pool(struct work_struct *work)
 {
         unsigned long data = atomic_long_read(&work->data);
-        unsigned int cpu;
+        struct worker_pool *pool;
+        int pool_id;
 
         if (data & WORK_STRUCT_CWQ)
                 return ((struct cpu_workqueue_struct *)
-                        (data & WORK_STRUCT_WQ_DATA_MASK))->pool->gcwq;
+                        (data & WORK_STRUCT_WQ_DATA_MASK))->pool;
 
-        cpu = data >> WORK_OFFQ_CPU_SHIFT;
-        if (cpu == WORK_OFFQ_CPU_NONE)
+        pool_id = data >> WORK_OFFQ_POOL_SHIFT;
+        if (pool_id == WORK_OFFQ_POOL_NONE)
                 return NULL;
 
-        BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
-        return get_gcwq(cpu);
+        pool = worker_pool_by_id(pool_id);
+        WARN_ON_ONCE(!pool);
+        return pool;
+}
+
+/**
+ * get_work_pool_id - return the worker pool ID a given work is associated with
+ * @work: the work item of interest
+ *
+ * Return the worker_pool ID @work was last associated with.
+ * %WORK_OFFQ_POOL_NONE if none.
+ */
+static int get_work_pool_id(struct work_struct *work)
+{
+        struct worker_pool *pool = get_work_pool(work);
+
+        return pool ? pool->id : WORK_OFFQ_POOL_NONE;
+}
+
+static struct global_cwq *get_work_gcwq(struct work_struct *work)
+{
+        struct worker_pool *pool = get_work_pool(work);
+
+        return pool ? pool->gcwq : NULL;
 }
 
 static void mark_work_canceling(struct work_struct *work)
 {
-        struct global_cwq *gcwq = get_work_gcwq(work);
-        unsigned long cpu = gcwq ? gcwq->cpu : WORK_OFFQ_CPU_NONE;
+        unsigned long pool_id = get_work_pool_id(work);
 
-        set_work_data(work, (cpu << WORK_OFFQ_CPU_SHIFT) | WORK_OFFQ_CANCELING,
-                      WORK_STRUCT_PENDING);
+        pool_id <<= WORK_OFFQ_POOL_SHIFT;
+        set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
 }
 
 static bool work_is_canceling(struct work_struct *work)
@@ -2192,12 +2228,12 @@ __acquires(&gcwq->lock)
         wake_up_worker(pool);
 
         /*
-         * Record the last CPU and clear PENDING which should be the last
+         * Record the last pool and clear PENDING which should be the last
          * update to @work. Also, do this inside @gcwq->lock so that
          * PENDING and queued state changes happen together while IRQ is
          * disabled.
          */
-        set_work_cpu_and_clear_pending(work, gcwq->cpu);
+        set_work_pool_and_clear_pending(work, pool->id);
 
         spin_unlock_irq(&gcwq->lock);
 
@@ -2967,7 +3003,8 @@ bool cancel_delayed_work(struct delayed_work *dwork)
         if (unlikely(ret < 0))
                 return false;
 
-        set_work_cpu_and_clear_pending(&dwork->work, work_cpu(&dwork->work));
+        set_work_pool_and_clear_pending(&dwork->work,
+                                        get_work_pool_id(&dwork->work));
         local_irq_restore(flags);
         return ret;
 }
@@ -3431,20 +3468,6 @@ bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
 EXPORT_SYMBOL_GPL(workqueue_congested);
 
 /**
- * work_cpu - return the last known associated cpu for @work
- * @work: the work of interest
- *
- * RETURNS:
- * CPU number if @work was ever queued. WORK_CPU_NONE otherwise.
- */
-static unsigned int work_cpu(struct work_struct *work)
-{
-        struct global_cwq *gcwq = get_work_gcwq(work);
-
-        return gcwq ? gcwq->cpu : WORK_CPU_NONE;
-}
-
-/**
  * work_busy - test whether a work is currently pending or running
  * @work: the work to be tested
  *
@@ -3816,9 +3839,9 @@ static int __init init_workqueues(void)
 {
         unsigned int cpu;
 
-        /* make sure we have enough bits for OFFQ CPU number */
-        BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_CPU_SHIFT)) <
-                     WORK_CPU_LAST);
+        /* make sure we have enough bits for OFFQ pool ID */
+        BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) <
+                     WORK_CPU_LAST * NR_STD_WORKER_POOLS);
 
         cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
         hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
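As a side note, the compile-time check in the hunk above only verifies that the bits left over above WORK_OFFQ_POOL_SHIFT can represent every standard pool ID: one set of NR_STD_WORKER_POOLS pools (normal and highpri) per possible CPU slot, hence the new multiplier. A runnable sketch of the same arithmetic, using illustrative constant values rather than the kernel's:

/* Standalone check of the pool-ID bit budget; all values are illustrative. */
#include <assert.h>

#define BITS_PER_LONG           (8 * (int)sizeof(long))
#define WORK_OFFQ_POOL_SHIFT    5    /* flag bits below, pool ID above */
#define WORK_CPU_LAST           64   /* possible CPUs plus the unbound slot */
#define NR_STD_WORKER_POOLS     2    /* normal and highpri pool per slot */

int main(void)
{
        /*
         * The pool ID occupies the bits above WORK_OFFQ_POOL_SHIFT, so the
         * number of representable IDs must cover every standard pool.
         */
        assert((1UL << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) >=
               (unsigned long)WORK_CPU_LAST * NR_STD_WORKER_POOLS);
        return 0;
}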