diff options
author | Tejun Heo <tj@kernel.org> | 2012-08-03 13:30:45 -0400 |
---|---|---|
committer | Tejun Heo <tj@kernel.org> | 2012-08-03 13:30:45 -0400 |
commit | 57469821fd5c61f25f783827d7334063cff67d65 (patch) | |
tree | e77ead09d823125bc4dc9a9cd49864f9340ad363 /kernel | |
parent | d8e794dfd51c368ed3f686b7f4172830b60ae47b (diff) |
workqueue: unify local CPU queueing handling
Queueing functions have been using different methods to determine the
local CPU.
* queue_work() superfluously uses get/put_cpu() to acquire and hold the
local CPU across queue_work_on().
* delayed_work_timer_fn() uses smp_processor_id().
* queue_delayed_work() calls queue_delayed_work_on() with -1 @cpu
which is interpreted as the local CPU.
* flush_delayed_work[_sync]() were using raw_smp_processor_id().
* __queue_work() interprets %WORK_CPU_UNBOUND as local CPU if the
target workqueue is a bound one, but nobody uses this.
This patch converts all functions to uniformly use %WORK_CPU_UNBOUND
to indicate local CPU and use the local binding feature of
__queue_work(). unlikely() is dropped from %WORK_CPU_UNBOUND handling
in __queue_work().
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/workqueue.c | 19 |
1 files changed, 7 insertions, 12 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 55392385fe30..ce60bb5d12fb 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -1003,7 +1003,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, | |||
1003 | if (!(wq->flags & WQ_UNBOUND)) { | 1003 | if (!(wq->flags & WQ_UNBOUND)) { |
1004 | struct global_cwq *last_gcwq; | 1004 | struct global_cwq *last_gcwq; |
1005 | 1005 | ||
1006 | if (unlikely(cpu == WORK_CPU_UNBOUND)) | 1006 | if (cpu == WORK_CPU_UNBOUND) |
1007 | cpu = raw_smp_processor_id(); | 1007 | cpu = raw_smp_processor_id(); |
1008 | 1008 | ||
1009 | /* | 1009 | /* |
@@ -1103,12 +1103,7 @@ EXPORT_SYMBOL_GPL(queue_work_on); | |||
1103 | */ | 1103 | */ |
1104 | bool queue_work(struct workqueue_struct *wq, struct work_struct *work) | 1104 | bool queue_work(struct workqueue_struct *wq, struct work_struct *work) |
1105 | { | 1105 | { |
1106 | bool ret; | 1106 | return queue_work_on(WORK_CPU_UNBOUND, wq, work); |
1107 | |||
1108 | ret = queue_work_on(get_cpu(), wq, work); | ||
1109 | put_cpu(); | ||
1110 | |||
1111 | return ret; | ||
1112 | } | 1107 | } |
1113 | EXPORT_SYMBOL_GPL(queue_work); | 1108 | EXPORT_SYMBOL_GPL(queue_work); |
1114 | 1109 | ||
@@ -1118,7 +1113,7 @@ void delayed_work_timer_fn(unsigned long __data) | |||
1118 | struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work); | 1113 | struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work); |
1119 | 1114 | ||
1120 | local_irq_disable(); | 1115 | local_irq_disable(); |
1121 | __queue_work(smp_processor_id(), cwq->wq, &dwork->work); | 1116 | __queue_work(WORK_CPU_UNBOUND, cwq->wq, &dwork->work); |
1122 | local_irq_enable(); | 1117 | local_irq_enable(); |
1123 | } | 1118 | } |
1124 | EXPORT_SYMBOL_GPL(delayed_work_timer_fn); | 1119 | EXPORT_SYMBOL_GPL(delayed_work_timer_fn); |
@@ -1172,7 +1167,7 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, | |||
1172 | 1167 | ||
1173 | timer->expires = jiffies + delay; | 1168 | timer->expires = jiffies + delay; |
1174 | 1169 | ||
1175 | if (unlikely(cpu >= 0)) | 1170 | if (unlikely(cpu != WORK_CPU_UNBOUND)) |
1176 | add_timer_on(timer, cpu); | 1171 | add_timer_on(timer, cpu); |
1177 | else | 1172 | else |
1178 | add_timer(timer); | 1173 | add_timer(timer); |
@@ -1198,7 +1193,7 @@ bool queue_delayed_work(struct workqueue_struct *wq, | |||
1198 | if (delay == 0) | 1193 | if (delay == 0) |
1199 | return queue_work(wq, &dwork->work); | 1194 | return queue_work(wq, &dwork->work); |
1200 | 1195 | ||
1201 | return queue_delayed_work_on(-1, wq, dwork, delay); | 1196 | return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay); |
1202 | } | 1197 | } |
1203 | EXPORT_SYMBOL_GPL(queue_delayed_work); | 1198 | EXPORT_SYMBOL_GPL(queue_delayed_work); |
1204 | 1199 | ||
@@ -2868,7 +2863,7 @@ bool flush_delayed_work(struct delayed_work *dwork) | |||
2868 | { | 2863 | { |
2869 | local_irq_disable(); | 2864 | local_irq_disable(); |
2870 | if (del_timer_sync(&dwork->timer)) | 2865 | if (del_timer_sync(&dwork->timer)) |
2871 | __queue_work(raw_smp_processor_id(), | 2866 | __queue_work(WORK_CPU_UNBOUND, |
2872 | get_work_cwq(&dwork->work)->wq, &dwork->work); | 2867 | get_work_cwq(&dwork->work)->wq, &dwork->work); |
2873 | local_irq_enable(); | 2868 | local_irq_enable(); |
2874 | return flush_work(&dwork->work); | 2869 | return flush_work(&dwork->work); |
@@ -2891,7 +2886,7 @@ bool flush_delayed_work_sync(struct delayed_work *dwork) | |||
2891 | { | 2886 | { |
2892 | local_irq_disable(); | 2887 | local_irq_disable(); |
2893 | if (del_timer_sync(&dwork->timer)) | 2888 | if (del_timer_sync(&dwork->timer)) |
2894 | __queue_work(raw_smp_processor_id(), | 2889 | __queue_work(WORK_CPU_UNBOUND, |
2895 | get_work_cwq(&dwork->work)->wq, &dwork->work); | 2890 | get_work_cwq(&dwork->work)->wq, &dwork->work); |
2896 | local_irq_enable(); | 2891 | local_irq_enable(); |
2897 | return flush_work_sync(&dwork->work); | 2892 | return flush_work_sync(&dwork->work); |