 kernel/workqueue.c | 28 ++++++++++++++++++----------
 1 file changed, 18 insertions(+), 10 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 7d1ebfc1a995..d107e1c3b071 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -90,18 +90,20 @@ static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
  * Set the workqueue on which a work item is to be run
  * - Must *only* be called if the pending flag is set
  */
-static inline void set_wq_data(struct work_struct *work, void *wq)
+static inline void set_wq_data(struct work_struct *work,
+			       struct cpu_workqueue_struct *cwq)
 {
 	unsigned long new;
 
 	BUG_ON(!work_pending(work));
 
-	new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
+	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
 	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
 	atomic_long_set(&work->data, new);
 }
 
-static inline void *get_wq_data(struct work_struct *work)
+static inline
+struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
 {
 	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
 }
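The set_wq_data()/get_wq_data() pair above relies on a cpu_workqueue_struct pointer being word-aligned: its low bits are guaranteed zero, so they can carry the WORK_STRUCT_* flag bits in the same atomic_long_t word. Below is a minimal stand-alone C sketch of that pointer-packing scheme; every DEMO_* and demo_* name is illustrative, not kernel API.

/*
 * User-space sketch of packing a pointer and flag bits into one word.
 * Works because the pointed-to struct is aligned, so the pointer's low
 * bits are always zero and are free to hold flags.
 */
#include <assert.h>
#include <stdio.h>

#define DEMO_PENDING	0			/* bit 0: work is pending */
#define DEMO_FLAG_MASK	3UL			/* low bits reserved for flags */
#define DEMO_DATA_MASK	(~DEMO_FLAG_MASK)	/* the pointer part */

struct demo_cwq { int cpu; } __attribute__((aligned(4)));

static unsigned long pack(struct demo_cwq *cwq, unsigned long old)
{
	unsigned long new = (unsigned long) cwq | (1UL << DEMO_PENDING);
	return new | (DEMO_FLAG_MASK & old);	/* preserve existing flags */
}

static struct demo_cwq *unpack(unsigned long word)
{
	return (struct demo_cwq *) (word & DEMO_DATA_MASK);
}

int main(void)
{
	struct demo_cwq cwq = { .cpu = 1 };
	unsigned long word = pack(&cwq, 0);

	assert(unpack(word) == &cwq);		/* pointer survives packing */
	assert(word & (1UL << DEMO_PENDING));	/* flag bit is set */
	printf("cpu=%d\n", unpack(word)->cpu);
	return 0;
}

Masking with DEMO_DATA_MASK on the way out is the analogue of what WORK_STRUCT_WQ_DATA_MASK does in get_wq_data().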
@@ -157,7 +159,8 @@ EXPORT_SYMBOL_GPL(queue_work);
 void delayed_work_timer_fn(unsigned long __data)
 {
 	struct delayed_work *dwork = (struct delayed_work *)__data;
-	struct workqueue_struct *wq = get_wq_data(&dwork->work);
+	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
+	struct workqueue_struct *wq = cwq->wq;
 	int cpu = smp_processor_id();
 
 	if (unlikely(is_single_threaded(wq)))
@@ -189,8 +192,9 @@ int fastcall queue_delayed_work(struct workqueue_struct *wq,
 	BUG_ON(timer_pending(timer));
 	BUG_ON(!list_empty(&work->entry));
 
-	/* This stores wq for the moment, for the timer_fn */
-	set_wq_data(work, wq);
+	/* This stores cwq for the moment, for the timer_fn */
+	set_wq_data(work,
+		    per_cpu_ptr(wq->cpu_wq, raw_smp_processor_id()));
 	timer->expires = jiffies + delay;
 	timer->data = (unsigned long)dwork;
 	timer->function = delayed_work_timer_fn;
@@ -221,8 +225,9 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 	BUG_ON(timer_pending(timer));
 	BUG_ON(!list_empty(&work->entry));
 
-	/* This stores wq for the moment, for the timer_fn */
-	set_wq_data(work, wq);
+	/* This stores cwq for the moment, for the timer_fn */
+	set_wq_data(work,
+		    per_cpu_ptr(wq->cpu_wq, raw_smp_processor_id()));
 	timer->expires = jiffies + delay;
 	timer->data = (unsigned long)dwork;
 	timer->function = delayed_work_timer_fn;
@@ -562,9 +567,12 @@ void flush_work_keventd(struct work_struct *work)
 EXPORT_SYMBOL(flush_work_keventd);
 
 /**
- * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
+ * cancel_rearming_delayed_workqueue - kill off a delayed work whose handler rearms the delayed work.
  * @wq: the controlling workqueue structure
  * @dwork: the delayed work struct
+ *
+ * Note that the work callback function may still be running on return from
+ * cancel_delayed_work(). Run flush_workqueue() or flush_work() to wait on it.
  */
 void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
 				       struct delayed_work *dwork)
@@ -579,7 +587,7 @@ void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
 EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
 
 /**
- * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
+ * cancel_rearming_delayed_work - kill off a delayed keventd work whose handler rearms the delayed work.
  * @dwork: the delayed work struct
  */
 void cancel_rearming_delayed_work(struct delayed_work *dwork)
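As the kerneldoc added above warns, the work callback may still be running when cancellation returns. A hedged sketch of the cancel-then-flush pattern the comment recommends; my_teardown() and its parameters are hypothetical, while the two functions it calls are the ones exported in this file:

#include <linux/workqueue.h>

/*
 * Hypothetical caller: cancel a self-rearming delayed work, then flush
 * the workqueue so any callback that was already running has finished
 * before we tear anything down.
 */
static void my_teardown(struct workqueue_struct *my_wq,
			struct delayed_work *my_dwork)
{
	cancel_rearming_delayed_workqueue(my_wq, my_dwork);
	flush_workqueue(my_wq);		/* wait out an in-flight callback */
}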