Diffstat (limited to 'kernel/workqueue.c')

 kernel/workqueue.c | 28 +++++++++++++++-------------
 1 file changed, 15 insertions(+), 13 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index db49886bfae1..a3da07c5af28 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -96,13 +96,13 @@ static inline void set_wq_data(struct work_struct *work, void *wq)
         BUG_ON(!work_pending(work));
 
         new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
-        new |= work->management & WORK_STRUCT_FLAG_MASK;
-        work->management = new;
+        new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
+        atomic_long_set(&work->data, new);
 }
 
 static inline void *get_wq_data(struct work_struct *work)
 {
-        return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK);
+        return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
 }
 
 static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
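
Note: the two accessors above now go through work_data_bits() and atomic_long_set()/atomic_long_read(), which implies the old unsigned long ->management field has become an atomic_long_t called ->data, with the flag bits packed into its low bits and the workqueue pointer in the rest. A rough sketch of the matching declarations one would expect in include/linux/workqueue.h (assumed here, not part of this diff):

/* Sketch only -- assumed layout of the reworked work_struct. */
typedef void (*work_func_t)(struct work_struct *work);

struct work_struct {
        atomic_long_t data;             /* flag bits low, wq pointer above them */
        struct list_head entry;         /* node on the per-CPU worklist */
        work_func_t func;               /* handler invoked as f(work) below */
};

/* Assumed helper: view the atomic word as a plain bitmap so the generic
 * test_bit()/set_bit()/test_and_set_bit() can operate on it. */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))
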
@@ -133,7 +133,7 @@ static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
         list_del_init(&work->entry);
         spin_unlock_irqrestore(&cwq->lock, flags);
 
-        if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+        if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
                 work_release(work);
         f(work);
 
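
Note: both here and in run_workqueue() further down, the item is released before its handler f(work) runs unless it carries the NOAUTOREL flag. Releasing presumably just drops the PENDING bit in the same data word, so the item can be queued again while its handler is still executing. A minimal sketch of work_release() under that assumption:

/* Sketch only -- assumed definition, consistent with the bit layout used
 * in this patch: releasing == clearing the pending bit. */
#define work_release(work) \
        clear_bit(WORK_STRUCT_PENDING, work_data_bits(work))
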
@@ -206,7 +206,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
         int ret = 0, cpu = get_cpu();
 
-        if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
+        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                 if (unlikely(is_single_threaded(wq)))
                         cpu = singlethread_cpu;
                 BUG_ON(!list_empty(&work->entry));
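
Note: for callers nothing changes here. queue_work() still claims the PENDING bit atomically, so a second submission made before the handler has run simply returns 0 instead of double-queuing the item. A usage sketch, with my_handler/my_work/my_wq as made-up names:

/* Usage sketch (hypothetical caller, not part of this patch). */
static void my_handler(struct work_struct *work)
{
        /* do the deferred work */
}

static DECLARE_WORK(my_work, my_handler);

static void kick(struct workqueue_struct *my_wq)
{
        if (!queue_work(my_wq, &my_work))
                ; /* already pending: the earlier submission will run it */
}
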
@@ -233,7 +233,7 @@ static void delayed_work_timer_fn(unsigned long __data)
 /**
  * queue_delayed_work - queue work on a workqueue after delay
  * @wq: workqueue to use
- * @work: delayable work to queue
+ * @dwork: delayable work to queue
  * @delay: number of jiffies to wait before queueing
  *
  * Returns 0 if @work was already on a queue, non-zero otherwise.
@@ -248,7 +248,7 @@ int fastcall queue_delayed_work(struct workqueue_struct *wq,
         if (delay == 0)
                 return queue_work(wq, work);
 
-        if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
+        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                 BUG_ON(timer_pending(timer));
                 BUG_ON(!list_empty(&work->entry));
 
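
Note: queue_delayed_work() takes the delayable variant. Judging from the timer and embedded work dereferenced here and in the queue_delayed_work_on() hunk below, struct delayed_work presumably just pairs a work_struct with the timer that queues it after @delay:

/* Sketch only -- assumed shape of the delayable variant. */
struct delayed_work {
        struct work_struct work;        /* what eventually gets queued */
        struct timer_list timer;        /* fires after @delay jiffies */
};
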
@@ -268,7 +268,7 @@ EXPORT_SYMBOL_GPL(queue_delayed_work);
  * queue_delayed_work_on - queue work on specific CPU after delay
  * @cpu: CPU number to execute work on
  * @wq: workqueue to use
- * @work: work to queue
+ * @dwork: work to queue
  * @delay: number of jiffies to wait before queueing
  *
  * Returns 0 if @work was already on a queue, non-zero otherwise.
@@ -280,7 +280,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
         struct timer_list *timer = &dwork->timer;
         struct work_struct *work = &dwork->work;
 
-        if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
+        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                 BUG_ON(timer_pending(timer));
                 BUG_ON(!list_empty(&work->entry));
 
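
Note: usage is symmetrical with queue_work(). A sketch reusing the hypothetical names from above:

/* Usage sketch (hypothetical caller): run my_handler two seconds from now,
 * either on whichever CPU the timer fires on or pinned to CPU 1. A call
 * made while the item is still pending returns 0. */
static DECLARE_DELAYED_WORK(my_dwork, my_handler);

static void kick_later(struct workqueue_struct *my_wq)
{
        queue_delayed_work(my_wq, &my_dwork, 2 * HZ);
}

static void kick_later_on_cpu1(struct workqueue_struct *my_wq)
{
        queue_delayed_work_on(1, my_wq, &my_dwork, 2 * HZ);
}
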
@@ -321,7 +321,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
                 spin_unlock_irqrestore(&cwq->lock, flags);
 
                 BUG_ON(get_wq_data(work) != cwq);
-                if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+                if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
                         work_release(work);
                 f(work);
 
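
Note: because the release above happens before f(work), an ordinary handler may safely requeue its own work item from inside f(). An item left with NOAUTOREL set keeps its PENDING bit across the call, and its handler has to drop it explicitly:

/* Sketch only -- what the NOAUTOREL branch implies for a handler whose work
 * item was set up as non-auto-releasing (my_nar_handler is a made-up name). */
static void my_nar_handler(struct work_struct *work)
{
        /* the item cannot be requeued yet, so whatever contains it is
         * stable while we run */
        work_release(work);     /* from here on it may be queued again */
}
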
@@ -637,9 +637,11 @@ int schedule_on_each_cpu(work_func_t func)
 
         mutex_lock(&workqueue_mutex);
         for_each_online_cpu(cpu) {
-                INIT_WORK(per_cpu_ptr(works, cpu), func);
-                __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
-                                per_cpu_ptr(works, cpu));
+                struct work_struct *work = per_cpu_ptr(works, cpu);
+
+                INIT_WORK(work, func);
+                set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
+                __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
         }
         mutex_unlock(&workqueue_mutex);
         flush_workqueue(keventd_wq);
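
Note: the extra set_bit() is needed here because schedule_on_each_cpu() calls __queue_work() directly instead of going through queue_work(). The low-level helper presumably only records the cwq pointer and links the entry, leaving it to the caller to have claimed PENDING first (set_wq_data() even BUG()s on !work_pending()). An assumed outline of __queue_work(), for orientation only:

/* Sketch only -- assumed shape of the low-level helper used above; field
 * names beyond cwq->lock are guesses consistent with the locking seen in
 * __run_work()/run_workqueue(). */
static void __queue_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
{
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        set_wq_data(work, cwq);                 /* BUG_ON(!work_pending()) */
        list_add_tail(&work->entry, &cwq->worklist);
        wake_up(&cwq->more_work);
        spin_unlock_irqrestore(&cwq->lock, flags);
}
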