Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	19
1 file changed, 8 insertions(+), 11 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 967479756511..8d1e7cb8a51a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -241,14 +241,14 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 		struct work_struct *work = list_entry(cwq->worklist.next,
 						struct work_struct, entry);
 		work_func_t f = work->func;
-		void *data = work->data;
 
 		list_del_init(cwq->worklist.next);
 		spin_unlock_irqrestore(&cwq->lock, flags);
 
 		BUG_ON(get_wq_data(work) != cwq);
-		clear_bit(WORK_STRUCT_PENDING, &work->management);
-		f(data);
+		if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+			work_release(work);
+		f(work);
 
 		spin_lock_irqsave(&cwq->lock, flags);
 		cwq->remove_sequence++;
@@ -527,7 +527,6 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
 /**
  * schedule_on_each_cpu - call a function on each online CPU from keventd
  * @func: the function to call
- * @info: a pointer to pass to func()
  *
  * Returns zero on success.
  * Returns -ve errno on failure.
@@ -536,7 +535,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
  *
  * schedule_on_each_cpu() is very slow.
  */
-int schedule_on_each_cpu(work_func_t func, void *info)
+int schedule_on_each_cpu(work_func_t func)
 {
 	int cpu;
 	struct work_struct *works;
@@ -547,7 +546,7 @@ int schedule_on_each_cpu(work_func_t func, void *info)
 
 	mutex_lock(&workqueue_mutex);
 	for_each_online_cpu(cpu) {
-		INIT_WORK(per_cpu_ptr(works, cpu), func, info);
+		INIT_WORK(per_cpu_ptr(works, cpu), func);
 		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
 			      per_cpu_ptr(works, cpu));
 	}
@@ -591,7 +590,6 @@ EXPORT_SYMBOL(cancel_rearming_delayed_work);
 /**
  * execute_in_process_context - reliably execute the routine with user context
  * @fn: the function to execute
- * @data: data to pass to the function
  * @ew: guaranteed storage for the execute work structure (must
  *	be available when the work executes)
  *
@@ -601,15 +599,14 @@ EXPORT_SYMBOL(cancel_rearming_delayed_work);
  * Returns:	0 - function was executed
  *		1 - function was scheduled for execution
  */
-int execute_in_process_context(work_func_t fn, void *data,
-			       struct execute_work *ew)
+int execute_in_process_context(work_func_t fn, struct execute_work *ew)
 {
 	if (!in_interrupt()) {
-		fn(data);
+		fn(&ew->work);
 		return 0;
 	}
 
-	INIT_WORK(&ew->work, fn, data);
+	INIT_WORK(&ew->work, fn);
 	schedule_work(&ew->work);
 
 	return 1;
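
The change above drops the void *data context pointer from work_func_t: handlers now receive the work_struct itself and recover their own state with container_of(). A minimal sketch of a handler written against the new calling convention (struct frob_device, frob_work_handler and frob_kick are hypothetical names, for illustration only):

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	/* Hypothetical driver state; the work item is embedded in it. */
	struct frob_device {
		int counter;
		struct work_struct work;
	};

	/* New-style handler: takes the work_struct, not a void *data. */
	static void frob_work_handler(struct work_struct *work)
	{
		/* Recover the enclosing object from the embedded member. */
		struct frob_device *dev = container_of(work, struct frob_device, work);

		dev->counter++;
	}

	static void frob_kick(struct frob_device *dev)
	{
		/* Two-argument INIT_WORK, as in the hunks above. */
		INIT_WORK(&dev->work, frob_work_handler);
		schedule_work(&dev->work);
	}

The same pattern explains the execute_in_process_context() hunk: with no separate data argument, fn is invoked with &ew->work directly, so a caller that needs context embeds the execute_work in a larger structure and uses container_of() inside fn.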