author     David Howells <dhowells@redhat.com>   2006-11-22 09:55:48 -0500
committer  David Howells <dhowells@redhat.com>   2006-11-22 09:55:48 -0500
commit     65f27f38446e1976cc98fd3004b110fedcddd189 (patch)
tree       68f8be93feae31dfa018c22db392a05546b63ee1 /kernel/workqueue.c
parent     365970a1ea76d81cb1ad2f652acb605f06dae256 (diff)
WorkStruct: Pass the work_struct pointer instead of context data
Pass the work_struct pointer to the work function rather than context data. The work function can use container_of() to work out the data.

For the cases where the container of the work_struct may go away the moment the pending bit is cleared, it is made possible to defer the release of the structure by deferring the clearing of the pending bit.

To make this work, an extra flag is introduced into the management side of the work_struct. This governs auto-release of the structure upon execution.

Ordinarily, the work queue executor would release the work_struct for further scheduling or deallocation by clearing the pending bit prior to jumping to the work function. This means that, unless the driver makes some guarantee itself that the work_struct won't go away, the work function may not access anything else in the work_struct or its container lest they be deallocated. This is a problem if the auxiliary data is taken away (as done by the last patch).

However, if the pending bit is *not* cleared before jumping to the work function, then the work function *may* access the work_struct and its container with no problems. But then the work function must itself release the work_struct by calling work_release().

In most cases, automatic release is fine, so this is the default. Special initialisers exist for the non-auto-release case (ending in _NAR).

Signed-off-by: David Howells <dhowells@redhat.com>
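For illustration only (not part of this commit), a minimal sketch of what a driver-side conversion to the new calling convention might look like. The struct and function names below (my_device, my_work_handler, my_device_setup) are hypothetical; only INIT_WORK()'s new two-argument form and the container_of() pattern come from the patch itself.

#include <linux/workqueue.h>

/* Hypothetical driver state embedding its work item. */
struct my_device {
	int			event_count;
	struct work_struct	work;
};

/* New-style work function: it receives the work_struct pointer and
 * recovers its container with container_of() instead of being handed
 * a void *data argument. */
static void my_work_handler(struct work_struct *work)
{
	struct my_device *dev = container_of(work, struct my_device, work);

	dev->event_count++;
}

static void my_device_setup(struct my_device *dev)
{
	/* Auto-release (the default): the pending bit is cleared before
	 * my_work_handler() runs, so the handler may only touch the
	 * container if the driver otherwise guarantees its lifetime. */
	INIT_WORK(&dev->work, my_work_handler);
}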
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--   kernel/workqueue.c   19
1 file changed, 8 insertions, 11 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 967479756511..8d1e7cb8a51a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -241,14 +241,14 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 		struct work_struct *work = list_entry(cwq->worklist.next,
 						struct work_struct, entry);
 		work_func_t f = work->func;
-		void *data = work->data;
 
 		list_del_init(cwq->worklist.next);
 		spin_unlock_irqrestore(&cwq->lock, flags);
 
 		BUG_ON(get_wq_data(work) != cwq);
-		clear_bit(WORK_STRUCT_PENDING, &work->management);
-		f(data);
+		if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+			work_release(work);
+		f(work);
 
 		spin_lock_irqsave(&cwq->lock, flags);
 		cwq->remove_sequence++;
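The WORK_STRUCT_NOAUTOREL test above is what the non-auto-release case relies on: such a work function keeps the pending bit set while it runs and must drop it itself with work_release(). A hedged sketch of that pattern, assuming a work item set up with one of the _NAR initialisers mentioned in the commit message; the names my_request and my_request_handler are hypothetical.

#include <linux/workqueue.h>

/* Hypothetical request object embedding a non-auto-release work item. */
struct my_request {
	int			processed;
	struct work_struct	work;
};

static void my_request_handler(struct work_struct *work)
{
	struct my_request *req = container_of(work, struct my_request, work);

	/* The pending bit is still set, so req cannot have been released
	 * out from under us: it is safe to dereference here. */
	req->processed = 1;

	/* Clear the pending bit ourselves, as the last access to the work
	 * item; once it is clear the owner may recycle or free req, so
	 * neither work nor req may be touched after this call. */
	work_release(work);
}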
@@ -527,7 +527,6 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
 /**
  * schedule_on_each_cpu - call a function on each online CPU from keventd
  * @func: the function to call
- * @info: a pointer to pass to func()
  *
  * Returns zero on success.
  * Returns -ve errno on failure.
@@ -536,7 +535,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
  *
  * schedule_on_each_cpu() is very slow.
  */
-int schedule_on_each_cpu(work_func_t func, void *info)
+int schedule_on_each_cpu(work_func_t func)
 {
 	int cpu;
 	struct work_struct *works;
@@ -547,7 +546,7 @@ int schedule_on_each_cpu(work_func_t func, void *info)
 
 	mutex_lock(&workqueue_mutex);
 	for_each_online_cpu(cpu) {
-		INIT_WORK(per_cpu_ptr(works, cpu), func, info);
+		INIT_WORK(per_cpu_ptr(works, cpu), func);
 		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
 				per_cpu_ptr(works, cpu));
 	}
@@ -591,7 +590,6 @@ EXPORT_SYMBOL(cancel_rearming_delayed_work);
 /**
  * execute_in_process_context - reliably execute the routine with user context
  * @fn: the function to execute
- * @data: data to pass to the function
  * @ew: guaranteed storage for the execute work structure (must
  *	be available when the work executes)
  *
@@ -601,15 +599,14 @@ EXPORT_SYMBOL(cancel_rearming_delayed_work);
  * Returns:	0 - function was executed
  *		1 - function was scheduled for execution
  */
-int execute_in_process_context(work_func_t fn, void *data,
-			       struct execute_work *ew)
+int execute_in_process_context(work_func_t fn, struct execute_work *ew)
 {
 	if (!in_interrupt()) {
-		fn(data);
+		fn(&ew->work);
 		return 0;
 	}
 
-	INIT_WORK(&ew->work, fn, data);
+	INIT_WORK(&ew->work, fn);
 	schedule_work(&ew->work);
 
 	return 1;
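A hedged sketch of how a caller of the reworked execute_in_process_context() might look after this change: the execute_work is embedded in the caller's object and the work function digs the object back out with container_of(). The names my_object, my_object_release and my_object_destroy are hypothetical; struct execute_work and its embedded work member are taken from the patched code above.

#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical object that must be torn down in process context. */
struct my_object {
	struct execute_work	ew;
	/* ... payload ... */
};

static void my_object_release(struct work_struct *work)
{
	struct execute_work *ew = container_of(work, struct execute_work, work);
	struct my_object *obj = container_of(ew, struct my_object, ew);

	kfree(obj);
}

static void my_object_destroy(struct my_object *obj)
{
	/* Runs my_object_release() directly when we already have process
	 * context, otherwise queues it on keventd using obj->ew. */
	execute_in_process_context(my_object_release, &obj->ew);
}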