 include/linux/workqueue.h |  6 ++++++
 kernel/workqueue.c        | 29 +++++++++++++++++++++++++++++
 2 files changed, 35 insertions(+), 0 deletions(-)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 86b111300231..957c21c16d62 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -20,6 +20,10 @@ struct work_struct {
 	struct timer_list timer;
 };
 
+struct execute_work {
+	struct work_struct work;
+};
+
 #define __WORK_INITIALIZER(n, f, d) {				\
 	.entry	= { &(n).entry, &(n).entry },			\
 	.func = (f),						\
@@ -74,6 +78,8 @@ extern void init_workqueues(void);
 void cancel_rearming_delayed_work(struct work_struct *work);
 void cancel_rearming_delayed_workqueue(struct workqueue_struct *,
 				       struct work_struct *);
+int execute_in_process_context(void (*fn)(void *), void *,
+			       struct execute_work *);
 
 /*
  * Kill off a pending schedule_delayed_work().  Note that the work callback
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b052e2c4c710..e9e464a90376 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -27,6 +27,7 @@
 #include <linux/cpu.h>
 #include <linux/notifier.h>
 #include <linux/kthread.h>
+#include <linux/hardirq.h>
 
 /*
  * The per-CPU workqueue (if single thread, we always use the first
@@ -476,6 +477,34 @@ void cancel_rearming_delayed_work(struct work_struct *work)
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_work);
 
+/**
+ * execute_in_process_context - reliably execute the routine with user context
+ * @fn:		the function to execute
+ * @data:	data to pass to the function
+ * @ew:		guaranteed storage for the execute work structure (must
+ *		be available when the work executes)
+ *
+ * Executes the function immediately if process context is available,
+ * otherwise schedules the function for delayed execution.
+ *
+ * Returns:	0 - function was executed
+ *		1 - function was scheduled for execution
+ */
+int execute_in_process_context(void (*fn)(void *data), void *data,
+			       struct execute_work *ew)
+{
+	if (!in_interrupt()) {
+		fn(data);
+		return 0;
+	}
+
+	INIT_WORK(&ew->work, fn, data);
+	schedule_work(&ew->work);
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(execute_in_process_context);
+
 int keventd_up(void)
 {
 	return keventd_wq != NULL;
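
A minimal usage sketch (not part of the patch; the foo_device structure and
function names are hypothetical): a caller that may run in either process or
interrupt context embeds a struct execute_work in a longer-lived object, so
the deferred path has the guaranteed storage the kernel-doc above requires.

	#include <linux/workqueue.h>
	#include <linux/slab.h>

	struct foo_device {
		struct execute_work ew;	/* must outlive the caller's stack */
		/* ... other driver state ... */
	};

	/* Runs in process context whether called directly or via keventd. */
	static void foo_release(void *data)
	{
		struct foo_device *foo = data;

		kfree(foo);
	}

	static void foo_put(struct foo_device *foo)
	{
		/*
		 * Calls foo_release() immediately (returns 0) if we are
		 * already in process context; otherwise queues it on the
		 * shared workqueue and returns 1.
		 */
		execute_in_process_context(foo_release, foo, &foo->ew);
	}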