Diffstat (limited to 'kernel')
 kernel/kmod.c            |  16
 kernel/kthread.c         |  13
 kernel/power/poweroff.c  |   4
 kernel/relay.c           |  10
 kernel/sys.c             |   4
 kernel/workqueue.c       | 109
 6 files changed, 95 insertions, 61 deletions
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 2b76dee28496..8d2bea09a4ec 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -114,6 +114,7 @@ EXPORT_SYMBOL(request_module);
 #endif /* CONFIG_KMOD */

 struct subprocess_info {
+        struct work_struct work;
         struct completion *complete;
         char *path;
         char **argv;
@@ -221,9 +222,10 @@ static int wait_for_helper(void *data)
 }

 /* This is run by khelper thread */
-static void __call_usermodehelper(void *data)
+static void __call_usermodehelper(struct work_struct *work)
 {
-        struct subprocess_info *sub_info = data;
+        struct subprocess_info *sub_info =
+                container_of(work, struct subprocess_info, work);
         pid_t pid;
         int wait = sub_info->wait;

@@ -264,6 +266,8 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp,
 {
         DECLARE_COMPLETION_ONSTACK(done);
         struct subprocess_info sub_info = {
+                .work     = __WORK_INITIALIZER(sub_info.work,
+                                               __call_usermodehelper),
                 .complete = &done,
                 .path     = path,
                 .argv     = argv,
@@ -272,7 +276,6 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp,
                 .wait     = wait,
                 .retval   = 0,
         };
-        DECLARE_WORK(work, __call_usermodehelper, &sub_info);

         if (!khelper_wq)
                 return -EBUSY;
@@ -280,7 +283,7 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp,
         if (path[0] == '\0')
                 return 0;

-        queue_work(khelper_wq, &work);
+        queue_work(khelper_wq, &sub_info.work);
         wait_for_completion(&done);
         return sub_info.retval;
 }
@@ -291,6 +294,8 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp,
 {
         DECLARE_COMPLETION(done);
         struct subprocess_info sub_info = {
+                .work     = __WORK_INITIALIZER(sub_info.work,
+                                               __call_usermodehelper),
                 .complete = &done,
                 .path     = path,
                 .argv     = argv,
@@ -298,7 +303,6 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp,
                 .retval   = 0,
         };
         struct file *f;
-        DECLARE_WORK(work, __call_usermodehelper, &sub_info);

         if (!khelper_wq)
                 return -EBUSY;
@@ -318,7 +322,7 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp,
         }
         sub_info.stdin = f;

-        queue_work(khelper_wq, &work);
+        queue_work(khelper_wq, &sub_info.work);
         wait_for_completion(&done);
         return sub_info.retval;
 }
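The kmod.c conversion shows the pattern this whole patch repeats: the work item is embedded in the structure that used to be handed over as void *data, and the handler recovers that structure with container_of(). A minimal, self-contained user-space sketch of the same idea follows; the stand-in work_struct type, the helper path, and the member names are illustrative only, not code from the patch:

/* Illustrative sketch of recovering context via container_of() once the
 * callback receives the work item pointer instead of a void *data argument. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct;                              /* stand-in for the kernel type */
typedef void (*work_func_t)(struct work_struct *);
struct work_struct { work_func_t func; };

struct subprocess_info {                         /* hypothetical mirror of kmod.c */
        struct work_struct work;                 /* embedded; no separate data pointer */
        const char *path;
};

static void __call_usermodehelper(struct work_struct *work)
{
        struct subprocess_info *sub_info =
                container_of(work, struct subprocess_info, work);
        printf("helper path: %s\n", sub_info->path);
}

int main(void)
{
        struct subprocess_info sub_info = {
                .work = { .func = __call_usermodehelper },
                .path = "/sbin/hotplug",
        };
        sub_info.work.func(&sub_info.work);      /* what run_workqueue() would do */
        return 0;
}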
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 4f9c60ef95e8..1db8c72d0d38 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -31,6 +31,8 @@ struct kthread_create_info
         /* Result passed back to kthread_create() from keventd. */
         struct task_struct *result;
         struct completion done;
+
+        struct work_struct work;
 };

 struct kthread_stop_info
@@ -111,9 +113,10 @@ static int kthread(void *_create)
 }

 /* We are keventd: create a thread. */
-static void keventd_create_kthread(void *_create)
+static void keventd_create_kthread(struct work_struct *work)
 {
-        struct kthread_create_info *create = _create;
+        struct kthread_create_info *create =
+                container_of(work, struct kthread_create_info, work);
         int pid;

         /* We want our own signal handler (we take no signals by default). */
@@ -154,20 +157,20 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
                                    ...)
 {
         struct kthread_create_info create;
-        DECLARE_WORK(work, keventd_create_kthread, &create);

         create.threadfn = threadfn;
         create.data = data;
         init_completion(&create.started);
         init_completion(&create.done);
+        INIT_WORK(&create.work, keventd_create_kthread);

         /*
          * The workqueue needs to start up first:
          */
         if (!helper_wq)
-                work.func(work.data);
+                create.work.func(&create.work);
         else {
-                queue_work(helper_wq, &work);
+                queue_work(helper_wq, &create.work);
                 wait_for_completion(&create.done);
         }
         if (!IS_ERR(create.result)) {
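kthread_create() is the on-stack variant of the same conversion: since DECLARE_WORK() can no longer carry a context pointer, the work item is embedded in the request structure and set up at runtime with the two-argument INIT_WORK(), and the caller waits on a completion because the request lives on its stack. A hedged kernel-style sketch of that shape, with hypothetical names:

struct create_request {                          /* hypothetical on-stack request */
        const char *name;
        struct completion done;
        struct work_struct work;                 /* embedded work item */
};

static void do_create(struct work_struct *work)
{
        struct create_request *req =
                container_of(work, struct create_request, work);

        /* ... perform the request using req->name ... */
        complete(&req->done);
}

static int submit_create(struct workqueue_struct *wq, const char *name)
{
        struct create_request req = { .name = name };

        init_completion(&req.done);
        INIT_WORK(&req.work, do_create);

        if (!wq)
                req.work.func(&req.work);        /* run synchronously, as kthread_create() does */
        else {
                queue_work(wq, &req.work);
                wait_for_completion(&req.done);  /* req is on our stack; must wait */
        }
        return 0;
}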
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index f1f900ac3164..678ec736076b 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
@@ -16,12 +16,12 @@
  * callback we use.
  */

-static void do_poweroff(void *dummy)
+static void do_poweroff(struct work_struct *dummy)
 {
         kernel_power_off();
 }

-static DECLARE_WORK(poweroff_work, do_poweroff, NULL);
+static DECLARE_WORK(poweroff_work, do_poweroff);

 static void handle_poweroff(int key, struct tty_struct *tty)
 {
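do_poweroff() is the trivial case: a handler that never needed context data simply ignores the work pointer it now receives, and the static DECLARE_WORK() drops its third argument. A hedged sketch of the same shape, with hypothetical names:

static void flush_everything(struct work_struct *unused)
{
        /* fixed, context-free job; the work pointer is not needed */
}

static DECLARE_WORK(flush_work, flush_everything);

static void kick_flush(void)
{
        schedule_work(&flush_work);              /* safe from interrupt/atomic context */
}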
diff --git a/kernel/relay.c b/kernel/relay.c
index f04bbdb56ac2..2b92e8ece85b 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -308,9 +308,10 @@ static struct rchan_callbacks default_channel_callbacks = {
  * reason waking is deferred is that calling directly from write
  * causes problems if you're writing from say the scheduler.
  */
-static void wakeup_readers(void *private)
+static void wakeup_readers(struct work_struct *work)
 {
-        struct rchan_buf *buf = private;
+        struct rchan_buf *buf =
+                container_of(work, struct rchan_buf, wake_readers.work);
         wake_up_interruptible(&buf->read_wait);
 }

@@ -328,7 +329,7 @@ static inline void __relay_reset(struct rchan_buf *buf, unsigned int init)
         if (init) {
                 init_waitqueue_head(&buf->read_wait);
                 kref_init(&buf->kref);
-                INIT_WORK(&buf->wake_readers, NULL, NULL);
+                INIT_DELAYED_WORK(&buf->wake_readers, NULL);
         } else {
                 cancel_delayed_work(&buf->wake_readers);
                 flush_scheduled_work();
@@ -549,7 +550,8 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
                         buf->padding[old_subbuf];
                 smp_mb();
                 if (waitqueue_active(&buf->read_wait)) {
-                        PREPARE_WORK(&buf->wake_readers, wakeup_readers, buf);
+                        PREPARE_DELAYED_WORK(&buf->wake_readers,
+                                             wakeup_readers);
                         schedule_delayed_work(&buf->wake_readers, 1);
                 }
         }
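The relay conversion adds a wrinkle: the deferred item is now a struct delayed_work whose first member is the work_struct, so container_of() has to name the nested .work member (wake_readers.work above is the real instance). A hedged kernel-style sketch of that pattern, with hypothetical names:

struct flush_state {                             /* hypothetical owner structure */
        wait_queue_head_t waiters;
        struct delayed_work wakeup;              /* was a plain work_struct before */
};

static void wakeup_waiters(struct work_struct *work)
{
        struct flush_state *state =
                container_of(work, struct flush_state, wakeup.work);

        wake_up_interruptible(&state->waiters);
}

static void flush_state_init(struct flush_state *state)
{
        init_waitqueue_head(&state->waiters);
        INIT_DELAYED_WORK(&state->wakeup, wakeup_waiters);
}

static void flush_state_poke(struct flush_state *state)
{
        /* defer the wakeup by one tick, as relay_switch_subbuf() does */
        schedule_delayed_work(&state->wakeup, 1);
}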
diff --git a/kernel/sys.c b/kernel/sys.c
index 98489d82801b..c87b461de38d 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -880,7 +880,7 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user
         return 0;
 }

-static void deferred_cad(void *dummy)
+static void deferred_cad(struct work_struct *dummy)
 {
         kernel_restart(NULL);
 }
@@ -892,7 +892,7 @@ static void deferred_cad(void *dummy)
  */
 void ctrl_alt_del(void)
 {
-        static DECLARE_WORK(cad_work, deferred_cad, NULL);
+        static DECLARE_WORK(cad_work, deferred_cad);

         if (C_A_D)
                 schedule_work(&cad_work);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 17c2f03d2c27..8d1e7cb8a51a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -80,6 +80,29 @@ static inline int is_single_threaded(struct workqueue_struct *wq)
         return list_empty(&wq->list);
 }

+static inline void set_wq_data(struct work_struct *work, void *wq)
+{
+        unsigned long new, old, res;
+
+        /* assume the pending flag is already set and that the task has already
+         * been queued on this workqueue */
+        new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
+        res = work->management;
+        if (res != new) {
+                do {
+                        old = res;
+                        new = (unsigned long) wq;
+                        new |= (old & WORK_STRUCT_FLAG_MASK);
+                        res = cmpxchg(&work->management, old, new);
+                } while (res != old);
+        }
+}
+
+static inline void *get_wq_data(struct work_struct *work)
+{
+        return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK);
+}
+
 /* Preempt must be disabled. */
 static void __queue_work(struct cpu_workqueue_struct *cwq,
                          struct work_struct *work)
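The new set_wq_data()/get_wq_data() helpers rely on the workqueue pointer being aligned, so its low address bits are free to carry the pending/no-autorelease flags; a single word (work->management) then holds both the pointer and the flags, updated with cmpxchg() so concurrent flag updates are not lost. A self-contained user-space model of that trick follows; it is illustrative only, and the masks merely stand in for WORK_STRUCT_FLAG_MASK and WORK_STRUCT_WQ_DATA_MASK:

/* User-space model of packing a pointer and flag bits into one word. */
#include <stdatomic.h>
#include <stdio.h>

#define FLAG_PENDING    (1UL << 0)               /* stands in for WORK_STRUCT_PENDING */
#define FLAG_MASK       (3UL)                    /* stands in for WORK_STRUCT_FLAG_MASK */
#define PTR_MASK        (~FLAG_MASK)             /* stands in for WORK_STRUCT_WQ_DATA_MASK */

static void set_data(atomic_ulong *management, void *wq)
{
        unsigned long old, new;

        old = atomic_load(management);
        do {                                     /* preserve whatever flags are set */
                new = (unsigned long)wq | (old & FLAG_MASK);
        } while (!atomic_compare_exchange_weak(management, &old, new));
}

static void *get_data(atomic_ulong *management)
{
        return (void *)(atomic_load(management) & PTR_MASK);
}

int main(void)
{
        static long fake_wq __attribute__((aligned(4)));   /* low 2 bits of &fake_wq are zero */
        atomic_ulong management = FLAG_PENDING;             /* flag set, no pointer yet */

        set_data(&management, &fake_wq);
        printf("pointer back: %p, pending: %lu\n",
               get_data(&management),
               atomic_load(&management) & FLAG_PENDING);
        return 0;
}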
@@ -87,7 +110,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
         unsigned long flags;

         spin_lock_irqsave(&cwq->lock, flags);
-        work->wq_data = cwq;
+        set_wq_data(work, cwq);
         list_add_tail(&work->entry, &cwq->worklist);
         cwq->insert_sequence++;
         wake_up(&cwq->more_work);
@@ -108,7 +131,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
         int ret = 0, cpu = get_cpu();

-        if (!test_and_set_bit(0, &work->pending)) {
+        if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
                 if (unlikely(is_single_threaded(wq)))
                         cpu = singlethread_cpu;
                 BUG_ON(!list_empty(&work->entry));
@@ -122,38 +145,42 @@ EXPORT_SYMBOL_GPL(queue_work);

 static void delayed_work_timer_fn(unsigned long __data)
 {
-        struct work_struct *work = (struct work_struct *)__data;
-        struct workqueue_struct *wq = work->wq_data;
+        struct delayed_work *dwork = (struct delayed_work *)__data;
+        struct workqueue_struct *wq = get_wq_data(&dwork->work);
         int cpu = smp_processor_id();

         if (unlikely(is_single_threaded(wq)))
                 cpu = singlethread_cpu;

-        __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
+        __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
 }

 /**
  * queue_delayed_work - queue work on a workqueue after delay
  * @wq: workqueue to use
- * @work: work to queue
+ * @work: delayable work to queue
  * @delay: number of jiffies to wait before queueing
  *
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  */
 int fastcall queue_delayed_work(struct workqueue_struct *wq,
-                        struct work_struct *work, unsigned long delay)
+                        struct delayed_work *dwork, unsigned long delay)
 {
         int ret = 0;
-        struct timer_list *timer = &work->timer;
+        struct timer_list *timer = &dwork->timer;
+        struct work_struct *work = &dwork->work;
+
+        if (delay == 0)
+                return queue_work(wq, work);

-        if (!test_and_set_bit(0, &work->pending)) {
+        if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
                 BUG_ON(timer_pending(timer));
                 BUG_ON(!list_empty(&work->entry));

                 /* This stores wq for the moment, for the timer_fn */
-                work->wq_data = wq;
+                set_wq_data(work, wq);
                 timer->expires = jiffies + delay;
-                timer->data = (unsigned long)work;
+                timer->data = (unsigned long)dwork;
                 timer->function = delayed_work_timer_fn;
                 add_timer(timer);
                 ret = 1;
@@ -172,19 +199,20 @@ EXPORT_SYMBOL_GPL(queue_delayed_work);
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  */
 int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
-                        struct work_struct *work, unsigned long delay)
+                        struct delayed_work *dwork, unsigned long delay)
 {
         int ret = 0;
-        struct timer_list *timer = &work->timer;
+        struct timer_list *timer = &dwork->timer;
+        struct work_struct *work = &dwork->work;

-        if (!test_and_set_bit(0, &work->pending)) {
+        if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
                 BUG_ON(timer_pending(timer));
                 BUG_ON(!list_empty(&work->entry));

                 /* This stores wq for the moment, for the timer_fn */
-                work->wq_data = wq;
+                set_wq_data(work, wq);
                 timer->expires = jiffies + delay;
-                timer->data = (unsigned long)work;
+                timer->data = (unsigned long)dwork;
                 timer->function = delayed_work_timer_fn;
                 add_timer_on(timer, cpu);
                 ret = 1;
@@ -212,15 +240,15 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
         while (!list_empty(&cwq->worklist)) {
                 struct work_struct *work = list_entry(cwq->worklist.next,
                                                 struct work_struct, entry);
-                void (*f) (void *) = work->func;
-                void *data = work->data;
+                work_func_t f = work->func;

                 list_del_init(cwq->worklist.next);
                 spin_unlock_irqrestore(&cwq->lock, flags);

-                BUG_ON(work->wq_data != cwq);
-                clear_bit(0, &work->pending);
-                f(data);
+                BUG_ON(get_wq_data(work) != cwq);
+                if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+                        work_release(work);
+                f(work);

                 spin_lock_irqsave(&cwq->lock, flags);
                 cwq->remove_sequence++;
@@ -468,38 +496,37 @@ EXPORT_SYMBOL(schedule_work);

 /**
  * schedule_delayed_work - put work task in global workqueue after delay
- * @work: job to be done
- * @delay: number of jiffies to wait
+ * @dwork: job to be done
+ * @delay: number of jiffies to wait or 0 for immediate execution
  *
  * After waiting for a given time this puts a job in the kernel-global
  * workqueue.
  */
-int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
+int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
 {
-        return queue_delayed_work(keventd_wq, work, delay);
+        return queue_delayed_work(keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work);

 /**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use
- * @work: job to be done
+ * @dwork: job to be done
  * @delay: number of jiffies to wait
  *
  * After waiting for a given time this puts a job in the kernel-global
  * workqueue on the specified CPU.
  */
 int schedule_delayed_work_on(int cpu,
-                        struct work_struct *work, unsigned long delay)
+                        struct delayed_work *dwork, unsigned long delay)
 {
-        return queue_delayed_work_on(cpu, keventd_wq, work, delay);
+        return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work_on);

 /**
  * schedule_on_each_cpu - call a function on each online CPU from keventd
  * @func: the function to call
- * @info: a pointer to pass to func()
  *
  * Returns zero on success.
  * Returns -ve errno on failure.
@@ -508,7 +535,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
  *
  * schedule_on_each_cpu() is very slow.
  */
-int schedule_on_each_cpu(void (*func)(void *info), void *info)
+int schedule_on_each_cpu(work_func_t func)
 {
         int cpu;
         struct work_struct *works;
@@ -519,7 +546,7 @@ int schedule_on_each_cpu(void (*func)(void *info), void *info)

         mutex_lock(&workqueue_mutex);
         for_each_online_cpu(cpu) {
-                INIT_WORK(per_cpu_ptr(works, cpu), func, info);
+                INIT_WORK(per_cpu_ptr(works, cpu), func);
                 __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
                                 per_cpu_ptr(works, cpu));
         }
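With the @info argument gone from schedule_on_each_cpu(), the handler has only the work pointer; per-invocation context typically comes from per-CPU state instead. A hedged sketch under that assumption (the per-CPU counter and function names are hypothetical):

static DEFINE_PER_CPU(unsigned long, drain_count);       /* hypothetical counter */

static void drain_local_caches(struct work_struct *unused)
{
        __get_cpu_var(drain_count)++;            /* runs once on each online CPU */
}

static int drain_all(void)
{
        return schedule_on_each_cpu(drain_local_caches);  /* process context only */
}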
@@ -539,12 +566,12 @@ EXPORT_SYMBOL(flush_scheduled_work);
  * cancel_rearming_delayed_workqueue - reliably kill off a delayed
  * work whose handler rearms the delayed work.
  * @wq: the controlling workqueue structure
- * @work: the delayed work struct
+ * @dwork: the delayed work struct
  */
 void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
-                                       struct work_struct *work)
+                                       struct delayed_work *dwork)
 {
-        while (!cancel_delayed_work(work))
+        while (!cancel_delayed_work(dwork))
                 flush_workqueue(wq);
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
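cancel_rearming_delayed_workqueue() keeps its cancel-then-flush loop because a self-rearming handler can requeue itself between the cancel and the flush; the loop repeats until a cancellation wins that race. A hedged sketch of the kind of user it is aimed at, with hypothetical names and using only the interfaces shown in this patch:

static struct workqueue_struct *poll_wq;         /* hypothetical */
static struct delayed_work poll_work;

static void poll_device(struct work_struct *work)
{
        /* ... sample the hardware ... */
        queue_delayed_work(poll_wq, &poll_work, HZ);     /* rearm ourselves */
}

static void poll_start(void)
{
        INIT_DELAYED_WORK(&poll_work, poll_device);
        queue_delayed_work(poll_wq, &poll_work, HZ);
}

static void poll_stop(void)
{
        /* loops until a cancel wins the race against the handler rearming */
        cancel_rearming_delayed_workqueue(poll_wq, &poll_work);
}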
@@ -552,18 +579,17 @@ EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
 /**
  * cancel_rearming_delayed_work - reliably kill off a delayed keventd
  * work whose handler rearms the delayed work.
- * @work: the delayed work struct
+ * @dwork: the delayed work struct
  */
-void cancel_rearming_delayed_work(struct work_struct *work)
+void cancel_rearming_delayed_work(struct delayed_work *dwork)
 {
-        cancel_rearming_delayed_workqueue(keventd_wq, work);
+        cancel_rearming_delayed_workqueue(keventd_wq, dwork);
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_work);

 /**
  * execute_in_process_context - reliably execute the routine with user context
  * @fn: the function to execute
- * @data: data to pass to the function
  * @ew: guaranteed storage for the execute work structure (must
  *      be available when the work executes)
  *
@@ -573,15 +599,14 @@ EXPORT_SYMBOL(cancel_rearming_delayed_work);
  * Returns:     0 - function was executed
  *              1 - function was scheduled for execution
  */
-int execute_in_process_context(void (*fn)(void *data), void *data,
-                               struct execute_work *ew)
+int execute_in_process_context(work_func_t fn, struct execute_work *ew)
 {
         if (!in_interrupt()) {
-                fn(data);
+                fn(&ew->work);
                 return 0;
         }

-        INIT_WORK(&ew->work, fn, data);
+        INIT_WORK(&ew->work, fn);
         schedule_work(&ew->work);

         return 1;
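The execute_in_process_context() change follows the same convention: the routine always receives &ew->work, whether it runs immediately in process context or is deferred to keventd, so any context once again comes from embedding the execute_work in the owning object. A hedged sketch of a caller, with hypothetical names:

struct conn {                                    /* hypothetical object being torn down */
        struct execute_work free_work;
        /* ... */
};

static void conn_free(struct work_struct *work)
{
        struct conn *c = container_of(work, struct conn, free_work.work);

        kfree(c);
}

static void conn_put_final(struct conn *c)
{
        /* runs conn_free() right away in process context, or schedules it
         * on keventd when called from an interrupt */
        execute_in_process_context(conn_free, &c->free_work);
}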