Diffstat (limited to 'include/linux/workqueue.h')
-rw-r--r--  include/linux/workqueue.h | 220
1 file changed, 122 insertions(+), 98 deletions(-)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index af155450cabb..2b58905d3504 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -16,6 +16,7 @@ struct workqueue_struct;
 
 struct work_struct;
 typedef void (*work_func_t)(struct work_struct *work);
+void delayed_work_timer_fn(unsigned long __data);
 
 /*
  * The first word is the work queue pointer and the flags rolled into
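The timer callback is declared here because the delayed_work initializers below now reference it directly: every delayed_work gets its timer wired to delayed_work_timer_fn (with TIMER_IRQSAFE) at initialization time rather than at queueing time. A minimal usage sketch, assuming a hypothetical my_work_fn in module context:

        #include <linux/workqueue.h>

        static void my_work_fn(struct work_struct *work) { /* ... */ }
        static struct delayed_work my_dwork;

        static void example_setup(void)
        {
                /* the timer is fully wired after this; no manual
                 * timer.function/timer.data setup is needed anymore */
                INIT_DELAYED_WORK(&my_dwork, my_work_fn);
                schedule_delayed_work(&my_dwork, HZ);
        }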
@@ -67,9 +68,18 @@ enum {
 	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
 				  WORK_STRUCT_COLOR_BITS,
 
+	/* data contains off-queue information when !WORK_STRUCT_CWQ */
+	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_FLAG_BITS,
+
+	WORK_OFFQ_CANCELING	= (1 << WORK_OFFQ_FLAG_BASE),
+
+	WORK_OFFQ_FLAG_BITS	= 1,
+	WORK_OFFQ_CPU_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
+
+	/* convenience constants */
 	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
 	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
-	WORK_STRUCT_NO_CPU	= WORK_CPU_NONE << WORK_STRUCT_FLAG_BITS,
+	WORK_STRUCT_NO_CPU	= (unsigned long)WORK_CPU_NONE << WORK_OFFQ_CPU_SHIFT,
 
 	/* bit mask for work_busy() return values */
 	WORK_BUSY_PENDING	= 1 << 0,
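When a work item is off-queue (!WORK_STRUCT_CWQ), its data word now records the last CPU and a canceling flag above the regular WORK_STRUCT_* flag bits: bits [0, WORK_STRUCT_FLAG_BITS) hold the struct flags, bit WORK_OFFQ_FLAG_BASE is WORK_OFFQ_CANCELING, and the bits from WORK_OFFQ_CPU_SHIFT upward hold the CPU (WORK_CPU_NONE when unknown). A sketch of the packing implied by these constants; the helper names are hypothetical and the real helpers live in kernel/workqueue.c:

        /* hypothetical illustration of the off-queue data layout */
        static inline unsigned long offq_pack(unsigned int cpu, bool canceling)
        {
                unsigned long data = (unsigned long)cpu << WORK_OFFQ_CPU_SHIFT;

                if (canceling)
                        data |= WORK_OFFQ_CANCELING;
                return data;
        }

        static inline unsigned int offq_cpu(unsigned long data)
        {
                return data >> WORK_OFFQ_CPU_SHIFT;
        }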
@@ -92,6 +102,7 @@ struct work_struct {
 struct delayed_work {
 	struct work_struct work;
 	struct timer_list timer;
+	int cpu;
 };
 
 static inline struct delayed_work *to_delayed_work(struct work_struct *work)
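The new cpu member records which CPU a delayed work item was queued for while its timer is pending, so the timer callback and mod_delayed_work_on() can requeue it on the intended CPU. Callers never touch it directly; a conceptual illustration (the field is filled in by the queueing path in kernel/workqueue.c, not by users):

        static void queue_on_cpu1(struct workqueue_struct *wq,
                                  struct delayed_work *dwork)
        {
                /* internally records the target CPU in dwork->cpu until
                 * the timer fires and the work lands on wq's CPU 1 pool */
                queue_delayed_work_on(1, wq, dwork, msecs_to_jiffies(100));
        }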
@@ -115,41 +126,38 @@ struct execute_work {
 #define __WORK_INIT_LOCKDEP_MAP(n, k)
 #endif
 
 #define __WORK_INITIALIZER(n, f) {				\
 	.data = WORK_DATA_STATIC_INIT(),			\
 	.entry	= { &(n).entry, &(n).entry },			\
 	.func = (f),						\
 	__WORK_INIT_LOCKDEP_MAP(#n, &(n))			\
 	}
 
-#define __DELAYED_WORK_INITIALIZER(n, f) {			\
+#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {		\
 	.work = __WORK_INITIALIZER((n).work, (f)),		\
-	.timer = TIMER_INITIALIZER(NULL, 0, 0),			\
+	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,	\
+				     0, (unsigned long)&(n),	\
+				     (tflags) | TIMER_IRQSAFE),	\
 	}
 
-#define __DEFERRED_WORK_INITIALIZER(n, f) {			\
-	.work = __WORK_INITIALIZER((n).work, (f)),		\
-	.timer = TIMER_DEFERRED_INITIALIZER(NULL, 0, 0),	\
-	}
-
-#define DECLARE_WORK(n, f)					\
+#define DECLARE_WORK(n, f)					\
 	struct work_struct n = __WORK_INITIALIZER(n, f)
 
 #define DECLARE_DELAYED_WORK(n, f)				\
-	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
+	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)
 
-#define DECLARE_DEFERRED_WORK(n, f)				\
-	struct delayed_work n = __DEFERRED_WORK_INITIALIZER(n, f)
+#define DECLARE_DEFERRABLE_WORK(n, f)				\
+	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
 
 /*
  * initialize a work item's function pointer
  */
 #define PREPARE_WORK(_work, _func)				\
 	do {							\
 		(_work)->func = (_func);			\
 	} while (0)
 
 #define PREPARE_DELAYED_WORK(_work, _func)			\
 	PREPARE_WORK(&(_work)->work, (_func))
 
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
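DECLARE_DEFERRED_WORK is renamed DECLARE_DEFERRABLE_WORK, and both delayed variants now share __DELAYED_WORK_INITIALIZER, differing only in the timer flags passed through. Static declarations under the new names, with hypothetical function names:

        static void poll_fn(struct work_struct *work);
        static void gc_fn(struct work_struct *work);

        /* ordinary timer */
        static DECLARE_DELAYED_WORK(poll_dwork, poll_fn);
        /* deferrable: the timer may be delayed while the CPU idles */
        static DECLARE_DEFERRABLE_WORK(gc_dwork, gc_fn);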
@@ -179,7 +187,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 								\
 		__init_work((_work), _onstack);			\
 		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
-		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0);\
+		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
 		INIT_LIST_HEAD(&(_work)->entry);		\
 		PREPARE_WORK((_work), (_func));			\
 	} while (0)
@@ -193,33 +201,44 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 	} while (0)
 #endif
 
 #define INIT_WORK(_work, _func)				\
 	do {							\
 		__INIT_WORK((_work), (_func), 0);		\
 	} while (0)
 
 #define INIT_WORK_ONSTACK(_work, _func)			\
 	do {							\
 		__INIT_WORK((_work), (_func), 1);		\
 	} while (0)
 
-#define INIT_DELAYED_WORK(_work, _func)			\
+#define __INIT_DELAYED_WORK(_work, _func, _tflags)		\
 	do {							\
 		INIT_WORK(&(_work)->work, (_func));		\
-		init_timer(&(_work)->timer);			\
+		__setup_timer(&(_work)->timer, delayed_work_timer_fn, \
+			      (unsigned long)(_work),		\
+			      (_tflags) | TIMER_IRQSAFE);	\
 	} while (0)
 
-#define INIT_DELAYED_WORK_ONSTACK(_work, _func)		\
+#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)	\
 	do {							\
 		INIT_WORK_ONSTACK(&(_work)->work, (_func));	\
-		init_timer_on_stack(&(_work)->timer);		\
+		__setup_timer_on_stack(&(_work)->timer,		\
+				       delayed_work_timer_fn,	\
+				       (unsigned long)(_work),	\
+				       (_tflags) | TIMER_IRQSAFE); \
 	} while (0)
 
-#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)		\
-	do {							\
-		INIT_WORK(&(_work)->work, (_func));		\
-		init_timer_deferrable(&(_work)->timer);		\
-	} while (0)
+#define INIT_DELAYED_WORK(_work, _func)			\
+	__INIT_DELAYED_WORK(_work, _func, 0)
+
+#define INIT_DELAYED_WORK_ONSTACK(_work, _func)		\
+	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)
+
+#define INIT_DEFERRABLE_WORK(_work, _func)			\
+	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)
+
+#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)		\
+	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)
 
 /**
  * work_pending - Find out whether a work item is currently pending
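The runtime initializers mirror the static ones: INIT_DELAYED_WORK_DEFERRABLE callers migrate to INIT_DEFERRABLE_WORK, and an on-stack deferrable variant becomes available too. A sketch with a hypothetical containing structure:

        struct my_ctx {
                struct delayed_work dwork;
        };

        static void my_fn(struct work_struct *work)
        {
                struct my_ctx *ctx = container_of(to_delayed_work(work),
                                                  struct my_ctx, dwork);
                /* ... use ctx ... */
        }

        static void my_ctx_init(struct my_ctx *ctx)
        {
                /* was: INIT_DELAYED_WORK_DEFERRABLE(&ctx->dwork, my_fn); */
                INIT_DEFERRABLE_WORK(&ctx->dwork, my_fn);
        }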
@@ -278,10 +297,6 @@ enum {
  * system_long_wq is similar to system_wq but may host long running
  * works.  Queue flushing might take relatively long.
  *
- * system_nrt_wq is non-reentrant and guarantees that any given work
- * item is never executed in parallel by multiple CPUs.  Queue
- * flushing might take relatively long.
- *
  * system_unbound_wq is unbound workqueue.  Workers are not bound to
  * any specific CPU, not concurrency managed, and all queued works are
  * executed immediately as long as max_active limit is not reached and
@@ -289,16 +304,25 @@ enum {
  *
  * system_freezable_wq is equivalent to system_wq except that it's
  * freezable.
- *
- * system_nrt_freezable_wq is equivalent to system_nrt_wq except that
- * it's freezable.
  */
 extern struct workqueue_struct *system_wq;
 extern struct workqueue_struct *system_long_wq;
-extern struct workqueue_struct *system_nrt_wq;
 extern struct workqueue_struct *system_unbound_wq;
 extern struct workqueue_struct *system_freezable_wq;
-extern struct workqueue_struct *system_nrt_freezable_wq;
+
+static inline struct workqueue_struct * __deprecated __system_nrt_wq(void)
+{
+	return system_wq;
+}
+
+static inline struct workqueue_struct * __deprecated __system_nrt_freezable_wq(void)
+{
+	return system_freezable_wq;
+}
+
+/* equivalent to system_wq and system_freezable_wq, deprecated */
+#define system_nrt_wq			__system_nrt_wq()
+#define system_nrt_freezable_wq	__system_nrt_freezable_wq()
 
 extern struct workqueue_struct *
 __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
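Since this series makes every workqueue non-reentrant (a given work item never runs on two CPUs at once), the dedicated system_nrt_wq and system_nrt_freezable_wq lose their reason to exist. Routing the old names through __deprecated inlines keeps existing users building while emitting a compile-time warning. Migration is a one-line change, sketched with a hypothetical work item:

        static struct work_struct my_work;

        static void kick(void)
        {
                /* was: queue_work(system_nrt_wq, &my_work);  -- now warns */
                queue_work(system_wq, &my_work);
        }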
@@ -321,22 +345,22 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
  * Pointer to the allocated workqueue on success, %NULL on failure.
  */
 #ifdef CONFIG_LOCKDEP
 #define alloc_workqueue(fmt, flags, max_active, args...)	\
 ({								\
 	static struct lock_class_key __key;			\
 	const char *__lock_name;				\
 								\
 	if (__builtin_constant_p(fmt))				\
 		__lock_name = (fmt);				\
 	else							\
 		__lock_name = #fmt;				\
 								\
 	__alloc_workqueue_key((fmt), (flags), (max_active),	\
 			      &__key, __lock_name, ##args);	\
 })
 #else
 #define alloc_workqueue(fmt, flags, max_active, args...)	\
 	__alloc_workqueue_key((fmt), (flags), (max_active),	\
 			      NULL, NULL, ##args)
 #endif
 
@@ -353,46 +377,50 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
  * RETURNS:
  * Pointer to the allocated workqueue on success, %NULL on failure.
  */
 #define alloc_ordered_workqueue(fmt, flags, args...)		\
 	alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args)
 
 #define create_workqueue(name)					\
 	alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
 #define create_freezable_workqueue(name)			\
 	alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
 #define create_singlethread_workqueue(name)			\
 	alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
-extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
-extern int queue_work_on(int cpu, struct workqueue_struct *wq,
-			struct work_struct *work);
-extern int queue_delayed_work(struct workqueue_struct *wq,
-			struct delayed_work *work, unsigned long delay);
-extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
-			struct delayed_work *work, unsigned long delay);
+extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
+			struct work_struct *work);
+extern bool queue_work(struct workqueue_struct *wq, struct work_struct *work);
+extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+			struct delayed_work *work, unsigned long delay);
+extern bool queue_delayed_work(struct workqueue_struct *wq,
+			struct delayed_work *work, unsigned long delay);
+extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
+			struct delayed_work *dwork, unsigned long delay);
+extern bool mod_delayed_work(struct workqueue_struct *wq,
+			struct delayed_work *dwork, unsigned long delay);
 
 extern void flush_workqueue(struct workqueue_struct *wq);
 extern void drain_workqueue(struct workqueue_struct *wq);
 extern void flush_scheduled_work(void);
 
-extern int schedule_work(struct work_struct *work);
-extern int schedule_work_on(int cpu, struct work_struct *work);
-extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
-extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
-					unsigned long delay);
+extern bool schedule_work_on(int cpu, struct work_struct *work);
+extern bool schedule_work(struct work_struct *work);
+extern bool schedule_delayed_work_on(int cpu, struct delayed_work *work,
+				     unsigned long delay);
+extern bool schedule_delayed_work(struct delayed_work *work,
+				  unsigned long delay);
 extern int schedule_on_each_cpu(work_func_t func);
 extern int keventd_up(void);
 
 int execute_in_process_context(work_func_t fn, struct execute_work *);
 
 extern bool flush_work(struct work_struct *work);
-extern bool flush_work_sync(struct work_struct *work);
 extern bool cancel_work_sync(struct work_struct *work);
 
 extern bool flush_delayed_work(struct delayed_work *dwork);
-extern bool flush_delayed_work_sync(struct delayed_work *work);
+extern bool cancel_delayed_work(struct delayed_work *dwork);
 extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
 
 extern void workqueue_set_max_active(struct workqueue_struct *wq,
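Two API changes land here: the queueing entry points return bool instead of int (%true if the work was newly queued, %false if it was already pending), and the new mod_delayed_work{,_on}() atomically advance or retard a pending delayed work's timer, replacing open-coded cancel-then-requeue sequences. A debounce sketch with a hypothetical work item:

        static struct delayed_work flush_dwork;

        static void kick_flush(void)
        {
                /*
                 * was: cancel_delayed_work(&flush_dwork);
                 *      queue_delayed_work(system_wq, &flush_dwork, HZ);
                 * mod_delayed_work() does both in one step; it returns
                 * %true if flush_dwork was already pending.
                 */
                mod_delayed_work(system_wq, &flush_dwork, HZ);
        }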
@@ -402,27 +430,11 @@ extern unsigned int work_cpu(struct work_struct *work);
 extern unsigned int work_busy(struct work_struct *work);
 
 /*
- * Kill off a pending schedule_delayed_work().  Note that the work callback
- * function may still be running on return from cancel_delayed_work(), unless
- * it returns 1 and the work doesn't re-arm itself.  Run flush_workqueue() or
- * cancel_work_sync() to wait on it.
- */
-static inline bool cancel_delayed_work(struct delayed_work *work)
-{
-	bool ret;
-
-	ret = del_timer_sync(&work->timer);
-	if (ret)
-		work_clear_pending(&work->work);
-	return ret;
-}
-
-/*
  * Like above, but uses del_timer() instead of del_timer_sync().  This means,
  * if it returns 0 the timer function may be running and the queueing is in
  * progress.
  */
-static inline bool __cancel_delayed_work(struct delayed_work *work)
+static inline bool __deprecated __cancel_delayed_work(struct delayed_work *work)
 {
 	bool ret;
 
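cancel_delayed_work() is no longer the inline del_timer_sync() wrapper removed above; it is now a proper function (declared earlier among the bool prototypes) which, per this series, can also grab a work item whose timer has already fired and is sitting on a queue. Call sites are unchanged; a sketch with a hypothetical work item:

        static struct delayed_work poll_dwork;

        static void stop_polling(void)
        {
                /* returns %true if pending work was cancelled; the callback
                 * may still be running -- use cancel_delayed_work_sync() to
                 * also wait for it */
                if (!cancel_delayed_work(&poll_dwork))
                        pr_debug("poll work was not pending\n");
        }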
@@ -432,6 +444,18 @@ static inline bool __cancel_delayed_work(struct delayed_work *work)
 	return ret;
 }
 
+/* used to be different but now identical to flush_work(), deprecated */
+static inline bool __deprecated flush_work_sync(struct work_struct *work)
+{
+	return flush_work(work);
+}
+
+/* used to be different but now identical to flush_delayed_work(), deprecated */
+static inline bool __deprecated flush_delayed_work_sync(struct delayed_work *dwork)
+{
+	return flush_delayed_work(dwork);
+}
+
 #ifndef CONFIG_SMP
 static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
 {
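With flush_work() and flush_delayed_work() now waiting for the work item wherever it runs, the _sync variants have nothing extra to offer; they become deprecated aliases so existing callers keep compiling, with a warning. Migration, with a hypothetical work item:

        static struct delayed_work stats_dwork;

        static void teardown(void)
        {
                /* was: flush_delayed_work_sync(&stats_dwork); -- now warns */
                flush_delayed_work(&stats_dwork);	/* identical behavior */
        }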
