author		Linus Torvalds <torvalds@linux-foundation.org>	2012-10-02 12:54:49 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-02 12:54:49 -0400
commit		033d9959ed2dc1029217d4165f80a71702dc578e (patch)
tree		3d306316e44bdabce2e0bf2ef7e466e525f90b4c /include/linux
parent		974a847e00cf3ff1695e62b276892137893706ab (diff)
parent		7c6e72e46c9ea4a88f3f8ba96edce9db4bd48726 (diff)
Merge branch 'for-3.7' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull workqueue changes from Tejun Heo:
 "This is workqueue updates for v3.7-rc1.  A lot of activities this
  round including considerable API and behavior cleanups.

  * delayed_work combines a timer and a work item.  The handling of the
    timer part has always been a bit clunky leading to confusing
    cancelation API with weird corner-case behaviors.  delayed_work is
    updated to use new IRQ safe timer and cancelation now works as
    expected.

  * Another deficiency of delayed_work was lack of the counterpart of
    mod_timer() which led to cancel+queue combinations or open-coded
    timer+work usages.  mod_delayed_work[_on]() are added.

    These two delayed_work changes make delayed_work provide interface
    and behave like timer which is executed with process context.

  * A work item could be executed concurrently on multiple CPUs, which
    is rather unintuitive and made flush_work() behavior confusing and
    half-broken under certain circumstances.  This problem doesn't
    exist for non-reentrant workqueues.  While non-reentrancy check
    isn't free, the overhead is incurred only when a work item bounces
    across different CPUs and even in simulated pathological scenario
    the overhead isn't too high.

    All workqueues are made non-reentrant.  This removes the
    distinction between flush_[delayed_]work() and
    flush_[delayed_]_work_sync().  The former is now as strong as the
    latter and the specified work item is guaranteed to have finished
    execution of any previous queueing on return.

  * In addition to the various bug fixes, Lai redid and simplified CPU
    hotplug handling significantly.

  * Joonsoo introduced system_highpri_wq and used it during CPU
    hotplug.

  There are two merge commits - one to pull in IRQ safe timer from
  tip/timers/core and the other to pull in CPU hotplug fixes from
  wq/for-3.6-fixes as Lai's hotplug restructuring depended on them."

Fixed a number of trivial conflicts, but the more interesting conflicts
were silent ones where the deprecated interfaces had been used by new
code in the merge window, and thus didn't cause any real data conflicts.

Tejun pointed out a few of them, I fixed a couple more.

* 'for-3.7' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq: (46 commits)
  workqueue: remove spurious WARN_ON_ONCE(in_irq()) from try_to_grab_pending()
  workqueue: use cwq_set_max_active() helper for workqueue_set_max_active()
  workqueue: introduce cwq_set_max_active() helper for thaw_workqueues()
  workqueue: remove @delayed from cwq_dec_nr_in_flight()
  workqueue: fix possible stall on try_to_grab_pending() of a delayed work item
  workqueue: use hotcpu_notifier() for workqueue_cpu_down_callback()
  workqueue: use __cpuinit instead of __devinit for cpu callbacks
  workqueue: rename manager_mutex to assoc_mutex
  workqueue: WORKER_REBIND is no longer necessary for idle rebinding
  workqueue: WORKER_REBIND is no longer necessary for busy rebinding
  workqueue: reimplement idle worker rebinding
  workqueue: deprecate __cancel_delayed_work()
  workqueue: reimplement cancel_delayed_work() using try_to_grab_pending()
  workqueue: use mod_delayed_work() instead of __cancel + queue
  workqueue: use irqsafe timer for delayed_work
  workqueue: clean up delayed_work initializers and add missing one
  workqueue: make deferrable delayed_work initializer names consistent
  workqueue: cosmetic whitespace updates for macro definitions
  workqueue: deprecate system_nrt[_freezable]_wq
  workqueue: deprecate flush[_delayed]_work_sync()
  ...
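As an illustrative sketch (not part of this pull; the my_dev structure, the function names and the 100 ms interval are hypothetical), a driver that used to re-arm a delayed work item with an open-coded cancel_delayed_work() + queue_delayed_work() pair could now use the mod_delayed_work() described above:

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct my_dev {
	struct delayed_work poll_work;	/* hypothetical polling work item */
};

static void my_dev_poll(struct work_struct *work)
{
	struct my_dev *dev = container_of(to_delayed_work(work),
					  struct my_dev, poll_work);

	/* ... talk to the hardware, then re-arm the poll ... */
	queue_delayed_work(system_wq, &dev->poll_work, msecs_to_jiffies(100));
}

static void my_dev_start(struct my_dev *dev)
{
	INIT_DELAYED_WORK(&dev->poll_work, my_dev_poll);
	queue_delayed_work(system_wq, &dev->poll_work, msecs_to_jiffies(100));
}

static void my_dev_kick(struct my_dev *dev)
{
	/*
	 * Previously this was typically cancel_delayed_work() followed by
	 * queue_delayed_work(); mod_delayed_work() adjusts the pending
	 * timeout in one call, the way mod_timer() does for plain timers.
	 */
	mod_delayed_work(system_wq, &dev->poll_work, 0);
}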
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/workqueue.h	220
1 file changed, 122 insertions(+), 98 deletions(-)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index af155450cabb..2b58905d3504 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -16,6 +16,7 @@ struct workqueue_struct;
 
 struct work_struct;
 typedef void (*work_func_t)(struct work_struct *work);
+void delayed_work_timer_fn(unsigned long __data);
 
 /*
  * The first word is the work queue pointer and the flags rolled into
@@ -67,9 +68,18 @@ enum {
 	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
 				  WORK_STRUCT_COLOR_BITS,
 
+	/* data contains off-queue information when !WORK_STRUCT_CWQ */
+	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_FLAG_BITS,
+
+	WORK_OFFQ_CANCELING	= (1 << WORK_OFFQ_FLAG_BASE),
+
+	WORK_OFFQ_FLAG_BITS	= 1,
+	WORK_OFFQ_CPU_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
+
+	/* convenience constants */
 	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
 	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
-	WORK_STRUCT_NO_CPU	= WORK_CPU_NONE << WORK_STRUCT_FLAG_BITS,
+	WORK_STRUCT_NO_CPU	= (unsigned long)WORK_CPU_NONE << WORK_OFFQ_CPU_SHIFT,
 
 	/* bit mask for work_busy() return values */
 	WORK_BUSY_PENDING	= 1 << 0,
@@ -92,6 +102,7 @@ struct work_struct {
 struct delayed_work {
 	struct work_struct work;
 	struct timer_list timer;
+	int cpu;
 };
 
 static inline struct delayed_work *to_delayed_work(struct work_struct *work)
@@ -115,41 +126,38 @@ struct execute_work {
 #define __WORK_INIT_LOCKDEP_MAP(n, k)
 #endif
 
 #define __WORK_INITIALIZER(n, f) {					\
 	.data = WORK_DATA_STATIC_INIT(),				\
 	.entry	= { &(n).entry, &(n).entry },				\
 	.func = (f),							\
 	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
 	}
 
-#define __DELAYED_WORK_INITIALIZER(n, f) {				\
+#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
 	.work = __WORK_INITIALIZER((n).work, (f)),			\
-	.timer = TIMER_INITIALIZER(NULL, 0, 0),				\
+	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
+				     0, (unsigned long)&(n),		\
+				     (tflags) | TIMER_IRQSAFE),		\
 	}
 
-#define __DEFERRED_WORK_INITIALIZER(n, f) {				\
-	.work = __WORK_INITIALIZER((n).work, (f)),			\
-	.timer = TIMER_DEFERRED_INITIALIZER(NULL, 0, 0),		\
-	}
-
-#define DECLARE_WORK(n, f)						\
+#define DECLARE_WORK(n, f)						\
 	struct work_struct n = __WORK_INITIALIZER(n, f)
 
 #define DECLARE_DELAYED_WORK(n, f)					\
-	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
+	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)
 
-#define DECLARE_DEFERRED_WORK(n, f)					\
-	struct delayed_work n = __DEFERRED_WORK_INITIALIZER(n, f)
+#define DECLARE_DEFERRABLE_WORK(n, f)					\
+	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
 
 /*
  * initialize a work item's function pointer
  */
 #define PREPARE_WORK(_work, _func)					\
 	do {								\
 		(_work)->func = (_func);				\
 	} while (0)
 
 #define PREPARE_DELAYED_WORK(_work, _func)				\
 	PREPARE_WORK(&(_work)->work, (_func))
 
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
@@ -179,7 +187,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 									\
 		__init_work((_work), _onstack);				\
 		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
-		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0);\
+		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
 		INIT_LIST_HEAD(&(_work)->entry);			\
 		PREPARE_WORK((_work), (_func));				\
 	} while (0)
@@ -193,33 +201,44 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 	} while (0)
 #endif
 
 #define INIT_WORK(_work, _func)					\
 	do {								\
 		__INIT_WORK((_work), (_func), 0);			\
 	} while (0)
 
 #define INIT_WORK_ONSTACK(_work, _func)				\
 	do {								\
 		__INIT_WORK((_work), (_func), 1);			\
 	} while (0)
 
-#define INIT_DELAYED_WORK(_work, _func)				\
+#define __INIT_DELAYED_WORK(_work, _func, _tflags)		\
 	do {							\
 		INIT_WORK(&(_work)->work, (_func));		\
-		init_timer(&(_work)->timer);			\
+		__setup_timer(&(_work)->timer, delayed_work_timer_fn,	\
+			      (unsigned long)(_work),		\
+			      (_tflags) | TIMER_IRQSAFE);	\
 	} while (0)
 
-#define INIT_DELAYED_WORK_ONSTACK(_work, _func)		\
+#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)	\
 	do {							\
 		INIT_WORK_ONSTACK(&(_work)->work, (_func));	\
-		init_timer_on_stack(&(_work)->timer);		\
+		__setup_timer_on_stack(&(_work)->timer,		\
+				       delayed_work_timer_fn,	\
+				       (unsigned long)(_work),	\
+				       (_tflags) | TIMER_IRQSAFE); \
 	} while (0)
 
-#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)		\
-	do {							\
-		INIT_WORK(&(_work)->work, (_func));		\
-		init_timer_deferrable(&(_work)->timer);		\
-	} while (0)
+#define INIT_DELAYED_WORK(_work, _func)				\
+	__INIT_DELAYED_WORK(_work, _func, 0)
+
+#define INIT_DELAYED_WORK_ONSTACK(_work, _func)			\
+	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)
+
+#define INIT_DEFERRABLE_WORK(_work, _func)			\
+	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)
+
+#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)		\
+	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)
 
 /**
  * work_pending - Find out whether a work item is currently pending
@@ -278,10 +297,6 @@ enum {
  * system_long_wq is similar to system_wq but may host long running
  * works.  Queue flushing might take relatively long.
  *
- * system_nrt_wq is non-reentrant and guarantees that any given work
- * item is never executed in parallel by multiple CPUs.  Queue
- * flushing might take relatively long.
- *
  * system_unbound_wq is unbound workqueue.  Workers are not bound to
  * any specific CPU, not concurrency managed, and all queued works are
  * executed immediately as long as max_active limit is not reached and
@@ -289,16 +304,25 @@ enum {
  *
  * system_freezable_wq is equivalent to system_wq except that it's
  * freezable.
- *
- * system_nrt_freezable_wq is equivalent to system_nrt_wq except that
- * it's freezable.
  */
 extern struct workqueue_struct *system_wq;
 extern struct workqueue_struct *system_long_wq;
-extern struct workqueue_struct *system_nrt_wq;
 extern struct workqueue_struct *system_unbound_wq;
 extern struct workqueue_struct *system_freezable_wq;
-extern struct workqueue_struct *system_nrt_freezable_wq;
+
+static inline struct workqueue_struct * __deprecated __system_nrt_wq(void)
+{
+	return system_wq;
+}
+
+static inline struct workqueue_struct * __deprecated __system_nrt_freezable_wq(void)
+{
+	return system_freezable_wq;
+}
+
+/* equivlalent to system_wq and system_freezable_wq, deprecated */
+#define system_nrt_wq			__system_nrt_wq()
+#define system_nrt_freezable_wq	__system_nrt_freezable_wq()
 
 extern struct workqueue_struct *
 __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
@@ -321,22 +345,22 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
  * Pointer to the allocated workqueue on success, %NULL on failure.
  */
 #ifdef CONFIG_LOCKDEP
 #define alloc_workqueue(fmt, flags, max_active, args...)		\
 ({									\
 	static struct lock_class_key __key;				\
 	const char *__lock_name;					\
 									\
 	if (__builtin_constant_p(fmt))					\
 		__lock_name = (fmt);					\
 	else								\
 		__lock_name = #fmt;					\
 									\
 	__alloc_workqueue_key((fmt), (flags), (max_active),		\
 			      &__key, __lock_name, ##args);		\
 })
 #else
 #define alloc_workqueue(fmt, flags, max_active, args...)		\
 	__alloc_workqueue_key((fmt), (flags), (max_active),		\
 			      NULL, NULL, ##args)
 #endif
 
@@ -353,46 +377,50 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
  * RETURNS:
  * Pointer to the allocated workqueue on success, %NULL on failure.
  */
 #define alloc_ordered_workqueue(fmt, flags, args...)			\
 	alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args)
 
 #define create_workqueue(name)						\
 	alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
 #define create_freezable_workqueue(name)				\
 	alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
 #define create_singlethread_workqueue(name)				\
 	alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
-extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
-extern int queue_work_on(int cpu, struct workqueue_struct *wq,
+extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
 			struct work_struct *work);
-extern int queue_delayed_work(struct workqueue_struct *wq,
+extern bool queue_work(struct workqueue_struct *wq, struct work_struct *work);
+extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 			struct delayed_work *work, unsigned long delay);
-extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+extern bool queue_delayed_work(struct workqueue_struct *wq,
 			struct delayed_work *work, unsigned long delay);
+extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
+			struct delayed_work *dwork, unsigned long delay);
+extern bool mod_delayed_work(struct workqueue_struct *wq,
+			struct delayed_work *dwork, unsigned long delay);
 
 extern void flush_workqueue(struct workqueue_struct *wq);
 extern void drain_workqueue(struct workqueue_struct *wq);
 extern void flush_scheduled_work(void);
 
-extern int schedule_work(struct work_struct *work);
-extern int schedule_work_on(int cpu, struct work_struct *work);
-extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
-extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
-					unsigned long delay);
+extern bool schedule_work_on(int cpu, struct work_struct *work);
+extern bool schedule_work(struct work_struct *work);
+extern bool schedule_delayed_work_on(int cpu, struct delayed_work *work,
+				     unsigned long delay);
+extern bool schedule_delayed_work(struct delayed_work *work,
+				  unsigned long delay);
 extern int schedule_on_each_cpu(work_func_t func);
 extern int keventd_up(void);
 
 int execute_in_process_context(work_func_t fn, struct execute_work *);
 
 extern bool flush_work(struct work_struct *work);
-extern bool flush_work_sync(struct work_struct *work);
 extern bool cancel_work_sync(struct work_struct *work);
 
 extern bool flush_delayed_work(struct delayed_work *dwork);
-extern bool flush_delayed_work_sync(struct delayed_work *work);
+extern bool cancel_delayed_work(struct delayed_work *dwork);
 extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
 
 extern void workqueue_set_max_active(struct workqueue_struct *wq,
@@ -402,27 +430,11 @@ extern unsigned int work_cpu(struct work_struct *work);
 extern unsigned int work_busy(struct work_struct *work);
 
 /*
- * Kill off a pending schedule_delayed_work().  Note that the work callback
- * function may still be running on return from cancel_delayed_work(), unless
- * it returns 1 and the work doesn't re-arm itself.  Run flush_workqueue() or
- * cancel_work_sync() to wait on it.
- */
-static inline bool cancel_delayed_work(struct delayed_work *work)
-{
-	bool ret;
-
-	ret = del_timer_sync(&work->timer);
-	if (ret)
-		work_clear_pending(&work->work);
-	return ret;
-}
-
-/*
  * Like above, but uses del_timer() instead of del_timer_sync(). This means,
  * if it returns 0 the timer function may be running and the queueing is in
  * progress.
  */
-static inline bool __cancel_delayed_work(struct delayed_work *work)
+static inline bool __deprecated __cancel_delayed_work(struct delayed_work *work)
 {
 	bool ret;
 
@@ -432,6 +444,18 @@ static inline bool __cancel_delayed_work(struct delayed_work *work)
 	return ret;
 }
 
+/* used to be different but now identical to flush_work(), deprecated */
+static inline bool __deprecated flush_work_sync(struct work_struct *work)
+{
+	return flush_work(work);
+}
+
+/* used to be different but now identical to flush_delayed_work(), deprecated */
+static inline bool __deprecated flush_delayed_work_sync(struct delayed_work *dwork)
+{
+	return flush_delayed_work(dwork);
+}
+
+#ifndef CONFIG_SMP
+static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
+{
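As a second illustrative sketch (again not taken from the diff above; the work function and variable names are hypothetical), the renamed deferrable initializer and the now-redundant *_sync() flush introduced by this update could be used like this:

#include <linux/workqueue.h>

static void housekeeping_fn(struct work_struct *work)
{
	/* low-priority periodic housekeeping; fine if it runs a bit late */
}

/* formerly DECLARE_DEFERRED_WORK(); the timer is now deferrable and IRQ safe */
static DECLARE_DEFERRABLE_WORK(housekeeping_work, housekeeping_fn);

static void housekeeping_quiesce(void)
{
	/*
	 * flush_delayed_work_sync() is deprecated by this series; with all
	 * workqueues non-reentrant, flush_delayed_work() already waits for
	 * the last queued instance to finish executing.
	 */
	flush_delayed_work(&housekeeping_work);
}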