author		Grant Likely <grant.likely@secretlab.ca>	2010-12-30 00:20:30 -0500
committer	Grant Likely <grant.likely@secretlab.ca>	2010-12-30 00:21:47 -0500
commit		d392da5207352f09030e95d9ea335a4225667ec0 (patch)
tree		7d6cd1932afcad0a5619a5c504a6d93ca318187c /include/linux/workqueue.h
parent		e39d5ef678045d61812c1401f04fe8edb14d6359 (diff)
parent		387c31c7e5c9805b0aef8833d1731a5fe7bdea14 (diff)
Merge v2.6.37-rc8 into powerpc/next
Diffstat (limited to 'include/linux/workqueue.h')
-rw-r--r--	include/linux/workqueue.h	79
1 files changed, 52 insertions, 27 deletions
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 4f9d277bcd9a..0c0771f06bfa 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -25,18 +25,20 @@ typedef void (*work_func_t)(struct work_struct *work);
 
 enum {
 	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
-	WORK_STRUCT_CWQ_BIT	= 1,	/* data points to cwq */
-	WORK_STRUCT_LINKED_BIT	= 2,	/* next work is linked to this one */
+	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
+	WORK_STRUCT_CWQ_BIT	= 2,	/* data points to cwq */
+	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
-	WORK_STRUCT_STATIC_BIT	= 3,	/* static initializer (debugobjects) */
-	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
+	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
+	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
 #else
-	WORK_STRUCT_COLOR_SHIFT	= 3,	/* color for workqueue flushing */
+	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
 #endif
 
 	WORK_STRUCT_COLOR_BITS	= 4,
 
 	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
+	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
 	WORK_STRUCT_CWQ		= 1 << WORK_STRUCT_CWQ_BIT,
 	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
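
The hunk above renumbers the work_struct flag bits to make room for the new WORK_STRUCT_DELAYED bit, which marks a delayed work item whose queueing is still pending on its timer. As a hedged, illustrative sketch (not part of the patch; my_work and my_fn are made-up names), the PENDING bit is what the existing work_pending() helper in this header tests before a caller queues an item:

#include <linux/workqueue.h>

static void my_fn(struct work_struct *work)
{
	/* deferred processing goes here */
}

static DECLARE_WORK(my_work, my_fn);

static void kick_my_work(void)
{
	/* work_pending() reads WORK_STRUCT_PENDING_BIT from work->data */
	if (!work_pending(&my_work))
		schedule_work(&my_work);
}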
@@ -59,8 +61,8 @@ enum {
 
 	/*
 	 * Reserve 7 bits off of cwq pointer w/ debugobjects turned
-	 * off. This makes cwqs aligned to 128 bytes which isn't too
-	 * excessive while allowing 15 workqueue flush colors.
+	 * off. This makes cwqs aligned to 256 bytes and allows 15
+	 * workqueue flush colors.
 	 */
 	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
 				  WORK_STRUCT_COLOR_BITS,
@@ -188,7 +190,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 		__INIT_WORK((_work), (_func), 0);		\
 	} while (0)
 
-#define INIT_WORK_ON_STACK(_work, _func)			\
+#define INIT_WORK_ONSTACK(_work, _func)				\
 	do {							\
 		__INIT_WORK((_work), (_func), 1);		\
 	} while (0)
@@ -199,9 +201,9 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 		init_timer(&(_work)->timer);			\
 	} while (0)
 
-#define INIT_DELAYED_WORK_ON_STACK(_work, _func)		\
+#define INIT_DELAYED_WORK_ONSTACK(_work, _func)			\
 	do {							\
-		INIT_WORK_ON_STACK(&(_work)->work, (_func));	\
+		INIT_WORK_ONSTACK(&(_work)->work, (_func));	\
 		init_timer_on_stack(&(_work)->timer);		\
 	} while (0)
 
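
The two hunks above only rename INIT_WORK_ON_STACK and INIT_DELAYED_WORK_ON_STACK to INIT_WORK_ONSTACK and INIT_DELAYED_WORK_ONSTACK; the expansions are unchanged. A hedged sketch of the renamed initializer in use (sync_fn and run_sync_work are illustrative):

#include <linux/workqueue.h>

static void sync_fn(struct work_struct *work)
{
	/* runs once; the caller below waits for completion */
}

static void run_sync_work(void)
{
	struct work_struct work;

	INIT_WORK_ONSTACK(&work, sync_fn);
	schedule_work(&work);
	flush_work(&work);		/* must finish before 'work' leaves the stack */
	destroy_work_on_stack(&work);	/* debugobjects bookkeeping */
}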
@@ -233,14 +235,21 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 #define work_clear_pending(work) \
 	clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
 
+/*
+ * Workqueue flags and constants. For details, please refer to
+ * Documentation/workqueue.txt.
+ */
 enum {
 	WQ_NON_REENTRANT	= 1 << 0, /* guarantee non-reentrance */
 	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
 	WQ_FREEZEABLE		= 1 << 2, /* freeze during suspend */
-	WQ_RESCUER		= 1 << 3, /* has an rescue worker */
+	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
 	WQ_HIGHPRI		= 1 << 4, /* high priority */
 	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu instensive workqueue */
 
+	WQ_DYING		= 1 << 6, /* internal: workqueue is dying */
+	WQ_RESCUER		= 1 << 7, /* internal: workqueue has rescuer */
+
 	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
 	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
 	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
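
At the API level the hunk above replaces WQ_RESCUER with WQ_MEM_RECLAIM, while WQ_RESCUER and the new WQ_DYING move to internal-only bits. A hedged sketch of a caller requesting a reclaim-safe queue with the renamed flag (the queue name and variables are made up):

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static int my_driver_init(void)
{
	/* WQ_MEM_RECLAIM guarantees a rescuer so the queue keeps making
	 * progress even when new worker threads cannot be created. */
	my_wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!my_wq)
		return -ENOMEM;
	return 0;
}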
@@ -298,12 +307,30 @@ __alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
 	__alloc_workqueue_key((name), (flags), (max_active), NULL, NULL)
 #endif
 
+/**
+ * alloc_ordered_workqueue - allocate an ordered workqueue
+ * @name: name of the workqueue
+ * @flags: WQ_* flags (only WQ_FREEZEABLE and WQ_MEM_RECLAIM are meaningful)
+ *
+ * Allocate an ordered workqueue. An ordered workqueue executes at
+ * most one work item at any given time in the queued order. They are
+ * implemented as unbound workqueues with @max_active of one.
+ *
+ * RETURNS:
+ * Pointer to the allocated workqueue on success, %NULL on failure.
+ */
+static inline struct workqueue_struct *
+alloc_ordered_workqueue(const char *name, unsigned int flags)
+{
+	return alloc_workqueue(name, WQ_UNBOUND | flags, 1);
+}
+
 #define create_workqueue(name)					\
-	alloc_workqueue((name), WQ_RESCUER, 1)
+	alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
 #define create_freezeable_workqueue(name)			\
-	alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_RESCUER, 1)
+	alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
 #define create_singlethread_workqueue(name)			\
-	alloc_workqueue((name), WQ_UNBOUND | WQ_RESCUER, 1)
+	alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
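
The new alloc_ordered_workqueue() above packages the common "one item at a time, in order" case so callers no longer spell out WQ_UNBOUND and a max_active of one themselves. A hedged usage sketch (names are illustrative):

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *ordered_wq;

static int setup_ordered_queue(void)
{
	/* At most one work item executes at any time, in queueing order. */
	ordered_wq = alloc_ordered_workqueue("my_ordered", WQ_MEM_RECLAIM);
	if (!ordered_wq)
		return -ENOMEM;
	return 0;
}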
@@ -317,7 +344,6 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 
 extern void flush_workqueue(struct workqueue_struct *wq);
 extern void flush_scheduled_work(void);
-extern void flush_delayed_work(struct delayed_work *work);
 
 extern int schedule_work(struct work_struct *work);
 extern int schedule_work_on(int cpu, struct work_struct *work);
@@ -329,8 +355,13 @@ extern int keventd_up(void);
 
 int execute_in_process_context(work_func_t fn, struct execute_work *);
 
-extern int flush_work(struct work_struct *work);
-extern int cancel_work_sync(struct work_struct *work);
+extern bool flush_work(struct work_struct *work);
+extern bool flush_work_sync(struct work_struct *work);
+extern bool cancel_work_sync(struct work_struct *work);
+
+extern bool flush_delayed_work(struct delayed_work *dwork);
+extern bool flush_delayed_work_sync(struct delayed_work *work);
+extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
 
 extern void workqueue_set_max_active(struct workqueue_struct *wq,
 				     int max_active);
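
The hunk above retypes the flush/cancel helpers to return bool and groups the delayed-work variants here: flush_delayed_work() moves from its earlier spot, cancel_delayed_work_sync() moves up from the bottom of the header, and flush_work_sync()/flush_delayed_work_sync() are new. A hedged teardown sketch using the relocated prototype (the polling work item is illustrative):

#include <linux/kernel.h>
#include <linux/workqueue.h>

static void my_poll_fn(struct work_struct *work)
{
	/* periodic polling body */
}

static DECLARE_DELAYED_WORK(my_poll, my_poll_fn);

static void my_driver_exit(void)
{
	/* Returns true if a pending or running instance was cancelled or
	 * waited for, false if the item was already idle. */
	if (cancel_delayed_work_sync(&my_poll))
		pr_debug("my_poll was still pending at exit\n");
}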
@@ -344,9 +375,9 @@ extern unsigned int work_busy(struct work_struct *work);
  * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
  * cancel_work_sync() to wait on it.
  */
-static inline int cancel_delayed_work(struct delayed_work *work)
+static inline bool cancel_delayed_work(struct delayed_work *work)
 {
-	int ret;
+	bool ret;
 
 	ret = del_timer_sync(&work->timer);
 	if (ret)
@@ -359,9 +390,9 @@ static inline int cancel_delayed_work(struct delayed_work *work)
  * if it returns 0 the timer function may be running and the queueing is in
  * progress.
  */
-static inline int __cancel_delayed_work(struct delayed_work *work)
+static inline bool __cancel_delayed_work(struct delayed_work *work)
 {
-	int ret;
+	bool ret;
 
 	ret = del_timer(&work->timer);
 	if (ret)
@@ -369,8 +400,6 @@ static inline int __cancel_delayed_work(struct delayed_work *work)
 	return ret;
 }
 
-extern int cancel_delayed_work_sync(struct delayed_work *work);
-
 /* Obsolete. use cancel_delayed_work_sync() */
 static inline
 void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
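
The three hunks above change the inline helpers' return type from int to bool and drop the cancel_delayed_work_sync() prototype that now lives with the other bool declarations; the comments' caveat still applies: cancel_delayed_work() only stops the timer, so an item whose timer already fired may still be queued or running. A hedged sketch of the pattern the comment suggests, which is essentially what cancel_delayed_work_sync() provides in a single call (stop_my_dwork is a made-up helper):

#include <linux/workqueue.h>

static void stop_my_dwork(struct delayed_work *dwork)
{
	cancel_delayed_work(dwork);	/* stop the timer if it has not fired yet */
	cancel_work_sync(&dwork->work);	/* wait out a queued or running instance */
}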
@@ -401,8 +430,4 @@ extern bool freeze_workqueues_busy(void);
 extern void thaw_workqueues(void);
 #endif		/* CONFIG_FREEZER */
 
-#ifdef CONFIG_LOCKDEP
-int in_workqueue_context(struct workqueue_struct *wq);
-#endif
-
 #endif