path: root/include/linux/workqueue.h
Diffstat (limited to 'include/linux/workqueue.h')
-rw-r--r--	include/linux/workqueue.h	217
1 file changed, 179 insertions(+), 38 deletions(-)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 9466e860d8c2..1ac11586a2f5 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -9,6 +9,7 @@
 #include <linux/linkage.h>
 #include <linux/bitops.h>
 #include <linux/lockdep.h>
+#include <linux/threads.h>
 #include <asm/atomic.h>
 
 struct workqueue_struct;
@@ -22,12 +23,61 @@ typedef void (*work_func_t)(struct work_struct *work);
  */
 #define work_data_bits(work) ((unsigned long *)(&(work)->data))
 
+enum {
+	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
+	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
+	WORK_STRUCT_CWQ_BIT	= 2,	/* data points to cwq */
+	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
+#ifdef CONFIG_DEBUG_OBJECTS_WORK
+	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
+	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
+#else
+	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
+#endif
+
+	WORK_STRUCT_COLOR_BITS	= 4,
+
+	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
+	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
+	WORK_STRUCT_CWQ		= 1 << WORK_STRUCT_CWQ_BIT,
+	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
+#ifdef CONFIG_DEBUG_OBJECTS_WORK
+	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
+#else
+	WORK_STRUCT_STATIC	= 0,
+#endif
+
+	/*
+	 * The last color is no color used for works which don't
+	 * participate in workqueue flushing.
+	 */
+	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
+	WORK_NO_COLOR		= WORK_NR_COLORS,
+
+	/* special cpu IDs */
+	WORK_CPU_UNBOUND	= NR_CPUS,
+	WORK_CPU_NONE		= NR_CPUS + 1,
+	WORK_CPU_LAST		= WORK_CPU_NONE,
+
+	/*
+	 * Reserve 7 bits off of cwq pointer w/ debugobjects turned
+	 * off.  This makes cwqs aligned to 256 bytes and allows 15
+	 * workqueue flush colors.
+	 */
+	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
+				  WORK_STRUCT_COLOR_BITS,
+
+	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
+	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
+	WORK_STRUCT_NO_CPU	= WORK_CPU_NONE << WORK_STRUCT_FLAG_BITS,
+
+	/* bit mask for work_busy() return values */
+	WORK_BUSY_PENDING	= 1 << 0,
+	WORK_BUSY_RUNNING	= 1 << 1,
+};
+
 struct work_struct {
 	atomic_long_t data;
-#define WORK_STRUCT_PENDING 0		/* T if work item pending execution */
-#define WORK_STRUCT_STATIC 1		/* static initializer (debugobjects) */
-#define WORK_STRUCT_FLAG_MASK (3UL)
-#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
 	struct list_head entry;
 	work_func_t func;
 #ifdef CONFIG_LOCKDEP
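With this change, work->data multiplexes the pending/linked flags, the flush color, and (when WORK_STRUCT_CWQ is set) a pointer to the per-CPU workqueue. For illustration, a minimal sketch of how the masks fit together — the helper name is hypothetical; kernel/workqueue.c keeps its own equivalent:

#include <linux/workqueue.h>

struct cpu_workqueue_struct;

/* sketch: recover the cwq pointer packed into work->data, if any */
static inline struct cpu_workqueue_struct *
sketch_work_cwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_CWQ)	/* low bits are flags + flush color... */
		return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
	return NULL;			/* ...otherwise data holds a CPU id */
}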
@@ -35,8 +85,9 @@ struct work_struct {
 #endif
 };
 
-#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(0)
-#define WORK_DATA_STATIC_INIT()	ATOMIC_LONG_INIT(2)
+#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU)
+#define WORK_DATA_STATIC_INIT()	\
+	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU | WORK_STRUCT_STATIC)
 
 struct delayed_work {
 	struct work_struct work;
@@ -76,12 +127,20 @@ struct execute_work {
 	.timer = TIMER_INITIALIZER(NULL, 0, 0),		\
 	}
 
+#define __DEFERRED_WORK_INITIALIZER(n, f) {			\
+	.work = __WORK_INITIALIZER((n).work, (f)),		\
+	.timer = TIMER_DEFERRED_INITIALIZER(NULL, 0, 0),	\
+	}
+
 #define DECLARE_WORK(n, f)					\
 	struct work_struct n = __WORK_INITIALIZER(n, f)
 
 #define DECLARE_DELAYED_WORK(n, f)				\
 	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
 
+#define DECLARE_DEFERRED_WORK(n, f)				\
+	struct delayed_work n = __DEFERRED_WORK_INITIALIZER(n, f)
+
 /*
  * initialize a work item's function pointer
  */
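__DEFERRED_WORK_INITIALIZER pairs the work item with a deferrable timer, so the delay does not wake an otherwise idle CPU. A minimal usage sketch (the sample_* names are hypothetical):

#include <linux/init.h>
#include <linux/workqueue.h>

static void sample_poll_fn(struct work_struct *work)
{
	/* periodic, power-friendly housekeeping */
}
static DECLARE_DEFERRED_WORK(sample_poll, sample_poll_fn);

static int __init sample_init(void)
{
	/* fires no earlier than one tick interval, without waking an idle CPU */
	schedule_delayed_work(&sample_poll, HZ);
	return 0;
}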
@@ -96,9 +155,14 @@ struct execute_work {
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
 extern void __init_work(struct work_struct *work, int onstack);
 extern void destroy_work_on_stack(struct work_struct *work);
+static inline unsigned int work_static(struct work_struct *work)
+{
+	return *work_data_bits(work) & WORK_STRUCT_STATIC;
+}
 #else
 static inline void __init_work(struct work_struct *work, int onstack) { }
 static inline void destroy_work_on_stack(struct work_struct *work) { }
+static inline unsigned int work_static(struct work_struct *work) { return 0; }
 #endif
 
 /*
@@ -134,7 +198,7 @@ static inline void destroy_work_on_stack(struct work_struct *work) { }
 		__INIT_WORK((_work), (_func), 0);		\
 	} while (0)
 
-#define INIT_WORK_ON_STACK(_work, _func)			\
+#define INIT_WORK_ONSTACK(_work, _func)				\
 	do {							\
 		__INIT_WORK((_work), (_func), 1);		\
 	} while (0)
@@ -145,9 +209,9 @@ static inline void destroy_work_on_stack(struct work_struct *work) { }
 		init_timer(&(_work)->timer);			\
 	} while (0)
 
-#define INIT_DELAYED_WORK_ON_STACK(_work, _func)		\
+#define INIT_DELAYED_WORK_ONSTACK(_work, _func)			\
 	do {							\
-		INIT_WORK_ON_STACK(&(_work)->work, (_func));	\
+		INIT_WORK_ONSTACK(&(_work)->work, (_func));	\
 		init_timer_on_stack(&(_work)->timer);		\
 	} while (0)
 
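The _ONSTACK variants tell debugobjects that the item lives on the stack; such an item must be flushed and destroyed before its frame is left. A sketch under that assumption (function names hypothetical):

#include <linux/workqueue.h>

static void stack_work_fn(struct work_struct *work)
{
	/* runs once in process context */
}

static void sample_run_and_wait(void)
{
	struct work_struct w;

	INIT_WORK_ONSTACK(&w, stack_work_fn);
	schedule_work(&w);
	flush_work(&w);			/* must finish before w leaves scope */
	destroy_work_on_stack(&w);
}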
@@ -162,7 +226,7 @@ static inline void destroy_work_on_stack(struct work_struct *work) { }
  * @work: The work item in question
  */
 #define work_pending(work)					\
-	test_bit(WORK_STRUCT_PENDING, work_data_bits(work))
+	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
 
 /**
  * delayed_work_pending - Find out whether a delayable work item is currently
@@ -177,16 +241,63 @@ static inline void destroy_work_on_stack(struct work_struct *work) { }
  * @work: The work item in question
  */
 #define work_clear_pending(work)				\
-	clear_bit(WORK_STRUCT_PENDING, work_data_bits(work))
+	clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
 
+/*
+ * Workqueue flags and constants.  For details, please refer to
+ * Documentation/workqueue.txt.
+ */
+enum {
+	WQ_NON_REENTRANT	= 1 << 0, /* guarantee non-reentrance */
+	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
+	WQ_FREEZEABLE		= 1 << 2, /* freeze during suspend */
+	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
+	WQ_HIGHPRI		= 1 << 4, /* high priority */
+	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
+
+	WQ_DYING		= 1 << 6, /* internal: workqueue is dying */
+	WQ_RESCUER		= 1 << 7, /* internal: workqueue has rescuer */
+
+	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
+	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
+	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
+};
+
+/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
+#define WQ_UNBOUND_MAX_ACTIVE	\
+	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)
+
+/*
+ * System-wide workqueues which are always present.
+ *
+ * system_wq is the one used by schedule[_delayed]_work[_on]().
+ * Multi-CPU multi-threaded.  There are users which expect relatively
+ * short queue flush time.  Don't queue works which can run for too
+ * long.
+ *
+ * system_long_wq is similar to system_wq but may host long running
+ * works.  Queue flushing might take relatively long.
+ *
+ * system_nrt_wq is non-reentrant and guarantees that any given work
+ * item is never executed in parallel by multiple CPUs.  Queue
+ * flushing might take relatively long.
+ *
+ * system_unbound_wq is unbound workqueue.  Workers are not bound to
+ * any specific CPU, not concurrency managed, and all queued works are
+ * executed immediately as long as max_active limit is not reached and
+ * resources are available.
+ */
+extern struct workqueue_struct *system_wq;
+extern struct workqueue_struct *system_long_wq;
+extern struct workqueue_struct *system_nrt_wq;
+extern struct workqueue_struct *system_unbound_wq;
 
 extern struct workqueue_struct *
-__create_workqueue_key(const char *name, int singlethread,
-		       int freezeable, int rt, struct lock_class_key *key,
-		       const char *lock_name);
+__alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
+		      struct lock_class_key *key, const char *lock_name);
 
 #ifdef CONFIG_LOCKDEP
-#define __create_workqueue(name, singlethread, freezeable, rt)	\
+#define alloc_workqueue(name, flags, max_active)		\
 ({								\
 	static struct lock_class_key __key;			\
 	const char *__lock_name;				\
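With the system-wide workqueues exported, callers can pick a queue matching a work item's runtime profile instead of spawning their own. A hedged sketch (sample_* names hypothetical):

#include <linux/workqueue.h>

static void sample_slow_fn(struct work_struct *work)
{
	/* may run for a while */
}
static DECLARE_WORK(sample_slow_work, sample_slow_fn);

static void sample_kick(void)
{
	/* long-running item: use system_long_wq so system_wq flushes stay short */
	queue_work(system_long_wq, &sample_slow_work);
}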
@@ -196,20 +307,38 @@ __create_workqueue_key(const char *name, int singlethread,
 	else						\
 		__lock_name = #name;			\
 							\
-	__create_workqueue_key((name), (singlethread),	\
-			       (freezeable), (rt), &__key, \
-			       __lock_name);		\
+	__alloc_workqueue_key((name), (flags), (max_active),	\
+			      &__key, __lock_name);		\
 })
 #else
-#define __create_workqueue(name, singlethread, freezeable, rt)	\
-	__create_workqueue_key((name), (singlethread), (freezeable), (rt), \
-			       NULL, NULL)
+#define alloc_workqueue(name, flags, max_active)		\
+	__alloc_workqueue_key((name), (flags), (max_active), NULL, NULL)
 #endif
 
-#define create_workqueue(name) __create_workqueue((name), 0, 0, 0)
-#define create_rt_workqueue(name) __create_workqueue((name), 0, 0, 1)
-#define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1, 0)
-#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0, 0)
+/**
+ * alloc_ordered_workqueue - allocate an ordered workqueue
+ * @name: name of the workqueue
+ * @flags: WQ_* flags (only WQ_FREEZEABLE and WQ_MEM_RECLAIM are meaningful)
+ *
+ * Allocate an ordered workqueue.  An ordered workqueue executes at
+ * most one work item at any given time in the queued order.  They are
+ * implemented as unbound workqueues with @max_active of one.
+ *
+ * RETURNS:
+ * Pointer to the allocated workqueue on success, %NULL on failure.
+ */
+static inline struct workqueue_struct *
+alloc_ordered_workqueue(const char *name, unsigned int flags)
+{
+	return alloc_workqueue(name, WQ_UNBOUND | flags, 1);
+}
+
+#define create_workqueue(name)					\
+	alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
+#define create_freezeable_workqueue(name)			\
+	alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
+#define create_singlethread_workqueue(name)			\
+	alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
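create_workqueue() and friends are now thin wrappers; new code can call alloc_workqueue() directly with the WQ_* flags above. A sketch with hypothetical names:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *sample_wq;

static int __init sample_wq_init(void)
{
	/* reclaim-safe, high-priority, at most 16 in-flight items per CPU */
	sample_wq = alloc_workqueue("sample", WQ_MEM_RECLAIM | WQ_HIGHPRI, 16);
	if (!sample_wq)
		return -ENOMEM;
	return 0;
}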
@@ -223,7 +352,6 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 
 extern void flush_workqueue(struct workqueue_struct *wq);
 extern void flush_scheduled_work(void);
-extern void flush_delayed_work(struct delayed_work *work);
 
 extern int schedule_work(struct work_struct *work);
 extern int schedule_work_on(int cpu, struct work_struct *work);
@@ -231,15 +359,23 @@ extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay)
 extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
 					unsigned long delay);
 extern int schedule_on_each_cpu(work_func_t func);
-extern int current_is_keventd(void);
 extern int keventd_up(void);
 
-extern void init_workqueues(void);
 int execute_in_process_context(work_func_t fn, struct execute_work *);
 
-extern int flush_work(struct work_struct *work);
+extern bool flush_work(struct work_struct *work);
+extern bool flush_work_sync(struct work_struct *work);
+extern bool cancel_work_sync(struct work_struct *work);
 
-extern int cancel_work_sync(struct work_struct *work);
+extern bool flush_delayed_work(struct delayed_work *dwork);
+extern bool flush_delayed_work_sync(struct delayed_work *work);
+extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
+
+extern void workqueue_set_max_active(struct workqueue_struct *wq,
+				     int max_active);
+extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
+extern unsigned int work_cpu(struct work_struct *work);
+extern unsigned int work_busy(struct work_struct *work);
 
 /*
  * Kill off a pending schedule_delayed_work().  Note that the work callback
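The flush/cancel split above separates "wait for what is queued" from "make sure it never runs again". A teardown sketch, reusing the hypothetical sample_wq and sample_slow_work from the sketches above:

#include <linux/kernel.h>
#include <linux/workqueue.h>

static void sample_wq_exit(void)
{
	/* WORK_BUSY_* bits are a racy snapshot; use only for diagnostics */
	if (work_busy(&sample_slow_work) & WORK_BUSY_RUNNING)
		pr_debug("sample work still running\n");

	cancel_work_sync(&sample_slow_work);	/* cancel and wait if running */
	destroy_workqueue(sample_wq);
}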
@@ -247,9 +383,9 @@ extern int cancel_work_sync(struct work_struct *work);
  * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
  * cancel_work_sync() to wait on it.
  */
-static inline int cancel_delayed_work(struct delayed_work *work)
+static inline bool cancel_delayed_work(struct delayed_work *work)
 {
-	int ret;
+	bool ret;
 
 	ret = del_timer_sync(&work->timer);
 	if (ret)
@@ -262,9 +398,9 @@ static inline int cancel_delayed_work(struct delayed_work *work)
  * if it returns 0 the timer function may be running and the queueing is in
  * progress.
  */
-static inline int __cancel_delayed_work(struct delayed_work *work)
+static inline bool __cancel_delayed_work(struct delayed_work *work)
 {
-	int ret;
+	bool ret;
 
 	ret = del_timer(&work->timer);
 	if (ret)
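Per the comment above, cancel_delayed_work() only kills the timer; if the item has already fired, the caller still has to wait for it. The documented pattern, sketched with a hypothetical sample_dwork:

static struct delayed_work sample_dwork;	/* INIT_DELAYED_WORK()'d elsewhere */

static void sample_stop(void)
{
	if (!cancel_delayed_work(&sample_dwork))
		cancel_work_sync(&sample_dwork.work); /* timer already fired */
}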
@@ -272,10 +408,8 @@ static inline int __cancel_delayed_work(struct delayed_work *work)
 	return ret;
 }
 
-extern int cancel_delayed_work_sync(struct delayed_work *work);
-
 /* Obsolete. use cancel_delayed_work_sync() */
-static inline
+static inline __deprecated
 void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
 					struct delayed_work *work)
 {
@@ -283,7 +417,7 @@ void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
 }
 
 /* Obsolete. use cancel_delayed_work_sync() */
-static inline
+static inline __deprecated
 void cancel_rearming_delayed_work(struct delayed_work *work)
 {
 	cancel_delayed_work_sync(work);
@@ -297,4 +431,11 @@ static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
 #else
 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
 #endif /* CONFIG_SMP */
+
+#ifdef CONFIG_FREEZER
+extern void freeze_workqueues_begin(void);
+extern bool freeze_workqueues_busy(void);
+extern void thaw_workqueues(void);
+#endif /* CONFIG_FREEZER */
+
 #endif
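The CONFIG_FREEZER hooks are meant for the PM core rather than drivers. A simplified sketch of the sequence (the real code in kernel/power adds timeouts and abort handling; the function name is hypothetical):

#include <linux/delay.h>
#include <linux/workqueue.h>

static void sample_freeze_sequence(void)
{
	freeze_workqueues_begin();	/* stop picking up new work items */
	while (freeze_workqueues_busy())
		msleep(10);		/* some items are still executing */
	/* ... system image is written out ... */
	thaw_workqueues();		/* resume normal execution */
}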