aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/workqueue.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux/workqueue.h')
-rw-r--r--include/linux/workqueue.h51
1 files changed, 34 insertions, 17 deletions
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 25e02c941bac..070bb7a88936 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -243,11 +243,12 @@ enum {
243 WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */ 243 WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */
244 WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ 244 WQ_UNBOUND = 1 << 1, /* not bound to any cpu */
245 WQ_FREEZEABLE = 1 << 2, /* freeze during suspend */ 245 WQ_FREEZEABLE = 1 << 2, /* freeze during suspend */
246 WQ_RESCUER = 1 << 3, /* has a rescue worker */ 246 WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */
247 WQ_HIGHPRI = 1 << 4, /* high priority */ 247 WQ_HIGHPRI = 1 << 4, /* high priority */
248 WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */ 248 WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */
249 249
250 WQ_DYING = 1 << 6, /* internal: workqueue is dying */ 250 WQ_DYING = 1 << 6, /* internal: workqueue is dying */
251 WQ_RESCUER = 1 << 7, /* internal: workqueue has rescuer */
251 252
252 WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ 253 WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
253 WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */ 254 WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
@@ -306,12 +307,30 @@ __alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
306 __alloc_workqueue_key((name), (flags), (max_active), NULL, NULL) 307 __alloc_workqueue_key((name), (flags), (max_active), NULL, NULL)
307#endif 308#endif
308 309
310/**
311 * alloc_ordered_workqueue - allocate an ordered workqueue
312 * @name: name of the workqueue
313 * @flags: WQ_* flags (only WQ_FREEZEABLE and WQ_MEM_RECLAIM are meaningful)
314 *
315 * Allocate an ordered workqueue. An ordered workqueue executes at
316 * most one work item at any given time in the queued order. They are
317 * implemented as unbound workqueues with @max_active of one.
318 *
319 * RETURNS:
320 * Pointer to the allocated workqueue on success, %NULL on failure.
321 */
322static inline struct workqueue_struct *
323alloc_ordered_workqueue(const char *name, unsigned int flags)
324{
325 return alloc_workqueue(name, WQ_UNBOUND | flags, 1);
326}
327
309#define create_workqueue(name) \ 328#define create_workqueue(name) \
310 alloc_workqueue((name), WQ_RESCUER, 1) 329 alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
311#define create_freezeable_workqueue(name) \ 330#define create_freezeable_workqueue(name) \
312 alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_RESCUER, 1) 331 alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
313#define create_singlethread_workqueue(name) \ 332#define create_singlethread_workqueue(name) \
314 alloc_workqueue((name), WQ_UNBOUND | WQ_RESCUER, 1) 333 alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
315 334
316extern void destroy_workqueue(struct workqueue_struct *wq); 335extern void destroy_workqueue(struct workqueue_struct *wq);
317 336
@@ -325,7 +344,6 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
325 344
326extern void flush_workqueue(struct workqueue_struct *wq); 345extern void flush_workqueue(struct workqueue_struct *wq);
327extern void flush_scheduled_work(void); 346extern void flush_scheduled_work(void);
328extern void flush_delayed_work(struct delayed_work *work);
329 347
330extern int schedule_work(struct work_struct *work); 348extern int schedule_work(struct work_struct *work);
331extern int schedule_work_on(int cpu, struct work_struct *work); 349extern int schedule_work_on(int cpu, struct work_struct *work);
@@ -337,8 +355,13 @@ extern int keventd_up(void);
337 355
338int execute_in_process_context(work_func_t fn, struct execute_work *); 356int execute_in_process_context(work_func_t fn, struct execute_work *);
339 357
340extern int flush_work(struct work_struct *work); 358extern bool flush_work(struct work_struct *work);
341extern int cancel_work_sync(struct work_struct *work); 359extern bool flush_work_sync(struct work_struct *work);
360extern bool cancel_work_sync(struct work_struct *work);
361
362extern bool flush_delayed_work(struct delayed_work *dwork);
363extern bool flush_delayed_work_sync(struct delayed_work *work);
364extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
342 365
343extern void workqueue_set_max_active(struct workqueue_struct *wq, 366extern void workqueue_set_max_active(struct workqueue_struct *wq,
344 int max_active); 367 int max_active);
@@ -352,9 +375,9 @@ extern unsigned int work_busy(struct work_struct *work);
352 * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or 375 * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
353 * cancel_work_sync() to wait on it. 376 * cancel_work_sync() to wait on it.
354 */ 377 */
355static inline int cancel_delayed_work(struct delayed_work *work) 378static inline bool cancel_delayed_work(struct delayed_work *work)
356{ 379{
357 int ret; 380 bool ret;
358 381
359 ret = del_timer_sync(&work->timer); 382 ret = del_timer_sync(&work->timer);
360 if (ret) 383 if (ret)
@@ -367,9 +390,9 @@ static inline int cancel_delayed_work(struct delayed_work *work)
367 * if it returns 0 the timer function may be running and the queueing is in 390 * if it returns 0 the timer function may be running and the queueing is in
368 * progress. 391 * progress.
369 */ 392 */
370static inline int __cancel_delayed_work(struct delayed_work *work) 393static inline bool __cancel_delayed_work(struct delayed_work *work)
371{ 394{
372 int ret; 395 bool ret;
373 396
374 ret = del_timer(&work->timer); 397 ret = del_timer(&work->timer);
375 if (ret) 398 if (ret)
@@ -377,8 +400,6 @@ static inline int __cancel_delayed_work(struct delayed_work *work)
377 return ret; 400 return ret;
378} 401}
379 402
380extern int cancel_delayed_work_sync(struct delayed_work *work);
381
382/* Obsolete. use cancel_delayed_work_sync() */ 403/* Obsolete. use cancel_delayed_work_sync() */
383static inline 404static inline
384void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq, 405void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
@@ -409,8 +430,4 @@ extern bool freeze_workqueues_busy(void);
409extern void thaw_workqueues(void); 430extern void thaw_workqueues(void);
410#endif /* CONFIG_FREEZER */ 431#endif /* CONFIG_FREEZER */
411 432
412#ifdef CONFIG_LOCKDEP
413int in_workqueue_context(struct workqueue_struct *wq);
414#endif
415
416#endif 433#endif