path: root/include
author		Linus Torvalds <torvalds@linux-foundation.org>	2010-10-22 20:13:10 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-10-22 20:13:10 -0400
commit		91b745016c12d440386c40fb76ab69c8e08cbc06 (patch)
tree		881a26a43b3ee924c0979a54b38c7a87d79d7f1a /include
parent		04cc69768e7d1f40d98b79d23d203674553b4da2 (diff)
parent		daaae6b010ac0f60c9c35e481589966f9f1fcc22 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: remove in_workqueue_context()
  workqueue: Clarify that schedule_on_each_cpu is synchronous
  memory_hotplug: drop spurious calls to flush_scheduled_work()
  shpchp: update workqueue usage
  pciehp: update workqueue usage
  isdn/eicon: don't call flush_scheduled_work() from diva_os_remove_soft_isr()
  workqueue: add and use WQ_MEM_RECLAIM flag
  workqueue: fix HIGHPRI handling in keep_working()
  workqueue: add queue_work and activate_work trace points
  workqueue: prepare for more tracepoints
  workqueue: implement flush[_delayed]_work_sync()
  workqueue: factor out start_flush_work()
  workqueue: cleanup flush/cancel functions
  workqueue: implement alloc_ordered_workqueue()

Fix up trivial conflict in fs/gfs2/main.c as per Tejun
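For orientation, a minimal usage sketch (not part of this commit) of the two user-visible additions the merge brings to <linux/workqueue.h>: the WQ_MEM_RECLAIM flag, which replaces WQ_RESCUER in the public API, and alloc_ordered_workqueue(). The example_* names are hypothetical driver code.

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_io_wq;		/* hypothetical */
static struct workqueue_struct *example_ordered_wq;	/* hypothetical */

static int example_setup(void)
{
	/*
	 * WQ_MEM_RECLAIM marks a workqueue that may be used during
	 * memory reclaim; it gets a rescuer thread so it can make
	 * forward progress even when new workers cannot be allocated.
	 */
	example_io_wq = alloc_workqueue("example_io", WQ_MEM_RECLAIM, 1);
	if (!example_io_wq)
		return -ENOMEM;

	/*
	 * An ordered workqueue executes at most one work item at a
	 * time, in queueing order -- per the new helper, an unbound
	 * workqueue with max_active of one.
	 */
	example_ordered_wq = alloc_ordered_workqueue("example_ordered", 0);
	if (!example_ordered_wq) {
		destroy_workqueue(example_io_wq);
		return -ENOMEM;
	}
	return 0;
}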
Diffstat (limited to 'include')
-rw-r--r--	include/linux/workqueue.h	51
-rw-r--r--	include/trace/events/workqueue.h	77
2 files changed, 102 insertions, 26 deletions
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 25e02c941bac..070bb7a88936 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -243,11 +243,12 @@ enum {
 	WQ_NON_REENTRANT	= 1 << 0, /* guarantee non-reentrance */
 	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
 	WQ_FREEZEABLE		= 1 << 2, /* freeze during suspend */
-	WQ_RESCUER		= 1 << 3, /* has an rescue worker */
+	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
 	WQ_HIGHPRI		= 1 << 4, /* high priority */
 	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu instensive workqueue */
 
 	WQ_DYING		= 1 << 6, /* internal: workqueue is dying */
+	WQ_RESCUER		= 1 << 7, /* internal: workqueue has rescuer */
 
 	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
 	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
@@ -306,12 +307,30 @@ __alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
 	__alloc_workqueue_key((name), (flags), (max_active), NULL, NULL)
 #endif
 
+/**
+ * alloc_ordered_workqueue - allocate an ordered workqueue
+ * @name: name of the workqueue
+ * @flags: WQ_* flags (only WQ_FREEZEABLE and WQ_MEM_RECLAIM are meaningful)
+ *
+ * Allocate an ordered workqueue.  An ordered workqueue executes at
+ * most one work item at any given time in the queued order.  They are
+ * implemented as unbound workqueues with @max_active of one.
+ *
+ * RETURNS:
+ * Pointer to the allocated workqueue on success, %NULL on failure.
+ */
+static inline struct workqueue_struct *
+alloc_ordered_workqueue(const char *name, unsigned int flags)
+{
+	return alloc_workqueue(name, WQ_UNBOUND | flags, 1);
+}
+
 #define create_workqueue(name)					\
-	alloc_workqueue((name), WQ_RESCUER, 1)
+	alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
 #define create_freezeable_workqueue(name)			\
-	alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_RESCUER, 1)
+	alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
 #define create_singlethread_workqueue(name)			\
-	alloc_workqueue((name), WQ_UNBOUND | WQ_RESCUER, 1)
+	alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
@@ -325,7 +344,6 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 
 extern void flush_workqueue(struct workqueue_struct *wq);
 extern void flush_scheduled_work(void);
-extern void flush_delayed_work(struct delayed_work *work);
 
 extern int schedule_work(struct work_struct *work);
 extern int schedule_work_on(int cpu, struct work_struct *work);
@@ -337,8 +355,13 @@ extern int keventd_up(void);
 
 int execute_in_process_context(work_func_t fn, struct execute_work *);
 
-extern int flush_work(struct work_struct *work);
-extern int cancel_work_sync(struct work_struct *work);
+extern bool flush_work(struct work_struct *work);
+extern bool flush_work_sync(struct work_struct *work);
+extern bool cancel_work_sync(struct work_struct *work);
+
+extern bool flush_delayed_work(struct delayed_work *dwork);
+extern bool flush_delayed_work_sync(struct delayed_work *work);
+extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
 
 extern void workqueue_set_max_active(struct workqueue_struct *wq,
 				     int max_active);
@@ -352,9 +375,9 @@ extern unsigned int work_busy(struct work_struct *work);
  * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
  * cancel_work_sync() to wait on it.
  */
-static inline int cancel_delayed_work(struct delayed_work *work)
+static inline bool cancel_delayed_work(struct delayed_work *work)
 {
-	int ret;
+	bool ret;
 
 	ret = del_timer_sync(&work->timer);
 	if (ret)
@@ -367,9 +390,9 @@ static inline int cancel_delayed_work(struct delayed_work *work)
  * if it returns 0 the timer function may be running and the queueing is in
  * progress.
  */
-static inline int __cancel_delayed_work(struct delayed_work *work)
+static inline bool __cancel_delayed_work(struct delayed_work *work)
 {
-	int ret;
+	bool ret;
 
 	ret = del_timer(&work->timer);
 	if (ret)
@@ -377,8 +400,6 @@ static inline int __cancel_delayed_work(struct delayed_work *work)
 	return ret;
 }
 
-extern int cancel_delayed_work_sync(struct delayed_work *work);
-
 /* Obsolete. use cancel_delayed_work_sync() */
 static inline
 void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
@@ -409,8 +430,4 @@ extern bool freeze_workqueues_busy(void);
 extern void thaw_workqueues(void);
 #endif /* CONFIG_FREEZER */
 
-#ifdef CONFIG_LOCKDEP
-int in_workqueue_context(struct workqueue_struct *wq);
-#endif
-
 #endif
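An illustrative aside (not from the patch): with the conversion above, the flush/cancel helpers return bool and flush_delayed_work_sync() joins the family. A caller might use them as sketched here; example_teardown() and its dwork argument are hypothetical names.

static void example_teardown(struct delayed_work *dwork)
{
	/*
	 * true means the delayed work was pending (timer armed or work
	 * queued) and has been cancelled; false means it was idle.
	 */
	if (cancel_delayed_work_sync(dwork))
		pr_debug("example: cancelled pending delayed work\n");

	/*
	 * To run a pending item to completion instead of cancelling
	 * it, the new flush_delayed_work_sync(dwork) can be used.
	 */
}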
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
index 49682d7e9d60..7d497291c85d 100644
--- a/include/trace/events/workqueue.h
+++ b/include/trace/events/workqueue.h
@@ -7,38 +7,83 @@
 #include <linux/tracepoint.h>
 #include <linux/workqueue.h>
 
+DECLARE_EVENT_CLASS(workqueue_work,
+
+	TP_PROTO(struct work_struct *work),
+
+	TP_ARGS(work),
+
+	TP_STRUCT__entry(
+		__field( void *,	work	)
+	),
+
+	TP_fast_assign(
+		__entry->work		= work;
+	),
+
+	TP_printk("work struct %p", __entry->work)
+);
+
 /**
- * workqueue_execute_start - called immediately before the workqueue callback
+ * workqueue_queue_work - called when a work gets queued
+ * @req_cpu:	the requested cpu
+ * @cwq:	pointer to struct cpu_workqueue_struct
  * @work:	pointer to struct work_struct
  *
- * Allows to track workqueue execution.
+ * This event occurs when a work is queued immediately or once a
+ * delayed work is actually queued on a workqueue (ie: once the delay
+ * has been reached).
  */
-TRACE_EVENT(workqueue_execute_start,
+TRACE_EVENT(workqueue_queue_work,
 
-	TP_PROTO(struct work_struct *work),
+	TP_PROTO(unsigned int req_cpu, struct cpu_workqueue_struct *cwq,
+		 struct work_struct *work),
 
-	TP_ARGS(work),
+	TP_ARGS(req_cpu, cwq, work),
 
 	TP_STRUCT__entry(
 		__field( void *,	work	)
 		__field( void *,	function)
+		__field( void *,	workqueue)
+		__field( unsigned int,	req_cpu	)
+		__field( unsigned int,	cpu	)
 	),
 
 	TP_fast_assign(
 		__entry->work		= work;
 		__entry->function	= work->func;
+		__entry->workqueue	= cwq->wq;
+		__entry->req_cpu	= req_cpu;
+		__entry->cpu		= cwq->gcwq->cpu;
 	),
 
-	TP_printk("work struct %p: function %pf", __entry->work, __entry->function)
+	TP_printk("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u",
+		  __entry->work, __entry->function, __entry->workqueue,
+		  __entry->req_cpu, __entry->cpu)
 );
 
 /**
- * workqueue_execute_end - called immediately before the workqueue callback
+ * workqueue_activate_work - called when a work gets activated
+ * @work:	pointer to struct work_struct
+ *
+ * This event occurs when a queued work is put on the active queue,
+ * which happens immediately after queueing unless @max_active limit
+ * is reached.
+ */
+DEFINE_EVENT(workqueue_work, workqueue_activate_work,
+
+	TP_PROTO(struct work_struct *work),
+
+	TP_ARGS(work)
+);
+
+/**
+ * workqueue_execute_start - called immediately before the workqueue callback
  * @work:	pointer to struct work_struct
  *
  * Allows to track workqueue execution.
  */
-TRACE_EVENT(workqueue_execute_end,
+TRACE_EVENT(workqueue_execute_start,
 
 	TP_PROTO(struct work_struct *work),
 
@@ -46,15 +91,29 @@ TRACE_EVENT(workqueue_execute_end,
 
 	TP_STRUCT__entry(
 		__field( void *,	work	)
+		__field( void *,	function)
 	),
 
 	TP_fast_assign(
 		__entry->work		= work;
+		__entry->function	= work->func;
 	),
 
-	TP_printk("work struct %p", __entry->work)
+	TP_printk("work struct %p: function %pf", __entry->work, __entry->function)
 );
 
+/**
+ * workqueue_execute_end - called immediately before the workqueue callback
+ * @work:	pointer to struct work_struct
+ *
+ * Allows to track workqueue execution.
+ */
+DEFINE_EVENT(workqueue_work, workqueue_execute_end,
+
+	TP_PROTO(struct work_struct *work),
+
+	TP_ARGS(work)
+);
 
 #endif /* _TRACE_WORKQUEUE_H */
 
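This restructuring is what the 'prepare for more tracepoints' patch in the series is after: the workqueue_work event class carries TP_STRUCT__entry/TP_fast_assign/TP_printk once, so any further tracepoint that records only the work pointer becomes a single DEFINE_EVENT. A sketch, with a hypothetical event name for illustration only:

/* Hypothetical additional event reusing the workqueue_work class. */
DEFINE_EVENT(workqueue_work, workqueue_example_event,

	TP_PROTO(struct work_struct *work),

	TP_ARGS(work)
);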