author     Oleg Nesterov <oleg@tv-sign.ru>                       2007-05-09 05:34:19 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-09 15:30:52 -0400
commit     23b2e5991afde5af91a1a661d7f47ee56120759e (patch)
tree       d4b8e5bc1a311abd6b6de469f862a8b53c5f9f36
parent     c214b2cc5f9be7c236f9b91acf524688ff0e3e72 (diff)
workqueue: kill NOAUTOREL works
We don't have any users, and it is not so trivial to use NOAUTOREL works
correctly, so it is better to simplify the API: delete NOAUTOREL support and
rename work_release to work_clear_pending to avoid confusion.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Acked-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  include/linux/workqueue.h | 64
-rw-r--r--  kernel/workqueue.c        |  5
2 files changed, 11 insertions(+), 58 deletions(-)
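For illustration only, not part of the patch: a minimal sketch of how a module declares and queues work once the NAR variants are gone. The my_work, my_handler, my_init and my_exit names are hypothetical.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/workqueue.h>

/* Hypothetical handler; the workqueue core clears WORK_STRUCT_PENDING before
 * calling it, so the item may be queued again from here or from any context. */
static void my_handler(struct work_struct *work)
{
        printk(KERN_INFO "my_work executed\n");
}

/* Only one initializer family remains; DECLARE_WORK_NAR and friends are gone. */
static DECLARE_WORK(my_work, my_handler);

static int __init my_init(void)
{
        /* Returns nonzero if the item was not already pending. */
        schedule_work(&my_work);
        return 0;
}

static void __exit my_exit(void)
{
        /* Wait for my_handler to finish before the module text goes away. */
        flush_scheduled_work();
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");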
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 27110c04f21e..e1581dce5890 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -24,15 +24,13 @@ typedef void (*work_func_t)(struct work_struct *work);
 struct work_struct {
         atomic_long_t data;
 #define WORK_STRUCT_PENDING 0           /* T if work item pending execution */
-#define WORK_STRUCT_NOAUTOREL 1         /* F if work item automatically released on exec */
 #define WORK_STRUCT_FLAG_MASK (3UL)
 #define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
         struct list_head entry;
         work_func_t func;
 };
 
-#define WORK_DATA_INIT(autorelease) \
-        ATOMIC_LONG_INIT((autorelease) << WORK_STRUCT_NOAUTOREL)
+#define WORK_DATA_INIT()        ATOMIC_LONG_INIT(0)
 
 struct delayed_work {
         struct work_struct work;
@@ -44,14 +42,8 @@ struct execute_work {
 };
 
 #define __WORK_INITIALIZER(n, f) {                              \
-        .data = WORK_DATA_INIT(0),                              \
-        .entry = { &(n).entry, &(n).entry },                    \
-        .func = (f),                                            \
-        }
-
-#define __WORK_INITIALIZER_NAR(n, f) {                          \
-        .data = WORK_DATA_INIT(1),                              \
+        .data = WORK_DATA_INIT(),                               \
         .entry = { &(n).entry, &(n).entry },                    \
         .func = (f),                                            \
         }
 
@@ -60,23 +52,12 @@ struct execute_work {
         .timer = TIMER_INITIALIZER(NULL, 0, 0),                 \
         }
 
-#define __DELAYED_WORK_INITIALIZER_NAR(n, f) {                  \
-        .work = __WORK_INITIALIZER_NAR((n).work, (f)),          \
-        .timer = TIMER_INITIALIZER(NULL, 0, 0),                 \
-        }
-
 #define DECLARE_WORK(n, f)                                      \
         struct work_struct n = __WORK_INITIALIZER(n, f)
 
-#define DECLARE_WORK_NAR(n, f)                                  \
-        struct work_struct n = __WORK_INITIALIZER_NAR(n, f)
-
 #define DECLARE_DELAYED_WORK(n, f)                              \
         struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
 
-#define DECLARE_DELAYED_WORK_NAR(n, f)                          \
-        struct dwork_struct n = __DELAYED_WORK_INITIALIZER_NAR(n, f)
-
 /*
  * initialize a work item's function pointer
  */
@@ -95,16 +76,9 @@ struct execute_work {
  * assignment of the work data initializer allows the compiler
  * to generate better code.
  */
 #define INIT_WORK(_work, _func)                                 \
-        do {                                                    \
-                (_work)->data = (atomic_long_t) WORK_DATA_INIT(0);     \
-                INIT_LIST_HEAD(&(_work)->entry);                \
-                PREPARE_WORK((_work), (_func));                 \
-        } while (0)
-
-#define INIT_WORK_NAR(_work, _func)                             \
         do {                                                    \
-                (_work)->data = (atomic_long_t) WORK_DATA_INIT(1);     \
+                (_work)->data = (atomic_long_t) WORK_DATA_INIT();      \
                 INIT_LIST_HEAD(&(_work)->entry);                \
                 PREPARE_WORK((_work), (_func));                 \
         } while (0)
@@ -115,12 +89,6 @@ struct execute_work {
                 init_timer(&(_work)->timer);                    \
         } while (0)
 
-#define INIT_DELAYED_WORK_NAR(_work, _func)                     \
-        do {                                                    \
-                INIT_WORK_NAR(&(_work)->work, (_func));         \
-                init_timer(&(_work)->timer);                    \
-        } while (0)
-
 #define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)              \
         do {                                                    \
                 INIT_WORK(&(_work)->work, (_func));             \
@@ -143,24 +111,10 @@ struct execute_work {
         work_pending(&(w)->work)
 
 /**
- * work_release - Release a work item under execution
- * @work: The work item to release
- *
- * This is used to release a work item that has been initialised with automatic
- * release mode disabled (WORK_STRUCT_NOAUTOREL is set). This gives the work
- * function the opportunity to grab auxiliary data from the container of the
- * work_struct before clearing the pending bit as the work_struct may be
- * subject to deallocation the moment the pending bit is cleared.
- *
- * In such a case, this should be called in the work function after it has
- * fetched any data it may require from the containter of the work_struct.
- * After this function has been called, the work_struct may be scheduled for
- * further execution or it may be deallocated unless other precautions are
- * taken.
- *
- * This should also be used to release a delayed work item.
+ * work_clear_pending - for internal use only, mark a work item as not pending
+ * @work: The work item in question
  */
-#define work_release(work) \
+#define work_clear_pending(work) \
         clear_bit(WORK_STRUCT_PENDING, work_data_bits(work))
 
 
@@ -205,7 +159,7 @@ static inline int cancel_delayed_work(struct delayed_work *work)
 
         ret = del_timer(&work->timer);
         if (ret)
-                work_release(&work->work);
+                work_clear_pending(&work->work);
         return ret;
 }
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 41eaffd125ca..0611de815a8f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -246,8 +246,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
                 spin_unlock_irq(&cwq->lock);
 
                 BUG_ON(get_wq_data(work) != cwq);
-                if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
-                        work_release(work);
+                work_clear_pending(work);
                 f(work);
 
                 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
@@ -453,7 +452,7 @@ void flush_work(struct workqueue_struct *wq, struct work_struct *work)
          */
         spin_lock_irq(&cwq->lock);
         list_del_init(&work->entry);
-        work_release(work);
+        work_clear_pending(work);
         spin_unlock_irq(&cwq->lock);
 
         for_each_cpu_mask(cpu, *cpu_map)
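A further sketch, again hypothetical and not part of the patch: because run_workqueue() now calls work_clear_pending() unconditionally before f(work), a handler can rearm itself without the old NOAUTOREL/work_release() dance. The poll_handler and poll_work names and the HZ period are illustrative only.

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

static void poll_handler(struct work_struct *work)
{
        struct delayed_work *dwork =
                container_of(work, struct delayed_work, work);

        /* ... do the periodic job ... */

        /* PENDING is already clear, so rearming from inside the handler is fine. */
        schedule_delayed_work(dwork, HZ);
}

static DECLARE_DELAYED_WORK(poll_work, poll_handler);

/* Kick it off once, e.g. from module init: schedule_delayed_work(&poll_work, HZ); */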