aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--include/linux/workqueue.h32
-rw-r--r--kernel/workqueue.c16
2 files changed, 30 insertions, 18 deletions
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 5b13dcf02714..2a7b38d87018 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -8,16 +8,21 @@
8#include <linux/timer.h> 8#include <linux/timer.h>
9#include <linux/linkage.h> 9#include <linux/linkage.h>
10#include <linux/bitops.h> 10#include <linux/bitops.h>
11#include <asm/atomic.h>
11 12
12struct workqueue_struct; 13struct workqueue_struct;
13 14
14struct work_struct; 15struct work_struct;
15typedef void (*work_func_t)(struct work_struct *work); 16typedef void (*work_func_t)(struct work_struct *work);
16 17
18/*
19 * The first word is the work queue pointer and the flags rolled into
20 * one
21 */
22#define work_data_bits(work) ((unsigned long *)(&(work)->data))
23
17struct work_struct { 24struct work_struct {
18 /* the first word is the work queue pointer and the flags rolled into 25 atomic_long_t data;
19 * one */
20 unsigned long management;
21#define WORK_STRUCT_PENDING 0 /* T if work item pending execution */ 26#define WORK_STRUCT_PENDING 0 /* T if work item pending execution */
22#define WORK_STRUCT_NOAUTOREL 1 /* F if work item automatically released on exec */ 27#define WORK_STRUCT_NOAUTOREL 1 /* F if work item automatically released on exec */
23#define WORK_STRUCT_FLAG_MASK (3UL) 28#define WORK_STRUCT_FLAG_MASK (3UL)
@@ -26,6 +31,9 @@ struct work_struct {
26 work_func_t func; 31 work_func_t func;
27}; 32};
28 33
34#define WORK_DATA_INIT(autorelease) \
35 ATOMIC_LONG_INIT((autorelease) << WORK_STRUCT_NOAUTOREL)
36
29struct delayed_work { 37struct delayed_work {
30 struct work_struct work; 38 struct work_struct work;
31 struct timer_list timer; 39 struct timer_list timer;
@@ -36,13 +44,13 @@ struct execute_work {
36}; 44};
37 45
38#define __WORK_INITIALIZER(n, f) { \ 46#define __WORK_INITIALIZER(n, f) { \
39 .management = 0, \ 47 .data = WORK_DATA_INIT(0), \
40 .entry = { &(n).entry, &(n).entry }, \ 48 .entry = { &(n).entry, &(n).entry }, \
41 .func = (f), \ 49 .func = (f), \
42 } 50 }
43 51
44#define __WORK_INITIALIZER_NAR(n, f) { \ 52#define __WORK_INITIALIZER_NAR(n, f) { \
45 .management = (1 << WORK_STRUCT_NOAUTOREL), \ 53 .data = WORK_DATA_INIT(1), \
46 .entry = { &(n).entry, &(n).entry }, \ 54 .entry = { &(n).entry, &(n).entry }, \
47 .func = (f), \ 55 .func = (f), \
48 } 56 }
@@ -82,17 +90,21 @@ struct execute_work {
82 90
83/* 91/*
84 * initialize all of a work item in one go 92 * initialize all of a work item in one go
93 *
94 * NOTE! No point in using "atomic_long_set()": using a direct
95 * assignment of the work data initializer allows the compiler
96 * to generate better code.
85 */ 97 */
86#define INIT_WORK(_work, _func) \ 98#define INIT_WORK(_work, _func) \
87 do { \ 99 do { \
88 (_work)->management = 0; \ 100 (_work)->data = (atomic_long_t) WORK_DATA_INIT(0); \
89 INIT_LIST_HEAD(&(_work)->entry); \ 101 INIT_LIST_HEAD(&(_work)->entry); \
90 PREPARE_WORK((_work), (_func)); \ 102 PREPARE_WORK((_work), (_func)); \
91 } while (0) 103 } while (0)
92 104
93#define INIT_WORK_NAR(_work, _func) \ 105#define INIT_WORK_NAR(_work, _func) \
94 do { \ 106 do { \
95 (_work)->management = (1 << WORK_STRUCT_NOAUTOREL); \ 107 (_work)->data = (atomic_long_t) WORK_DATA_INIT(1); \
96 INIT_LIST_HEAD(&(_work)->entry); \ 108 INIT_LIST_HEAD(&(_work)->entry); \
97 PREPARE_WORK((_work), (_func)); \ 109 PREPARE_WORK((_work), (_func)); \
98 } while (0) 110 } while (0)
@@ -114,7 +126,7 @@ struct execute_work {
114 * @work: The work item in question 126 * @work: The work item in question
115 */ 127 */
116#define work_pending(work) \ 128#define work_pending(work) \
117 test_bit(WORK_STRUCT_PENDING, &(work)->management) 129 test_bit(WORK_STRUCT_PENDING, work_data_bits(work))
118 130
119/** 131/**
120 * delayed_work_pending - Find out whether a delayable work item is currently 132 * delayed_work_pending - Find out whether a delayable work item is currently
@@ -143,7 +155,7 @@ struct execute_work {
143 * This should also be used to release a delayed work item. 155 * This should also be used to release a delayed work item.
144 */ 156 */
145#define work_release(work) \ 157#define work_release(work) \
146 clear_bit(WORK_STRUCT_PENDING, &(work)->management) 158 clear_bit(WORK_STRUCT_PENDING, work_data_bits(work))
147 159
148 160
149extern struct workqueue_struct *__create_workqueue(const char *name, 161extern struct workqueue_struct *__create_workqueue(const char *name,
@@ -188,7 +200,7 @@ static inline int cancel_delayed_work(struct delayed_work *work)
188 200
189 ret = del_timer_sync(&work->timer); 201 ret = del_timer_sync(&work->timer);
190 if (ret) 202 if (ret)
191 clear_bit(WORK_STRUCT_PENDING, &work->work.management); 203 work_release(&work->work);
192 return ret; 204 return ret;
193} 205}
194 206
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index db49886bfae1..742cbbe49bdc 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -96,13 +96,13 @@ static inline void set_wq_data(struct work_struct *work, void *wq)
96 BUG_ON(!work_pending(work)); 96 BUG_ON(!work_pending(work));
97 97
98 new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING); 98 new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
99 new |= work->management & WORK_STRUCT_FLAG_MASK; 99 new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
100 work->management = new; 100 atomic_long_set(&work->data, new);
101} 101}
102 102
103static inline void *get_wq_data(struct work_struct *work) 103static inline void *get_wq_data(struct work_struct *work)
104{ 104{
105 return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK); 105 return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
106} 106}
107 107
108static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work) 108static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
@@ -133,7 +133,7 @@ static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work
133 list_del_init(&work->entry); 133 list_del_init(&work->entry);
134 spin_unlock_irqrestore(&cwq->lock, flags); 134 spin_unlock_irqrestore(&cwq->lock, flags);
135 135
136 if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management)) 136 if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
137 work_release(work); 137 work_release(work);
138 f(work); 138 f(work);
139 139
@@ -206,7 +206,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
206{ 206{
207 int ret = 0, cpu = get_cpu(); 207 int ret = 0, cpu = get_cpu();
208 208
209 if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) { 209 if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
210 if (unlikely(is_single_threaded(wq))) 210 if (unlikely(is_single_threaded(wq)))
211 cpu = singlethread_cpu; 211 cpu = singlethread_cpu;
212 BUG_ON(!list_empty(&work->entry)); 212 BUG_ON(!list_empty(&work->entry));
@@ -248,7 +248,7 @@ int fastcall queue_delayed_work(struct workqueue_struct *wq,
248 if (delay == 0) 248 if (delay == 0)
249 return queue_work(wq, work); 249 return queue_work(wq, work);
250 250
251 if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) { 251 if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
252 BUG_ON(timer_pending(timer)); 252 BUG_ON(timer_pending(timer));
253 BUG_ON(!list_empty(&work->entry)); 253 BUG_ON(!list_empty(&work->entry));
254 254
@@ -280,7 +280,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
280 struct timer_list *timer = &dwork->timer; 280 struct timer_list *timer = &dwork->timer;
281 struct work_struct *work = &dwork->work; 281 struct work_struct *work = &dwork->work;
282 282
283 if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) { 283 if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
284 BUG_ON(timer_pending(timer)); 284 BUG_ON(timer_pending(timer));
285 BUG_ON(!list_empty(&work->entry)); 285 BUG_ON(!list_empty(&work->entry));
286 286
@@ -321,7 +321,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
321 spin_unlock_irqrestore(&cwq->lock, flags); 321 spin_unlock_irqrestore(&cwq->lock, flags);
322 322
323 BUG_ON(get_wq_data(work) != cwq); 323 BUG_ON(get_wq_data(work) != cwq);
324 if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management)) 324 if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
325 work_release(work); 325 work_release(work);
326 f(work); 326 f(work);
327 327