aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/workqueue.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@woody.osdl.org>2006-12-16 12:53:50 -0500
committerLinus Torvalds <torvalds@woody.osdl.org>2006-12-16 12:53:50 -0500
commita08727bae727fc2ca3a6ee9506d77786b71070b3 (patch)
treeb1b42acea520c7738fc2e62476221a049f195f87 /kernel/workqueue.c
parent2f77d107050abc14bc393b34bdb7b91cf670c250 (diff)
Make workqueue bit operations work on "atomic_long_t"

On architectures where the atomicity of the bit operations is handled by external means (ie a separate spinlock to protect concurrent accesses), just doing a direct assignment on the workqueue data field (as done by commit 4594bf159f1962cec3b727954b7c598b07e2e737) can cause the assignment to be lost due to lack of serialization with the bitops on the same word.

So we need to serialize the assignment with the locks on those architectures (notably older ARM chips, PA-RISC and sparc32).

So rather than using an "unsigned long", let's use "atomic_long_t", which already has a safe assignment operation (atomic_long_set()) on such architectures. This requires that the atomic operations use the same atomicity locks as the bit operations do, but that is largely the case anyway. Sparc32 will probably need fixing.

Architectures (including modern ARM with LL/SC) that implement sane atomic operations for SMP won't see any of this matter.

Cc: Russell King <rmk+lkml@arm.linux.org.uk>
Cc: David Howells <dhowells@redhat.com>
Cc: David Miller <davem@davemloft.com>
Cc: Matthew Wilcox <matthew@wil.cx>
Cc: Linux Arch Maintainers <linux-arch@vger.kernel.org>
Cc: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--kernel/workqueue.c16
1 files changed, 8 insertions, 8 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index db49886bfae1..742cbbe49bdc 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -96,13 +96,13 @@ static inline void set_wq_data(struct work_struct *work, void *wq)
 	BUG_ON(!work_pending(work));

 	new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
-	new |= work->management & WORK_STRUCT_FLAG_MASK;
-	work->management = new;
+	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
+	atomic_long_set(&work->data, new);
 }

 static inline void *get_wq_data(struct work_struct *work)
 {
-	return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK);
+	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
 }

 static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
@@ -133,7 +133,7 @@ static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work
 	list_del_init(&work->entry);
 	spin_unlock_irqrestore(&cwq->lock, flags);

-	if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+	if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
 		work_release(work);
 	f(work);

@@ -206,7 +206,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
 	int ret = 0, cpu = get_cpu();

-	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
 		if (unlikely(is_single_threaded(wq)))
 			cpu = singlethread_cpu;
 		BUG_ON(!list_empty(&work->entry));
@@ -248,7 +248,7 @@ int fastcall queue_delayed_work(struct workqueue_struct *wq,
 	if (delay == 0)
 		return queue_work(wq, work);

-	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
 		BUG_ON(timer_pending(timer));
 		BUG_ON(!list_empty(&work->entry));

@@ -280,7 +280,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 	struct timer_list *timer = &dwork->timer;
 	struct work_struct *work = &dwork->work;

-	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
 		BUG_ON(timer_pending(timer));
 		BUG_ON(!list_empty(&work->entry));

@@ -321,7 +321,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 		spin_unlock_irqrestore(&cwq->lock, flags);

 		BUG_ON(get_wq_data(work) != cwq);
-		if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
 			work_release(work);
 		f(work);
