author | Ingo Molnar <mingo@elte.hu> | 2008-08-11 18:11:49 -0400
---|---|---
committer | Ingo Molnar <mingo@elte.hu> | 2008-08-11 18:11:49 -0400
commit | 23a0ee908cbfba3264d19729c67c22b20fa73886 |
tree | 541103f6283cbac6b82cff88a7b91128acfce046 /kernel/workqueue.c |
parent | cc7a486cac78f6fc1a24e8cd63036bae8d2ab431 |
parent | 0f2bc27be27ca1dcc66b96131e44bf7648b959c6 |
Merge branch 'core/locking' into core/urgent
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r-- | kernel/workqueue.c | 24
1 file changed, 12 insertions, 12 deletions
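The change brought in from core/locking is mechanical: every open-coded lock_acquire()/lock_release() pair on a lockdep_map in kernel/workqueue.c is replaced by the lock_map_acquire()/lock_map_release() wrapper macros, which hide the boilerplate arguments (subclass, trylock, read, check, instruction pointer) at each call site. As a minimal userspace sketch of that wrapper pattern, the snippet below stubs out the lockdep hooks; the stub signatures simply mirror the open-coded calls removed by this diff, while the real macros live in include/linux/lockdep.h and forward to the real lockdep entry points.

```c
/*
 * Minimal userspace sketch (not kernel code) of the wrapper pattern
 * this merge applies.  The stub lock_acquire()/lock_release() below
 * mirror the open-coded calls removed by the diff.
 */
#include <stdio.h>

struct lockdep_map { const char *name; };

#define _THIS_IP_ ((unsigned long)0)	/* stand-in for the real macro */

/* Stubs standing in for the lockdep entry points. */
static void lock_acquire(struct lockdep_map *map, int subclass, int trylock,
			 int read, int check, unsigned long ip)
{
	printf("acquire %s (check=%d)\n", map->name, check);
}

static void lock_release(struct lockdep_map *map, int nested, unsigned long ip)
{
	printf("release %s\n", map->name);
}

/* The wrappers hide the argument boilerplate at every call site. */
#define lock_map_acquire(l)	lock_acquire((l), 0, 0, 0, 2, _THIS_IP_)
#define lock_map_release(l)	lock_release((l), 1, _THIS_IP_)

int main(void)
{
	struct lockdep_map wq_map = { "workqueue" };

	/* flush_workqueue()-style annotation: take and immediately drop
	 * the map so the lock dependency gets recorded. */
	lock_map_acquire(&wq_map);
	lock_map_release(&wq_map);
	return 0;
}
```

Beyond brevity, the wrappers keep every annotation site in sync if the lock_acquire() signature changes again, which is why the conversion touches all five call sites below in the same way.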
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 4a26a1382df0..4048e92aa04f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -290,11 +290,11 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 
 	BUG_ON(get_wq_data(work) != cwq);
 	work_clear_pending(work);
-	lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-	lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+	lock_map_acquire(&cwq->wq->lockdep_map);
+	lock_map_acquire(&lockdep_map);
 	f(work);
-	lock_release(&lockdep_map, 1, _THIS_IP_);
-	lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+	lock_map_release(&lockdep_map);
+	lock_map_release(&cwq->wq->lockdep_map);
 
 	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
 		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
@@ -413,8 +413,8 @@ void flush_workqueue(struct workqueue_struct *wq)
 	int cpu;
 
 	might_sleep();
-	lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-	lock_release(&wq->lockdep_map, 1, _THIS_IP_);
+	lock_map_acquire(&wq->lockdep_map);
+	lock_map_release(&wq->lockdep_map);
 	for_each_cpu_mask_nr(cpu, *cpu_map)
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
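The acquire/release pair at the top of flush_workqueue() (and likewise in flush_work(), wait_on_work() and cleanup_workqueue_thread() below) is not a real lock; it is a lockdep annotation. Taking and immediately dropping the workqueue's lockdep_map records a lock dependency, so lockdep can warn when a flush is performed while a lock is held that a queued work function also takes, which would otherwise be a silent deadlock. The fragment below is a hypothetical kernel-style sketch of the guarded-against pattern, not a standalone module; my_mutex, my_wq and my_work_fn are illustrative names, not part of this patch.

```c
#include <linux/mutex.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(my_mutex);
static struct workqueue_struct *my_wq;	/* assumed to be created elsewhere */

/* Queued work: records a "workqueue -> my_mutex" dependency when it runs. */
static void my_work_fn(struct work_struct *work)
{
	mutex_lock(&my_mutex);
	/* ... */
	mutex_unlock(&my_mutex);
}

/* Flushing while holding my_mutex adds the reverse "my_mutex -> workqueue"
 * dependency; with the annotations above, lockdep reports the inversion. */
static void buggy_flush(void)
{
	mutex_lock(&my_mutex);
	flush_workqueue(my_wq);
	mutex_unlock(&my_mutex);
}
```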
@@ -441,8 +441,8 @@ int flush_work(struct work_struct *work)
 	if (!cwq)
 		return 0;
 
-	lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-	lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+	lock_map_acquire(&cwq->wq->lockdep_map);
+	lock_map_release(&cwq->wq->lockdep_map);
 
 	prev = NULL;
 	spin_lock_irq(&cwq->lock);
@@ -536,8 +536,8 @@ static void wait_on_work(struct work_struct *work)
 
 	might_sleep();
 
-	lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-	lock_release(&work->lockdep_map, 1, _THIS_IP_);
+	lock_map_acquire(&work->lockdep_map);
+	lock_map_release(&work->lockdep_map);
 
 	cwq = get_wq_data(work);
 	if (!cwq)
@@ -872,8 +872,8 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
 	if (cwq->thread == NULL)
 		return;
 
-	lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-	lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+	lock_map_acquire(&cwq->wq->lockdep_map);
+	lock_map_release(&cwq->wq->lockdep_map);
 
 	flush_cpu_workqueue(cwq);
 	/*