path: root/kernel/workqueue.c
author		Ingo Molnar <mingo@elte.hu>	2008-08-11 04:30:30 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-08-11 04:30:30 -0400
commit		3295f0ef9ff048a4619ede597ad9ec9cab725654 (patch)
tree		f39a8ecf1958130a0b86c554399d23a65b1c3991 /kernel/workqueue.c
parent		8bfe0298f7a04952d19f4a2cf510d7a6311eeed0 (diff)
lockdep: rename map_[acquire|release]() => lock_map_[acquire|release]()
the names were too generic:

 drivers/uio/uio.c:87: error: expected identifier or '(' before 'do'
 drivers/uio/uio.c:87: error: expected identifier or '(' before 'while'
 drivers/uio/uio.c:113: error: 'map_release' undeclared here (not in a function)

Signed-off-by: Ingo Molnar <mingo@elte.hu>
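A hedged sketch of the collision and of why the prefix fixes it, assuming the usual lockdep pattern that the wrappers compile out to function-like no-op macros when the feature is disabled. Everything below is illustrative only: the stub macro bodies, the cut-down struct kobject, and the helper run_one_work are made up for this example; only the macro names and the map_release identifier come from the patch and from drivers/uio/uio.c.

/* Old, too-generic spelling: a global no-op macro such as
 *
 *	#define map_release(l)		do { } while (0)
 *
 * made "static void map_release(struct kobject *kobj)" expand to
 * "static void do { } while (0) ..." and fail to parse -- the uio.c
 * errors quoted above. The prefixed names avoid the clash
 * (no-op stubs here only to keep the sketch self-contained): */
#define lock_map_acquire(l)	do { (void)(l); } while (0)
#define lock_map_release(l)	do { (void)(l); } while (0)

struct kobject { int refcount; };	/* stand-in, not the kernel type */

/* Driver-local identifier that previously collided with the macro;
 * with the lock_ prefix this definition parses normally. */
static void map_release(struct kobject *kobj)
{
	kobj->refcount = 0;
}

/* Usage pattern matching the diff below: bracket the callback with the
 * annotations so lockdep records the dependency. */
static void run_one_work(int *lockdep_map, void (*f)(struct kobject *),
			 struct kobject *kobj)
{
	lock_map_acquire(lockdep_map);
	f(kobj);
	lock_map_release(lockdep_map);
}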
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 53564ae894a..8bb5b68fb3a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -290,11 +290,11 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 
 		BUG_ON(get_wq_data(work) != cwq);
 		work_clear_pending(work);
-		map_acquire(&cwq->wq->lockdep_map);
-		map_acquire(&lockdep_map);
+		lock_map_acquire(&cwq->wq->lockdep_map);
+		lock_map_acquire(&lockdep_map);
 		f(work);
-		map_release(&lockdep_map);
-		map_release(&cwq->wq->lockdep_map);
+		lock_map_release(&lockdep_map);
+		lock_map_release(&cwq->wq->lockdep_map);
 
 		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
 			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
@@ -413,8 +413,8 @@ void flush_workqueue(struct workqueue_struct *wq)
 	int cpu;
 
 	might_sleep();
-	map_acquire(&wq->lockdep_map);
-	map_release(&wq->lockdep_map);
+	lock_map_acquire(&wq->lockdep_map);
+	lock_map_release(&wq->lockdep_map);
 	for_each_cpu_mask_nr(cpu, *cpu_map)
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
@@ -441,8 +441,8 @@ int flush_work(struct work_struct *work)
 	if (!cwq)
 		return 0;
 
-	map_acquire(&cwq->wq->lockdep_map);
-	map_release(&cwq->wq->lockdep_map);
+	lock_map_acquire(&cwq->wq->lockdep_map);
+	lock_map_release(&cwq->wq->lockdep_map);
 
 	prev = NULL;
 	spin_lock_irq(&cwq->lock);
@@ -536,8 +536,8 @@ static void wait_on_work(struct work_struct *work)
 
 	might_sleep();
 
-	map_acquire(&work->lockdep_map);
-	map_release(&work->lockdep_map);
+	lock_map_acquire(&work->lockdep_map);
+	lock_map_release(&work->lockdep_map);
 
 	cwq = get_wq_data(work);
 	if (!cwq)
@@ -861,8 +861,8 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
 	if (cwq->thread == NULL)
 		return;
 
-	map_acquire(&cwq->wq->lockdep_map);
-	map_release(&cwq->wq->lockdep_map);
+	lock_map_acquire(&cwq->wq->lockdep_map);
+	lock_map_release(&cwq->wq->lockdep_map);
 
 	flush_cpu_workqueue(cwq);
 	/*