about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--fs/jbd/transaction.c4
-rw-r--r--fs/jbd2/transaction.c4
-rw-r--r--include/linux/lockdep.h10
-rw-r--r--kernel/workqueue.c24
4 files changed, 21 insertions(+), 21 deletions(-)
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 31a4bd7f78d7..0540ca27a446 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -291,7 +291,7 @@ handle_t *journal_start(journal_t *journal, int nblocks)
291 goto out; 291 goto out;
292 } 292 }
293 293
294 map_acquire(&handle->h_lockdep_map); 294 lock_map_acquire(&handle->h_lockdep_map);
295 295
296out: 296out:
297 return handle; 297 return handle;
@@ -1448,7 +1448,7 @@ int journal_stop(handle_t *handle)
1448 spin_unlock(&journal->j_state_lock); 1448 spin_unlock(&journal->j_state_lock);
1449 } 1449 }
1450 1450
1451 map_release(&handle->h_lockdep_map); 1451 lock_map_release(&handle->h_lockdep_map);
1452 1452
1453 jbd_free_handle(handle); 1453 jbd_free_handle(handle);
1454 return err; 1454 return err;
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index c074971215ed..e5d540588fa9 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -301,7 +301,7 @@ handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
301 goto out; 301 goto out;
302 } 302 }
303 303
304 map_acquire(&handle->h_lockdep_map); 304 lock_map_acquire(&handle->h_lockdep_map);
305out: 305out:
306 return handle; 306 return handle;
307} 307}
@@ -1279,7 +1279,7 @@ int jbd2_journal_stop(handle_t *handle)
1279 spin_unlock(&journal->j_state_lock); 1279 spin_unlock(&journal->j_state_lock);
1280 } 1280 }
1281 1281
1282 map_release(&handle->h_lockdep_map); 1282 lock_map_release(&handle->h_lockdep_map);
1283 1283
1284 jbd2_free_handle(handle); 1284 jbd2_free_handle(handle);
1285 return err; 1285 return err;
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 4452c04a7f6e..67f42b300c65 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -465,14 +465,14 @@ static inline void print_irqtrace_events(struct task_struct *curr)
465 465
466#ifdef CONFIG_DEBUG_LOCK_ALLOC 466#ifdef CONFIG_DEBUG_LOCK_ALLOC
467# ifdef CONFIG_PROVE_LOCKING 467# ifdef CONFIG_PROVE_LOCKING
468# define map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_) 468# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
469# else 469# else
470# define map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_) 470# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
471# endif 471# endif
472# define map_release(l) lock_release(l, 1, _THIS_IP_) 472# define lock_map_release(l) lock_release(l, 1, _THIS_IP_)
473#else 473#else
474# define map_acquire(l) do { } while (0) 474# define lock_map_acquire(l) do { } while (0)
475# define map_release(l) do { } while (0) 475# define lock_map_release(l) do { } while (0)
476#endif 476#endif
477 477
478#endif /* __LINUX_LOCKDEP_H */ 478#endif /* __LINUX_LOCKDEP_H */
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 53564ae894a6..8bb5b68fb3a9 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -290,11 +290,11 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
290 290
291 BUG_ON(get_wq_data(work) != cwq); 291 BUG_ON(get_wq_data(work) != cwq);
292 work_clear_pending(work); 292 work_clear_pending(work);
293 map_acquire(&cwq->wq->lockdep_map); 293 lock_map_acquire(&cwq->wq->lockdep_map);
294 map_acquire(&lockdep_map); 294 lock_map_acquire(&lockdep_map);
295 f(work); 295 f(work);
296 map_release(&lockdep_map); 296 lock_map_release(&lockdep_map);
297 map_release(&cwq->wq->lockdep_map); 297 lock_map_release(&cwq->wq->lockdep_map);
298 298
299 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { 299 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
300 printk(KERN_ERR "BUG: workqueue leaked lock or atomic: " 300 printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
@@ -413,8 +413,8 @@ void flush_workqueue(struct workqueue_struct *wq)
413 int cpu; 413 int cpu;
414 414
415 might_sleep(); 415 might_sleep();
416 map_acquire(&wq->lockdep_map); 416 lock_map_acquire(&wq->lockdep_map);
417 map_release(&wq->lockdep_map); 417 lock_map_release(&wq->lockdep_map);
418 for_each_cpu_mask_nr(cpu, *cpu_map) 418 for_each_cpu_mask_nr(cpu, *cpu_map)
419 flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); 419 flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
420} 420}
@@ -441,8 +441,8 @@ int flush_work(struct work_struct *work)
441 if (!cwq) 441 if (!cwq)
442 return 0; 442 return 0;
443 443
444 map_acquire(&cwq->wq->lockdep_map); 444 lock_map_acquire(&cwq->wq->lockdep_map);
445 map_release(&cwq->wq->lockdep_map); 445 lock_map_release(&cwq->wq->lockdep_map);
446 446
447 prev = NULL; 447 prev = NULL;
448 spin_lock_irq(&cwq->lock); 448 spin_lock_irq(&cwq->lock);
@@ -536,8 +536,8 @@ static void wait_on_work(struct work_struct *work)
536 536
537 might_sleep(); 537 might_sleep();
538 538
539 map_acquire(&work->lockdep_map); 539 lock_map_acquire(&work->lockdep_map);
540 map_release(&work->lockdep_map); 540 lock_map_release(&work->lockdep_map);
541 541
542 cwq = get_wq_data(work); 542 cwq = get_wq_data(work);
543 if (!cwq) 543 if (!cwq)
@@ -861,8 +861,8 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
861 if (cwq->thread == NULL) 861 if (cwq->thread == NULL)
862 return; 862 return;
863 863
864 map_acquire(&cwq->wq->lockdep_map); 864 lock_map_acquire(&cwq->wq->lockdep_map);
865 map_release(&cwq->wq->lockdep_map); 865 lock_map_release(&cwq->wq->lockdep_map);
866 866
867 flush_cpu_workqueue(cwq); 867 flush_cpu_workqueue(cwq);
868 /* 868 /*