diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-09-04 11:18:19 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-09-04 11:18:19 -0400 |
commit | 4689550bb278cb142979c313a0d608e802c6711b (patch) | |
tree | f8776c28f1328ab4077132c636c2706f12c793aa /kernel/lglock.c | |
parent | b854e4de0bf88d094476af82c0d5a80f6f2af916 (diff) | |
parent | 15e71911fcc655508e02f767a3d9b8b138051d2b (diff) |
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core/locking changes from Ingo Molnar:
"Main changes:
- another mutex optimization, from Davidlohr Bueso
- improved lglock lockdep tracking, from Michel Lespinasse
- [ assorted smaller updates, improvements, cleanups. ]"
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
generic-ipi/locking: Fix misleading smp_call_function_any() description
hung_task debugging: Print more info when reporting the problem
mutex: Avoid label warning when !CONFIG_MUTEX_SPIN_ON_OWNER
mutex: Do not unnecessarily deal with waiters
mutex: Fix/document access-once assumption in mutex_can_spin_on_owner()
lglock: Update lockdep annotations to report recursive local locks
lockdep: Introduce lock_acquire_exclusive()/shared() helper macros
Diffstat (limited to 'kernel/lglock.c')
-rw-r--r-- | kernel/lglock.c | 12 |
1 file changed, 6 insertions, 6 deletions
diff --git a/kernel/lglock.c b/kernel/lglock.c index 6535a667a5a7..86ae2aebf004 100644 --- a/kernel/lglock.c +++ b/kernel/lglock.c | |||
@@ -21,7 +21,7 @@ void lg_local_lock(struct lglock *lg) | |||
21 | arch_spinlock_t *lock; | 21 | arch_spinlock_t *lock; |
22 | 22 | ||
23 | preempt_disable(); | 23 | preempt_disable(); |
24 | rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_); | 24 | lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_); |
25 | lock = this_cpu_ptr(lg->lock); | 25 | lock = this_cpu_ptr(lg->lock); |
26 | arch_spin_lock(lock); | 26 | arch_spin_lock(lock); |
27 | } | 27 | } |
@@ -31,7 +31,7 @@ void lg_local_unlock(struct lglock *lg) | |||
31 | { | 31 | { |
32 | arch_spinlock_t *lock; | 32 | arch_spinlock_t *lock; |
33 | 33 | ||
34 | rwlock_release(&lg->lock_dep_map, 1, _RET_IP_); | 34 | lock_release(&lg->lock_dep_map, 1, _RET_IP_); |
35 | lock = this_cpu_ptr(lg->lock); | 35 | lock = this_cpu_ptr(lg->lock); |
36 | arch_spin_unlock(lock); | 36 | arch_spin_unlock(lock); |
37 | preempt_enable(); | 37 | preempt_enable(); |
@@ -43,7 +43,7 @@ void lg_local_lock_cpu(struct lglock *lg, int cpu) | |||
43 | arch_spinlock_t *lock; | 43 | arch_spinlock_t *lock; |
44 | 44 | ||
45 | preempt_disable(); | 45 | preempt_disable(); |
46 | rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_); | 46 | lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_); |
47 | lock = per_cpu_ptr(lg->lock, cpu); | 47 | lock = per_cpu_ptr(lg->lock, cpu); |
48 | arch_spin_lock(lock); | 48 | arch_spin_lock(lock); |
49 | } | 49 | } |
@@ -53,7 +53,7 @@ void lg_local_unlock_cpu(struct lglock *lg, int cpu) | |||
53 | { | 53 | { |
54 | arch_spinlock_t *lock; | 54 | arch_spinlock_t *lock; |
55 | 55 | ||
56 | rwlock_release(&lg->lock_dep_map, 1, _RET_IP_); | 56 | lock_release(&lg->lock_dep_map, 1, _RET_IP_); |
57 | lock = per_cpu_ptr(lg->lock, cpu); | 57 | lock = per_cpu_ptr(lg->lock, cpu); |
58 | arch_spin_unlock(lock); | 58 | arch_spin_unlock(lock); |
59 | preempt_enable(); | 59 | preempt_enable(); |
@@ -65,7 +65,7 @@ void lg_global_lock(struct lglock *lg) | |||
65 | int i; | 65 | int i; |
66 | 66 | ||
67 | preempt_disable(); | 67 | preempt_disable(); |
68 | rwlock_acquire(&lg->lock_dep_map, 0, 0, _RET_IP_); | 68 | lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_); |
69 | for_each_possible_cpu(i) { | 69 | for_each_possible_cpu(i) { |
70 | arch_spinlock_t *lock; | 70 | arch_spinlock_t *lock; |
71 | lock = per_cpu_ptr(lg->lock, i); | 71 | lock = per_cpu_ptr(lg->lock, i); |
@@ -78,7 +78,7 @@ void lg_global_unlock(struct lglock *lg) | |||
78 | { | 78 | { |
79 | int i; | 79 | int i; |
80 | 80 | ||
81 | rwlock_release(&lg->lock_dep_map, 1, _RET_IP_); | 81 | lock_release(&lg->lock_dep_map, 1, _RET_IP_); |
82 | for_each_possible_cpu(i) { | 82 | for_each_possible_cpu(i) { |
83 | arch_spinlock_t *lock; | 83 | arch_spinlock_t *lock; |
84 | lock = per_cpu_ptr(lg->lock, i); | 84 | lock = per_cpu_ptr(lg->lock, i); |