diff options
Diffstat (limited to 'kernel/lglock.c')
| -rw-r--r-- | kernel/lglock.c | 12 |
1 file changed, 6 insertions, 6 deletions
diff --git a/kernel/lglock.c b/kernel/lglock.c
index 6535a667a5a7..86ae2aebf004 100644
--- a/kernel/lglock.c
+++ b/kernel/lglock.c
| @@ -21,7 +21,7 @@ void lg_local_lock(struct lglock *lg) | |||
| 21 | arch_spinlock_t *lock; | 21 | arch_spinlock_t *lock; |
| 22 | 22 | ||
| 23 | preempt_disable(); | 23 | preempt_disable(); |
| 24 | rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_); | 24 | lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_); |
| 25 | lock = this_cpu_ptr(lg->lock); | 25 | lock = this_cpu_ptr(lg->lock); |
| 26 | arch_spin_lock(lock); | 26 | arch_spin_lock(lock); |
| 27 | } | 27 | } |
| @@ -31,7 +31,7 @@ void lg_local_unlock(struct lglock *lg) | |||
| 31 | { | 31 | { |
| 32 | arch_spinlock_t *lock; | 32 | arch_spinlock_t *lock; |
| 33 | 33 | ||
| 34 | rwlock_release(&lg->lock_dep_map, 1, _RET_IP_); | 34 | lock_release(&lg->lock_dep_map, 1, _RET_IP_); |
| 35 | lock = this_cpu_ptr(lg->lock); | 35 | lock = this_cpu_ptr(lg->lock); |
| 36 | arch_spin_unlock(lock); | 36 | arch_spin_unlock(lock); |
| 37 | preempt_enable(); | 37 | preempt_enable(); |
| @@ -43,7 +43,7 @@ void lg_local_lock_cpu(struct lglock *lg, int cpu) | |||
| 43 | arch_spinlock_t *lock; | 43 | arch_spinlock_t *lock; |
| 44 | 44 | ||
| 45 | preempt_disable(); | 45 | preempt_disable(); |
| 46 | rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_); | 46 | lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_); |
| 47 | lock = per_cpu_ptr(lg->lock, cpu); | 47 | lock = per_cpu_ptr(lg->lock, cpu); |
| 48 | arch_spin_lock(lock); | 48 | arch_spin_lock(lock); |
| 49 | } | 49 | } |
| @@ -53,7 +53,7 @@ void lg_local_unlock_cpu(struct lglock *lg, int cpu) | |||
| 53 | { | 53 | { |
| 54 | arch_spinlock_t *lock; | 54 | arch_spinlock_t *lock; |
| 55 | 55 | ||
| 56 | rwlock_release(&lg->lock_dep_map, 1, _RET_IP_); | 56 | lock_release(&lg->lock_dep_map, 1, _RET_IP_); |
| 57 | lock = per_cpu_ptr(lg->lock, cpu); | 57 | lock = per_cpu_ptr(lg->lock, cpu); |
| 58 | arch_spin_unlock(lock); | 58 | arch_spin_unlock(lock); |
| 59 | preempt_enable(); | 59 | preempt_enable(); |
| @@ -65,7 +65,7 @@ void lg_global_lock(struct lglock *lg) | |||
| 65 | int i; | 65 | int i; |
| 66 | 66 | ||
| 67 | preempt_disable(); | 67 | preempt_disable(); |
| 68 | rwlock_acquire(&lg->lock_dep_map, 0, 0, _RET_IP_); | 68 | lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_); |
| 69 | for_each_possible_cpu(i) { | 69 | for_each_possible_cpu(i) { |
| 70 | arch_spinlock_t *lock; | 70 | arch_spinlock_t *lock; |
| 71 | lock = per_cpu_ptr(lg->lock, i); | 71 | lock = per_cpu_ptr(lg->lock, i); |
| @@ -78,7 +78,7 @@ void lg_global_unlock(struct lglock *lg) | |||
| 78 | { | 78 | { |
| 79 | int i; | 79 | int i; |
| 80 | 80 | ||
| 81 | rwlock_release(&lg->lock_dep_map, 1, _RET_IP_); | 81 | lock_release(&lg->lock_dep_map, 1, _RET_IP_); |
| 82 | for_each_possible_cpu(i) { | 82 | for_each_possible_cpu(i) { |
| 83 | arch_spinlock_t *lock; | 83 | arch_spinlock_t *lock; |
| 84 | lock = per_cpu_ptr(lg->lock, i); | 84 | lock = per_cpu_ptr(lg->lock, i); |
