author		Linus Torvalds <torvalds@linux-foundation.org>	2011-10-26 10:17:32 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-10-26 10:17:32 -0400
commit		3cfef9524677a4ecb392d6fbffe6ebce6302f1d4 (patch)
tree		88647d9dc50d634dee9cfeb7f354d620977a2f33 /lib
parent		982653009b883ef1529089e3e6f1ae2fee41cbe2 (diff)
parent		68cc3990a545dc0da221b4844dd8b9c06623a6c5 (diff)
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (27 commits)
  rtmutex: Add missing rcu_read_unlock() in debug_rt_mutex_print_deadlock()
  lockdep: Comment all warnings
  lib: atomic64: Change the type of local lock to raw_spinlock_t
  locking, lib/atomic64: Annotate atomic64_lock::lock as raw
  locking, x86, iommu: Annotate qi->q_lock as raw
  locking, x86, iommu: Annotate irq_2_ir_lock as raw
  locking, x86, iommu: Annotate iommu->register_lock as raw
  locking, dma, ipu: Annotate bank_lock as raw
  locking, ARM: Annotate low level hw locks as raw
  locking, drivers/dca: Annotate dca_lock as raw
  locking, powerpc: Annotate uic->lock as raw
  locking, x86: mce: Annotate cmci_discover_lock as raw
  locking, ACPI: Annotate c3_lock as raw
  locking, oprofile: Annotate oprofilefs lock as raw
  locking, video: Annotate vga console lock as raw
  locking, latencytop: Annotate latency_lock as raw
  locking, timer_stats: Annotate table_lock as raw
  locking, rwsem: Annotate inner lock as raw
  locking, semaphores: Annotate inner lock as raw
  locking, sched: Annotate thread_group_cputimer as raw
  ...

Fix up conflicts in kernel/posix-cpu-timers.c manually: making
cputimer->cputime a raw lock conflicted with the ABBA fix in commit
bcd5cff7216f ("cputimer: Cure lock inversion").
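The series is mechanical: locks that must stay truly non-preemptible even on PREEMPT_RT, where plain spinlock_t becomes a sleeping lock, are switched from spinlock_t to raw_spinlock_t together with the matching raw_spin_*() accessors. A minimal sketch of the pattern follows, using hypothetical names (my_lock, my_counter) rather than anything from this merge; on a non-RT kernel both variants compile to the same spinning lock, so the annotation changes nothing functionally there.

/*
 * Minimal sketch of the conversion pattern applied throughout this merge.
 * my_lock/my_counter are hypothetical names for illustration only.
 */
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(my_lock);		/* was: static DEFINE_SPINLOCK(my_lock); */
static long long my_counter;

static long long my_counter_add_return(long long a)
{
	unsigned long flags;
	long long val;

	raw_spin_lock_irqsave(&my_lock, flags);		/* was: spin_lock_irqsave() */
	val = my_counter += a;
	raw_spin_unlock_irqrestore(&my_lock, flags);	/* was: spin_unlock_irqrestore() */
	return val;
}

The lock/unlock structure of each function is left untouched; only the lock type and the accessor prefix change, which is why the diffs below are line-for-line substitutions.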
Diffstat (limited to 'lib')
-rw-r--r--	lib/atomic64.c        | 66
-rw-r--r--	lib/percpu_counter.c  | 18
-rw-r--r--	lib/proportions.c     | 12
-rw-r--r--	lib/ratelimit.c       |  4
-rw-r--r--	lib/rwsem-spinlock.c  | 38
-rw-r--r--	lib/rwsem.c           | 14
6 files changed, 76 insertions(+), 76 deletions(-)
diff --git a/lib/atomic64.c b/lib/atomic64.c
index e12ae0dd08a8..3975470caf4f 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -29,11 +29,11 @@
  * Ensure each lock is in a separate cacheline.
  */
 static union {
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	char pad[L1_CACHE_BYTES];
 } atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
 
-static inline spinlock_t *lock_addr(const atomic64_t *v)
+static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
 {
 	unsigned long addr = (unsigned long) v;
 
@@ -45,12 +45,12 @@ static inline spinlock_t *lock_addr(const atomic64_t *v)
 long long atomic64_read(const atomic64_t *v)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_read);
@@ -58,34 +58,34 @@ EXPORT_SYMBOL(atomic64_read);
 void atomic64_set(atomic64_t *v, long long i)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	v->counter = i;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(atomic64_set);
 
 void atomic64_add(long long a, atomic64_t *v)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	v->counter += a;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(atomic64_add);
 
 long long atomic64_add_return(long long a, atomic64_t *v)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter += a;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_add_return);
@@ -93,23 +93,23 @@ EXPORT_SYMBOL(atomic64_add_return);
 void atomic64_sub(long long a, atomic64_t *v)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	v->counter -= a;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(atomic64_sub);
 
 long long atomic64_sub_return(long long a, atomic64_t *v)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter -= a;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_sub_return);
@@ -117,14 +117,14 @@ EXPORT_SYMBOL(atomic64_sub_return);
 long long atomic64_dec_if_positive(atomic64_t *v)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter - 1;
 	if (val >= 0)
 		v->counter = val;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_dec_if_positive);
@@ -132,14 +132,14 @@ EXPORT_SYMBOL(atomic64_dec_if_positive);
 long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
 	if (val == o)
 		v->counter = n;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_cmpxchg);
@@ -147,13 +147,13 @@ EXPORT_SYMBOL(atomic64_cmpxchg);
 long long atomic64_xchg(atomic64_t *v, long long new)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	long long val;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
 	v->counter = new;
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
 EXPORT_SYMBOL(atomic64_xchg);
@@ -161,15 +161,15 @@ EXPORT_SYMBOL(atomic64_xchg);
 int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
 	unsigned long flags;
-	spinlock_t *lock = lock_addr(v);
+	raw_spinlock_t *lock = lock_addr(v);
 	int ret = 0;
 
-	spin_lock_irqsave(lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	if (v->counter != u) {
 		v->counter += a;
 		ret = 1;
 	}
-	spin_unlock_irqrestore(lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(atomic64_add_unless);
@@ -179,7 +179,7 @@ static int init_atomic64_lock(void)
 	int i;
 
 	for (i = 0; i < NR_LOCKS; ++i)
-		spin_lock_init(&atomic64_lock[i].lock);
+		raw_spin_lock_init(&atomic64_lock[i].lock);
 	return 0;
 }
 
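For context on the atomic64 code above: a generic atomic64_t has no room for its own lock, so lib/atomic64.c hashes the variable's address into a small array of cacheline-padded locks via lock_addr(), letting unrelated 64-bit counters usually take different locks. Below is a simplified userspace sketch of that idea; NR_LOCKS, CACHE_BYTES and the hash are illustrative values, not the kernel's exact definitions.

/* Userspace sketch of a hashed, cacheline-padded lock array (illustrative). */
#include <pthread.h>
#include <stdint.h>

#define NR_LOCKS	16
#define CACHE_BYTES	64

static union {
	pthread_mutex_t lock;
	char pad[CACHE_BYTES];		/* keep each lock in its own cacheline */
} lock_table[NR_LOCKS];

typedef struct { long long counter; } atomic64_sim_t;

/* Call once before use, mirroring init_atomic64_lock() in the diff above. */
static void lock_table_init(void)
{
	int i;

	for (i = 0; i < NR_LOCKS; i++)
		pthread_mutex_init(&lock_table[i].lock, NULL);
}

static pthread_mutex_t *lock_addr(const atomic64_sim_t *v)
{
	uintptr_t addr = (uintptr_t)v >> 6;	/* drop cacheline-offset bits */

	return &lock_table[addr % NR_LOCKS].lock;
}

static long long atomic64_sim_add_return(long long a, atomic64_sim_t *v)
{
	pthread_mutex_t *lock = lock_addr(v);
	long long val;

	pthread_mutex_lock(lock);
	val = v->counter += a;
	pthread_mutex_unlock(lock);
	return val;
}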
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 28f2c33c6b53..f087105ed914 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -59,13 +59,13 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 {
 	int cpu;
 
-	spin_lock(&fbc->lock);
+	raw_spin_lock(&fbc->lock);
 	for_each_possible_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		*pcount = 0;
 	}
 	fbc->count = amount;
-	spin_unlock(&fbc->lock);
+	raw_spin_unlock(&fbc->lock);
 }
 EXPORT_SYMBOL(percpu_counter_set);
 
@@ -76,10 +76,10 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 	preempt_disable();
 	count = __this_cpu_read(*fbc->counters) + amount;
 	if (count >= batch || count <= -batch) {
-		spin_lock(&fbc->lock);
+		raw_spin_lock(&fbc->lock);
 		fbc->count += count;
 		__this_cpu_write(*fbc->counters, 0);
-		spin_unlock(&fbc->lock);
+		raw_spin_unlock(&fbc->lock);
 	} else {
 		__this_cpu_write(*fbc->counters, count);
 	}
@@ -96,13 +96,13 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
 	s64 ret;
 	int cpu;
 
-	spin_lock(&fbc->lock);
+	raw_spin_lock(&fbc->lock);
 	ret = fbc->count;
 	for_each_online_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
 	}
-	spin_unlock(&fbc->lock);
+	raw_spin_unlock(&fbc->lock);
 	return ret;
 }
 EXPORT_SYMBOL(__percpu_counter_sum);
@@ -110,7 +110,7 @@ EXPORT_SYMBOL(__percpu_counter_sum);
 int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
 			  struct lock_class_key *key)
 {
-	spin_lock_init(&fbc->lock);
+	raw_spin_lock_init(&fbc->lock);
 	lockdep_set_class(&fbc->lock, key);
 	fbc->count = amount;
 	fbc->counters = alloc_percpu(s32);
@@ -173,11 +173,11 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
 		s32 *pcount;
 		unsigned long flags;
 
-		spin_lock_irqsave(&fbc->lock, flags);
+		raw_spin_lock_irqsave(&fbc->lock, flags);
 		pcount = per_cpu_ptr(fbc->counters, cpu);
 		fbc->count += *pcount;
 		*pcount = 0;
-		spin_unlock_irqrestore(&fbc->lock, flags);
+		raw_spin_unlock_irqrestore(&fbc->lock, flags);
 	}
 	mutex_unlock(&percpu_counters_lock);
 #endif
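The percpu_counter change keeps the usual usage pattern: callers update a cheap per-CPU delta, and only the fold into the shared fbc->count takes the (now raw) spinlock once the delta reaches the batch threshold. A hedged usage sketch with a made-up counter name; the percpu_counter_init() signature shown is the one from this kernel generation (it later grew a gfp argument).

/* Hypothetical user of the percpu_counter API touched above. */
#include <linux/percpu_counter.h>

static struct percpu_counter nr_things;

static int nr_things_setup(void)
{
	return percpu_counter_init(&nr_things, 0);
}

static void thing_created(void)
{
	percpu_counter_inc(&nr_things);		/* fast path: usually no lock taken */
}

static s64 nr_things_snapshot(void)
{
	return percpu_counter_sum(&nr_things);	/* folds per-CPU deltas under fbc->lock */
}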
diff --git a/lib/proportions.c b/lib/proportions.c
index d50746a79de2..05df84801b56 100644
--- a/lib/proportions.c
+++ b/lib/proportions.c
@@ -190,7 +190,7 @@ prop_adjust_shift(int *pl_shift, unsigned long *pl_period, int new_shift)
 
 int prop_local_init_percpu(struct prop_local_percpu *pl)
 {
-	spin_lock_init(&pl->lock);
+	raw_spin_lock_init(&pl->lock);
 	pl->shift = 0;
 	pl->period = 0;
 	return percpu_counter_init(&pl->events, 0);
@@ -226,7 +226,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl)
 	if (pl->period == global_period)
 		return;
 
-	spin_lock_irqsave(&pl->lock, flags);
+	raw_spin_lock_irqsave(&pl->lock, flags);
 	prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
 
 	/*
@@ -247,7 +247,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl)
 	percpu_counter_set(&pl->events, 0);
 
 	pl->period = global_period;
-	spin_unlock_irqrestore(&pl->lock, flags);
+	raw_spin_unlock_irqrestore(&pl->lock, flags);
 }
 
 /*
@@ -324,7 +324,7 @@ void prop_fraction_percpu(struct prop_descriptor *pd,
 
 int prop_local_init_single(struct prop_local_single *pl)
 {
-	spin_lock_init(&pl->lock);
+	raw_spin_lock_init(&pl->lock);
 	pl->shift = 0;
 	pl->period = 0;
 	pl->events = 0;
@@ -356,7 +356,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl)
 	if (pl->period == global_period)
 		return;
 
-	spin_lock_irqsave(&pl->lock, flags);
+	raw_spin_lock_irqsave(&pl->lock, flags);
 	prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
 	/*
 	 * For each missed period, we half the local counter.
@@ -367,7 +367,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl)
 	else
 		pl->events = 0;
 	pl->period = global_period;
-	spin_unlock_irqrestore(&pl->lock, flags);
+	raw_spin_unlock_irqrestore(&pl->lock, flags);
 }
 
 /*
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 027a03f4c56d..c96d500577de 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -39,7 +39,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
 	 * in addition to the one that will be printed by
 	 * the entity that is holding the lock already:
 	 */
-	if (!spin_trylock_irqsave(&rs->lock, flags))
+	if (!raw_spin_trylock_irqsave(&rs->lock, flags))
 		return 0;
 
 	if (!rs->begin)
@@ -60,7 +60,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
 		rs->missed++;
 		ret = 0;
 	}
-	spin_unlock_irqrestore(&rs->lock, flags);
+	raw_spin_unlock_irqrestore(&rs->lock, flags);
 
 	return ret;
 }
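rs->lock in ___ratelimit() is taken with a trylock from whatever context happens to print, including printk paths that may run with interrupts disabled, which is why it has to remain a true spinning lock. A small, hedged usage sketch with illustrative names:

/* Hypothetical caller of the ratelimit code changed above. */
#include <linux/kernel.h>
#include <linux/ratelimit.h>

static DEFINE_RATELIMIT_STATE(my_rs, 5 * HZ, 10);	/* at most 10 messages per 5 seconds */

static void report_problem(int err)
{
	if (__ratelimit(&my_rs))	/* wraps ___ratelimit(&my_rs, __func__) */
		pr_warn("something went wrong: %d\n", err);
}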
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index ffc9fc7f3b05..f2393c21fe85 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -22,9 +22,9 @@ int rwsem_is_locked(struct rw_semaphore *sem)
 	int ret = 1;
 	unsigned long flags;
 
-	if (spin_trylock_irqsave(&sem->wait_lock, flags)) {
+	if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
 		ret = (sem->activity != 0);
-		spin_unlock_irqrestore(&sem->wait_lock, flags);
+		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 	}
 	return ret;
 }
@@ -44,7 +44,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
 	lockdep_init_map(&sem->dep_map, name, key, 0);
 #endif
 	sem->activity = 0;
-	spin_lock_init(&sem->wait_lock);
+	raw_spin_lock_init(&sem->wait_lock);
 	INIT_LIST_HEAD(&sem->wait_list);
 }
 EXPORT_SYMBOL(__init_rwsem);
@@ -145,12 +145,12 @@ void __sched __down_read(struct rw_semaphore *sem)
 	struct task_struct *tsk;
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
 		/* granted */
 		sem->activity++;
-		spin_unlock_irqrestore(&sem->wait_lock, flags);
+		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 		goto out;
 	}
 
@@ -165,7 +165,7 @@ void __sched __down_read(struct rw_semaphore *sem)
 	list_add_tail(&waiter.list, &sem->wait_list);
 
 	/* we don't need to touch the semaphore struct anymore */
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	/* wait to be given the lock */
 	for (;;) {
@@ -189,7 +189,7 @@ int __down_read_trylock(struct rw_semaphore *sem)
 	int ret = 0;
 
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
 		/* granted */
@@ -197,7 +197,7 @@ int __down_read_trylock(struct rw_semaphore *sem)
 		ret = 1;
 	}
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	return ret;
 }
@@ -212,12 +212,12 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 	struct task_struct *tsk;
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
 		/* granted */
 		sem->activity = -1;
-		spin_unlock_irqrestore(&sem->wait_lock, flags);
+		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 		goto out;
 	}
 
@@ -232,7 +232,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 	list_add_tail(&waiter.list, &sem->wait_list);
 
 	/* we don't need to touch the semaphore struct anymore */
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	/* wait to be given the lock */
 	for (;;) {
@@ -260,7 +260,7 @@ int __down_write_trylock(struct rw_semaphore *sem)
 	unsigned long flags;
 	int ret = 0;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
 		/* granted */
@@ -268,7 +268,7 @@ int __down_write_trylock(struct rw_semaphore *sem)
 		ret = 1;
 	}
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	return ret;
 }
@@ -280,12 +280,12 @@ void __up_read(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
 		sem = __rwsem_wake_one_writer(sem);
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 }
 
 /*
@@ -295,13 +295,13 @@ void __up_write(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	sem->activity = 0;
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, 1);
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 }
 
 /*
@@ -312,12 +312,12 @@ void __downgrade_write(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	sem->activity = 1;
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, 0);
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 }
 
diff --git a/lib/rwsem.c b/lib/rwsem.c
index aa7c3052261f..410aa1189b13 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -22,7 +22,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
 	lockdep_init_map(&sem->dep_map, name, key, 0);
 #endif
 	sem->count = RWSEM_UNLOCKED_VALUE;
-	spin_lock_init(&sem->wait_lock);
+	raw_spin_lock_init(&sem->wait_lock);
 	INIT_LIST_HEAD(&sem->wait_list);
 }
 
@@ -180,7 +180,7 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
 	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 
 	/* set up my own style of waitqueue */
-	spin_lock_irq(&sem->wait_lock);
+	raw_spin_lock_irq(&sem->wait_lock);
 	waiter.task = tsk;
 	waiter.flags = flags;
 	get_task_struct(tsk);
@@ -204,7 +204,7 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
 	    adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
 		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
 
-	spin_unlock_irq(&sem->wait_lock);
+	raw_spin_unlock_irq(&sem->wait_lock);
 
 	/* wait to be given the lock */
 	for (;;) {
@@ -245,13 +245,13 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	/* do nothing if list empty */
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	return sem;
 }
@@ -265,13 +265,13 @@ struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	/* do nothing if list empty */
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	return sem;
 }