author	Kevin Hilman <khilman@deeprootsystems.com>	2010-04-07 14:52:46 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-04-07 19:15:05 -0400
commit	3eac4abaa69949af0e2f64e5c55ee8a22bbdd3e7 (patch)
tree	9620ae3c39a0870a010a08166e29823a7afbf262 /lib
parent	04287f975e68038051eb9c79896866d36610b8e0 (diff)
rwsem generic spinlock: use IRQ save/restore spinlocks
rwsems can be used with IRQs disabled, particularly in early boot before
IRQs are enabled. Currently the spin_unlock_irq() usage in the slow path
will unconditionally enable interrupts and cause problems, since
interrupts are not yet initialized or enabled.

This patch uses the save/restore versions of the IRQ spinlock calls in
the slow path to ensure interrupts are not unintentionally enabled.

Signed-off-by: Kevin Hilman <khilman@deeprootsystems.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
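For context, the difference between the two locking APIs is what happens at unlock time: spin_unlock_irq() unconditionally re-enables local interrupts, while spin_unlock_irqrestore() replays whatever IRQ state spin_lock_irqsave() captured. Below is a minimal sketch of the pattern the patch adopts, assuming a kernel-module context; my_lock and my_critical_section are hypothetical names for illustration, not part of the patch.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);	/* hypothetical lock for illustration */

/*
 * May be called with IRQs already disabled (e.g. during early boot).
 *
 * Using spin_lock_irq()/spin_unlock_irq() here would be wrong: the
 * unlock unconditionally re-enables interrupts, regardless of whether
 * the caller had them disabled on entry.
 */
static void my_critical_section(void)
{
	unsigned long flags;

	/* Save the current IRQ state, disable IRQs, and take the lock. */
	spin_lock_irqsave(&my_lock, flags);

	/* ... touch data shared with IRQ context ... */

	/* Restore the saved state: IRQs stay off if they were off before. */
	spin_unlock_irqrestore(&my_lock, flags);
}

The save/restore pair costs a flags word on the stack but makes the function safe to call from any IRQ context, which is why it is the conventional choice when the caller's IRQ state is not known.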
Diffstat (limited to 'lib')
-rw-r--r--	lib/rwsem-spinlock.c	14
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index ccf95bff7984..ffc9fc7f3b05 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -143,13 +143,14 @@ void __sched __down_read(struct rw_semaphore *sem)
 {
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
+	unsigned long flags;
 
-	spin_lock_irq(&sem->wait_lock);
+	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
 		/* granted */
 		sem->activity++;
-		spin_unlock_irq(&sem->wait_lock);
+		spin_unlock_irqrestore(&sem->wait_lock, flags);
 		goto out;
 	}
 
@@ -164,7 +165,7 @@ void __sched __down_read(struct rw_semaphore *sem)
 	list_add_tail(&waiter.list, &sem->wait_list);
 
 	/* we don't need to touch the semaphore struct anymore */
-	spin_unlock_irq(&sem->wait_lock);
+	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	/* wait to be given the lock */
 	for (;;) {
@@ -209,13 +210,14 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
+	unsigned long flags;
 
-	spin_lock_irq(&sem->wait_lock);
+	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
 		/* granted */
 		sem->activity = -1;
-		spin_unlock_irq(&sem->wait_lock);
+		spin_unlock_irqrestore(&sem->wait_lock, flags);
 		goto out;
 	}
 
@@ -230,7 +232,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 	list_add_tail(&waiter.list, &sem->wait_list);
 
 	/* we don't need to touch the semaphore struct anymore */
-	spin_unlock_irq(&sem->wait_lock);
+	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	/* wait to be given the lock */
 	for (;;) {