| author | Thomas Gleixner <tglx@linutronix.de> | 2010-02-24 03:54:54 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2011-09-13 05:11:59 -0400 |
| commit | ddb6c9b58a19edcfac93ac670b066c836ff729f1 | |
| tree | 70a9ab2dde08671a050424b0d70fa535472e1345 /lib | |
| parent | 8292c9e15c3b069459794a04f5e2cf0d5665ddc4 | |
locking, rwsem: Annotate inner lock as raw
There is no reason to allow the lock protecting rwsems (the
ownerless variant) to be preemptible on -rt. Convert it to raw.
In mainline this change documents the low-level nature of
the lock; otherwise there is no functional difference. Lockdep
and Sparse checking will work as usual.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
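
The conversion below is mechanical: the inner `wait_lock` becomes a `raw_spinlock_t`, and every `spin_*` call on it gains a `raw_` prefix. As a minimal sketch of the pattern (the `demo_sem` structure and function names are hypothetical, for illustration only; the real code is in the diff that follows):

```c
#include <linux/spinlock.h>
#include <linux/list.h>

/* Hypothetical ownerless-rwsem-like structure, illustration only. */
struct demo_sem {
	raw_spinlock_t		wait_lock;	/* raw: never a sleeping lock, even on -rt */
	int			activity;
	struct list_head	wait_list;
};

static void demo_sem_init(struct demo_sem *sem)
{
	/* raw_spin_lock_init() replaces spin_lock_init() */
	raw_spin_lock_init(&sem->wait_lock);
	sem->activity = 0;
	INIT_LIST_HEAD(&sem->wait_list);
}

static int demo_sem_is_locked(struct demo_sem *sem)
{
	unsigned long flags;
	int ret = 1;

	/* Same calling convention as the spin_* API, raw_ prefix aside. */
	if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
		ret = (sem->activity != 0);
		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	}
	return ret;
}
```

On mainline both lock types compile to the same thing; under PREEMPT_RT a `spinlock_t` becomes a sleeping lock, while a `raw_spinlock_t` keeps spinning with preemption disabled, which is what these short, bounded critical sections require.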
Diffstat (limited to 'lib')
```
 lib/rwsem-spinlock.c | 38 +++++++++++++++++++-------------------
 lib/rwsem.c          | 14 +++++++-------
 2 files changed, 26 insertions(+), 26 deletions(-)
```
```diff
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index ffc9fc7f3b05..f2393c21fe85 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -22,9 +22,9 @@ int rwsem_is_locked(struct rw_semaphore *sem)
 	int ret = 1;
 	unsigned long flags;
 
-	if (spin_trylock_irqsave(&sem->wait_lock, flags)) {
+	if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
 		ret = (sem->activity != 0);
-		spin_unlock_irqrestore(&sem->wait_lock, flags);
+		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 	}
 	return ret;
 }
@@ -44,7 +44,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
 	lockdep_init_map(&sem->dep_map, name, key, 0);
 #endif
 	sem->activity = 0;
-	spin_lock_init(&sem->wait_lock);
+	raw_spin_lock_init(&sem->wait_lock);
 	INIT_LIST_HEAD(&sem->wait_list);
 }
 EXPORT_SYMBOL(__init_rwsem);
@@ -145,12 +145,12 @@ void __sched __down_read(struct rw_semaphore *sem)
 	struct task_struct *tsk;
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
 		/* granted */
 		sem->activity++;
-		spin_unlock_irqrestore(&sem->wait_lock, flags);
+		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 		goto out;
 	}
 
@@ -165,7 +165,7 @@ void __sched __down_read(struct rw_semaphore *sem)
 	list_add_tail(&waiter.list, &sem->wait_list);
 
 	/* we don't need to touch the semaphore struct anymore */
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	/* wait to be given the lock */
 	for (;;) {
@@ -189,7 +189,7 @@ int __down_read_trylock(struct rw_semaphore *sem)
 	int ret = 0;
 
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
 		/* granted */
@@ -197,7 +197,7 @@ int __down_read_trylock(struct rw_semaphore *sem)
 		ret = 1;
 	}
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	return ret;
 }
@@ -212,12 +212,12 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 	struct task_struct *tsk;
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
 		/* granted */
 		sem->activity = -1;
-		spin_unlock_irqrestore(&sem->wait_lock, flags);
+		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 		goto out;
 	}
 
@@ -232,7 +232,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 	list_add_tail(&waiter.list, &sem->wait_list);
 
 	/* we don't need to touch the semaphore struct anymore */
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	/* wait to be given the lock */
 	for (;;) {
@@ -260,7 +260,7 @@ int __down_write_trylock(struct rw_semaphore *sem)
 	unsigned long flags;
 	int ret = 0;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
 		/* granted */
@@ -268,7 +268,7 @@ int __down_write_trylock(struct rw_semaphore *sem)
 		ret = 1;
 	}
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	return ret;
 }
@@ -280,12 +280,12 @@ void __up_read(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
 		sem = __rwsem_wake_one_writer(sem);
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 }
 
 /*
@@ -295,13 +295,13 @@ void __up_write(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	sem->activity = 0;
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, 1);
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 }
 
 /*
@@ -312,12 +312,12 @@ void __downgrade_write(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	sem->activity = 1;
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, 0);
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 }
 
```
```diff
diff --git a/lib/rwsem.c b/lib/rwsem.c
index aa7c3052261f..410aa1189b13 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -22,7 +22,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
 	lockdep_init_map(&sem->dep_map, name, key, 0);
 #endif
 	sem->count = RWSEM_UNLOCKED_VALUE;
-	spin_lock_init(&sem->wait_lock);
+	raw_spin_lock_init(&sem->wait_lock);
 	INIT_LIST_HEAD(&sem->wait_list);
 }
 
@@ -180,7 +180,7 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
 	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 
 	/* set up my own style of waitqueue */
-	spin_lock_irq(&sem->wait_lock);
+	raw_spin_lock_irq(&sem->wait_lock);
 	waiter.task = tsk;
 	waiter.flags = flags;
 	get_task_struct(tsk);
@@ -204,7 +204,7 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
 	    adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
 		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
 
-	spin_unlock_irq(&sem->wait_lock);
+	raw_spin_unlock_irq(&sem->wait_lock);
 
 	/* wait to be given the lock */
 	for (;;) {
@@ -245,13 +245,13 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	/* do nothing if list empty */
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	return sem;
 }
@@ -265,13 +265,13 @@ struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait_lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	/* do nothing if list empty */
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
 
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	return sem;
 }
```
