diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-05-16 16:41:02 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-05-16 16:41:02 -0400 |
| commit | 3469d261eac65912927dca13ee8f77c744ad7aa2 (patch) | |
| tree | 09b25f80c065d52ee6e158c189ba44bb28bb76fc /kernel/locking | |
| parent | 1c19b68a279c58d6da4379bf8b6d679a300a1daf (diff) | |
| parent | 4544ba8c6b1743499cabb682897a469911845f15 (diff) | |
Merge branch 'locking-rwsem-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull support for killable rwsems from Ingo Molnar:
"This, by Michal Hocko, implements down_write_killable().
The main use case will be to update mm_sem usage sites to use this new
API, to allow the mm-reaper introduced in commit aac453635549 ("mm,
oom: introduce oom reaper") to tear down oom victim address spaces
asynchronously with minimum latencies and without deadlock worries"
[ The vfs will want it too as the inode lock is changed from a mutex to
a rwsem due to the parallel lookup and readdir updates ]
* 'locking-rwsem-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
locking/rwsem: Fix comment on register clobbering
locking/rwsem: Fix down_write_killable()
locking/rwsem, x86: Add frame annotation for call_rwsem_down_write_failed_killable()
locking/rwsem: Provide down_write_killable()
locking/rwsem, x86: Provide __down_write_killable()
locking/rwsem, s390: Provide __down_write_killable()
locking/rwsem, ia64: Provide __down_write_killable()
locking/rwsem, alpha: Provide __down_write_killable()
locking/rwsem: Introduce basis for down_write_killable()
locking/rwsem, sparc: Drop superfluous arch specific implementation
locking/rwsem, sh: Drop superfluous arch specific implementation
locking/rwsem, xtensa: Drop superfluous arch specific implementation
locking/rwsem: Drop explicit memory barriers
locking/rwsem: Get rid of __down_write_nested()
Diffstat (limited to 'kernel/locking')
| -rw-r--r-- | kernel/locking/rwsem-spinlock.c | 19 | ||||
| -rw-r--r-- | kernel/locking/rwsem-xadd.c | 38 | ||||
| -rw-r--r-- | kernel/locking/rwsem.c | 19 |
3 files changed, 68 insertions(+), 8 deletions(-)
diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c index 3a5048572065..1591f6b3539f 100644 --- a/kernel/locking/rwsem-spinlock.c +++ b/kernel/locking/rwsem-spinlock.c | |||
| @@ -191,11 +191,12 @@ int __down_read_trylock(struct rw_semaphore *sem) | |||
| 191 | /* | 191 | /* |
| 192 | * get a write lock on the semaphore | 192 | * get a write lock on the semaphore |
| 193 | */ | 193 | */ |
| 194 | void __sched __down_write_nested(struct rw_semaphore *sem, int subclass) | 194 | int __sched __down_write_common(struct rw_semaphore *sem, int state) |
| 195 | { | 195 | { |
| 196 | struct rwsem_waiter waiter; | 196 | struct rwsem_waiter waiter; |
| 197 | struct task_struct *tsk; | 197 | struct task_struct *tsk; |
| 198 | unsigned long flags; | 198 | unsigned long flags; |
| 199 | int ret = 0; | ||
| 199 | 200 | ||
| 200 | raw_spin_lock_irqsave(&sem->wait_lock, flags); | 201 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
| 201 | 202 | ||
| @@ -215,21 +216,33 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass) | |||
| 215 | */ | 216 | */ |
| 216 | if (sem->count == 0) | 217 | if (sem->count == 0) |
| 217 | break; | 218 | break; |
| 218 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | 219 | if (signal_pending_state(state, current)) { |
| 220 | ret = -EINTR; | ||
| 221 | goto out; | ||
| 222 | } | ||
| 223 | set_task_state(tsk, state); | ||
| 219 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); | 224 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
| 220 | schedule(); | 225 | schedule(); |
| 221 | raw_spin_lock_irqsave(&sem->wait_lock, flags); | 226 | raw_spin_lock_irqsave(&sem->wait_lock, flags); |
| 222 | } | 227 | } |
| 223 | /* got the lock */ | 228 | /* got the lock */ |
| 224 | sem->count = -1; | 229 | sem->count = -1; |
| 230 | out: | ||
| 225 | list_del(&waiter.list); | 231 | list_del(&waiter.list); |
| 226 | 232 | ||
| 227 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); | 233 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
| 234 | |||
| 235 | return ret; | ||
| 228 | } | 236 | } |
| 229 | 237 | ||
| 230 | void __sched __down_write(struct rw_semaphore *sem) | 238 | void __sched __down_write(struct rw_semaphore *sem) |
| 231 | { | 239 | { |
| 232 | __down_write_nested(sem, 0); | 240 | __down_write_common(sem, TASK_UNINTERRUPTIBLE); |
| 241 | } | ||
| 242 | |||
| 243 | int __sched __down_write_killable(struct rw_semaphore *sem) | ||
| 244 | { | ||
| 245 | return __down_write_common(sem, TASK_KILLABLE); | ||
| 233 | } | 246 | } |
| 234 | 247 | ||
| 235 | /* | 248 | /* |
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c index a4d4de05b2d1..09e30c6225e5 100644 --- a/kernel/locking/rwsem-xadd.c +++ b/kernel/locking/rwsem-xadd.c | |||
| @@ -433,12 +433,13 @@ static inline bool rwsem_has_spinner(struct rw_semaphore *sem) | |||
| 433 | /* | 433 | /* |
| 434 | * Wait until we successfully acquire the write lock | 434 | * Wait until we successfully acquire the write lock |
| 435 | */ | 435 | */ |
| 436 | __visible | 436 | static inline struct rw_semaphore * |
| 437 | struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem) | 437 | __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state) |
| 438 | { | 438 | { |
| 439 | long count; | 439 | long count; |
| 440 | bool waiting = true; /* any queued threads before us */ | 440 | bool waiting = true; /* any queued threads before us */ |
| 441 | struct rwsem_waiter waiter; | 441 | struct rwsem_waiter waiter; |
| 442 | struct rw_semaphore *ret = sem; | ||
| 442 | 443 | ||
| 443 | /* undo write bias from down_write operation, stop active locking */ | 444 | /* undo write bias from down_write operation, stop active locking */ |
| 444 | count = rwsem_atomic_update(-RWSEM_ACTIVE_WRITE_BIAS, sem); | 445 | count = rwsem_atomic_update(-RWSEM_ACTIVE_WRITE_BIAS, sem); |
| @@ -478,7 +479,7 @@ struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem) | |||
| 478 | count = rwsem_atomic_update(RWSEM_WAITING_BIAS, sem); | 479 | count = rwsem_atomic_update(RWSEM_WAITING_BIAS, sem); |
| 479 | 480 | ||
| 480 | /* wait until we successfully acquire the lock */ | 481 | /* wait until we successfully acquire the lock */ |
| 481 | set_current_state(TASK_UNINTERRUPTIBLE); | 482 | set_current_state(state); |
| 482 | while (true) { | 483 | while (true) { |
| 483 | if (rwsem_try_write_lock(count, sem)) | 484 | if (rwsem_try_write_lock(count, sem)) |
| 484 | break; | 485 | break; |
| @@ -486,21 +487,48 @@ struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem) | |||
| 486 | 487 | ||
| 487 | /* Block until there are no active lockers. */ | 488 | /* Block until there are no active lockers. */ |
| 488 | do { | 489 | do { |
| 490 | if (signal_pending_state(state, current)) | ||
| 491 | goto out_nolock; | ||
| 492 | |||
| 489 | schedule(); | 493 | schedule(); |
| 490 | set_current_state(TASK_UNINTERRUPTIBLE); | 494 | set_current_state(state); |
| 491 | } while ((count = sem->count) & RWSEM_ACTIVE_MASK); | 495 | } while ((count = sem->count) & RWSEM_ACTIVE_MASK); |
| 492 | 496 | ||
| 493 | raw_spin_lock_irq(&sem->wait_lock); | 497 | raw_spin_lock_irq(&sem->wait_lock); |
| 494 | } | 498 | } |
| 495 | __set_current_state(TASK_RUNNING); | 499 | __set_current_state(TASK_RUNNING); |
| 500 | list_del(&waiter.list); | ||
| 501 | raw_spin_unlock_irq(&sem->wait_lock); | ||
| 496 | 502 | ||
| 503 | return ret; | ||
| 504 | |||
| 505 | out_nolock: | ||
| 506 | __set_current_state(TASK_RUNNING); | ||
| 507 | raw_spin_lock_irq(&sem->wait_lock); | ||
| 497 | list_del(&waiter.list); | 508 | list_del(&waiter.list); |
| 509 | if (list_empty(&sem->wait_list)) | ||
| 510 | rwsem_atomic_update(-RWSEM_WAITING_BIAS, sem); | ||
| 511 | else | ||
| 512 | __rwsem_do_wake(sem, RWSEM_WAKE_ANY); | ||
| 498 | raw_spin_unlock_irq(&sem->wait_lock); | 513 | raw_spin_unlock_irq(&sem->wait_lock); |
| 499 | 514 | ||
| 500 | return sem; | 515 | return ERR_PTR(-EINTR); |
| 516 | } | ||
| 517 | |||
| 518 | __visible struct rw_semaphore * __sched | ||
| 519 | rwsem_down_write_failed(struct rw_semaphore *sem) | ||
| 520 | { | ||
| 521 | return __rwsem_down_write_failed_common(sem, TASK_UNINTERRUPTIBLE); | ||
| 501 | } | 522 | } |
| 502 | EXPORT_SYMBOL(rwsem_down_write_failed); | 523 | EXPORT_SYMBOL(rwsem_down_write_failed); |
| 503 | 524 | ||
| 525 | __visible struct rw_semaphore * __sched | ||
| 526 | rwsem_down_write_failed_killable(struct rw_semaphore *sem) | ||
| 527 | { | ||
| 528 | return __rwsem_down_write_failed_common(sem, TASK_KILLABLE); | ||
| 529 | } | ||
| 530 | EXPORT_SYMBOL(rwsem_down_write_failed_killable); | ||
| 531 | |||
| 504 | /* | 532 | /* |
| 505 | * handle waking up a waiter on the semaphore | 533 | * handle waking up a waiter on the semaphore |
| 506 | * - up_read/up_write has decremented the active part of count if we come here | 534 | * - up_read/up_write has decremented the active part of count if we come here |
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index 205be0ce34de..c817216c1615 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c | |||
| @@ -55,6 +55,25 @@ void __sched down_write(struct rw_semaphore *sem) | |||
| 55 | EXPORT_SYMBOL(down_write); | 55 | EXPORT_SYMBOL(down_write); |
| 56 | 56 | ||
| 57 | /* | 57 | /* |
| 58 | * lock for writing | ||
| 59 | */ | ||
| 60 | int __sched down_write_killable(struct rw_semaphore *sem) | ||
| 61 | { | ||
| 62 | might_sleep(); | ||
| 63 | rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_); | ||
| 64 | |||
| 65 | if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock, __down_write_killable)) { | ||
| 66 | rwsem_release(&sem->dep_map, 1, _RET_IP_); | ||
| 67 | return -EINTR; | ||
| 68 | } | ||
| 69 | |||
| 70 | rwsem_set_owner(sem); | ||
| 71 | return 0; | ||
| 72 | } | ||
| 73 | |||
| 74 | EXPORT_SYMBOL(down_write_killable); | ||
| 75 | |||
| 76 | /* | ||
| 58 | * trylock for writing -- returns 1 if successful, 0 if contention | 77 | * trylock for writing -- returns 1 if successful, 0 if contention |
| 59 | */ | 78 | */ |
| 60 | int down_write_trylock(struct rw_semaphore *sem) | 79 | int down_write_trylock(struct rw_semaphore *sem) |
