Diffstat (limited to 'kernel/locking')
-rw-r--r--  kernel/locking/Makefile                                               | 11
-rw-r--r--  kernel/locking/mcs_spinlock.h                                         | 16
-rw-r--r--  kernel/locking/mutex.c                                                | 62
-rw-r--r--  kernel/locking/osq_lock.c (renamed from kernel/locking/mcs_spinlock.c) |  9
-rw-r--r--  kernel/locking/rtmutex.c                                              | 10
-rw-r--r--  kernel/locking/rwsem-spinlock.c                                       |  2
-rw-r--r--  kernel/locking/rwsem-xadd.c                                           |  3
-rw-r--r--  kernel/locking/spinlock.c                                             |  8
8 files changed, 56 insertions(+), 65 deletions(-)
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index 8541bfdfd232..de7a416cca2a 100644
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -1,11 +1,11 @@
 
-obj-y += mutex.o semaphore.o rwsem.o mcs_spinlock.o
+obj-y += mutex.o semaphore.o rwsem.o
 
 ifdef CONFIG_FUNCTION_TRACER
-CFLAGS_REMOVE_lockdep.o = -pg
-CFLAGS_REMOVE_lockdep_proc.o = -pg
-CFLAGS_REMOVE_mutex-debug.o = -pg
-CFLAGS_REMOVE_rtmutex-debug.o = -pg
+CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_lockdep_proc.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_mutex-debug.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE)
 endif
 
 obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
@@ -14,6 +14,7 @@ ifeq ($(CONFIG_PROC_FS),y)
 obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
 endif
 obj-$(CONFIG_SMP) += spinlock.o
+obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o
 obj-$(CONFIG_SMP) += lglock.o
 obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
 obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index 4d60986fcbee..d1fe2ba5bac9 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -108,20 +108,4 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
         arch_mcs_spin_unlock_contended(&next->locked);
 }
 
-/*
- * Cancellable version of the MCS lock above.
- *
- * Intended for adaptive spinning of sleeping locks:
- * mutex_lock()/rwsem_down_{read,write}() etc.
- */
-
-struct optimistic_spin_node {
-        struct optimistic_spin_node *next, *prev;
-        int locked; /* 1 if lock acquired */
-        int cpu; /* encoded CPU # value */
-};
-
-extern bool osq_lock(struct optimistic_spin_queue *lock);
-extern void osq_unlock(struct optimistic_spin_queue *lock);
-
 #endif /* __LINUX_MCS_SPINLOCK_H */
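The cancellable-MCS (OSQ) declarations removed above leave the MCS header; judging by the file rename below and its new #include <linux/osq_lock.h>, they now live in that header instead. A minimal sketch of what such a header would contain, reconstructed from the removed lines — the struct optimistic_spin_queue layout, the extra include and the guard name are assumptions, not taken from this diff:

#ifndef __LINUX_OSQ_LOCK_H
#define __LINUX_OSQ_LOCK_H

#include <linux/atomic.h>

/* Per-CPU node of the cancellable (optimistic-spin) MCS queue. */
struct optimistic_spin_node {
        struct optimistic_spin_node *next, *prev;
        int locked; /* 1 if lock acquired */
        int cpu;    /* encoded CPU # value */
};

/* Queue head: assumed to store an encoded CPU number of the tail node. */
struct optimistic_spin_queue {
        atomic_t tail;
};

extern bool osq_lock(struct optimistic_spin_queue *lock);
extern void osq_unlock(struct optimistic_spin_queue *lock);

#endif /* __LINUX_OSQ_LOCK_H */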
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 454195194d4a..94674e5919cb 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -81,7 +81,7 @@ __visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);
  * The mutex must later on be released by the same task that
  * acquired it. Recursive locking is not allowed. The task
  * may not exit without first unlocking the mutex. Also, kernel
- * memory where the mutex resides mutex must not be freed with
+ * memory where the mutex resides must not be freed with
  * the mutex still locked. The mutex must first be initialized
  * (or statically defined) before it can be locked. memset()-ing
  * the mutex to 0 is not allowed.
@@ -147,7 +147,7 @@ static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
 }
 
 /*
- * after acquiring lock with fastpath or when we lost out in contested
+ * After acquiring lock with fastpath or when we lost out in contested
  * slowpath, set ctx and wake up any waiters so they can recheck.
  *
  * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
@@ -191,19 +191,32 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock,
         spin_unlock_mutex(&lock->base.wait_lock, flags);
 }
 
-
-#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 /*
- * In order to avoid a stampede of mutex spinners from acquiring the mutex
- * more or less simultaneously, the spinners need to acquire a MCS lock
- * first before spinning on the owner field.
+ * After acquiring lock in the slowpath set ctx and wake up any
+ * waiters so they can recheck.
  *
+ * Callers must hold the mutex wait_lock.
  */
+static __always_inline void
+ww_mutex_set_context_slowpath(struct ww_mutex *lock,
+                              struct ww_acquire_ctx *ctx)
+{
+        struct mutex_waiter *cur;
 
-/*
- * Mutex spinning code migrated from kernel/sched/core.c
- */
+        ww_mutex_lock_acquired(lock, ctx);
+        lock->ctx = ctx;
+
+        /*
+         * Give any possible sleeping processes the chance to wake up,
+         * so they can recheck if they have to back off.
+         */
+        list_for_each_entry(cur, &lock->base.wait_list, list) {
+                debug_mutex_wake_waiter(&lock->base, cur);
+                wake_up_process(cur->task);
+        }
+}
 
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
 {
         if (lock->owner != owner)
@@ -307,6 +320,11 @@ static bool mutex_optimistic_spin(struct mutex *lock,
         if (!mutex_can_spin_on_owner(lock))
                 goto done;
 
+        /*
+         * In order to avoid a stampede of mutex spinners trying to
+         * acquire the mutex all at once, the spinners need to take a
+         * MCS (queued) lock first before spinning on the owner field.
+         */
         if (!osq_lock(&lock->osq))
                 goto done;
 
@@ -469,7 +487,7 @@ void __sched ww_mutex_unlock(struct ww_mutex *lock)
 EXPORT_SYMBOL(ww_mutex_unlock);
 
 static inline int __sched
-__mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
+__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
 {
         struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
         struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
@@ -557,7 +575,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                 }
 
                 if (use_ww_ctx && ww_ctx->acquired > 0) {
-                        ret = __mutex_lock_check_stamp(lock, ww_ctx);
+                        ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
                         if (ret)
                                 goto err;
                 }
@@ -569,6 +587,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                 schedule_preempt_disabled();
                 spin_lock_mutex(&lock->wait_lock, flags);
         }
+        __set_task_state(task, TASK_RUNNING);
+
         mutex_remove_waiter(lock, &waiter, current_thread_info());
         /* set it to 0 if there are no waiters left: */
         if (likely(list_empty(&lock->wait_list)))
@@ -582,23 +602,7 @@ skip_wait:
 
         if (use_ww_ctx) {
                 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
-                struct mutex_waiter *cur;
-
-                /*
-                 * This branch gets optimized out for the common case,
-                 * and is only important for ww_mutex_lock.
-                 */
-                ww_mutex_lock_acquired(ww, ww_ctx);
-                ww->ctx = ww_ctx;
-
-                /*
-                 * Give any possible sleeping processes the chance to wake up,
-                 * so they can recheck if they have to back off.
-                 */
-                list_for_each_entry(cur, &lock->wait_list, list) {
-                        debug_mutex_wake_waiter(lock, cur);
-                        wake_up_process(cur->task);
-                }
+                ww_mutex_set_context_slowpath(ww, ww_ctx);
         }
 
         spin_unlock_mutex(&lock->wait_lock, flags);
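For orientation, a condensed sketch of the optimistic-spin ordering that the relocated "stampede" comment describes: queue on the OSQ first, then spin on the owner. This is not the verbatim mutex_optimistic_spin(); mutex_spin_on_owner(), mutex_try_to_acquire() and cpu_relax_lowlatency() are assumed helpers, and the ww_ctx handling and cancel details are elided.

/* Sketch only, under the assumptions named above. */
static bool optimistic_spin_sketch(struct mutex *lock)
{
        bool acquired = false;

        if (!mutex_can_spin_on_owner(lock))
                return false;

        /* Queue up instead of stampeding on the owner field. */
        if (!osq_lock(&lock->osq))
                return false;

        for (;;) {
                struct task_struct *owner = ACCESS_ONCE(lock->owner);

                /* Spin while the current owner is on a CPU; bail otherwise. */
                if (owner && !mutex_spin_on_owner(lock, owner))
                        break;

                /* Owner released the lock: try to take it. */
                if (mutex_try_to_acquire(lock)) {
                        acquired = true;
                        break;
                }

                if (need_resched())
                        break;

                cpu_relax_lowlatency();
        }

        osq_unlock(&lock->osq);
        return acquired;
}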
diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/osq_lock.c
index 9887a905a762..c112d00341b0 100644
--- a/kernel/locking/mcs_spinlock.c
+++ b/kernel/locking/osq_lock.c
@@ -1,8 +1,6 @@
 #include <linux/percpu.h>
 #include <linux/sched.h>
-#include "mcs_spinlock.h"
-
-#ifdef CONFIG_SMP
+#include <linux/osq_lock.h>
 
 /*
  * An MCS like lock especially tailored for optimistic spinning for sleeping
@@ -111,7 +109,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
          * cmpxchg in an attempt to undo our queueing.
          */
 
-        while (!smp_load_acquire(&node->locked)) {
+        while (!ACCESS_ONCE(node->locked)) {
                 /*
                  * If we need to reschedule bail... so we can block.
                  */
@@ -203,6 +201,3 @@ void osq_unlock(struct optimistic_spin_queue *lock)
         if (next)
                 ACCESS_ONCE(next->locked) = 1;
 }
-
-#endif
-
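The polling loop now uses a plain ACCESS_ONCE() load instead of smp_load_acquire(); node->locked only gates this spinner's own progress, so presumably no acquire ordering is needed there. A sketch of the resulting loop shape — the unqueue/cancel path and the relax primitive are assumptions:

/* Sketch of the wait loop inside osq_lock() after this change. */
static void osq_wait_sketch(struct optimistic_spin_node *node)
{
        while (!ACCESS_ONCE(node->locked)) {
                /* If we need to reschedule, bail so the caller can block. */
                if (need_resched())
                        goto unqueue;

                cpu_relax_lowlatency();
        }
        return;

unqueue:
        /* cmpxchg-based unqueueing happens here in the real code. */
        ;
}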
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 7c98873a3077..e16e5542bf13 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1130,6 +1130,7 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
                 set_current_state(state);
         }
 
+        __set_current_state(TASK_RUNNING);
         return ret;
 }
 
@@ -1188,12 +1189,12 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
         ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
 
         if (likely(!ret))
+                /* sleep on the mutex */
                 ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
 
-        set_current_state(TASK_RUNNING);
-
         if (unlikely(ret)) {
-                remove_waiter(lock, &waiter);
+                if (rt_mutex_has_waiters(lock))
+                        remove_waiter(lock, &waiter);
                 rt_mutex_handle_deadlock(ret, chwalk, &waiter);
         }
 
@@ -1626,10 +1627,9 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
 
         set_current_state(TASK_INTERRUPTIBLE);
 
+        /* sleep on the mutex */
         ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
 
-        set_current_state(TASK_RUNNING);
-
         if (unlikely(ret))
                 remove_waiter(lock, waiter);
 
diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
index 2c93571162cb..2555ae15ec14 100644
--- a/kernel/locking/rwsem-spinlock.c
+++ b/kernel/locking/rwsem-spinlock.c
@@ -154,7 +154,7 @@ void __sched __down_read(struct rw_semaphore *sem)
                 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
         }
 
-        tsk->state = TASK_RUNNING;
+        __set_task_state(tsk, TASK_RUNNING);
  out:
         ;
 }
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 7628c3fc37ca..2f7cc4076f50 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -242,8 +242,7 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
                 schedule();
         }
 
-        tsk->state = TASK_RUNNING;
-
+        __set_task_state(tsk, TASK_RUNNING);
         return sem;
 }
 EXPORT_SYMBOL(rwsem_down_read_failed);
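The rwsem hunks, like the mutex and rtmutex hunks above, converge on the same wait-loop convention; a generic sketch of that pattern follows, with wait_condition() as a hypothetical stand-in for the lock-specific check:

/* Assumed stand-in for "is the lock now ours / is the wait over". */
extern bool wait_condition(void);

/*
 * Generic sleep-wait pattern these hunks normalize: the barrier-taking
 * set_task_state() guards the condition check against a concurrent wakeup,
 * while the final transition back to TASK_RUNNING can use the plain-store
 * __set_task_state() because the task is already running at that point.
 */
static void wait_for_lock_sketch(struct task_struct *tsk)
{
        for (;;) {
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                if (wait_condition())
                        break;
                schedule();
        }

        __set_task_state(tsk, TASK_RUNNING);
}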
diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
index 4b082b5cac9e..db3ccb1dd614 100644
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -363,6 +363,14 @@ void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
 }
 EXPORT_SYMBOL(_raw_spin_lock_nested);
 
+void __lockfunc _raw_spin_lock_bh_nested(raw_spinlock_t *lock, int subclass)
+{
+        __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
+        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
+}
+EXPORT_SYMBOL(_raw_spin_lock_bh_nested);
+
 unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
                                                 int subclass)
 {
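A hypothetical caller of the new entry point, assuming it is exposed through a spin_lock_bh_nested() wrapper in <linux/spinlock.h>: taking two bh-disabling locks of the same lockdep class (say, two hash-bucket locks during a table resize) without triggering a false deadlock report.

#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* Hypothetical example; bucket locks of the same class nest one level deep. */
static void move_bucket(spinlock_t *old_bucket_lock, spinlock_t *new_bucket_lock)
{
        spin_lock_bh(old_bucket_lock);
        /* Same lock class as above: tell lockdep this nesting is intentional. */
        spin_lock_bh_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);

        /* ... relink entries from the old bucket into the new one ... */

        spin_unlock_bh(new_bucket_lock);
        spin_unlock_bh(old_bucket_lock);
}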
