| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-07-19 12:27:55 -0400 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-07-19 12:27:55 -0400 |
| commit | d057190925d994b808e1d07e6c76b90a32caac77 (patch) | |
| tree | 1f2d104f879435881d77f0d8d1e48c3224264d53 /kernel | |
| parent | d1743b810d7a306d1dd837e086d18124bc38b575 (diff) | |
| parent | 9de8033f1bbcce5ed23fe5da9ca1a5060207f7ed (diff) | |
Merge branch 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking fixes from Thomas Gleixner:
"The locking department delivers:
- A rather large and intrusive bundle of fixes to address serious
performance regressions introduced by the new rwsem / mcs
technology. Simpler solutions have been discussed, but they would
have been ugly bandaids with more risk than doing the right thing.
- Make the rwsem spin-on-owner technology opt-in for architectures
  and enable it only on those known to work.
- A few fixes to the lockdep userspace library"
* 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
locking/rwsem: Add CONFIG_RWSEM_SPIN_ON_OWNER
locking/mutex: Disable optimistic spinning on some architectures
locking/rwsem: Reduce the size of struct rw_semaphore
locking/rwsem: Rename 'activity' to 'count'
locking/spinlocks/mcs: Micro-optimize osq_unlock()
locking/spinlocks/mcs: Introduce and use init macro and function for osq locks
locking/spinlocks/mcs: Convert osq lock to atomic_t to reduce overhead
locking/spinlocks/mcs: Rename optimistic_spin_queue() to optimistic_spin_node()
locking/rwsem: Allow conservative optimistic spinning when readers have lock
tools/liblockdep: Account for bitfield changes in lockdeps lock_acquire
tools/liblockdep: Remove debug print left over from development
tools/liblockdep: Fix comparison of a boolean value with a value of 2
Diffstat (limited to 'kernel')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/Kconfig.locks | 9 |
| -rw-r--r-- | kernel/locking/mcs_spinlock.c | 64 |
| -rw-r--r-- | kernel/locking/mcs_spinlock.h | 9 |
| -rw-r--r-- | kernel/locking/mutex.c | 2 |
| -rw-r--r-- | kernel/locking/rwsem-spinlock.c | 28 |
| -rw-r--r-- | kernel/locking/rwsem-xadd.c | 16 |
| -rw-r--r-- | kernel/locking/rwsem.c | 2 |

7 files changed, 85 insertions(+), 45 deletions(-)
```diff
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 35536d9c0964..76768ee812b2 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -220,9 +220,16 @@ config INLINE_WRITE_UNLOCK_IRQRESTORE
 
 endif
 
+config ARCH_SUPPORTS_ATOMIC_RMW
+	bool
+
 config MUTEX_SPIN_ON_OWNER
 	def_bool y
-	depends on SMP && !DEBUG_MUTEXES
+	depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW
+
+config RWSEM_SPIN_ON_OWNER
+	def_bool y
+	depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW
 
 config ARCH_USE_QUEUE_RWLOCK
 	bool
```
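The two new symbols make optimistic spinning opt-in: MUTEX_SPIN_ON_OWNER and RWSEM_SPIN_ON_OWNER only switch on when the architecture selects ARCH_SUPPORTS_ATOMIC_RMW. The arch-side selects live outside kernel/ and therefore do not appear in this kernel/-limited diffstat; a minimal, hypothetical sketch of such an opt-in looks like this:

```kconfig
# Hypothetical fragment of arch/<arch>/Kconfig opting in to optimistic spinning.
# Without this select, MUTEX_SPIN_ON_OWNER and RWSEM_SPIN_ON_OWNER stay disabled.
config MYARCH
	def_bool y
	select ARCH_SUPPORTS_ATOMIC_RMW
```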
```diff
diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
index 838dc9e00669..be9ee1559fca 100644
--- a/kernel/locking/mcs_spinlock.c
+++ b/kernel/locking/mcs_spinlock.c
@@ -14,21 +14,47 @@
  * called from interrupt context and we have preemption disabled while
  * spinning.
  */
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_queue, osq_node);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);
+
+/*
+ * We use the value 0 to represent "no CPU", thus the encoded value
+ * will be the CPU number incremented by 1.
+ */
+static inline int encode_cpu(int cpu_nr)
+{
+	return cpu_nr + 1;
+}
+
+static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
+{
+	int cpu_nr = encoded_cpu_val - 1;
+
+	return per_cpu_ptr(&osq_node, cpu_nr);
+}
 
 /*
  * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
  * Can return NULL in case we were the last queued and we updated @lock instead.
  */
-static inline struct optimistic_spin_queue *
-osq_wait_next(struct optimistic_spin_queue **lock,
-	      struct optimistic_spin_queue *node,
-	      struct optimistic_spin_queue *prev)
+static inline struct optimistic_spin_node *
+osq_wait_next(struct optimistic_spin_queue *lock,
+	      struct optimistic_spin_node *node,
+	      struct optimistic_spin_node *prev)
 {
-	struct optimistic_spin_queue *next = NULL;
+	struct optimistic_spin_node *next = NULL;
+	int curr = encode_cpu(smp_processor_id());
+	int old;
+
+	/*
+	 * If there is a prev node in queue, then the 'old' value will be
+	 * the prev node's CPU #, else it's set to OSQ_UNLOCKED_VAL since if
+	 * we're currently last in queue, then the queue will then become empty.
+	 */
+	old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;
 
 	for (;;) {
-		if (*lock == node && cmpxchg(lock, node, prev) == node) {
+		if (atomic_read(&lock->tail) == curr &&
+		    atomic_cmpxchg(&lock->tail, curr, old) == curr) {
 			/*
 			 * We were the last queued, we moved @lock back. @prev
 			 * will now observe @lock and will complete its
@@ -59,18 +85,23 @@ osq_wait_next(struct optimistic_spin_queue **lock,
 	return next;
 }
 
-bool osq_lock(struct optimistic_spin_queue **lock)
+bool osq_lock(struct optimistic_spin_queue *lock)
 {
-	struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node);
-	struct optimistic_spin_queue *prev, *next;
+	struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
+	struct optimistic_spin_node *prev, *next;
+	int curr = encode_cpu(smp_processor_id());
+	int old;
 
 	node->locked = 0;
 	node->next = NULL;
+	node->cpu = curr;
 
-	node->prev = prev = xchg(lock, node);
-	if (likely(prev == NULL))
+	old = atomic_xchg(&lock->tail, curr);
+	if (old == OSQ_UNLOCKED_VAL)
 		return true;
 
+	prev = decode_cpu(old);
+	node->prev = prev;
 	ACCESS_ONCE(prev->next) = node;
 
 	/*
@@ -149,20 +180,21 @@ unqueue:
 	return false;
 }
 
-void osq_unlock(struct optimistic_spin_queue **lock)
+void osq_unlock(struct optimistic_spin_queue *lock)
 {
-	struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node);
-	struct optimistic_spin_queue *next;
+	struct optimistic_spin_node *node, *next;
+	int curr = encode_cpu(smp_processor_id());
 
 	/*
 	 * Fast path for the uncontended case.
 	 */
-	if (likely(cmpxchg(lock, node, NULL) == node))
+	if (likely(atomic_cmpxchg(&lock->tail, curr, OSQ_UNLOCKED_VAL) == curr))
 		return;
 
 	/*
 	 * Second most likely case.
 	 */
+	node = this_cpu_ptr(&osq_node);
 	next = xchg(&node->next, NULL);
 	if (next) {
 		ACCESS_ONCE(next->locked) = 1;
```
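This is the core of the conversion: the per-lock tail no longer holds a pointer to the tail node but an atomic_t carrying an encoded CPU number, with 0 reserved for "queue empty" and CPU n stored as n + 1; the node itself is recovered from the per-CPU osq_node array. A self-contained userspace sketch of just that encoding (illustrative names, not kernel code):

```c
/* Illustrative userspace sketch of the OSQ tail encoding introduced above.
 * The queue tail stores "CPU number + 1" in a plain int standing in for an
 * atomic_t, with 0 meaning "queue empty", so the per-CPU node can be
 * recovered from the tail value alone. */
#include <assert.h>
#include <stdio.h>

#define NR_CPUS          4
#define OSQ_UNLOCKED_VAL 0

struct spin_node { int locked; int cpu; };   /* stand-in for optimistic_spin_node */
static struct spin_node node_of[NR_CPUS];    /* stand-in for the per-CPU osq_node */

static int encode_cpu(int cpu_nr)
{
	return cpu_nr + 1;                   /* keeps CPU 0 distinct from "no CPU" */
}

static struct spin_node *decode_cpu(int encoded)
{
	return &node_of[encoded - 1];
}

int main(void)
{
	int tail = OSQ_UNLOCKED_VAL;                 /* empty queue */

	tail = encode_cpu(0);                        /* CPU 0 queues itself as the tail */
	assert(tail != OSQ_UNLOCKED_VAL);            /* still distinguishable from "empty" */
	assert(decode_cpu(tail) == &node_of[0]);     /* tail maps back to CPU 0's node */
	printf("tail=%d decodes to per-CPU node %td\n", tail, decode_cpu(tail) - node_of);
	return 0;
}
```

Storing a 32-bit CPU index instead of a 64-bit node pointer is what allows the "Reduce the size of struct rw_semaphore" patch in this pull to shrink the osq field embedded in each lock.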
```diff
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index a2dbac4aca6b..74356dc0ce29 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -118,12 +118,13 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
  * mutex_lock()/rwsem_down_{read,write}() etc.
  */
 
-struct optimistic_spin_queue {
-	struct optimistic_spin_queue *next, *prev;
+struct optimistic_spin_node {
+	struct optimistic_spin_node *next, *prev;
 	int locked; /* 1 if lock acquired */
+	int cpu; /* encoded CPU # value */
 };
 
-extern bool osq_lock(struct optimistic_spin_queue **lock);
-extern void osq_unlock(struct optimistic_spin_queue **lock);
+extern bool osq_lock(struct optimistic_spin_queue *lock);
+extern void osq_unlock(struct optimistic_spin_queue *lock);
 
 #endif /* __LINUX_MCS_SPINLOCK_H */
```
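Note that osq_lock()/osq_unlock() now take a pointer to a different type: the renamed per-CPU node keeps the name optimistic_spin_node, while the lock itself becomes a new, much smaller struct optimistic_spin_queue. Its definition and the osq_lock_init() helper used in mutex.c and rwsem-xadd.c below live in a header outside kernel/, so they are not part of this diffstat. Reconstructed from the call sites above (a sketch, not a quote from the tree), they presumably look roughly like this:

```c
/* Presumed shape of the new lock type and its initializer; relies on
 * <linux/atomic.h> for atomic_t/atomic_set. Reconstructed from the call
 * sites in this pull, not copied from the header. */
struct optimistic_spin_queue {
	/*
	 * Encoded CPU number of the tail node, or OSQ_UNLOCKED_VAL (0)
	 * when the queue is empty.
	 */
	atomic_t tail;
};

#define OSQ_UNLOCKED_VAL (0)

static inline void osq_lock_init(struct optimistic_spin_queue *lock)
{
	atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
}
```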
```diff
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index bc73d33c6760..acca2c1a3c5e 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -60,7 +60,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 	INIT_LIST_HEAD(&lock->wait_list);
 	mutex_clear_owner(lock);
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-	lock->osq = NULL;
+	osq_lock_init(&lock->osq);
 #endif
 
 	debug_mutex_init(lock, name, key);
```
```diff
diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
index 9be8a9144978..2c93571162cb 100644
--- a/kernel/locking/rwsem-spinlock.c
+++ b/kernel/locking/rwsem-spinlock.c
@@ -26,7 +26,7 @@ int rwsem_is_locked(struct rw_semaphore *sem)
 	unsigned long flags;
 
 	if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
-		ret = (sem->activity != 0);
+		ret = (sem->count != 0);
 		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 	}
 	return ret;
@@ -46,7 +46,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
 	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
 	lockdep_init_map(&sem->dep_map, name, key, 0);
 #endif
-	sem->activity = 0;
+	sem->count = 0;
 	raw_spin_lock_init(&sem->wait_lock);
 	INIT_LIST_HEAD(&sem->wait_list);
 }
@@ -95,7 +95,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 		waiter = list_entry(next, struct rwsem_waiter, list);
 	} while (waiter->type != RWSEM_WAITING_FOR_WRITE);
 
-	sem->activity += woken;
+	sem->count += woken;
 
  out:
 	return sem;
@@ -126,9 +126,9 @@ void __sched __down_read(struct rw_semaphore *sem)
 
 	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
-	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
+	if (sem->count >= 0 && list_empty(&sem->wait_list)) {
 		/* granted */
-		sem->activity++;
+		sem->count++;
 		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 		goto out;
 	}
@@ -170,9 +170,9 @@ int __down_read_trylock(struct rw_semaphore *sem)
 
 	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
-	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
+	if (sem->count >= 0 && list_empty(&sem->wait_list)) {
 		/* granted */
-		sem->activity++;
+		sem->count++;
 		ret = 1;
 	}
 
@@ -206,7 +206,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 		 * itself into sleep and waiting for system woke it or someone
 		 * else in the head of the wait list up.
 		 */
-		if (sem->activity == 0)
+		if (sem->count == 0)
 			break;
 		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
@@ -214,7 +214,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 		raw_spin_lock_irqsave(&sem->wait_lock, flags);
 	}
 	/* got the lock */
-	sem->activity = -1;
+	sem->count = -1;
 	list_del(&waiter.list);
 
 	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
@@ -235,9 +235,9 @@ int __down_write_trylock(struct rw_semaphore *sem)
 
 	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
-	if (sem->activity == 0) {
+	if (sem->count == 0) {
 		/* got the lock */
-		sem->activity = -1;
+		sem->count = -1;
 		ret = 1;
 	}
 
@@ -255,7 +255,7 @@ void __up_read(struct rw_semaphore *sem)
 
 	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
-	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
+	if (--sem->count == 0 && !list_empty(&sem->wait_list))
 		sem = __rwsem_wake_one_writer(sem);
 
 	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
@@ -270,7 +270,7 @@ void __up_write(struct rw_semaphore *sem)
 
 	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
-	sem->activity = 0;
+	sem->count = 0;
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, 1);
 
@@ -287,7 +287,7 @@ void __downgrade_write(struct rw_semaphore *sem)
 
 	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
-	sem->activity = 1;
+	sem->count = 1;
 	if (!list_empty(&sem->wait_list))
 		sem = __rwsem_do_wake(sem, 0);
 
```
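The rwsem-spinlock.c changes are a pure rename so that the spinlock-based implementation uses the same field name, count, as the xadd implementation. The hunks above also show the field's semantics: zero means unlocked, a positive value is the number of readers holding the lock, and -1 marks a single writer. A trivial illustrative helper (not kernel code) summarizing that mapping:

```c
/* Illustrative helper, not kernel code: how the renamed 'count' field of the
 * spinlock-based rwsem encodes the lock state in the hunks above. */
const char *rwsem_spinlock_state(int count)
{
	if (count == 0)
		return "unlocked";
	if (count > 0)
		return "held by readers (count = number of readers)";
	return "held by one writer (count == -1)";
}
```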
```diff
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index dacc32142fcc..a2391ac135c8 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -82,9 +82,9 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
 	sem->count = RWSEM_UNLOCKED_VALUE;
 	raw_spin_lock_init(&sem->wait_lock);
 	INIT_LIST_HEAD(&sem->wait_list);
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 	sem->owner = NULL;
-	sem->osq = NULL;
+	osq_lock_init(&sem->osq);
 #endif
 }
 
@@ -262,7 +262,7 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
 	return false;
 }
 
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 /*
  * Try to acquire write lock before the writer has been put on wait queue.
  */
@@ -285,10 +285,10 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 {
 	struct task_struct *owner;
-	bool on_cpu = true;
+	bool on_cpu = false;
 
 	if (need_resched())
-		return 0;
+		return false;
 
 	rcu_read_lock();
 	owner = ACCESS_ONCE(sem->owner);
@@ -297,9 +297,9 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 	rcu_read_unlock();
 
 	/*
-	 * If sem->owner is not set, the rwsem owner may have
-	 * just acquired it and not set the owner yet or the rwsem
-	 * has been released.
+	 * If sem->owner is not set, yet we have just recently entered the
+	 * slowpath, then there is a possibility reader(s) may have the lock.
+	 * To be safe, avoid spinning in these situations.
 	 */
 	return on_cpu;
 }
```
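The behavioural fix here is in rwsem_can_spin_on_owner(): readers never record themselves in sem->owner, so a NULL owner has to be treated as "readers may hold the lock", and the default is flipped from spin to don't-spin. For context, the whole function after the patch presumably reads roughly as below; the middle, where the owner's on_cpu flag is sampled, lies outside the hunks above and is a reconstruction rather than a quote:

```c
/* Presumed post-patch shape of rwsem_can_spin_on_owner(); the unchanged
 * middle of the function is reconstructed, not taken from the hunks above. */
static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	bool on_cpu = false;			/* conservative default: do not spin */

	if (need_resched())
		return false;

	rcu_read_lock();
	owner = ACCESS_ONCE(sem->owner);
	if (owner)
		on_cpu = owner->on_cpu;		/* spin only on a currently running owner */
	rcu_read_unlock();

	/*
	 * No recorded owner: readers (which never set sem->owner) may hold
	 * the lock, so avoid spinning.
	 */
	return on_cpu;
}
```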
```diff
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 42f806de49d4..e2d3bc7f03b4 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -12,7 +12,7 @@
 
 #include <linux/atomic.h>
 
-#if defined(CONFIG_SMP) && defined(CONFIG_RWSEM_XCHGADD_ALGORITHM)
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 static inline void rwsem_set_owner(struct rw_semaphore *sem)
 {
 	sem->owner = current;
```
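With the guard keyed on CONFIG_RWSEM_SPIN_ON_OWNER, the owner-tracking helpers in rwsem.c compile to real stores only when rwsem spinning is built in; in the #else branch, which is outside the hunk above, they are presumably empty stubs along these lines:

```c
/* Presumed !CONFIG_RWSEM_SPIN_ON_OWNER stubs (the #else branch is not shown
 * in the hunk above): owner tracking compiles away when spinning is not
 * built in. */
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
}

static inline void rwsem_clear_owner(struct rw_semaphore *sem)
{
}
```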