Diffstat (limited to 'kernel/futex.c'):

 kernel/futex.c | 53 ++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 43 insertions(+), 10 deletions(-)
diff --git a/kernel/futex.c b/kernel/futex.c
index 44a1261cb9ff..08ec814ad9d2 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -234,6 +234,7 @@ static const struct futex_q futex_q_init = {
  * waiting on a futex.
  */
 struct futex_hash_bucket {
+        atomic_t waiters;
         spinlock_t lock;
         struct plist_head chain;
 } ____cacheline_aligned_in_smp;
@@ -253,22 +254,37 @@ static inline void futex_get_mm(union futex_key *key)
         smp_mb__after_atomic_inc();
 }
 
-static inline bool hb_waiters_pending(struct futex_hash_bucket *hb)
+/*
+ * Reflects a new waiter being added to the waitqueue.
+ */
+static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
 {
 #ifdef CONFIG_SMP
+        atomic_inc(&hb->waiters);
         /*
-         * Tasks trying to enter the critical region are most likely
-         * potential waiters that will be added to the plist. Ensure
-         * that wakers won't miss to-be-slept tasks in the window between
-         * the wait call and the actual plist_add.
+         * Full barrier (A), see the ordering comment above.
          */
-        if (spin_is_locked(&hb->lock))
-                return true;
-        smp_rmb(); /* Make sure we check the lock state first */
+        smp_mb__after_atomic_inc();
+#endif
+}
+
+/*
+ * Reflects a waiter being removed from the waitqueue by wakeup
+ * paths.
+ */
+static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
+{
+#ifdef CONFIG_SMP
+        atomic_dec(&hb->waiters);
+#endif
+}
 
-        return !plist_head_empty(&hb->chain);
+static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
+{
+#ifdef CONFIG_SMP
+        return atomic_read(&hb->waiters);
 #else
-        return true;
+        return 1;
 #endif
 }
 
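
The three helpers above replace the old spin_is_locked() heuristic with an explicit per-bucket waiter count; note that on !CONFIG_SMP builds hb_waiters_pending() simply returns 1, so uniprocessor wakers always take the slow path and the counter only pays off on SMP. For readers following along outside the kernel tree, here is a minimal userspace analogue of the same pattern using C11 atomics in place of the kernel's atomic_t; the bucket_* names are hypothetical, invented for this sketch:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct bucket {
            atomic_int waiters;     /* plays the role of futex_hash_bucket::waiters */
    };

    static inline void bucket_waiters_inc(struct bucket *b)
    {
            /* a seq_cst RMW gives the full barrier the kernel builds from
             * atomic_inc() followed by smp_mb__after_atomic_inc() */
            atomic_fetch_add(&b->waiters, 1);
    }

    static inline void bucket_waiters_dec(struct bucket *b)
    {
            atomic_fetch_sub(&b->waiters, 1);
    }

    static inline bool bucket_waiters_pending(struct bucket *b)
    {
            /* a waker that reads zero may skip taking the bucket lock */
            return atomic_load(&b->waiters) != 0;
    }
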
@@ -954,6 +970,7 @@ static void __unqueue_futex(struct futex_q *q)
 
         hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
         plist_del(&q->list, &hb->chain);
+        hb_waiters_dec(hb);
 }
 
 /*
@@ -1257,7 +1274,9 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
          */
         if (likely(&hb1->chain != &hb2->chain)) {
                 plist_del(&q->list, &hb1->chain);
+                hb_waiters_dec(hb1);
                 plist_add(&q->list, &hb2->chain);
+                hb_waiters_inc(hb2);
                 q->lock_ptr = &hb2->lock;
         }
         get_futex_key_refs(key2);
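
The pairing in requeue_futex() matters: a task moved from hb1 to hb2 must move in the counts as well, otherwise a waker hashing to hb2 could read a stale zero and skip a wakeup. In terms of the hypothetical sketch above, the invariant being preserved is simply:

    /* builds on the bucket sketch above (hypothetical names) */
    static void bucket_move_waiter(struct bucket *from, struct bucket *to)
    {
            bucket_waiters_dec(from);       /* no longer wakeable via 'from' */
            bucket_waiters_inc(to);         /* now visible to wakers of 'to' */
    }
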
@@ -1600,6 +1619,17 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
         struct futex_hash_bucket *hb;
 
         hb = hash_futex(&q->key);
+
+        /*
+         * Increment the counter before taking the lock so that
+         * a potential waker won't miss a to-be-slept task that is
+         * waiting for the spinlock. This is safe as all queue_lock()
+         * users end up calling queue_me(). Similarly, for housekeeping,
+         * decrement the counter at queue_unlock() when some error has
+         * occurred and we don't end up adding the task to the list.
+         */
+        hb_waiters_inc(hb);
+
         q->lock_ptr = &hb->lock;
 
         spin_lock(&hb->lock); /* implies MB (A) */
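
The comment above is the heart of the change: a waiter advertises itself in the counter before taking the bucket lock, and the wake paths consult hb_waiters_pending() before bothering with the lock at all. A rough two-sided sketch of the race in the same hypothetical userspace analogue (the real kernel paths differ in detail):

    static void waiter_side(struct bucket *b, atomic_int *uaddr, int expected)
    {
            bucket_waiters_inc(b);                  /* MB (A): advertise first */
            if (atomic_load(uaddr) != expected) {   /* then re-check the futex word */
                    bucket_waiters_dec(b);          /* value changed: don't sleep */
                    return;
            }
            /* take b's lock, enqueue on b, sleep */
    }

    static void waker_side(struct bucket *b, atomic_int *uaddr, int newval)
    {
            atomic_store(uaddr, newval);            /* MB (B) via the seq_cst store */
            if (!bucket_waiters_pending(b))
                    return;                         /* fast path: nobody to wake */
            /* take b's lock and wake queued waiters */
    }

With both sides fully ordered, the store-buffering outcome where the waker reads a zero count while the waiter still sees the stale futex value is forbidden, and that outcome is exactly a lost wakeup.
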
@@ -1611,6 +1641,7 @@ queue_unlock(struct futex_hash_bucket *hb)
         __releases(&hb->lock)
 {
         spin_unlock(&hb->lock);
+        hb_waiters_dec(hb);
 }
 
 /**
@@ -2342,6 +2373,7 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
          * Unqueue the futex_q and determine which it was.
          */
         plist_del(&q->list, &hb->chain);
+        hb_waiters_dec(hb);
 
         /* Handle spurious wakeups gracefully */
         ret = -EWOULDBLOCK;
@@ -2875,6 +2907,7 @@ static int __init futex_init(void)
         futex_cmpxchg_enabled = 1;
 
         for (i = 0; i < futex_hashsize; i++) {
+                atomic_set(&futex_queues[i].waiters, 0);
                 plist_head_init(&futex_queues[i].chain);
                 spin_lock_init(&futex_queues[i].lock);
         }
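
Finally, futex_init() must zero each counter before any futex syscall can observe the table. A short, self-contained usage example of the hypothetical sketches above:

    #include <assert.h>

    #define TABLE_SIZE 256
    static struct bucket table[TABLE_SIZE];

    int main(void)
    {
            /* mirrors the futex_init() change: counters start at zero */
            for (int i = 0; i < TABLE_SIZE; i++)
                    atomic_init(&table[i].waiters, 0);

            assert(!bucket_waiters_pending(&table[0]));
            bucket_waiters_inc(&table[0]);          /* a waiter arrives */
            assert(bucket_waiters_pending(&table[0]));
            bucket_waiters_dec(&table[0]);          /* it is woken and removed */
            assert(!bucket_waiters_pending(&table[0]));
            return 0;
    }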