Diffstat (limited to 'kernel')
-rw-r--r--  kernel/futex.c          | 28
-rw-r--r--  kernel/futex_compat.c   |  6
-rw-r--r--  kernel/trace/blktrace.c | 12
-rw-r--r--  kernel/wait.c           |  5
4 files changed, 30 insertions, 21 deletions
diff --git a/kernel/futex.c b/kernel/futex.c
index 0672ff88f159..e18cfbdc7190 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1010,15 +1010,19 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
  * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
  * q:	the futex_q
  * key:	the key of the requeue target futex
+ * hb:	the hash_bucket of the requeue target futex
  *
  * During futex_requeue, with requeue_pi=1, it is possible to acquire the
  * target futex if it is uncontended or via a lock steal. Set the futex_q key
  * to the requeue target futex so the waiter can detect the wakeup on the right
  * futex, but remove it from the hb and NULL the rt_waiter so it can detect
- * atomic lock acquisition. Must be called with the q->lock_ptr held.
+ * atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock
+ * to protect access to the pi_state to fixup the owner later. Must be called
+ * with both q->lock_ptr and hb->lock held.
  */
 static inline
-void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key)
+void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
+			   struct futex_hash_bucket *hb)
 {
 	drop_futex_key_refs(&q->key);
 	get_futex_key_refs(key);
@@ -1030,6 +1034,11 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key)
 	WARN_ON(!q->rt_waiter);
 	q->rt_waiter = NULL;
 
+	q->lock_ptr = &hb->lock;
+#ifdef CONFIG_DEBUG_PI_LIST
+	q->list.plist.lock = &hb->lock;
+#endif
+
 	wake_up_state(q->task, TASK_NORMAL);
 }
 
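The repointed q->lock_ptr matters on the waiter side: after wakeup, a cleared rt_waiter tells the waiter that the requeue path already acquired the target futex on its behalf, and taking q->lock_ptr (now the requeue target's hb->lock) is what makes the later pi_state owner fixup safe. The following is a simplified, hand-condensed sketch of that waiter-side logic, not code copied from futex.c; the fixup_pi_state_owner() helper does exist in futex.c, but its argument list is abbreviated here.

	/* Condensed waiter-side sketch: did the requeue path take uaddr2 for us? */
	if (!q.rt_waiter) {
		/*
		 * Yes, possibly via a lock steal, so pi_state->owner may need
		 * fixing up.  q.lock_ptr now names the requeue target's
		 * hb->lock, which is exactly the lock to hold while doing so.
		 */
		if (q.pi_state && q.pi_state->owner != current) {
			spin_lock(q.lock_ptr);
			fixup_pi_state_owner(uaddr2, &q, current);
			spin_unlock(q.lock_ptr);
		}
	} else {
		/* Woken normally: the waiter still has to take the target's rt_mutex. */
	}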
@@ -1088,7 +1097,7 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
 	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
 				   set_waiters);
 	if (ret == 1)
-		requeue_pi_wake_futex(top_waiter, key2);
+		requeue_pi_wake_futex(top_waiter, key2, hb2);
 
 	return ret;
 }
@@ -1247,8 +1256,15 @@ retry_private:
 		if (!match_futex(&this->key, &key1))
 			continue;
 
-		WARN_ON(!requeue_pi && this->rt_waiter);
-		WARN_ON(requeue_pi && !this->rt_waiter);
+		/*
+		 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
+		 * be paired with each other and no other futex ops.
+		 */
+		if ((requeue_pi && !this->rt_waiter) ||
+		    (!requeue_pi && this->rt_waiter)) {
+			ret = -EINVAL;
+			break;
+		}
 
 		/*
 		 * Wake nr_wake waiters. For requeue_pi, if we acquired the
@@ -1273,7 +1289,7 @@ retry_private:
 							this->task, 1);
 			if (ret == 1) {
 				/* We got the lock. */
-				requeue_pi_wake_futex(this, &key2);
+				requeue_pi_wake_futex(this, &key2, hb2);
 				continue;
 			} else if (ret) {
 				/* -EDEADLK */
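For orientation, the op pair enforced above implements PI-aware condvar-style waits: a waiter blocks on one futex and, when woken, comes back owning a second, PI futex, because the waker requeues it with FUTEX_CMP_REQUEUE_PI rather than waking it outright. Below is a rough userspace sketch of that pairing; the wrapper names are illustrative, and the opcode values are assumed to come from the updated <linux/futex.h> (defined locally in case an older header is installed).

#define _GNU_SOURCE
#include <stdint.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

#ifndef FUTEX_WAIT_REQUEUE_PI
#define FUTEX_WAIT_REQUEUE_PI	11
#endif
#ifndef FUTEX_CMP_REQUEUE_PI
#define FUTEX_CMP_REQUEUE_PI	12
#endif

/*
 * Waiter: block on *cond while it still equals 'val'; on a successful
 * return the kernel has requeued us onto, and handed us ownership of,
 * the PI futex 'pi_mutex'.
 */
static long cond_wait_requeue_pi(uint32_t *cond, uint32_t val,
				 const struct timespec *timeout,
				 uint32_t *pi_mutex)
{
	return syscall(SYS_futex, cond, FUTEX_WAIT_REQUEUE_PI, val,
		       timeout, pi_mutex, 0);
}

/*
 * Waker: wake exactly one waiter (the only count accepted with
 * requeue_pi) and requeue up to nr_requeue more from 'cond' onto
 * 'pi_mutex', provided *cond still equals cond_val.  Note that the
 * timespec slot carries nr_requeue here, not a timeout.
 */
static long cond_signal_requeue_pi(uint32_t *cond, uint32_t cond_val,
				   long nr_requeue, uint32_t *pi_mutex)
{
	return syscall(SYS_futex, cond, FUTEX_CMP_REQUEUE_PI, 1,
		       (void *)nr_requeue, pi_mutex, cond_val);
}

Mixing the ops, for instance requeueing plain FUTEX_WAIT waiters with FUTEX_CMP_REQUEUE_PI, or FUTEX_WAIT_REQUEUE_PI waiters with plain FUTEX_CMP_REQUEUE, is exactly the mismatch the new -EINVAL check above rejects.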
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index d607a5b9ee29..235716556bf1 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -180,7 +180,8 @@ asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
 	int cmd = op & FUTEX_CMD_MASK;
 
 	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
-		      cmd == FUTEX_WAIT_BITSET)) {
+		      cmd == FUTEX_WAIT_BITSET ||
+		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
 		if (get_compat_timespec(&ts, utime))
 			return -EFAULT;
 		if (!timespec_valid(&ts))
@@ -191,7 +192,8 @@ asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
 		t = ktime_add_safe(ktime_get(), t);
 		tp = &t;
 	}
-	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE)
+	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
+	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
 		val2 = (int) (unsigned long) utime;
 
 	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
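The two compat hunks mirror how the fourth futex() argument is overloaded: for FUTEX_WAIT_REQUEUE_PI it really is a (compat) timespec that must be converted, while for FUTEX_CMP_REQUEUE_PI, like FUTEX_CMP_REQUEUE and FUTEX_WAKE_OP, it smuggles the integer val2 (nr_requeue, or the wake-op encoding) through the pointer slot and has to be passed through untouched. Roughly, from a caller's point of view (illustrative raw calls only, reusing the cond/pi_mutex declarations from the sketch above):

	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

	/* 4th argument really is a timespec: compat_sys_futex() converts it. */
	syscall(SYS_futex, &cond, FUTEX_WAIT_REQUEUE_PI, 0, &ts, &pi_mutex, 0);

	/* 4th argument is nr_requeue in disguise: it must not be converted. */
	syscall(SYS_futex, &cond, FUTEX_CMP_REQUEUE_PI, 1,
		(void *)8L /* nr_requeue */, &pi_mutex, 0 /* expected *cond */);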
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 1090b0aed9ba..7a34cb563fec 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -267,8 +267,8 @@ static void blk_trace_free(struct blk_trace *bt)
 {
 	debugfs_remove(bt->msg_file);
 	debugfs_remove(bt->dropped_file);
-	debugfs_remove(bt->dir);
 	relay_close(bt->rchan);
+	debugfs_remove(bt->dir);
 	free_percpu(bt->sequence);
 	free_percpu(bt->msg_data);
 	kfree(bt);
@@ -378,18 +378,8 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
 
 static int blk_remove_buf_file_callback(struct dentry *dentry)
 {
-	struct dentry *parent = dentry->d_parent;
 	debugfs_remove(dentry);
 
-	/*
-	 * this will fail for all but the last file, but that is ok. what we
-	 * care about is the top level buts->name directory going away, when
-	 * the last trace file is gone. Then we don't have to rmdir() that
-	 * manually on trace stop, so it nicely solves the issue with
-	 * force killing of running traces.
-	 */
-
-	debugfs_remove(parent);
 	return 0;
 }
 
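The reordering in blk_trace_free() works because relay_close() is what tears down the per-CPU relay buffer files, invoking the remove_buf_file callback for each of them; only once those children are gone can debugfs_remove(bt->dir) take down the per-device directory itself, so the callback no longer needs to try removing its parent. For context (this snippet is not part of the diff), the callback is wired into relay through the channel callback table registered elsewhere in blktrace.c, along these lines:

static struct rchan_callbacks blk_relay_callbacks = {
	.subbuf_start		= blk_subbuf_start_callback,
	.create_buf_file	= blk_create_buf_file_callback,
	.remove_buf_file	= blk_remove_buf_file_callback,
};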
diff --git a/kernel/wait.c b/kernel/wait.c
index ea7c3b4275cf..c4bd3d825f35 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -10,13 +10,14 @@
 #include <linux/wait.h>
 #include <linux/hash.h>
 
-void init_waitqueue_head(wait_queue_head_t *q)
+void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *key)
 {
 	spin_lock_init(&q->lock);
+	lockdep_set_class(&q->lock, key);
 	INIT_LIST_HEAD(&q->task_list);
 }
 
-EXPORT_SYMBOL(init_waitqueue_head);
+EXPORT_SYMBOL(__init_waitqueue_head);
 
 void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
 {
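With the lock class key threaded in, each wait queue head can get its own lockdep class chosen at the init call site. The matching header change is outside this diff (it is limited to kernel/), but presumably init_waitqueue_head() becomes a macro that supplies a static key per call site, following the usual lockdep annotation pattern; a sketch of that expected include/linux/wait.h counterpart:

/* Sketch of the expected include/linux/wait.h counterpart (not in this diff). */
extern void __init_waitqueue_head(wait_queue_head_t *q,
				  struct lock_class_key *key);

#define init_waitqueue_head(q)				\
	do {						\
		static struct lock_class_key __key;	\
							\
		__init_waitqueue_head((q), &__key);	\
	} while (0)

A static struct lock_class_key per call site means lockdep can tell different wait queues apart instead of lumping them all into a single class.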