aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMarkus Pargmann <mpa@pengutronix.de>2015-10-06 14:03:54 -0400
committerJens Axboe <axboe@fb.com>2015-10-08 16:21:24 -0400
commitdcc909d90ccdbb73226397ff6d298f7af35b0e11 (patch)
treebce78076d417b467bfe3a4da807fe78e0e963059
parentc3984cc994377671f18d3d719ddd86b5107457ee (diff)
nbd: Add locking for tasks
The timeout handling introduced in 7e2893a16d3e (nbd: Fix timeout detection) introduces a race condition which may lead to killing of tasks that are not in nbd context anymore. This was not observed or reproducible yet. This patch adds locking to critical use of task_recv and task_send to avoid killing tasks that already left the NBD thread functions. This lock is only acquired if a timeout occurs or the nbd device starts/stops. Reported-by: Ben Hutchings <ben@decadent.org.uk> Signed-off-by: Markus Pargmann <mpa@pengutronix.de> Reviewed-by: Ben Hutchings <ben@decadent.org.uk> Fixes: 7e2893a16d3e ("nbd: Fix timeout detection") Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r--drivers/block/nbd.c36
1 file changed, 30 insertions, 6 deletions
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 293495a75d3d..1b87623381e2 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -60,6 +60,7 @@ struct nbd_device {
60 bool disconnect; /* a disconnect has been requested by user */ 60 bool disconnect; /* a disconnect has been requested by user */
61 61
62 struct timer_list timeout_timer; 62 struct timer_list timeout_timer;
63 spinlock_t tasks_lock;
63 struct task_struct *task_recv; 64 struct task_struct *task_recv;
64 struct task_struct *task_send; 65 struct task_struct *task_send;
65 66
@@ -140,21 +141,23 @@ static void sock_shutdown(struct nbd_device *nbd)
140static void nbd_xmit_timeout(unsigned long arg) 141static void nbd_xmit_timeout(unsigned long arg)
141{ 142{
142 struct nbd_device *nbd = (struct nbd_device *)arg; 143 struct nbd_device *nbd = (struct nbd_device *)arg;
143 struct task_struct *task; 144 unsigned long flags;
144 145
145 if (list_empty(&nbd->queue_head)) 146 if (list_empty(&nbd->queue_head))
146 return; 147 return;
147 148
148 nbd->disconnect = true; 149 nbd->disconnect = true;
149 150
150 task = READ_ONCE(nbd->task_recv); 151 spin_lock_irqsave(&nbd->tasks_lock, flags);
151 if (task) 152
152 force_sig(SIGKILL, task); 153 if (nbd->task_recv)
154 force_sig(SIGKILL, nbd->task_recv);
153 155
154 task = READ_ONCE(nbd->task_send); 156 if (nbd->task_send)
155 if (task)
156 force_sig(SIGKILL, nbd->task_send); 157 force_sig(SIGKILL, nbd->task_send);
157 158
159 spin_unlock_irqrestore(&nbd->tasks_lock, flags);
160
158 dev_err(nbd_to_dev(nbd), "Connection timed out, killed receiver and sender, shutting down connection\n"); 161 dev_err(nbd_to_dev(nbd), "Connection timed out, killed receiver and sender, shutting down connection\n");
159} 162}
160 163
@@ -403,17 +406,24 @@ static int nbd_thread_recv(struct nbd_device *nbd)
403{ 406{
404 struct request *req; 407 struct request *req;
405 int ret; 408 int ret;
409 unsigned long flags;
406 410
407 BUG_ON(nbd->magic != NBD_MAGIC); 411 BUG_ON(nbd->magic != NBD_MAGIC);
408 412
409 sk_set_memalloc(nbd->sock->sk); 413 sk_set_memalloc(nbd->sock->sk);
410 414
415 spin_lock_irqsave(&nbd->tasks_lock, flags);
411 nbd->task_recv = current; 416 nbd->task_recv = current;
417 spin_unlock_irqrestore(&nbd->tasks_lock, flags);
412 418
413 ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr); 419 ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
414 if (ret) { 420 if (ret) {
415 dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n"); 421 dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
422
423 spin_lock_irqsave(&nbd->tasks_lock, flags);
416 nbd->task_recv = NULL; 424 nbd->task_recv = NULL;
425 spin_unlock_irqrestore(&nbd->tasks_lock, flags);
426
417 return ret; 427 return ret;
418 } 428 }
419 429
@@ -429,7 +439,9 @@ static int nbd_thread_recv(struct nbd_device *nbd)
429 439
430 device_remove_file(disk_to_dev(nbd->disk), &pid_attr); 440 device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
431 441
442 spin_lock_irqsave(&nbd->tasks_lock, flags);
432 nbd->task_recv = NULL; 443 nbd->task_recv = NULL;
444 spin_unlock_irqrestore(&nbd->tasks_lock, flags);
433 445
434 if (signal_pending(current)) { 446 if (signal_pending(current)) {
435 siginfo_t info; 447 siginfo_t info;
@@ -534,8 +546,11 @@ static int nbd_thread_send(void *data)
534{ 546{
535 struct nbd_device *nbd = data; 547 struct nbd_device *nbd = data;
536 struct request *req; 548 struct request *req;
549 unsigned long flags;
537 550
551 spin_lock_irqsave(&nbd->tasks_lock, flags);
538 nbd->task_send = current; 552 nbd->task_send = current;
553 spin_unlock_irqrestore(&nbd->tasks_lock, flags);
539 554
540 set_user_nice(current, MIN_NICE); 555 set_user_nice(current, MIN_NICE);
541 while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) { 556 while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
@@ -572,7 +587,15 @@ static int nbd_thread_send(void *data)
572 nbd_handle_req(nbd, req); 587 nbd_handle_req(nbd, req);
573 } 588 }
574 589
590 spin_lock_irqsave(&nbd->tasks_lock, flags);
575 nbd->task_send = NULL; 591 nbd->task_send = NULL;
592 spin_unlock_irqrestore(&nbd->tasks_lock, flags);
593
594 /* Clear maybe pending signals */
595 if (signal_pending(current)) {
596 siginfo_t info;
597 dequeue_signal_lock(current, &current->blocked, &info);
598 }
576 599
577 return 0; 600 return 0;
578} 601}
@@ -1052,6 +1075,7 @@ static int __init nbd_init(void)
1052 nbd_dev[i].magic = NBD_MAGIC; 1075 nbd_dev[i].magic = NBD_MAGIC;
1053 INIT_LIST_HEAD(&nbd_dev[i].waiting_queue); 1076 INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
1054 spin_lock_init(&nbd_dev[i].queue_lock); 1077 spin_lock_init(&nbd_dev[i].queue_lock);
1078 spin_lock_init(&nbd_dev[i].tasks_lock);
1055 INIT_LIST_HEAD(&nbd_dev[i].queue_head); 1079 INIT_LIST_HEAD(&nbd_dev[i].queue_head);
1056 mutex_init(&nbd_dev[i].tx_lock); 1080 mutex_init(&nbd_dev[i].tx_lock);
1057 init_timer(&nbd_dev[i].timeout_timer); 1081 init_timer(&nbd_dev[i].timeout_timer);