Diffstat (limited to 'drivers/block/nbd.c')
-rw-r--r--  drivers/block/nbd.c  125
 1 file changed, 61 insertions(+), 64 deletions(-)
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 9e268ddedfbd..6997d8e6bfb5 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -54,11 +54,15 @@
 #include <linux/errno.h>
 #include <linux/file.h>
 #include <linux/ioctl.h>
+#include <linux/compiler.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
 #include <net/sock.h>
 
 #include <linux/devfs_fs_kernel.h>
 
 #include <asm/uaccess.h>
+#include <asm/system.h>
 #include <asm/types.h>
 
 #include <linux/nbd.h>
@@ -136,7 +140,7 @@ static void nbd_end_request(struct request *req)
 
 	spin_lock_irqsave(q->queue_lock, flags);
 	if (!end_that_request_first(req, uptodate, req->nr_sectors)) {
-		end_that_request_last(req);
+		end_that_request_last(req, uptodate);
 	}
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
@@ -170,7 +174,6 @@ static int sock_xmit(struct socket *sock, int send, void *buf, int size,
 	msg.msg_namelen = 0;
 	msg.msg_control = NULL;
 	msg.msg_controllen = 0;
-	msg.msg_namelen = 0;
 	msg.msg_flags = msg_flags | MSG_NOSIGNAL;
 
 	if (send)
@@ -230,14 +233,6 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
 	request.len = htonl(size);
 	memcpy(request.handle, &req, sizeof(req));
 
-	down(&lo->tx_lock);
-
-	if (!sock || !lo->sock) {
-		printk(KERN_ERR "%s: Attempted send on closed socket\n",
-		       lo->disk->disk_name);
-		goto error_out;
-	}
-
 	dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%luB)\n",
 			lo->disk->disk_name, req,
 			nbdcmd_to_ascii(nbd_cmd(req)),
@@ -276,11 +271,9 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
 			}
 		}
 	}
-	up(&lo->tx_lock);
 	return 0;
 
 error_out:
-	up(&lo->tx_lock);
 	return 1;
 }
 
@@ -289,9 +282,14 @@ static struct request *nbd_find_request(struct nbd_device *lo, char *handle)
 	struct request *req;
 	struct list_head *tmp;
 	struct request *xreq;
+	int err;
 
 	memcpy(&xreq, handle, sizeof(xreq));
 
+	err = wait_event_interruptible(lo->active_wq, lo->active_req != xreq);
+	if (unlikely(err))
+		goto out;
+
 	spin_lock(&lo->queue_lock);
 	list_for_each(tmp, &lo->queue_head) {
 		req = list_entry(tmp, struct request, queuelist);
@@ -302,7 +300,11 @@ static struct request *nbd_find_request(struct nbd_device *lo, char *handle)
 		return req;
 	}
 	spin_unlock(&lo->queue_lock);
-	return NULL;
+
+	err = -ENOENT;
+
+out:
+	return ERR_PTR(err);
 }
 
 static inline int sock_recv_bvec(struct socket *sock, struct bio_vec *bvec)
@@ -331,7 +333,11 @@ static struct request *nbd_read_stat(struct nbd_device *lo)
 		goto harderror;
 	}
 	req = nbd_find_request(lo, reply.handle);
-	if (req == NULL) {
+	if (unlikely(IS_ERR(req))) {
+		result = PTR_ERR(req);
+		if (result != -ENOENT)
+			goto harderror;
+
 		printk(KERN_ERR "%s: Unexpected reply (%p)\n",
 		       lo->disk->disk_name, reply.handle);
 		result = -EBADR;
@@ -395,19 +401,24 @@ static void nbd_clear_que(struct nbd_device *lo)
 
 	BUG_ON(lo->magic != LO_MAGIC);
 
-	do {
-		req = NULL;
-		spin_lock(&lo->queue_lock);
-		if (!list_empty(&lo->queue_head)) {
-			req = list_entry(lo->queue_head.next, struct request, queuelist);
-			list_del_init(&req->queuelist);
-		}
-		spin_unlock(&lo->queue_lock);
-		if (req) {
-			req->errors++;
-			nbd_end_request(req);
-		}
-	} while (req);
+	/*
+	 * Because we have set lo->sock to NULL under the tx_lock, all
+	 * modifications to the list must have completed by now. For
+	 * the same reason, the active_req must be NULL.
+	 *
+	 * As a consequence, we don't need to take the spin lock while
+	 * purging the list here.
+	 */
+	BUG_ON(lo->sock);
+	BUG_ON(lo->active_req);
+
+	while (!list_empty(&lo->queue_head)) {
+		req = list_entry(lo->queue_head.next, struct request,
+				 queuelist);
+		list_del_init(&req->queuelist);
+		req->errors++;
+		nbd_end_request(req);
+	}
 }
 
 /*
@@ -435,11 +446,6 @@ static void do_nbd_request(request_queue_t * q)
 
 		BUG_ON(lo->magic != LO_MAGIC);
 
-		if (!lo->file) {
-			printk(KERN_ERR "%s: Request when not-ready\n",
-			       lo->disk->disk_name);
-			goto error_out;
-		}
 		nbd_cmd(req) = NBD_CMD_READ;
 		if (rq_data_dir(req) == WRITE) {
 			nbd_cmd(req) = NBD_CMD_WRITE;
@@ -453,32 +459,34 @@ static void do_nbd_request(request_queue_t * q)
 		req->errors = 0;
 		spin_unlock_irq(q->queue_lock);
 
-		spin_lock(&lo->queue_lock);
-
-		if (!lo->file) {
-			spin_unlock(&lo->queue_lock);
-			printk(KERN_ERR "%s: failed between accept and semaphore, file lost\n",
-			       lo->disk->disk_name);
+		down(&lo->tx_lock);
+		if (unlikely(!lo->sock)) {
+			up(&lo->tx_lock);
+			printk(KERN_ERR "%s: Attempted send on closed socket\n",
+			       lo->disk->disk_name);
 			req->errors++;
 			nbd_end_request(req);
 			spin_lock_irq(q->queue_lock);
 			continue;
 		}
 
-		list_add(&req->queuelist, &lo->queue_head);
-		spin_unlock(&lo->queue_lock);
+		lo->active_req = req;
 
 		if (nbd_send_req(lo, req) != 0) {
 			printk(KERN_ERR "%s: Request send failed\n",
 			       lo->disk->disk_name);
-			if (nbd_find_request(lo, (char *)&req) != NULL) {
-				/* we still own req */
-				req->errors++;
-				nbd_end_request(req);
-			} else /* we're racing with nbd_clear_que */
-				printk(KERN_DEBUG "nbd: can't find req\n");
+			req->errors++;
+			nbd_end_request(req);
+		} else {
+			spin_lock(&lo->queue_lock);
+			list_add(&req->queuelist, &lo->queue_head);
+			spin_unlock(&lo->queue_lock);
 		}
 
+		lo->active_req = NULL;
+		up(&lo->tx_lock);
+		wake_up_all(&lo->active_wq);
+
 		spin_lock_irq(q->queue_lock);
 		continue;
 
@@ -529,17 +537,10 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
 		down(&lo->tx_lock);
 		lo->sock = NULL;
 		up(&lo->tx_lock);
-		spin_lock(&lo->queue_lock);
 		file = lo->file;
 		lo->file = NULL;
-		spin_unlock(&lo->queue_lock);
 		nbd_clear_que(lo);
-		spin_lock(&lo->queue_lock);
-		if (!list_empty(&lo->queue_head)) {
-			printk(KERN_ERR "nbd: disconnect: some requests are in progress -> please try again.\n");
-			error = -EBUSY;
-		}
-		spin_unlock(&lo->queue_lock);
+		BUG_ON(!list_empty(&lo->queue_head));
 		if (file)
 			fput(file);
 		return error;
@@ -598,24 +599,19 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
 			lo->sock = NULL;
 		}
 		up(&lo->tx_lock);
-		spin_lock(&lo->queue_lock);
 		file = lo->file;
 		lo->file = NULL;
-		spin_unlock(&lo->queue_lock);
 		nbd_clear_que(lo);
 		printk(KERN_WARNING "%s: queue cleared\n", lo->disk->disk_name);
 		if (file)
 			fput(file);
 		return lo->harderror;
 	case NBD_CLEAR_QUE:
-		down(&lo->tx_lock);
-		if (lo->sock) {
-			up(&lo->tx_lock);
-			return 0; /* probably should be error, but that would
-				   * break "nbd-client -d", so just return 0 */
-		}
-		up(&lo->tx_lock);
-		nbd_clear_que(lo);
+		/*
+		 * This is for compatibility only. The queue is always cleared
+		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
+		 */
+		BUG_ON(!lo->sock && !list_empty(&lo->queue_head));
 		return 0;
 	case NBD_PRINT_DEBUG:
 		printk(KERN_INFO "%s: next = %p, prev = %p, head = %p\n",
@@ -688,6 +684,7 @@ static int __init nbd_init(void)
 		spin_lock_init(&nbd_dev[i].queue_lock);
 		INIT_LIST_HEAD(&nbd_dev[i].queue_head);
 		init_MUTEX(&nbd_dev[i].tx_lock);
+		init_waitqueue_head(&nbd_dev[i].active_wq);
 		nbd_dev[i].blksize = 1024;
 		nbd_dev[i].bytesize = 0x7ffffc00ULL << 10; /* 2TB */
 		disk->major = NBD_MAJOR;
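
The hunks above replace the old queue_lock-only protection with a handshake between the sender and the reply reader. The fragment below is a condensed illustration only, not part of the patch: it stitches together the sender path from do_nbd_request() and the receiver path from nbd_find_request() using the fields the patch introduces (tx_lock, active_req, active_wq), with the surrounding driver code elided.

	/* Sender path, as in do_nbd_request() above: */
	down(&lo->tx_lock);
	lo->active_req = req;			/* reply reader must wait for us */
	if (nbd_send_req(lo, req) != 0) {
		req->errors++;			/* never queued, so we still own req */
		nbd_end_request(req);
	} else {
		spin_lock(&lo->queue_lock);	/* only a sent request may be matched */
		list_add(&req->queuelist, &lo->queue_head);
		spin_unlock(&lo->queue_lock);
	}
	lo->active_req = NULL;
	up(&lo->tx_lock);
	wake_up_all(&lo->active_wq);		/* unblock nbd_find_request() */

	/* Receiver path, as in nbd_find_request() above: never match a reply
	 * against a request that is still being transmitted. */
	err = wait_event_interruptible(lo->active_wq, lo->active_req != xreq);
	if (unlikely(err))
		return ERR_PTR(err);
	/* ...then search lo->queue_head under lo->queue_lock as before... */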