path: root/block
author	FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2007-05-08 09:32:03 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2007-07-16 02:52:46 -0400
commit	e7d72173248c29c6f9ba14e40374266e1b954964 (patch)
tree	5f4d14d210ca3d49d209942be0ce60e65a433ce7 /block
parent	4cf0723ac89b5f2189da2ad07ef875de26b83c77 (diff)
bsg: fix a blocking read bug
This patch fixes a bug where read() returns ENODATA even with a blocking
file descriptor when there are no commands pending. It also includes some
cleanups.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
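For illustration only (not part of the original commit): a minimal user-space
sketch of the read() semantics this patch establishes. With a blocking
descriptor, read() now waits for a completed command (the driver sleeps on
bd->wq_done); with O_NONBLOCK it fails with EAGAIN instead of ENODATA. The
device node name below is an assumption, and a real program would first
write() a struct sg_io_v4 command before reading the response.

/*
 * Illustrative sketch only: exercises the read() behaviour fixed above.
 * The device node name is an assumption.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/bsg.h>		/* struct sg_io_v4 */

int main(void)
{
	struct sg_io_v4 hdr;
	ssize_t n;
	/* No O_NONBLOCK: read() blocks until a queued command completes,
	 * instead of returning ENODATA as it did before this patch. */
	int fd = open("/dev/bsg/0:0:0:0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* ... queue a command with write() here ... */

	n = read(fd, &hdr, sizeof(hdr));
	if (n < 0 && errno == EAGAIN)
		fprintf(stderr, "no completed commands (O_NONBLOCK only)\n");
	else if (n == (ssize_t)sizeof(hdr))
		printf("device_status=%u driver_status=%u\n",
		       hdr.device_status, hdr.driver_status);

	close(fd);
	return 0;
}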
Diffstat (limited to 'block')
-rw-r--r--	block/bsg.c	84
1 file changed, 23 insertions(+), 61 deletions(-)
diff --git a/block/bsg.c b/block/bsg.c
index a333c9337093..2f78d7d34b9d 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -115,9 +115,9 @@ static void bsg_free_command(struct bsg_command *bc)
 	wake_up(&bd->wq_free);
 }
 
-static struct bsg_command *__bsg_alloc_command(struct bsg_device *bd)
+static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
 {
-	struct bsg_command *bc = NULL;
+	struct bsg_command *bc = ERR_PTR(-EINVAL);
 
 	spin_lock_irq(&bd->lock);
 
@@ -131,6 +131,7 @@ static struct bsg_command *__bsg_alloc_command(struct bsg_device *bd)
 	if (unlikely(!bc)) {
 		spin_lock_irq(&bd->lock);
 		bd->queued_cmds--;
+		bc = ERR_PTR(-ENOMEM);
 		goto out;
 	}
 
@@ -198,30 +199,6 @@ unlock:
 	return ret;
 }
 
-/*
- * get a new free command, blocking if needed and specified
- */
-static struct bsg_command *bsg_get_command(struct bsg_device *bd)
-{
-	struct bsg_command *bc;
-	int ret;
-
-	do {
-		bc = __bsg_alloc_command(bd);
-		if (bc)
-			break;
-
-		ret = bsg_io_schedule(bd, TASK_INTERRUPTIBLE);
-		if (ret) {
-			bc = ERR_PTR(ret);
-			break;
-		}
-
-	} while (1);
-
-	return bc;
-}
-
 static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq,
 				struct sg_io_v4 *hdr, int has_write_perm)
 {
@@ -397,7 +374,7 @@ static inline struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
 /*
  * Get a finished command from the done list
  */
-static struct bsg_command *__bsg_get_done_cmd(struct bsg_device *bd, int state)
+static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
 {
 	struct bsg_command *bc;
 	int ret;
@@ -407,9 +384,14 @@ static struct bsg_command *__bsg_get_done_cmd(struct bsg_device *bd, int state)
 		if (bc)
 			break;
 
-		ret = bsg_io_schedule(bd, state);
+		if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
+			bc = ERR_PTR(-EAGAIN);
+			break;
+		}
+
+		ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
 		if (ret) {
-			bc = ERR_PTR(ret);
+			bc = ERR_PTR(-ERESTARTSYS);
 			break;
 		}
 	} while (1);
@@ -419,18 +401,6 @@ static struct bsg_command *__bsg_get_done_cmd(struct bsg_device *bd, int state)
 	return bc;
 }
 
-static struct bsg_command *
-bsg_get_done_cmd(struct bsg_device *bd, const struct iovec *iov)
-{
-	return __bsg_get_done_cmd(bd, TASK_INTERRUPTIBLE);
-}
-
-static struct bsg_command *
-bsg_get_done_cmd_nosignals(struct bsg_device *bd)
-{
-	return __bsg_get_done_cmd(bd, TASK_UNINTERRUPTIBLE);
-}
-
 static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
 				    struct bio *bio)
 {
@@ -496,19 +466,16 @@ static int bsg_complete_all_commands(struct bsg_device *bd)
 	 */
 	ret = 0;
 	do {
-		bc = bsg_get_done_cmd_nosignals(bd);
-
-		/*
-		 * we _must_ complete before restarting, because
-		 * bsg_release can't handle this failing.
-		 */
-		if (PTR_ERR(bc) == -ERESTARTSYS)
-			continue;
-		if (IS_ERR(bc)) {
-			ret = PTR_ERR(bc);
+		spin_lock_irq(&bd->lock);
+		if (!bd->queued_cmds) {
+			spin_unlock_irq(&bd->lock);
 			break;
 		}
 
+		bc = bsg_get_done_cmd(bd);
+		if (IS_ERR(bc))
+			break;
+
 		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio);
 		if (!ret)
 			ret = tret;
@@ -519,11 +486,9 @@ static int bsg_complete_all_commands(struct bsg_device *bd)
 	return ret;
 }
 
-typedef struct bsg_command *(*bsg_command_callback)(struct bsg_device *bd, const struct iovec *iov);
-
 static ssize_t
-__bsg_read(char __user *buf, size_t count, bsg_command_callback get_bc,
-	   struct bsg_device *bd, const struct iovec *iov, ssize_t *bytes_read)
+__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
+	   const struct iovec *iov, ssize_t *bytes_read)
 {
 	struct bsg_command *bc;
 	int nr_commands, ret;
@@ -534,7 +499,7 @@ __bsg_read(char __user *buf, size_t count, bsg_command_callback get_bc,
 	ret = 0;
 	nr_commands = count / sizeof(struct sg_io_v4);
 	while (nr_commands) {
-		bc = get_bc(bd, iov);
+		bc = bsg_get_done_cmd(bd);
 		if (IS_ERR(bc)) {
 			ret = PTR_ERR(bc);
 			break;
@@ -598,8 +563,7 @@ bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 
 	bsg_set_block(bd, file);
 	bytes_read = 0;
-	ret = __bsg_read(buf, count, bsg_get_done_cmd,
-			bd, NULL, &bytes_read);
+	ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
 	*ppos = bytes_read;
 
 	if (!bytes_read || (bytes_read && err_block_err(ret)))
@@ -625,9 +589,7 @@ static ssize_t __bsg_write(struct bsg_device *bd, const char __user *buf,
 	while (nr_commands) {
 		request_queue_t *q = bd->queue;
 
-		bc = bsg_get_command(bd);
-		if (!bc)
-			break;
+		bc = bsg_alloc_command(bd);
 		if (IS_ERR(bc)) {
 			ret = PTR_ERR(bc);
 			bc = NULL;