diff options
| author | Peter Zijlstra <peterz@infradead.org> | 2015-02-03 06:55:31 -0500 |
|---|---|---|
| committer | Jens Axboe <axboe@fb.com> | 2015-02-04 11:57:52 -0500 |
| commit | 2c561246524c3319473bf47b558354f7ff47f0cf (patch) | |
| tree | de8d6589065919ca581aae1c34a54c769592b8e0 | |
| parent | b7f120b211510b80cb72c1d790d9a4531271edfa (diff) | |
block: Simplify bsg complete all
It took me a few tries to figure out what this code did; let's rewrite
it into a more regular form.
The thing that makes this one 'special' is the BSG_F_BLOCK flag: if
that is not set, we're not supposed/allowed to block and should
spin-wait for completion.
The (new) io_wait_event() will never see a false condition in the
spinning case, and we will therefore not block.
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Jens Axboe <axboe@fb.com>
| -rw-r--r-- | block/bsg.c | 72 | ||||
| -rw-r--r-- | include/linux/wait.h | 15 |
2 files changed, 40 insertions, 47 deletions
diff --git a/block/bsg.c b/block/bsg.c index 276e869e686c..d214e929ce18 100644 --- a/block/bsg.c +++ b/block/bsg.c | |||
| @@ -136,42 +136,6 @@ static inline struct hlist_head *bsg_dev_idx_hash(int index) | |||
| 136 | return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)]; | 136 | return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)]; |
| 137 | } | 137 | } |
| 138 | 138 | ||
| 139 | static int bsg_io_schedule(struct bsg_device *bd) | ||
| 140 | { | ||
| 141 | DEFINE_WAIT(wait); | ||
| 142 | int ret = 0; | ||
| 143 | |||
| 144 | spin_lock_irq(&bd->lock); | ||
| 145 | |||
| 146 | BUG_ON(bd->done_cmds > bd->queued_cmds); | ||
| 147 | |||
| 148 | /* | ||
| 149 | * -ENOSPC or -ENODATA? I'm going for -ENODATA, meaning "I have no | ||
| 150 | * work to do", even though we return -ENOSPC after this same test | ||
| 151 | * during bsg_write() -- there, it means our buffer can't have more | ||
| 152 | * bsg_commands added to it, thus has no space left. | ||
| 153 | */ | ||
| 154 | if (bd->done_cmds == bd->queued_cmds) { | ||
| 155 | ret = -ENODATA; | ||
| 156 | goto unlock; | ||
| 157 | } | ||
| 158 | |||
| 159 | if (!test_bit(BSG_F_BLOCK, &bd->flags)) { | ||
| 160 | ret = -EAGAIN; | ||
| 161 | goto unlock; | ||
| 162 | } | ||
| 163 | |||
| 164 | prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE); | ||
| 165 | spin_unlock_irq(&bd->lock); | ||
| 166 | io_schedule(); | ||
| 167 | finish_wait(&bd->wq_done, &wait); | ||
| 168 | |||
| 169 | return ret; | ||
| 170 | unlock: | ||
| 171 | spin_unlock_irq(&bd->lock); | ||
| 172 | return ret; | ||
| 173 | } | ||
| 174 | |||
| 175 | static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq, | 139 | static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq, |
| 176 | struct sg_io_v4 *hdr, struct bsg_device *bd, | 140 | struct sg_io_v4 *hdr, struct bsg_device *bd, |
| 177 | fmode_t has_write_perm) | 141 | fmode_t has_write_perm) |
| @@ -482,6 +446,30 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr, | |||
| 482 | return ret; | 446 | return ret; |
| 483 | } | 447 | } |
| 484 | 448 | ||
| 449 | static bool bsg_complete(struct bsg_device *bd) | ||
| 450 | { | ||
| 451 | bool ret = false; | ||
| 452 | bool spin; | ||
| 453 | |||
| 454 | do { | ||
| 455 | spin_lock_irq(&bd->lock); | ||
| 456 | |||
| 457 | BUG_ON(bd->done_cmds > bd->queued_cmds); | ||
| 458 | |||
| 459 | /* | ||
| 460 | * All commands consumed. | ||
| 461 | */ | ||
| 462 | if (bd->done_cmds == bd->queued_cmds) | ||
| 463 | ret = true; | ||
| 464 | |||
| 465 | spin = !test_bit(BSG_F_BLOCK, &bd->flags); | ||
| 466 | |||
| 467 | spin_unlock_irq(&bd->lock); | ||
| 468 | } while (!ret && spin); | ||
| 469 | |||
| 470 | return ret; | ||
| 471 | } | ||
| 472 | |||
| 485 | static int bsg_complete_all_commands(struct bsg_device *bd) | 473 | static int bsg_complete_all_commands(struct bsg_device *bd) |
| 486 | { | 474 | { |
| 487 | struct bsg_command *bc; | 475 | struct bsg_command *bc; |
| @@ -492,17 +480,7 @@ static int bsg_complete_all_commands(struct bsg_device *bd) | |||
| 492 | /* | 480 | /* |
| 493 | * wait for all commands to complete | 481 | * wait for all commands to complete |
| 494 | */ | 482 | */ |
| 495 | ret = 0; | 483 | io_wait_event(bd->wq_done, bsg_complete(bd)); |
| 496 | do { | ||
| 497 | ret = bsg_io_schedule(bd); | ||
| 498 | /* | ||
| 499 | * look for -ENODATA specifically -- we'll sometimes get | ||
| 500 | * -ERESTARTSYS when we've taken a signal, but we can't | ||
| 501 | * return until we're done freeing the queue, so ignore | ||
| 502 | * it. The signal will get handled when we're done freeing | ||
| 503 | * the bsg_device. | ||
| 504 | */ | ||
| 505 | } while (ret != -ENODATA); | ||
| 506 | 484 | ||
| 507 | /* | 485 | /* |
| 508 | * discard done commands | 486 | * discard done commands |
diff --git a/include/linux/wait.h b/include/linux/wait.h index 2232ed16635a..71fc1d31e48d 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h | |||
| @@ -267,6 +267,21 @@ do { \ | |||
| 267 | __wait_event(wq, condition); \ | 267 | __wait_event(wq, condition); \ |
| 268 | } while (0) | 268 | } while (0) |
| 269 | 269 | ||
| 270 | #define __io_wait_event(wq, condition) \ | ||
| 271 | (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ | ||
| 272 | io_schedule()) | ||
| 273 | |||
| 274 | /* | ||
| 275 | * io_wait_event() -- like wait_event() but with io_schedule() | ||
| 276 | */ | ||
| 277 | #define io_wait_event(wq, condition) \ | ||
| 278 | do { \ | ||
| 279 | might_sleep(); \ | ||
| 280 | if (condition) \ | ||
| 281 | break; \ | ||
| 282 | __io_wait_event(wq, condition); \ | ||
| 283 | } while (0) | ||
| 284 | |||
| 270 | #define __wait_event_freezable(wq, condition) \ | 285 | #define __wait_event_freezable(wq, condition) \ |
| 271 | ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \ | 286 | ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \ |
| 272 | schedule(); try_to_freeze()) | 287 | schedule(); try_to_freeze()) |
