diff options
author | Tejun Heo <tj@kernel.org> | 2011-10-19 08:31:25 -0400 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2011-10-19 08:31:25 -0400 |
commit | 315fceee81155ef2aeed9316ca72aeea9347db5c (patch) | |
tree | 91ac02284b6737e6b65e855da771f52dbb3ad32d /block | |
parent | 75eb6c372d41d6d140b893873f6687d78c987a44 (diff) |
block: drop unnecessary blk_get/put_queue() in scsi_cmd_ioctl() and blk_get_tg()
blk_get/put_queue() in scsi_cmd_ioctl() and throtl_get_tg() are
completely bogus. The caller must have a reference to the queue on
entry and taking an extra reference doesn't change anything.
For scsi_cmd_ioctl(), the only effect is that it ends up checking
QUEUE_FLAG_DEAD on entry; however, this is bogus as the queue can die
right after blk_get_queue(). A dead queue should be, and is, handled in
the request issue path (it's somewhat broken now but that's a separate
problem and doesn't affect this one much).
throtl_get_tg() incorrectly assumes that q is RCU-freed. Also, it
doesn't check the return value of blk_get_queue(). If the queue is
already dead, it ends up doing an extra put.
Drop them.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-throttle.c | 8 | ||||
-rw-r--r-- | block/scsi_ioctl.c | 3 |
2 files changed, 2 insertions, 9 deletions
diff --git a/block/blk-throttle.c b/block/blk-throttle.c index f3f495ea4ee..ecba5fcef20 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c | |||
@@ -324,12 +324,8 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td) | |||
324 | /* | 324 | /* |
325 | * Need to allocate a group. Allocation of group also needs allocation | 325 | * Need to allocate a group. Allocation of group also needs allocation |
326 | * of per cpu stats which in-turn takes a mutex() and can block. Hence | 326 | * of per cpu stats which in-turn takes a mutex() and can block. Hence |
327 | * we need to drop rcu lock and queue_lock before we call alloc | 327 | * we need to drop rcu lock and queue_lock before we call alloc. |
328 | * | ||
329 | * Take the request queue reference to make sure queue does not | ||
330 | * go away once we return from allocation. | ||
331 | */ | 328 | */ |
332 | blk_get_queue(q); | ||
333 | rcu_read_unlock(); | 329 | rcu_read_unlock(); |
334 | spin_unlock_irq(q->queue_lock); | 330 | spin_unlock_irq(q->queue_lock); |
335 | 331 | ||
@@ -339,13 +335,11 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td) | |||
339 | * dead | 335 | * dead |
340 | */ | 336 | */ |
341 | if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { | 337 | if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { |
342 | blk_put_queue(q); | ||
343 | if (tg) | 338 | if (tg) |
344 | kfree(tg); | 339 | kfree(tg); |
345 | 340 | ||
346 | return ERR_PTR(-ENODEV); | 341 | return ERR_PTR(-ENODEV); |
347 | } | 342 | } |
348 | blk_put_queue(q); | ||
349 | 343 | ||
350 | /* Group allocated and queue is still alive. take the lock */ | 344 | /* Group allocated and queue is still alive. take the lock */ |
351 | spin_lock_irq(q->queue_lock); | 345 | spin_lock_irq(q->queue_lock); |
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index 4f4230b79bb..fbdf0d802ec 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c | |||
@@ -565,7 +565,7 @@ int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mod | |||
565 | { | 565 | { |
566 | int err; | 566 | int err; |
567 | 567 | ||
568 | if (!q || blk_get_queue(q)) | 568 | if (!q) |
569 | return -ENXIO; | 569 | return -ENXIO; |
570 | 570 | ||
571 | switch (cmd) { | 571 | switch (cmd) { |
@@ -686,7 +686,6 @@ int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mod | |||
686 | err = -ENOTTY; | 686 | err = -ENOTTY; |
687 | } | 687 | } |
688 | 688 | ||
689 | blk_put_queue(q); | ||
690 | return err; | 689 | return err; |
691 | } | 690 | } |
692 | EXPORT_SYMBOL(scsi_cmd_ioctl); | 691 | EXPORT_SYMBOL(scsi_cmd_ioctl); |