| author | Tejun Heo <tj@kernel.org> | 2009-05-07 09:24:45 -0400 |
|---|---|---|
| committer | Jens Axboe <jens.axboe@oracle.com> | 2009-05-11 03:50:55 -0400 |
| commit | 1011c1b9f2e45ce7c6e38888d2b83936aec38771 (patch) | |
| tree | 63f1f8f604631f37df9917a941531de0148f8379 /drivers/message | |
| parent | a2dec7b36364a5cc564c4d76cf16d2e7d33f5c05 (diff) | |
block: blk_rq_[cur_]{sectors|bytes}() usage cleanup
With the previous changes, the following are now guaranteed for all
requests in any valid state (illustrated by the sketch after the list):
* blk_rq_sectors() == blk_rq_bytes() >> 9
* blk_rq_cur_sectors() == blk_rq_cur_bytes() >> 9
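As a quick illustration of the two guarantees above, here is a minimal sketch (not part of this patch; the helper name and the WARN_ON placement are hypothetical, only the blk_rq_*() accessors are real):

```c
#include <linux/blkdev.h>
#include <linux/kernel.h>

/* Hypothetical helper, illustration only: asserts the invariants that the
 * commit message states, using the block-layer request accessors. */
static inline void check_rq_accessor_invariants(struct request *rq)
{
	/* remaining size of the whole request */
	WARN_ON(blk_rq_sectors(rq) != blk_rq_bytes(rq) >> 9);

	/* remaining size of the current segment */
	WARN_ON(blk_rq_cur_sectors(rq) != blk_rq_cur_bytes(rq) >> 9);
}
```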
Clean up accessor usages. Notable changes:
* nbd, i2o_block: blk_end_request_all() used instead of an explicitly computed byte count (the pattern is sketched after this list)
* scsi_lib: unnecessary conditional on request type removed
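As a rough sketch of the end_all change named above (the function below is hypothetical and only mirrors the resulting i2o_block/nbd pattern; it is not code from this patch):

```c
#include <linux/blkdev.h>

/* Hypothetical completion helper, illustration only.  Previously a driver
 * computed a "leftover" byte count (differently for fs vs. pc requests) and
 * passed it to blk_end_request(); now it can simply fail whatever is still
 * pending with blk_end_request_all(). */
static void example_end_request(struct request *req, int error,
				unsigned int nr_bytes)
{
	/* blk_end_request() returns true if the request is not yet done */
	if (blk_end_request(req, error, nr_bytes))
		if (error)
			blk_end_request_all(req, -EIO);
}
```

The same guarantee is what lets the scsi_lib request-type conditional go away: blk_rq_bytes() is now meaningful for every request, so no per-type byte-count computation is needed.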
[ Impact: cleanup ]
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Paul Clements <paul.clements@steeleye.com>
Cc: Pete Zaitcev <zaitcev@redhat.com>
Cc: Alex Dubov <oakad@yahoo.com>
Cc: Markus Lidel <Markus.Lidel@shadowconnect.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Boaz Harrosh <bharrosh@panasas.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'drivers/message')
-rw-r--r-- | drivers/message/i2o/i2o_block.c | 16 |
1 files changed, 4 insertions, 12 deletions
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index 6b61d289d6c9..e153f5d5237d 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -426,15 +426,9 @@ static void i2o_block_end_request(struct request *req, int error,
 	struct request_queue *q = req->q;
 	unsigned long flags;
 
-	if (blk_end_request(req, error, nr_bytes)) {
-		int leftover = (blk_rq_sectors(req) << KERNEL_SECTOR_SHIFT);
-
-		if (blk_pc_request(req))
-			leftover = blk_rq_bytes(req);
-
+	if (blk_end_request(req, error, nr_bytes))
 		if (error)
-			blk_end_request(req, -EIO, leftover);
-	}
+			blk_end_request_all(req, -EIO);
 
 	spin_lock_irqsave(q->queue_lock, flags);
 
@@ -832,15 +826,13 @@ static int i2o_block_transfer(struct request *req)
 
 		memcpy(mptr, cmd, 10);
 		mptr += 4;
-		*mptr++ =
-		    cpu_to_le32(blk_rq_sectors(req) << KERNEL_SECTOR_SHIFT);
+		*mptr++ = cpu_to_le32(blk_rq_bytes(req));
 	} else
 #endif
 	{
 		msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
 		*mptr++ = cpu_to_le32(ctl_flags);
-		*mptr++ =
-		    cpu_to_le32(blk_rq_sectors(req) << KERNEL_SECTOR_SHIFT);
+		*mptr++ = cpu_to_le32(blk_rq_bytes(req));
 		*mptr++ =
 		    cpu_to_le32((u32) (blk_rq_pos(req) << KERNEL_SECTOR_SHIFT));
 		*mptr++ =