aboutsummaryrefslogtreecommitdiffstats
path: root/block/ll_rw_blk.c
diff options
context:
space:
mode:
authorJens Axboe <jens.axboe@oracle.com>2006-12-19 02:27:31 -0500
committerJens Axboe <jens.axboe@oracle.com>2006-12-19 02:27:31 -0500
commit2985259b0e3928d4cd0723ac5aad0d1190ab7717 (patch)
tree70d1d7801699102d3bb52bc84b8ec8f4fc371c06 /block/ll_rw_blk.c
parenta52de245ef0b6217a56fb2472ff65c3a196cafd5 (diff)
[PATCH] ->nr_sectors and ->hard_nr_sectors are not used for BLOCK_PC requests
It's a file system thing; for block requests the only size used in the I/O paths is ->data_len, as it is in bytes, not sectors.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r--block/ll_rw_blk.c6
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 79807dbc306e..71a78a7e42fd 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -2350,12 +2350,12 @@ static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
2350 else 2350 else
2351 bio = bio_copy_user(q, uaddr, len, reading); 2351 bio = bio_copy_user(q, uaddr, len, reading);
2352 2352
2353 if (IS_ERR(bio)) { 2353 if (IS_ERR(bio))
2354 return PTR_ERR(bio); 2354 return PTR_ERR(bio);
2355 }
2356 2355
2357 orig_bio = bio; 2356 orig_bio = bio;
2358 blk_queue_bounce(q, &bio); 2357 blk_queue_bounce(q, &bio);
2358
2359 /* 2359 /*
2360 * We link the bounce buffer in and could have to traverse it 2360 * We link the bounce buffer in and could have to traverse it
2361 * later so we have to get a ref to prevent it from being freed 2361 * later so we have to get a ref to prevent it from being freed
@@ -2379,8 +2379,6 @@ static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
2379 rq->biotail->bi_next = bio; 2379 rq->biotail->bi_next = bio;
2380 rq->biotail = bio; 2380 rq->biotail = bio;
2381 2381
2382 rq->nr_sectors += bio_sectors(bio);
2383 rq->hard_nr_sectors = rq->nr_sectors;
2384 rq->data_len += bio->bi_size; 2382 rq->data_len += bio->bi_size;
2385 } 2383 }
2386 spin_unlock_irq(q->queue_lock); 2384 spin_unlock_irq(q->queue_lock);