author		Tejun Heo <tj@kernel.org>	2009-05-07 09:24:39 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2009-05-11 03:50:54 -0400
commit		83096ebf1263b2c1ee5e653ba37d993d02e3eb7b (patch)
tree		2226b71a616ec1cb2d37511c6a09ba9507a1cd69
parent		5b93629b4509c03ffa87a9316412fedf6f58cb37 (diff)
block: convert to pos and nr_sectors accessors
With recent cleanups, there is no place where a low level driver directly
manipulates request fields.  This means that the 'hard' request fields
always equal the !hard fields.  Convert all rq->sectors, nr_sectors and
current_nr_sectors references to accessors.

While at it, drop the superfluous blk_rq_pos() < 0 test in swim.c.

[ Impact: use pos and nr_sectors accessors ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Geert Uytterhoeven <Geert.Uytterhoeven@sonycom.com>
Tested-by: Grant Likely <grant.likely@secretlab.ca>
Acked-by: Grant Likely <grant.likely@secretlab.ca>
Tested-by: Adrian McMenamin <adrian@mcmen.demon.co.uk>
Acked-by: Adrian McMenamin <adrian@mcmen.demon.co.uk>
Acked-by: Mike Miller <mike.miller@hp.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
Cc: Borislav Petkov <petkovbb@googlemail.com>
Cc: Sergei Shtylyov <sshtylyov@ru.mvista.com>
Cc: Eric Moore <Eric.Moore@lsi.com>
Cc: Alan Stern <stern@rowland.harvard.edu>
Cc: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Pete Zaitcev <zaitcev@redhat.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Paul Clements <paul.clements@steeleye.com>
Cc: Tim Waugh <tim@cyberelk.net>
Cc: Jeff Garzik <jgarzik@pobox.com>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Alex Dubov <oakad@yahoo.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Dario Ballabio <ballabio_dario@emc.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: unsik Kim <donari75@gmail.com>
Cc: Laurent Vivier <Laurent@lvivier.info>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
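The conversion applied in every hunk below is mechanical: direct reads of
rq->sector, rq->nr_sectors and rq->current_nr_sectors become calls to
blk_rq_pos(), blk_rq_sectors() and blk_rq_cur_sectors().  As a rough
illustration only (the helper and its pr_debug output are hypothetical;
the three accessors are the real block-layer API introduced by this
series, returning sector_t and unsigned int respectively):

	#include <linux/kernel.h>
	#include <linux/blkdev.h>

	/* Hypothetical helper: report where a request starts and how much
	 * of it is left, using the accessors instead of the raw fields. */
	static void example_dump_rq(struct request *rq)
	{
		sector_t pos = blk_rq_pos(rq);		   /* first sector of the request */
		unsigned int nr = blk_rq_sectors(rq);	   /* sectors remaining in the request */
		unsigned int cur = blk_rq_cur_sectors(rq); /* sectors in the current segment */

		pr_debug("rq %p: pos=%llu nr=%u cur=%u\n", rq,
			 (unsigned long long)pos, nr, cur);
	}

A driver that previously read a field directly (e.g. "block = req->sector;")
now calls the accessor (e.g. "block = blk_rq_pos(req);"), so the driver's
view always matches the request state the block layer tracks internally.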
-rw-r--r--arch/um/drivers/ubd_kern.c2
-rw-r--r--block/as-iosched.c18
-rw-r--r--block/blk-barrier.c2
-rw-r--r--block/blk-core.c17
-rw-r--r--block/blk-merge.c10
-rw-r--r--block/cfq-iosched.c18
-rw-r--r--block/deadline-iosched.c2
-rw-r--r--block/elevator.c22
-rw-r--r--drivers/block/DAC960.c6
-rw-r--r--drivers/block/amiflop.c6
-rw-r--r--drivers/block/ataflop.c10
-rw-r--r--drivers/block/cciss.c22
-rw-r--r--drivers/block/cpqarray.c9
-rw-r--r--drivers/block/floppy.c53
-rw-r--r--drivers/block/hd.c14
-rw-r--r--drivers/block/mg_disk.c25
-rw-r--r--drivers/block/nbd.c12
-rw-r--r--drivers/block/paride/pcd.c4
-rw-r--r--drivers/block/paride/pd.c8
-rw-r--r--drivers/block/paride/pf.c8
-rw-r--r--drivers/block/ps3disk.c9
-rw-r--r--drivers/block/sunvdc.c2
-rw-r--r--drivers/block/swim.c6
-rw-r--r--drivers/block/swim3.c34
-rw-r--r--drivers/block/sx8.c6
-rw-r--r--drivers/block/ub.c6
-rw-r--r--drivers/block/viodasd.c2
-rw-r--r--drivers/block/virtio_blk.c2
-rw-r--r--drivers/block/xd.c4
-rw-r--r--drivers/block/xen-blkfront.c11
-rw-r--r--drivers/block/xsysace.c17
-rw-r--r--drivers/block/z2ram.c6
-rw-r--r--drivers/cdrom/gdrom.c6
-rw-r--r--drivers/cdrom/viocd.c2
-rw-r--r--drivers/memstick/core/mspro_block.c6
-rw-r--r--drivers/message/i2o/i2o_block.c20
-rw-r--r--drivers/mmc/card/block.c10
-rw-r--r--drivers/mtd/mtd_blkdevs.c7
-rw-r--r--drivers/s390/block/dasd.c2
-rw-r--r--drivers/s390/block/dasd_diag.c5
-rw-r--r--drivers/s390/block/dasd_eckd.c6
-rw-r--r--drivers/s390/block/dasd_fba.c7
-rw-r--r--drivers/s390/char/tape_34xx.c2
-rw-r--r--drivers/s390/char/tape_3590.c2
-rw-r--r--drivers/s390/char/tape_block.c2
-rw-r--r--drivers/sbus/char/jsflash.c4
-rw-r--r--drivers/scsi/eata.c24
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c22
-rw-r--r--drivers/scsi/scsi_lib.c6
-rw-r--r--drivers/scsi/sd.c24
-rw-r--r--drivers/scsi/sd_dif.c2
-rw-r--r--drivers/scsi/sr.c15
-rw-r--r--drivers/scsi/u14-34f.c22
-rw-r--r--include/scsi/scsi_cmnd.h2
54 files changed, 292 insertions, 279 deletions
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 433012764a37..402ba8f70fc9 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -1239,7 +1239,7 @@ static void do_ubd_request(struct request_queue *q)
 	}
 
 	req = dev->request;
-	sector = req->sector;
+	sector = blk_rq_pos(req);
 	while(dev->start_sg < dev->end_sg){
 		struct scatterlist *sg = &dev->sg[dev->start_sg];
 
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 45bd07059c28..7a12cf6ee1d3 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -306,8 +306,8 @@ as_choose_req(struct as_data *ad, struct request *rq1, struct request *rq2)
 	data_dir = rq_is_sync(rq1);
 
 	last = ad->last_sector[data_dir];
-	s1 = rq1->sector;
-	s2 = rq2->sector;
+	s1 = blk_rq_pos(rq1);
+	s2 = blk_rq_pos(rq2);
 
 	BUG_ON(data_dir != rq_is_sync(rq2));
 
@@ -566,13 +566,15 @@ static void as_update_iohist(struct as_data *ad, struct as_io_context *aic,
 		as_update_thinktime(ad, aic, thinktime);
 
 		/* Calculate read -> read seek distance */
-		if (aic->last_request_pos < rq->sector)
-			seek_dist = rq->sector - aic->last_request_pos;
+		if (aic->last_request_pos < blk_rq_pos(rq))
+			seek_dist = blk_rq_pos(rq) -
+					aic->last_request_pos;
 		else
-			seek_dist = aic->last_request_pos - rq->sector;
+			seek_dist = aic->last_request_pos -
+					blk_rq_pos(rq);
 		as_update_seekdist(ad, aic, seek_dist);
 	}
-	aic->last_request_pos = rq->sector + rq->nr_sectors;
+	aic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
 	set_bit(AS_TASK_IOSTARTED, &aic->state);
 	spin_unlock(&aic->lock);
 }
@@ -587,7 +589,7 @@ static int as_close_req(struct as_data *ad, struct as_io_context *aic,
 {
 	unsigned long delay;	/* jiffies */
 	sector_t last = ad->last_sector[ad->batch_data_dir];
-	sector_t next = rq->sector;
+	sector_t next = blk_rq_pos(rq);
 	sector_t delta; /* acceptable close offset (in sectors) */
 	sector_t s;
 
@@ -981,7 +983,7 @@ static void as_move_to_dispatch(struct as_data *ad, struct request *rq)
 	 * This has to be set in order to be correctly updated by
 	 * as_find_next_rq
 	 */
-	ad->last_sector[data_dir] = rq->sector + rq->nr_sectors;
+	ad->last_sector[data_dir] = blk_rq_pos(rq) + blk_rq_sectors(rq);
 
 	if (data_dir == BLK_RW_SYNC) {
 		struct io_context *ioc = RQ_IOC(rq);
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index c167de5b9eff..8713c2fbc4f6 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -324,7 +324,7 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 	/*
 	 * The driver must store the error location in ->bi_sector, if
 	 * it supports it. For non-stacked drivers, this should be copied
-	 * from rq->sector.
+	 * from blk_rq_pos(rq).
 	 */
 	if (error_sector)
 		*error_sector = bio->bi_sector;
diff --git a/block/blk-core.c b/block/blk-core.c
index 895e55b74a40..82dc20621c06 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -72,7 +72,7 @@ static void drive_stat_acct(struct request *rq, int new_io)
 		return;
 
 	cpu = part_stat_lock();
-	part = disk_map_sector_rcu(rq->rq_disk, rq->sector);
+	part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
 
 	if (!new_io)
 		part_stat_inc(cpu, part, merges[rw]);
@@ -185,10 +185,9 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
 		rq->cmd_flags);
 
-	printk(KERN_INFO " sector %llu, nr/cnr %lu/%u\n",
-					(unsigned long long)rq->sector,
-					rq->nr_sectors,
-					rq->current_nr_sectors);
+	printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
+	       (unsigned long long)blk_rq_pos(rq),
+	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
 	printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n",
 	       rq->bio, rq->biotail,
 	       rq->buffer, rq->data_len);
@@ -1557,7 +1556,7 @@ EXPORT_SYMBOL(submit_bio);
  */
 int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 {
-	if (rq->nr_sectors > q->max_sectors ||
+	if (blk_rq_sectors(rq) > q->max_sectors ||
 	    rq->data_len > q->max_hw_sectors << 9) {
 		printk(KERN_ERR "%s: over max size limit.\n", __func__);
 		return -EIO;
@@ -1645,7 +1644,7 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
 		int cpu;
 
 		cpu = part_stat_lock();
-		part = disk_map_sector_rcu(req->rq_disk, req->sector);
+		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 		part_stat_add(cpu, part, sectors[rw], bytes >> 9);
 		part_stat_unlock();
 	}
@@ -1665,7 +1664,7 @@ static void blk_account_io_done(struct request *req)
 		int cpu;
 
 		cpu = part_stat_lock();
-		part = disk_map_sector_rcu(req->rq_disk, req->sector);
+		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 
 		part_stat_inc(cpu, part, ios[rw]);
 		part_stat_add(cpu, part, ticks[rw], duration);
@@ -1846,7 +1845,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 	if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
 		printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
 				req->rq_disk ? req->rq_disk->disk_name : "?",
-				(unsigned long long)req->sector);
+				(unsigned long long)blk_rq_pos(req));
 	}
 
 	blk_account_io_completion(req, nr_bytes);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 23d2a6fe34a3..bf62a87a9da2 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -259,7 +259,7 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 	else
 		max_sectors = q->max_sectors;
 
-	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
+	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
 		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
@@ -284,7 +284,7 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		max_sectors = q->max_sectors;
 
 
-	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
+	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
 		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
@@ -315,7 +315,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 	/*
 	 * Will it become too large?
 	 */
-	if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
+	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > q->max_sectors)
 		return 0;
 
 	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
@@ -345,7 +345,7 @@ static void blk_account_io_merge(struct request *req)
 		int cpu;
 
 		cpu = part_stat_lock();
-		part = disk_map_sector_rcu(req->rq_disk, req->sector);
+		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 
 		part_round_stats(cpu, part);
 		part_dec_in_flight(part);
@@ -366,7 +366,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
 	/*
 	 * not contiguous
 	 */
-	if (req->sector + req->nr_sectors != next->sector)
+	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
 		return 0;
 
 	if (rq_data_dir(req) != rq_data_dir(next)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 575083a9ffe4..db4d990a66bf 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -349,8 +349,8 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
 	else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
 		return rq2;
 
-	s1 = rq1->sector;
-	s2 = rq2->sector;
+	s1 = blk_rq_pos(rq1);
+	s2 = blk_rq_pos(rq2);
 
 	last = cfqd->last_position;
 
@@ -949,10 +949,10 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
 					  struct request *rq)
 {
-	if (rq->sector >= cfqd->last_position)
-		return rq->sector - cfqd->last_position;
+	if (blk_rq_pos(rq) >= cfqd->last_position)
+		return blk_rq_pos(rq) - cfqd->last_position;
 	else
-		return cfqd->last_position - rq->sector;
+		return cfqd->last_position - blk_rq_pos(rq);
 }
 
 #define CIC_SEEK_THR	8 * 1024
@@ -1918,10 +1918,10 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
 
 	if (!cic->last_request_pos)
 		sdist = 0;
-	else if (cic->last_request_pos < rq->sector)
-		sdist = rq->sector - cic->last_request_pos;
+	else if (cic->last_request_pos < blk_rq_pos(rq))
+		sdist = blk_rq_pos(rq) - cic->last_request_pos;
 	else
-		sdist = cic->last_request_pos - rq->sector;
+		sdist = cic->last_request_pos - blk_rq_pos(rq);
 
 	/*
 	 * Don't allow the seek distance to get too large from the
@@ -2071,7 +2071,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	cfq_update_io_seektime(cfqd, cic, rq);
 	cfq_update_idle_window(cfqd, cfqq, cic);
 
-	cic->last_request_pos = rq->sector + rq->nr_sectors;
+	cic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
 
 	if (cfqq == cfqd->active_queue) {
 		/*
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index c4d991d4adef..b547cbca7b23 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -138,7 +138,7 @@ deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
 
 	__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
 	if (__rq) {
-		BUG_ON(sector != __rq->sector);
+		BUG_ON(sector != blk_rq_pos(__rq));
 
 		if (elv_rq_merge_ok(__rq, bio)) {
 			ret = ELEVATOR_FRONT_MERGE;
diff --git a/block/elevator.c b/block/elevator.c
index 1af5d9f04aff..918920056e42 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -52,7 +52,7 @@ static const int elv_hash_shift = 6;
 #define ELV_HASH_FN(sec)	\
 		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
 #define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
-#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
+#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
 
 DEFINE_TRACE(block_rq_insert);
 DEFINE_TRACE(block_rq_issue);
@@ -119,9 +119,9 @@ static inline int elv_try_merge(struct request *__rq, struct bio *bio)
 	 * we can merge and sequence is ok, check if it's possible
 	 */
 	if (elv_rq_merge_ok(__rq, bio)) {
-		if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
+		if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
 			ret = ELEVATOR_BACK_MERGE;
-		else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
+		else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
 			ret = ELEVATOR_FRONT_MERGE;
 	}
 
@@ -370,9 +370,9 @@ struct request *elv_rb_add(struct rb_root *root, struct request *rq)
 		parent = *p;
 		__rq = rb_entry(parent, struct request, rb_node);
 
-		if (rq->sector < __rq->sector)
+		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
 			p = &(*p)->rb_left;
-		else if (rq->sector > __rq->sector)
+		else if (blk_rq_pos(rq) > blk_rq_pos(__rq))
 			p = &(*p)->rb_right;
 		else
 			return __rq;
@@ -400,9 +400,9 @@ struct request *elv_rb_find(struct rb_root *root, sector_t sector)
 	while (n) {
 		rq = rb_entry(n, struct request, rb_node);
 
-		if (sector < rq->sector)
+		if (sector < blk_rq_pos(rq))
 			n = n->rb_left;
-		else if (sector > rq->sector)
+		else if (sector > blk_rq_pos(rq))
 			n = n->rb_right;
 		else
 			return rq;
@@ -441,14 +441,14 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 			break;
 		if (pos->cmd_flags & stop_flags)
 			break;
-		if (rq->sector >= boundary) {
-			if (pos->sector < boundary)
+		if (blk_rq_pos(rq) >= boundary) {
+			if (blk_rq_pos(pos) < boundary)
 				continue;
 		} else {
-			if (pos->sector >= boundary)
+			if (blk_rq_pos(pos) >= boundary)
 				break;
 		}
-		if (rq->sector >= pos->sector)
+		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
 			break;
 	}
 
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index f22ed6cc69f2..774ab05973a9 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -3338,8 +3338,8 @@ static int DAC960_process_queue(DAC960_Controller_T *Controller, struct request_
 	}
 	Command->Completion = Request->end_io_data;
 	Command->LogicalDriveNumber = (long)Request->rq_disk->private_data;
-	Command->BlockNumber = Request->sector;
-	Command->BlockCount = Request->nr_sectors;
+	Command->BlockNumber = blk_rq_pos(Request);
+	Command->BlockCount = blk_rq_sectors(Request);
 	Command->Request = Request;
 	blkdev_dequeue_request(Request);
 	Command->SegmentCount = blk_rq_map_sg(req_q,
@@ -3431,7 +3431,7 @@ static void DAC960_queue_partial_rw(DAC960_Command_T *Command)
    * successfully as possible.
    */
   Command->SegmentCount = 1;
-  Command->BlockNumber = Request->sector;
+  Command->BlockNumber = blk_rq_pos(Request);
   Command->BlockCount = 1;
   DAC960_QueueReadWriteCommand(Command);
   return;
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 8ff95f2c0ede..e4a14b94828e 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1351,13 +1351,13 @@ static void redo_fd_request(void)
 	drive = floppy - unit;
 
 	/* Here someone could investigate to be more efficient */
-	for (cnt = 0; cnt < CURRENT->current_nr_sectors; cnt++) {
+	for (cnt = 0; cnt < blk_rq_cur_sectors(CURRENT); cnt++) {
 #ifdef DEBUG
 		printk("fd: sector %ld + %d requested for %s\n",
-		       CURRENT->sector,cnt,
+		       blk_rq_pos(CURRENT), cnt,
 		       (rq_data_dir(CURRENT) == READ) ? "read" : "write");
 #endif
-		block = CURRENT->sector + cnt;
+		block = blk_rq_pos(CURRENT) + cnt;
 		if ((int)block > floppy->blocks) {
 			__blk_end_request_cur(CURRENT, -EIO);
 			goto repeat;
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 25067287211f..234024cda5ec 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -725,7 +725,7 @@ static void do_fd_action( int drive )
 	    if (IS_BUFFERED( drive, ReqSide, ReqTrack )) {
 		if (ReqCmd == READ) {
 		    copy_buffer( SECTOR_BUFFER(ReqSector), ReqData );
-		    if (++ReqCnt < CURRENT->current_nr_sectors) {
+		    if (++ReqCnt < blk_rq_cur_sectors(CURRENT)) {
 			/* read next sector */
 			setup_req_params( drive );
 			goto repeat;
@@ -1130,7 +1130,7 @@ static void fd_rwsec_done1(int status)
 		}
 	}
 
-	if (++ReqCnt < CURRENT->current_nr_sectors) {
+	if (++ReqCnt < blk_rq_cur_sectors(CURRENT)) {
 		/* read next sector */
 		setup_req_params( SelectedDrive );
 		do_fd_action( SelectedDrive );
@@ -1394,7 +1394,7 @@ static void redo_fd_request(void)
 
 	DPRINT(("redo_fd_request: CURRENT=%p dev=%s CURRENT->sector=%ld\n",
 		CURRENT, CURRENT ? CURRENT->rq_disk->disk_name : "",
-		CURRENT ? CURRENT->sector : 0 ));
+		CURRENT ? blk_rq_pos(CURRENT) : 0 ));
 
 	IsFormatting = 0;
 
@@ -1440,7 +1440,7 @@ repeat:
 		UD.autoprobe = 0;
 	}
 
-	if (CURRENT->sector + 1 > UDT->blocks) {
+	if (blk_rq_pos(CURRENT) + 1 > UDT->blocks) {
 		__blk_end_request_cur(CURRENT, -EIO);
 		goto repeat;
 	}
@@ -1450,7 +1450,7 @@ repeat:
 
 	ReqCnt = 0;
 	ReqCmd = rq_data_dir(CURRENT);
-	ReqBlock = CURRENT->sector;
+	ReqBlock = blk_rq_pos(CURRENT);
 	ReqBuffer = CURRENT->buffer;
 	setup_req_params( drive );
 	do_fd_action( drive );
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index f22d4932433f..ab7b04c0db70 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -2835,10 +2835,10 @@ static void do_cciss_request(struct request_queue *q)
 	c->Request.Timeout = 0;	// Don't time out
 	c->Request.CDB[0] =
 	    (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
-	start_blk = creq->sector;
+	start_blk = blk_rq_pos(creq);
 #ifdef CCISS_DEBUG
-	printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n", (int)creq->sector,
-	       (int)creq->nr_sectors);
+	printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n",
+	       (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq));
 #endif /* CCISS_DEBUG */
 
 	sg_init_table(tmp_sg, MAXSGENTRIES);
@@ -2864,8 +2864,8 @@ static void do_cciss_request(struct request_queue *q)
 		h->maxSG = seg;
 
 #ifdef CCISS_DEBUG
-	printk(KERN_DEBUG "cciss: Submitting %lu sectors in %d segments\n",
-	       creq->nr_sectors, seg);
+	printk(KERN_DEBUG "cciss: Submitting %u sectors in %d segments\n",
+	       blk_rq_sectors(creq), seg);
 #endif /* CCISS_DEBUG */
 
 	c->Header.SGList = c->Header.SGTotal = seg;
@@ -2877,8 +2877,8 @@ static void do_cciss_request(struct request_queue *q)
 			c->Request.CDB[4] = (start_blk >> 8) & 0xff;
 			c->Request.CDB[5] = start_blk & 0xff;
 			c->Request.CDB[6] = 0;	// (sect >> 24) & 0xff; MSB
-			c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
-			c->Request.CDB[8] = creq->nr_sectors & 0xff;
+			c->Request.CDB[7] = (blk_rq_sectors(creq) >> 8) & 0xff;
+			c->Request.CDB[8] = blk_rq_sectors(creq) & 0xff;
 			c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
 		} else {
 			u32 upper32 = upper_32_bits(start_blk);
@@ -2893,10 +2893,10 @@ static void do_cciss_request(struct request_queue *q)
 			c->Request.CDB[7]= (start_blk >> 16) & 0xff;
 			c->Request.CDB[8]= (start_blk >> 8) & 0xff;
 			c->Request.CDB[9]= start_blk & 0xff;
-			c->Request.CDB[10]= (creq->nr_sectors >> 24) & 0xff;
-			c->Request.CDB[11]= (creq->nr_sectors >> 16) & 0xff;
-			c->Request.CDB[12]= (creq->nr_sectors >> 8) & 0xff;
-			c->Request.CDB[13]= creq->nr_sectors & 0xff;
+			c->Request.CDB[10]= (blk_rq_sectors(creq) >> 24) & 0xff;
+			c->Request.CDB[11]= (blk_rq_sectors(creq) >> 16) & 0xff;
+			c->Request.CDB[12]= (blk_rq_sectors(creq) >> 8) & 0xff;
+			c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff;
 			c->Request.CDB[14] = c->Request.CDB[15] = 0;
 		}
 	} else if (blk_pc_request(creq)) {
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 488a8f4a60aa..a5caeff4718e 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -919,10 +919,11 @@ queue_next:
 	c->hdr.size = sizeof(rblk_t) >> 2;
 	c->size += sizeof(rblk_t);
 
-	c->req.hdr.blk = creq->sector;
+	c->req.hdr.blk = blk_rq_pos(creq);
 	c->rq = creq;
 DBGPX(
-	printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors);
+	printk("sector=%d, nr_sectors=%u\n",
+	       blk_rq_pos(creq), blk_rq_sectors(creq));
 );
 	sg_init_table(tmp_sg, SG_MAX);
 	seg = blk_rq_map_sg(q, creq, tmp_sg);
@@ -940,9 +941,9 @@ DBGPX(
 						tmp_sg[i].offset,
 						tmp_sg[i].length, dir);
 	}
-DBGPX(	printk("Submitting %d sectors in %d segments\n", creq->nr_sectors, seg); );
+DBGPX(	printk("Submitting %u sectors in %d segments\n", blk_rq_sectors(creq), seg); );
 	c->req.hdr.sg_cnt = seg;
-	c->req.hdr.blk_cnt = creq->nr_sectors;
+	c->req.hdr.blk_cnt = blk_rq_sectors(creq);
 	c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
 	c->type = CMD_RWREQ;
 
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 1300df6f1642..452486283386 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2303,7 +2303,7 @@ static void floppy_end_request(struct request *req, int error)
 
 	/* current_count_sectors can be zero if transfer failed */
 	if (error)
-		nr_sectors = req->current_nr_sectors;
+		nr_sectors = blk_rq_cur_sectors(req);
 	if (__blk_end_request(req, error, nr_sectors << 9))
 		return;
 
@@ -2332,7 +2332,7 @@ static void request_done(int uptodate)
 	if (uptodate) {
 		/* maintain values for invalidation on geometry
 		 * change */
-		block = current_count_sectors + req->sector;
+		block = current_count_sectors + blk_rq_pos(req);
 		INFBOUND(DRS->maxblock, block);
 		if (block > _floppy->sect)
 			DRS->maxtrack = 1;
@@ -2346,10 +2346,10 @@ static void request_done(int uptodate)
 			/* record write error information */
 			DRWE->write_errors++;
 			if (DRWE->write_errors == 1) {
-				DRWE->first_error_sector = req->sector;
+				DRWE->first_error_sector = blk_rq_pos(req);
 				DRWE->first_error_generation = DRS->generation;
 			}
-			DRWE->last_error_sector = req->sector;
+			DRWE->last_error_sector = blk_rq_pos(req);
 			DRWE->last_error_generation = DRS->generation;
 		}
 		spin_lock_irqsave(q->queue_lock, flags);
@@ -2503,24 +2503,24 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
 
 	max_sector = transfer_size(ssize,
 				   min(max_sector, max_sector_2),
-				   current_req->nr_sectors);
+				   blk_rq_sectors(current_req));
 
 	if (current_count_sectors <= 0 && CT(COMMAND) == FD_WRITE &&
-	    buffer_max > fsector_t + current_req->nr_sectors)
+	    buffer_max > fsector_t + blk_rq_sectors(current_req))
 		current_count_sectors = min_t(int, buffer_max - fsector_t,
-					      current_req->nr_sectors);
+					      blk_rq_sectors(current_req));
 
 	remaining = current_count_sectors << 9;
 #ifdef FLOPPY_SANITY_CHECK
-	if ((remaining >> 9) > current_req->nr_sectors &&
+	if ((remaining >> 9) > blk_rq_sectors(current_req) &&
 	    CT(COMMAND) == FD_WRITE) {
 		DPRINT("in copy buffer\n");
 		printk("current_count_sectors=%ld\n", current_count_sectors);
 		printk("remaining=%d\n", remaining >> 9);
-		printk("current_req->nr_sectors=%ld\n",
-		       current_req->nr_sectors);
+		printk("current_req->nr_sectors=%u\n",
+		       blk_rq_sectors(current_req));
 		printk("current_req->current_nr_sectors=%u\n",
-		       current_req->current_nr_sectors);
+		       blk_rq_cur_sectors(current_req));
 		printk("max_sector=%d\n", max_sector);
 		printk("ssize=%d\n", ssize);
 	}
@@ -2530,7 +2530,7 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
 
 	dma_buffer = floppy_track_buffer + ((fsector_t - buffer_min) << 9);
 
-	size = current_req->current_nr_sectors << 9;
+	size = blk_rq_cur_sectors(current_req) << 9;
 
 	rq_for_each_segment(bv, current_req, iter) {
 		if (!remaining)
@@ -2648,10 +2648,10 @@ static int make_raw_rw_request(void)
 
 	max_sector = _floppy->sect * _floppy->head;
 
-	TRACK = (int)current_req->sector / max_sector;
-	fsector_t = (int)current_req->sector % max_sector;
+	TRACK = (int)blk_rq_pos(current_req) / max_sector;
+	fsector_t = (int)blk_rq_pos(current_req) % max_sector;
 	if (_floppy->track && TRACK >= _floppy->track) {
-		if (current_req->current_nr_sectors & 1) {
+		if (blk_rq_cur_sectors(current_req) & 1) {
 			current_count_sectors = 1;
 			return 1;
 		} else
@@ -2669,7 +2669,7 @@ static int make_raw_rw_request(void)
 		if (fsector_t >= max_sector) {
 			current_count_sectors =
 			    min_t(int, _floppy->sect - fsector_t,
-				  current_req->nr_sectors);
+				  blk_rq_sectors(current_req));
 			return 1;
 		}
 		SIZECODE = 2;
@@ -2720,7 +2720,7 @@ static int make_raw_rw_request(void)
 
 	in_sector_offset = (fsector_t % _floppy->sect) % ssize;
 	aligned_sector_t = fsector_t - in_sector_offset;
-	max_size = current_req->nr_sectors;
+	max_size = blk_rq_sectors(current_req);
 	if ((raw_cmd->track == buffer_track) &&
 	    (current_drive == buffer_drive) &&
 	    (fsector_t >= buffer_min) && (fsector_t < buffer_max)) {
@@ -2729,10 +2729,10 @@ static int make_raw_rw_request(void)
 			copy_buffer(1, max_sector, buffer_max);
 			return 1;
 		}
-	} else if (in_sector_offset || current_req->nr_sectors < ssize) {
+	} else if (in_sector_offset || blk_rq_sectors(current_req) < ssize) {
 		if (CT(COMMAND) == FD_WRITE) {
-			if (fsector_t + current_req->nr_sectors > ssize &&
-			    fsector_t + current_req->nr_sectors < ssize + ssize)
+			if (fsector_t + blk_rq_sectors(current_req) > ssize &&
+			    fsector_t + blk_rq_sectors(current_req) < ssize + ssize)
 				max_size = ssize + ssize;
 			else
 				max_size = ssize;
@@ -2776,7 +2776,7 @@ static int make_raw_rw_request(void)
 	    (indirect * 2 > direct * 3 &&
 	     *errors < DP->max_errors.read_track && ((!probing
 	       || (DP->read_track & (1 << DRS->probed_format)))))) {
-		max_size = current_req->nr_sectors;
+		max_size = blk_rq_sectors(current_req);
 	} else {
 		raw_cmd->kernel_data = current_req->buffer;
 		raw_cmd->length = current_count_sectors << 9;
@@ -2801,7 +2801,7 @@ static int make_raw_rw_request(void)
 	    fsector_t > buffer_max ||
 	    fsector_t < buffer_min ||
 	    ((CT(COMMAND) == FD_READ ||
-	      (!in_sector_offset && current_req->nr_sectors >= ssize)) &&
+	      (!in_sector_offset && blk_rq_sectors(current_req) >= ssize)) &&
 	     max_sector > 2 * max_buffer_sectors + buffer_min &&
 	     max_size + fsector_t > 2 * max_buffer_sectors + buffer_min)
 	    /* not enough space */
@@ -2879,8 +2879,8 @@ static int make_raw_rw_request(void)
 			printk("write\n");
 			return 0;
 		}
-	} else if (raw_cmd->length > current_req->nr_sectors << 9 ||
-		   current_count_sectors > current_req->nr_sectors) {
+	} else if (raw_cmd->length > blk_rq_sectors(current_req) << 9 ||
+		   current_count_sectors > blk_rq_sectors(current_req)) {
 		DPRINT("buffer overrun in direct transfer\n");
 		return 0;
 	} else if (raw_cmd->length < current_count_sectors << 9) {
@@ -2990,8 +2990,9 @@ static void do_fd_request(struct request_queue * q)
 	if (usage_count == 0) {
 		printk("warning: usage count=0, current_req=%p exiting\n",
 		       current_req);
-		printk("sect=%ld type=%x flags=%x\n", (long)current_req->sector,
-		       current_req->cmd_type, current_req->cmd_flags);
+		printk("sect=%ld type=%x flags=%x\n",
+		       (long)blk_rq_pos(current_req), current_req->cmd_type,
+		       current_req->cmd_flags);
 		return;
 	}
 	if (test_bit(0, &fdc_busy)) {
diff --git a/drivers/block/hd.c b/drivers/block/hd.c
index 75b9ca95c4eb..a3b39940ce02 100644
--- a/drivers/block/hd.c
+++ b/drivers/block/hd.c
@@ -228,7 +228,7 @@ static void dump_status(const char *msg, unsigned int stat)
 		printk(", CHS=%d/%d/%d", (inb(HD_HCYL)<<8) + inb(HD_LCYL),
 			inb(HD_CURRENT) & 0xf, inb(HD_SECTOR));
 		if (CURRENT)
-			printk(", sector=%ld", CURRENT->sector);
+			printk(", sector=%ld", blk_rq_pos(CURRENT));
 	}
 	printk("\n");
 }
@@ -457,9 +457,9 @@ ok_to_read:
 	req = CURRENT;
 	insw(HD_DATA, req->buffer, 256);
 #ifdef DEBUG
-	printk("%s: read: sector %ld, remaining = %ld, buffer=%p\n",
-	       req->rq_disk->disk_name, req->sector + 1, req->nr_sectors - 1,
-	       req->buffer+512);
+	printk("%s: read: sector %ld, remaining = %u, buffer=%p\n",
+	       req->rq_disk->disk_name, blk_rq_pos(req) + 1,
+	       blk_rq_sectors(req) - 1, req->buffer+512);
 #endif
 	if (__blk_end_request(req, 0, 512)) {
 		SET_HANDLER(&read_intr);
@@ -485,7 +485,7 @@ static void write_intr(void)
 			continue;
 		if (!OK_STATUS(i))
 			break;
-		if ((req->nr_sectors <= 1) || (i & DRQ_STAT))
+		if ((blk_rq_sectors(req) <= 1) || (i & DRQ_STAT))
 			goto ok_to_write;
 	} while (--retries > 0);
 	dump_status("write_intr", i);
@@ -589,8 +589,8 @@ repeat:
 		return;
 	}
 	disk = req->rq_disk->private_data;
-	block = req->sector;
-	nsect = req->nr_sectors;
+	block = blk_rq_pos(req);
+	nsect = blk_rq_sectors(req);
 	if (block >= get_capacity(req->rq_disk) ||
 	    ((block+nsect) > get_capacity(req->rq_disk))) {
 		printk("%s: bad access: block=%d, count=%d\n",
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index 71e56cc28cac..826c3492b9fe 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -220,7 +220,8 @@ static void mg_dump_status(const char *msg, unsigned int stat,
 	if (host->breq) {
 		req = elv_next_request(host->breq);
 		if (req)
-			printk(", sector=%u", (u32)req->sector);
+			printk(", sector=%u",
+			       (unsigned int)blk_rq_pos(req));
 	}
 
 }
@@ -493,12 +494,12 @@ static void mg_read(struct request *req)
 	u32 j;
 	struct mg_host *host = req->rq_disk->private_data;
 
-	if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_RD, NULL) !=
-			MG_ERR_NONE)
+	if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
+		   MG_CMD_RD, NULL) != MG_ERR_NONE)
 		mg_bad_rw_intr(host);
 
 	MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
-			req->nr_sectors, req->sector, req->buffer);
+	       blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
 
 	do {
 		u16 *buff = (u16 *)req->buffer;
@@ -522,14 +523,14 @@ static void mg_write(struct request *req)
 	u32 j;
 	struct mg_host *host = req->rq_disk->private_data;
 
-	if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_WR, NULL) !=
-			MG_ERR_NONE) {
+	if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
+		   MG_CMD_WR, NULL) != MG_ERR_NONE) {
 		mg_bad_rw_intr(host);
 		return;
 	}
 
 	MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
-			req->nr_sectors, req->sector, req->buffer);
+	       blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
 
 	do {
 		u16 *buff = (u16 *)req->buffer;
@@ -579,7 +580,7 @@ ok_to_read:
 			(i << 1));
 
 	MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
-			req->sector, req->nr_sectors - 1, req->buffer);
+	       blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer);
 
 	/* send read confirm */
 	outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
@@ -609,7 +610,7 @@ static void mg_write_intr(struct mg_host *host)
 			break;
 		if (!MG_READY_OK(i))
 			break;
-		if ((req->nr_sectors <= 1) || (i & ATA_DRQ))
+		if ((blk_rq_sectors(req) <= 1) || (i & ATA_DRQ))
 			goto ok_to_write;
 	} while (0);
 	mg_dump_status("mg_write_intr", i, host);
@@ -627,7 +628,7 @@ ok_to_write:
 			buff++;
 		}
 		MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
-				req->sector, req->nr_sectors, req->buffer);
+		       blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
 		host->mg_do_intr = mg_write_intr;
 		mod_timer(&host->timer, jiffies + 3 * HZ);
 	}
@@ -749,9 +750,9 @@ static void mg_request(struct request_queue *q)
 
 		del_timer(&host->timer);
 
-		sect_num = req->sector;
+		sect_num = blk_rq_pos(req);
 		/* deal whole segments */
-		sect_cnt = req->nr_sectors;
+		sect_cnt = blk_rq_sectors(req);
 
 		/* sanity check */
 		if (sect_num >= get_capacity(req->rq_disk) ||
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index a9ab8be9d92f..977a57377930 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -110,7 +110,7 @@ static void nbd_end_request(struct request *req)
 			req, error ? "failed" : "done");
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	__blk_end_request(req, error, req->nr_sectors << 9);
+	__blk_end_request(req, error, blk_rq_sectors(req) << 9);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
@@ -231,19 +231,19 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
 {
 	int result, flags;
 	struct nbd_request request;
-	unsigned long size = req->nr_sectors << 9;
+	unsigned long size = blk_rq_sectors(req) << 9;
 
 	request.magic = htonl(NBD_REQUEST_MAGIC);
 	request.type = htonl(nbd_cmd(req));
-	request.from = cpu_to_be64((u64) req->sector << 9);
+	request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
 	request.len = htonl(size);
 	memcpy(request.handle, &req, sizeof(req));
 
-	dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%luB)\n",
+	dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n",
 			lo->disk->disk_name, req,
 			nbdcmd_to_ascii(nbd_cmd(req)),
-			(unsigned long long)req->sector << 9,
-			req->nr_sectors << 9);
+			(unsigned long long)blk_rq_pos(req) << 9,
+			blk_rq_sectors(req) << 9);
 	result = sock_xmit(lo, 1, &request, sizeof(request),
 			(nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
 	if (result <= 0) {
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 9fd57c2aa463..2d5dc0af55e4 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -728,8 +728,8 @@ static void do_pcd_request(struct request_queue * q)
 		if (cd != pcd_current)
 			pcd_bufblk = -1;
 		pcd_current = cd;
-		pcd_sector = pcd_req->sector;
-		pcd_count = pcd_req->current_nr_sectors;
+		pcd_sector = blk_rq_pos(pcd_req);
+		pcd_count = blk_rq_cur_sectors(pcd_req);
 		pcd_buf = pcd_req->buffer;
 		pcd_busy = 1;
 		ps_set_intr(do_pcd_read, NULL, 0, nice);
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 0732df4e901a..9ec5d4ac0b64 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -444,11 +444,11 @@ static enum action do_pd_io_start(void)
 
 	pd_cmd = rq_data_dir(pd_req);
 	if (pd_cmd == READ || pd_cmd == WRITE) {
-		pd_block = pd_req->sector;
-		pd_count = pd_req->current_nr_sectors;
+		pd_block = blk_rq_pos(pd_req);
+		pd_count = blk_rq_cur_sectors(pd_req);
 		if (pd_block + pd_count > get_capacity(pd_req->rq_disk))
 			return Fail;
-		pd_run = pd_req->nr_sectors;
+		pd_run = blk_rq_sectors(pd_req);
 		pd_buf = pd_req->buffer;
 		pd_retries = 0;
 		if (pd_cmd == READ)
@@ -479,7 +479,7 @@ static int pd_next_buf(void)
 		return 0;
 	spin_lock_irqsave(&pd_lock, saved_flags);
 	__blk_end_request_cur(pd_req, 0);
-	pd_count = pd_req->current_nr_sectors;
+	pd_count = blk_rq_cur_sectors(pd_req);
 	pd_buf = pd_req->buffer;
 	spin_unlock_irqrestore(&pd_lock, saved_flags);
 	return 0;
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 3871e3586d6d..e88c889aa7f2 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -768,9 +768,9 @@ repeat:
 		return;
 
 	pf_current = pf_req->rq_disk->private_data;
-	pf_block = pf_req->sector;
-	pf_run = pf_req->nr_sectors;
-	pf_count = pf_req->current_nr_sectors;
+	pf_block = blk_rq_pos(pf_req);
+	pf_run = blk_rq_sectors(pf_req);
+	pf_count = blk_rq_cur_sectors(pf_req);
 
 	if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) {
 		pf_end_request(-EIO);
@@ -810,7 +810,7 @@ static int pf_next_buf(void)
 		spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
 		if (!pf_req)
 			return 1;
-		pf_count = pf_req->current_nr_sectors;
+		pf_count = blk_rq_cur_sectors(pf_req);
 		pf_buf = pf_req->buffer;
 	}
 	return 0;
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index c2388673684e..8d583081b50a 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -134,13 +134,12 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
 	rq_for_each_segment(bv, req, iter)
 		n++;
 	dev_dbg(&dev->sbd.core,
-		"%s:%u: %s req has %u bvecs for %lu sectors %lu hard sectors\n",
-		__func__, __LINE__, op, n, req->nr_sectors,
-		blk_rq_sectors(req));
+		"%s:%u: %s req has %u bvecs for %u sectors\n",
+		__func__, __LINE__, op, n, blk_rq_sectors(req));
 #endif
 
-	start_sector = req->sector * priv->blocking_factor;
-	sectors = req->nr_sectors * priv->blocking_factor;
+	start_sector = blk_rq_pos(req) * priv->blocking_factor;
+	sectors = blk_rq_sectors(req) * priv->blocking_factor;
 	dev_dbg(&dev->sbd.core, "%s:%u: %s %llu sectors starting at %llu\n",
 		__func__, __LINE__, op, sectors, start_sector);
 
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index f59887c5ffbd..9f351bfa15ea 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -416,7 +416,7 @@ static int __send_request(struct request *req)
 		desc->slice = 0;
 	}
 	desc->status = ~0;
-	desc->offset = (req->sector << 9) / port->vdisk_block_size;
+	desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size;
 	desc->size = len;
 	desc->ncookies = err;
 
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 97ef4266c4c7..fc6a1c322933 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -531,7 +531,7 @@ static void redo_fd_request(struct request_queue *q)
 	while ((req = elv_next_request(q))) {
 
 		fs = req->rq_disk->private_data;
-		if (req->sector < 0 || req->sector >= fs->total_secs) {
+		if (blk_rq_pos(req) >= fs->total_secs) {
 			__blk_end_request_cur(req, -EIO);
 			continue;
 		}
@@ -551,8 +551,8 @@ static void redo_fd_request(struct request_queue *q)
 			__blk_end_request_cur(req, -EIO);
 			break;
 		case READ:
-			if (floppy_read_sectors(fs, req->sector,
-						req->current_nr_sectors,
+			if (floppy_read_sectors(fs, blk_rq_pos(req),
+						blk_rq_cur_sectors(req),
 						req->buffer)) {
 				__blk_end_request_cur(req, -EIO);
 				continue;
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 424855945b9b..c1b9a4dc11ba 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -312,14 +312,14 @@ static void start_request(struct floppy_state *fs)
 	}
 	while (fs->state == idle && (req = elv_next_request(swim3_queue))) {
 #if 0
-		printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%ld buf=%p\n",
+		printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
 		       req->rq_disk->disk_name, req->cmd,
-		       (long)req->sector, req->nr_sectors, req->buffer);
-		printk(" errors=%d current_nr_sectors=%ld\n",
-		       req->errors, req->current_nr_sectors);
+		       (long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
+		printk(" errors=%d current_nr_sectors=%u\n",
+		       req->errors, blk_rq_cur_sectors(req));
 #endif
 
-		if (req->sector >= fs->total_secs) {
+		if (blk_rq_pos(req) >= fs->total_secs) {
 			__blk_end_request_cur(req, -EIO);
 			continue;
 		}
@@ -337,13 +337,14 @@ static void start_request(struct floppy_state *fs)
 			}
 		}
 
-		/* Do not remove the cast. req->sector is now a sector_t and
-		 * can be 64 bits, but it will never go past 32 bits for this
-		 * driver anyway, so we can safely cast it down and not have
-		 * to do a 64/32 division
+		/* Do not remove the cast. blk_rq_pos(req) is now a
+		 * sector_t and can be 64 bits, but it will never go
+		 * past 32 bits for this driver anyway, so we can
+		 * safely cast it down and not have to do a 64/32
+		 * division
 		 */
-		fs->req_cyl = ((long)req->sector) / fs->secpercyl;
-		x = ((long)req->sector) % fs->secpercyl;
+		fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl;
+		x = ((long)blk_rq_pos(req)) % fs->secpercyl;
 		fs->head = x / fs->secpertrack;
 		fs->req_sector = x % fs->secpertrack + 1;
 		fd_req = req;
@@ -420,7 +421,7 @@ static inline void setup_transfer(struct floppy_state *fs)
 	struct dbdma_cmd *cp = fs->dma_cmd;
 	struct dbdma_regs __iomem *dr = fs->dma;
 
-	if (fd_req->current_nr_sectors <= 0) {
+	if (blk_rq_cur_sectors(fd_req) <= 0) {
 		printk(KERN_ERR "swim3: transfer 0 sectors?\n");
 		return;
 	}
@@ -428,8 +429,8 @@ static inline void setup_transfer(struct floppy_state *fs)
 		n = 1;
 	else {
 		n = fs->secpertrack - fs->req_sector + 1;
-		if (n > fd_req->current_nr_sectors)
-			n = fd_req->current_nr_sectors;
+		if (n > blk_rq_cur_sectors(fd_req))
+			n = blk_rq_cur_sectors(fd_req);
 	}
 	fs->scount = n;
 	swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
@@ -600,7 +601,8 @@ static void xfer_timeout(unsigned long data)
600 out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION); 601 out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
601 out_8(&sw->select, RELAX); 602 out_8(&sw->select, RELAX);
602 printk(KERN_ERR "swim3: timeout %sing sector %ld\n", 603 printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
603 (rq_data_dir(fd_req)==WRITE? "writ": "read"), (long)fd_req->sector); 604 (rq_data_dir(fd_req)==WRITE? "writ": "read"),
605 (long)blk_rq_pos(fd_req));
604 __blk_end_request_cur(fd_req, -EIO); 606 __blk_end_request_cur(fd_req, -EIO);
605 fs->state = idle; 607 fs->state = idle;
606 start_request(fs); 608 start_request(fs);
@@ -714,7 +716,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
714 } else { 716 } else {
715 printk("swim3: error %sing block %ld (err=%x)\n", 717 printk("swim3: error %sing block %ld (err=%x)\n",
716 rq_data_dir(fd_req) == WRITE? "writ": "read", 718 rq_data_dir(fd_req) == WRITE? "writ": "read",
717 (long)fd_req->sector, err); 719 (long)blk_rq_pos(fd_req), err);
718 __blk_end_request_cur(fd_req, -EIO); 720 __blk_end_request_cur(fd_req, -EIO);
719 fs->state = idle; 721 fs->state = idle;
720 } 722 }
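
swim3 keeps its cast to long before dividing, since the position now comes back as a sector_t but floppy offsets always fit in 32 bits and a 64/32 division would be wasteful. Consolidated, the geometry math from the hunk above looks roughly like this (field names from the diff, otherwise illustrative):

    unsigned long sec = (long)blk_rq_pos(req);      /* safe: floppy offsets fit in 32 bits */

    fs->req_cyl    = sec / fs->secpercyl;            /* cylinder */
    sec            = sec % fs->secpercyl;
    fs->head       = sec / fs->secpertrack;          /* head within the cylinder */
    fs->req_sector = sec % fs->secpertrack + 1;      /* sectors on a track are 1-based */
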
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 60e85bb6f790..087c94c8b2da 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -903,10 +903,10 @@ queue_one_request:
903 msg->sg_count = n_elem; 903 msg->sg_count = n_elem;
904 msg->sg_type = SGT_32BIT; 904 msg->sg_type = SGT_32BIT;
905 msg->handle = cpu_to_le32(TAG_ENCODE(crq->tag)); 905 msg->handle = cpu_to_le32(TAG_ENCODE(crq->tag));
906 msg->lba = cpu_to_le32(rq->sector & 0xffffffff); 906 msg->lba = cpu_to_le32(blk_rq_pos(rq) & 0xffffffff);
907 tmp = (rq->sector >> 16) >> 16; 907 tmp = (blk_rq_pos(rq) >> 16) >> 16;
908 msg->lba_high = cpu_to_le16( (u16) tmp ); 908 msg->lba_high = cpu_to_le16( (u16) tmp );
909 msg->lba_count = cpu_to_le16(rq->nr_sectors); 909 msg->lba_count = cpu_to_le16(blk_rq_sectors(rq));
910 910
911 msg_size = sizeof(struct carm_msg_rw) - sizeof(msg->sg); 911 msg_size = sizeof(struct carm_msg_rw) - sizeof(msg->sg);
912 for (i = 0; i < n_elem; i++) { 912 for (i = 0; i < n_elem; i++) {
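
For sx8 the 64-bit position is split across two message fields; the double 16-bit shift keeps the expression well defined even when sector_t is only 32 bits wide. A sketch of the split, with the msg field names taken from the hunk:

    sector_t pos = blk_rq_pos(rq);

    msg->lba       = cpu_to_le32(pos & 0xffffffff);           /* low 32 bits of the LBA */
    msg->lba_high  = cpu_to_le16((u16)((pos >> 16) >> 16));   /* high bits, shifted in two steps */
    msg->lba_count = cpu_to_le16(blk_rq_sectors(rq));         /* whole request, in sectors */
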
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 8c2cc71327e3..dc3b899ad2b3 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -726,8 +726,8 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
726 * The call to blk_queue_hardsect_size() guarantees that request 726 * The call to blk_queue_hardsect_size() guarantees that request
727 * is aligned, but it is given in terms of 512 byte units, always. 727 * is aligned, but it is given in terms of 512 byte units, always.
728 */ 728 */
729 block = rq->sector >> lun->capacity.bshift; 729 block = blk_rq_pos(rq) >> lun->capacity.bshift;
730 nblks = rq->nr_sectors >> lun->capacity.bshift; 730 nblks = blk_rq_sectors(rq) >> lun->capacity.bshift;
731 731
732 cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10; 732 cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
733 /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */ 733 /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
@@ -739,7 +739,7 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
739 cmd->cdb[8] = nblks; 739 cmd->cdb[8] = nblks;
740 cmd->cdb_len = 10; 740 cmd->cdb_len = 10;
741 741
742 cmd->len = rq->nr_sectors * 512; 742 cmd->len = blk_rq_sectors(rq) * 512;
743} 743}
744 744
745static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, 745static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
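
ub converts the 512-byte units reported by the accessors into device blocks with the capacity shift before packing a READ(10)/WRITE(10) CDB. A hedged sketch of that packing, consistent with the hunk above and the usual 10-byte CDB layout (bytes 2-5 carry the big-endian LBA, bytes 7-8 the block count):

    block = blk_rq_pos(rq)     >> lun->capacity.bshift;   /* 512-byte sectors -> device blocks */
    nblks = blk_rq_sectors(rq) >> lun->capacity.bshift;

    cmd->cdb[0] = (cmd->dir == UB_DIR_READ) ? READ_10 : WRITE_10;
    cmd->cdb[2] = block >> 24;
    cmd->cdb[3] = block >> 16;
    cmd->cdb[4] = block >> 8;
    cmd->cdb[5] = block;
    cmd->cdb[7] = nblks >> 8;
    cmd->cdb[8] = nblks;
    cmd->cdb_len = 10;

    cmd->len = blk_rq_sectors(rq) * 512;                   /* transfer length in bytes */
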
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index e821eed7132f..2086cb12d3ec 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -252,7 +252,7 @@ static int send_request(struct request *req)
252 struct viodasd_device *d; 252 struct viodasd_device *d;
253 unsigned long flags; 253 unsigned long flags;
254 254
255 start = (u64)req->sector << 9; 255 start = (u64)blk_rq_pos(req) << 9;
256 256
257 if (rq_data_dir(req) == READ) { 257 if (rq_data_dir(req) == READ) {
258 direction = DMA_FROM_DEVICE; 258 direction = DMA_FROM_DEVICE;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 50745e64414e..1980ab456356 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -85,7 +85,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
85 vbr->req = req; 85 vbr->req = req;
86 if (blk_fs_request(vbr->req)) { 86 if (blk_fs_request(vbr->req)) {
87 vbr->out_hdr.type = 0; 87 vbr->out_hdr.type = 0;
88 vbr->out_hdr.sector = vbr->req->sector; 88 vbr->out_hdr.sector = blk_rq_pos(vbr->req);
89 vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); 89 vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
90 } else if (blk_pc_request(vbr->req)) { 90 } else if (blk_pc_request(vbr->req)) {
91 vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD; 91 vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index 14be4c1ed1aa..4ef88018bcde 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -306,8 +306,8 @@ static void do_xd_request (struct request_queue * q)
306 return; 306 return;
307 307
308 while ((req = elv_next_request(q)) != NULL) { 308 while ((req = elv_next_request(q)) != NULL) {
309 unsigned block = req->sector; 309 unsigned block = blk_rq_pos(req);
310 unsigned count = req->nr_sectors; 310 unsigned count = blk_rq_sectors(req);
311 XD_INFO *disk = req->rq_disk->private_data; 311 XD_INFO *disk = req->rq_disk->private_data;
312 int res = 0; 312 int res = 0;
313 int retry; 313 int retry;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index b4564479f641..91fc56597e9b 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -231,7 +231,7 @@ static int blkif_queue_request(struct request *req)
231 info->shadow[id].request = (unsigned long)req; 231 info->shadow[id].request = (unsigned long)req;
232 232
233 ring_req->id = id; 233 ring_req->id = id;
234 ring_req->sector_number = (blkif_sector_t)req->sector; 234 ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
235 ring_req->handle = info->handle; 235 ring_req->handle = info->handle;
236 236
237 ring_req->operation = rq_data_dir(req) ? 237 ring_req->operation = rq_data_dir(req) ?
@@ -310,11 +310,10 @@ static void do_blkif_request(struct request_queue *rq)
310 goto wait; 310 goto wait;
311 311
312 pr_debug("do_blk_req %p: cmd %p, sec %lx, " 312 pr_debug("do_blk_req %p: cmd %p, sec %lx, "
313 "(%u/%li) buffer:%p [%s]\n", 313 "(%u/%u) buffer:%p [%s]\n",
314 req, req->cmd, (unsigned long)req->sector, 314 req, req->cmd, (unsigned long)blk_rq_pos(req),
315 req->current_nr_sectors, 315 blk_rq_cur_sectors(req), blk_rq_sectors(req),
316 req->nr_sectors, req->buffer, 316 req->buffer, rq_data_dir(req) ? "write" : "read");
317 rq_data_dir(req) ? "write" : "read");
318 317
319 318
320 blkdev_dequeue_request(req); 319 blkdev_dequeue_request(req);
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 5722931d14c5..97c99b43f881 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -646,13 +646,14 @@ static void ace_fsm_dostate(struct ace_device *ace)
646 /* Okay, it's a data request, set it up for transfer */ 646 /* Okay, it's a data request, set it up for transfer */
647 dev_dbg(ace->dev, 647 dev_dbg(ace->dev,
648 "request: sec=%llx hcnt=%x, ccnt=%x, dir=%i\n", 648 "request: sec=%llx hcnt=%x, ccnt=%x, dir=%i\n",
649 (unsigned long long) req->sector, blk_rq_sectors(req), 649 (unsigned long long)blk_rq_pos(req),
650 req->current_nr_sectors, rq_data_dir(req)); 650 blk_rq_sectors(req), blk_rq_cur_sectors(req),
651 rq_data_dir(req));
651 652
652 ace->req = req; 653 ace->req = req;
653 ace->data_ptr = req->buffer; 654 ace->data_ptr = req->buffer;
654 ace->data_count = req->current_nr_sectors * ACE_BUF_PER_SECTOR; 655 ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR;
655 ace_out32(ace, ACE_MPULBA, req->sector & 0x0FFFFFFF); 656 ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF);
656 657
657 count = blk_rq_sectors(req); 658 count = blk_rq_sectors(req);
658 if (rq_data_dir(req)) { 659 if (rq_data_dir(req)) {
@@ -688,7 +689,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
688 dev_dbg(ace->dev, 689 dev_dbg(ace->dev,
689 "CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n", 690 "CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n",
690 ace->fsm_task, ace->fsm_iter_num, 691 ace->fsm_task, ace->fsm_iter_num,
691 ace->req->current_nr_sectors * 16, 692 blk_rq_cur_sectors(ace->req) * 16,
692 ace->data_count, ace->in_irq); 693 ace->data_count, ace->in_irq);
693 ace_fsm_yield(ace); /* need to poll CFBSY bit */ 694 ace_fsm_yield(ace); /* need to poll CFBSY bit */
694 break; 695 break;
@@ -697,7 +698,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
697 dev_dbg(ace->dev, 698 dev_dbg(ace->dev,
698 "DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n", 699 "DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n",
699 ace->fsm_task, ace->fsm_iter_num, 700 ace->fsm_task, ace->fsm_iter_num,
700 ace->req->current_nr_sectors * 16, 701 blk_rq_cur_sectors(ace->req) * 16,
701 ace->data_count, ace->in_irq); 702 ace->data_count, ace->in_irq);
702 ace_fsm_yieldirq(ace); 703 ace_fsm_yieldirq(ace);
703 break; 704 break;
@@ -721,10 +722,10 @@ static void ace_fsm_dostate(struct ace_device *ace)
721 blk_rq_cur_bytes(ace->req))) { 722 blk_rq_cur_bytes(ace->req))) {
722 /* dev_dbg(ace->dev, "next block; h=%u c=%u\n", 723 /* dev_dbg(ace->dev, "next block; h=%u c=%u\n",
723 * blk_rq_sectors(ace->req), 724 * blk_rq_sectors(ace->req),
724 * ace->req->current_nr_sectors); 725 * blk_rq_cur_sectors(ace->req));
725 */ 726 */
726 ace->data_ptr = ace->req->buffer; 727 ace->data_ptr = ace->req->buffer;
727 ace->data_count = ace->req->current_nr_sectors * 16; 728 ace->data_count = blk_rq_cur_sectors(ace->req) * 16;
728 ace_fsm_yieldirq(ace); 729 ace_fsm_yieldirq(ace);
729 break; 730 break;
730 } 731 }
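
xsysace illustrates why both counts exist: the controller is programmed with the sector count of the whole request, while the CPU copy loop works through one segment at a time. Roughly, from the hunks above (ACE_BUF_PER_SECTOR as in the diff):

    count           = blk_rq_sectors(req);                           /* whole request, for the controller */
    ace->data_ptr   = req->buffer;                                   /* current segment's buffer */
    ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR;  /* buffers to copy for this segment */
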
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index b66ad58a3c38..d4e6b71f514a 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -71,12 +71,12 @@ static void do_z2_request(struct request_queue *q)
71{ 71{
72 struct request *req; 72 struct request *req;
73 while ((req = elv_next_request(q)) != NULL) { 73 while ((req = elv_next_request(q)) != NULL) {
74 unsigned long start = req->sector << 9; 74 unsigned long start = blk_rq_pos(req) << 9;
75 unsigned long len = req->current_nr_sectors << 9; 75 unsigned long len = blk_rq_cur_sectors(req) << 9;
76 76
77 if (start + len > z2ram_size) { 77 if (start + len > z2ram_size) {
78 printk( KERN_ERR DEVICE_NAME ": bad access: block=%lu, count=%u\n", 78 printk( KERN_ERR DEVICE_NAME ": bad access: block=%lu, count=%u\n",
79 req->sector, req->current_nr_sectors); 79 blk_rq_pos(req), blk_rq_cur_sectors(req));
80 __blk_end_request_cur(req, -EIO); 80 __blk_end_request_cur(req, -EIO);
81 continue; 81 continue;
82 } 82 }
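
z2ram shows the common request-loop idiom in its simplest form: convert the position and current-segment length to bytes, bounds-check against the device, and fail only the current chunk on error. As a sketch, taken almost verbatim from the hunk:

    unsigned long start = blk_rq_pos(req) << 9;          /* sectors -> bytes */
    unsigned long len   = blk_rq_cur_sectors(req) << 9;

    if (start + len > z2ram_size) {
            __blk_end_request_cur(req, -EIO);             /* ends only the current chunk */
            continue;
    }
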
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index cab2b1fb2fe7..488423cab51a 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -584,8 +584,8 @@ static void gdrom_readdisk_dma(struct work_struct *work)
584 list_for_each_safe(elem, next, &gdrom_deferred) { 584 list_for_each_safe(elem, next, &gdrom_deferred) {
585 req = list_entry(elem, struct request, queuelist); 585 req = list_entry(elem, struct request, queuelist);
586 spin_unlock(&gdrom_lock); 586 spin_unlock(&gdrom_lock);
587 block = req->sector/GD_TO_BLK + GD_SESSION_OFFSET; 587 block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET;
588 block_cnt = req->nr_sectors/GD_TO_BLK; 588 block_cnt = blk_rq_sectors(req)/GD_TO_BLK;
589 ctrl_outl(PHYSADDR(req->buffer), GDROM_DMA_STARTADDR_REG); 589 ctrl_outl(PHYSADDR(req->buffer), GDROM_DMA_STARTADDR_REG);
590 ctrl_outl(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG); 590 ctrl_outl(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG);
591 ctrl_outl(1, GDROM_DMA_DIRECTION_REG); 591 ctrl_outl(1, GDROM_DMA_DIRECTION_REG);
@@ -661,7 +661,7 @@ static void gdrom_request(struct request_queue *rq)
661 printk(" write request ignored\n"); 661 printk(" write request ignored\n");
662 __blk_end_request_cur(req, -EIO); 662 __blk_end_request_cur(req, -EIO);
663 } 663 }
664 if (req->nr_sectors) 664 if (blk_rq_sectors(req))
665 gdrom_request_handler_dma(req); 665 gdrom_request_handler_dma(req);
666 else 666 else
667 __blk_end_request_cur(req, -EIO); 667 __blk_end_request_cur(req, -EIO);
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index cc3efa096e1a..6e190a93d8df 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -282,7 +282,7 @@ static int send_request(struct request *req)
282 viopath_targetinst(viopath_hostLp), 282 viopath_targetinst(viopath_hostLp),
283 (u64)req, VIOVERSION << 16, 283 (u64)req, VIOVERSION << 16,
284 ((u64)DEVICE_NR(diskinfo) << 48) | dmaaddr, 284 ((u64)DEVICE_NR(diskinfo) << 48) | dmaaddr,
285 (u64)req->sector * 512, len, 0); 285 (u64)blk_rq_pos(req) * 512, len, 0);
286 if (hvrc != HvLpEvent_Rc_Good) { 286 if (hvrc != HvLpEvent_Rc_Good) {
287 printk(VIOCD_KERN_WARNING "hv error on op %d\n", (int)hvrc); 287 printk(VIOCD_KERN_WARNING "hv error on op %d\n", (int)hvrc);
288 return -1; 288 return -1;
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index a41634699f84..9e600d22f40e 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -677,10 +677,10 @@ try_again:
677 continue; 677 continue;
678 } 678 }
679 679
680 t_sec = msb->block_req->sector << 9; 680 t_sec = blk_rq_pos(msb->block_req) << 9;
681 sector_div(t_sec, msb->page_size); 681 sector_div(t_sec, msb->page_size);
682 682
683 count = msb->block_req->nr_sectors << 9; 683 count = blk_rq_sectors(msb->block_req) << 9;
684 count /= msb->page_size; 684 count /= msb->page_size;
685 685
686 param.system = msb->system; 686 param.system = msb->system;
@@ -745,7 +745,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
745 t_len *= msb->page_size; 745 t_len *= msb->page_size;
746 } 746 }
747 } else 747 } else
748 t_len = msb->block_req->nr_sectors << 9; 748 t_len = blk_rq_sectors(msb->block_req) << 9;
749 749
750 dev_dbg(&card->dev, "transferred %x (%d)\n", t_len, error); 750 dev_dbg(&card->dev, "transferred %x (%d)\n", t_len, error);
751 751
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index 56e60f0d5312..510eb4726374 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -761,7 +761,7 @@ static int i2o_block_transfer(struct request *req)
761 break; 761 break;
762 762
763 case CACHE_SMARTFETCH: 763 case CACHE_SMARTFETCH:
764 if (req->nr_sectors > 16) 764 if (blk_rq_sectors(req) > 16)
765 ctl_flags = 0x201F0008; 765 ctl_flags = 0x201F0008;
766 else 766 else
767 ctl_flags = 0x001F0000; 767 ctl_flags = 0x001F0000;
@@ -781,13 +781,13 @@ static int i2o_block_transfer(struct request *req)
781 ctl_flags = 0x001F0010; 781 ctl_flags = 0x001F0010;
782 break; 782 break;
783 case CACHE_SMARTBACK: 783 case CACHE_SMARTBACK:
784 if (req->nr_sectors > 16) 784 if (blk_rq_sectors(req) > 16)
785 ctl_flags = 0x001F0004; 785 ctl_flags = 0x001F0004;
786 else 786 else
787 ctl_flags = 0x001F0010; 787 ctl_flags = 0x001F0010;
788 break; 788 break;
789 case CACHE_SMARTTHROUGH: 789 case CACHE_SMARTTHROUGH:
790 if (req->nr_sectors > 16) 790 if (blk_rq_sectors(req) > 16)
791 ctl_flags = 0x001F0004; 791 ctl_flags = 0x001F0004;
792 else 792 else
793 ctl_flags = 0x001F0010; 793 ctl_flags = 0x001F0010;
@@ -827,22 +827,24 @@ static int i2o_block_transfer(struct request *req)
827 827
828 *mptr++ = cpu_to_le32(scsi_flags); 828 *mptr++ = cpu_to_le32(scsi_flags);
829 829
830 *((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec); 830 *((u32 *) & cmd[2]) = cpu_to_be32(blk_rq_pos(req) * hwsec);
831 *((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec); 831 *((u16 *) & cmd[7]) = cpu_to_be16(blk_rq_sectors(req) * hwsec);
832 832
833 memcpy(mptr, cmd, 10); 833 memcpy(mptr, cmd, 10);
834 mptr += 4; 834 mptr += 4;
835 *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT); 835 *mptr++ =
836 cpu_to_le32(blk_rq_sectors(req) << KERNEL_SECTOR_SHIFT);
836 } else 837 } else
837#endif 838#endif
838 { 839 {
839 msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid); 840 msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
840 *mptr++ = cpu_to_le32(ctl_flags); 841 *mptr++ = cpu_to_le32(ctl_flags);
841 *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
842 *mptr++ = 842 *mptr++ =
843 cpu_to_le32((u32) (req->sector << KERNEL_SECTOR_SHIFT)); 843 cpu_to_le32(blk_rq_sectors(req) << KERNEL_SECTOR_SHIFT);
844 *mptr++ =
845 cpu_to_le32((u32) (blk_rq_pos(req) << KERNEL_SECTOR_SHIFT));
844 *mptr++ = 846 *mptr++ =
845 cpu_to_le32(req->sector >> (32 - KERNEL_SECTOR_SHIFT)); 847 cpu_to_le32(blk_rq_pos(req) >> (32 - KERNEL_SECTOR_SHIFT));
846 } 848 }
847 849
848 if (!i2o_block_sglist_alloc(c, ireq, &mptr)) { 850 if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
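
i2o_block scales the position and count by hwsec (hardware sectors per 512-byte kernel sector) when it builds the CDB, and by KERNEL_SECTOR_SHIFT when the message needs byte values; the byte offset is emitted as two 32-bit halves. A sketch with the names from the hunks (KERNEL_SECTOR_SHIFT assumed to be 9):

    *((u32 *)&cmd[2]) = cpu_to_be32(blk_rq_pos(req) * hwsec);      /* start, in hardware sectors */
    *((u16 *)&cmd[7]) = cpu_to_be16(blk_rq_sectors(req) * hwsec);  /* count, in hardware sectors */

    *mptr++ = cpu_to_le32(blk_rq_sectors(req) << KERNEL_SECTOR_SHIFT);     /* length in bytes */
    *mptr++ = cpu_to_le32((u32)(blk_rq_pos(req) << KERNEL_SECTOR_SHIFT));  /* byte offset, low half */
    *mptr++ = cpu_to_le32(blk_rq_pos(req) >> (32 - KERNEL_SECTOR_SHIFT));  /* byte offset, high half */
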
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index fe8041e619ea..949e99770ad6 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -243,7 +243,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
243 brq.mrq.cmd = &brq.cmd; 243 brq.mrq.cmd = &brq.cmd;
244 brq.mrq.data = &brq.data; 244 brq.mrq.data = &brq.data;
245 245
246 brq.cmd.arg = req->sector; 246 brq.cmd.arg = blk_rq_pos(req);
247 if (!mmc_card_blockaddr(card)) 247 if (!mmc_card_blockaddr(card))
248 brq.cmd.arg <<= 9; 248 brq.cmd.arg <<= 9;
249 brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; 249 brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
@@ -251,7 +251,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
251 brq.stop.opcode = MMC_STOP_TRANSMISSION; 251 brq.stop.opcode = MMC_STOP_TRANSMISSION;
252 brq.stop.arg = 0; 252 brq.stop.arg = 0;
253 brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; 253 brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
254 brq.data.blocks = req->nr_sectors; 254 brq.data.blocks = blk_rq_sectors(req);
255 255
256 /* 256 /*
257 * After a read error, we redo the request one sector at a time 257 * After a read error, we redo the request one sector at a time
@@ -293,7 +293,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
293 * Adjust the sg list so it is the same size as the 293 * Adjust the sg list so it is the same size as the
294 * request. 294 * request.
295 */ 295 */
296 if (brq.data.blocks != req->nr_sectors) { 296 if (brq.data.blocks != blk_rq_sectors(req)) {
297 int i, data_size = brq.data.blocks << 9; 297 int i, data_size = brq.data.blocks << 9;
298 struct scatterlist *sg; 298 struct scatterlist *sg;
299 299
@@ -344,8 +344,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
344 printk(KERN_ERR "%s: error %d transferring data," 344 printk(KERN_ERR "%s: error %d transferring data,"
345 " sector %u, nr %u, card status %#x\n", 345 " sector %u, nr %u, card status %#x\n",
346 req->rq_disk->disk_name, brq.data.error, 346 req->rq_disk->disk_name, brq.data.error,
347 (unsigned)req->sector, 347 (unsigned)blk_rq_pos(req),
348 (unsigned)req->nr_sectors, status); 348 (unsigned)blk_rq_sectors(req), status);
349 } 349 }
350 350
351 if (brq.stop.error) { 351 if (brq.stop.error) {
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 76c4c8d13073..4ea2e67ac97c 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -47,8 +47,8 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
47 unsigned long block, nsect; 47 unsigned long block, nsect;
48 char *buf; 48 char *buf;
49 49
50 block = req->sector << 9 >> tr->blkshift; 50 block = blk_rq_pos(req) << 9 >> tr->blkshift;
51 nsect = req->current_nr_sectors << 9 >> tr->blkshift; 51 nsect = blk_rq_cur_sectors(req) << 9 >> tr->blkshift;
52 52
53 buf = req->buffer; 53 buf = req->buffer;
54 54
@@ -59,7 +59,8 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
59 if (!blk_fs_request(req)) 59 if (!blk_fs_request(req))
60 return -EIO; 60 return -EIO;
61 61
62 if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk)) 62 if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
63 get_capacity(req->rq_disk))
63 return -EIO; 64 return -EIO;
64 65
65 switch(rq_data_dir(req)) { 66 switch(rq_data_dir(req)) {
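
mtd_blkdevs translates 512-byte sectors into translation-layer blocks via blkshift and rejects requests that run past the disk's capacity. Condensed from the hunks above (sketch):

    block = blk_rq_pos(req) << 9 >> tr->blkshift;          /* first translation-layer block */
    nsect = blk_rq_cur_sectors(req) << 9 >> tr->blkshift;  /* blocks in the current segment */

    if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > get_capacity(req->rq_disk))
            return -EIO;                                    /* would run past the device */
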
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index fabec95686b0..7df03c7aea0d 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -603,7 +603,7 @@ static void dasd_profile_end(struct dasd_block *block,
603 if (dasd_profile_level != DASD_PROFILE_ON) 603 if (dasd_profile_level != DASD_PROFILE_ON)
604 return; 604 return;
605 605
606 sectors = req->nr_sectors; 606 sectors = blk_rq_sectors(req);
607 if (!cqr->buildclk || !cqr->startclk || 607 if (!cqr->buildclk || !cqr->startclk ||
608 !cqr->stopclk || !cqr->endclk || 608 !cqr->stopclk || !cqr->endclk ||
609 !sectors) 609 !sectors)
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index b9a7f7733446..2efaddfae560 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -505,8 +505,9 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
505 return ERR_PTR(-EINVAL); 505 return ERR_PTR(-EINVAL);
506 blksize = block->bp_block; 506 blksize = block->bp_block;
507 /* Calculate record id of first and last block. */ 507 /* Calculate record id of first and last block. */
508 first_rec = req->sector >> block->s2b_shift; 508 first_rec = blk_rq_pos(req) >> block->s2b_shift;
509 last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift; 509 last_rec =
510 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
510 /* Check struct bio and count the number of blocks for the request. */ 511 /* Check struct bio and count the number of blocks for the request. */
511 count = 0; 512 count = 0;
512 rq_for_each_segment(bv, req, iter) { 513 rq_for_each_segment(bv, req, iter) {
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index cb52da033f06..a41c94053e64 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2354,10 +2354,10 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2354 blksize = block->bp_block; 2354 blksize = block->bp_block;
2355 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 2355 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
2356 /* Calculate record id of first and last block. */ 2356 /* Calculate record id of first and last block. */
2357 first_rec = first_trk = req->sector >> block->s2b_shift; 2357 first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
2358 first_offs = sector_div(first_trk, blk_per_trk); 2358 first_offs = sector_div(first_trk, blk_per_trk);
2359 last_rec = last_trk = 2359 last_rec = last_trk =
2360 (req->sector + req->nr_sectors - 1) >> block->s2b_shift; 2360 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
2361 last_offs = sector_div(last_trk, blk_per_trk); 2361 last_offs = sector_div(last_trk, blk_per_trk);
2362 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk); 2362 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
2363 2363
@@ -2420,7 +2420,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
2420 private = (struct dasd_eckd_private *) cqr->block->base->private; 2420 private = (struct dasd_eckd_private *) cqr->block->base->private;
2421 blksize = cqr->block->bp_block; 2421 blksize = cqr->block->bp_block;
2422 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 2422 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
2423 recid = req->sector >> cqr->block->s2b_shift; 2423 recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
2424 ccw = cqr->cpaddr; 2424 ccw = cqr->cpaddr;
2425 /* Skip over define extent & locate record. */ 2425 /* Skip over define extent & locate record. */
2426 ccw++; 2426 ccw++;
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index a3eb6fd14673..8912358daa2f 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -270,8 +270,9 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
270 return ERR_PTR(-EINVAL); 270 return ERR_PTR(-EINVAL);
271 blksize = block->bp_block; 271 blksize = block->bp_block;
272 /* Calculate record id of first and last block. */ 272 /* Calculate record id of first and last block. */
273 first_rec = req->sector >> block->s2b_shift; 273 first_rec = blk_rq_pos(req) >> block->s2b_shift;
274 last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift; 274 last_rec =
275 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
275 /* Check struct bio and count the number of blocks for the request. */ 276 /* Check struct bio and count the number of blocks for the request. */
276 count = 0; 277 count = 0;
277 cidaw = 0; 278 cidaw = 0;
@@ -309,7 +310,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
309 ccw = cqr->cpaddr; 310 ccw = cqr->cpaddr;
310 /* First ccw is define extent. */ 311 /* First ccw is define extent. */
311 define_extent(ccw++, cqr->data, rq_data_dir(req), 312 define_extent(ccw++, cqr->data, rq_data_dir(req),
312 block->bp_block, req->sector, req->nr_sectors); 313 block->bp_block, blk_rq_pos(req), blk_rq_sectors(req));
313 /* Build locate_record + read/write ccws. */ 314 /* Build locate_record + read/write ccws. */
314 idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data)); 315 idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data));
315 LO_data = (struct LO_fba_data *) (idaws + cidaw); 316 LO_data = (struct LO_fba_data *) (idaws + cidaw);
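
The s390 dasd variants all derive device record ids the same way, shifting the sector position and length by s2b_shift (the sectors-to-blocks shift). The shared pattern, as a sketch:

    /* first and last device blocks touched by the request (s2b_shift from the hunks) */
    first_rec = blk_rq_pos(req) >> block->s2b_shift;
    last_rec  = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
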
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 5f8e8ef43dd3..2d00a383a475 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -1134,7 +1134,7 @@ tape_34xx_bread(struct tape_device *device, struct request *req)
1134 /* Setup ccws. */ 1134 /* Setup ccws. */
1135 request->op = TO_BLOCK; 1135 request->op = TO_BLOCK;
1136 start_block = (struct tape_34xx_block_id *) request->cpdata; 1136 start_block = (struct tape_34xx_block_id *) request->cpdata;
1137 start_block->block = req->sector >> TAPEBLOCK_HSEC_S2B; 1137 start_block->block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B;
1138 DBF_EVENT(6, "start_block = %i\n", start_block->block); 1138 DBF_EVENT(6, "start_block = %i\n", start_block->block);
1139 1139
1140 ccw = request->cpaddr; 1140 ccw = request->cpaddr;
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 823b05bd0dd7..c453b2f3e9f4 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -633,7 +633,7 @@ tape_3590_bread(struct tape_device *device, struct request *req)
633 struct req_iterator iter; 633 struct req_iterator iter;
634 634
635 DBF_EVENT(6, "xBREDid:"); 635 DBF_EVENT(6, "xBREDid:");
636 start_block = req->sector >> TAPEBLOCK_HSEC_S2B; 636 start_block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B;
637 DBF_EVENT(6, "start_block = %i\n", start_block); 637 DBF_EVENT(6, "start_block = %i\n", start_block);
638 638
639 rq_for_each_segment(bv, req, iter) 639 rq_for_each_segment(bv, req, iter)
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 86596d3813b5..5d035e4939dc 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -87,7 +87,7 @@ __tapeblock_end_request(struct tape_request *ccw_req, void *data)
87 if (ccw_req->rc == 0) 87 if (ccw_req->rc == 0)
88 /* Update position. */ 88 /* Update position. */
89 device->blk_data.block_position = 89 device->blk_data.block_position =
90 (req->sector + req->nr_sectors) >> TAPEBLOCK_HSEC_S2B; 90 (blk_rq_pos(req) + blk_rq_sectors(req)) >> TAPEBLOCK_HSEC_S2B;
91 else 91 else
92 /* We lost the position information due to an error. */ 92 /* We lost the position information due to an error. */
93 device->blk_data.block_position = -1; 93 device->blk_data.block_position = -1;
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index 09617884a50b..2132c906e53a 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -188,8 +188,8 @@ static void jsfd_do_request(struct request_queue *q)
188 188
189 while ((req = elv_next_request(q)) != NULL) { 189 while ((req = elv_next_request(q)) != NULL) {
190 struct jsfd_part *jdp = req->rq_disk->private_data; 190 struct jsfd_part *jdp = req->rq_disk->private_data;
191 unsigned long offset = req->sector << 9; 191 unsigned long offset = blk_rq_pos(req) << 9;
192 size_t len = req->current_nr_sectors << 9; 192 size_t len = blk_rq_cur_sectors(req) << 9;
193 193
194 if ((offset + len) > jdp->dsize) { 194 if ((offset + len) > jdp->dsize) {
195 __blk_end_request_cur(req, -EIO); 195 __blk_end_request_cur(req, -EIO);
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index be5099dd94b5..c7076ce25e21 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1825,7 +1825,7 @@ static int eata2x_queuecommand(struct scsi_cmnd *SCpnt,
1825 if (linked_comm && SCpnt->device->queue_depth > 2 1825 if (linked_comm && SCpnt->device->queue_depth > 2
1826 && TLDEV(SCpnt->device->type)) { 1826 && TLDEV(SCpnt->device->type)) {
1827 ha->cp_stat[i] = READY; 1827 ha->cp_stat[i] = READY;
1828 flush_dev(SCpnt->device, SCpnt->request->sector, ha, 0); 1828 flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), ha, 0);
1829 return 0; 1829 return 0;
1830 } 1830 }
1831 1831
@@ -2144,13 +2144,13 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
2144 if (!cpp->din) 2144 if (!cpp->din)
2145 input_only = 0; 2145 input_only = 0;
2146 2146
2147 if (SCpnt->request->sector < minsec) 2147 if (blk_rq_pos(SCpnt->request) < minsec)
2148 minsec = SCpnt->request->sector; 2148 minsec = blk_rq_pos(SCpnt->request);
2149 if (SCpnt->request->sector > maxsec) 2149 if (blk_rq_pos(SCpnt->request) > maxsec)
2150 maxsec = SCpnt->request->sector; 2150 maxsec = blk_rq_pos(SCpnt->request);
2151 2151
2152 sl[n] = SCpnt->request->sector; 2152 sl[n] = blk_rq_pos(SCpnt->request);
2153 ioseek += SCpnt->request->nr_sectors; 2153 ioseek += blk_rq_sectors(SCpnt->request);
2154 2154
2155 if (!n) 2155 if (!n)
2156 continue; 2156 continue;
@@ -2190,7 +2190,7 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
2190 k = il[n]; 2190 k = il[n];
2191 cpp = &ha->cp[k]; 2191 cpp = &ha->cp[k];
2192 SCpnt = cpp->SCpnt; 2192 SCpnt = cpp->SCpnt;
2193 ll[n] = SCpnt->request->nr_sectors; 2193 ll[n] = blk_rq_sectors(SCpnt->request);
2194 pl[n] = SCpnt->serial_number; 2194 pl[n] = SCpnt->serial_number;
2195 2195
2196 if (!n) 2196 if (!n)
@@ -2236,12 +2236,12 @@ static int reorder(struct hostdata *ha, unsigned long cursec,
2236 cpp = &ha->cp[k]; 2236 cpp = &ha->cp[k];
2237 SCpnt = cpp->SCpnt; 2237 SCpnt = cpp->SCpnt;
2238 scmd_printk(KERN_INFO, SCpnt, 2238 scmd_printk(KERN_INFO, SCpnt,
2239 "%s pid %ld mb %d fc %d nr %d sec %ld ns %ld" 2239 "%s pid %ld mb %d fc %d nr %d sec %ld ns %u"
2240 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n", 2240 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
2241 (ihdlr ? "ihdlr" : "qcomm"), 2241 (ihdlr ? "ihdlr" : "qcomm"),
2242 SCpnt->serial_number, k, flushcount, 2242 SCpnt->serial_number, k, flushcount,
2243 n_ready, SCpnt->request->sector, 2243 n_ready, blk_rq_pos(SCpnt->request),
2244 SCpnt->request->nr_sectors, cursec, YESNO(s), 2244 blk_rq_sectors(SCpnt->request), cursec, YESNO(s),
2245 YESNO(r), YESNO(rev), YESNO(input_only), 2245 YESNO(r), YESNO(rev), YESNO(input_only),
2246 YESNO(overlap), cpp->din); 2246 YESNO(overlap), cpp->din);
2247 } 2247 }
@@ -2408,7 +2408,7 @@ static irqreturn_t ihdlr(struct Scsi_Host *shost)
2408 2408
2409 if (linked_comm && SCpnt->device->queue_depth > 2 2409 if (linked_comm && SCpnt->device->queue_depth > 2
2410 && TLDEV(SCpnt->device->type)) 2410 && TLDEV(SCpnt->device->type))
2411 flush_dev(SCpnt->device, SCpnt->request->sector, ha, 1); 2411 flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), ha, 1);
2412 2412
2413 tstatus = status_byte(spp->target_status); 2413 tstatus = status_byte(spp->target_status);
2414 2414
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 36fd2e75da1c..a8fab3977116 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1313,10 +1313,10 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1313 uint32_t bgstat = bgf->bgstat; 1313 uint32_t bgstat = bgf->bgstat;
1314 uint64_t failing_sector = 0; 1314 uint64_t failing_sector = 0;
1315 1315
1316 printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%lx " 1316 printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%x "
1317 "bgstat=0x%x bghm=0x%x\n", 1317 "bgstat=0x%x bghm=0x%x\n",
1318 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd), 1318 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
1319 cmd->request->nr_sectors, bgstat, bghm); 1319 blk_rq_sectors(cmd->request), bgstat, bghm);
1320 1320
1321 spin_lock(&_dump_buf_lock); 1321 spin_lock(&_dump_buf_lock);
1322 if (!_dump_buf_done) { 1322 if (!_dump_buf_done) {
@@ -2375,15 +2375,15 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2375 if (cmnd->cmnd[0] == READ_10) 2375 if (cmnd->cmnd[0] == READ_10)
2376 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2376 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2377 "9035 BLKGRD: READ @ sector %llu, " 2377 "9035 BLKGRD: READ @ sector %llu, "
2378 "count %lu\n", 2378 "count %u\n",
2379 (unsigned long long)scsi_get_lba(cmnd), 2379 (unsigned long long)scsi_get_lba(cmnd),
2380 cmnd->request->nr_sectors); 2380 blk_rq_sectors(cmnd->request));
2381 else if (cmnd->cmnd[0] == WRITE_10) 2381 else if (cmnd->cmnd[0] == WRITE_10)
2382 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2382 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2383 "9036 BLKGRD: WRITE @ sector %llu, " 2383 "9036 BLKGRD: WRITE @ sector %llu, "
2384 "count %lu cmd=%p\n", 2384 "count %u cmd=%p\n",
2385 (unsigned long long)scsi_get_lba(cmnd), 2385 (unsigned long long)scsi_get_lba(cmnd),
2386 cmnd->request->nr_sectors, 2386 blk_rq_sectors(cmnd->request),
2387 cmnd); 2387 cmnd);
2388 2388
2389 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 2389 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
@@ -2403,15 +2403,15 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2403 if (cmnd->cmnd[0] == READ_10) 2403 if (cmnd->cmnd[0] == READ_10)
2404 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2404 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2405 "9040 dbg: READ @ sector %llu, " 2405 "9040 dbg: READ @ sector %llu, "
2406 "count %lu\n", 2406 "count %u\n",
2407 (unsigned long long)scsi_get_lba(cmnd), 2407 (unsigned long long)scsi_get_lba(cmnd),
2408 cmnd->request->nr_sectors); 2408 blk_rq_sectors(cmnd->request));
2409 else if (cmnd->cmnd[0] == WRITE_10) 2409 else if (cmnd->cmnd[0] == WRITE_10)
2410 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2410 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2411 "9041 dbg: WRITE @ sector %llu, " 2411 "9041 dbg: WRITE @ sector %llu, "
2412 "count %lu cmd=%p\n", 2412 "count %u cmd=%p\n",
2413 (unsigned long long)scsi_get_lba(cmnd), 2413 (unsigned long long)scsi_get_lba(cmnd),
2414 cmnd->request->nr_sectors, cmnd); 2414 blk_rq_sectors(cmnd->request), cmnd);
2415 else 2415 else
2416 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2416 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2417 "9042 dbg: parser not implemented\n"); 2417 "9042 dbg: parser not implemented\n");
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9ff0ca9988a9..39b3acfc0ddf 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -787,9 +787,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
787 * Next deal with any sectors which we were able to correctly 787 * Next deal with any sectors which we were able to correctly
788 * handle. 788 * handle.
789 */ 789 */
790 SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, " 790 SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
791 "%d bytes done.\n", 791 "%d bytes done.\n",
792 req->nr_sectors, good_bytes)); 792 blk_rq_sectors(req), good_bytes));
793 793
794 /* 794 /*
795 * Recovered errors need reporting, but they're always treated 795 * Recovered errors need reporting, but they're always treated
@@ -968,7 +968,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
968 if (blk_pc_request(req)) 968 if (blk_pc_request(req))
969 sdb->length = req->data_len; 969 sdb->length = req->data_len;
970 else 970 else
971 sdb->length = req->nr_sectors << 9; 971 sdb->length = blk_rq_sectors(req) << 9;
972 return BLKPREP_OK; 972 return BLKPREP_OK;
973} 973}
974 974
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3fcb64b91c43..70c4dd99bbf0 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -383,9 +383,9 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
383 struct scsi_device *sdp = q->queuedata; 383 struct scsi_device *sdp = q->queuedata;
384 struct gendisk *disk = rq->rq_disk; 384 struct gendisk *disk = rq->rq_disk;
385 struct scsi_disk *sdkp; 385 struct scsi_disk *sdkp;
386 sector_t block = rq->sector; 386 sector_t block = blk_rq_pos(rq);
387 sector_t threshold; 387 sector_t threshold;
388 unsigned int this_count = rq->nr_sectors; 388 unsigned int this_count = blk_rq_sectors(rq);
389 int ret, host_dif; 389 int ret, host_dif;
390 390
391 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 391 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
@@ -412,10 +412,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
412 this_count)); 412 this_count));
413 413
414 if (!sdp || !scsi_device_online(sdp) || 414 if (!sdp || !scsi_device_online(sdp) ||
415 block + rq->nr_sectors > get_capacity(disk)) { 415 block + blk_rq_sectors(rq) > get_capacity(disk)) {
416 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, 416 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
417 "Finishing %ld sectors\n", 417 "Finishing %u sectors\n",
418 rq->nr_sectors)); 418 blk_rq_sectors(rq)));
419 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, 419 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
420 "Retry with 0x%p\n", SCpnt)); 420 "Retry with 0x%p\n", SCpnt));
421 goto out; 421 goto out;
@@ -462,7 +462,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
462 * for this. 462 * for this.
463 */ 463 */
464 if (sdp->sector_size == 1024) { 464 if (sdp->sector_size == 1024) {
465 if ((block & 1) || (rq->nr_sectors & 1)) { 465 if ((block & 1) || (blk_rq_sectors(rq) & 1)) {
466 scmd_printk(KERN_ERR, SCpnt, 466 scmd_printk(KERN_ERR, SCpnt,
467 "Bad block number requested\n"); 467 "Bad block number requested\n");
468 goto out; 468 goto out;
@@ -472,7 +472,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
472 } 472 }
473 } 473 }
474 if (sdp->sector_size == 2048) { 474 if (sdp->sector_size == 2048) {
475 if ((block & 3) || (rq->nr_sectors & 3)) { 475 if ((block & 3) || (blk_rq_sectors(rq) & 3)) {
476 scmd_printk(KERN_ERR, SCpnt, 476 scmd_printk(KERN_ERR, SCpnt,
477 "Bad block number requested\n"); 477 "Bad block number requested\n");
478 goto out; 478 goto out;
@@ -482,7 +482,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
482 } 482 }
483 } 483 }
484 if (sdp->sector_size == 4096) { 484 if (sdp->sector_size == 4096) {
485 if ((block & 7) || (rq->nr_sectors & 7)) { 485 if ((block & 7) || (blk_rq_sectors(rq) & 7)) {
486 scmd_printk(KERN_ERR, SCpnt, 486 scmd_printk(KERN_ERR, SCpnt,
487 "Bad block number requested\n"); 487 "Bad block number requested\n");
488 goto out; 488 goto out;
@@ -511,10 +511,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
511 } 511 }
512 512
513 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, 513 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
514 "%s %d/%ld 512 byte blocks.\n", 514 "%s %d/%u 512 byte blocks.\n",
515 (rq_data_dir(rq) == WRITE) ? 515 (rq_data_dir(rq) == WRITE) ?
516 "writing" : "reading", this_count, 516 "writing" : "reading", this_count,
517 rq->nr_sectors)); 517 blk_rq_sectors(rq)));
518 518
519 /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */ 519 /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */
520 host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type); 520 host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
@@ -970,8 +970,8 @@ static struct block_device_operations sd_fops = {
970 970
971static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) 971static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
972{ 972{
973 u64 start_lba = scmd->request->sector; 973 u64 start_lba = blk_rq_pos(scmd->request);
974 u64 end_lba = scmd->request->sector + (scsi_bufflen(scmd) / 512); 974 u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
975 u64 bad_lba; 975 u64 bad_lba;
976 int info_valid; 976 int info_valid;
977 977
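
sd uses the total-sector count both for the capacity check and for the alignment checks on 1 KiB, 2 KiB and 4 KiB logical sectors; the mask is simply (sector_size / 512) - 1. Sketch of the 4 KiB case from the hunks above:

    if (sdp->sector_size == 4096) {
            if ((block & 7) || (blk_rq_sectors(rq) & 7)) {
                    scmd_printk(KERN_ERR, SCpnt, "Bad block number requested\n");
                    goto out;                  /* start or length not 4 KiB aligned */
            }
    }
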
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 184dff492797..82f14a9482d0 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -507,7 +507,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
507 sector_sz = scmd->device->sector_size; 507 sector_sz = scmd->device->sector_size;
508 sectors = good_bytes / sector_sz; 508 sectors = good_bytes / sector_sz;
509 509
510 phys = scmd->request->sector & 0xffffffff; 510 phys = blk_rq_pos(scmd->request) & 0xffffffff;
511 if (sector_sz == 4096) 511 if (sector_sz == 4096)
512 phys >>= 3; 512 phys >>= 3;
513 513
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 0e1a0f2d2ad5..fddba53c7fe5 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -292,7 +292,8 @@ static int sr_done(struct scsi_cmnd *SCpnt)
292 if (cd->device->sector_size == 2048) 292 if (cd->device->sector_size == 2048)
293 error_sector <<= 2; 293 error_sector <<= 2;
294 error_sector &= ~(block_sectors - 1); 294 error_sector &= ~(block_sectors - 1);
295 good_bytes = (error_sector - SCpnt->request->sector) << 9; 295 good_bytes = (error_sector -
296 blk_rq_pos(SCpnt->request)) << 9;
296 if (good_bytes < 0 || good_bytes >= this_count) 297 if (good_bytes < 0 || good_bytes >= this_count)
297 good_bytes = 0; 298 good_bytes = 0;
298 /* 299 /*
@@ -349,8 +350,8 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
349 cd->disk->disk_name, block)); 350 cd->disk->disk_name, block));
350 351
351 if (!cd->device || !scsi_device_online(cd->device)) { 352 if (!cd->device || !scsi_device_online(cd->device)) {
352 SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n", 353 SCSI_LOG_HLQUEUE(2, printk("Finishing %u sectors\n",
353 rq->nr_sectors)); 354 blk_rq_sectors(rq)));
354 SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt)); 355 SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt));
355 goto out; 356 goto out;
356 } 357 }
@@ -413,7 +414,7 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
413 /* 414 /*
414 * request doesn't start on hw block boundary, add scatter pads 415 * request doesn't start on hw block boundary, add scatter pads
415 */ 416 */
416 if (((unsigned int)rq->sector % (s_size >> 9)) || 417 if (((unsigned int)blk_rq_pos(rq) % (s_size >> 9)) ||
417 (scsi_bufflen(SCpnt) % s_size)) { 418 (scsi_bufflen(SCpnt) % s_size)) {
418 scmd_printk(KERN_NOTICE, SCpnt, "unaligned transfer\n"); 419 scmd_printk(KERN_NOTICE, SCpnt, "unaligned transfer\n");
419 goto out; 420 goto out;
@@ -422,14 +423,14 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
422 this_count = (scsi_bufflen(SCpnt) >> 9) / (s_size >> 9); 423 this_count = (scsi_bufflen(SCpnt) >> 9) / (s_size >> 9);
423 424
424 425
425 SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n", 426 SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%u 512 byte blocks.\n",
426 cd->cdi.name, 427 cd->cdi.name,
427 (rq_data_dir(rq) == WRITE) ? 428 (rq_data_dir(rq) == WRITE) ?
428 "writing" : "reading", 429 "writing" : "reading",
429 this_count, rq->nr_sectors)); 430 this_count, blk_rq_sectors(rq)));
430 431
431 SCpnt->cmnd[1] = 0; 432 SCpnt->cmnd[1] = 0;
432 block = (unsigned int)rq->sector / (s_size >> 9); 433 block = (unsigned int)blk_rq_pos(rq) / (s_size >> 9);
433 434
434 if (this_count > 0xffff) { 435 if (this_count > 0xffff) {
435 this_count = 0xffff; 436 this_count = 0xffff;
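
sr works in device sectors of s_size bytes (2048 on a data CD), so the hunks above divide the 512-byte position by s_size >> 9 and bail out on transfers that do not start and end on device-sector boundaries. Sketch:

    if (((unsigned int)blk_rq_pos(rq) % (s_size >> 9)) ||    /* start not on a device sector */
        (scsi_bufflen(SCpnt) % s_size))                       /* length not whole device sectors */
            goto out;                                         /* reject the unaligned transfer */

    block = (unsigned int)blk_rq_pos(rq) / (s_size >> 9);     /* first device sector */
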
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 601e95141cbe..54023d41fd15 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1306,7 +1306,7 @@ static int u14_34f_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scs
1306 if (linked_comm && SCpnt->device->queue_depth > 2 1306 if (linked_comm && SCpnt->device->queue_depth > 2
1307 && TLDEV(SCpnt->device->type)) { 1307 && TLDEV(SCpnt->device->type)) {
1308 HD(j)->cp_stat[i] = READY; 1308 HD(j)->cp_stat[i] = READY;
1309 flush_dev(SCpnt->device, SCpnt->request->sector, j, FALSE); 1309 flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, FALSE);
1310 return 0; 1310 return 0;
1311 } 1311 }
1312 1312
@@ -1610,11 +1610,13 @@ static int reorder(unsigned int j, unsigned long cursec,
1610 1610
1611 if (!(cpp->xdir == DTD_IN)) input_only = FALSE; 1611 if (!(cpp->xdir == DTD_IN)) input_only = FALSE;
1612 1612
1613 if (SCpnt->request->sector < minsec) minsec = SCpnt->request->sector; 1613 if (blk_rq_pos(SCpnt->request) < minsec)
1614 if (SCpnt->request->sector > maxsec) maxsec = SCpnt->request->sector; 1614 minsec = blk_rq_pos(SCpnt->request);
1615 if (blk_rq_pos(SCpnt->request) > maxsec)
1616 maxsec = blk_rq_pos(SCpnt->request);
1615 1617
1616 sl[n] = SCpnt->request->sector; 1618 sl[n] = blk_rq_pos(SCpnt->request);
1617 ioseek += SCpnt->request->nr_sectors; 1619 ioseek += blk_rq_sectors(SCpnt->request);
1618 1620
1619 if (!n) continue; 1621 if (!n) continue;
1620 1622
@@ -1642,7 +1644,7 @@ static int reorder(unsigned int j, unsigned long cursec,
1642 1644
1643 if (!input_only) for (n = 0; n < n_ready; n++) { 1645 if (!input_only) for (n = 0; n < n_ready; n++) {
1644 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; 1646 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
1645 ll[n] = SCpnt->request->nr_sectors; pl[n] = SCpnt->serial_number; 1647 ll[n] = blk_rq_sectors(SCpnt->request); pl[n] = SCpnt->serial_number;
1646 1648
1647 if (!n) continue; 1649 if (!n) continue;
1648 1650
@@ -1666,12 +1668,12 @@ static int reorder(unsigned int j, unsigned long cursec,
1666 if (link_statistics && (overlap || !(flushcount % link_statistics))) 1668 if (link_statistics && (overlap || !(flushcount % link_statistics)))
1667 for (n = 0; n < n_ready; n++) { 1669 for (n = 0; n < n_ready; n++) {
1668 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; 1670 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
1669 printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %ld"\ 1671 printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %u"\
1670 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n", 1672 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
1671 (ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target, 1673 (ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target,
1672 SCpnt->lun, SCpnt->serial_number, k, flushcount, n_ready, 1674 SCpnt->lun, SCpnt->serial_number, k, flushcount, n_ready,
1673 SCpnt->request->sector, SCpnt->request->nr_sectors, cursec, 1675 blk_rq_pos(SCpnt->request), blk_rq_sectors(SCpnt->request),
1674 YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only), 1676 cursec, YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only),
1675 YESNO(overlap), cpp->xdir); 1677 YESNO(overlap), cpp->xdir);
1676 } 1678 }
1677#endif 1679#endif
@@ -1799,7 +1801,7 @@ static irqreturn_t ihdlr(unsigned int j)
1799 1801
1800 if (linked_comm && SCpnt->device->queue_depth > 2 1802 if (linked_comm && SCpnt->device->queue_depth > 2
1801 && TLDEV(SCpnt->device->type)) 1803 && TLDEV(SCpnt->device->type))
1802 flush_dev(SCpnt->device, SCpnt->request->sector, j, TRUE); 1804 flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, TRUE);
1803 1805
1804 tstatus = status_byte(spp->target_status); 1806 tstatus = status_byte(spp->target_status);
1805 1807
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 43b50d36925c..3878d1dc7f59 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -270,7 +270,7 @@ static inline unsigned char scsi_get_prot_type(struct scsi_cmnd *scmd)
270 270
271static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd) 271static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd)
272{ 272{
273 return scmd->request->sector; 273 return blk_rq_pos(scmd->request);
274} 274}
275 275
276static inline unsigned scsi_prot_sg_count(struct scsi_cmnd *cmd) 276static inline unsigned scsi_prot_sg_count(struct scsi_cmnd *cmd)
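
Finally, scsi_get_lba() becomes a thin wrapper around blk_rq_pos(), so SCSI LLD code that already uses the helper (such as the lpfc hunks above) picks up the accessor without further changes. Illustrative use:

    printk(KERN_ERR "lba 0x%llx blk cnt 0x%x\n",
           (unsigned long long)scsi_get_lba(cmd),
           blk_rq_sectors(cmd->request));
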