author		Tejun Heo <tj@kernel.org>	2009-05-07 09:24:39 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2009-05-11 03:50:54 -0400
commit		83096ebf1263b2c1ee5e653ba37d993d02e3eb7b (patch)
tree		2226b71a616ec1cb2d37511c6a09ba9507a1cd69 /block
parent		5b93629b4509c03ffa87a9316412fedf6f58cb37 (diff)
block: convert to pos and nr_sectors accessors
With recent cleanups, there is no place where a low level driver
directly manipulates request fields.  This means that the 'hard'
request fields always equal the !hard fields.  Convert all
rq->sectors, nr_sectors and current_nr_sectors references to
accessors.

While at it, drop the superfluous blk_rq_pos() < 0 test in swim.c.

[ Impact: use pos and nr_sectors accessors ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Geert Uytterhoeven <Geert.Uytterhoeven@sonycom.com>
Tested-by: Grant Likely <grant.likely@secretlab.ca>
Acked-by: Grant Likely <grant.likely@secretlab.ca>
Tested-by: Adrian McMenamin <adrian@mcmen.demon.co.uk>
Acked-by: Adrian McMenamin <adrian@mcmen.demon.co.uk>
Acked-by: Mike Miller <mike.miller@hp.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
Cc: Borislav Petkov <petkovbb@googlemail.com>
Cc: Sergei Shtylyov <sshtylyov@ru.mvista.com>
Cc: Eric Moore <Eric.Moore@lsi.com>
Cc: Alan Stern <stern@rowland.harvard.edu>
Cc: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Pete Zaitcev <zaitcev@redhat.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Paul Clements <paul.clements@steeleye.com>
Cc: Tim Waugh <tim@cyberelk.net>
Cc: Jeff Garzik <jgarzik@pobox.com>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Alex Dubov <oakad@yahoo.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Dario Ballabio <ballabio_dario@emc.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: unsik Kim <donari75@gmail.com>
Cc: Laurent Vivier <Laurent@lvivier.info>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
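For reference, the accessors this patch converts to were introduced by the parent commit (5b93629b4509); a minimal sketch of their shape at this point in the series, assuming the 'hard' request fields the message refers to (the authoritative definitions live in include/linux/blkdev.h):

	/* Sketch only -- thin readers over the request's hard fields. */
	static inline sector_t blk_rq_pos(const struct request *rq)
	{
		return rq->hard_sector;		/* next sector to complete */
	}

	static inline unsigned int blk_rq_sectors(const struct request *rq)
	{
		return rq->hard_nr_sectors;	/* sectors left in the request */
	}

	static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
	{
		return rq->hard_cur_sectors;	/* sectors left in current segment */
	}

Note that blk_rq_sectors() returns unsigned int where rq->nr_sectors was unsigned long, which is why the printk format in blk-core.c below changes from %lu to %u.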
Diffstat (limited to 'block')
-rw-r--r--	block/as-iosched.c	18
-rw-r--r--	block/blk-barrier.c	2
-rw-r--r--	block/blk-core.c	17
-rw-r--r--	block/blk-merge.c	10
-rw-r--r--	block/cfq-iosched.c	18
-rw-r--r--	block/deadline-iosched.c	2
-rw-r--r--	block/elevator.c	22
7 files changed, 45 insertions(+), 44 deletions(-)
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 45bd07059c28..7a12cf6ee1d3 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -306,8 +306,8 @@ as_choose_req(struct as_data *ad, struct request *rq1, struct request *rq2)
 	data_dir = rq_is_sync(rq1);
 
 	last = ad->last_sector[data_dir];
-	s1 = rq1->sector;
-	s2 = rq2->sector;
+	s1 = blk_rq_pos(rq1);
+	s2 = blk_rq_pos(rq2);
 
 	BUG_ON(data_dir != rq_is_sync(rq2));
 
@@ -566,13 +566,15 @@ static void as_update_iohist(struct as_data *ad, struct as_io_context *aic,
 		as_update_thinktime(ad, aic, thinktime);
 
 		/* Calculate read -> read seek distance */
-		if (aic->last_request_pos < rq->sector)
-			seek_dist = rq->sector - aic->last_request_pos;
+		if (aic->last_request_pos < blk_rq_pos(rq))
+			seek_dist = blk_rq_pos(rq) -
+					aic->last_request_pos;
 		else
-			seek_dist = aic->last_request_pos - rq->sector;
+			seek_dist = aic->last_request_pos -
+					blk_rq_pos(rq);
 		as_update_seekdist(ad, aic, seek_dist);
 	}
-	aic->last_request_pos = rq->sector + rq->nr_sectors;
+	aic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
 	set_bit(AS_TASK_IOSTARTED, &aic->state);
 	spin_unlock(&aic->lock);
 }
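The branchy absolute difference kept above is deliberate: sector_t is unsigned, so subtracting in the wrong order would wrap rather than go negative on a backward seek. Distilled into a hypothetical helper (a sketch, not part of the patch):

	/* Absolute distance between two positions; sector_t is unsigned,
	 * so compare first instead of taking abs() of a difference. */
	static inline sector_t seek_distance(sector_t last, sector_t pos)
	{
		return last < pos ? pos - last : last - pos;
	}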
@@ -587,7 +589,7 @@ static int as_close_req(struct as_data *ad, struct as_io_context *aic,
 {
 	unsigned long delay;	/* jiffies */
 	sector_t last = ad->last_sector[ad->batch_data_dir];
-	sector_t next = rq->sector;
+	sector_t next = blk_rq_pos(rq);
 	sector_t delta;	/* acceptable close offset (in sectors) */
 	sector_t s;
 
@@ -981,7 +983,7 @@ static void as_move_to_dispatch(struct as_data *ad, struct request *rq)
 	 * This has to be set in order to be correctly updated by
 	 * as_find_next_rq
 	 */
-	ad->last_sector[data_dir] = rq->sector + rq->nr_sectors;
+	ad->last_sector[data_dir] = blk_rq_pos(rq) + blk_rq_sectors(rq);
 
 	if (data_dir == BLK_RW_SYNC) {
 		struct io_context *ioc = RQ_IOC(rq);
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index c167de5b9eff..8713c2fbc4f6 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -324,7 +324,7 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 	/*
 	 * The driver must store the error location in ->bi_sector, if
 	 * it supports it. For non-stacked drivers, this should be copied
-	 * from rq->sector.
+	 * from blk_rq_pos(rq).
 	 */
 	if (error_sector)
 		*error_sector = bio->bi_sector;
diff --git a/block/blk-core.c b/block/blk-core.c
index 895e55b74a40..82dc20621c06 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -72,7 +72,7 @@ static void drive_stat_acct(struct request *rq, int new_io)
 		return;
 
 	cpu = part_stat_lock();
-	part = disk_map_sector_rcu(rq->rq_disk, rq->sector);
+	part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
 
 	if (!new_io)
 		part_stat_inc(cpu, part, merges[rw]);
@@ -185,10 +185,9 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
 		rq->cmd_flags);
 
-	printk(KERN_INFO "  sector %llu, nr/cnr %lu/%u\n",
-	       (unsigned long long)rq->sector,
-	       rq->nr_sectors,
-	       rq->current_nr_sectors);
+	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
+	       (unsigned long long)blk_rq_pos(rq),
+	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
 	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, len %u\n",
 	       rq->bio, rq->biotail,
 	       rq->buffer, rq->data_len);
@@ -1557,7 +1556,7 @@ EXPORT_SYMBOL(submit_bio);
  */
 int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 {
-	if (rq->nr_sectors > q->max_sectors ||
+	if (blk_rq_sectors(rq) > q->max_sectors ||
 	    rq->data_len > q->max_hw_sectors << 9) {
 		printk(KERN_ERR "%s: over max size limit.\n", __func__);
 		return -EIO;
@@ -1645,7 +1644,7 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
 		int cpu;
 
 		cpu = part_stat_lock();
-		part = disk_map_sector_rcu(req->rq_disk, req->sector);
+		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 		part_stat_add(cpu, part, sectors[rw], bytes >> 9);
 		part_stat_unlock();
 	}
@@ -1665,7 +1664,7 @@ static void blk_account_io_done(struct request *req)
 		int cpu;
 
 		cpu = part_stat_lock();
-		part = disk_map_sector_rcu(req->rq_disk, req->sector);
+		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 
 		part_stat_inc(cpu, part, ios[rw]);
 		part_stat_add(cpu, part, ticks[rw], duration);
@@ -1846,7 +1845,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 	if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
 		printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
 				req->rq_disk ? req->rq_disk->disk_name : "?",
-				(unsigned long long)req->sector);
+				(unsigned long long)blk_rq_pos(req));
 	}
 
 	blk_account_io_completion(req, nr_bytes);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 23d2a6fe34a3..bf62a87a9da2 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -259,7 +259,7 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 	else
 		max_sectors = q->max_sectors;
 
-	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
+	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
 		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
@@ -284,7 +284,7 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		max_sectors = q->max_sectors;
 
 
-	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
+	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
 		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
@@ -315,7 +315,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 	/*
 	 * Will it become too large?
 	 */
-	if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
+	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > q->max_sectors)
 		return 0;
 
 	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
@@ -345,7 +345,7 @@ static void blk_account_io_merge(struct request *req)
 	int cpu;
 
 	cpu = part_stat_lock();
-	part = disk_map_sector_rcu(req->rq_disk, req->sector);
+	part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 
 	part_round_stats(cpu, part);
 	part_dec_in_flight(part);
@@ -366,7 +366,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
 	/*
 	 * not contiguous
 	 */
-	if (req->sector + req->nr_sectors != next->sector)
+	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
 		return 0;
 
 	if (rq_data_dir(req) != rq_data_dir(next)
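The test above is the classic contiguity check: 'next' can only be merged onto the back of 'req' when req ends exactly where next begins. As a hypothetical helper (sketch only; all quantities are 512-byte sectors):

	static inline bool rq_contiguous(struct request *a, struct request *b)
	{
		/* a's one-past-the-end sector must equal b's first sector */
		return blk_rq_pos(a) + blk_rq_sectors(a) == blk_rq_pos(b);
	}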
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 575083a9ffe4..db4d990a66bf 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -349,8 +349,8 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
 	else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
 		return rq2;
 
-	s1 = rq1->sector;
-	s2 = rq2->sector;
+	s1 = blk_rq_pos(rq1);
+	s2 = blk_rq_pos(rq2);
 
 	last = cfqd->last_position;
 
@@ -949,10 +949,10 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
 					  struct request *rq)
 {
-	if (rq->sector >= cfqd->last_position)
-		return rq->sector - cfqd->last_position;
+	if (blk_rq_pos(rq) >= cfqd->last_position)
+		return blk_rq_pos(rq) - cfqd->last_position;
 	else
-		return cfqd->last_position - rq->sector;
+		return cfqd->last_position - blk_rq_pos(rq);
 }
 
 #define CIC_SEEK_THR	8 * 1024
@@ -1918,10 +1918,10 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
 
 	if (!cic->last_request_pos)
 		sdist = 0;
-	else if (cic->last_request_pos < rq->sector)
-		sdist = rq->sector - cic->last_request_pos;
+	else if (cic->last_request_pos < blk_rq_pos(rq))
+		sdist = blk_rq_pos(rq) - cic->last_request_pos;
 	else
-		sdist = cic->last_request_pos - rq->sector;
+		sdist = cic->last_request_pos - blk_rq_pos(rq);
 
 	/*
 	 * Don't allow the seek distance to get too large from the
@@ -2071,7 +2071,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	cfq_update_io_seektime(cfqd, cic, rq);
 	cfq_update_idle_window(cfqd, cfqq, cic);
 
-	cic->last_request_pos = rq->sector + rq->nr_sectors;
+	cic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
 
 	if (cfqq == cfqd->active_queue) {
 		/*
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index c4d991d4adef..b547cbca7b23 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -138,7 +138,7 @@ deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
 
 	__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
 	if (__rq) {
-		BUG_ON(sector != __rq->sector);
+		BUG_ON(sector != blk_rq_pos(__rq));
 
 		if (elv_rq_merge_ok(__rq, bio)) {
 			ret = ELEVATOR_FRONT_MERGE;
diff --git a/block/elevator.c b/block/elevator.c
index 1af5d9f04aff..918920056e42 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -52,7 +52,7 @@ static const int elv_hash_shift = 6;
 #define ELV_HASH_FN(sec)	\
 		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
 #define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
-#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
+#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
 
 DEFINE_TRACE(block_rq_insert);
 DEFINE_TRACE(block_rq_issue);
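rq_hash_key() hashes a request by the sector one past its end, which lets the elevator find a back-merge candidate for an incoming bio by looking up the bio's start sector. Roughly how the lookup side in this file uses the key (a sketch patterned on elv_merge(), assuming a queue q and bio as in the surrounding code):

	/* A request that ends exactly at the bio's start sector is a
	 * back-merge candidate. */
	struct request *__rq = elv_rqhash_find(q, bio->bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}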
@@ -119,9 +119,9 @@ static inline int elv_try_merge(struct request *__rq, struct bio *bio)
 	 * we can merge and sequence is ok, check if it's possible
 	 */
 	if (elv_rq_merge_ok(__rq, bio)) {
-		if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
+		if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
 			ret = ELEVATOR_BACK_MERGE;
-		else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
+		else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
 			ret = ELEVATOR_FRONT_MERGE;
 	}
 
@@ -370,9 +370,9 @@ struct request *elv_rb_add(struct rb_root *root, struct request *rq)
 		parent = *p;
 		__rq = rb_entry(parent, struct request, rb_node);
 
-		if (rq->sector < __rq->sector)
+		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
 			p = &(*p)->rb_left;
-		else if (rq->sector > __rq->sector)
+		else if (blk_rq_pos(rq) > blk_rq_pos(__rq))
 			p = &(*p)->rb_right;
 		else
 			return __rq;
@@ -400,9 +400,9 @@ struct request *elv_rb_find(struct rb_root *root, sector_t sector)
 	while (n) {
 		rq = rb_entry(n, struct request, rb_node);
 
-		if (sector < rq->sector)
+		if (sector < blk_rq_pos(rq))
 			n = n->rb_left;
-		else if (sector > rq->sector)
+		else if (sector > blk_rq_pos(rq))
 			n = n->rb_right;
 		else
 			return rq;
@@ -441,14 +441,14 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 			break;
 		if (pos->cmd_flags & stop_flags)
 			break;
-		if (rq->sector >= boundary) {
-			if (pos->sector < boundary)
+		if (blk_rq_pos(rq) >= boundary) {
+			if (blk_rq_pos(pos) < boundary)
 				continue;
 		} else {
-			if (pos->sector >= boundary)
+			if (blk_rq_pos(pos) >= boundary)
 				break;
 		}
-		if (rq->sector >= pos->sector)
+		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
 			break;
 	}
 
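The boundary checks in elv_dispatch_sort() keep the dispatch list as a single elevator sweep that starts at 'boundary' and wraps: requests at or above the boundary sort first in ascending order, followed by those below it. Equivalently (a sketch, not kernel code), the three comparisons amount to ordering by unsigned circular distance from the boundary:

	/* sector_t subtraction wraps modulo the type's width, so
	 * positions below the boundary yield large keys and sort after
	 * everything at or above it -- the same continue/break pattern
	 * as the loop above. */
	static inline sector_t sweep_key(sector_t boundary, sector_t pos)
	{
		return pos - boundary;
	}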