author     Tejun Heo <tj@kernel.org>           2009-05-07 09:24:41 -0400
committer  Jens Axboe <jens.axboe@oracle.com>  2009-05-11 03:50:54 -0400
commit     2e46e8b27aa57c6bd34b3102b40ee4d0144b4fab (patch)
tree       134f560f66c2af277f0f25e5b85f6c1acaddfc06
parent     9780e2dd8254351f6cbe11304849126b51dbd561 (diff)
block: drop request->hard_* and *nr_sectors
struct request has had a few different ways to represent some properties of a request.  ->hard_* represent the block layer's view of the request progress (completion cursor) and the ones without the prefix are supposed to represent the issue cursor and are allowed to be updated as necessary by the low level drivers.  The thing is that as the block layer supports partial completion, the two cursors really aren't necessary and only cause confusion.  In addition, manual management of request detail from the low level drivers is cumbersome and error-prone at the very least.

Another set of interesting duplicate fields is rq->[hard_]nr_sectors and rq->{hard_cur|current}_nr_sectors against rq->data_len and rq->bio->bi_size.  This is more convoluted than the hard_ case.

rq->[hard_]nr_sectors are initialized for requests with bio but blk_rq_bytes() uses them only for !pc requests.  rq->data_len is initialized for all requests but blk_rq_bytes() uses it only for pc requests.  This causes a good amount of confusion throughout the block layer and its drivers, and determining the request length has been a bit of black magic which may or may not work depending on circumstances and what the specific LLD is actually doing.

rq->{hard_cur|current}_nr_sectors represent the number of sectors in the contiguous data area at the front.  This is mainly used by drivers which transfer data by walking the request segment-by-segment.  This value always equals rq->bio->bi_size >> 9.  However, the data length for pc requests may not be a multiple of 512 bytes and using this field becomes a bit confusing.

In general, having multiple fields to represent the same property leads only to confusion and subtle bugs.  With the recent block low level driver cleanups, no driver is accessing or manipulating these duplicate fields directly.  Drop all the duplicates.  Now rq->sector means the current sector, rq->data_len the current total length and rq->bio->bi_size the current segment length.  Everything else is defined in terms of these three and available only through accessors.

* blk_recalc_rq_sectors() is collapsed into blk_update_request() and now handles pc and fs requests equally other than the rq->sector update.  This means that pc requests can now use partial completion too (no in-kernel user yet, though).

* bio_cur_sectors() is replaced with bio_cur_bytes() as the block layer now uses byte count as the primary data length.

* blk_rq_pos() is now guaranteed to be always correct.  In-block users converted.

* blk_rq_bytes() is now guaranteed to be always valid, as is blk_rq_sectors().  In-block users converted.

* blk_rq_sectors() is now guaranteed to equal blk_rq_bytes() >> 9.  Whichever is more convenient is used.

* blk_rq_bytes() and blk_rq_cur_bytes() are now inlined and take a const pointer to request.

[ Impact: API cleanup, single way to represent one property of a request ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Boaz Harrosh <bharrosh@panasas.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
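To illustrate the resulting API (a minimal sketch, not part of this patch; the function name and debug message below are hypothetical), a request_fn that reads request geometry only through the new accessors might look like this:

#include <linux/blkdev.h>

/* Hypothetical request_fn sketch; the block layer calls this with the
 * queue lock held, hence the __-prefixed completion variant. */
static void example_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = elv_next_request(q)) != NULL) {
                blkdev_dequeue_request(rq);

                /* all geometry is derived from the three canonical fields */
                pr_debug("rq: sector %llu, %u bytes total, %u bytes in first segment\n",
                         (unsigned long long)blk_rq_pos(rq),
                         blk_rq_bytes(rq), blk_rq_cur_bytes(rq));

                /* pretend the transfer succeeded; complete everything */
                __blk_end_request_all(rq, 0);
        }
}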
-rw-r--r--  block/blk-core.c           81
-rw-r--r--  block/blk-merge.c          36
-rw-r--r--  block/blk.h                 1
-rw-r--r--  block/cfq-iosched.c        10
-rw-r--r--  include/linux/bio.h         6
-rw-r--r--  include/linux/blkdev.h     37
-rw-r--r--  include/linux/elevator.h    2
-rw-r--r--  kernel/trace/blktrace.c    16
8 files changed, 67 insertions, 122 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 82dc20621c06..3596ca71909b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -127,7 +127,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 	INIT_LIST_HEAD(&rq->timeout_list);
 	rq->cpu = -1;
 	rq->q = q;
-	rq->sector = rq->hard_sector = (sector_t) -1;
+	rq->sector = (sector_t) -1;
 	INIT_HLIST_NODE(&rq->hash);
 	RB_CLEAR_NODE(&rq->rb_node);
 	rq->cmd = rq->__cmd;
@@ -189,8 +189,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 	       (unsigned long long)blk_rq_pos(rq),
 	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
 	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, len %u\n",
-	       rq->bio, rq->biotail,
-	       rq->buffer, rq->data_len);
+	       rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));
 
 	if (blk_pc_request(rq)) {
 		printk(KERN_INFO "  cdb: ");
@@ -1096,7 +1095,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 		req->cmd_flags |= REQ_NOIDLE;
 
 	req->errors = 0;
-	req->hard_sector = req->sector = bio->bi_sector;
+	req->sector = bio->bi_sector;
 	req->ioprio = bio_prio(bio);
 	blk_rq_bio_prep(req->q, req, bio);
 }
@@ -1113,14 +1112,13 @@ static inline bool queue_should_plug(struct request_queue *q)
 static int __make_request(struct request_queue *q, struct bio *bio)
 {
 	struct request *req;
-	int el_ret, nr_sectors;
+	int el_ret;
+	unsigned int bytes = bio->bi_size;
 	const unsigned short prio = bio_prio(bio);
 	const int sync = bio_sync(bio);
 	const int unplug = bio_unplug(bio);
 	int rw_flags;
 
-	nr_sectors = bio_sectors(bio);
-
 	/*
 	 * low level driver can indicate that it wants pages above a
 	 * certain limit bounced to low memory (ie for highmem, or even
@@ -1145,7 +1143,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
 		req->biotail->bi_next = bio;
 		req->biotail = bio;
-		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+		req->data_len += bytes;
 		req->ioprio = ioprio_best(req->ioprio, prio);
 		if (!blk_rq_cpu_valid(req))
 			req->cpu = bio->bi_comp_cpu;
@@ -1171,10 +1169,8 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 		 * not touch req->buffer either...
 		 */
 		req->buffer = bio_data(bio);
-		req->current_nr_sectors = bio_cur_sectors(bio);
-		req->hard_cur_sectors = req->current_nr_sectors;
-		req->sector = req->hard_sector = bio->bi_sector;
-		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+		req->sector = bio->bi_sector;
+		req->data_len += bytes;
 		req->ioprio = ioprio_best(req->ioprio, prio);
 		if (!blk_rq_cpu_valid(req))
 			req->cpu = bio->bi_comp_cpu;
@@ -1557,7 +1553,7 @@ EXPORT_SYMBOL(submit_bio);
 int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 {
 	if (blk_rq_sectors(rq) > q->max_sectors ||
-	    rq->data_len > q->max_hw_sectors << 9) {
+	    blk_rq_bytes(rq) > q->max_hw_sectors << 9) {
 		printk(KERN_ERR "%s: over max size limit.\n", __func__);
 		return -EIO;
 	}
@@ -1675,35 +1671,6 @@ static void blk_account_io_done(struct request *req)
 	}
 }
 
-/**
- * blk_rq_bytes - Returns bytes left to complete in the entire request
- * @rq: the request being processed
- **/
-unsigned int blk_rq_bytes(struct request *rq)
-{
-	if (blk_fs_request(rq))
-		return blk_rq_sectors(rq) << 9;
-
-	return rq->data_len;
-}
-EXPORT_SYMBOL_GPL(blk_rq_bytes);
-
-/**
- * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
- * @rq: the request being processed
- **/
-unsigned int blk_rq_cur_bytes(struct request *rq)
-{
-	if (blk_fs_request(rq))
-		return rq->current_nr_sectors << 9;
-
-	if (rq->bio)
-		return rq->bio->bi_size;
-
-	return rq->data_len;
-}
-EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
-
 struct request *elv_next_request(struct request_queue *q)
 {
 	struct request *rq;
@@ -1736,7 +1703,7 @@ struct request *elv_next_request(struct request_queue *q)
 		if (rq->cmd_flags & REQ_DONTPREP)
 			break;
 
-		if (q->dma_drain_size && rq->data_len) {
+		if (q->dma_drain_size && blk_rq_bytes(rq)) {
 			/*
 			 * make sure space for the drain appears we
 			 * know we can do this because max_hw_segments
@@ -1759,7 +1726,7 @@ struct request *elv_next_request(struct request_queue *q)
 			 * avoid resource deadlock.  REQ_STARTED will
 			 * prevent other fs requests from passing this one.
 			 */
-			if (q->dma_drain_size && rq->data_len &&
+			if (q->dma_drain_size && blk_rq_bytes(rq) &&
 			    !(rq->cmd_flags & REQ_DONTPREP)) {
 				/*
 				 * remove the space for the drain we added
@@ -1911,8 +1878,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 		 * can find how many bytes remain in the request
 		 * later.
 		 */
-		req->nr_sectors = req->hard_nr_sectors = 0;
-		req->current_nr_sectors = req->hard_cur_sectors = 0;
+		req->data_len = 0;
 		return false;
 	}
 
@@ -1926,8 +1892,25 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 		bio_iovec(bio)->bv_len -= nr_bytes;
 	}
 
-	blk_recalc_rq_sectors(req, total_bytes >> 9);
+	req->data_len -= total_bytes;
+	req->buffer = bio_data(req->bio);
+
+	/* update sector only for requests with clear definition of sector */
+	if (blk_fs_request(req) || blk_discard_rq(req))
+		req->sector += total_bytes >> 9;
+
+	/*
+	 * If total number of sectors is less than the first segment
+	 * size, something has gone terribly wrong.
+	 */
+	if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
+		printk(KERN_ERR "blk: request botched\n");
+		req->data_len = blk_rq_cur_bytes(req);
+	}
+
+	/* recalculate the number of segments */
 	blk_recalc_rq_segments(req);
+
 	return true;
 }
 EXPORT_SYMBOL_GPL(blk_update_request);
@@ -2049,11 +2032,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		rq->nr_phys_segments = bio_phys_segments(q, bio);
 		rq->buffer = bio_data(bio);
 	}
-	rq->current_nr_sectors = bio_cur_sectors(bio);
-	rq->hard_cur_sectors = rq->current_nr_sectors;
-	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
 	rq->data_len = bio->bi_size;
-
 	rq->bio = rq->biotail = bio;
 
 	if (bio->bi_bdev)
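The blk_update_request() rewrite above is what makes partial completion uniform: each call consumes nr_bytes, advances rq->sector for fs/discard requests, shrinks rq->data_len and recomputes the segment count.  A hedged sketch of segment-at-a-time completion (hypothetical driver function; this mirrors what blk_end_request_cur() in the blkdev.h hunk below expands to):

/* Hypothetical completion path: finish exactly the first segment of
 * the request; returns true while the request still has bytes left. */
static bool example_complete_cur_segment(struct request *rq, int error)
{
        return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
}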
diff --git a/block/blk-merge.c b/block/blk-merge.c
index bf62a87a9da2..b8df66aef0f8 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -9,35 +9,6 @@
 
 #include "blk.h"
 
-void blk_recalc_rq_sectors(struct request *rq, int nsect)
-{
-	if (blk_fs_request(rq) || blk_discard_rq(rq)) {
-		rq->hard_sector += nsect;
-		rq->hard_nr_sectors -= nsect;
-
-		/*
-		 * Move the I/O submission pointers ahead if required.
-		 */
-		if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
-		    (rq->sector <= rq->hard_sector)) {
-			rq->sector = rq->hard_sector;
-			rq->nr_sectors = rq->hard_nr_sectors;
-			rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
-			rq->current_nr_sectors = rq->hard_cur_sectors;
-			rq->buffer = bio_data(rq->bio);
-		}
-
-		/*
-		 * if total number of sectors is less than the first segment
-		 * size, something has gone terribly wrong
-		 */
-		if (rq->nr_sectors < rq->current_nr_sectors) {
-			printk(KERN_ERR "blk: request botched\n");
-			rq->nr_sectors = rq->current_nr_sectors;
-		}
-	}
-}
-
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 					     struct bio *bio)
 {
@@ -199,8 +170,9 @@ new_segment:
 
 
 	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
-	    (rq->data_len & q->dma_pad_mask)) {
-		unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1;
+	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
+		unsigned int pad_len =
+			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
 
 		sg->length += pad_len;
 		rq->extra_len += pad_len;
@@ -398,7 +370,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
 	req->biotail->bi_next = next->bio;
 	req->biotail = next->biotail;
 
-	req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
+	req->data_len += blk_rq_bytes(next);
 
 	elv_merge_requests(q, req, next);
 
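The padding arithmetic in the REQ_COPY_USER hunk above is easy to sanity-check in userspace; a standalone demo with invented values (dma_pad_mask of 0x3, i.e. pad to a 4-byte boundary):

#include <stdio.h>

int main(void)
{
        unsigned int dma_pad_mask = 0x3;  /* pad transfers to 4 bytes */
        unsigned int bytes = 510;         /* stand-in for blk_rq_bytes(rq) */
        unsigned int pad_len = (dma_pad_mask & ~bytes) + 1;

        /* prints "pad 2 bytes -> 512 total" */
        printf("pad %u bytes -> %u total\n", pad_len, bytes + pad_len);
        return 0;
}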
diff --git a/block/blk.h b/block/blk.h
index 51115599df9b..ab54529103c0 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -101,7 +101,6 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
101int attempt_back_merge(struct request_queue *q, struct request *rq); 101int attempt_back_merge(struct request_queue *q, struct request *rq);
102int attempt_front_merge(struct request_queue *q, struct request *rq); 102int attempt_front_merge(struct request_queue *q, struct request *rq);
103void blk_recalc_rq_segments(struct request *rq); 103void blk_recalc_rq_segments(struct request *rq);
104void blk_recalc_rq_sectors(struct request *rq, int nsect);
105 104
106void blk_queue_congestion_threshold(struct request_queue *q); 105void blk_queue_congestion_threshold(struct request_queue *q);
107 106
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index db4d990a66bf..99ac4304d711 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -579,9 +579,9 @@ cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
 		 * Sort strictly based on sector. Smallest to the left,
 		 * largest to the right.
 		 */
-		if (sector > cfqq->next_rq->sector)
+		if (sector > blk_rq_pos(cfqq->next_rq))
 			n = &(*p)->rb_right;
-		else if (sector < cfqq->next_rq->sector)
+		else if (sector < blk_rq_pos(cfqq->next_rq))
 			n = &(*p)->rb_left;
 		else
 			break;
@@ -611,8 +611,8 @@ static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 		return;
 
 	cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
-	__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root, cfqq->next_rq->sector,
-					 &parent, &p);
+	__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
+				      blk_rq_pos(cfqq->next_rq), &parent, &p);
 	if (!__cfqq) {
 		rb_link_node(&cfqq->p_node, parent, p);
 		rb_insert_color(&cfqq->p_node, cfqq->p_root);
@@ -996,7 +996,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
 		if (cfq_rq_close(cfqd, __cfqq->next_rq))
 			return __cfqq;
 
-		if (__cfqq->next_rq->sector < sector)
+		if (blk_rq_pos(__cfqq->next_rq) < sector)
 			node = rb_next(&__cfqq->p_node);
 		else
 			node = rb_prev(&__cfqq->p_node);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index f37ca8c726ba..d30ec6f30dd7 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -218,12 +218,12 @@ struct bio {
 #define bio_sectors(bio)	((bio)->bi_size >> 9)
 #define bio_empty_barrier(bio)	(bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))
 
-static inline unsigned int bio_cur_sectors(struct bio *bio)
+static inline unsigned int bio_cur_bytes(struct bio *bio)
 {
 	if (bio->bi_vcnt)
-		return bio_iovec(bio)->bv_len >> 9;
+		return bio_iovec(bio)->bv_len;
 	else /* dataless requests such as discard */
-		return bio->bi_size >> 9;
+		return bio->bi_size;
 }
 
 static inline void *bio_data(struct bio *bio)
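Since bio_cur_bytes() now returns bytes, segment-walking drivers no longer shift by 9 and pc requests with non-sector-multiple lengths fall out naturally.  A hypothetical PIO-style copy of the current segment (a sketch assuming the buffer is kernel-mapped, i.e. no highmem handling shown):

#include <linux/blkdev.h>
#include <linux/io.h>

/* Hypothetical PIO transfer of a request's current segment. */
static void example_xfer_cur_segment(struct request *rq, void __iomem *port)
{
        void *buf = bio_data(rq->bio);              /* start of current segment */
        unsigned int len = bio_cur_bytes(rq->bio);  /* bytes, not sectors */

        memcpy_toio(port, buf, len);
}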
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4e5f85598728..ce2bf5efa9ba 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -166,19 +166,8 @@ struct request {
 	enum rq_cmd_type_bits cmd_type;
 	unsigned long atomic_flags;
 
-	/* Maintain bio traversal state for part by part I/O submission.
-	 * hard_* are block layer internals, no driver should touch them!
-	 */
-
-	sector_t sector;		/* next sector to submit */
-	sector_t hard_sector;		/* next sector to complete */
-	unsigned long nr_sectors;	/* no. of sectors left to submit */
-	unsigned long hard_nr_sectors;	/* no. of sectors left to complete */
-	/* no. of sectors left to submit in the current segment */
-	unsigned int current_nr_sectors;
-
-	/* no. of sectors left to complete in the current segment */
-	unsigned int hard_cur_sectors;
+	sector_t sector;	/* sector cursor */
+	unsigned int data_len;	/* total data len, don't access directly */
 
 	struct bio *bio;
 	struct bio *biotail;
@@ -226,7 +215,6 @@ struct request {
 	unsigned char __cmd[BLK_MAX_CDB];
 	unsigned char *cmd;
 
-	unsigned int data_len;
 	unsigned int extra_len;	/* length of alignment and padding */
 	unsigned int sense_len;
 	unsigned int resid_len;	/* residual count */
@@ -840,20 +828,27 @@ extern void blkdev_dequeue_request(struct request *req);
  */
 static inline sector_t blk_rq_pos(const struct request *rq)
 {
-	return rq->hard_sector;
+	return rq->sector;
+}
+
+static inline unsigned int blk_rq_bytes(const struct request *rq)
+{
+	return rq->data_len;
 }
 
-extern unsigned int blk_rq_bytes(struct request *rq);
-extern unsigned int blk_rq_cur_bytes(struct request *rq);
+static inline int blk_rq_cur_bytes(const struct request *rq)
+{
+	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
+}
 
 static inline unsigned int blk_rq_sectors(const struct request *rq)
 {
-	return rq->hard_nr_sectors;
+	return blk_rq_bytes(rq) >> 9;
 }
 
 static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
 {
-	return rq->hard_cur_sectors;
+	return blk_rq_cur_bytes(rq) >> 9;
 }
 
 /*
@@ -928,7 +923,7 @@ static inline void blk_end_request_all(struct request *rq, int error)
  */
 static inline bool blk_end_request_cur(struct request *rq, int error)
 {
-	return blk_end_request(rq, error, rq->hard_cur_sectors << 9);
+	return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
 }
 
 /**
@@ -981,7 +976,7 @@ static inline void __blk_end_request_all(struct request *rq, int error)
  */
 static inline bool __blk_end_request_cur(struct request *rq, int error)
 {
-	return __blk_end_request(rq, error, rq->hard_cur_sectors << 9);
+	return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
 }
 
 extern void blk_complete_request(struct request *);
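With the accessors above, the byte and sector views of a request are consistent by construction, so a driver can mix them freely.  A hypothetical bounds check (a sketch; the function and its capacity parameter are invented for illustration):

#include <linux/blkdev.h>

/* Hypothetical bounds check against a device capacity in sectors. */
static int example_check_rq(struct request *rq, sector_t capacity)
{
        if (blk_rq_pos(rq) + blk_rq_sectors(rq) > capacity)
                return -EIO;

        /* guaranteed by the accessor definitions above */
        WARN_ON(blk_rq_sectors(rq) != blk_rq_bytes(rq) >> 9);
        return 0;
}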
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index c59b769f62b0..4e462878c9ca 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -171,7 +171,7 @@ enum {
 	ELV_MQUEUE_MUST,
 };
 
-#define rq_end_sector(rq)	((rq)->sector + (rq)->nr_sectors)
+#define rq_end_sector(rq)	(blk_rq_pos(rq) + blk_rq_sectors(rq))
 #define rb_entry_rq(node)	rb_entry((node), struct request, rb_node)
 
 /*
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 42f1c11e754c..5708a14bee54 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -642,12 +642,12 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
 
 	if (blk_pc_request(rq)) {
 		what |= BLK_TC_ACT(BLK_TC_PC);
-		__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors,
-				rq->cmd_len, rq->cmd);
+		__blk_add_trace(bt, 0, blk_rq_bytes(rq), rw,
+				what, rq->errors, rq->cmd_len, rq->cmd);
 	} else {
 		what |= BLK_TC_ACT(BLK_TC_FS);
-		__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_sectors(rq) << 9,
-				rw, what, rq->errors, 0, NULL);
+		__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), rw,
+				what, rq->errors, 0, NULL);
 	}
 }
 
@@ -854,11 +854,11 @@ void blk_add_driver_data(struct request_queue *q,
 		return;
 
 	if (blk_pc_request(rq))
-		__blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
-				rq->errors, len, data);
+		__blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
+				BLK_TA_DRV_DATA, rq->errors, len, data);
 	else
-		__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_sectors(rq) << 9,
-				0, BLK_TA_DRV_DATA, rq->errors, len, data);
+		__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0,
+				BLK_TA_DRV_DATA, rq->errors, len, data);
 }
 EXPORT_SYMBOL_GPL(blk_add_driver_data);
 