author     Tejun Heo <tj@kernel.org>            2009-05-07 09:24:44 -0400
committer  Jens Axboe <jens.axboe@oracle.com>   2009-05-11 03:50:55 -0400
commit     a2dec7b36364a5cc564c4d76cf16d2e7d33f5c05
tree       76dfb84ba5fa097a929ab81cf5718c6fcbc9d720  /block/blk-core.c
parent     34b7d2c957199834c474c9d46739265643f4d9c7
block: hide request sector and data_len
Block low level drivers for some reason have been pretty good at abusing the block layer API. Especially struct request's fields tend to get violated in all possible ways. Make it clear that low level drivers MUST NOT access or manipulate rq->sector and rq->data_len directly by prefixing them with double underscores.

This change is also necessary to break the build of out-of-tree code which assumes the previous block API, where internal fields can be manipulated and rq->data_len carries the residual count on completion.

[ Impact: hide internal fields, block API change ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
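For driver authors, the practical effect is that rq->sector and rq->data_len become rq->__sector and rq->__data_len and are off limits; requests are inspected through the block-layer accessors instead. A minimal before/after sketch of a low-level driver is shown below; blk_rq_pos(), blk_rq_bytes() and blk_rq_cur_bytes() are the accessors introduced in the same patch series, while the my_* names are purely illustrative:

#include <linux/blkdev.h>

/* Illustrative request handler in a low-level block driver. */
static void my_handle_request(struct request *rq)
{
	/*
	 * Old style, which this change breaks at build time because
	 * the fields are now prefixed with double underscores:
	 *
	 *	sector_t pos     = rq->sector;
	 *	unsigned int len = rq->data_len;
	 *
	 * New style: go through the accessors instead.
	 */
	sector_t pos     = blk_rq_pos(rq);        /* start sector of the request */
	unsigned int len = blk_rq_bytes(rq);      /* bytes remaining in the whole request */
	unsigned int cur = blk_rq_cur_bytes(rq);  /* bytes in the current segment */

	my_program_hardware(pos, len, cur);       /* illustrative helper, not a real API */
}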
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--   block/blk-core.c   20
1 file changed, 10 insertions, 10 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 3596ca71909b..6226a380fb6d 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -127,7 +127,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 	INIT_LIST_HEAD(&rq->timeout_list);
 	rq->cpu = -1;
 	rq->q = q;
-	rq->sector = (sector_t) -1;
+	rq->__sector = (sector_t) -1;
 	INIT_HLIST_NODE(&rq->hash);
 	RB_CLEAR_NODE(&rq->rb_node);
 	rq->cmd = rq->__cmd;
@@ -1095,7 +1095,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 		req->cmd_flags |= REQ_NOIDLE;
 
 	req->errors = 0;
-	req->sector = bio->bi_sector;
+	req->__sector = bio->bi_sector;
 	req->ioprio = bio_prio(bio);
 	blk_rq_bio_prep(req->q, req, bio);
 }
@@ -1143,7 +1143,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
 		req->biotail->bi_next = bio;
 		req->biotail = bio;
-		req->data_len += bytes;
+		req->__data_len += bytes;
 		req->ioprio = ioprio_best(req->ioprio, prio);
 		if (!blk_rq_cpu_valid(req))
 			req->cpu = bio->bi_comp_cpu;
@@ -1169,8 +1169,8 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 		 * not touch req->buffer either...
 		 */
 		req->buffer = bio_data(bio);
-		req->sector = bio->bi_sector;
-		req->data_len += bytes;
+		req->__sector = bio->bi_sector;
+		req->__data_len += bytes;
 		req->ioprio = ioprio_best(req->ioprio, prio);
 		if (!blk_rq_cpu_valid(req))
 			req->cpu = bio->bi_comp_cpu;
@@ -1878,7 +1878,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 		 * can find how many bytes remain in the request
 		 * later.
 		 */
-		req->data_len = 0;
+		req->__data_len = 0;
 		return false;
 	}
 
@@ -1892,12 +1892,12 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 		bio_iovec(bio)->bv_len -= nr_bytes;
 	}
 
-	req->data_len -= total_bytes;
+	req->__data_len -= total_bytes;
 	req->buffer = bio_data(req->bio);
 
 	/* update sector only for requests with clear definition of sector */
 	if (blk_fs_request(req) || blk_discard_rq(req))
-		req->sector += total_bytes >> 9;
+		req->__sector += total_bytes >> 9;
 
 	/*
 	 * If total number of sectors is less than the first segment
@@ -1905,7 +1905,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 	 */
 	if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
 		printk(KERN_ERR "blk: request botched\n");
-		req->data_len = blk_rq_cur_bytes(req);
+		req->__data_len = blk_rq_cur_bytes(req);
 	}
 
 	/* recalculate the number of segments */
@@ -2032,7 +2032,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		rq->nr_phys_segments = bio_phys_segments(q, bio);
 		rq->buffer = bio_data(bio);
 	}
-	rq->data_len = bio->bi_size;
+	rq->__data_len = bio->bi_size;
 	rq->bio = rq->biotail = bio;
 
 	if (bio->bi_bdev)