author		Tejun Heo <tj@kernel.org>	2009-05-07 09:24:41 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2009-05-11 03:50:54 -0400
commit		2e46e8b27aa57c6bd34b3102b40ee4d0144b4fab
tree		134f560f66c2af277f0f25e5b85f6c1acaddfc06 /block/blk-core.c
parent		9780e2dd8254351f6cbe11304849126b51dbd561
block: drop request->hard_* and *nr_sectors
struct request has had several different ways to represent some
properties of a request. The ->hard_* fields represent the block
layer's view of request progress (the completion cursor), while the
ones without the prefix represent the issue cursor and may be updated
as necessary by the low level drivers. The thing is that, as the block
layer supports partial completion, the two cursors really aren't
necessary and only cause confusion. In addition, manual management of
request details from low level drivers is cumbersome and error-prone
at the very least.
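For illustration, a minimal, hypothetical sketch of what the dual-cursor
scheme asked of drivers; the driver function is made up, but the fields
and bio_cur_sectors() are the pre-patch ones:

	/* Hypothetical old-style LLD: after transferring 'sectors' sectors
	 * it had to advance the issue cursor by hand, while the block layer
	 * kept separate hard_* copies for the completion cursor. */
	static void olddrv_advance_cursor(struct request *rq, unsigned int sectors)
	{
		rq->sector += sectors;
		rq->nr_sectors -= sectors;
		rq->current_nr_sectors = bio_cur_sectors(rq->bio);
	}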
Another interesting set of duplicate fields is rq->[hard_]nr_sectors
and rq->{hard_cur|current}_nr_sectors versus rq->data_len and
rq->bio->bi_size. This case is more convoluted than the hard_ one.
rq->[hard_]nr_sectors are initialized for requests with a bio, but
blk_rq_bytes() uses them only for !pc requests. rq->data_len is
initialized for all requests, but blk_rq_bytes() uses it only for pc
requests. This causes a good amount of confusion throughout the block
layer and its drivers, and determining the request length has been a
bit of black magic which may or may not work depending on circumstances
and on what the specific LLD is actually doing.
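For reference, the helper removed from blk-core.c below encodes exactly
this split; which field answered "how long is this request?" depended on
the request type:

	unsigned int blk_rq_bytes(struct request *rq)
	{
		if (blk_fs_request(rq))
			return blk_rq_sectors(rq) << 9;	/* fs: sector-based */

		return rq->data_len;			/* pc: byte-based */
	}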
rq->{hard_cur|current}_nr_sectors represent the number of sectors in
the contiguous data area at the front of the request. They are mainly
used by drivers which transfer data by walking the request segment by
segment, and the value always equals rq->bio->bi_size >> 9. However,
the data length of a pc request may not be a multiple of 512 bytes,
and using this field then becomes a bit confusing.
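As a hypothetical illustration of that segment-by-segment pattern using
the post-patch, byte-based accessors (mydrv_transfer() is made up):

	/* PIO-style loop: move one contiguous front segment at a time and
	 * let the block layer advance the completion cursor. */
	static void mydrv_pio_xfer(struct request *rq)
	{
		bool pending = true;

		while (pending) {
			unsigned int len = blk_rq_cur_bytes(rq);

			mydrv_transfer(rq->buffer, len);
			/* returns true while bytes remain in the request */
			pending = __blk_end_request(rq, 0, len);
		}
	}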
In general, having multiple fields to represent the same property
leads only to confusion and subtle bugs. With the recent block low
level driver cleanups, no driver accesses or manipulates these
duplicate fields directly anymore, so drop all the duplicates. Now
rq->sector means the current sector, rq->data_len the current total
length, and rq->bio->bi_size the current segment length. Everything
else is defined in terms of these three and available only through
accessors.
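The accessor side of this lands in include/linux/blkdev.h, outside this
blk-core.c-limited diff; roughly:

	/* Sketch of the accessors in terms of the three primary fields. */
	static inline sector_t blk_rq_pos(const struct request *rq)
	{
		return rq->sector;			/* current sector */
	}

	static inline unsigned int blk_rq_bytes(const struct request *rq)
	{
		return rq->data_len;			/* current total length */
	}

	static inline unsigned int blk_rq_cur_bytes(const struct request *rq)
	{
		return rq->bio ? bio_cur_bytes(rq->bio) : 0;	/* current segment */
	}

	static inline unsigned int blk_rq_sectors(const struct request *rq)
	{
		return blk_rq_bytes(rq) >> 9;
	}

	static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
	{
		return blk_rq_cur_bytes(rq) >> 9;
	}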
* blk_recalc_rq_sectors() is collapsed into blk_update_request(), which
now handles pc and fs requests equally other than the rq->sector update.
This means that pc requests can now use partial completion too (no
in-kernel user yet, though; see the sketch after this list).
* bio_cur_sectors() is replaced with bio_cur_bytes() as the block layer
now uses byte count as the primary data length.
* blk_rq_pos() is now guaranteed to be always correct. In-block users
converted.
* blk_rq_bytes() is now guaranteed to be always valid, as is
blk_rq_sectors(). In-block users converted.
* blk_rq_sectors() is now guaranteed to equal blk_rq_bytes() >> 9.
Whichever is more convenient is used.
* blk_rq_bytes() and blk_rq_cur_bytes() are now inlined and take a
const pointer to the request.
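A minimal sketch of the partial completion the first bullet enables; the
driver and its interrupt-time byte count are hypothetical:

	/* With the collapsed blk_update_request(), pc requests can be
	 * finished piecemeal just like fs requests. */
	static void mydrv_complete(struct request *rq, unsigned int bytes_done)
	{
		if (blk_end_request(rq, 0, bytes_done))
			return;		/* bytes remain; rq stays pending */

		/* request fully completed and released by the block layer */
	}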
[ Impact: API cleanup, single way to represent one property of a request ]
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Boaz Harrosh <bharrosh@panasas.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--	block/blk-core.c	81
1 file changed, 30 insertions(+), 51 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 82dc20621c06..3596ca71909b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -127,7 +127,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 	INIT_LIST_HEAD(&rq->timeout_list);
 	rq->cpu = -1;
 	rq->q = q;
-	rq->sector = rq->hard_sector = (sector_t) -1;
+	rq->sector = (sector_t) -1;
 	INIT_HLIST_NODE(&rq->hash);
 	RB_CLEAR_NODE(&rq->rb_node);
 	rq->cmd = rq->__cmd;
@@ -189,8 +189,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 	       (unsigned long long)blk_rq_pos(rq),
 	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
 	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, len %u\n",
-	       rq->bio, rq->biotail,
-	       rq->buffer, rq->data_len);
+	       rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));
 
 	if (blk_pc_request(rq)) {
 		printk(KERN_INFO "  cdb: ");
@@ -1096,7 +1095,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 		req->cmd_flags |= REQ_NOIDLE;
 
 	req->errors = 0;
-	req->hard_sector = req->sector = bio->bi_sector;
+	req->sector = bio->bi_sector;
 	req->ioprio = bio_prio(bio);
 	blk_rq_bio_prep(req->q, req, bio);
 }
@@ -1113,14 +1112,13 @@ static inline bool queue_should_plug(struct request_queue *q)
 static int __make_request(struct request_queue *q, struct bio *bio)
 {
 	struct request *req;
-	int el_ret, nr_sectors;
+	int el_ret;
+	unsigned int bytes = bio->bi_size;
 	const unsigned short prio = bio_prio(bio);
 	const int sync = bio_sync(bio);
 	const int unplug = bio_unplug(bio);
 	int rw_flags;
 
-	nr_sectors = bio_sectors(bio);
-
 	/*
 	 * low level driver can indicate that it wants pages above a
 	 * certain limit bounced to low memory (ie for highmem, or even
@@ -1145,7 +1143,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
 		req->biotail->bi_next = bio;
 		req->biotail = bio;
-		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+		req->data_len += bytes;
 		req->ioprio = ioprio_best(req->ioprio, prio);
 		if (!blk_rq_cpu_valid(req))
 			req->cpu = bio->bi_comp_cpu;
@@ -1171,10 +1169,8 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 		 * not touch req->buffer either...
 		 */
 		req->buffer = bio_data(bio);
-		req->current_nr_sectors = bio_cur_sectors(bio);
-		req->hard_cur_sectors = req->current_nr_sectors;
-		req->sector = req->hard_sector = bio->bi_sector;
-		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+		req->sector = bio->bi_sector;
+		req->data_len += bytes;
 		req->ioprio = ioprio_best(req->ioprio, prio);
 		if (!blk_rq_cpu_valid(req))
 			req->cpu = bio->bi_comp_cpu;
@@ -1557,7 +1553,7 @@ EXPORT_SYMBOL(submit_bio);
 int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 {
 	if (blk_rq_sectors(rq) > q->max_sectors ||
-	    rq->data_len > q->max_hw_sectors << 9) {
+	    blk_rq_bytes(rq) > q->max_hw_sectors << 9) {
 		printk(KERN_ERR "%s: over max size limit.\n", __func__);
 		return -EIO;
 	}
@@ -1675,35 +1671,6 @@ static void blk_account_io_done(struct request *req)
 	}
 }
 
-/**
- * blk_rq_bytes - Returns bytes left to complete in the entire request
- * @rq: the request being processed
- **/
-unsigned int blk_rq_bytes(struct request *rq)
-{
-	if (blk_fs_request(rq))
-		return blk_rq_sectors(rq) << 9;
-
-	return rq->data_len;
-}
-EXPORT_SYMBOL_GPL(blk_rq_bytes);
-
-/**
- * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
- * @rq: the request being processed
- **/
-unsigned int blk_rq_cur_bytes(struct request *rq)
-{
-	if (blk_fs_request(rq))
-		return rq->current_nr_sectors << 9;
-
-	if (rq->bio)
-		return rq->bio->bi_size;
-
-	return rq->data_len;
-}
-EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
-
 struct request *elv_next_request(struct request_queue *q)
 {
 	struct request *rq;
@@ -1736,7 +1703,7 @@ struct request *elv_next_request(struct request_queue *q)
 		if (rq->cmd_flags & REQ_DONTPREP)
 			break;
 
-		if (q->dma_drain_size && rq->data_len) {
+		if (q->dma_drain_size && blk_rq_bytes(rq)) {
 			/*
 			 * make sure space for the drain appears we
 			 * know we can do this because max_hw_segments
@@ -1759,7 +1726,7 @@ struct request *elv_next_request(struct request_queue *q)
 			 * avoid resource deadlock. REQ_STARTED will
 			 * prevent other fs requests from passing this one.
 			 */
-			if (q->dma_drain_size && rq->data_len &&
+			if (q->dma_drain_size && blk_rq_bytes(rq) &&
 			    !(rq->cmd_flags & REQ_DONTPREP)) {
 				/*
 				 * remove the space for the drain we added
@@ -1911,8 +1878,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 		 * can find how many bytes remain in the request
 		 * later.
 		 */
-		req->nr_sectors = req->hard_nr_sectors = 0;
-		req->current_nr_sectors = req->hard_cur_sectors = 0;
+		req->data_len = 0;
 		return false;
 	}
 
@@ -1926,8 +1892,25 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 		bio_iovec(bio)->bv_len -= nr_bytes;
 	}
 
-	blk_recalc_rq_sectors(req, total_bytes >> 9);
+	req->data_len -= total_bytes;
+	req->buffer = bio_data(req->bio);
+
+	/* update sector only for requests with clear definition of sector */
+	if (blk_fs_request(req) || blk_discard_rq(req))
+		req->sector += total_bytes >> 9;
+
+	/*
+	 * If total number of sectors is less than the first segment
+	 * size, something has gone terribly wrong.
+	 */
+	if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
+		printk(KERN_ERR "blk: request botched\n");
+		req->data_len = blk_rq_cur_bytes(req);
+	}
+
+	/* recalculate the number of segments */
 	blk_recalc_rq_segments(req);
+
 	return true;
 }
 EXPORT_SYMBOL_GPL(blk_update_request);
@@ -2049,11 +2032,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		rq->nr_phys_segments = bio_phys_segments(q, bio);
 		rq->buffer = bio_data(bio);
 	}
-	rq->current_nr_sectors = bio_cur_sectors(bio);
-	rq->hard_cur_sectors = rq->current_nr_sectors;
-	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
 	rq->data_len = bio->bi_size;
-
 	rq->bio = rq->biotail = bio;
 
 	if (bio->bi_bdev)