Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c    | 81
-rw-r--r--  block/blk-merge.c   | 36
-rw-r--r--  block/blk.h         |  1
-rw-r--r--  block/cfq-iosched.c | 10
4 files changed, 39 insertions, 89 deletions
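The patch collapses the request's duplicated position/size state (hard_sector, hard_nr_sectors, current_nr_sectors and friends) into the two authoritative fields, sector and data_len, and routes readers through accessor helpers. The helpers themselves live outside block/ (the diffstat above is limited to that directory, so they never appear in this diff; presumably they sit in include/linux/blkdev.h). Below is a rough userspace sketch of what the converted call sites assume those helpers reduce to, with a stubbed-down struct request; the field layout is illustrative, not the kernel's:

#include <stdio.h>

typedef unsigned long long sector_t;

/* stub of the two struct request fields this patch keeps authoritative */
struct request {
	sector_t sector;	/* start sector of the request */
	unsigned int data_len;	/* total bytes left to complete */
};

/* sketch of the accessors the diff converts call sites to */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->data_len;
}

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;	/* 512-byte sectors */
}

int main(void)
{
	struct request rq = { .sector = 2048, .data_len = 4096 };

	printf("pos=%llu bytes=%u sectors=%u\n",
	       blk_rq_pos(&rq), blk_rq_bytes(&rq), blk_rq_sectors(&rq));
	return 0;
}

Deriving sector counts from the byte count on demand is what lets the hard_* mirrors below be deleted wholesale.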
diff --git a/block/blk-core.c b/block/blk-core.c
index 82dc20621c06..3596ca71909b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -127,7 +127,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 	INIT_LIST_HEAD(&rq->timeout_list);
 	rq->cpu = -1;
 	rq->q = q;
-	rq->sector = rq->hard_sector = (sector_t) -1;
+	rq->sector = (sector_t) -1;
 	INIT_HLIST_NODE(&rq->hash);
 	RB_CLEAR_NODE(&rq->rb_node);
 	rq->cmd = rq->__cmd;
@@ -189,8 +189,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
 	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, len %u\n",
-	       rq->bio, rq->biotail,
-	       rq->buffer, rq->data_len);
+	       rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));

 	if (blk_pc_request(rq)) {
 		printk(KERN_INFO "  cdb: ");
@@ -1096,7 +1095,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 		req->cmd_flags |= REQ_NOIDLE;

 	req->errors = 0;
-	req->hard_sector = req->sector = bio->bi_sector;
+	req->sector = bio->bi_sector;
 	req->ioprio = bio_prio(bio);
 	blk_rq_bio_prep(req->q, req, bio);
 }
@@ -1113,14 +1112,13 @@ static inline bool queue_should_plug(struct request_queue *q)
 static int __make_request(struct request_queue *q, struct bio *bio)
 {
 	struct request *req;
-	int el_ret, nr_sectors;
+	int el_ret;
+	unsigned int bytes = bio->bi_size;
 	const unsigned short prio = bio_prio(bio);
 	const int sync = bio_sync(bio);
 	const int unplug = bio_unplug(bio);
 	int rw_flags;

-	nr_sectors = bio_sectors(bio);
-
 	/*
 	 * low level driver can indicate that it wants pages above a
 	 * certain limit bounced to low memory (ie for highmem, or even
@@ -1145,7 +1143,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)

 		req->biotail->bi_next = bio;
 		req->biotail = bio;
-		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+		req->data_len += bytes;
 		req->ioprio = ioprio_best(req->ioprio, prio);
 		if (!blk_rq_cpu_valid(req))
 			req->cpu = bio->bi_comp_cpu;
@@ -1171,10 +1169,8 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 		 * not touch req->buffer either...
 		 */
 		req->buffer = bio_data(bio);
-		req->current_nr_sectors = bio_cur_sectors(bio);
-		req->hard_cur_sectors = req->current_nr_sectors;
-		req->sector = req->hard_sector = bio->bi_sector;
-		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+		req->sector = bio->bi_sector;
+		req->data_len += bytes;
 		req->ioprio = ioprio_best(req->ioprio, prio);
 		if (!blk_rq_cpu_valid(req))
 			req->cpu = bio->bi_comp_cpu;
@@ -1557,7 +1553,7 @@ EXPORT_SYMBOL(submit_bio);
 int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 {
 	if (blk_rq_sectors(rq) > q->max_sectors ||
-	    rq->data_len > q->max_hw_sectors << 9) {
+	    blk_rq_bytes(rq) > q->max_hw_sectors << 9) {
 		printk(KERN_ERR "%s: over max size limit.\n", __func__);
 		return -EIO;
 	}
@@ -1675,35 +1671,6 @@ static void blk_account_io_done(struct request *req)
 	}
 }

-/**
- * blk_rq_bytes - Returns bytes left to complete in the entire request
- * @rq: the request being processed
- **/
-unsigned int blk_rq_bytes(struct request *rq)
-{
-	if (blk_fs_request(rq))
-		return blk_rq_sectors(rq) << 9;
-
-	return rq->data_len;
-}
-EXPORT_SYMBOL_GPL(blk_rq_bytes);
-
-/**
- * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
- * @rq: the request being processed
- **/
-unsigned int blk_rq_cur_bytes(struct request *rq)
-{
-	if (blk_fs_request(rq))
-		return rq->current_nr_sectors << 9;
-
-	if (rq->bio)
-		return rq->bio->bi_size;
-
-	return rq->data_len;
-}
-EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
-
 struct request *elv_next_request(struct request_queue *q)
 {
 	struct request *rq;
@@ -1736,7 +1703,7 @@ struct request *elv_next_request(struct request_queue *q)
 		if (rq->cmd_flags & REQ_DONTPREP)
 			break;

-		if (q->dma_drain_size && rq->data_len) {
+		if (q->dma_drain_size && blk_rq_bytes(rq)) {
 			/*
 			 * make sure space for the drain appears we
 			 * know we can do this because max_hw_segments
@@ -1759,7 +1726,7 @@ struct request *elv_next_request(struct request_queue *q)
 			 * avoid resource deadlock. REQ_STARTED will
 			 * prevent other fs requests from passing this one.
 			 */
-			if (q->dma_drain_size && rq->data_len &&
+			if (q->dma_drain_size && blk_rq_bytes(rq) &&
 			    !(rq->cmd_flags & REQ_DONTPREP)) {
 				/*
 				 * remove the space for the drain we added
@@ -1911,8 +1878,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 		 * can find how many bytes remain in the request
 		 * later.
 		 */
-		req->nr_sectors = req->hard_nr_sectors = 0;
-		req->current_nr_sectors = req->hard_cur_sectors = 0;
+		req->data_len = 0;
 		return false;
 	}

@@ -1926,8 +1892,25 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 		bio_iovec(bio)->bv_len -= nr_bytes;
 	}

-	blk_recalc_rq_sectors(req, total_bytes >> 9);
+	req->data_len -= total_bytes;
+	req->buffer = bio_data(req->bio);
+
+	/* update sector only for requests with clear definition of sector */
+	if (blk_fs_request(req) || blk_discard_rq(req))
+		req->sector += total_bytes >> 9;
+
+	/*
+	 * If total number of sectors is less than the first segment
+	 * size, something has gone terribly wrong.
+	 */
+	if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
+		printk(KERN_ERR "blk: request botched\n");
+		req->data_len = blk_rq_cur_bytes(req);
+	}
+
+	/* recalculate the number of segments */
 	blk_recalc_rq_segments(req);
+
 	return true;
 }
 EXPORT_SYMBOL_GPL(blk_update_request);
@@ -2049,11 +2032,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		rq->nr_phys_segments = bio_phys_segments(q, bio);
 		rq->buffer = bio_data(bio);
 	}
-	rq->current_nr_sectors = bio_cur_sectors(bio);
-	rq->hard_cur_sectors = rq->current_nr_sectors;
-	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
 	rq->data_len = bio->bi_size;
-
 	rq->bio = rq->biotail = bio;

 	if (bio->bi_bdev)
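With blk_recalc_rq_sectors() gone (it is removed from blk-merge.c below), blk_update_request() now does the partial-completion bookkeeping inline: subtract the bytes that completed, advance the start sector only for request types with a well-defined sector, and clamp if the remainder ever drops below the first segment. A standalone sketch of that arithmetic, reusing the stub fields from the earlier sketch; cur_bytes is a hypothetical stand-in for blk_rq_cur_bytes(), not a kernel field:

#include <stdio.h>
#include <stdbool.h>

typedef unsigned long long sector_t;

struct request {
	sector_t sector;
	unsigned int data_len;	/* bytes left in the whole request */
	unsigned int cur_bytes;	/* bytes left in the first segment (stand-in) */
};

/* sketch of the accounting blk_update_request() now does inline:
 * returns true while the request still has bytes outstanding */
static bool update_request(struct request *rq, unsigned int completed)
{
	if (completed >= rq->data_len) {
		rq->data_len = 0;	/* fully done */
		return false;
	}

	rq->data_len -= completed;
	rq->sector += completed >> 9;	/* upstream: fs/discard requests only */

	/* total left should never be smaller than the first segment */
	if (rq->data_len < rq->cur_bytes) {
		fprintf(stderr, "request botched\n");
		rq->data_len = rq->cur_bytes;
	}
	return true;
}

int main(void)
{
	struct request rq = { .sector = 0, .data_len = 8192, .cur_bytes = 4096 };

	while (update_request(&rq, 4096))
		printf("left: %u bytes at sector %llu\n", rq.data_len, rq.sector);
	return 0;
}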
diff --git a/block/blk-merge.c b/block/blk-merge.c
index bf62a87a9da2..b8df66aef0f8 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -9,35 +9,6 @@

 #include "blk.h"

-void blk_recalc_rq_sectors(struct request *rq, int nsect)
-{
-	if (blk_fs_request(rq) || blk_discard_rq(rq)) {
-		rq->hard_sector += nsect;
-		rq->hard_nr_sectors -= nsect;
-
-		/*
-		 * Move the I/O submission pointers ahead if required.
-		 */
-		if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
-		    (rq->sector <= rq->hard_sector)) {
-			rq->sector = rq->hard_sector;
-			rq->nr_sectors = rq->hard_nr_sectors;
-			rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
-			rq->current_nr_sectors = rq->hard_cur_sectors;
-			rq->buffer = bio_data(rq->bio);
-		}
-
-		/*
-		 * if total number of sectors is less than the first segment
-		 * size, something has gone terribly wrong
-		 */
-		if (rq->nr_sectors < rq->current_nr_sectors) {
-			printk(KERN_ERR "blk: request botched\n");
-			rq->nr_sectors = rq->current_nr_sectors;
-		}
-	}
-}
-
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 					     struct bio *bio)
 {
@@ -199,8 +170,9 @@ new_segment:


 	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
-	    (rq->data_len & q->dma_pad_mask)) {
-		unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1;
+	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
+		unsigned int pad_len =
+			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

 		sg->length += pad_len;
 		rq->extra_len += pad_len;
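The padding hunk just above only changes which length expression is read, but the arithmetic is worth unpacking: with dma_pad_mask = alignment - 1 for a power-of-two alignment, (mask & ~len) + 1 equals alignment - (len % alignment), the number of bytes needed to round the transfer up to the next boundary. A quick standalone check of that identity:

#include <stdio.h>

int main(void)
{
	unsigned int dma_pad_mask = 0x3;	/* 4-byte alignment: mask = align - 1 */
	unsigned int lens[] = { 513, 1022, 4095 };

	for (int i = 0; i < 3; i++) {
		unsigned int len = lens[i];

		if (len & dma_pad_mask) {
			/* same expression as the hunk above: bytes to next boundary */
			unsigned int pad_len = (dma_pad_mask & ~len) + 1;

			printf("len %u -> pad %u -> %u\n", len, pad_len, len + pad_len);
		}
	}
	return 0;
}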
@@ -398,7 +370,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
 	req->biotail->bi_next = next->bio;
 	req->biotail = next->biotail;

-	req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
+	req->data_len += blk_rq_bytes(next);

 	elv_merge_requests(q, req, next);

diff --git a/block/blk.h b/block/blk.h
index 51115599df9b..ab54529103c0 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -101,7 +101,6 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 int attempt_back_merge(struct request_queue *q, struct request *rq);
 int attempt_front_merge(struct request_queue *q, struct request *rq);
 void blk_recalc_rq_segments(struct request *rq);
-void blk_recalc_rq_sectors(struct request *rq, int nsect);

 void blk_queue_congestion_threshold(struct request_queue *q);

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index db4d990a66bf..99ac4304d711 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -579,9 +579,9 @@ cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
 		 * Sort strictly based on sector. Smallest to the left,
 		 * largest to the right.
 		 */
-		if (sector > cfqq->next_rq->sector)
+		if (sector > blk_rq_pos(cfqq->next_rq))
 			n = &(*p)->rb_right;
-		else if (sector < cfqq->next_rq->sector)
+		else if (sector < blk_rq_pos(cfqq->next_rq))
 			n = &(*p)->rb_left;
 		else
 			break;
@@ -611,8 +611,8 @@ static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 		return;

 	cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
-	__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root, cfqq->next_rq->sector,
-					 &parent, &p);
+	__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
+				      blk_rq_pos(cfqq->next_rq), &parent, &p);
 	if (!__cfqq) {
 		rb_link_node(&cfqq->p_node, parent, p);
 		rb_insert_color(&cfqq->p_node, cfqq->p_root);
@@ -996,7 +996,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
 	if (cfq_rq_close(cfqd, __cfqq->next_rq))
 		return __cfqq;

-	if (__cfqq->next_rq->sector < sector)
+	if (blk_rq_pos(__cfqq->next_rq) < sector)
 		node = rb_next(&__cfqq->p_node);
 	else
 		node = rb_prev(&__cfqq->p_node);
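The cfq-iosched.c hunks above are pure call-site conversions from ->sector to blk_rq_pos(), but the ordering they maintain is easy to demonstrate in isolation: the prio tree keeps queues sorted by the position of their next request, smallest to the left, which is what lets cfqq_close() step to a neighbor with rb_next()/rb_prev(). A toy unbalanced BST standing in for the kernel rbtree (illustrative only, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

typedef unsigned long long sector_t;

/* toy stand-in for a cfq queue keyed by its next request's position */
struct node {
	sector_t pos;
	struct node *left, *right;
};

/* mirrors cfq_prio_tree_lookup's descent: larger keys go right,
 * smaller keys go left, an equal key stops the walk */
static struct node *insert(struct node *root, sector_t pos)
{
	struct node **p = &root;

	while (*p) {
		if (pos > (*p)->pos)
			p = &(*p)->right;
		else if (pos < (*p)->pos)
			p = &(*p)->left;
		else
			return root;	/* already present */
	}
	*p = calloc(1, sizeof(**p));
	(*p)->pos = pos;
	return root;
}

static void walk(struct node *n)
{
	if (!n)
		return;
	walk(n->left);
	printf("%llu ", n->pos);
	walk(n->right);
}

int main(void)
{
	struct node *root = NULL;
	sector_t keys[] = { 4096, 128, 2048, 128 };

	for (int i = 0; i < 4; i++)
		root = insert(root, keys[i]);
	walk(root);	/* prints 128 2048 4096: smallest to the left */
	printf("\n");
	return 0;
}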