 block/blk-core.c       | 99
 block/blk-merge.c      | 43
 block/blk.h            |  1
 include/linux/blkdev.h | 23
 4 files changed, 161 insertions(+), 5 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 4daae1ee2b23..c822239bcc9d 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1157,6 +1157,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	const unsigned short prio = bio_prio(bio);
 	const int sync = bio_sync(bio);
 	const int unplug = bio_unplug(bio);
+	const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
 	int rw_flags;
 
 	if (bio_barrier(bio) && bio_has_data(bio) &&
@@ -1186,6 +1187,9 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
 		trace_block_bio_backmerge(q, bio);
 
+		if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
+			blk_rq_set_mixed_merge(req);
+
 		req->biotail->bi_next = bio;
 		req->biotail = bio;
 		req->__data_len += bytes;
@@ -1205,6 +1209,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
 		trace_block_bio_frontmerge(q, bio);
 
+		if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) {
+			blk_rq_set_mixed_merge(req);
+			req->cmd_flags &= ~REQ_FAILFAST_MASK;
+			req->cmd_flags |= ff;
+		}
+
 		bio->bi_next = req->bio;
 		req->bio = bio;
 
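Both merge paths above hinge on the same bitmask test: the incoming bio's failfast bits (ff) are compared against the ones already recorded on the request. A minimal standalone sketch of that test follows; the flag values are placeholders for illustration, not the kernel's definitions.

/* Standalone illustration only -- placeholder flag values, not kernel code. */
#include <stdio.h>

#define FF_DEV		(1u << 0)
#define FF_TRANSPORT	(1u << 1)
#define FF_DRIVER	(1u << 2)
#define FF_MASK		(FF_DEV | FF_TRANSPORT | FF_DRIVER)

int main(void)
{
	unsigned int req_cmd_flags = FF_DEV | FF_TRANSPORT;	/* request so far */
	unsigned int bio_rw = FF_DEV;				/* incoming bio */
	unsigned int ff = bio_rw & FF_MASK;

	/* Same comparison as the back/front merge paths above. */
	if ((req_cmd_flags & FF_MASK) != ff)
		printf("failfast bits differ -> mark request as mixed merge\n");
	return 0;
}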
@@ -1649,6 +1659,50 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 }
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
 
+/**
+ * blk_rq_err_bytes - determine number of bytes till the next failure boundary
+ * @rq: request to examine
+ *
+ * Description:
+ *     A request could be a merge of IOs which require different failure
+ *     handling.  This function determines the number of bytes which
+ *     can be failed from the beginning of the request without
+ *     crossing into areas which need to be retried further.
+ *
+ * Return:
+ *     The number of bytes to fail.
+ *
+ * Context:
+ *     queue_lock must be held.
+ */
+unsigned int blk_rq_err_bytes(const struct request *rq)
+{
+	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
+	unsigned int bytes = 0;
+	struct bio *bio;
+
+	if (!(rq->cmd_flags & REQ_MIXED_MERGE))
+		return blk_rq_bytes(rq);
+
+	/*
+	 * Currently the only 'mixing' which can happen is between
+	 * different failfast types.  We can safely fail portions
+	 * which have all the failfast bits that the first one has -
+	 * the ones which are at least as eager to fail as the first
+	 * one.
+	 */
+	for (bio = rq->bio; bio; bio = bio->bi_next) {
+		if ((bio->bi_rw & ff) != ff)
+			break;
+		bytes += bio->bi_size;
+	}
+
+	/* this could lead to infinite loop */
+	BUG_ON(blk_rq_bytes(rq) && !bytes);
+	return bytes;
+}
+EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
+
 static void blk_account_io_completion(struct request *req, unsigned int bytes)
 {
 	if (blk_do_io_stat(req)) {
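To see what the loop above computes, here is a userspace re-implementation over a toy bio list (simplified structures, illustration only): only the leading bios that carry every failfast bit of the first one are counted.

/* Toy model of the blk_rq_err_bytes() walk -- simplified structs, not kernel code. */
struct toy_bio {
	unsigned int	bi_rw;		/* failfast bits of this bio */
	unsigned int	bi_size;	/* bytes carried by this bio */
	struct toy_bio	*bi_next;
};

static unsigned int toy_err_bytes(const struct toy_bio *head,
				  unsigned int failfast_mask)
{
	unsigned int ff = head->bi_rw & failfast_mask;
	unsigned int bytes = 0;
	const struct toy_bio *bio;

	/* Count only bios at least as eager to fail as the first one. */
	for (bio = head; bio; bio = bio->bi_next) {
		if ((bio->bi_rw & ff) != ff)
			break;
		bytes += bio->bi_size;
	}
	return bytes;
}

With a 4 KiB failfast bio followed by a 4 KiB non-failfast bio, toy_err_bytes() returns 4096: only the first bio may be failed immediately, the rest stays behind the failure boundary.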
@@ -1995,6 +2049,12 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 	if (blk_fs_request(req) || blk_discard_rq(req))
 		req->__sector += total_bytes >> 9;
 
+	/* mixed attributes always follow the first bio */
+	if (req->cmd_flags & REQ_MIXED_MERGE) {
+		req->cmd_flags &= ~REQ_FAILFAST_MASK;
+		req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
+	}
+
 	/*
 	 * If total number of sectors is less than the first segment
 	 * size, something has gone terribly wrong.
@@ -2174,6 +2234,25 @@ bool blk_end_request_cur(struct request *rq, int error)
 EXPORT_SYMBOL(blk_end_request_cur);
 
 /**
+ * blk_end_request_err - Finish a request till the next failure boundary.
+ * @rq: the request to finish till the next failure boundary for
+ * @error: must be negative errno
+ *
+ * Description:
+ *     Complete @rq till the next failure boundary.
+ *
+ * Return:
+ *     %false - we are done with this request
+ *     %true  - still buffers pending for this request
+ */
+bool blk_end_request_err(struct request *rq, int error)
+{
+	WARN_ON(error >= 0);
+	return blk_end_request(rq, error, blk_rq_err_bytes(rq));
+}
+EXPORT_SYMBOL_GPL(blk_end_request_err);
+
+/**
  * __blk_end_request - Helper function for drivers to complete the request.
  * @rq: the request being processed
  * @error: %0 for success, < %0 for error
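For illustration, a driver completion path might use the new helper roughly like this (hypothetical driver code, not part of this patch): on error it fails only the failfast portion, and the return value says whether anything past the failure boundary is still pending.

/* Hypothetical driver completion -- sketch only. */
static void mydrv_complete(struct request *rq, int error)
{
	if (!error) {
		blk_end_request_all(rq, 0);	/* everything succeeded */
		return;
	}

	/*
	 * Fail just the bios up to the next failure boundary; returns
	 * true while buffers past that boundary are still pending.
	 */
	if (blk_end_request_err(rq, error))
		pr_debug("mydrv: tail of request %p left for retry\n", rq);
}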
@@ -2232,6 +2311,26 @@ bool __blk_end_request_cur(struct request *rq, int error)
 }
 EXPORT_SYMBOL(__blk_end_request_cur);
 
+/**
+ * __blk_end_request_err - Finish a request till the next failure boundary.
+ * @rq: the request to finish till the next failure boundary for
+ * @error: must be negative errno
+ *
+ * Description:
+ *     Complete @rq till the next failure boundary.  Must be called
+ *     with queue lock held.
+ *
+ * Return:
+ *     %false - we are done with this request
+ *     %true  - still buffers pending for this request
+ */
+bool __blk_end_request_err(struct request *rq, int error)
+{
+	WARN_ON(error >= 0);
+	return __blk_end_request(rq, error, blk_rq_err_bytes(rq));
+}
+EXPORT_SYMBOL_GPL(__blk_end_request_err);
+
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		     struct bio *bio)
 {
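The locked variant pairs naturally with requeueing the unfailed remainder. A hedged sketch of that pattern, with a hypothetical driver function name; it only assumes that the partially completed request may legally be requeued by its driver:

/* Sketch only: fail the failfast head, requeue the rest for a normal retry. */
static void mydrv_fail_and_requeue(struct request_queue *q,
				   struct request *rq, int error)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	/* __blk_end_request_err() requires the queue lock to be held. */
	if (__blk_end_request_err(rq, error))
		blk_requeue_request(q, rq);	/* retry the non-failfast tail */
	spin_unlock_irqrestore(q->queue_lock, flags);
}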
diff --git a/block/blk-merge.c b/block/blk-merge.c
index e1999679a4d5..7c9ca01baa45 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -311,6 +311,36 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 	return 1;
 }
 
+/**
+ * blk_rq_set_mixed_merge - mark a request as mixed merge
+ * @rq: request to mark as mixed merge
+ *
+ * Description:
+ *     @rq is about to be mixed merged.  Make sure the attributes
+ *     which can be mixed are set in each bio and mark @rq as mixed
+ *     merged.
+ */
+void blk_rq_set_mixed_merge(struct request *rq)
+{
+	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
+	struct bio *bio;
+
+	if (rq->cmd_flags & REQ_MIXED_MERGE)
+		return;
+
+	/*
+	 * @rq will no longer represent mixable attributes for all the
+	 * contained bios.  It will just track those of the first one.
+	 * Distribute the attributes to each bio.
+	 */
+	for (bio = rq->bio; bio; bio = bio->bi_next) {
+		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
+			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
+		bio->bi_rw |= ff;
+	}
+	rq->cmd_flags |= REQ_MIXED_MERGE;
+}
+
 static void blk_account_io_merge(struct request *req)
 {
 	if (blk_do_io_stat(req)) {
@@ -366,6 +396,19 @@ static int attempt_merge(struct request_queue *q, struct request *req,
 		return 0;
 
 	/*
+	 * If failfast settings disagree or any of the two is already
+	 * a mixed merge, mark both as mixed before proceeding.  This
+	 * makes sure that all involved bios have mixable attributes
+	 * set properly.
+	 */
+	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
+	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
+	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
+		blk_rq_set_mixed_merge(req);
+		blk_rq_set_mixed_merge(next);
+	}
+
+	/*
 	 * At this point we have either done a back merge
 	 * or front merge. We need the smaller start_time of
 	 * the merged requests to be the current request
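What the marking actually does to the bios can be pictured with the same toy structures as the earlier sketch (illustration only, placeholder flag): every bio inherits the request's current failfast bits, so once two requests are joined the merged request can simply mirror whatever bio happens to sit at its head.

/* Toy model of distributing the failfast attribute -- not kernel code. */
#define TOY_MIXED_MERGE	(1u << 31)	/* placeholder flag bit */

struct toy_req {
	unsigned int	cmd_flags;
	struct toy_bio	*bio;		/* struct toy_bio as sketched earlier */
};

static void toy_set_mixed_merge(struct toy_req *rq, unsigned int failfast_mask)
{
	unsigned int ff = rq->cmd_flags & failfast_mask;
	struct toy_bio *bio;

	if (rq->cmd_flags & TOY_MIXED_MERGE)
		return;			/* bits already distributed */

	for (bio = rq->bio; bio; bio = bio->bi_next)
		bio->bi_rw |= ff;	/* each bio now carries the bits itself */

	rq->cmd_flags |= TOY_MIXED_MERGE;
}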
diff --git a/block/blk.h b/block/blk.h
index 3fae6add5430..5ee3d7e72feb 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -104,6 +104,7 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 int attempt_back_merge(struct request_queue *q, struct request *rq);
 int attempt_front_merge(struct request_queue *q, struct request *rq);
 void blk_recalc_rq_segments(struct request *rq);
+void blk_rq_set_mixed_merge(struct request *rq);
 
 void blk_queue_congestion_threshold(struct request_queue *q);
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c3015736d814..650b6a9cb679 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -115,6 +115,7 @@ enum rq_flag_bits {
 	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
 	__REQ_NOIDLE,		/* Don't anticipate more IO after this one */
 	__REQ_IO_STAT,		/* account I/O stat */
+	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
 	__REQ_NR_BITS,		/* stops here */
 };
 
@@ -143,6 +144,7 @@ enum rq_flag_bits {
 #define REQ_INTEGRITY		(1 << __REQ_INTEGRITY)
 #define REQ_NOIDLE		(1 << __REQ_NOIDLE)
 #define REQ_IO_STAT		(1 << __REQ_IO_STAT)
+#define REQ_MIXED_MERGE		(1 << __REQ_MIXED_MERGE)
 
 #define REQ_FAILFAST_MASK	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | \
 				 REQ_FAILFAST_DRIVER)
@@ -832,11 +834,13 @@ static inline void blk_run_address_space(struct address_space *mapping)
 }
 
 /*
- * blk_rq_pos()		: the current sector
- * blk_rq_bytes()	: bytes left in the entire request
- * blk_rq_cur_bytes()	: bytes left in the current segment
- * blk_rq_sectors()	: sectors left in the entire request
- * blk_rq_cur_sectors()	: sectors left in the current segment
+ * blk_rq_pos()			: the current sector
+ * blk_rq_bytes()		: bytes left in the entire request
+ * blk_rq_cur_bytes()		: bytes left in the current segment
+ * blk_rq_err_bytes()		: bytes left till the next error boundary
+ * blk_rq_sectors()		: sectors left in the entire request
+ * blk_rq_cur_sectors()		: sectors left in the current segment
+ * blk_rq_err_sectors()		: sectors left till the next error boundary
  */
 static inline sector_t blk_rq_pos(const struct request *rq)
 {
@@ -853,6 +857,8 @@ static inline int blk_rq_cur_bytes(const struct request *rq)
 	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
 }
 
+extern unsigned int blk_rq_err_bytes(const struct request *rq);
+
 static inline unsigned int blk_rq_sectors(const struct request *rq)
 {
 	return blk_rq_bytes(rq) >> 9;
@@ -863,6 +869,11 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
 	return blk_rq_cur_bytes(rq) >> 9;
 }
 
+static inline unsigned int blk_rq_err_sectors(const struct request *rq)
+{
+	return blk_rq_err_bytes(rq) >> 9;
+}
+
 /*
  * Request issue related functions.
  */
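As a quick illustration of the accessor pair (hypothetical helper, not part of the patch): when the error boundary covers the whole request, a driver can fail it outright; otherwise only the leading part is failfast.

/* Hypothetical helper using the new accessors -- sketch only. */
static bool mydrv_whole_request_is_failfast(const struct request *rq)
{
	/* Equal counts mean no bio lies past the failure boundary. */
	return blk_rq_err_sectors(rq) == blk_rq_sectors(rq);
}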
@@ -889,10 +900,12 @@ extern bool blk_end_request(struct request *rq, int error,
 			    unsigned int nr_bytes);
 extern void blk_end_request_all(struct request *rq, int error);
 extern bool blk_end_request_cur(struct request *rq, int error);
+extern bool blk_end_request_err(struct request *rq, int error);
 extern bool __blk_end_request(struct request *rq, int error,
 			      unsigned int nr_bytes);
 extern void __blk_end_request_all(struct request *rq, int error);
 extern bool __blk_end_request_cur(struct request *rq, int error);
+extern bool __blk_end_request_err(struct request *rq, int error);
 
 extern void blk_complete_request(struct request *);
 extern void __blk_complete_request(struct request *);