Diffstat (limited to 'block/blk-core.c')
-rw-r--r--  block/blk-core.c  99
1 file changed, 99 insertions(+), 0 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 4daae1ee2b23..c822239bcc9d 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1157,6 +1157,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	const unsigned short prio = bio_prio(bio);
 	const int sync = bio_sync(bio);
 	const int unplug = bio_unplug(bio);
+	const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
 	int rw_flags;
 
 	if (bio_barrier(bio) && bio_has_data(bio) &&
@@ -1186,6 +1187,9 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
 		trace_block_bio_backmerge(q, bio);
 
+		if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
+			blk_rq_set_mixed_merge(req);
+
 		req->biotail->bi_next = bio;
 		req->biotail = bio;
 		req->__data_len += bytes;
@@ -1205,6 +1209,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
 		trace_block_bio_frontmerge(q, bio);
 
+		if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) {
+			blk_rq_set_mixed_merge(req);
+			req->cmd_flags &= ~REQ_FAILFAST_MASK;
+			req->cmd_flags |= ff;
+		}
+
 		bio->bi_next = req->bio;
 		req->bio = bio;
 
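Both merge paths above call blk_rq_set_mixed_merge(), which this file does not define; in the same series it is added to block/blk-merge.c. A hedged sketch of what such a helper has to do, inferred from its callers here: a request normally carries the attributes of its first bio, so before bios with different failfast bits are allowed to coexist, the request-wide bits must be pushed down to every bio already in the request, and the request flagged as mixed so the per-bio flags become authoritative from then on.

	/*
	 * Sketch only, not verbatim from the patch: the invariant a
	 * blk_rq_set_mixed_merge()-style helper must establish.
	 */
	static void sketch_set_mixed_merge(struct request *rq)
	{
		unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
		struct bio *bio;

		if (rq->cmd_flags & REQ_MIXED_MERGE)
			return;		/* per-bio flags already authoritative */

		/* distribute the request-wide failfast bits to each bio */
		for (bio = rq->bio; bio; bio = bio->bi_next)
			bio->bi_rw |= ff;

		rq->cmd_flags |= REQ_MIXED_MERGE;
	}

Note the asymmetry between the two hunks: a back merge only marks the request, since the first bio and hence the request's flags are unchanged, while a front merge also has to adopt the new head bio's failfast bits into req->cmd_flags.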
@@ -1649,6 +1659,50 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 }
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
 
+/**
+ * blk_rq_err_bytes - determine the number of bytes till the next failure boundary
+ * @rq: request to examine
+ *
+ * Description:
+ *     A request could be a merge of IOs which require different failure
+ *     handling.  This function determines the number of bytes which
+ *     can be failed from the beginning of the request without
+ *     crossing into an area which needs to be retried further.
+ *
+ * Return:
+ *     The number of bytes to fail.
+ *
+ * Context:
+ *     queue_lock must be held.
+ */
+unsigned int blk_rq_err_bytes(const struct request *rq)
+{
+	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
+	unsigned int bytes = 0;
+	struct bio *bio;
+
+	if (!(rq->cmd_flags & REQ_MIXED_MERGE))
+		return blk_rq_bytes(rq);
+
+	/*
+	 * Currently the only 'mixing' which can happen is between
+	 * different failfast types.  We can safely fail portions
+	 * which have all the failfast bits that the first one has -
+	 * the ones which are at least as eager to fail as the first
+	 * one.
+	 */
+	for (bio = rq->bio; bio; bio = bio->bi_next) {
+		if ((bio->bi_rw & ff) != ff)
+			break;
+		bytes += bio->bi_size;
+	}
+
+	/* this could lead to an infinite loop */
+	BUG_ON(blk_rq_bytes(rq) && !bytes);
+	return bytes;
+}
+EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
+
 static void blk_account_io_completion(struct request *req, unsigned int bytes)
 {
 	if (blk_do_io_stat(req)) {
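A hedged usage sketch (the helper name and flow are hypothetical, not from this patch): an error path can fail just the failfast head of a mixed request and requeue the rest for normal retries. queue_lock must be held, matching the Context requirement documented above.

	/*
	 * Fail the failfast portion of @rq with @error and requeue the
	 * remainder.  Caller holds q->queue_lock.  (Illustrative only.)
	 */
	static void example_fail_failfast_head(struct request_queue *q,
					       struct request *rq, int error)
	{
		/* __blk_end_request() returns true while bios remain */
		if (__blk_end_request(rq, error, blk_rq_err_bytes(rq)))
			blk_requeue_request(q, rq);
	}

For a request without REQ_MIXED_MERGE this degenerates to failing the whole request, since blk_rq_err_bytes() returns blk_rq_bytes().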
@@ -1995,6 +2049,12 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 	if (blk_fs_request(req) || blk_discard_rq(req))
 		req->__sector += total_bytes >> 9;
 
+	/* mixed attributes always follow the first bio */
+	if (req->cmd_flags & REQ_MIXED_MERGE) {
+		req->cmd_flags &= ~REQ_FAILFAST_MASK;
+		req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
+	}
+
 	/*
 	 * If total number of sectors is less than the first segment
 	 * size, something has gone terribly wrong.
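Why recompute: once the leading failfast bios of a mixed request complete, req->bio points at a bio that may be less eager to fail, and keeping the old bits would fail retry-worthy IO fast. For instance, if rq->bio is bio0 (failfast) followed by bio1 (no failfast bits), the request must stop being failfast as soon as bio0 retires. The hunk above maintains the invariant that the request's failfast policy always matches its current head bio; a hypothetical checker (not in the patch) makes that explicit:

	/*
	 * Illustrative helper, not part of this patch: after a partial
	 * completion, a mixed-merge request's failfast bits must match
	 * those of its new head bio.
	 */
	static inline bool example_failfast_in_sync(const struct request *rq)
	{
		if (!(rq->cmd_flags & REQ_MIXED_MERGE))
			return true;
		return (rq->cmd_flags & REQ_FAILFAST_MASK) ==
		       (rq->bio->bi_rw & REQ_FAILFAST_MASK);
	}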
@@ -2174,6 +2234,25 @@ bool blk_end_request_cur(struct request *rq, int error)
 EXPORT_SYMBOL(blk_end_request_cur);
 
 /**
+ * blk_end_request_err - Finish a request till the next failure boundary.
+ * @rq: the request to finish till the next failure boundary for
+ * @error: must be negative errno
+ *
+ * Description:
+ *     Complete @rq till the next failure boundary.
+ *
+ * Return:
+ *     %false - we are done with this request
+ *     %true  - still buffers pending for this request
+ */
+bool blk_end_request_err(struct request *rq, int error)
+{
+	WARN_ON(error >= 0);
+	return blk_end_request(rq, error, blk_rq_err_bytes(rq));
+}
+EXPORT_SYMBOL_GPL(blk_end_request_err);
+
+/**
  * __blk_end_request - Helper function for drivers to complete the request.
  * @rq: the request being processed
  * @error: %0 for success, < %0 for error
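A hedged example of a caller (driver and names hypothetical; this patch itself adds no users): a completion path that does not hold queue_lock can error just the failfast portion and leave the retry-worthy bios pending for resubmission.

	/* Complete @rq from a context without q->queue_lock held. */
	static void example_complete_request(struct request *rq, int error)
	{
		if (error) {
			/* true: bios that still want retries remain in @rq;
			 * the caller would now resubmit or requeue them */
			if (blk_end_request_err(rq, error))
				return;
			/* false: the whole request has been completed */
		} else {
			blk_end_request_all(rq, 0);
		}
	}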
@@ -2232,6 +2311,26 @@ bool __blk_end_request_cur(struct request *rq, int error)
 }
 EXPORT_SYMBOL(__blk_end_request_cur);
 
+/**
+ * __blk_end_request_err - Finish a request till the next failure boundary.
+ * @rq: the request to finish till the next failure boundary for
+ * @error: must be negative errno
+ *
+ * Description:
+ *     Complete @rq till the next failure boundary.  Must be called
+ *     with queue lock held.
+ *
+ * Return:
+ *     %false - we are done with this request
+ *     %true  - still buffers pending for this request
+ */
+bool __blk_end_request_err(struct request *rq, int error)
+{
+	WARN_ON(error >= 0);
+	return __blk_end_request(rq, error, blk_rq_err_bytes(rq));
+}
+EXPORT_SYMBOL_GPL(__blk_end_request_err);
+
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		     struct bio *bio)
 {
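This is the same flow as blk_end_request_err() for callers already holding queue_lock, folding the explicit blk_rq_err_bytes() arithmetic from the earlier sketch into one call. An illustrative pairing with blk_requeue_request(), which also expects the lock (helper name hypothetical):

	/* Called with q->queue_lock held, e.g. from a request_fn path. */
	static void example_fail_and_requeue(struct request_queue *q,
					     struct request *rq, int error)
	{
		/* error the failfast head; requeue bios that want retries */
		if (__blk_end_request_err(rq, error))
			blk_requeue_request(q, rq);
	}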