aboutsummaryrefslogtreecommitdiffstats
path: root/block
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2009-07-03 04:48:16 -0400
committerJens Axboe <jens.axboe@oracle.com>2009-09-11 08:33:27 -0400
commita82afdfcb8c0df09776b6458af6b68fc58b2e87b (patch)
treed53910e4c2e9d1502cf2a7ce67a67cc54decb7fa /block
parent0d03d59d9b31cd1e33b7e46a80b6fef66244b1f2 (diff)
block: use the same failfast bits for bio and request
bio and request use the same set of failfast bits. This patch makes the following changes to simplify things.

* enumify BIO_RW* bits and reorder bits such that BIO_RW_FAILFAST_* bits coincide with __REQ_FAILFAST_* bits.

* The above pushes BIO_RW_AHEAD out of sync with __REQ_FAILFAST_DEV but the matching is useless anyway. init_request_from_bio() is responsible for setting FAILFAST bits on FS requests and non-FS requests never use BIO_RW_AHEAD. Drop the code and comment from blk_rq_bio_prep().

* Define REQ_FAILFAST_MASK which is OR of all FAILFAST bits and simplify FAILFAST flags handling in init_request_from_bio().

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block')
-rw-r--r--block/blk-core.c19
1 file changed, 7 insertions, 12 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index e3299a77a0d8..4daae1ee2b23 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1111,17 +1111,13 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	req->cmd_type = REQ_TYPE_FS;
 
 	/*
-	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
+	 * Inherit FAILFAST from bio (for read-ahead, and explicit
+	 * FAILFAST).  FAILFAST flags are identical for req and bio.
 	 */
-	if (bio_rw_ahead(bio))
-		req->cmd_flags |= (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
-				   REQ_FAILFAST_DRIVER);
-	if (bio_failfast_dev(bio))
-		req->cmd_flags |= REQ_FAILFAST_DEV;
-	if (bio_failfast_transport(bio))
-		req->cmd_flags |= REQ_FAILFAST_TRANSPORT;
-	if (bio_failfast_driver(bio))
-		req->cmd_flags |= REQ_FAILFAST_DRIVER;
+	if (bio_rw_ahead(bio))
+		req->cmd_flags |= REQ_FAILFAST_MASK;
+	else
+		req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
 
 	if (unlikely(bio_discard(bio))) {
 		req->cmd_flags |= REQ_DISCARD;
@@ -2239,9 +2235,8 @@ EXPORT_SYMBOL(__blk_end_request_cur);
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		     struct bio *bio)
 {
-	/* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw, and
-	   we want BIO_RW_AHEAD (bit 1) to imply REQ_FAILFAST (bit 1). */
-	rq->cmd_flags |= (bio->bi_rw & 3);
+	/* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
+	rq->cmd_flags |= bio->bi_rw & REQ_RW;
 
 	if (bio_has_data(bio)) {
 		rq->nr_phys_segments = bio_phys_segments(q, bio);