author | Jens Axboe <jens.axboe@oracle.com> | 2009-09-11 08:32:04 -0400
committer | Jens Axboe <jens.axboe@oracle.com> | 2009-09-11 08:33:31 -0400
commit | 1f98a13f623e0ef666690a18c1250335fc6d7ef1 (patch)
tree | 15ca2dddffaa18a0d1844957f4f8cc707cbb8117 /block/blk-core.c
parent | e7e503aedb1f4d165081cb8d47a58c38f80f0cb4 (diff)
bio: first step in sanitizing the bio->bi_rw flag testing
Get rid of any functions that test for these bits and make callers
use bio_rw_flagged() directly. Then it is at least directly apparent
what variable and flag they check.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
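For context: this page's diffstat is limited to block/blk-core.c, but the helper the callers switch to, bio_rw_flagged(), and the BIO_RW_* flag enum live in include/linux/bio.h as part of the same commit. A rough sketch of that side of the change, reconstructed here for reference rather than quoted from the patch:

```c
/*
 * Approximate shape of the include/linux/bio.h side of this commit,
 * reconstructed for context -- see the real header for the exact
 * flag list and ordering.
 */
enum bio_rw_flags {
	BIO_RW,
	BIO_RW_FAILFAST_DEV,
	BIO_RW_FAILFAST_TRANSPORT,
	BIO_RW_FAILFAST_DRIVER,
	/* above flags must match REQ_* */
	BIO_RW_AHEAD,
	BIO_RW_BARRIER,
	BIO_RW_SYNCIO,
	BIO_RW_UNPLUG,
	BIO_RW_META,
	BIO_RW_DISCARD,
	BIO_RW_NOIDLE,
};

/* Test a single BIO_RW_* bit in bio->bi_rw. */
static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
{
	return (bio->bi_rw & (1 << flag)) != 0;
}
```

Compared with the removed one-off wrappers (e.g. `#define bio_barrier(bio) ((bio)->bi_rw & (1 << BIO_RW_BARRIER))`), a call like `bio_rw_flagged(bio, BIO_RW_BARRIER)` spells out both the variable and the flag being tested at the call site, which is the point of the cleanup.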
Diffstat (limited to 'block/blk-core.c')
-rw-r--r-- | block/blk-core.c | 25
1 file changed, 13 insertions, 12 deletions
```diff
diff --git a/block/blk-core.c b/block/blk-core.c
index c822239bcc9d..52559715cb90 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1114,24 +1114,24 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	 * Inherit FAILFAST from bio (for read-ahead, and explicit
 	 * FAILFAST). FAILFAST flags are identical for req and bio.
 	 */
-	if (bio_rw_ahead(bio))
+	if (bio_rw_flagged(bio, BIO_RW_AHEAD))
 		req->cmd_flags |= REQ_FAILFAST_MASK;
 	else
 		req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
 
-	if (unlikely(bio_discard(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) {
 		req->cmd_flags |= REQ_DISCARD;
-		if (bio_barrier(bio))
+		if (bio_rw_flagged(bio, BIO_RW_BARRIER))
 			req->cmd_flags |= REQ_SOFTBARRIER;
 		req->q->prepare_discard_fn(req->q, req);
-	} else if (unlikely(bio_barrier(bio)))
+	} else if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)))
 		req->cmd_flags |= REQ_HARDBARRIER;
 
-	if (bio_sync(bio))
+	if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
 		req->cmd_flags |= REQ_RW_SYNC;
-	if (bio_rw_meta(bio))
+	if (bio_rw_flagged(bio, BIO_RW_META))
 		req->cmd_flags |= REQ_RW_META;
-	if (bio_noidle(bio))
+	if (bio_rw_flagged(bio, BIO_RW_NOIDLE))
 		req->cmd_flags |= REQ_NOIDLE;
 
 	req->errors = 0;
@@ -1155,12 +1155,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	int el_ret;
 	unsigned int bytes = bio->bi_size;
 	const unsigned short prio = bio_prio(bio);
-	const int sync = bio_sync(bio);
-	const int unplug = bio_unplug(bio);
+	const bool sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+	const bool unplug = bio_rw_flagged(bio, BIO_RW_UNPLUG);
 	const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
 	int rw_flags;
 
-	if (bio_barrier(bio) && bio_has_data(bio) &&
+	if (bio_rw_flagged(bio, BIO_RW_BARRIER) && bio_has_data(bio) &&
 	    (q->next_ordered == QUEUE_ORDERED_NONE)) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
@@ -1174,7 +1174,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
 	spin_lock_irq(q->queue_lock);
 
-	if (unlikely(bio_barrier(bio)) || elv_queue_empty(q))
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)) || elv_queue_empty(q))
 		goto get_rq;
 
 	el_ret = elv_merge(q, &req, bio);
@@ -1470,7 +1470,8 @@ static inline void __generic_make_request(struct bio *bio)
 	if (bio_check_eod(bio, nr_sectors))
 		goto end_io;
 
-	if (bio_discard(bio) && !q->prepare_discard_fn) {
+	if (bio_rw_flagged(bio, BIO_RW_DISCARD) &&
+	    !q->prepare_discard_fn) {
 		err = -EOPNOTSUPP;
 		goto end_io;
 	}
```