author    Jens Axboe <jens.axboe@oracle.com>  2009-09-11 08:32:04 -0400
committer Jens Axboe <jens.axboe@oracle.com>  2009-09-11 08:33:31 -0400
commit    1f98a13f623e0ef666690a18c1250335fc6d7ef1 (patch)
tree      15ca2dddffaa18a0d1844957f4f8cc707cbb8117 /block
parent    e7e503aedb1f4d165081cb8d47a58c38f80f0cb4 (diff)
bio: first step in sanitizing the bio->bi_rw flag testing
Get rid of any functions that test for these bits and make callers use
bio_rw_flagged() directly. Then it is at least directly apparent what
variable and flag they check.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
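Note: the diffstat below is limited to block/, so the new helper itself is not
part of this view (it is added outside block/, in include/linux/bio.h). As a
rough sketch of what the callers are being converted to, assuming the BIO_RW_*
symbols are bit numbers within bio->bi_rw:

	/*
	 * Sketch only -- not the patch text shown here. Tests a single
	 * BIO_RW_* bit in the bio's rw flag word.
	 */
	static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
	{
		return (bio->bi_rw & (1 << flag)) != 0;
	}

With that, a call like bio_sync(bio) becomes bio_rw_flagged(bio, BIO_RW_SYNCIO),
which is the pattern applied throughout the hunks below.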
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c      25
-rw-r--r--  block/cfq-iosched.c    2
-rw-r--r--  block/elevator.c       3
3 files changed, 16 insertions(+), 14 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index c822239bcc9d..52559715cb90 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1114,24 +1114,24 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	 * Inherit FAILFAST from bio (for read-ahead, and explicit
 	 * FAILFAST). FAILFAST flags are identical for req and bio.
 	 */
-	if (bio_rw_ahead(bio))
+	if (bio_rw_flagged(bio, BIO_RW_AHEAD))
 		req->cmd_flags |= REQ_FAILFAST_MASK;
 	else
 		req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
 
-	if (unlikely(bio_discard(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) {
 		req->cmd_flags |= REQ_DISCARD;
-		if (bio_barrier(bio))
+		if (bio_rw_flagged(bio, BIO_RW_BARRIER))
 			req->cmd_flags |= REQ_SOFTBARRIER;
 		req->q->prepare_discard_fn(req->q, req);
-	} else if (unlikely(bio_barrier(bio)))
+	} else if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)))
 		req->cmd_flags |= REQ_HARDBARRIER;
 
-	if (bio_sync(bio))
+	if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
 		req->cmd_flags |= REQ_RW_SYNC;
-	if (bio_rw_meta(bio))
+	if (bio_rw_flagged(bio, BIO_RW_META))
 		req->cmd_flags |= REQ_RW_META;
-	if (bio_noidle(bio))
+	if (bio_rw_flagged(bio, BIO_RW_NOIDLE))
 		req->cmd_flags |= REQ_NOIDLE;
 
 	req->errors = 0;
@@ -1155,12 +1155,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	int el_ret;
 	unsigned int bytes = bio->bi_size;
 	const unsigned short prio = bio_prio(bio);
-	const int sync = bio_sync(bio);
-	const int unplug = bio_unplug(bio);
+	const bool sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+	const bool unplug = bio_rw_flagged(bio, BIO_RW_UNPLUG);
 	const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
 	int rw_flags;
 
-	if (bio_barrier(bio) && bio_has_data(bio) &&
+	if (bio_rw_flagged(bio, BIO_RW_BARRIER) && bio_has_data(bio) &&
 	    (q->next_ordered == QUEUE_ORDERED_NONE)) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
@@ -1174,7 +1174,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
 	spin_lock_irq(q->queue_lock);
 
-	if (unlikely(bio_barrier(bio)) || elv_queue_empty(q))
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)) || elv_queue_empty(q))
 		goto get_rq;
 
 	el_ret = elv_merge(q, &req, bio);
@@ -1470,7 +1470,8 @@ static inline void __generic_make_request(struct bio *bio)
 	if (bio_check_eod(bio, nr_sectors))
 		goto end_io;
 
-	if (bio_discard(bio) && !q->prepare_discard_fn) {
+	if (bio_rw_flagged(bio, BIO_RW_DISCARD) &&
+	    !q->prepare_discard_fn) {
 		err = -EOPNOTSUPP;
 		goto end_io;
 	}
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ca0d7e71324b..9e6d0af6c990 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -257,7 +257,7 @@ static inline void cic_set_cfqq(struct cfq_io_context *cic,
  */
 static inline int cfq_bio_sync(struct bio *bio)
 {
-	if (bio_data_dir(bio) == READ || bio_sync(bio))
+	if (bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO))
 		return 1;
 
 	return 0;
diff --git a/block/elevator.c b/block/elevator.c
index ca861927ba41..1975b619c86d 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -79,7 +79,8 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 	/*
 	 * Don't merge file system requests and discard requests
 	 */
-	if (bio_discard(bio) != bio_discard(rq->bio))
+	if (bio_rw_flagged(bio, BIO_RW_DISCARD) !=
+	    bio_rw_flagged(rq->bio, BIO_RW_DISCARD))
 		return 0;
 
 	/*