 47 files changed, 242 insertions(+), 289 deletions(-)
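Every hunk below applies the same mechanical conversion: bio flags move out of the BIO_RW_* bit-number namespace (tested via bio_rw_flagged() or open-coded shifts) into the REQ_* mask namespace already used by struct request, and the doubled names (REQ_RW, REQ_RW_SYNC, REQ_RW_META) collapse into single ones (REQ_WRITE, REQ_SYNC, REQ_META). A minimal userspace sketch of the before/after idiom, using illustrative flag values rather than the kernel's real ones:

#include <stdio.h>

/* Stand-in definitions for illustration only; the real values live in the
 * kernel headers.  Old scheme: BIO_RW_* are bit numbers that the
 * bio_rw_flagged() helper shifts for you.  New scheme: REQ_* are ready-made
 * masks shared by bio->bi_rw and rq->cmd_flags, so a plain AND suffices. */
enum { BIO_RW_SYNCIO = 4 };		/* old: a bit number */
#define REQ_SYNC (1UL << 4)		/* new: a mask */

static int bio_rw_flagged(unsigned long bi_rw, int flag)
{
	return (bi_rw & (1UL << flag)) != 0;
}

int main(void)
{
	unsigned long bi_rw = REQ_SYNC;
	unsigned long cmd_flags = 0;

	/* old idiom: helper plus a per-flag translation step */
	if (bio_rw_flagged(bi_rw, BIO_RW_SYNCIO))
		cmd_flags |= REQ_SYNC;	/* was REQ_RW_SYNC */

	/* new idiom: one namespace, direct mask test, no translation */
	if (bi_rw & REQ_SYNC)
		cmd_flags |= REQ_SYNC;

	printf("cmd_flags = %#lx\n", cmd_flags);
	return 0;
}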
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 74e404393172..7c6f4a714687 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -203,7 +203,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 	/* initialize proxy request and queue it */
 	blk_rq_init(q, rq);
 	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
-		rq->cmd_flags |= REQ_RW;
+		rq->cmd_flags |= REQ_WRITE;
 	if (q->ordered & QUEUE_ORDERED_DO_FUA)
 		rq->cmd_flags |= REQ_FUA;
 	init_request_from_bio(rq, q->orig_bar_rq->bio);
diff --git a/block/blk-core.c b/block/blk-core.c
index dca43a31e725..66c3cfe94d0a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1140,25 +1140,9 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	req->cpu = bio->bi_comp_cpu;
 	req->cmd_type = REQ_TYPE_FS;
 
-	/*
-	 * Inherit FAILFAST from bio (for read-ahead, and explicit
-	 * FAILFAST).  FAILFAST flags are identical for req and bio.
-	 */
-	if (bio_rw_flagged(bio, BIO_RW_AHEAD))
+	req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
+	if (bio->bi_rw & REQ_RAHEAD)
 		req->cmd_flags |= REQ_FAILFAST_MASK;
-	else
-		req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
-
-	if (bio_rw_flagged(bio, BIO_RW_DISCARD))
-		req->cmd_flags |= REQ_DISCARD;
-	if (bio_rw_flagged(bio, BIO_RW_BARRIER))
-		req->cmd_flags |= REQ_HARDBARRIER;
-	if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
-		req->cmd_flags |= REQ_RW_SYNC;
-	if (bio_rw_flagged(bio, BIO_RW_META))
-		req->cmd_flags |= REQ_RW_META;
-	if (bio_rw_flagged(bio, BIO_RW_NOIDLE))
-		req->cmd_flags |= REQ_NOIDLE;
 
 	req->errors = 0;
 	req->__sector = bio->bi_sector;
@@ -1181,12 +1165,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	int el_ret;
 	unsigned int bytes = bio->bi_size;
 	const unsigned short prio = bio_prio(bio);
-	const bool sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
-	const bool unplug = bio_rw_flagged(bio, BIO_RW_UNPLUG);
+	const bool sync = (bio->bi_rw & REQ_SYNC);
+	const bool unplug = (bio->bi_rw & REQ_UNPLUG);
 	const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
 	int rw_flags;
 
-	if (bio_rw_flagged(bio, BIO_RW_BARRIER) &&
+	if ((bio->bi_rw & REQ_HARDBARRIER) &&
 	    (q->next_ordered == QUEUE_ORDERED_NONE)) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
@@ -1200,7 +1184,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
 	spin_lock_irq(q->queue_lock);
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)) || elv_queue_empty(q))
+	if (unlikely((bio->bi_rw & REQ_HARDBARRIER)) || elv_queue_empty(q))
 		goto get_rq;
 
 	el_ret = elv_merge(q, &req, bio);
@@ -1275,7 +1259,7 @@ get_rq:
 	 */
 	rw_flags = bio_data_dir(bio);
 	if (sync)
-		rw_flags |= REQ_RW_SYNC;
+		rw_flags |= REQ_SYNC;
 
 	/*
 	 * Grab a free request. This is might sleep but can not fail.
@@ -1464,7 +1448,7 @@ static inline void __generic_make_request(struct bio *bio)
 		goto end_io;
 	}
 
-	if (unlikely(!bio_rw_flagged(bio, BIO_RW_DISCARD) &&
+	if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
 		     nr_sectors > queue_max_hw_sectors(q))) {
 		printk(KERN_ERR "bio too big device %s (%u > %u)\n",
 		       bdevname(bio->bi_bdev, b),
@@ -1497,8 +1481,7 @@ static inline void __generic_make_request(struct bio *bio)
 	if (bio_check_eod(bio, nr_sectors))
 		goto end_io;
 
-	if (bio_rw_flagged(bio, BIO_RW_DISCARD) &&
-	    !blk_queue_discard(q)) {
+	if ((bio->bi_rw & REQ_DISCARD) && !blk_queue_discard(q)) {
 		err = -EOPNOTSUPP;
 		goto end_io;
 	}
@@ -2365,7 +2348,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		     struct bio *bio)
 {
 	/* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
-	rq->cmd_flags |= bio->bi_rw & REQ_RW;
+	rq->cmd_flags |= bio->bi_rw & REQ_WRITE;
 
 	if (bio_has_data(bio)) {
 		rq->nr_phys_segments = bio_phys_segments(q, bio);
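The init_request_from_bio() hunk above is the heart of the change: because bio and request flags now occupy the same bit positions, a single mask copy replaces the per-flag if-chain. A self-contained sketch of that inheritance step; the mask values here are made up for illustration, the real layout lives in the kernel's blk_types.h:

#include <assert.h>

#define REQ_FAILFAST_MASK 0x0000000eUL
#define REQ_DISCARD       0x00000010UL
#define REQ_HARDBARRIER   0x00000020UL
#define REQ_SYNC          0x00000040UL
#define REQ_META          0x00000080UL
#define REQ_NOIDLE        0x00000100UL
/* flags shared 1:1 between bio->bi_rw and rq->cmd_flags */
#define REQ_COMMON_MASK   (REQ_FAILFAST_MASK | REQ_DISCARD | \
			   REQ_HARDBARRIER | REQ_SYNC | REQ_META | REQ_NOIDLE)

int main(void)
{
	unsigned long bi_rw = REQ_SYNC | REQ_META;	/* flags set on a bio */
	unsigned long cmd_flags = 0;			/* flags for the request */

	/* one AND now does what five bio_rw_flagged() tests did before */
	cmd_flags |= bi_rw & REQ_COMMON_MASK;

	assert(cmd_flags == (REQ_SYNC | REQ_META));
	return 0;
}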
diff --git a/block/blk-map.c b/block/blk-map.c
index 9083cf0180cc..c65d7593f7f1 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -307,7 +307,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 		return PTR_ERR(bio);
 
 	if (rq_data_dir(rq) == WRITE)
-		bio->bi_rw |= (1 << BIO_RW);
+		bio->bi_rw |= REQ_WRITE;
 
 	if (do_copy)
 		rq->cmd_flags |= REQ_COPY_USER;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 87e4fb7d0e98..4852475521ea 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -180,7 +180,7 @@ new_segment:
 	}
 
 	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
-		if (rq->cmd_flags & REQ_RW)
+		if (rq->cmd_flags & REQ_WRITE)
 			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
 
 		sg->page_link &= ~0x02;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index d4edeb8fceb8..eb4086f7dfef 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -458,7 +458,7 @@ static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic)
  */
 static inline bool cfq_bio_sync(struct bio *bio)
 {
-	return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
+	return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
 }
 
 /*
@@ -646,10 +646,10 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2,
 		return rq1;
 	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
 		return rq2;
-	if ((rq1->cmd_flags & REQ_RW_META) && !(rq2->cmd_flags & REQ_RW_META))
+	if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
 		return rq1;
-	else if ((rq2->cmd_flags & REQ_RW_META) &&
-		 !(rq1->cmd_flags & REQ_RW_META))
+	else if ((rq2->cmd_flags & REQ_META) &&
+		 !(rq1->cmd_flags & REQ_META))
 		return rq2;
 
 	s1 = blk_rq_pos(rq1);
@@ -1485,7 +1485,7 @@ static void cfq_remove_request(struct request *rq)
 	cfqq->cfqd->rq_queued--;
 	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
 					rq_data_dir(rq), rq_is_sync(rq));
-	if (rq->cmd_flags & REQ_RW_META) {
+	if (rq->cmd_flags & REQ_META) {
 		WARN_ON(!cfqq->meta_pending);
 		cfqq->meta_pending--;
 	}
@@ -3177,7 +3177,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 	 * So both queues are sync. Let the new request get disk time if
 	 * it's a metadata request and the current queue is doing regular IO.
 	 */
-	if ((rq->cmd_flags & REQ_RW_META) && !cfqq->meta_pending)
+	if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending)
 		return true;
 
 	/*
@@ -3231,7 +3231,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	struct cfq_io_context *cic = RQ_CIC(rq);
 
 	cfqd->rq_queued++;
-	if (rq->cmd_flags & REQ_RW_META)
+	if (rq->cmd_flags & REQ_META)
 		cfqq->meta_pending++;
 
 	cfq_update_io_thinktime(cfqd, cic);
diff --git a/block/elevator.c b/block/elevator.c
index aa99b59c03d6..816a7c8d6394 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -79,8 +79,7 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 	/*
 	 * Don't merge file system requests and discard requests
 	 */
-	if (bio_rw_flagged(bio, BIO_RW_DISCARD) !=
-	    bio_rw_flagged(rq->bio, BIO_RW_DISCARD))
+	if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
 		return 0;
 
 	/*
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index a5c08b082edb..0a8cd3484791 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1114,7 +1114,7 @@ static int atapi_drain_needed(struct request *rq)
 	if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC))
 		return 0;
 
-	if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_RW))
+	if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_WRITE))
 		return 0;
 
 	return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 035cefe4045a..65deffde60ac 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -173,7 +173,7 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
 		BUG();
 		bio_endio(bio, -ENXIO);
 		return 0;
-	} else if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
+	} else if (bio->bi_rw & REQ_HARDBARRIER) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	} else if (bio->bi_io_vec == NULL) {
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index f1bf79d9bc0a..1b218c6b6820 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -340,7 +340,7 @@ static int brd_make_request(struct request_queue *q, struct bio *bio)
 			get_capacity(bdev->bd_disk))
 		goto out;
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) {
+	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
 		err = 0;
 		discard_from_brd(brd, sector, bio->bi_size);
 		goto out;
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index df018990c422..9400845d602e 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -79,8 +79,8 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 	md_io.error = 0;
 
 	if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags))
-		rw |= (1 << BIO_RW_BARRIER);
-	rw |= ((1<<BIO_RW_UNPLUG) | (1<<BIO_RW_SYNCIO));
+		rw |= REQ_HARDBARRIER;
+	rw |= REQ_UNPLUG | REQ_SYNC;
 
 retry:
 	bio = bio_alloc(GFP_NOIO, 1);
@@ -103,11 +103,11 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 	/* check for unsupported barrier op.
 	 * would rather check on EOPNOTSUPP, but that is not reliable.
 	 * don't try again for ANY return value != 0 */
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER) && !ok)) {
+	if (unlikely((bio->bi_rw & REQ_HARDBARRIER) && !ok)) {
 		/* Try again with no barrier */
 		dev_warn(DEV, "Barriers not supported on meta data device - disabling\n");
 		set_bit(MD_NO_BARRIER, &mdev->flags);
-		rw &= ~(1 << BIO_RW_BARRIER);
+		rw &= ~REQ_HARDBARRIER;
 		bio_put(bio);
 		goto retry;
 	}
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 7258c95e895e..e2ab13d99d69 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2425,15 +2425,15 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
 	/* NOTE: no need to check if barriers supported here as we would
 	 *       not pass the test in make_request_common in that case
 	 */
-	if (bio_rw_flagged(req->master_bio, BIO_RW_BARRIER)) {
+	if (req->master_bio->bi_rw & REQ_HARDBARRIER) {
 		dev_err(DEV, "ASSERT FAILED would have set DP_HARDBARRIER\n");
 		/* dp_flags |= DP_HARDBARRIER; */
 	}
-	if (bio_rw_flagged(req->master_bio, BIO_RW_SYNCIO))
+	if (req->master_bio->bi_rw & REQ_SYNC)
 		dp_flags |= DP_RW_SYNC;
 	/* for now handle SYNCIO and UNPLUG
 	 * as if they still were one and the same flag */
-	if (bio_rw_flagged(req->master_bio, BIO_RW_UNPLUG))
+	if (req->master_bio->bi_rw & REQ_UNPLUG)
 		dp_flags |= DP_RW_SYNC;
 	if (mdev->state.conn >= C_SYNC_SOURCE &&
 	    mdev->state.conn <= C_PAUSED_SYNC_T)
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index dff48701b84d..cba1deb7b271 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1180,7 +1180,7 @@ next_bio:
 	bio->bi_sector = sector;
 	bio->bi_bdev = mdev->ldev->backing_bdev;
 	/* we special case some flags in the multi-bio case, see below
-	 * (BIO_RW_UNPLUG, BIO_RW_BARRIER) */
+	 * (REQ_UNPLUG, REQ_HARDBARRIER) */
 	bio->bi_rw = rw;
 	bio->bi_private = e;
 	bio->bi_end_io = drbd_endio_sec;
@@ -1209,16 +1209,16 @@ next_bio:
 		bios = bios->bi_next;
 		bio->bi_next = NULL;
 
-		/* strip off BIO_RW_UNPLUG unless it is the last bio */
+		/* strip off REQ_UNPLUG unless it is the last bio */
 		if (bios)
-			bio->bi_rw &= ~(1<<BIO_RW_UNPLUG);
+			bio->bi_rw &= ~REQ_UNPLUG;
 
 		drbd_generic_make_request(mdev, fault_type, bio);
 
-		/* strip off BIO_RW_BARRIER,
+		/* strip off REQ_HARDBARRIER,
 		 * unless it is the first or last bio */
 		if (bios && bios->bi_next)
-			bios->bi_rw &= ~(1<<BIO_RW_BARRIER);
+			bios->bi_rw &= ~REQ_HARDBARRIER;
 	} while (bios);
 	maybe_kick_lo(mdev);
 	return 0;
@@ -1233,7 +1233,7 @@ fail:
 }
 
 /**
- * w_e_reissue() - Worker callback; Resubmit a bio, without BIO_RW_BARRIER set
+ * w_e_reissue() - Worker callback; Resubmit a bio, without REQ_HARDBARRIER set
  * @mdev:	DRBD device.
  * @w:		work object.
  * @cancel:	The connection will be closed anyways (unused in this callback)
@@ -1245,7 +1245,7 @@ int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __relea
 	   (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
 	   so that we can finish that epoch in drbd_may_finish_epoch().
 	   That is necessary if we already have a long chain of Epochs, before
-	   we realize that BIO_RW_BARRIER is actually not supported */
+	   we realize that REQ_HARDBARRIER is actually not supported */
 
 	/* As long as the -ENOTSUPP on the barrier is reported immediately
 	   that will never trigger. If it is reported late, we will just
@@ -1824,14 +1824,14 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
 		epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
 		if (epoch == e->epoch) {
 			set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
-			rw |= (1<<BIO_RW_BARRIER);
+			rw |= REQ_HARDBARRIER;
 			e->flags |= EE_IS_BARRIER;
 		} else {
 			if (atomic_read(&epoch->epoch_size) > 1 ||
 			    !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
 				set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
 				set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
-				rw |= (1<<BIO_RW_BARRIER);
+				rw |= REQ_HARDBARRIER;
 				e->flags |= EE_IS_BARRIER;
 			}
 		}
@@ -1841,10 +1841,10 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
 	dp_flags = be32_to_cpu(p->dp_flags);
 	if (dp_flags & DP_HARDBARRIER) {
 		dev_err(DEV, "ASSERT FAILED would have submitted barrier request\n");
-		/* rw |= (1<<BIO_RW_BARRIER); */
+		/* rw |= REQ_HARDBARRIER; */
 	}
 	if (dp_flags & DP_RW_SYNC)
-		rw |= (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);
+		rw |= REQ_SYNC | REQ_UNPLUG;
 	if (dp_flags & DP_MAY_SET_IN_SYNC)
 		e->flags |= EE_MAY_SET_IN_SYNC;
 
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 654f1ef5cbb0..f761d98a4e90 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -997,7 +997,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
 	 * because of those XXX, this is not yet enabled,
 	 * i.e. in drbd_init_set_defaults we set the NO_BARRIER_SUPP bit.
 	 */
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags))) {
+	if (unlikely(bio->bi_rw & REQ_HARDBARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags)) {
 		/* dev_warn(DEV, "Rejecting barrier request as underlying device does not support\n"); */
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 6120922f459f..fedfdb7d3cdf 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -476,7 +476,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
 	pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
 
 	if (bio_rw(bio) == WRITE) {
-		bool barrier = bio_rw_flagged(bio, BIO_RW_BARRIER);
+		bool barrier = (bio->bi_rw & REQ_HARDBARRIER);
 		struct file *file = lo->lo_backing_file;
 
 		if (barrier) {
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 8a549db2aa78..9f3e4454274b 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1221,7 +1221,7 @@ static int pkt_start_recovery(struct packet_data *pkt)
 	pkt->bio->bi_flags = 1 << BIO_UPTODATE;
 	pkt->bio->bi_idx = 0;
 
-	BUG_ON(pkt->bio->bi_rw != (1 << BIO_RW));
+	BUG_ON(pkt->bio->bi_rw != REQ_WRITE);
 	BUG_ON(pkt->bio->bi_vcnt != pkt->frames);
 	BUG_ON(pkt->bio->bi_size != pkt->frames * CD_FRAMESIZE);
 	BUG_ON(pkt->bio->bi_end_io != pkt_end_io_packet_write);
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 2f9470ff8f7c..8be57151f5d6 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -478,7 +478,7 @@ static void process_page(unsigned long data)
 				le32_to_cpu(desc->local_addr)>>9,
 				le32_to_cpu(desc->transfer_size));
 			dump_dmastat(card, control);
-		} else if (test_bit(BIO_RW, &bio->bi_rw) &&
+		} else if ((bio->bi_rw & REQ_WRITE) &&
 			   le32_to_cpu(desc->local_addr) >> 9 ==
 				card->init_size) {
 			card->init_size += le32_to_cpu(desc->transfer_size) >> 9;
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 02712bf045c1..766b3deeb23c 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -454,7 +454,7 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi,
 	   touch it at all. */
 
 	if (cgc->data_direction == CGC_DATA_WRITE)
-		flags |= REQ_RW;
+		flags |= REQ_WRITE;
 
 	if (cgc->sense)
 		memset(cgc->sense, 0, sizeof(struct request_sense));
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index c7d0737bb18a..5406b6ea3ad1 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -207,7 +207,7 @@ static void idefloppy_create_rw_cmd(ide_drive_t *drive,
 	memcpy(rq->cmd, pc->c, 12);
 
 	pc->rq = rq;
-	if (rq->cmd_flags & REQ_RW)
+	if (rq->cmd_flags & REQ_WRITE)
 		pc->flags |= PC_FLAG_WRITING;
 
 	pc->flags |= PC_FLAG_DMA_OK;
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 10f457ca6af2..0590c75b0ab6 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -356,7 +356,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
 	BUG_ON(num_regions > DM_IO_MAX_REGIONS);
 
 	if (sync)
-		rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+		rw |= REQ_SYNC | REQ_UNPLUG;
 
 	/*
 	 * For multiple regions we need to be careful to rewind
@@ -364,7 +364,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
 	 */
 	for (i = 0; i < num_regions; i++) {
 		*dp = old_pages;
-		if (where[i].count || (rw & (1 << BIO_RW_BARRIER)))
+		if (where[i].count || (rw & REQ_HARDBARRIER))
 			do_region(rw, i, where + i, dp, io);
 	}
 
@@ -412,8 +412,8 @@ retry:
 	}
 	set_current_state(TASK_RUNNING);
 
-	if (io->eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) {
-		rw &= ~(1 << BIO_RW_BARRIER);
+	if (io->eopnotsupp_bits && (rw & REQ_HARDBARRIER)) {
+		rw &= ~REQ_HARDBARRIER;
 		goto retry;
 	}
 
@@ -479,8 +479,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
- * the queue with blk_unplug() some time later or set the BIO_RW_SYNC bit in
- * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
+ * the queue with blk_unplug() some time later or set REQ_SYNC in
+ * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
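dm-io keeps its existing fallback for devices that reject barriers, just spelled with the new mask: when the synchronous path sees EOPNOTSUPP, it clears REQ_HARDBARRIER and resubmits. A condensed, standalone sketch of that clear-and-retry pattern; submit() here is a hypothetical stand-in for the real submission path, and the mask value is illustrative:

#include <errno.h>
#include <stdio.h>

#define REQ_HARDBARRIER (1UL << 5)	/* illustrative mask value */

/* hypothetical device that rejects barrier writes with -EOPNOTSUPP */
static int submit(unsigned long rw)
{
	return (rw & REQ_HARDBARRIER) ? -EOPNOTSUPP : 0;
}

int main(void)
{
	unsigned long rw = REQ_HARDBARRIER;
	int err;

retry:
	err = submit(rw);
	if (err == -EOPNOTSUPP && (rw & REQ_HARDBARRIER)) {
		/* same fallback the dm-io sync path uses above:
		 * drop the barrier bit and resubmit */
		rw &= ~REQ_HARDBARRIER;
		goto retry;
	}

	printf("completed with %d\n", err);
	return 0;
}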
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index addf83475040..d8587bac5682 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -345,7 +345,7 @@ static int run_io_job(struct kcopyd_job *job)
 {
 	int r;
 	struct dm_io_request io_req = {
-		.bi_rw = job->rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG),
+		.bi_rw = job->rw | REQ_SYNC | REQ_UNPLUG,
 		.mem.type = DM_IO_PAGE_LIST,
 		.mem.ptr.pl = job->pages,
 		.mem.offset = job->offset,
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index ddda531723dc..74136262d654 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1211,7 +1211,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
 	if (error == -EOPNOTSUPP)
 		goto out;
 
-	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
+	if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
 		goto out;
 
 	if (unlikely(error)) {
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index e610725db766..d6e28d732b4d 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -284,7 +284,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
 	if (!error)
 		return 0; /* I/O complete */
 
-	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
+	if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
 		return error;
 
 	if (error == -EOPNOTSUPP)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 1e0e6dd51501..d6f77baeafd6 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -614,7 +614,7 @@ static void dec_pending(struct dm_io *io, int error)
 			 */
 			spin_lock_irqsave(&md->deferred_lock, flags);
 			if (__noflush_suspending(md)) {
-				if (!bio_rw_flagged(io->bio, BIO_RW_BARRIER))
+				if (!(io->bio->bi_rw & REQ_HARDBARRIER))
 					bio_list_add_head(&md->deferred,
 							  io->bio);
 			} else
@@ -626,7 +626,7 @@ static void dec_pending(struct dm_io *io, int error)
 		io_error = io->error;
 		bio = io->bio;
 
-		if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
+		if (bio->bi_rw & REQ_HARDBARRIER) {
 			/*
 			 * There can be just one barrier request so we use
 			 * a per-device variable for error reporting.
@@ -1106,7 +1106,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
 
 	clone->bi_sector = sector;
 	clone->bi_bdev = bio->bi_bdev;
-	clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER);
+	clone->bi_rw = bio->bi_rw & ~REQ_HARDBARRIER;
 	clone->bi_vcnt = 1;
 	clone->bi_size = to_bytes(len);
 	clone->bi_io_vec->bv_offset = offset;
@@ -1133,7 +1133,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
 
 	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
 	__bio_clone(clone, bio);
-	clone->bi_rw &= ~(1 << BIO_RW_BARRIER);
+	clone->bi_rw &= ~REQ_HARDBARRIER;
 	clone->bi_destructor = dm_bio_destructor;
 	clone->bi_sector = sector;
 	clone->bi_idx = idx;
@@ -1301,7 +1301,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
 
 	ci.map = dm_get_live_table(md);
 	if (unlikely(!ci.map)) {
-		if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
+		if (!(bio->bi_rw & REQ_HARDBARRIER))
 			bio_io_error(bio);
 		else
 			if (!md->barrier_error)
@@ -1414,7 +1414,7 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
 	 * we have to queue this io for later.
 	 */
 	if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
-	    unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+	    unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
 		up_read(&md->io_lock);
 
 		if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
@@ -2296,7 +2296,7 @@ static void dm_wq_work(struct work_struct *work)
 		if (dm_request_based(md))
 			generic_make_request(c);
 		else {
-			if (bio_rw_flagged(c, BIO_RW_BARRIER))
+			if (c->bi_rw & REQ_HARDBARRIER)
 				process_barrier(md, c);
 			else
 				__split_and_process_bio(md, c);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 7e0e057db9a7..ba19060bcf3f 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -294,7 +294,7 @@ static int linear_make_request (mddev_t *mddev, struct bio *bio)
 	dev_info_t *tmp_dev;
 	sector_t start_sector;
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+	if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
 		md_barrier_request(mddev, bio);
 		return 0;
 	}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index cb20d0b0555a..1893af678779 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -353,7 +353,7 @@ static void md_submit_barrier(struct work_struct *ws)
 		/* an empty barrier - all done */
 		bio_endio(bio, 0);
 	else {
-		bio->bi_rw &= ~(1<<BIO_RW_BARRIER);
+		bio->bi_rw &= ~REQ_HARDBARRIER;
 		if (mddev->pers->make_request(mddev, bio))
 			generic_make_request(bio);
 		mddev->barrier = POST_REQUEST_BARRIER;
@@ -675,11 +675,11 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
 	 * if zero is reached.
 	 * If an error occurred, call md_error
 	 *
-	 * As we might need to resubmit the request if BIO_RW_BARRIER
+	 * As we might need to resubmit the request if REQ_HARDBARRIER
 	 * causes ENOTSUPP, we allocate a spare bio...
 	 */
 	struct bio *bio = bio_alloc(GFP_NOIO, 1);
-	int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);
+	int rw = REQ_WRITE | REQ_SYNC | REQ_UNPLUG;
 
 	bio->bi_bdev = rdev->bdev;
 	bio->bi_sector = sector;
@@ -691,7 +691,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
 	atomic_inc(&mddev->pending_writes);
 	if (!test_bit(BarriersNotsupp, &rdev->flags)) {
 		struct bio *rbio;
-		rw |= (1<<BIO_RW_BARRIER);
+		rw |= REQ_HARDBARRIER;
 		rbio = bio_clone(bio, GFP_NOIO);
 		rbio->bi_private = bio;
 		rbio->bi_end_io = super_written_barrier;
@@ -736,7 +736,7 @@ int sync_page_io(struct block_device *bdev, sector_t sector, int size,
 	struct completion event;
 	int ret;
 
-	rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+	rw |= REQ_SYNC | REQ_UNPLUG;
 
 	bio->bi_bdev = bdev;
 	bio->bi_sector = sector;
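md can now build its rw word directly out of REQ_* masks because, as the blk_rq_bio_prep() comment earlier notes, bit 0 (the R/W bit) is identical in rq->cmd_flags and bio->bi_rw. A small sketch, with assumed bit values, of why REQ_WRITE | REQ_SYNC | REQ_UNPLUG composes cleanly with the classic WRITE direction test:

#include <assert.h>

/* Illustrative values; in the unified scheme the write bit is bit 0 in
 * both namespaces, which is what lets md compose rw masks directly. */
#define WRITE      1			/* classic data-direction bit */
#define REQ_WRITE  (1UL << 0)		/* same bit, new mask name */
#define REQ_SYNC   (1UL << 6)
#define REQ_UNPLUG (1UL << 7)

int main(void)
{
	unsigned long rw = REQ_WRITE | REQ_SYNC | REQ_UNPLUG;

	/* the direction test and the flag tests read the same word */
	assert((rw & WRITE) == WRITE);
	assert(rw & REQ_SYNC);
	assert(rw & REQ_UNPLUG);
	return 0;
}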
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 10597bfec000..fc56e0f21c80 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -67,7 +67,7 @@ struct mdk_rdev_s
 #define	Faulty		1		/* device is known to have a fault */
 #define	In_sync		2		/* device is in_sync with rest of array */
 #define	WriteMostly	4		/* Avoid reading if at all possible */
-#define	BarriersNotsupp	5		/* BIO_RW_BARRIER is not supported */
+#define	BarriersNotsupp	5		/* REQ_HARDBARRIER is not supported */
 #define	AllReserved	6		/* If whole device is reserved for
 					 * one array */
 #define	AutoDetected	7		/* added by auto-detect */
@@ -254,7 +254,7 @@ struct mddev_s
 							 * fails.  Only supported
 							 */
 	struct bio *biolist;	/* bios that need to be retried
-				 * because BIO_RW_BARRIER is not supported
+				 * because REQ_HARDBARRIER is not supported
 				 */
 
 	atomic_t recovery_active; /* blocks scheduled, but not written */
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 410fb60699ac..0307d217e7a4 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -91,7 +91,7 @@ static void multipath_end_request(struct bio *bio, int error)
 
 	if (uptodate)
 		multipath_end_bh_io(mp_bh, 0);
-	else if (!bio_rw_flagged(bio, BIO_RW_AHEAD)) {
+	else if (!(bio->bi_rw & REQ_RAHEAD)) {
 		/*
 		 * oops, IO error:
 		 */
@@ -142,7 +142,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
 	struct multipath_bh * mp_bh;
 	struct multipath_info *multipath;
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+	if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
 		md_barrier_request(mddev, bio);
 		return 0;
 	}
@@ -163,7 +163,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
 	mp_bh->bio = *bio;
 	mp_bh->bio.bi_sector += multipath->rdev->data_offset;
 	mp_bh->bio.bi_bdev = multipath->rdev->bdev;
-	mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
+	mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
 	mp_bh->bio.bi_end_io = multipath_end_request;
 	mp_bh->bio.bi_private = mp_bh;
 	generic_make_request(&mp_bh->bio);
@@ -398,7 +398,7 @@ static void multipathd (mddev_t *mddev)
 			*bio = *(mp_bh->master_bio);
 			bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
 			bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
-			bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
+			bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
 			bio->bi_end_io = multipath_end_request;
 			bio->bi_private = mp_bh;
 			generic_make_request(bio);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 563abed5a2cb..6f7af46d623c 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -483,7 +483,7 @@ static int raid0_make_request(mddev_t *mddev, struct bio *bio)
 	struct strip_zone *zone;
 	mdk_rdev_t *tmp_dev;
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+	if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
 		md_barrier_request(mddev, bio);
 		return 0;
 	}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a948da8012de..73cc74ffc26b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -787,7 +787,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	struct bio_list bl;
 	struct page **behind_pages = NULL;
 	const int rw = bio_data_dir(bio);
-	const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+	const bool do_sync = (bio->bi_rw & REQ_SYNC);
 	bool do_barriers;
 	mdk_rdev_t *blocked_rdev;
 
@@ -822,7 +822,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		finish_wait(&conf->wait_barrier, &w);
 	}
 	if (unlikely(!mddev->barriers_work &&
-		     bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+		     (bio->bi_rw & REQ_HARDBARRIER))) {
 		if (rw == WRITE)
 			md_write_end(mddev);
 		bio_endio(bio, -EOPNOTSUPP);
@@ -877,7 +877,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
 		read_bio->bi_bdev = mirror->rdev->bdev;
 		read_bio->bi_end_io = raid1_end_read_request;
-		read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+		read_bio->bi_rw = READ | do_sync;
 		read_bio->bi_private = r1_bio;
 
 		generic_make_request(read_bio);
@@ -959,7 +959,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	atomic_set(&r1_bio->remaining, 0);
 	atomic_set(&r1_bio->behind_remaining, 0);
 
-	do_barriers = bio_rw_flagged(bio, BIO_RW_BARRIER);
+	do_barriers = bio->bi_rw & REQ_HARDBARRIER;
 	if (do_barriers)
 		set_bit(R1BIO_Barrier, &r1_bio->state);
 
@@ -975,8 +975,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset;
 		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
 		mbio->bi_end_io = raid1_end_write_request;
-		mbio->bi_rw = WRITE | (do_barriers << BIO_RW_BARRIER) |
-			(do_sync << BIO_RW_SYNCIO);
+		mbio->bi_rw = WRITE | do_barriers | do_sync;
 		mbio->bi_private = r1_bio;
 
 		if (behind_pages) {
@@ -1633,7 +1632,7 @@ static void raid1d(mddev_t *mddev)
 			sync_request_write(mddev, r1_bio);
 			unplug = 1;
 		} else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
-			/* some requests in the r1bio were BIO_RW_BARRIER
+			/* some requests in the r1bio were REQ_HARDBARRIER
 			 * requests which failed with -EOPNOTSUPP.  Hohumm..
 			 * Better resubmit without the barrier.
 			 * We know which devices to resubmit for, because
@@ -1641,7 +1640,7 @@ static void raid1d(mddev_t *mddev)
 			 * We already have a nr_pending reference on these rdevs.
 			 */
 			int i;
-			const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
+			const bool do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC);
 			clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
 			clear_bit(R1BIO_Barrier, &r1_bio->state);
 			for (i=0; i < conf->raid_disks; i++)
@@ -1662,8 +1661,7 @@ static void raid1d(mddev_t *mddev)
 						conf->mirrors[i].rdev->data_offset;
 					bio->bi_bdev = conf->mirrors[i].rdev->bdev;
 					bio->bi_end_io = raid1_end_write_request;
-					bio->bi_rw = WRITE |
-						(do_sync << BIO_RW_SYNCIO);
+					bio->bi_rw = WRITE | do_sync;
 					bio->bi_private = r1_bio;
 					r1_bio->bios[i] = bio;
 					generic_make_request(bio);
@@ -1698,7 +1696,7 @@ static void raid1d(mddev_t *mddev)
 				       (unsigned long long)r1_bio->sector);
 				raid_end_bio_io(r1_bio);
 			} else {
-				const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
+				const bool do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC;
 				r1_bio->bios[r1_bio->read_disk] =
 					mddev->ro ? IO_BLOCKED : NULL;
 				r1_bio->read_disk = disk;
@@ -1715,7 +1713,7 @@ static void raid1d(mddev_t *mddev)
 				bio->bi_sector = r1_bio->sector + rdev->data_offset;
 				bio->bi_bdev = rdev->bdev;
 				bio->bi_end_io = raid1_end_read_request;
-				bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+				bio->bi_rw = READ | do_sync;
 				bio->bi_private = r1_bio;
 				unplug = 1;
 				generic_make_request(bio);
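One caveat worth flagging in the raid1 conversion (raid10 below has the same shape): do_sync and do_barriers stay declared as bool while now holding a REQ_* mask. A bool collapses any non-zero mask to 1, so READ | do_sync sets bit 0 (REQ_WRITE in the new scheme) rather than REQ_SYNC; mainline later switched these variables to unsigned long. A standalone sketch of the pitfall, with illustrative bit values:

#include <assert.h>
#include <stdbool.h>

#define REQ_WRITE (1UL << 0)	/* illustrative values */
#define REQ_SYNC  (1UL << 6)
#define READ      0UL

int main(void)
{
	unsigned long bi_rw = REQ_SYNC;

	/* bool collapses the mask to 0 or 1 ... */
	const bool sync_bool = (bi_rw & REQ_SYNC);
	/* ... so OR-ing it back in sets bit 0 (REQ_WRITE), not REQ_SYNC */
	assert((READ | sync_bool) == REQ_WRITE);

	/* keeping the full word preserves the intended bit */
	const unsigned long sync_mask = (bi_rw & REQ_SYNC);
	assert((READ | sync_mask) == REQ_SYNC);
	return 0;
}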
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 42e64e4e5e25..62ecb6650fd0 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -799,12 +799,12 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	int i;
 	int chunk_sects = conf->chunk_mask + 1;
 	const int rw = bio_data_dir(bio);
-	const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+	const bool do_sync = (bio->bi_rw & REQ_SYNC);
 	struct bio_list bl;
 	unsigned long flags;
 	mdk_rdev_t *blocked_rdev;
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+	if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
 		md_barrier_request(mddev, bio);
 		return 0;
 	}
@@ -879,7 +879,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 			mirror->rdev->data_offset;
 		read_bio->bi_bdev = mirror->rdev->bdev;
 		read_bio->bi_end_io = raid10_end_read_request;
-		read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+		read_bio->bi_rw = READ | do_sync;
 		read_bio->bi_private = r10_bio;
 
 		generic_make_request(read_bio);
@@ -947,7 +947,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 			conf->mirrors[d].rdev->data_offset;
 		mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
 		mbio->bi_end_io = raid10_end_write_request;
-		mbio->bi_rw = WRITE | (do_sync << BIO_RW_SYNCIO);
+		mbio->bi_rw = WRITE | do_sync;
 		mbio->bi_private = r10_bio;
 
 		atomic_inc(&r10_bio->remaining);
@@ -1716,7 +1716,7 @@ static void raid10d(mddev_t *mddev)
 			raid_end_bio_io(r10_bio);
 			bio_put(bio);
 		} else {
-			const bool do_sync = bio_rw_flagged(r10_bio->master_bio, BIO_RW_SYNCIO);
+			const bool do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
 			bio_put(bio);
 			rdev = conf->mirrors[mirror].rdev;
 			if (printk_ratelimit())
@@ -1730,7 +1730,7 @@ static void raid10d(mddev_t *mddev)
 			bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
 				+ rdev->data_offset;
 			bio->bi_bdev = rdev->bdev;
-			bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+			bio->bi_rw = READ | do_sync;
 			bio->bi_private = r10_bio;
 			bio->bi_end_io = raid10_end_read_request;
 			unplug = 1;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 96c690279fc6..20ac2f14376a 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3958,7 +3958,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
 	const int rw = bio_data_dir(bi);
 	int remaining;
 
-	if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
+	if (unlikely(bi->bi_rw & REQ_HARDBARRIER)) {
 		/* Drain all pending writes.  We only really need
 		 * to ensure they have been submitted, but this is
 		 * easier.
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index ee4b6914667f..fda4de3440c4 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c | |||
@@ -716,7 +716,7 @@ static int _osd_req_list_objects(struct osd_request *or, | |||
716 | return PTR_ERR(bio); | 716 | return PTR_ERR(bio); |
717 | } | 717 | } |
718 | 718 | ||
719 | bio->bi_rw &= ~(1 << BIO_RW); | 719 | bio->bi_rw &= ~REQ_WRITE; |
720 | or->in.bio = bio; | 720 | or->in.bio = bio; |
721 | or->in.total_bytes = bio->bi_size; | 721 | or->in.total_bytes = bio->bi_size; |
722 | return 0; | 722 | return 0; |
@@ -814,7 +814,7 @@ void osd_req_write(struct osd_request *or, | |||
814 | { | 814 | { |
815 | _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len); | 815 | _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len); |
816 | WARN_ON(or->out.bio || or->out.total_bytes); | 816 | WARN_ON(or->out.bio || or->out.total_bytes); |
817 | WARN_ON(0 == bio_rw_flagged(bio, BIO_RW)); | 817 | WARN_ON(0 == (bio->bi_rw & REQ_WRITE)); |
818 | or->out.bio = bio; | 818 | or->out.bio = bio; |
819 | or->out.total_bytes = len; | 819 | or->out.total_bytes = len; |
820 | } | 820 | } |
@@ -829,7 +829,7 @@ int osd_req_write_kern(struct osd_request *or, | |||
829 | if (IS_ERR(bio)) | 829 | if (IS_ERR(bio)) |
830 | return PTR_ERR(bio); | 830 | return PTR_ERR(bio); |
831 | 831 | ||
832 | bio->bi_rw |= (1 << BIO_RW); /* FIXME: bio_set_dir() */ | 832 | bio->bi_rw |= REQ_WRITE; /* FIXME: bio_set_dir() */ |
833 | osd_req_write(or, obj, offset, bio, len); | 833 | osd_req_write(or, obj, offset, bio, len); |
834 | return 0; | 834 | return 0; |
835 | } | 835 | } |
@@ -865,7 +865,7 @@ void osd_req_read(struct osd_request *or, | |||
865 | { | 865 | { |
866 | _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len); | 866 | _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len); |
867 | WARN_ON(or->in.bio || or->in.total_bytes); | 867 | WARN_ON(or->in.bio || or->in.total_bytes); |
868 | WARN_ON(1 == bio_rw_flagged(bio, BIO_RW)); | 868 | WARN_ON(1 == (bio->bi_rw & REQ_WRITE)); |
869 | or->in.bio = bio; | 869 | or->in.bio = bio; |
870 | or->in.total_bytes = len; | 870 | or->in.total_bytes = len; |
871 | } | 871 | } |
diff --git a/fs/bio.c b/fs/bio.c --- a/fs/bio.c +++ b/fs/bio.c | |||
@@ -843,7 +843,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q, | |||
843 | if (!bio) | 843 | if (!bio) |
844 | goto out_bmd; | 844 | goto out_bmd; |
845 | 845 | ||
846 | bio->bi_rw |= (!write_to_vm << BIO_RW); | 846 | if (!write_to_vm) |
847 | bio->bi_rw |= REQ_WRITE; | ||
847 | 848 | ||
848 | ret = 0; | 849 | ret = 0; |
849 | 850 | ||
@@ -1024,7 +1025,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q, | |||
1024 | * set data direction, and check if mapped pages need bouncing | 1025 | * set data direction, and check if mapped pages need bouncing |
1025 | */ | 1026 | */ |
1026 | if (!write_to_vm) | 1027 | if (!write_to_vm) |
1027 | bio->bi_rw |= (1 << BIO_RW); | 1028 | bio->bi_rw |= REQ_WRITE; |
1028 | 1029 | ||
1029 | bio->bi_bdev = bdev; | 1030 | bio->bi_bdev = bdev; |
1030 | bio->bi_flags |= (1 << BIO_USER_MAPPED); | 1031 | bio->bi_flags |= (1 << BIO_USER_MAPPED); |
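Both fs/bio.c hunks replace an arithmetic trick with an explicit test. The old form only worked because the write bit sits at position 0, so the shift was a no-op; an open-coded shift like this would silently break if the bit ever moved. A short sketch of the equivalence, assuming __REQ_WRITE == 0 as defined later in this patch in bio.h:

	/* old: relies on BIO_RW being bit 0, so the shift does nothing */
	bio->bi_rw |= (!write_to_vm << BIO_RW);

	/* new: explicit and position-independent */
	if (!write_to_vm)
		bio->bi_rw |= REQ_WRITE;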
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 34f7c375567e..64f10082f048 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -480,7 +480,7 @@ static void end_workqueue_bio(struct bio *bio, int err) | |||
480 | end_io_wq->work.func = end_workqueue_fn; | 480 | end_io_wq->work.func = end_workqueue_fn; |
481 | end_io_wq->work.flags = 0; | 481 | end_io_wq->work.flags = 0; |
482 | 482 | ||
483 | if (bio->bi_rw & (1 << BIO_RW)) { | 483 | if (bio->bi_rw & REQ_WRITE) { |
484 | if (end_io_wq->metadata) | 484 | if (end_io_wq->metadata) |
485 | btrfs_queue_worker(&fs_info->endio_meta_write_workers, | 485 | btrfs_queue_worker(&fs_info->endio_meta_write_workers, |
486 | &end_io_wq->work); | 486 | &end_io_wq->work); |
@@ -604,7 +604,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, | |||
604 | 604 | ||
605 | atomic_inc(&fs_info->nr_async_submits); | 605 | atomic_inc(&fs_info->nr_async_submits); |
606 | 606 | ||
607 | if (rw & (1 << BIO_RW_SYNCIO)) | 607 | if (rw & REQ_SYNC) |
608 | btrfs_set_work_high_prio(&async->work); | 608 | btrfs_set_work_high_prio(&async->work); |
609 | 609 | ||
610 | btrfs_queue_worker(&fs_info->workers, &async->work); | 610 | btrfs_queue_worker(&fs_info->workers, &async->work); |
@@ -668,7 +668,7 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, | |||
668 | bio, 1); | 668 | bio, 1); |
669 | BUG_ON(ret); | 669 | BUG_ON(ret); |
670 | 670 | ||
671 | if (!(rw & (1 << BIO_RW))) { | 671 | if (!(rw & REQ_WRITE)) { |
672 | /* | 672 | /* |
673 | * called for a read, do the setup so that checksum validation | 673 | * called for a read, do the setup so that checksum validation |
674 | * can happen in the async kernel threads | 674 | * can happen in the async kernel threads |
@@ -1427,7 +1427,7 @@ static void end_workqueue_fn(struct btrfs_work *work) | |||
1427 | * ram and up to date before trying to verify things. For | 1427 | * ram and up to date before trying to verify things. For |
1428 | * blocksize <= pagesize, it is basically a noop | 1428 | * blocksize <= pagesize, it is basically a noop |
1429 | */ | 1429 | */ |
1430 | if (!(bio->bi_rw & (1 << BIO_RW)) && end_io_wq->metadata && | 1430 | if (!(bio->bi_rw & REQ_WRITE) && end_io_wq->metadata && |
1431 | !bio_ready_for_csum(bio)) { | 1431 | !bio_ready_for_csum(bio)) { |
1432 | btrfs_queue_worker(&fs_info->endio_meta_workers, | 1432 | btrfs_queue_worker(&fs_info->endio_meta_workers, |
1433 | &end_io_wq->work); | 1433 | &end_io_wq->work); |
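Every btrfs hunk in this file reduces to the same two predicates: data direction and whether the bio carries metadata. Distilled, the completion routing in end_workqueue_bio() looks like the sketch below; the two non-metadata queue names are not visible in the hunks and are assumptions taken from the surrounding btrfs source of this era:

	/* simplified sketch of the four-way routing in end_workqueue_bio() */
	struct btrfs_workers *wq;

	if (bio->bi_rw & REQ_WRITE)
		wq = end_io_wq->metadata ? &fs_info->endio_meta_write_workers
					 : &fs_info->endio_write_workers;	/* assumed name */
	else
		wq = end_io_wq->metadata ? &fs_info->endio_meta_workers
					 : &fs_info->endio_workers;		/* assumed name */
	btrfs_queue_worker(wq, &end_io_wq->work);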
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 1bff92ad4744..e975d7180a88 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -1429,7 +1429,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, | |||
1429 | ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); | 1429 | ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); |
1430 | BUG_ON(ret); | 1430 | BUG_ON(ret); |
1431 | 1431 | ||
1432 | if (!(rw & (1 << BIO_RW))) { | 1432 | if (!(rw & REQ_WRITE)) { |
1433 | if (bio_flags & EXTENT_BIO_COMPRESSED) { | 1433 | if (bio_flags & EXTENT_BIO_COMPRESSED) { |
1434 | return btrfs_submit_compressed_read(inode, bio, | 1434 | return btrfs_submit_compressed_read(inode, bio, |
1435 | mirror_num, bio_flags); | 1435 | mirror_num, bio_flags); |
@@ -1841,7 +1841,7 @@ static int btrfs_io_failed_hook(struct bio *failed_bio, | |||
1841 | bio->bi_size = 0; | 1841 | bio->bi_size = 0; |
1842 | 1842 | ||
1843 | bio_add_page(bio, page, failrec->len, start - page_offset(page)); | 1843 | bio_add_page(bio, page, failrec->len, start - page_offset(page)); |
1844 | if (failed_bio->bi_rw & (1 << BIO_RW)) | 1844 | if (failed_bio->bi_rw & REQ_WRITE) |
1845 | rw = WRITE; | 1845 | rw = WRITE; |
1846 | else | 1846 | else |
1847 | rw = READ; | 1847 | rw = READ; |
@@ -5642,7 +5642,7 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode, | |||
5642 | struct bio_vec *bvec = bio->bi_io_vec; | 5642 | struct bio_vec *bvec = bio->bi_io_vec; |
5643 | u64 start; | 5643 | u64 start; |
5644 | int skip_sum; | 5644 | int skip_sum; |
5645 | int write = rw & (1 << BIO_RW); | 5645 | int write = rw & REQ_WRITE; |
5646 | int ret = 0; | 5646 | int ret = 0; |
5647 | 5647 | ||
5648 | skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; | 5648 | skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index d6e3af8be95b..dd318ff280b2 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -258,7 +258,7 @@ loop_lock: | |||
258 | 258 | ||
259 | BUG_ON(atomic_read(&cur->bi_cnt) == 0); | 259 | BUG_ON(atomic_read(&cur->bi_cnt) == 0); |
260 | 260 | ||
261 | if (bio_rw_flagged(cur, BIO_RW_SYNCIO)) | 261 | if (cur->bi_rw & REQ_SYNC) |
262 | num_sync_run++; | 262 | num_sync_run++; |
263 | 263 | ||
264 | submit_bio(cur->bi_rw, cur); | 264 | submit_bio(cur->bi_rw, cur); |
@@ -2651,7 +2651,7 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw, | |||
2651 | int max_errors = 0; | 2651 | int max_errors = 0; |
2652 | struct btrfs_multi_bio *multi = NULL; | 2652 | struct btrfs_multi_bio *multi = NULL; |
2653 | 2653 | ||
2654 | if (multi_ret && !(rw & (1 << BIO_RW))) | 2654 | if (multi_ret && !(rw & REQ_WRITE)) |
2655 | stripes_allocated = 1; | 2655 | stripes_allocated = 1; |
2656 | again: | 2656 | again: |
2657 | if (multi_ret) { | 2657 | if (multi_ret) { |
@@ -2687,7 +2687,7 @@ again: | |||
2687 | mirror_num = 0; | 2687 | mirror_num = 0; |
2688 | 2688 | ||
2689 | /* if our multi bio struct is too small, back off and try again */ | 2689 | /* if our multi bio struct is too small, back off and try again */ |
2690 | if (rw & (1 << BIO_RW)) { | 2690 | if (rw & REQ_WRITE) { |
2691 | if (map->type & (BTRFS_BLOCK_GROUP_RAID1 | | 2691 | if (map->type & (BTRFS_BLOCK_GROUP_RAID1 | |
2692 | BTRFS_BLOCK_GROUP_DUP)) { | 2692 | BTRFS_BLOCK_GROUP_DUP)) { |
2693 | stripes_required = map->num_stripes; | 2693 | stripes_required = map->num_stripes; |
@@ -2697,7 +2697,7 @@ again: | |||
2697 | max_errors = 1; | 2697 | max_errors = 1; |
2698 | } | 2698 | } |
2699 | } | 2699 | } |
2700 | if (multi_ret && (rw & (1 << BIO_RW)) && | 2700 | if (multi_ret && (rw & REQ_WRITE) && |
2701 | stripes_allocated < stripes_required) { | 2701 | stripes_allocated < stripes_required) { |
2702 | stripes_allocated = map->num_stripes; | 2702 | stripes_allocated = map->num_stripes; |
2703 | free_extent_map(em); | 2703 | free_extent_map(em); |
@@ -2733,7 +2733,7 @@ again: | |||
2733 | num_stripes = 1; | 2733 | num_stripes = 1; |
2734 | stripe_index = 0; | 2734 | stripe_index = 0; |
2735 | if (map->type & BTRFS_BLOCK_GROUP_RAID1) { | 2735 | if (map->type & BTRFS_BLOCK_GROUP_RAID1) { |
2736 | if (unplug_page || (rw & (1 << BIO_RW))) | 2736 | if (unplug_page || (rw & REQ_WRITE)) |
2737 | num_stripes = map->num_stripes; | 2737 | num_stripes = map->num_stripes; |
2738 | else if (mirror_num) | 2738 | else if (mirror_num) |
2739 | stripe_index = mirror_num - 1; | 2739 | stripe_index = mirror_num - 1; |
@@ -2744,7 +2744,7 @@ again: | |||
2744 | } | 2744 | } |
2745 | 2745 | ||
2746 | } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { | 2746 | } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { |
2747 | if (rw & (1 << BIO_RW)) | 2747 | if (rw & REQ_WRITE) |
2748 | num_stripes = map->num_stripes; | 2748 | num_stripes = map->num_stripes; |
2749 | else if (mirror_num) | 2749 | else if (mirror_num) |
2750 | stripe_index = mirror_num - 1; | 2750 | stripe_index = mirror_num - 1; |
@@ -2755,7 +2755,7 @@ again: | |||
2755 | stripe_index = do_div(stripe_nr, factor); | 2755 | stripe_index = do_div(stripe_nr, factor); |
2756 | stripe_index *= map->sub_stripes; | 2756 | stripe_index *= map->sub_stripes; |
2757 | 2757 | ||
2758 | if (unplug_page || (rw & (1 << BIO_RW))) | 2758 | if (unplug_page || (rw & REQ_WRITE)) |
2759 | num_stripes = map->sub_stripes; | 2759 | num_stripes = map->sub_stripes; |
2760 | else if (mirror_num) | 2760 | else if (mirror_num) |
2761 | stripe_index += mirror_num - 1; | 2761 | stripe_index += mirror_num - 1; |
@@ -2945,7 +2945,7 @@ static noinline int schedule_bio(struct btrfs_root *root, | |||
2945 | struct btrfs_pending_bios *pending_bios; | 2945 | struct btrfs_pending_bios *pending_bios; |
2946 | 2946 | ||
2947 | /* don't bother with additional async steps for reads, right now */ | 2947 | /* don't bother with additional async steps for reads, right now */ |
2948 | if (!(rw & (1 << BIO_RW))) { | 2948 | if (!(rw & REQ_WRITE)) { |
2949 | bio_get(bio); | 2949 | bio_get(bio); |
2950 | submit_bio(rw, bio); | 2950 | submit_bio(rw, bio); |
2951 | bio_put(bio); | 2951 | bio_put(bio); |
@@ -2964,7 +2964,7 @@ static noinline int schedule_bio(struct btrfs_root *root, | |||
2964 | bio->bi_rw |= rw; | 2964 | bio->bi_rw |= rw; |
2965 | 2965 | ||
2966 | spin_lock(&device->io_lock); | 2966 | spin_lock(&device->io_lock); |
2967 | if (bio_rw_flagged(bio, BIO_RW_SYNCIO)) | 2967 | if (bio->bi_rw & REQ_SYNC) |
2968 | pending_bios = &device->pending_sync_bios; | 2968 | pending_bios = &device->pending_sync_bios; |
2969 | else | 2969 | else |
2970 | pending_bios = &device->pending_bios; | 2970 | pending_bios = &device->pending_bios; |
diff --git a/fs/exofs/ios.c b/fs/exofs/ios.c index 4337cad7777b..e2732203fa93 100644 --- a/fs/exofs/ios.c +++ b/fs/exofs/ios.c | |||
@@ -599,7 +599,7 @@ static int _sbi_write_mirror(struct exofs_io_state *ios, int cur_comp) | |||
599 | } else { | 599 | } else { |
600 | bio = master_dev->bio; | 600 | bio = master_dev->bio; |
601 | /* FIXME: bio_set_dir() */ | 601 | /* FIXME: bio_set_dir() */ |
602 | bio->bi_rw |= (1 << BIO_RW); | 602 | bio->bi_rw |= REQ_WRITE; |
603 | } | 603 | } |
604 | 604 | ||
605 | osd_req_write(or, &ios->obj, per_dev->offset, bio, | 605 | osd_req_write(or, &ios->obj, per_dev->offset, bio, |
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index efc3539ac5a1..cde1248a6225 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c | |||
@@ -595,7 +595,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull) | |||
595 | if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) | 595 | if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) |
596 | goto skip_barrier; | 596 | goto skip_barrier; |
597 | get_bh(bh); | 597 | get_bh(bh); |
598 | submit_bh(WRITE_BARRIER | (1 << BIO_RW_META), bh); | 598 | submit_bh(WRITE_BARRIER | REQ_META, bh); |
599 | wait_on_buffer(bh); | 599 | wait_on_buffer(bh); |
600 | if (buffer_eopnotsupp(bh)) { | 600 | if (buffer_eopnotsupp(bh)) { |
601 | clear_buffer_eopnotsupp(bh); | 601 | clear_buffer_eopnotsupp(bh); |
@@ -605,7 +605,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull) | |||
605 | lock_buffer(bh); | 605 | lock_buffer(bh); |
606 | skip_barrier: | 606 | skip_barrier: |
607 | get_bh(bh); | 607 | get_bh(bh); |
608 | submit_bh(WRITE_SYNC | (1 << BIO_RW_META), bh); | 608 | submit_bh(WRITE_SYNC | REQ_META, bh); |
609 | wait_on_buffer(bh); | 609 | wait_on_buffer(bh); |
610 | } | 610 | } |
611 | if (!buffer_uptodate(bh)) | 611 | if (!buffer_uptodate(bh)) |
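log_write_header() above is the classic barrier-fallback pattern: attempt an ordered write first, and if the device reports EOPNOTSUPP, remember that and retry as a plain sync write. Condensed into a sketch under the new composite flags — the general shape only, not the exact gfs2 code (the lines between the two hunk fragments are elided in the diff, and the set_bit() is an assumption):

	get_bh(bh);
	submit_bh(WRITE_BARRIER | REQ_META, bh);	/* ordered write attempt */
	wait_on_buffer(bh);
	if (buffer_eopnotsupp(bh)) {
		clear_buffer_eopnotsupp(bh);
		set_bit(SDF_NOBARRIERS, &sdp->sd_flags);	/* assumed: remember for next time */
		lock_buffer(bh);
		get_bh(bh);
		submit_bh(WRITE_SYNC | REQ_META, bh);	/* fall back to a plain sync write */
		wait_on_buffer(bh);
	}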
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c index 18176d0b75d7..f3b071f921aa 100644 --- a/fs/gfs2/meta_io.c +++ b/fs/gfs2/meta_io.c | |||
@@ -36,8 +36,8 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb | |||
36 | { | 36 | { |
37 | struct buffer_head *bh, *head; | 37 | struct buffer_head *bh, *head; |
38 | int nr_underway = 0; | 38 | int nr_underway = 0; |
39 | int write_op = (1 << BIO_RW_META) | ((wbc->sync_mode == WB_SYNC_ALL ? | 39 | int write_op = REQ_META | |
40 | WRITE_SYNC_PLUG : WRITE)); | 40 | (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC_PLUG : WRITE); |
41 | 41 | ||
42 | BUG_ON(!PageLocked(page)); | 42 | BUG_ON(!PageLocked(page)); |
43 | BUG_ON(!page_has_buffers(page)); | 43 | BUG_ON(!page_has_buffers(page)); |
@@ -225,7 +225,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags, | |||
225 | } | 225 | } |
226 | bh->b_end_io = end_buffer_read_sync; | 226 | bh->b_end_io = end_buffer_read_sync; |
227 | get_bh(bh); | 227 | get_bh(bh); |
228 | submit_bh(READ_SYNC | (1 << BIO_RW_META), bh); | 228 | submit_bh(READ_SYNC | REQ_META, bh); |
229 | if (!(flags & DIO_WAIT)) | 229 | if (!(flags & DIO_WAIT)) |
230 | return 0; | 230 | return 0; |
231 | 231 | ||
@@ -432,7 +432,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen) | |||
432 | if (buffer_uptodate(first_bh)) | 432 | if (buffer_uptodate(first_bh)) |
433 | goto out; | 433 | goto out; |
434 | if (!buffer_locked(first_bh)) | 434 | if (!buffer_locked(first_bh)) |
435 | ll_rw_block(READ_SYNC | (1 << BIO_RW_META), 1, &first_bh); | 435 | ll_rw_block(READ_SYNC | REQ_META, 1, &first_bh); |
436 | 436 | ||
437 | dblock++; | 437 | dblock++; |
438 | extlen--; | 438 | extlen--; |
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 3593b3a7290e..fd4f8946abf5 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c | |||
@@ -275,7 +275,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector) | |||
275 | 275 | ||
276 | bio->bi_end_io = end_bio_io_page; | 276 | bio->bi_end_io = end_bio_io_page; |
277 | bio->bi_private = page; | 277 | bio->bi_private = page; |
278 | submit_bio(READ_SYNC | (1 << BIO_RW_META), bio); | 278 | submit_bio(READ_SYNC | REQ_META, bio); |
279 | wait_on_page_locked(page); | 279 | wait_on_page_locked(page); |
280 | bio_put(bio); | 280 | bio_put(bio); |
281 | if (!PageUptodate(page)) { | 281 | if (!PageUptodate(page)) { |
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c index 2e6a2723b8fa..4588fb9e93df 100644 --- a/fs/nilfs2/segbuf.c +++ b/fs/nilfs2/segbuf.c | |||
@@ -508,7 +508,7 @@ static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf, | |||
508 | * Last BIO is always sent through the following | 508 | * Last BIO is always sent through the following |
509 | * submission. | 509 | * submission. |
510 | */ | 510 | */ |
511 | rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG); | 511 | rw |= REQ_SYNC | REQ_UNPLUG; |
512 | res = nilfs_segbuf_submit_bio(segbuf, &wi, rw); | 512 | res = nilfs_segbuf_submit_bio(segbuf, &wi, rw); |
513 | } | 513 | } |
514 | 514 | ||
diff --git a/include/linux/bio.h b/include/linux/bio.h index 7fc5606e6ea5..4d379c8250ae 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h | |||
@@ -138,55 +138,83 @@ struct bio { | |||
138 | #define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET) | 138 | #define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET) |
139 | 139 | ||
140 | /* | 140 | /* |
141 | * bio bi_rw flags | 141 | * Request flags. For use in the cmd_flags field of struct request, and in |
142 | * | 142 | * bi_rw of struct bio. Note that some flags are only valid in either one. |
143 | * bit 0 -- data direction | ||
144 | * If not set, bio is a read from device. If set, it's a write to device. | ||
145 | * bit 1 -- fail fast device errors | ||
146 | * bit 2 -- fail fast transport errors | ||
147 | * bit 3 -- fail fast driver errors | ||
148 | * bit 4 -- rw-ahead when set | ||
149 | * bit 5 -- barrier | ||
150 | * Insert a serialization point in the IO queue, forcing previously | ||
151 | * submitted IO to be completed before this one is issued. | ||
152 | * bit 6 -- synchronous I/O hint. | ||
153 | * bit 7 -- Unplug the device immediately after submitting this bio. | ||
154 | * bit 8 -- metadata request | ||
155 | * Used for tracing to differentiate metadata and data IO. May also | ||
156 | * get some preferential treatment in the IO scheduler | ||
157 | * bit 9 -- discard sectors | ||
158 | * Informs the lower level device that this range of sectors is no longer | ||
159 | * used by the file system and may thus be freed by the device. Used | ||
160 | * for flash based storage. | ||
161 | * Don't want driver retries for any fast fail whatever the reason. | ||
162 | * bit 10 -- Tell the IO scheduler not to wait for more requests after this | ||
163 | one has been submitted, even if it is a SYNC request. | ||
164 | */ | 143 | */ |
165 | enum bio_rw_flags { | 144 | enum rq_flag_bits { |
166 | BIO_RW, | 145 | /* common flags */ |
167 | BIO_RW_FAILFAST_DEV, | 146 | __REQ_WRITE, /* not set, read. set, write */ |
168 | BIO_RW_FAILFAST_TRANSPORT, | 147 | __REQ_FAILFAST_DEV, /* no driver retries of device errors */ |
169 | BIO_RW_FAILFAST_DRIVER, | 148 | __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */ |
170 | /* above flags must match REQ_* */ | 149 | __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */ |
171 | BIO_RW_AHEAD, | 150 | |
172 | BIO_RW_BARRIER, | 151 | __REQ_HARDBARRIER, /* may not be passed by drive either */ |
173 | BIO_RW_SYNCIO, | 152 | __REQ_SYNC, /* request is sync (sync write or read) */ |
174 | BIO_RW_UNPLUG, | 153 | __REQ_META, /* metadata io request */ |
175 | BIO_RW_META, | 154 | __REQ_DISCARD, /* request to discard sectors */ |
176 | BIO_RW_DISCARD, | 155 | __REQ_NOIDLE, /* don't anticipate more IO after this one */ |
177 | BIO_RW_NOIDLE, | 156 | |
157 | /* bio only flags */ | ||
158 | __REQ_UNPLUG, /* unplug the queue immediately after submission */ | ||
159 | __REQ_RAHEAD, /* read ahead, can fail anytime */ | ||
160 | |||
161 | /* request only flags */ | ||
162 | __REQ_SORTED, /* elevator knows about this request */ | ||
163 | __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ | ||
164 | __REQ_FUA, /* forced unit access */ | ||
165 | __REQ_NOMERGE, /* don't touch this for merging */ | ||
166 | __REQ_STARTED, /* drive already may have started this one */ | ||
167 | __REQ_DONTPREP, /* don't call prep for this one */ | ||
168 | __REQ_QUEUED, /* uses queueing */ | ||
169 | __REQ_ELVPRIV, /* elevator private data attached */ | ||
170 | __REQ_FAILED, /* set if the request failed */ | ||
171 | __REQ_QUIET, /* don't worry about errors */ | ||
172 | __REQ_PREEMPT, /* set for "ide_preempt" requests */ | ||
173 | __REQ_ORDERED_COLOR, /* is before or after barrier */ | ||
174 | __REQ_ALLOCED, /* request came from our alloc pool */ | ||
175 | __REQ_COPY_USER, /* contains copies of user pages */ | ||
176 | __REQ_INTEGRITY, /* integrity metadata has been remapped */ | ||
177 | __REQ_IO_STAT, /* account I/O stat */ | ||
178 | __REQ_MIXED_MERGE, /* merge of different types, fail separately */ | ||
179 | __REQ_NR_BITS, /* stops here */ | ||
178 | }; | 180 | }; |
179 | 181 | ||
180 | /* | 182 | #define REQ_WRITE (1 << __REQ_WRITE) |
181 | * First four bits must match between bio->bi_rw and rq->cmd_flags, make | 183 | #define REQ_FAILFAST_DEV (1 << __REQ_FAILFAST_DEV) |
182 | * that explicit here. | 184 | #define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT) |
183 | */ | 185 | #define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER) |
184 | #define BIO_RW_RQ_MASK 0xf | 186 | #define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER) |
185 | 187 | #define REQ_SYNC (1 << __REQ_SYNC) | |
186 | static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag) | 188 | #define REQ_META (1 << __REQ_META) |
187 | { | 189 | #define REQ_DISCARD (1 << __REQ_DISCARD) |
188 | return (bio->bi_rw & (1 << flag)) != 0; | 190 | #define REQ_NOIDLE (1 << __REQ_NOIDLE) |
189 | } | 191 | |
192 | #define REQ_FAILFAST_MASK \ | ||
193 | (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) | ||
194 | #define REQ_COMMON_MASK \ | ||
195 | (REQ_WRITE | REQ_FAILFAST_MASK | REQ_HARDBARRIER | REQ_SYNC | \ | ||
196 | REQ_META | REQ_DISCARD | REQ_NOIDLE) | ||
197 | |||
198 | #define REQ_UNPLUG (1 << __REQ_UNPLUG) | ||
199 | #define REQ_RAHEAD (1 << __REQ_RAHEAD) | ||
200 | |||
201 | #define REQ_SORTED (1 << __REQ_SORTED) | ||
202 | #define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) | ||
203 | #define REQ_FUA (1 << __REQ_FUA) | ||
204 | #define REQ_NOMERGE (1 << __REQ_NOMERGE) | ||
205 | #define REQ_STARTED (1 << __REQ_STARTED) | ||
206 | #define REQ_DONTPREP (1 << __REQ_DONTPREP) | ||
207 | #define REQ_QUEUED (1 << __REQ_QUEUED) | ||
208 | #define REQ_ELVPRIV (1 << __REQ_ELVPRIV) | ||
209 | #define REQ_FAILED (1 << __REQ_FAILED) | ||
210 | #define REQ_QUIET (1 << __REQ_QUIET) | ||
211 | #define REQ_PREEMPT (1 << __REQ_PREEMPT) | ||
212 | #define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR) | ||
213 | #define REQ_ALLOCED (1 << __REQ_ALLOCED) | ||
214 | #define REQ_COPY_USER (1 << __REQ_COPY_USER) | ||
215 | #define REQ_INTEGRITY (1 << __REQ_INTEGRITY) | ||
216 | #define REQ_IO_STAT (1 << __REQ_IO_STAT) | ||
217 | #define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE) | ||
190 | 218 | ||
191 | /* | 219 | /* |
192 | * upper 16 bits of bi_rw define the io priority of this bio | 220 | * upper 16 bits of bi_rw define the io priority of this bio |
@@ -211,7 +239,10 @@ static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag) | |||
211 | #define bio_offset(bio) bio_iovec((bio))->bv_offset | 239 | #define bio_offset(bio) bio_iovec((bio))->bv_offset |
212 | #define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx) | 240 | #define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx) |
213 | #define bio_sectors(bio) ((bio)->bi_size >> 9) | 241 | #define bio_sectors(bio) ((bio)->bi_size >> 9) |
214 | #define bio_empty_barrier(bio) (bio_rw_flagged(bio, BIO_RW_BARRIER) && !bio_has_data(bio) && !bio_rw_flagged(bio, BIO_RW_DISCARD)) | 242 | #define bio_empty_barrier(bio) \ |
243 | ((bio->bi_rw & REQ_HARDBARRIER) && \ | ||
244 | !bio_has_data(bio) && \ | ||
245 | !(bio->bi_rw & REQ_DISCARD)) | ||
215 | 246 | ||
216 | static inline unsigned int bio_cur_bytes(struct bio *bio) | 247 | static inline unsigned int bio_cur_bytes(struct bio *bio) |
217 | { | 248 | { |
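One property of the merged enum deserves emphasis: only the first four bits — write plus the three failfast flags — keep their old BIO_RW_* positions (which is what made the old BIO_RW_RQ_MASK trick work), while every remaining flag is renumbered. Open-coded `1 << BIO_RW_FOO` sites therefore cannot be translated mechanically; they must go through the REQ_* macros, as the conversions throughout this patch do. Spot checks of the new layout, derivable from the enum above:

	/* illustrative compile-time checks of the renumbering */
	BUILD_BUG_ON(REQ_WRITE       != (1 << 0));	/* old BIO_RW: unchanged    */
	BUILD_BUG_ON(REQ_HARDBARRIER != (1 << 4));	/* old BIO_RW_BARRIER was 5 */
	BUILD_BUG_ON(REQ_SYNC        != (1 << 5));	/* old BIO_RW_SYNCIO was 6  */
	BUILD_BUG_ON(REQ_META        != (1 << 6));	/* old BIO_RW_META was 8    */
	BUILD_BUG_ON(REQ_UNPLUG      != (1 << 9));	/* old BIO_RW_UNPLUG was 7  */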
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 3ecd28ef9ba4..3fc0f5908619 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -84,70 +84,6 @@ enum { | |||
84 | REQ_LB_OP_FLUSH = 0x41, /* flush request */ | 84 | REQ_LB_OP_FLUSH = 0x41, /* flush request */ |
85 | }; | 85 | }; |
86 | 86 | ||
87 | /* | ||
88 | * request type modified bits. first four bits match BIO_RW* bits, important | ||
89 | */ | ||
90 | enum rq_flag_bits { | ||
91 | __REQ_RW, /* not set, read. set, write */ | ||
92 | __REQ_FAILFAST_DEV, /* no driver retries of device errors */ | ||
93 | __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */ | ||
94 | __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */ | ||
95 | /* above flags must match BIO_RW_* */ | ||
96 | __REQ_DISCARD, /* request to discard sectors */ | ||
97 | __REQ_SORTED, /* elevator knows about this request */ | ||
98 | __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ | ||
99 | __REQ_HARDBARRIER, /* may not be passed by drive either */ | ||
100 | __REQ_FUA, /* forced unit access */ | ||
101 | __REQ_NOMERGE, /* don't touch this for merging */ | ||
102 | __REQ_STARTED, /* drive already may have started this one */ | ||
103 | __REQ_DONTPREP, /* don't call prep for this one */ | ||
104 | __REQ_QUEUED, /* uses queueing */ | ||
105 | __REQ_ELVPRIV, /* elevator private data attached */ | ||
106 | __REQ_FAILED, /* set if the request failed */ | ||
107 | __REQ_QUIET, /* don't worry about errors */ | ||
108 | __REQ_PREEMPT, /* set for "ide_preempt" requests */ | ||
109 | __REQ_ORDERED_COLOR, /* is before or after barrier */ | ||
110 | __REQ_RW_SYNC, /* request is sync (sync write or read) */ | ||
111 | __REQ_ALLOCED, /* request came from our alloc pool */ | ||
112 | __REQ_RW_META, /* metadata io request */ | ||
113 | __REQ_COPY_USER, /* contains copies of user pages */ | ||
114 | __REQ_INTEGRITY, /* integrity metadata has been remapped */ | ||
115 | __REQ_NOIDLE, /* Don't anticipate more IO after this one */ | ||
116 | __REQ_IO_STAT, /* account I/O stat */ | ||
117 | __REQ_MIXED_MERGE, /* merge of different types, fail separately */ | ||
118 | __REQ_NR_BITS, /* stops here */ | ||
119 | }; | ||
120 | |||
121 | #define REQ_RW (1 << __REQ_RW) | ||
122 | #define REQ_FAILFAST_DEV (1 << __REQ_FAILFAST_DEV) | ||
123 | #define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT) | ||
124 | #define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER) | ||
125 | #define REQ_DISCARD (1 << __REQ_DISCARD) | ||
126 | #define REQ_SORTED (1 << __REQ_SORTED) | ||
127 | #define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) | ||
128 | #define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER) | ||
129 | #define REQ_FUA (1 << __REQ_FUA) | ||
130 | #define REQ_NOMERGE (1 << __REQ_NOMERGE) | ||
131 | #define REQ_STARTED (1 << __REQ_STARTED) | ||
132 | #define REQ_DONTPREP (1 << __REQ_DONTPREP) | ||
133 | #define REQ_QUEUED (1 << __REQ_QUEUED) | ||
134 | #define REQ_ELVPRIV (1 << __REQ_ELVPRIV) | ||
135 | #define REQ_FAILED (1 << __REQ_FAILED) | ||
136 | #define REQ_QUIET (1 << __REQ_QUIET) | ||
137 | #define REQ_PREEMPT (1 << __REQ_PREEMPT) | ||
138 | #define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR) | ||
139 | #define REQ_RW_SYNC (1 << __REQ_RW_SYNC) | ||
140 | #define REQ_ALLOCED (1 << __REQ_ALLOCED) | ||
141 | #define REQ_RW_META (1 << __REQ_RW_META) | ||
142 | #define REQ_COPY_USER (1 << __REQ_COPY_USER) | ||
143 | #define REQ_INTEGRITY (1 << __REQ_INTEGRITY) | ||
144 | #define REQ_NOIDLE (1 << __REQ_NOIDLE) | ||
145 | #define REQ_IO_STAT (1 << __REQ_IO_STAT) | ||
146 | #define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE) | ||
147 | |||
148 | #define REQ_FAILFAST_MASK (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | \ | ||
149 | REQ_FAILFAST_DRIVER) | ||
150 | |||
151 | #define BLK_MAX_CDB 16 | 87 | #define BLK_MAX_CDB 16 |
152 | 88 | ||
153 | /* | 89 | /* |
@@ -631,7 +567,7 @@ enum { | |||
631 | */ | 567 | */ |
632 | static inline bool rw_is_sync(unsigned int rw_flags) | 568 | static inline bool rw_is_sync(unsigned int rw_flags) |
633 | { | 569 | { |
634 | return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC); | 570 | return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC); |
635 | } | 571 | } |
636 | 572 | ||
637 | static inline bool rq_is_sync(struct request *rq) | 573 | static inline bool rq_is_sync(struct request *rq) |
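The rename leaves rw_is_sync()'s contract intact: every read counts as synchronous, and a write only when tagged REQ_SYNC. A few worked cases, using the composites defined later in fs.h:

	rw_is_sync(READ);		/* true  -- reads are always sync        */
	rw_is_sync(WRITE);		/* false -- untagged write is async      */
	rw_is_sync(WRITE | REQ_SYNC);	/* true  -- explicitly sync write        */
	rw_is_sync(READ_SYNC);		/* true  -- READ | REQ_SYNC | REQ_UNPLUG */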
diff --git a/include/linux/fs.h b/include/linux/fs.h index 598878831497..c5c92943c767 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -144,29 +144,31 @@ struct inodes_stat_t { | |||
144 | * of this IO. | 144 | * of this IO. |
145 | * | 145 | * |
146 | */ | 146 | */ |
147 | #define RW_MASK 1 | 147 | #define RW_MASK 1 |
148 | #define RWA_MASK 2 | 148 | #define RWA_MASK 2 |
149 | #define READ 0 | 149 | |
150 | #define WRITE 1 | 150 | #define READ 0 |
151 | #define READA 2 /* read-ahead - don't block if no resources */ | 151 | #define WRITE 1 |
152 | #define SWRITE 3 /* for ll_rw_block() - wait for buffer lock */ | 152 | #define READA 2 /* readahead - don't block if no resources */ |
153 | #define READ_SYNC (READ | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG)) | 153 | #define SWRITE 3 /* for ll_rw_block() - wait for buffer lock */ |
154 | #define READ_META (READ | (1 << BIO_RW_META)) | 154 | |
155 | #define WRITE_SYNC_PLUG (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE)) | 155 | #define READ_SYNC (READ | REQ_SYNC | REQ_UNPLUG) |
156 | #define WRITE_SYNC (WRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG)) | 156 | #define READ_META (READ | REQ_META) |
157 | #define WRITE_ODIRECT_PLUG (WRITE | (1 << BIO_RW_SYNCIO)) | 157 | #define WRITE_SYNC_PLUG (WRITE | REQ_SYNC | REQ_NOIDLE) |
158 | #define WRITE_META (WRITE | (1 << BIO_RW_META)) | 158 | #define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG) |
159 | #define SWRITE_SYNC_PLUG \ | 159 | #define WRITE_ODIRECT_PLUG (WRITE | REQ_SYNC) |
160 | (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE)) | 160 | #define WRITE_META (WRITE | REQ_META) |
161 | #define SWRITE_SYNC (SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG)) | 161 | #define WRITE_BARRIER (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \ |
162 | #define WRITE_BARRIER (WRITE_SYNC | (1 << BIO_RW_BARRIER)) | 162 | REQ_HARDBARRIER) |
163 | #define SWRITE_SYNC_PLUG (SWRITE | REQ_SYNC | REQ_NOIDLE) | ||
164 | #define SWRITE_SYNC (SWRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG) | ||
163 | 165 | ||
164 | /* | 166 | /* |
165 | * These aren't really reads or writes, they pass down information about | 167 | * These aren't really reads or writes, they pass down information about |
166 | * parts of device that are now unused by the file system. | 168 | * parts of device that are now unused by the file system. |
167 | */ | 169 | */ |
168 | #define DISCARD_NOBARRIER (WRITE | (1 << BIO_RW_DISCARD)) | 170 | #define DISCARD_NOBARRIER (WRITE | REQ_DISCARD) |
169 | #define DISCARD_BARRIER (DISCARD_NOBARRIER | (1 << BIO_RW_BARRIER)) | 171 | #define DISCARD_BARRIER (WRITE | REQ_DISCARD | REQ_HARDBARRIER) |
170 | 172 | ||
171 | #define SEL_IN 1 | 173 | #define SEL_IN 1 |
172 | #define SEL_OUT 2 | 174 | #define SEL_OUT 2 |
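A typical consumer just hands one of these composites to submit_bh(); for example, forcing a single buffer to stable storage and waiting for it. A minimal sketch, assuming bh is already locked and dirty, and using only helpers that predate this patch:

	/* minimal sketch: push one buffer out synchronously and wait */
	get_bh(bh);
	bh->b_end_io = end_buffer_write_sync;
	submit_bh(WRITE_SYNC, bh);	/* == WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG */
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh))
		ret = -EIO;		/* the write never reached the device */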
diff --git a/kernel/power/block_io.c b/kernel/power/block_io.c index 97024fd40cd5..83bbc7c02df9 100644 --- a/kernel/power/block_io.c +++ b/kernel/power/block_io.c | |||
@@ -28,7 +28,7 @@ | |||
28 | static int submit(int rw, struct block_device *bdev, sector_t sector, | 28 | static int submit(int rw, struct block_device *bdev, sector_t sector, |
29 | struct page *page, struct bio **bio_chain) | 29 | struct page *page, struct bio **bio_chain) |
30 | { | 30 | { |
31 | const int bio_rw = rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG); | 31 | const int bio_rw = rw | REQ_SYNC | REQ_UNPLUG; |
32 | struct bio *bio; | 32 | struct bio *bio; |
33 | 33 | ||
34 | bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1); | 34 | bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1); |
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 4f149944cb89..3b4a695051b6 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c | |||
@@ -169,9 +169,12 @@ static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector, | |||
169 | static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ), | 169 | static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ), |
170 | BLK_TC_ACT(BLK_TC_WRITE) }; | 170 | BLK_TC_ACT(BLK_TC_WRITE) }; |
171 | 171 | ||
172 | #define BLK_TC_HARDBARRIER BLK_TC_BARRIER | ||
173 | #define BLK_TC_RAHEAD BLK_TC_AHEAD | ||
174 | |||
172 | /* The ilog2() calls fall out because they're constant */ | 175 | /* The ilog2() calls fall out because they're constant */ |
173 | #define MASK_TC_BIT(rw, __name) ((rw & (1 << BIO_RW_ ## __name)) << \ | 176 | #define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \ |
174 | (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name)) | 177 | (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name)) |
175 | 178 | ||
176 | /* | 179 | /* |
177 | * The worker for the various blk_add_trace*() types. Fills out a | 180 | * The worker for the various blk_add_trace*() types. Fills out a |
@@ -194,9 +197,9 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, | |||
194 | return; | 197 | return; |
195 | 198 | ||
196 | what |= ddir_act[rw & WRITE]; | 199 | what |= ddir_act[rw & WRITE]; |
197 | what |= MASK_TC_BIT(rw, BARRIER); | 200 | what |= MASK_TC_BIT(rw, HARDBARRIER); |
198 | what |= MASK_TC_BIT(rw, SYNCIO); | 201 | what |= MASK_TC_BIT(rw, SYNC); |
199 | what |= MASK_TC_BIT(rw, AHEAD); | 202 | what |= MASK_TC_BIT(rw, RAHEAD); |
200 | what |= MASK_TC_BIT(rw, META); | 203 | what |= MASK_TC_BIT(rw, META); |
201 | what |= MASK_TC_BIT(rw, DISCARD); | 204 | what |= MASK_TC_BIT(rw, DISCARD); |
202 | 205 | ||
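MASK_TC_BIT() relocates one request flag into the trace-category half of the action word, and the two aliases above exist purely so the token pasting still lines up after the rename (REQ_HARDBARRIER pairs with BLK_TC_BARRIER, REQ_RAHEAD with BLK_TC_AHEAD). Worked through for SYNC, assuming the usual blktrace_api.h values BLK_TC_SYNC == (1 << 3) and BLK_TC_SHIFT == 16:

	/* MASK_TC_BIT(rw, SYNC)
	 *   = (rw & REQ_SYNC) << (ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT - __REQ_SYNC)
	 *   = (rw & (1 << 5)) << (3 + 16 - 5)
	 * so bit 5 of rw lands at bit 19, which is BLK_TC_ACT(BLK_TC_SYNC).
	 */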
@@ -662,7 +665,7 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq, | |||
662 | return; | 665 | return; |
663 | 666 | ||
664 | if (rq->cmd_flags & REQ_DISCARD) | 667 | if (rq->cmd_flags & REQ_DISCARD) |
665 | rw |= (1 << BIO_RW_DISCARD); | 668 | rw |= REQ_DISCARD; |
666 | 669 | ||
667 | if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { | 670 | if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { |
668 | what |= BLK_TC_ACT(BLK_TC_PC); | 671 | what |= BLK_TC_ACT(BLK_TC_PC); |
@@ -1755,20 +1758,20 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes) | |||
1755 | 1758 | ||
1756 | if (rw & WRITE) | 1759 | if (rw & WRITE) |
1757 | rwbs[i++] = 'W'; | 1760 | rwbs[i++] = 'W'; |
1758 | else if (rw & 1 << BIO_RW_DISCARD) | 1761 | else if (rw & REQ_DISCARD) |
1759 | rwbs[i++] = 'D'; | 1762 | rwbs[i++] = 'D'; |
1760 | else if (bytes) | 1763 | else if (bytes) |
1761 | rwbs[i++] = 'R'; | 1764 | rwbs[i++] = 'R'; |
1762 | else | 1765 | else |
1763 | rwbs[i++] = 'N'; | 1766 | rwbs[i++] = 'N'; |
1764 | 1767 | ||
1765 | if (rw & 1 << BIO_RW_AHEAD) | 1768 | if (rw & REQ_RAHEAD) |
1766 | rwbs[i++] = 'A'; | 1769 | rwbs[i++] = 'A'; |
1767 | if (rw & 1 << BIO_RW_BARRIER) | 1770 | if (rw & REQ_HARDBARRIER) |
1768 | rwbs[i++] = 'B'; | 1771 | rwbs[i++] = 'B'; |
1769 | if (rw & 1 << BIO_RW_SYNCIO) | 1772 | if (rw & REQ_SYNC) |
1770 | rwbs[i++] = 'S'; | 1773 | rwbs[i++] = 'S'; |
1771 | if (rw & 1 << BIO_RW_META) | 1774 | if (rw & REQ_META) |
1772 | rwbs[i++] = 'M'; | 1775 | rwbs[i++] = 'M'; |
1773 | 1776 | ||
1774 | rwbs[i] = '\0'; | 1777 | rwbs[i] = '\0'; |
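blk_fill_rwbs() renders these flags as the short string seen in blktrace output: one direction character, then modifiers in A/B/S/M order. Example results, read directly off the code above:

	char rwbs[8];
	blk_fill_rwbs(rwbs, WRITE | REQ_SYNC | REQ_META, 4096);	/* "WSM" */
	blk_fill_rwbs(rwbs, READ | REQ_RAHEAD, 4096);			/* "RA"  */
	blk_fill_rwbs(rwbs, WRITE | REQ_DISCARD, 4096);			/* "W": WRITE takes the slot before 'D' */
	blk_fill_rwbs(rwbs, READ, 0);					/* "N": no data moved, not a write */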
@@ -1780,7 +1783,7 @@ void blk_fill_rwbs_rq(char *rwbs, struct request *rq) | |||
1780 | int bytes; | 1783 | int bytes; |
1781 | 1784 | ||
1782 | if (rq->cmd_flags & REQ_DISCARD) | 1785 | if (rq->cmd_flags & REQ_DISCARD) |
1783 | rw |= (1 << BIO_RW_DISCARD); | 1786 | rw |= REQ_DISCARD; |
1784 | 1787 | ||
1785 | bytes = blk_rq_bytes(rq); | 1788 | bytes = blk_rq_bytes(rq); |
1786 | 1789 | ||
diff --git a/mm/page_io.c b/mm/page_io.c index 31a3b962230a..2dee975bf469 100644 --- a/mm/page_io.c +++ b/mm/page_io.c | |||
@@ -106,7 +106,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc) | |||
106 | goto out; | 106 | goto out; |
107 | } | 107 | } |
108 | if (wbc->sync_mode == WB_SYNC_ALL) | 108 | if (wbc->sync_mode == WB_SYNC_ALL) |
109 | rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG); | 109 | rw |= REQ_SYNC | REQ_UNPLUG; |
110 | count_vm_event(PSWPOUT); | 110 | count_vm_event(PSWPOUT); |
111 | set_page_writeback(page); | 111 | set_page_writeback(page); |
112 | unlock_page(page); | 112 | unlock_page(page); |