author    Jens Axboe <jens.axboe@oracle.com>  2009-09-11 08:32:04 -0400
committer Jens Axboe <jens.axboe@oracle.com>  2009-09-11 08:33:31 -0400
commit    1f98a13f623e0ef666690a18c1250335fc6d7ef1 (patch)
tree      15ca2dddffaa18a0d1844957f4f8cc707cbb8117
parent    e7e503aedb1f4d165081cb8d47a58c38f80f0cb4 (diff)
bio: first step in sanitizing the bio->bi_rw flag testing
Get rid of any functions that test for these bits and make callers
use bio_rw_flagged() directly. Then it is at least directly apparent
what variable and flag they check.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
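For reference, here is the pattern in miniature: a standalone C sketch with a trimmed struct bio and an abbreviated flag enum. Only bio_rw_flagged() itself is verbatim from the patch; the enum ordering and struct layout are illustrative, not the kernel's.

/*
 * Minimal sketch of the change this patch applies, assuming a trimmed
 * struct bio and an abbreviated bio_rw_flags enum.
 */
#include <stdbool.h>

enum bio_rw_flags {
	BIO_RW,			/* set for writes, clear for reads */
	BIO_RW_AHEAD,
	BIO_RW_BARRIER,
	BIO_RW_SYNCIO,
	/* ... remaining BIO_RW_* flags elided ... */
};

struct bio {
	unsigned long bi_rw;	/* low bits hold the BIO_RW_* flags */
};

/* The one accessor kept by the patch, as added to include/linux/bio.h. */
static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
{
	return (bio->bi_rw & (1 << flag)) != 0;
}

/*
 * Before: a wrapper hid which variable and flag were being tested, e.g.
 *	#define bio_barrier(bio)	bio_rw_flagged(bio, BIO_RW_BARRIER)
 *	if (bio_barrier(bio)) ...
 * After: callers spell both out directly:
 *	if (bio_rw_flagged(bio, BIO_RW_BARRIER)) ...
 */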
 block/blk-core.c            | 25
 block/cfq-iosched.c         |  2
 block/elevator.c            |  3
 drivers/block/loop.c        |  2
 drivers/md/dm-raid1.c       |  2
 drivers/md/dm-stripe.c      |  2
 drivers/md/dm.c             | 12
 drivers/md/linear.c         |  2
 drivers/md/multipath.c      |  4
 drivers/md/raid0.c          |  2
 drivers/md/raid1.c          | 14
 drivers/md/raid10.c         |  6
 drivers/md/raid5.c          |  2
 drivers/staging/dst/dcore.c |  5
 fs/btrfs/volumes.c          |  4
 include/linux/bio.h         | 25
 include/linux/blkdev.h      |  2
 17 files changed, 54 insertions(+), 60 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index c822239bcc9d..52559715cb90 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1114,24 +1114,24 @@ void init_request_from_bio(struct request *req, struct bio *bio)
	 * Inherit FAILFAST from bio (for read-ahead, and explicit
	 * FAILFAST). FAILFAST flags are identical for req and bio.
	 */
-	if (bio_rw_ahead(bio))
+	if (bio_rw_flagged(bio, BIO_RW_AHEAD))
 		req->cmd_flags |= REQ_FAILFAST_MASK;
 	else
 		req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
 
-	if (unlikely(bio_discard(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) {
 		req->cmd_flags |= REQ_DISCARD;
-		if (bio_barrier(bio))
+		if (bio_rw_flagged(bio, BIO_RW_BARRIER))
 			req->cmd_flags |= REQ_SOFTBARRIER;
 		req->q->prepare_discard_fn(req->q, req);
-	} else if (unlikely(bio_barrier(bio)))
+	} else if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)))
 		req->cmd_flags |= REQ_HARDBARRIER;
 
-	if (bio_sync(bio))
+	if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
 		req->cmd_flags |= REQ_RW_SYNC;
-	if (bio_rw_meta(bio))
+	if (bio_rw_flagged(bio, BIO_RW_META))
 		req->cmd_flags |= REQ_RW_META;
-	if (bio_noidle(bio))
+	if (bio_rw_flagged(bio, BIO_RW_NOIDLE))
 		req->cmd_flags |= REQ_NOIDLE;
 
 	req->errors = 0;
@@ -1155,12 +1155,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	int el_ret;
 	unsigned int bytes = bio->bi_size;
 	const unsigned short prio = bio_prio(bio);
-	const int sync = bio_sync(bio);
-	const int unplug = bio_unplug(bio);
+	const bool sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+	const bool unplug = bio_rw_flagged(bio, BIO_RW_UNPLUG);
 	const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
 	int rw_flags;
 
-	if (bio_barrier(bio) && bio_has_data(bio) &&
+	if (bio_rw_flagged(bio, BIO_RW_BARRIER) && bio_has_data(bio) &&
 	    (q->next_ordered == QUEUE_ORDERED_NONE)) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
@@ -1174,7 +1174,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
 	spin_lock_irq(q->queue_lock);
 
-	if (unlikely(bio_barrier(bio)) || elv_queue_empty(q))
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)) || elv_queue_empty(q))
 		goto get_rq;
 
 	el_ret = elv_merge(q, &req, bio);
@@ -1470,7 +1470,8 @@ static inline void __generic_make_request(struct bio *bio)
 	if (bio_check_eod(bio, nr_sectors))
 		goto end_io;
 
-	if (bio_discard(bio) && !q->prepare_discard_fn) {
+	if (bio_rw_flagged(bio, BIO_RW_DISCARD) &&
+	    !q->prepare_discard_fn) {
 		err = -EOPNOTSUPP;
 		goto end_io;
 	}
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ca0d7e71324b..9e6d0af6c990 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -257,7 +257,7 @@ static inline void cic_set_cfqq(struct cfq_io_context *cic,
  */
 static inline int cfq_bio_sync(struct bio *bio)
 {
-	if (bio_data_dir(bio) == READ || bio_sync(bio))
+	if (bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO))
 		return 1;
 
 	return 0;
diff --git a/block/elevator.c b/block/elevator.c
index ca861927ba41..1975b619c86d 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -79,7 +79,8 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 	/*
 	 * Don't merge file system requests and discard requests
 	 */
-	if (bio_discard(bio) != bio_discard(rq->bio))
+	if (bio_rw_flagged(bio, BIO_RW_DISCARD) !=
+	    bio_rw_flagged(rq->bio, BIO_RW_DISCARD))
 		return 0;
 
 	/*
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 5757188cd1fb..bbb79441d895 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -475,7 +475,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
 	pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
 
 	if (bio_rw(bio) == WRITE) {
-		int barrier = bio_barrier(bio);
+		bool barrier = bio_rw_flagged(bio, BIO_RW_BARRIER);
 		struct file *file = lo->lo_backing_file;
 
 		if (barrier) {
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 33f179e66bf5..cc9dc79b0784 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1129,7 +1129,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
 	if (error == -EOPNOTSUPP)
 		goto out;
 
-	if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
+	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
 		goto out;
 
 	if (unlikely(error)) {
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 3e563d251733..fde658ccbcec 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -285,7 +285,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
 	if (!error)
 		return 0; /* I/O complete */
 
-	if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
+	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
 		return error;
 
 	if (error == -EOPNOTSUPP)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index b4845b14740d..ec012f030240 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -586,7 +586,7 @@ static void dec_pending(struct dm_io *io, int error)
 			 */
 			spin_lock_irqsave(&md->deferred_lock, flags);
 			if (__noflush_suspending(md)) {
-				if (!bio_barrier(io->bio))
+				if (!bio_rw_flagged(io->bio, BIO_RW_BARRIER))
 					bio_list_add_head(&md->deferred,
 							  io->bio);
 			} else
@@ -598,7 +598,7 @@ static void dec_pending(struct dm_io *io, int error)
 		io_error = io->error;
 		bio = io->bio;
 
-		if (bio_barrier(bio)) {
+		if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
 			/*
 			 * There can be just one barrier request so we use
 			 * a per-device variable for error reporting.
@@ -1209,7 +1209,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
 
 	ci.map = dm_get_table(md);
 	if (unlikely(!ci.map)) {
-		if (!bio_barrier(bio))
+		if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
 			bio_io_error(bio);
 		else
 			if (!md->barrier_error)
@@ -1321,7 +1321,7 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
 	 * we have to queue this io for later.
 	 */
 	if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
-	    unlikely(bio_barrier(bio))) {
+	    unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		up_read(&md->io_lock);
 
 		if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
@@ -1344,7 +1344,7 @@ static int dm_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct mapped_device *md = q->queuedata;
 
-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
@@ -2164,7 +2164,7 @@ static void dm_wq_work(struct work_struct *work)
 		if (dm_request_based(md))
 			generic_make_request(c);
 		else {
-			if (bio_barrier(c))
+			if (bio_rw_flagged(c, BIO_RW_BARRIER))
 				process_barrier(md, c);
 			else
 				__split_and_process_bio(md, c);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 5fe39c2a3d2b..ea4842905444 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -288,7 +288,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
 	sector_t start_sector;
 	int cpu;
 
-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 7140909f6662..89e76819f61f 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -90,7 +90,7 @@ static void multipath_end_request(struct bio *bio, int error)
 
 	if (uptodate)
 		multipath_end_bh_io(mp_bh, 0);
-	else if (!bio_rw_ahead(bio)) {
+	else if (!bio_rw_flagged(bio, BIO_RW_AHEAD)) {
 		/*
 		 * oops, IO error:
 		 */
@@ -144,7 +144,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
 	const int rw = bio_data_dir(bio);
 	int cpu;
 
-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 898e2bdfee47..f845ed98fec9 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -448,7 +448,7 @@ static int raid0_make_request(struct request_queue *q, struct bio *bio)
 	const int rw = bio_data_dir(bio);
 	int cpu;
 
-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 8726fd7ebce5..ff7ed3335995 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -782,8 +782,9 @@ static int make_request(struct request_queue *q, struct bio * bio)
 	struct bio_list bl;
 	struct page **behind_pages = NULL;
 	const int rw = bio_data_dir(bio);
-	const int do_sync = bio_sync(bio);
-	int cpu, do_barriers;
+	const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+	int cpu;
+	bool do_barriers;
 	mdk_rdev_t *blocked_rdev;
 
 	/*
@@ -797,7 +798,8 @@ static int make_request(struct request_queue *q, struct bio * bio)
 
 	md_write_start(mddev, bio); /* wait on superblock update early */
 
-	if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
+	if (unlikely(!mddev->barriers_work &&
+		     bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		if (rw == WRITE)
 			md_write_end(mddev);
 		bio_endio(bio, -EOPNOTSUPP);
@@ -925,7 +927,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
 	atomic_set(&r1_bio->remaining, 0);
 	atomic_set(&r1_bio->behind_remaining, 0);
 
-	do_barriers = bio_barrier(bio);
+	do_barriers = bio_rw_flagged(bio, BIO_RW_BARRIER);
 	if (do_barriers)
 		set_bit(R1BIO_Barrier, &r1_bio->state);
 
@@ -1600,7 +1602,7 @@ static void raid1d(mddev_t *mddev)
 			 * We already have a nr_pending reference on these rdevs.
 			 */
 			int i;
-			const int do_sync = bio_sync(r1_bio->master_bio);
+			const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
 			clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
 			clear_bit(R1BIO_Barrier, &r1_bio->state);
 			for (i=0; i < conf->raid_disks; i++)
@@ -1654,7 +1656,7 @@ static void raid1d(mddev_t *mddev)
 				       (unsigned long long)r1_bio->sector);
 			raid_end_bio_io(r1_bio);
 		} else {
-			const int do_sync = bio_sync(r1_bio->master_bio);
+			const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
 			r1_bio->bios[r1_bio->read_disk] =
 				mddev->ro ? IO_BLOCKED : NULL;
 			r1_bio->read_disk = disk;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 3d9020cf6f6e..d0a2152e064f 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -796,12 +796,12 @@ static int make_request(struct request_queue *q, struct bio * bio)
 	int i;
 	int chunk_sects = conf->chunk_mask + 1;
 	const int rw = bio_data_dir(bio);
-	const int do_sync = bio_sync(bio);
+	const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
 	struct bio_list bl;
 	unsigned long flags;
 	mdk_rdev_t *blocked_rdev;
 
-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
@@ -1610,7 +1610,7 @@ static void raid10d(mddev_t *mddev)
 			raid_end_bio_io(r10_bio);
 			bio_put(bio);
 		} else {
-			const int do_sync = bio_sync(r10_bio->master_bio);
+			const bool do_sync = bio_rw_flagged(r10_bio->master_bio, BIO_RW_SYNCIO);
 			bio_put(bio);
 			rdev = conf->mirrors[mirror].rdev;
 			if (printk_ratelimit())
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index b8a2c5dc67ba..826eb3467357 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3606,7 +3606,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
 	const int rw = bio_data_dir(bi);
 	int cpu, remaining;
 
-	if (unlikely(bio_barrier(bi))) {
+	if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
 		bio_endio(bi, -EOPNOTSUPP);
 		return 0;
 	}
diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
index fad25b753042..b1c258ca2102 100644
--- a/drivers/staging/dst/dcore.c
+++ b/drivers/staging/dst/dcore.c
@@ -112,8 +112,9 @@ static int dst_request(struct request_queue *q, struct bio *bio)
 		 * I worked with.
 		 *
 		 * Empty barriers are not allowed anyway, see 51fd77bd9f512
-		 * for example, although later it was changed to bio_discard()
-		 * only, which does not work in this case.
+		 * for example, although later it was changed to
+		 * bio_rw_flagged(bio, BIO_RW_DISCARD) only, which does not
+		 * work in this case.
 		 */
 		//err = -EOPNOTSUPP;
 		err = 0;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 5dbefd11b4af..5cf405b0828d 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -260,7 +260,7 @@ loop_lock:
 		num_run++;
 		batch_run++;
 
-		if (bio_sync(cur))
+		if (bio_rw_flagged(cur, BIO_RW_SYNCIO))
 			num_sync_run++;
 
 		if (need_resched()) {
@@ -2903,7 +2903,7 @@ static noinline int schedule_bio(struct btrfs_root *root,
 	bio->bi_rw |= rw;
 
 	spin_lock(&device->io_lock);
-	if (bio_sync(bio))
+	if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
 		pending_bios = &device->pending_sync_bios;
 	else
 		pending_bios = &device->pending_bios;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 4f8fd0221cd2..5be93f18d842 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -177,29 +177,18 @@ enum bio_rw_flags {
 	BIO_RW_NOIDLE,
 };
 
+/*
+ * First four bits must match between bio->bi_rw and rq->cmd_flags, make
+ * that explicit here.
+ */
+#define BIO_RW_RQ_MASK		0xf
+
 static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
 {
 	return (bio->bi_rw & (1 << flag)) != 0;
 }
 
 /*
- * Old defines, these should eventually be replaced by direct usage of
- * bio_rw_flagged()
- */
-#define bio_barrier(bio)	bio_rw_flagged(bio, BIO_RW_BARRIER)
-#define bio_sync(bio)		bio_rw_flagged(bio, BIO_RW_SYNCIO)
-#define bio_unplug(bio)		bio_rw_flagged(bio, BIO_RW_UNPLUG)
-#define bio_failfast_dev(bio)	bio_rw_flagged(bio, BIO_RW_FAILFAST_DEV)
-#define bio_failfast_transport(bio)	\
-	bio_rw_flagged(bio, BIO_RW_FAILFAST_TRANSPORT)
-#define bio_failfast_driver(bio)	\
-	bio_rw_flagged(bio, BIO_RW_FAILFAST_DRIVER)
-#define bio_rw_ahead(bio)	bio_rw_flagged(bio, BIO_RW_AHEAD)
-#define bio_rw_meta(bio)	bio_rw_flagged(bio, BIO_RW_META)
-#define bio_discard(bio)	bio_rw_flagged(bio, BIO_RW_DISCARD)
-#define bio_noidle(bio)		bio_rw_flagged(bio, BIO_RW_NOIDLE)
-
-/*
  * upper 16 bits of bi_rw define the io priority of this bio
  */
 #define BIO_PRIO_SHIFT	(8 * sizeof(unsigned long) - IOPRIO_BITS)
@@ -222,7 +211,7 @@ static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
 #define bio_offset(bio)		bio_iovec((bio))->bv_offset
 #define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
 #define bio_sectors(bio)	((bio)->bi_size >> 9)
-#define bio_empty_barrier(bio)	(bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))
+#define bio_empty_barrier(bio)	(bio_rw_flagged(bio, BIO_RW_BARRIER) && !bio_has_data(bio) && !bio_rw_flagged(bio, BIO_RW_DISCARD))
 
 static inline unsigned int bio_cur_bytes(struct bio *bio)
 {
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 650b6a9cb679..88edb62def82 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -86,7 +86,7 @@ enum {
 };
 
 /*
- * request type modified bits. first two bits match BIO_RW* bits, important
+ * request type modified bits. first four bits match BIO_RW* bits, important
  */
 enum rq_flag_bits {
 	__REQ_RW,	/* not set, read. set, write */
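
As a closing note on the mask the bio.h hunk introduces: with the first four bits of bio->bi_rw and rq->cmd_flags made explicitly matching, a request can inherit them with a single masked OR, the same shape as the REQ_FAILFAST_MASK inheritance visible in the blk-core.c hunk above. A hedged sketch follows; the helper name and trimmed structs are hypothetical, and only BIO_RW_RQ_MASK and its value come from the patch.

/* Hypothetical helper; structs trimmed to the fields used here. */
#define BIO_RW_RQ_MASK	0xf	/* value added by this patch */

struct bio {
	unsigned long bi_rw;
};

struct request {
	unsigned int cmd_flags;
};

static inline void rq_inherit_bio_bits(struct request *rq, struct bio *bio)
{
	/* Low four bits (RW plus the three FAILFAST bits) line up 1:1. */
	rq->cmd_flags |= bio->bi_rw & BIO_RW_RQ_MASK;
}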