author		Jens Axboe <jens.axboe@oracle.com>	2009-09-11 08:32:04 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2009-09-11 08:33:31 -0400
commit		1f98a13f623e0ef666690a18c1250335fc6d7ef1 (patch)
tree		15ca2dddffaa18a0d1844957f4f8cc707cbb8117 /drivers
parent		e7e503aedb1f4d165081cb8d47a58c38f80f0cb4 (diff)
bio: first step in sanitizing the bio->bi_rw flag testing
Get rid of any functions that test for these bits and make callers
use bio_rw_flagged() directly. Then it is at least directly apparent
what variable and flag they check.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
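For context, below is a minimal compilable sketch of the pattern this patch applies at each call site. The struct and the abbreviated flag list are stand-ins for illustration only; the real bio_rw_flagged() helper and BIO_RW_* flags are assumed to live in include/linux/bio.h, whose hunk falls outside the drivers/ diffstat shown on this page.

/* Illustrative user-space sketch only -- "struct bio" here is a stand-in,
 * and the flag list is abbreviated; the real definitions are assumed to
 * be in include/linux/bio.h. */
#include <stdbool.h>
#include <stdio.h>

enum bio_rw_flags {
	BIO_RW,
	BIO_RW_AHEAD,
	BIO_RW_BARRIER,
	BIO_RW_SYNCIO,
};

struct bio {
	unsigned long bi_rw;		/* one bit per bio_rw_flags entry */
};

/* The helper that call sites are converted to use throughout this patch. */
static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
{
	return (bio->bi_rw & (1UL << flag)) != 0;
}

int main(void)
{
	struct bio bio = { .bi_rw = 1UL << BIO_RW_BARRIER };

	/* Old style used an opaque per-flag helper, e.g. bio_barrier(bio);
	 * the new style names the variable and flag at the call site. */
	if (bio_rw_flagged(&bio, BIO_RW_BARRIER))
		printf("barrier request\n");
	return 0;
}

The per-flag helpers whose callers the diff below converts (bio_barrier(), bio_sync(), bio_rw_ahead()) all reduce to this same bit test, which is why the conversion is mechanical.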
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/block/loop.c	2
-rw-r--r--	drivers/md/dm-raid1.c	2
-rw-r--r--	drivers/md/dm-stripe.c	2
-rw-r--r--	drivers/md/dm.c	12
-rw-r--r--	drivers/md/linear.c	2
-rw-r--r--	drivers/md/multipath.c	4
-rw-r--r--	drivers/md/raid0.c	2
-rw-r--r--	drivers/md/raid1.c	14
-rw-r--r--	drivers/md/raid10.c	6
-rw-r--r--	drivers/md/raid5.c	2
-rw-r--r--	drivers/staging/dst/dcore.c	5
11 files changed, 28 insertions, 25 deletions
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 5757188cd1fb..bbb79441d895 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -475,7 +475,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
 	pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
 
 	if (bio_rw(bio) == WRITE) {
-		int barrier = bio_barrier(bio);
+		bool barrier = bio_rw_flagged(bio, BIO_RW_BARRIER);
 		struct file *file = lo->lo_backing_file;
 
 		if (barrier) {
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 33f179e66bf5..cc9dc79b0784 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1129,7 +1129,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
 	if (error == -EOPNOTSUPP)
 		goto out;
 
-	if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
+	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
 		goto out;
 
 	if (unlikely(error)) {
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 3e563d251733..fde658ccbcec 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -285,7 +285,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
 	if (!error)
 		return 0; /* I/O complete */
 
-	if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
+	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
 		return error;
 
 	if (error == -EOPNOTSUPP)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index b4845b14740d..ec012f030240 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -586,7 +586,7 @@ static void dec_pending(struct dm_io *io, int error)
 			 */
 			spin_lock_irqsave(&md->deferred_lock, flags);
 			if (__noflush_suspending(md)) {
-				if (!bio_barrier(io->bio))
+				if (!bio_rw_flagged(io->bio, BIO_RW_BARRIER))
 					bio_list_add_head(&md->deferred,
 							  io->bio);
 			} else
@@ -598,7 +598,7 @@ static void dec_pending(struct dm_io *io, int error)
 		io_error = io->error;
 		bio = io->bio;
 
-		if (bio_barrier(bio)) {
+		if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
 			/*
 			 * There can be just one barrier request so we use
 			 * a per-device variable for error reporting.
@@ -1209,7 +1209,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
 
 	ci.map = dm_get_table(md);
 	if (unlikely(!ci.map)) {
-		if (!bio_barrier(bio))
+		if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
 			bio_io_error(bio);
 		else
 			if (!md->barrier_error)
@@ -1321,7 +1321,7 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
 	 * we have to queue this io for later.
 	 */
 	if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
-	    unlikely(bio_barrier(bio))) {
+	    unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		up_read(&md->io_lock);
 
 		if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
@@ -1344,7 +1344,7 @@ static int dm_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct mapped_device *md = q->queuedata;
 
-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
@@ -2164,7 +2164,7 @@ static void dm_wq_work(struct work_struct *work)
 		if (dm_request_based(md))
 			generic_make_request(c);
 		else {
-			if (bio_barrier(c))
+			if (bio_rw_flagged(c, BIO_RW_BARRIER))
 				process_barrier(md, c);
 			else
 				__split_and_process_bio(md, c);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 5fe39c2a3d2b..ea4842905444 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -288,7 +288,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
 	sector_t start_sector;
 	int cpu;
 
-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 7140909f6662..89e76819f61f 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -90,7 +90,7 @@ static void multipath_end_request(struct bio *bio, int error)
 
 	if (uptodate)
 		multipath_end_bh_io(mp_bh, 0);
-	else if (!bio_rw_ahead(bio)) {
+	else if (!bio_rw_flagged(bio, BIO_RW_AHEAD)) {
 		/*
 		 * oops, IO error:
 		 */
@@ -144,7 +144,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
 	const int rw = bio_data_dir(bio);
 	int cpu;
 
-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 898e2bdfee47..f845ed98fec9 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -448,7 +448,7 @@ static int raid0_make_request(struct request_queue *q, struct bio *bio)
 	const int rw = bio_data_dir(bio);
 	int cpu;
 
-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 8726fd7ebce5..ff7ed3335995 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -782,8 +782,9 @@ static int make_request(struct request_queue *q, struct bio * bio)
 	struct bio_list bl;
 	struct page **behind_pages = NULL;
 	const int rw = bio_data_dir(bio);
-	const int do_sync = bio_sync(bio);
-	int cpu, do_barriers;
+	const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+	int cpu;
+	bool do_barriers;
 	mdk_rdev_t *blocked_rdev;
 
 	/*
@@ -797,7 +798,8 @@ static int make_request(struct request_queue *q, struct bio * bio)
 
 	md_write_start(mddev, bio); /* wait on superblock update early */
 
-	if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
+	if (unlikely(!mddev->barriers_work &&
+		     bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		if (rw == WRITE)
 			md_write_end(mddev);
 		bio_endio(bio, -EOPNOTSUPP);
@@ -925,7 +927,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
 	atomic_set(&r1_bio->remaining, 0);
 	atomic_set(&r1_bio->behind_remaining, 0);
 
-	do_barriers = bio_barrier(bio);
+	do_barriers = bio_rw_flagged(bio, BIO_RW_BARRIER);
 	if (do_barriers)
 		set_bit(R1BIO_Barrier, &r1_bio->state);
 
@@ -1600,7 +1602,7 @@ static void raid1d(mddev_t *mddev)
 			 * We already have a nr_pending reference on these rdevs.
 			 */
 			int i;
-			const int do_sync = bio_sync(r1_bio->master_bio);
+			const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
 			clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
 			clear_bit(R1BIO_Barrier, &r1_bio->state);
 			for (i=0; i < conf->raid_disks; i++)
@@ -1654,7 +1656,7 @@ static void raid1d(mddev_t *mddev)
 			       (unsigned long long)r1_bio->sector);
 			raid_end_bio_io(r1_bio);
 		} else {
-			const int do_sync = bio_sync(r1_bio->master_bio);
+			const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
 			r1_bio->bios[r1_bio->read_disk] =
 				mddev->ro ? IO_BLOCKED : NULL;
 			r1_bio->read_disk = disk;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 3d9020cf6f6e..d0a2152e064f 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -796,12 +796,12 @@ static int make_request(struct request_queue *q, struct bio * bio)
 	int i;
 	int chunk_sects = conf->chunk_mask + 1;
 	const int rw = bio_data_dir(bio);
-	const int do_sync = bio_sync(bio);
+	const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
 	struct bio_list bl;
 	unsigned long flags;
 	mdk_rdev_t *blocked_rdev;
 
-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 	}
@@ -1610,7 +1610,7 @@ static void raid10d(mddev_t *mddev)
 			raid_end_bio_io(r10_bio);
 			bio_put(bio);
 		} else {
-			const int do_sync = bio_sync(r10_bio->master_bio);
+			const bool do_sync = bio_rw_flagged(r10_bio->master_bio, BIO_RW_SYNCIO);
 			bio_put(bio);
 			rdev = conf->mirrors[mirror].rdev;
 			if (printk_ratelimit())
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index b8a2c5dc67ba..826eb3467357 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3606,7 +3606,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
 	const int rw = bio_data_dir(bi);
 	int cpu, remaining;
 
-	if (unlikely(bio_barrier(bi))) {
+	if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
 		bio_endio(bi, -EOPNOTSUPP);
 		return 0;
 	}
diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
index fad25b753042..b1c258ca2102 100644
--- a/drivers/staging/dst/dcore.c
+++ b/drivers/staging/dst/dcore.c
@@ -112,8 +112,9 @@ static int dst_request(struct request_queue *q, struct bio *bio)
 		 * I worked with.
 		 *
 		 * Empty barriers are not allowed anyway, see 51fd77bd9f512
-		 * for example, although later it was changed to bio_discard()
-		 * only, which does not work in this case.
+		 * for example, although later it was changed to
+		 * bio_rw_flagged(bio, BIO_RW_DISCARD) only, which does not
+		 * work in this case.
 		 */
 		//err = -EOPNOTSUPP;
 		err = 0;