Diffstat (limited to 'drivers/md')
-rw-r--r--   drivers/md/dm-io.c        12
-rw-r--r--   drivers/md/dm-kcopyd.c     2
-rw-r--r--   drivers/md/dm-raid1.c      2
-rw-r--r--   drivers/md/dm-stripe.c     2
-rw-r--r--   drivers/md/dm.c           14
-rw-r--r--   drivers/md/linear.c        2
-rw-r--r--   drivers/md/md.c           10
-rw-r--r--   drivers/md/md.h            4
-rw-r--r--   drivers/md/multipath.c     8
-rw-r--r--   drivers/md/raid0.c         2
-rw-r--r--   drivers/md/raid1.c        22
-rw-r--r--   drivers/md/raid10.c       12
-rw-r--r--   drivers/md/raid5.c         2
13 files changed, 46 insertions, 48 deletions
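The substitution is mechanical throughout: the old BIO_RW_* names are bit numbers that callers had to shift into bi_rw themselves, while the REQ_* names are pre-shifted masks shared between bios and requests, so setting, testing and clearing a flag become plain bitwise operations. A self-contained sketch of the before/after idiom (the EX_REQ_* values are illustrative stand-ins, not the real block-layer definitions):

#include <stdio.h>

/* Illustrative stand-ins; the real masks live in the block layer headers. */
enum {
	EX_REQ_WRITE       = 1 << 0,
	EX_REQ_SYNC        = 1 << 4,
	EX_REQ_UNPLUG      = 1 << 6,
	EX_REQ_HARDBARRIER = 1 << 8,
};

int main(void)
{
	unsigned long rw = 0;
	int bio_rw_syncio = 4, bio_rw_unplug = 6;	/* old style: bare bit numbers */

	/* Before: shift bit numbers into place by hand. */
	rw |= (1UL << bio_rw_syncio) | (1UL << bio_rw_unplug);

	/* After: OR in ready-made masks, and test/clear them the same way. */
	rw = EX_REQ_WRITE | EX_REQ_SYNC | EX_REQ_UNPLUG | EX_REQ_HARDBARRIER;
	if (rw & EX_REQ_HARDBARRIER)
		rw &= ~EX_REQ_HARDBARRIER;	/* e.g. retry without the barrier */

	printf("bi_rw = %#lx\n", rw);
	return 0;
}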
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 10f457ca6af..0590c75b0ab 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -356,7 +356,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
 	BUG_ON(num_regions > DM_IO_MAX_REGIONS);
 
 	if (sync)
-		rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+		rw |= REQ_SYNC | REQ_UNPLUG;
 
 	/*
 	 * For multiple regions we need to be careful to rewind
@@ -364,7 +364,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
 	 */
 	for (i = 0; i < num_regions; i++) {
 		*dp = old_pages;
-		if (where[i].count || (rw & (1 << BIO_RW_BARRIER)))
+		if (where[i].count || (rw & REQ_HARDBARRIER))
 			do_region(rw, i, where + i, dp, io);
 	}
 
@@ -412,8 +412,8 @@ retry:
 	}
 	set_current_state(TASK_RUNNING);
 
-	if (io->eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) {
-		rw &= ~(1 << BIO_RW_BARRIER);
+	if (io->eopnotsupp_bits && (rw & REQ_HARDBARRIER)) {
+		rw &= ~REQ_HARDBARRIER;
 		goto retry;
 	}
 
@@ -479,8 +479,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
  * New collapsed (a)synchronous interface.
  *
  * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
- * the queue with blk_unplug() some time later or set the BIO_RW_SYNC bit in
- * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
+ * the queue with blk_unplug() some time later or set REQ_SYNC in
+io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
  * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
  */
 int dm_io(struct dm_io_request *io_req, unsigned num_regions,
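The comment above states the contract: an asynchronous dm_io() call (one with notify.fn set) must either be unplugged later or carry REQ_SYNC in io_req->bi_rw. Below is a hedged sketch of a synchronous one-region write under the new flags; the field names follow the dm_io_request initializer visible in the dm-kcopyd hunk that follows, while DM_IO_KMEM, the .mem.ptr.addr field and the sync error-bits argument are assumptions about the dm-io interface of this era, not something shown in this diff.

/* Sketch only: a synchronous one-region write through dm-io using REQ_* masks. */
static int example_sync_write(struct dm_io_client *client,
			      struct dm_io_region *where, void *data)
{
	unsigned long error_bits = 0;
	struct dm_io_request io_req = {
		.bi_rw = WRITE | REQ_SYNC | REQ_UNPLUG,	/* pre-shifted masks, no (1 << ...) */
		.mem.type = DM_IO_KMEM,			/* assumed: plain kernel buffer */
		.mem.ptr.addr = data,
		.notify.fn = NULL,			/* NULL notify.fn => dm_io() is synchronous */
		.client = client,
	};

	return dm_io(&io_req, 1, where, &error_bits);
}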
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index addf8347504..d8587bac568 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -345,7 +345,7 @@ static int run_io_job(struct kcopyd_job *job)
 {
 	int r;
 	struct dm_io_request io_req = {
-		.bi_rw = job->rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG),
+		.bi_rw = job->rw | REQ_SYNC | REQ_UNPLUG,
 		.mem.type = DM_IO_PAGE_LIST,
 		.mem.ptr.pl = job->pages,
 		.mem.offset = job->offset,
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index ddda531723d..74136262d65 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1211,7 +1211,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
 	if (error == -EOPNOTSUPP)
 		goto out;
 
-	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
+	if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
 		goto out;
 
 	if (unlikely(error)) {
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index e610725db76..d6e28d732b4 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -284,7 +284,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
 	if (!error)
 		return 0; /* I/O complete */
 
-	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
+	if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
 		return error;
 
 	if (error == -EOPNOTSUPP)
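Both end_io hunks above keep the same convention: an -EWOULDBLOCK failure on a read-ahead bio is benign and is returned without being escalated. A self-contained sketch of that decision (EX_REQ_RAHEAD and the return values are illustrative, not the dm target code):

#include <errno.h>

enum { EX_REQ_RAHEAD = 1 << 9 };	/* illustrative value */

/* 0 on success, the original error for a dropped readahead, -EIO otherwise. */
static int ex_end_io(unsigned long bi_rw, int error)
{
	if (!error)
		return 0;				/* I/O complete */
	if (error == -EWOULDBLOCK && (bi_rw & EX_REQ_RAHEAD))
		return error;				/* harmless: readahead was dropped */
	return -EIO;					/* real failure */
}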
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 1e0e6dd5150..d6f77baeafd 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -614,7 +614,7 @@ static void dec_pending(struct dm_io *io, int error)
 		 */
 		spin_lock_irqsave(&md->deferred_lock, flags);
 		if (__noflush_suspending(md)) {
-			if (!bio_rw_flagged(io->bio, BIO_RW_BARRIER))
+			if (!(io->bio->bi_rw & REQ_HARDBARRIER))
 				bio_list_add_head(&md->deferred,
 						  io->bio);
 		} else
@@ -626,7 +626,7 @@ static void dec_pending(struct dm_io *io, int error)
 		io_error = io->error;
 		bio = io->bio;
 
-		if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
+		if (bio->bi_rw & REQ_HARDBARRIER) {
 			/*
 			 * There can be just one barrier request so we use
 			 * a per-device variable for error reporting.
@@ -1106,7 +1106,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
 
 	clone->bi_sector = sector;
 	clone->bi_bdev = bio->bi_bdev;
-	clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER);
+	clone->bi_rw = bio->bi_rw & ~REQ_HARDBARRIER;
 	clone->bi_vcnt = 1;
 	clone->bi_size = to_bytes(len);
 	clone->bi_io_vec->bv_offset = offset;
@@ -1133,7 +1133,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
 
 	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
 	__bio_clone(clone, bio);
-	clone->bi_rw &= ~(1 << BIO_RW_BARRIER);
+	clone->bi_rw &= ~REQ_HARDBARRIER;
 	clone->bi_destructor = dm_bio_destructor;
 	clone->bi_sector = sector;
 	clone->bi_idx = idx;
@@ -1301,7 +1301,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
 
 	ci.map = dm_get_live_table(md);
 	if (unlikely(!ci.map)) {
-		if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
+		if (!(bio->bi_rw & REQ_HARDBARRIER))
 			bio_io_error(bio);
 		else
 			if (!md->barrier_error)
@@ -1414,7 +1414,7 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
 	 * we have to queue this io for later.
 	 */
 	if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
-	    unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+	    unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
 		up_read(&md->io_lock);
 
 		if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
@@ -2296,7 +2296,7 @@ static void dm_wq_work(struct work_struct *work)
 		if (dm_request_based(md))
 			generic_make_request(c);
 		else {
-			if (bio_rw_flagged(c, BIO_RW_BARRIER))
+			if (c->bi_rw & REQ_HARDBARRIER)
 				process_barrier(md, c);
 			else
 				__split_and_process_bio(md, c);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 7e0e057db9a..ba19060bcf3 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -294,7 +294,7 @@ static int linear_make_request (mddev_t *mddev, struct bio *bio)
 	dev_info_t *tmp_dev;
 	sector_t start_sector;
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+	if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
 		md_barrier_request(mddev, bio);
 		return 0;
 	}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index cb20d0b0555..1893af67877 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -353,7 +353,7 @@ static void md_submit_barrier(struct work_struct *ws)
 		/* an empty barrier - all done */
 		bio_endio(bio, 0);
 	else {
-		bio->bi_rw &= ~(1<<BIO_RW_BARRIER);
+		bio->bi_rw &= ~REQ_HARDBARRIER;
 		if (mddev->pers->make_request(mddev, bio))
 			generic_make_request(bio);
 		mddev->barrier = POST_REQUEST_BARRIER;
@@ -675,11 +675,11 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
 	 * if zero is reached.
 	 * If an error occurred, call md_error
 	 *
-	 * As we might need to resubmit the request if BIO_RW_BARRIER
+	 * As we might need to resubmit the request if REQ_HARDBARRIER
 	 * causes ENOTSUPP, we allocate a spare bio...
 	 */
 	struct bio *bio = bio_alloc(GFP_NOIO, 1);
-	int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);
+	int rw = REQ_WRITE | REQ_SYNC | REQ_UNPLUG;
 
 	bio->bi_bdev = rdev->bdev;
 	bio->bi_sector = sector;
@@ -691,7 +691,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
 	atomic_inc(&mddev->pending_writes);
 	if (!test_bit(BarriersNotsupp, &rdev->flags)) {
 		struct bio *rbio;
-		rw |= (1<<BIO_RW_BARRIER);
+		rw |= REQ_HARDBARRIER;
 		rbio = bio_clone(bio, GFP_NOIO);
 		rbio->bi_private = bio;
 		rbio->bi_end_io = super_written_barrier;
@@ -736,7 +736,7 @@ int sync_page_io(struct block_device *bdev, sector_t sector, int size,
 	struct completion event;
 	int ret;
 
-	rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+	rw |= REQ_SYNC | REQ_UNPLUG;
 
 	bio->bi_bdev = bdev;
 	bio->bi_sector = sector;
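md_super_write() keeps its barrier fallback and only the flag spelling changes: the superblock write goes out as REQ_WRITE | REQ_SYNC | REQ_UNPLUG, gains REQ_HARDBARRIER while barriers are believed to work, and is resubmitted without the barrier bit if the device fails it with -EOPNOTSUPP, the same retry dm-io performs above. A condensed, self-contained sketch of that control flow (the submit callback and EX_REQ_* values are illustrative, not the md.c code):

#include <errno.h>

enum { EX_REQ_WRITE = 1 << 0, EX_REQ_SYNC = 1 << 4,
       EX_REQ_UNPLUG = 1 << 6, EX_REQ_HARDBARRIER = 1 << 8 };

/* submit() stands in for building and issuing the bio; it returns 0 or -errno. */
static int ex_barrier_write(int (*submit)(unsigned long rw), int barriers_work)
{
	unsigned long rw = EX_REQ_WRITE | EX_REQ_SYNC | EX_REQ_UNPLUG;
	int err;

	if (barriers_work)
		rw |= EX_REQ_HARDBARRIER;

	err = submit(rw);
	if (err == -EOPNOTSUPP && (rw & EX_REQ_HARDBARRIER)) {
		/* Barrier rejected by the device: retry the same write without it. */
		err = submit(rw & ~EX_REQ_HARDBARRIER);
	}
	return err;
}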
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 10597bfec00..fc56e0f21c8 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -67,7 +67,7 @@ struct mdk_rdev_s
 #define	Faulty		1		/* device is known to have a fault */
 #define	In_sync		2		/* device is in_sync with rest of array */
 #define	WriteMostly	4		/* Avoid reading if at all possible */
-#define	BarriersNotsupp	5		/* BIO_RW_BARRIER is not supported */
+#define	BarriersNotsupp	5		/* REQ_HARDBARRIER is not supported */
 #define	AllReserved	6		/* If whole device is reserved for
 					 * one array */
 #define	AutoDetected	7		/* added by auto-detect */
@@ -254,7 +254,7 @@ struct mddev_s
 						 * fails. Only supported
 						 */
 	struct bio		*biolist;	/* bios that need to be retried
-						 * because BIO_RW_BARRIER is not supported
+						 * because REQ_HARDBARRIER is not supported
 						 */
 
 	atomic_t		recovery_active; /* blocks scheduled, but not written */
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 410fb60699a..0307d217e7a 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -91,7 +91,7 @@ static void multipath_end_request(struct bio *bio, int error)
 
 	if (uptodate)
 		multipath_end_bh_io(mp_bh, 0);
-	else if (!bio_rw_flagged(bio, BIO_RW_AHEAD)) {
+	else if (!(bio->bi_rw & REQ_RAHEAD)) {
 		/*
 		 * oops, IO error:
 		 */
@@ -142,7 +142,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
 	struct multipath_bh * mp_bh;
 	struct multipath_info *multipath;
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+	if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
 		md_barrier_request(mddev, bio);
 		return 0;
 	}
@@ -163,7 +163,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
 	mp_bh->bio = *bio;
 	mp_bh->bio.bi_sector += multipath->rdev->data_offset;
 	mp_bh->bio.bi_bdev = multipath->rdev->bdev;
-	mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
+	mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
 	mp_bh->bio.bi_end_io = multipath_end_request;
 	mp_bh->bio.bi_private = mp_bh;
 	generic_make_request(&mp_bh->bio);
@@ -398,7 +398,7 @@ static void multipathd (mddev_t *mddev)
 		*bio = *(mp_bh->master_bio);
 		bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
 		bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
-		bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
+		bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
 		bio->bi_end_io = multipath_end_request;
 		bio->bi_private = mp_bh;
 		generic_make_request(bio);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 563abed5a2c..6f7af46d623 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -483,7 +483,7 @@ static int raid0_make_request(mddev_t *mddev, struct bio *bio)
 	struct strip_zone *zone;
 	mdk_rdev_t *tmp_dev;
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+	if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
 		md_barrier_request(mddev, bio);
 		return 0;
 	}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a948da8012d..73cc74ffc26 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -787,7 +787,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	struct bio_list bl;
 	struct page **behind_pages = NULL;
 	const int rw = bio_data_dir(bio);
-	const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+	const bool do_sync = (bio->bi_rw & REQ_SYNC);
 	bool do_barriers;
 	mdk_rdev_t *blocked_rdev;
 
@@ -822,7 +822,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		finish_wait(&conf->wait_barrier, &w);
 	}
 	if (unlikely(!mddev->barriers_work &&
-		     bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+		     (bio->bi_rw & REQ_HARDBARRIER))) {
 		if (rw == WRITE)
 			md_write_end(mddev);
 		bio_endio(bio, -EOPNOTSUPP);
@@ -877,7 +877,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
 		read_bio->bi_bdev = mirror->rdev->bdev;
 		read_bio->bi_end_io = raid1_end_read_request;
-		read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+		read_bio->bi_rw = READ | do_sync;
 		read_bio->bi_private = r1_bio;
 
 		generic_make_request(read_bio);
@@ -959,7 +959,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	atomic_set(&r1_bio->remaining, 0);
 	atomic_set(&r1_bio->behind_remaining, 0);
 
-	do_barriers = bio_rw_flagged(bio, BIO_RW_BARRIER);
+	do_barriers = bio->bi_rw & REQ_HARDBARRIER;
 	if (do_barriers)
 		set_bit(R1BIO_Barrier, &r1_bio->state);
 
@@ -975,8 +975,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		mbio->bi_sector	= r1_bio->sector + conf->mirrors[i].rdev->data_offset;
 		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
 		mbio->bi_end_io	= raid1_end_write_request;
-		mbio->bi_rw = WRITE | (do_barriers << BIO_RW_BARRIER) |
-			(do_sync << BIO_RW_SYNCIO);
+		mbio->bi_rw = WRITE | do_barriers | do_sync;
 		mbio->bi_private = r1_bio;
 
 		if (behind_pages) {
@@ -1633,7 +1632,7 @@ static void raid1d(mddev_t *mddev)
 			sync_request_write(mddev, r1_bio);
 			unplug = 1;
 		} else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
-			/* some requests in the r1bio were BIO_RW_BARRIER
+			/* some requests in the r1bio were REQ_HARDBARRIER
 			 * requests which failed with -EOPNOTSUPP. Hohumm..
 			 * Better resubmit without the barrier.
 			 * We know which devices to resubmit for, because
@@ -1641,7 +1640,7 @@ static void raid1d(mddev_t *mddev)
 			 * We already have a nr_pending reference on these rdevs.
 			 */
 			int i;
-			const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
+			const bool do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC);
 			clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
 			clear_bit(R1BIO_Barrier, &r1_bio->state);
 			for (i=0; i < conf->raid_disks; i++)
@@ -1662,8 +1661,7 @@ static void raid1d(mddev_t *mddev)
 						conf->mirrors[i].rdev->data_offset;
 					bio->bi_bdev = conf->mirrors[i].rdev->bdev;
 					bio->bi_end_io = raid1_end_write_request;
-					bio->bi_rw = WRITE |
-						(do_sync << BIO_RW_SYNCIO);
+					bio->bi_rw = WRITE | do_sync;
 					bio->bi_private = r1_bio;
 					r1_bio->bios[i] = bio;
 					generic_make_request(bio);
@@ -1698,7 +1696,7 @@ static void raid1d(mddev_t *mddev)
 			       (unsigned long long)r1_bio->sector);
 			raid_end_bio_io(r1_bio);
 		} else {
-			const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
+			const bool do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC;
 			r1_bio->bios[r1_bio->read_disk] =
 				mddev->ro ? IO_BLOCKED : NULL;
 			r1_bio->read_disk = disk;
@@ -1715,7 +1713,7 @@ static void raid1d(mddev_t *mddev)
 			bio->bi_sector = r1_bio->sector + rdev->data_offset;
 			bio->bi_bdev = rdev->bdev;
 			bio->bi_end_io = raid1_end_read_request;
-			bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+			bio->bi_rw = READ | do_sync;
 			bio->bi_private = r1_bio;
 			unplug = 1;
 			generic_make_request(bio);
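The raid1 hunks all reduce to one substitution: read the REQ_SYNC or REQ_HARDBARRIER state straight off the master bio's bi_rw and OR the already-shifted mask into each cloned bio, instead of shifting a boolean back into bit position. A self-contained sketch of that idiom (the struct and EX_REQ_* values are illustrative, not the kernel's):

/* Illustrative types and values; only the OR/AND-mask idiom mirrors the hunks above. */
enum { EX_REQ_WRITE = 1 << 0, EX_REQ_SYNC = 1 << 4, EX_REQ_HARDBARRIER = 1 << 8 };

struct ex_bio { unsigned long bi_rw; };

static void ex_clone_flags(const struct ex_bio *master, struct ex_bio *clone, int write)
{
	/* do_sync holds the mask bit itself, so it can be ORed straight into bi_rw. */
	unsigned long do_sync = master->bi_rw & EX_REQ_SYNC;

	clone->bi_rw = (write ? EX_REQ_WRITE : 0) | do_sync;
	clone->bi_rw &= ~EX_REQ_HARDBARRIER;	/* resubmitted clones drop the barrier */
}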
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 42e64e4e5e2..62ecb6650fd 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -799,12 +799,12 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	int i;
 	int chunk_sects = conf->chunk_mask + 1;
 	const int rw = bio_data_dir(bio);
-	const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+	const bool do_sync = (bio->bi_rw & REQ_SYNC);
 	struct bio_list bl;
 	unsigned long flags;
 	mdk_rdev_t *blocked_rdev;
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+	if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
 		md_barrier_request(mddev, bio);
 		return 0;
 	}
@@ -879,7 +879,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 			mirror->rdev->data_offset;
 		read_bio->bi_bdev = mirror->rdev->bdev;
 		read_bio->bi_end_io = raid10_end_read_request;
-		read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+		read_bio->bi_rw = READ | do_sync;
 		read_bio->bi_private = r10_bio;
 
 		generic_make_request(read_bio);
@@ -947,7 +947,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 			conf->mirrors[d].rdev->data_offset;
 		mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
 		mbio->bi_end_io	= raid10_end_write_request;
-		mbio->bi_rw = WRITE | (do_sync << BIO_RW_SYNCIO);
+		mbio->bi_rw = WRITE | do_sync;
 		mbio->bi_private = r10_bio;
 
 		atomic_inc(&r10_bio->remaining);
@@ -1716,7 +1716,7 @@ static void raid10d(mddev_t *mddev)
 			raid_end_bio_io(r10_bio);
 			bio_put(bio);
 		} else {
-			const bool do_sync = bio_rw_flagged(r10_bio->master_bio, BIO_RW_SYNCIO);
+			const bool do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
 			bio_put(bio);
 			rdev = conf->mirrors[mirror].rdev;
 			if (printk_ratelimit())
@@ -1730,7 +1730,7 @@ static void raid10d(mddev_t *mddev)
 			bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
 				+ rdev->data_offset;
 			bio->bi_bdev = rdev->bdev;
-			bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+			bio->bi_rw = READ | do_sync;
 			bio->bi_private = r10_bio;
 			bio->bi_end_io = raid10_end_read_request;
 			unplug = 1;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 96c690279fc..20ac2f14376 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3958,7 +3958,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
 	const int rw = bio_data_dir(bi);
 	int remaining;
 
-	if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
+	if (unlikely(bi->bi_rw & REQ_HARDBARRIER)) {
 		/* Drain all pending writes. We only really need
 		 * to ensure they have been submitted, but this is
 		 * easier.