Diffstat (limited to 'drivers/md')
 drivers/md/dm-io.c     | 12
 drivers/md/dm-kcopyd.c |  2
 drivers/md/dm-raid1.c  |  2
 drivers/md/dm-stripe.c |  2
 drivers/md/dm.c        | 47
 drivers/md/linear.c    |  2
 drivers/md/md.c        | 16
 drivers/md/md.h        |  4
 drivers/md/multipath.c |  8
 drivers/md/raid0.c     |  2
 drivers/md/raid1.c     | 22
 drivers/md/raid10.c    | 12
 drivers/md/raid5.c     |  2
 13 files changed, 66 insertions, 67 deletions
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 10f457ca6af2..0590c75b0ab6 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -356,7 +356,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
 	BUG_ON(num_regions > DM_IO_MAX_REGIONS);
 
 	if (sync)
-		rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+		rw |= REQ_SYNC | REQ_UNPLUG;
 
 	/*
 	 * For multiple regions we need to be careful to rewind
@@ -364,7 +364,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
 	 */
 	for (i = 0; i < num_regions; i++) {
 		*dp = old_pages;
-		if (where[i].count || (rw & (1 << BIO_RW_BARRIER)))
+		if (where[i].count || (rw & REQ_HARDBARRIER))
 			do_region(rw, i, where + i, dp, io);
 	}
 
@@ -412,8 +412,8 @@ retry:
 	}
 	set_current_state(TASK_RUNNING);
 
-	if (io->eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) {
-		rw &= ~(1 << BIO_RW_BARRIER);
+	if (io->eopnotsupp_bits && (rw & REQ_HARDBARRIER)) {
+		rw &= ~REQ_HARDBARRIER;
 		goto retry;
 	}
 
@@ -479,8 +479,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
  * New collapsed (a)synchronous interface.
  *
  * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
- * the queue with blk_unplug() some time later or set the BIO_RW_SYNC bit in
- * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
+ * the queue with blk_unplug() some time later or set REQ_SYNC in
+ * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
  * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
  */
 int dm_io(struct dm_io_request *io_req, unsigned num_regions,
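Note: the pattern above repeats throughout this diff. The old BIO_RW_* names are bit numbers that every caller has to shift into place with (1 << ...), while the new REQ_* names are pre-shifted bit masks shared between struct bio and struct request, so they are OR-ed in directly. A minimal user-space sketch of the idiom change; the flag values below are stand-ins, not the real definitions from <linux/blk_types.h>:

/* Sketch only: stand-in flag values, not the kernel's definitions. */
#include <assert.h>

enum { BIO_RW_SYNCIO = 1, BIO_RW_UNPLUG = 2 };   /* old style: bit numbers        */
#define REQ_SYNC   (1UL << 1)                    /* new style: ready-made bit masks */
#define REQ_UNPLUG (1UL << 2)

int main(void)
{
	unsigned long rw_old = 0, rw_new = 0;

	rw_old |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG); /* shift at each use */
	rw_new |= REQ_SYNC | REQ_UNPLUG;                        /* OR the masks in   */

	/* With matching stand-in positions the two spellings yield the same bits. */
	assert(rw_old == rw_new);
	return 0;
}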
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index addf83475040..d8587bac5682 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -345,7 +345,7 @@ static int run_io_job(struct kcopyd_job *job)
 {
 	int r;
 	struct dm_io_request io_req = {
-		.bi_rw = job->rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG),
+		.bi_rw = job->rw | REQ_SYNC | REQ_UNPLUG,
 		.mem.type = DM_IO_PAGE_LIST,
 		.mem.ptr.pl = job->pages,
 		.mem.offset = job->offset,
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index ddda531723dc..74136262d654 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1211,7 +1211,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
 	if (error == -EOPNOTSUPP)
 		goto out;
 
-	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
+	if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
 		goto out;
 
 	if (unlikely(error)) {
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index e610725db766..d6e28d732b4d 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -284,7 +284,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
 	if (!error)
 		return 0; /* I/O complete */
 
-	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
+	if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
 		return error;
 
 	if (error == -EOPNOTSUPP)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index d21e1284604f..a3f21dc02bd8 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -15,6 +15,7 @@
 #include <linux/blkpg.h>
 #include <linux/bio.h>
 #include <linux/buffer_head.h>
+#include <linux/smp_lock.h>
 #include <linux/mempool.h>
 #include <linux/slab.h>
 #include <linux/idr.h>
@@ -338,6 +339,7 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
 {
 	struct mapped_device *md;
 
+	lock_kernel();
 	spin_lock(&_minor_lock);
 
 	md = bdev->bd_disk->private_data;
@@ -355,6 +357,7 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
 
 out:
 	spin_unlock(&_minor_lock);
+	unlock_kernel();
 
 	return md ? 0 : -ENXIO;
 }
@@ -362,8 +365,12 @@ out:
 static int dm_blk_close(struct gendisk *disk, fmode_t mode)
 {
 	struct mapped_device *md = disk->private_data;
+
+	lock_kernel();
 	atomic_dec(&md->open_count);
 	dm_put(md);
+	unlock_kernel();
+
 	return 0;
 }
 
@@ -614,7 +621,7 @@ static void dec_pending(struct dm_io *io, int error)
 			 */
 			spin_lock_irqsave(&md->deferred_lock, flags);
 			if (__noflush_suspending(md)) {
-				if (!bio_rw_flagged(io->bio, BIO_RW_BARRIER))
+				if (!(io->bio->bi_rw & REQ_HARDBARRIER))
 					bio_list_add_head(&md->deferred,
 							  io->bio);
 			} else
@@ -626,7 +633,7 @@ static void dec_pending(struct dm_io *io, int error)
 		io_error = io->error;
 		bio = io->bio;
 
-		if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
+		if (bio->bi_rw & REQ_HARDBARRIER) {
 			/*
 			 * There can be just one barrier request so we use
 			 * a per-device variable for error reporting.
@@ -792,12 +799,12 @@ static void dm_end_request(struct request *clone, int error)
 {
 	int rw = rq_data_dir(clone);
 	int run_queue = 1;
-	bool is_barrier = blk_barrier_rq(clone);
+	bool is_barrier = clone->cmd_flags & REQ_HARDBARRIER;
 	struct dm_rq_target_io *tio = clone->end_io_data;
 	struct mapped_device *md = tio->md;
 	struct request *rq = tio->orig;
 
-	if (blk_pc_request(rq) && !is_barrier) {
+	if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !is_barrier) {
 		rq->errors = clone->errors;
 		rq->resid_len = clone->resid_len;
 
@@ -844,7 +851,7 @@ void dm_requeue_unmapped_request(struct request *clone)
 	struct request_queue *q = rq->q;
 	unsigned long flags;
 
-	if (unlikely(blk_barrier_rq(clone))) {
+	if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
 		/*
 		 * Barrier clones share an original request.
 		 * Leave it to dm_end_request(), which handles this special
@@ -943,7 +950,7 @@ static void dm_complete_request(struct request *clone, int error)
 	struct dm_rq_target_io *tio = clone->end_io_data;
 	struct request *rq = tio->orig;
 
-	if (unlikely(blk_barrier_rq(clone))) {
+	if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
 		/*
 		 * Barrier clones share an original request. So can't use
 		 * softirq_done with the original.
@@ -972,7 +979,7 @@ void dm_kill_unmapped_request(struct request *clone, int error)
 	struct dm_rq_target_io *tio = clone->end_io_data;
 	struct request *rq = tio->orig;
 
-	if (unlikely(blk_barrier_rq(clone))) {
+	if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
 		/*
 		 * Barrier clones share an original request.
 		 * Leave it to dm_end_request(), which handles this special
@@ -1106,7 +1113,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
 
 	clone->bi_sector = sector;
 	clone->bi_bdev = bio->bi_bdev;
-	clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER);
+	clone->bi_rw = bio->bi_rw & ~REQ_HARDBARRIER;
 	clone->bi_vcnt = 1;
 	clone->bi_size = to_bytes(len);
 	clone->bi_io_vec->bv_offset = offset;
@@ -1133,7 +1140,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
 
 	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
 	__bio_clone(clone, bio);
-	clone->bi_rw &= ~(1 << BIO_RW_BARRIER);
+	clone->bi_rw &= ~REQ_HARDBARRIER;
 	clone->bi_destructor = dm_bio_destructor;
 	clone->bi_sector = sector;
 	clone->bi_idx = idx;
@@ -1301,7 +1308,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
 
 	ci.map = dm_get_live_table(md);
 	if (unlikely(!ci.map)) {
-		if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
+		if (!(bio->bi_rw & REQ_HARDBARRIER))
 			bio_io_error(bio);
 		else
 			if (!md->barrier_error)
@@ -1414,7 +1421,7 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
 	 * we have to queue this io for later.
 	 */
 	if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
-	    unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+	    unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
 		up_read(&md->io_lock);
 
 		if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
@@ -1455,20 +1462,9 @@ static int dm_request(struct request_queue *q, struct bio *bio)
 	return _dm_request(q, bio);
 }
 
-/*
- * Mark this request as flush request, so that dm_request_fn() can
- * recognize.
- */
-static void dm_rq_prepare_flush(struct request_queue *q, struct request *rq)
-{
-	rq->cmd_type = REQ_TYPE_LINUX_BLOCK;
-	rq->cmd[0] = REQ_LB_OP_FLUSH;
-}
-
 static bool dm_rq_is_flush_request(struct request *rq)
 {
-	if (rq->cmd_type == REQ_TYPE_LINUX_BLOCK &&
-	    rq->cmd[0] == REQ_LB_OP_FLUSH)
+	if (rq->cmd_flags & REQ_FLUSH)
 		return true;
 	else
 		return false;
@@ -1912,8 +1908,7 @@ static struct mapped_device *alloc_dev(int minor)
 	blk_queue_softirq_done(md->queue, dm_softirq_done);
 	blk_queue_prep_rq(md->queue, dm_prep_fn);
 	blk_queue_lld_busy(md->queue, dm_lld_busy);
-	blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH,
-			  dm_rq_prepare_flush);
+	blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH);
 
 	md->disk = alloc_disk(1);
 	if (!md->disk)
@@ -2296,7 +2291,7 @@ static void dm_wq_work(struct work_struct *work)
 		if (dm_request_based(md))
 			generic_make_request(c);
 		else {
-			if (bio_rw_flagged(c, BIO_RW_BARRIER))
+			if (c->bi_rw & REQ_HARDBARRIER)
 				process_barrier(md, c);
 			else
 				__split_and_process_bio(md, c);
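Note: besides the flag renames, the dm.c hunks above drop dm_rq_prepare_flush(): a flush no longer has to be disguised as a REQ_TYPE_LINUX_BLOCK request with cmd[0] == REQ_LB_OP_FLUSH, because it is now recognized by the REQ_FLUSH bit in cmd_flags, so blk_queue_ordered() no longer needs a prepare_flush callback. A hedged sketch of the resulting predicate, using stand-in types and a stand-in REQ_FLUSH value rather than the real block-layer definitions:

#include <assert.h>
#include <stdbool.h>

#define REQ_FLUSH (1u << 8)                  /* stand-in bit, not the real value      */
struct request { unsigned int cmd_flags; };  /* reduced stand-in of the kernel struct */

static bool dm_rq_is_flush_request(const struct request *rq)
{
	/* The flag travels in cmd_flags; no special cmd_type or cmd[0] check needed. */
	return rq->cmd_flags & REQ_FLUSH;
}

int main(void)
{
	struct request flush = { .cmd_flags = REQ_FLUSH };
	struct request plain = { .cmd_flags = 0 };

	assert(dm_rq_is_flush_request(&flush));
	assert(!dm_rq_is_flush_request(&plain));
	return 0;
}

With the flag check, the if/else in the hunk above could also collapse to a single return statement, though the patch keeps the original shape.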
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 7e0e057db9a7..ba19060bcf3f 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -294,7 +294,7 @@ static int linear_make_request (mddev_t *mddev, struct bio *bio)
 	dev_info_t *tmp_dev;
 	sector_t start_sector;
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+	if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
 		md_barrier_request(mddev, bio);
 		return 0;
 	}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index cb20d0b0555a..700c96edf9b2 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -36,6 +36,7 @@
 #include <linux/blkdev.h>
 #include <linux/sysctl.h>
 #include <linux/seq_file.h>
+#include <linux/smp_lock.h>
 #include <linux/buffer_head.h> /* for invalidate_bdev */
 #include <linux/poll.h>
 #include <linux/ctype.h>
@@ -353,7 +354,7 @@ static void md_submit_barrier(struct work_struct *ws)
 		/* an empty barrier - all done */
 		bio_endio(bio, 0);
 	else {
-		bio->bi_rw &= ~(1<<BIO_RW_BARRIER);
+		bio->bi_rw &= ~REQ_HARDBARRIER;
 		if (mddev->pers->make_request(mddev, bio))
 			generic_make_request(bio);
 		mddev->barrier = POST_REQUEST_BARRIER;
@@ -675,11 +676,11 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
 	 * if zero is reached.
 	 * If an error occurred, call md_error
 	 *
-	 * As we might need to resubmit the request if BIO_RW_BARRIER
+	 * As we might need to resubmit the request if REQ_HARDBARRIER
 	 * causes ENOTSUPP, we allocate a spare bio...
 	 */
 	struct bio *bio = bio_alloc(GFP_NOIO, 1);
-	int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);
+	int rw = REQ_WRITE | REQ_SYNC | REQ_UNPLUG;
 
 	bio->bi_bdev = rdev->bdev;
 	bio->bi_sector = sector;
@@ -691,7 +692,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
 	atomic_inc(&mddev->pending_writes);
 	if (!test_bit(BarriersNotsupp, &rdev->flags)) {
 		struct bio *rbio;
-		rw |= (1<<BIO_RW_BARRIER);
+		rw |= REQ_HARDBARRIER;
 		rbio = bio_clone(bio, GFP_NOIO);
 		rbio->bi_private = bio;
 		rbio->bi_end_io = super_written_barrier;
@@ -736,7 +737,7 @@ int sync_page_io(struct block_device *bdev, sector_t sector, int size,
 	struct completion event;
 	int ret;
 
-	rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+	rw |= REQ_SYNC | REQ_UNPLUG;
 
 	bio->bi_bdev = bdev;
 	bio->bi_sector = sector;
@@ -5902,6 +5903,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
 	mddev_t *mddev = mddev_find(bdev->bd_dev);
 	int err;
 
+	lock_kernel();
 	if (mddev->gendisk != bdev->bd_disk) {
 		/* we are racing with mddev_put which is discarding this
 		 * bd_disk.
@@ -5910,6 +5912,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
 		/* Wait until bdev->bd_disk is definitely gone */
 		flush_scheduled_work();
 		/* Then retry the open from the top */
+		unlock_kernel();
 		return -ERESTARTSYS;
 	}
 	BUG_ON(mddev != bdev->bd_disk->private_data);
@@ -5923,6 +5926,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
 
 	check_disk_size_change(mddev->gendisk, bdev);
  out:
+	unlock_kernel();
 	return err;
 }
 
@@ -5931,8 +5935,10 @@ static int md_release(struct gendisk *disk, fmode_t mode)
 	mddev_t *mddev = disk->private_data;
 
 	BUG_ON(!mddev);
+	lock_kernel();
 	atomic_dec(&mddev->openers);
 	mddev_put(mddev);
+	unlock_kernel();
 
 	return 0;
 }
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 10597bfec000..fc56e0f21c80 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -67,7 +67,7 @@ struct mdk_rdev_s
 #define	Faulty		1		/* device is known to have a fault */
 #define	In_sync		2		/* device is in_sync with rest of array */
 #define	WriteMostly	4		/* Avoid reading if at all possible */
-#define	BarriersNotsupp	5		/* BIO_RW_BARRIER is not supported */
+#define	BarriersNotsupp	5		/* REQ_HARDBARRIER is not supported */
 #define	AllReserved	6		/* If whole device is reserved for
 					 * one array */
 #define	AutoDetected	7		/* added by auto-detect */
@@ -254,7 +254,7 @@ struct mddev_s
 					 * fails. Only supported
 					 */
 	struct bio		*biolist;	/* bios that need to be retried
-					 * because BIO_RW_BARRIER is not supported
+					 * because REQ_HARDBARRIER is not supported
 					 */
 
 	atomic_t		recovery_active; /* blocks scheduled, but not written */
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 410fb60699ac..0307d217e7a4 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -91,7 +91,7 @@ static void multipath_end_request(struct bio *bio, int error)
 
 	if (uptodate)
 		multipath_end_bh_io(mp_bh, 0);
-	else if (!bio_rw_flagged(bio, BIO_RW_AHEAD)) {
+	else if (!(bio->bi_rw & REQ_RAHEAD)) {
 		/*
 		 * oops, IO error:
 		 */
@@ -142,7 +142,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
 	struct multipath_bh * mp_bh;
 	struct multipath_info *multipath;
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+	if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
 		md_barrier_request(mddev, bio);
 		return 0;
 	}
@@ -163,7 +163,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
 	mp_bh->bio = *bio;
 	mp_bh->bio.bi_sector += multipath->rdev->data_offset;
 	mp_bh->bio.bi_bdev = multipath->rdev->bdev;
-	mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
+	mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
 	mp_bh->bio.bi_end_io = multipath_end_request;
 	mp_bh->bio.bi_private = mp_bh;
 	generic_make_request(&mp_bh->bio);
@@ -398,7 +398,7 @@ static void multipathd (mddev_t *mddev)
 		*bio = *(mp_bh->master_bio);
 		bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
 		bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
-		bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
+		bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
 		bio->bi_end_io = multipath_end_request;
 		bio->bi_private = mp_bh;
 		generic_make_request(bio);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 563abed5a2cb..6f7af46d623c 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -483,7 +483,7 @@ static int raid0_make_request(mddev_t *mddev, struct bio *bio)
 	struct strip_zone *zone;
 	mdk_rdev_t *tmp_dev;
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+	if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
 		md_barrier_request(mddev, bio);
 		return 0;
 	}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a948da8012de..73cc74ffc26b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -787,7 +787,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	struct bio_list bl;
 	struct page **behind_pages = NULL;
 	const int rw = bio_data_dir(bio);
-	const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+	const bool do_sync = (bio->bi_rw & REQ_SYNC);
 	bool do_barriers;
 	mdk_rdev_t *blocked_rdev;
 
@@ -822,7 +822,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		finish_wait(&conf->wait_barrier, &w);
 	}
 	if (unlikely(!mddev->barriers_work &&
-		     bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+		     (bio->bi_rw & REQ_HARDBARRIER))) {
 		if (rw == WRITE)
 			md_write_end(mddev);
 		bio_endio(bio, -EOPNOTSUPP);
@@ -877,7 +877,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
 		read_bio->bi_bdev = mirror->rdev->bdev;
 		read_bio->bi_end_io = raid1_end_read_request;
-		read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+		read_bio->bi_rw = READ | do_sync;
 		read_bio->bi_private = r1_bio;
 
 		generic_make_request(read_bio);
@@ -959,7 +959,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	atomic_set(&r1_bio->remaining, 0);
 	atomic_set(&r1_bio->behind_remaining, 0);
 
-	do_barriers = bio_rw_flagged(bio, BIO_RW_BARRIER);
+	do_barriers = bio->bi_rw & REQ_HARDBARRIER;
 	if (do_barriers)
 		set_bit(R1BIO_Barrier, &r1_bio->state);
 
@@ -975,8 +975,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset;
 		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
 		mbio->bi_end_io = raid1_end_write_request;
-		mbio->bi_rw = WRITE | (do_barriers << BIO_RW_BARRIER) |
-			(do_sync << BIO_RW_SYNCIO);
+		mbio->bi_rw = WRITE | do_barriers | do_sync;
 		mbio->bi_private = r1_bio;
 
 		if (behind_pages) {
@@ -1633,7 +1632,7 @@ static void raid1d(mddev_t *mddev)
 			sync_request_write(mddev, r1_bio);
 			unplug = 1;
 		} else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
-			/* some requests in the r1bio were BIO_RW_BARRIER
+			/* some requests in the r1bio were REQ_HARDBARRIER
 			 * requests which failed with -EOPNOTSUPP. Hohumm..
 			 * Better resubmit without the barrier.
 			 * We know which devices to resubmit for, because
@@ -1641,7 +1640,7 @@ static void raid1d(mddev_t *mddev)
 			 * We already have a nr_pending reference on these rdevs.
 			 */
 			int i;
-			const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
+			const bool do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC);
 			clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
 			clear_bit(R1BIO_Barrier, &r1_bio->state);
 			for (i=0; i < conf->raid_disks; i++)
@@ -1662,8 +1661,7 @@ static void raid1d(mddev_t *mddev)
 						conf->mirrors[i].rdev->data_offset;
 					bio->bi_bdev = conf->mirrors[i].rdev->bdev;
 					bio->bi_end_io = raid1_end_write_request;
-					bio->bi_rw = WRITE |
-						(do_sync << BIO_RW_SYNCIO);
+					bio->bi_rw = WRITE | do_sync;
 					bio->bi_private = r1_bio;
 					r1_bio->bios[i] = bio;
 					generic_make_request(bio);
@@ -1698,7 +1696,7 @@ static void raid1d(mddev_t *mddev)
 			       (unsigned long long)r1_bio->sector);
 			raid_end_bio_io(r1_bio);
 		} else {
-			const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
+			const bool do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC;
 			r1_bio->bios[r1_bio->read_disk] =
 				mddev->ro ? IO_BLOCKED : NULL;
 			r1_bio->read_disk = disk;
@@ -1715,7 +1713,7 @@ static void raid1d(mddev_t *mddev)
 			bio->bi_sector = r1_bio->sector + rdev->data_offset;
 			bio->bi_bdev = rdev->bdev;
 			bio->bi_end_io = raid1_end_read_request;
-			bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+			bio->bi_rw = READ | do_sync;
 			bio->bi_private = r1_bio;
 			unplug = 1;
 			generic_make_request(bio);
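Note: one subtlety in the raid1 hunks above and the raid10 hunks below. The old code kept do_sync as a 0/1 value and shifted it back into position with (do_sync << BIO_RW_SYNCIO); the new code ORs the masked value in directly, which only preserves the flag if the variable still holds the full REQ_SYNC mask, and a bool collapses it back to 0/1 (a follow-up patch in mainline widened these variables for exactly this reason). A small stand-alone illustration with a stand-in flag value:

#include <assert.h>
#include <stdbool.h>

#define READ     0u
#define REQ_SYNC (1u << 4)                 /* stand-in bit position, not the real one */

int main(void)
{
	unsigned int bi_rw = REQ_SYNC;     /* incoming bio has the sync flag set */

	bool          sync_as_bool = (bi_rw & REQ_SYNC);   /* collapses to 1     */
	unsigned long sync_as_mask = (bi_rw & REQ_SYNC);   /* keeps the real bit */

	assert(((READ | sync_as_mask) & REQ_SYNC) != 0);   /* flag preserved */
	assert(((READ | sync_as_bool) & REQ_SYNC) == 0);   /* flag lost      */
	return 0;
}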
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 42e64e4e5e25..62ecb6650fd0 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -799,12 +799,12 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	int i;
 	int chunk_sects = conf->chunk_mask + 1;
 	const int rw = bio_data_dir(bio);
-	const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+	const bool do_sync = (bio->bi_rw & REQ_SYNC);
 	struct bio_list bl;
 	unsigned long flags;
 	mdk_rdev_t *blocked_rdev;
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+	if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
 		md_barrier_request(mddev, bio);
 		return 0;
 	}
@@ -879,7 +879,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 			mirror->rdev->data_offset;
 		read_bio->bi_bdev = mirror->rdev->bdev;
 		read_bio->bi_end_io = raid10_end_read_request;
-		read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+		read_bio->bi_rw = READ | do_sync;
 		read_bio->bi_private = r10_bio;
 
 		generic_make_request(read_bio);
@@ -947,7 +947,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 			conf->mirrors[d].rdev->data_offset;
 		mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
 		mbio->bi_end_io = raid10_end_write_request;
-		mbio->bi_rw = WRITE | (do_sync << BIO_RW_SYNCIO);
+		mbio->bi_rw = WRITE | do_sync;
 		mbio->bi_private = r10_bio;
 
 		atomic_inc(&r10_bio->remaining);
@@ -1716,7 +1716,7 @@ static void raid10d(mddev_t *mddev)
 			raid_end_bio_io(r10_bio);
 			bio_put(bio);
 		} else {
-			const bool do_sync = bio_rw_flagged(r10_bio->master_bio, BIO_RW_SYNCIO);
+			const bool do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
 			bio_put(bio);
 			rdev = conf->mirrors[mirror].rdev;
 			if (printk_ratelimit())
@@ -1730,7 +1730,7 @@ static void raid10d(mddev_t *mddev)
 			bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
 				+ rdev->data_offset;
 			bio->bi_bdev = rdev->bdev;
-			bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+			bio->bi_rw = READ | do_sync;
 			bio->bi_private = r10_bio;
 			bio->bi_end_io = raid10_end_read_request;
 			unplug = 1;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 96c690279fc6..20ac2f14376a 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3958,7 +3958,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
 	const int rw = bio_data_dir(bi);
 	int remaining;
 
-	if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
+	if (unlikely(bi->bi_rw & REQ_HARDBARRIER)) {
 		/* Drain all pending writes. We only really need
 		 * to ensure they have been submitted, but this is
 		 * easier.