Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r--	drivers/md/dm.c	47
1 files changed, 21 insertions, 26 deletions
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index d21e1284604f..a3f21dc02bd8 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -15,6 +15,7 @@
 #include <linux/blkpg.h>
 #include <linux/bio.h>
 #include <linux/buffer_head.h>
+#include <linux/smp_lock.h>
 #include <linux/mempool.h>
 #include <linux/slab.h>
 #include <linux/idr.h>
@@ -338,6 +339,7 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
 {
 	struct mapped_device *md;
 
+	lock_kernel();
 	spin_lock(&_minor_lock);
 
 	md = bdev->bd_disk->private_data;
@@ -355,6 +357,7 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
 
 out:
 	spin_unlock(&_minor_lock);
+	unlock_kernel();
 
 	return md ? 0 : -ENXIO;
 }
@@ -362,8 +365,12 @@ out:
 static int dm_blk_close(struct gendisk *disk, fmode_t mode)
 {
 	struct mapped_device *md = disk->private_data;
+
+	lock_kernel();
 	atomic_dec(&md->open_count);
 	dm_put(md);
+	unlock_kernel();
+
 	return 0;
 }
 
@@ -614,7 +621,7 @@ static void dec_pending(struct dm_io *io, int error)
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md)) {
-				if (!bio_rw_flagged(io->bio, BIO_RW_BARRIER))
+				if (!(io->bio->bi_rw & REQ_HARDBARRIER))
					bio_list_add_head(&md->deferred,
							  io->bio);
			} else
@@ -626,7 +633,7 @@ static void dec_pending(struct dm_io *io, int error)
		io_error = io->error;
		bio = io->bio;
 
-		if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
+		if (bio->bi_rw & REQ_HARDBARRIER) {
			/*
			 * There can be just one barrier request so we use
			 * a per-device variable for error reporting.
@@ -792,12 +799,12 @@ static void dm_end_request(struct request *clone, int error)
 {
 	int rw = rq_data_dir(clone);
 	int run_queue = 1;
-	bool is_barrier = blk_barrier_rq(clone);
+	bool is_barrier = clone->cmd_flags & REQ_HARDBARRIER;
 	struct dm_rq_target_io *tio = clone->end_io_data;
 	struct mapped_device *md = tio->md;
 	struct request *rq = tio->orig;
 
-	if (blk_pc_request(rq) && !is_barrier) {
+	if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !is_barrier) {
		rq->errors = clone->errors;
		rq->resid_len = clone->resid_len;
 
@@ -844,7 +851,7 @@ void dm_requeue_unmapped_request(struct request *clone)
 	struct request_queue *q = rq->q;
 	unsigned long flags;
 
-	if (unlikely(blk_barrier_rq(clone))) {
+	if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
		/*
		 * Barrier clones share an original request.
		 * Leave it to dm_end_request(), which handles this special
@@ -943,7 +950,7 @@ static void dm_complete_request(struct request *clone, int error)
 	struct dm_rq_target_io *tio = clone->end_io_data;
 	struct request *rq = tio->orig;
 
-	if (unlikely(blk_barrier_rq(clone))) {
+	if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
		/*
		 * Barrier clones share an original request. So can't use
		 * softirq_done with the original.
@@ -972,7 +979,7 @@ void dm_kill_unmapped_request(struct request *clone, int error)
 	struct dm_rq_target_io *tio = clone->end_io_data;
 	struct request *rq = tio->orig;
 
-	if (unlikely(blk_barrier_rq(clone))) {
+	if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
		/*
		 * Barrier clones share an original request.
		 * Leave it to dm_end_request(), which handles this special
@@ -1106,7 +1113,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
 
 	clone->bi_sector = sector;
 	clone->bi_bdev = bio->bi_bdev;
-	clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER);
+	clone->bi_rw = bio->bi_rw & ~REQ_HARDBARRIER;
 	clone->bi_vcnt = 1;
 	clone->bi_size = to_bytes(len);
 	clone->bi_io_vec->bv_offset = offset;
@@ -1133,7 +1140,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
 
 	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
 	__bio_clone(clone, bio);
-	clone->bi_rw &= ~(1 << BIO_RW_BARRIER);
+	clone->bi_rw &= ~REQ_HARDBARRIER;
 	clone->bi_destructor = dm_bio_destructor;
 	clone->bi_sector = sector;
 	clone->bi_idx = idx;
@@ -1301,7 +1308,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
 
 	ci.map = dm_get_live_table(md);
 	if (unlikely(!ci.map)) {
-		if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
+		if (!(bio->bi_rw & REQ_HARDBARRIER))
			bio_io_error(bio);
		else
			if (!md->barrier_error)
@@ -1414,7 +1421,7 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
	 * we have to queue this io for later.
	 */
 	if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
-	    unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+	    unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
		up_read(&md->io_lock);
 
		if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
@@ -1455,20 +1462,9 @@ static int dm_request(struct request_queue *q, struct bio *bio)
	return _dm_request(q, bio);
 }
 
-/*
- * Mark this request as flush request, so that dm_request_fn() can
- * recognize.
- */
-static void dm_rq_prepare_flush(struct request_queue *q, struct request *rq)
-{
-	rq->cmd_type = REQ_TYPE_LINUX_BLOCK;
-	rq->cmd[0] = REQ_LB_OP_FLUSH;
-}
-
 static bool dm_rq_is_flush_request(struct request *rq)
 {
-	if (rq->cmd_type == REQ_TYPE_LINUX_BLOCK &&
-	    rq->cmd[0] == REQ_LB_OP_FLUSH)
+	if (rq->cmd_flags & REQ_FLUSH)
		return true;
	else
		return false;
@@ -1912,8 +1908,7 @@ static struct mapped_device *alloc_dev(int minor)
 	blk_queue_softirq_done(md->queue, dm_softirq_done);
 	blk_queue_prep_rq(md->queue, dm_prep_fn);
 	blk_queue_lld_busy(md->queue, dm_lld_busy);
-	blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH,
-			  dm_rq_prepare_flush);
+	blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH);
 
 	md->disk = alloc_disk(1);
 	if (!md->disk)
@@ -2296,7 +2291,7 @@ static void dm_wq_work(struct work_struct *work)
		if (dm_request_based(md))
			generic_make_request(c);
		else {
-			if (bio_rw_flagged(c, BIO_RW_BARRIER))
+			if (c->bi_rw & REQ_HARDBARRIER)
				process_barrier(md, c);
			else
				__split_and_process_bio(md, c);