author    Linus Torvalds <torvalds@linux-foundation.org>  2018-03-10 11:45:44 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-03-10 11:45:44 -0500
commit    b3b25b1d9e104352b8272488ab94145fe84c4261 (patch)
tree      0a09b1e8670c1d23527d876b1fd8c360d83a1869 /drivers
parent    2f64e70cd0fc5db1d2e41dac1fc668951840f9ed (diff)
parent    c934edadcc7a64e399942ae34b912939057a77a7 (diff)
Merge tag 'for-4.16/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper fixes from Mike Snitzer:

 - Fix an uninitialized variable false warning in dm bufio

 - Fix DM's passthrough ioctl support to be race free against an
   underlying device being removed.

 - Fix corner-case of DM raid resync reporting if/when the raid becomes
   degraded during resync; otherwise automated raid repair will fail.

 - A few DM multipath fixes to make non-SCSI optimizations, that were
   introduced during the 4.16 merge, useful for all non-SCSI devices,
   rather than narrowly define this non-SCSI mode in terms of "nvme".

   This allows the removal of "queue_mode nvme" that really didn't need
   to be introduced.  Instead DM core will internalize whether
   nvme-specific IO submission optimizations are doable and DM multipath
   will only do SCSI-specific device handler operations if SCSI is in use.

* tag 'for-4.16/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm table: allow upgrade from bio-based to specialized bio-based variant
  dm mpath: remove unnecessary NVMe branching in favor of scsi_dh checks
  dm table: fix "nvme" test
  dm raid: fix incorrect sync_ratio when degraded
  dm: use blkdev_get rather than bdgrab when issuing pass-through ioctl
  dm bufio: avoid false-positive Wmaybe-uninitialized warning
Diffstat (limited to 'drivers')
 -rw-r--r--  drivers/md/dm-bufio.c   16
 -rw-r--r--  drivers/md/dm-mpath.c   66
 -rw-r--r--  drivers/md/dm-raid.c     7
 -rw-r--r--  drivers/md/dm-table.c   16
 -rw-r--r--  drivers/md/dm.c         35
5 files changed, 65 insertions, 75 deletions
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 414c9af54ded..aa2032fa80d4 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -386,9 +386,6 @@ static void __cache_size_refresh(void)
 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
 			       enum data_mode *data_mode)
 {
-	unsigned noio_flag;
-	void *ptr;
-
 	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
 		*data_mode = DATA_MODE_SLAB;
 		return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
@@ -412,16 +409,15 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
 	 * all allocations done by this process (including pagetables) are done
 	 * as if GFP_NOIO was specified.
 	 */
+	if (gfp_mask & __GFP_NORETRY) {
+		unsigned noio_flag = memalloc_noio_save();
+		void *ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
 
-	if (gfp_mask & __GFP_NORETRY)
-		noio_flag = memalloc_noio_save();
-
-	ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
-
-	if (gfp_mask & __GFP_NORETRY)
 		memalloc_noio_restore(noio_flag);
+		return ptr;
+	}
 
-	return ptr;
+	return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
 }
 
 /*
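
A minimal userspace sketch of why the dm-bufio change above silences gcc's -Wmaybe-uninitialized: when the flag variable is declared at function scope but only conditionally written and conditionally read, some compiler versions cannot prove the save/restore branches always pair up; declaring and consuming the locals entirely inside the __GFP_NORETRY-style branch makes the data flow unconditional. The noio_save()/noio_restore() helpers below are stand-ins invented for this illustration, not the kernel's memalloc_noio_save()/memalloc_noio_restore().

#include <stdlib.h>

/* Stand-ins for memalloc_noio_save()/memalloc_noio_restore(). */
static unsigned fake_flags;
static unsigned noio_save(void) { unsigned old = fake_flags; fake_flags |= 1; return old; }
static void noio_restore(unsigned old) { fake_flags = old; }

static void *alloc_data(size_t size, int no_retry)
{
	if (no_retry) {
		/* Declared, written and read entirely inside the branch:
		 * the compiler can see 'flag' is always initialized here. */
		unsigned flag = noio_save();
		void *ptr = malloc(size);

		noio_restore(flag);
		return ptr;
	}
	return malloc(size);
}

int main(void)
{
	free(alloc_data(64, 1));
	free(alloc_data(64, 0));
	return 0;
}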
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 7d3e572072f5..3fde9e9faddd 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -22,6 +22,7 @@
 #include <linux/time.h>
 #include <linux/workqueue.h>
 #include <linux/delay.h>
+#include <scsi/scsi_device.h>
 #include <scsi/scsi_dh.h>
 #include <linux/atomic.h>
 #include <linux/blk-mq.h>
@@ -211,25 +212,13 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
 		else
 			m->queue_mode = DM_TYPE_REQUEST_BASED;
 
-	} else if (m->queue_mode == DM_TYPE_BIO_BASED ||
-		   m->queue_mode == DM_TYPE_NVME_BIO_BASED) {
+	} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
 		INIT_WORK(&m->process_queued_bios, process_queued_bios);
-
-		if (m->queue_mode == DM_TYPE_BIO_BASED) {
-			/*
-			 * bio-based doesn't support any direct scsi_dh management;
-			 * it just discovers if a scsi_dh is attached.
-			 */
-			set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
-		}
-	}
-
-	if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) {
-		set_bit(MPATHF_QUEUE_IO, &m->flags);
-		atomic_set(&m->pg_init_in_progress, 0);
-		atomic_set(&m->pg_init_count, 0);
-		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
-		init_waitqueue_head(&m->pg_init_wait);
+		/*
+		 * bio-based doesn't support any direct scsi_dh management;
+		 * it just discovers if a scsi_dh is attached.
+		 */
+		set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
 	}
 
 	dm_table_set_type(ti->table, m->queue_mode);
@@ -337,14 +326,12 @@ static void __switch_pg(struct multipath *m, struct priority_group *pg)
 {
 	m->current_pg = pg;
 
-	if (m->queue_mode == DM_TYPE_NVME_BIO_BASED)
-		return;
-
 	/* Must we initialise the PG first, and queue I/O till it's ready? */
 	if (m->hw_handler_name) {
 		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 		set_bit(MPATHF_QUEUE_IO, &m->flags);
 	} else {
+		/* FIXME: not needed if no scsi_dh is attached */
 		clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 		clear_bit(MPATHF_QUEUE_IO, &m->flags);
 	}
@@ -385,8 +372,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
 	unsigned bypassed = 1;
 
 	if (!atomic_read(&m->nr_valid_paths)) {
-		if (m->queue_mode != DM_TYPE_NVME_BIO_BASED)
-			clear_bit(MPATHF_QUEUE_IO, &m->flags);
+		clear_bit(MPATHF_QUEUE_IO, &m->flags);
 		goto failed;
 	}
 
@@ -599,7 +585,7 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
 	return pgpath;
 }
 
-static struct pgpath *__map_bio_nvme(struct multipath *m, struct bio *bio)
+static struct pgpath *__map_bio_fast(struct multipath *m, struct bio *bio)
 {
 	struct pgpath *pgpath;
 	unsigned long flags;
@@ -634,8 +620,8 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio,
 {
 	struct pgpath *pgpath;
 
-	if (m->queue_mode == DM_TYPE_NVME_BIO_BASED)
-		pgpath = __map_bio_nvme(m, bio);
+	if (!m->hw_handler_name)
+		pgpath = __map_bio_fast(m, bio);
 	else
 		pgpath = __map_bio(m, bio);
 
@@ -675,8 +661,7 @@ static void process_queued_io_list(struct multipath *m)
 {
 	if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
 		dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
-	else if (m->queue_mode == DM_TYPE_BIO_BASED ||
-		 m->queue_mode == DM_TYPE_NVME_BIO_BASED)
+	else if (m->queue_mode == DM_TYPE_BIO_BASED)
 		queue_work(kmultipathd, &m->process_queued_bios);
 }
 
@@ -838,6 +823,16 @@ retain:
 			 */
 			kfree(m->hw_handler_name);
 			m->hw_handler_name = attached_handler_name;
+
+			/*
+			 * Init fields that are only used when a scsi_dh is attached
+			 */
+			if (!test_and_set_bit(MPATHF_QUEUE_IO, &m->flags)) {
+				atomic_set(&m->pg_init_in_progress, 0);
+				atomic_set(&m->pg_init_count, 0);
+				m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
+				init_waitqueue_head(&m->pg_init_wait);
+			}
 		}
 	}
 
@@ -873,6 +868,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
 	int r;
 	struct pgpath *p;
 	struct multipath *m = ti->private;
+	struct scsi_device *sdev;
 
 	/* we need at least a path arg */
 	if (as->argc < 1) {
@@ -891,7 +887,9 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
 		goto bad;
 	}
 
-	if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) {
+	sdev = scsi_device_from_queue(bdev_get_queue(p->path.dev->bdev));
+	if (sdev) {
+		put_device(&sdev->sdev_gendev);
 		INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
 		r = setup_scsi_dh(p->path.dev->bdev, m, &ti->error);
 		if (r) {
@@ -1001,8 +999,7 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
 	if (!hw_argc)
 		return 0;
 
-	if (m->queue_mode == DM_TYPE_BIO_BASED ||
-	    m->queue_mode == DM_TYPE_NVME_BIO_BASED) {
+	if (m->queue_mode == DM_TYPE_BIO_BASED) {
 		dm_consume_args(as, hw_argc);
 		DMERR("bio-based multipath doesn't allow hardware handler args");
 		return 0;
@@ -1091,8 +1088,6 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
 
 		if (!strcasecmp(queue_mode_name, "bio"))
 			m->queue_mode = DM_TYPE_BIO_BASED;
-		else if (!strcasecmp(queue_mode_name, "nvme"))
-			m->queue_mode = DM_TYPE_NVME_BIO_BASED;
 		else if (!strcasecmp(queue_mode_name, "rq"))
 			m->queue_mode = DM_TYPE_REQUEST_BASED;
 		else if (!strcasecmp(queue_mode_name, "mq"))
@@ -1193,7 +1188,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	ti->num_discard_bios = 1;
 	ti->num_write_same_bios = 1;
 	ti->num_write_zeroes_bios = 1;
-	if (m->queue_mode == DM_TYPE_BIO_BASED || m->queue_mode == DM_TYPE_NVME_BIO_BASED)
+	if (m->queue_mode == DM_TYPE_BIO_BASED)
 		ti->per_io_data_size = multipath_per_bio_data_size();
 	else
 		ti->per_io_data_size = sizeof(struct dm_mpath_io);
@@ -1730,9 +1725,6 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
 		case DM_TYPE_BIO_BASED:
 			DMEMIT("queue_mode bio ");
 			break;
-		case DM_TYPE_NVME_BIO_BASED:
-			DMEMIT("queue_mode nvme ");
-			break;
 		case DM_TYPE_MQ_REQUEST_BASED:
 			DMEMIT("queue_mode mq ");
 			break;
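
The nvme-specific branches removed above all reduce to one question: is a SCSI device behind the path? That is the scsi_device_from_queue() probe now done in parse_path(). A kernel-context sketch of that test, using only calls already visible in this diff (the helper name path_is_scsi() is invented here for illustration, it is not in the patch):

#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <scsi/scsi_device.h>

/* Returns true when the path's request queue belongs to a SCSI device,
 * i.e. scsi_dh setup and the pg_init machinery are worth doing. */
static bool path_is_scsi(struct block_device *bdev)
{
	struct scsi_device *sdev = scsi_device_from_queue(bdev_get_queue(bdev));

	if (sdev) {
		put_device(&sdev->sdev_gendev);	/* drop the reference the lookup took */
		return true;
	}
	return false;
}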
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 7ef469e902c6..c1d1034ff7b7 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3408,9 +3408,10 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
 		set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
 
 	} else {
-		if (test_bit(MD_RECOVERY_NEEDED, &recovery) ||
-		    test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
-		    test_bit(MD_RECOVERY_RUNNING, &recovery))
+		if (!test_bit(MD_RECOVERY_INTR, &recovery) &&
+		    (test_bit(MD_RECOVERY_NEEDED, &recovery) ||
+		     test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
+		     test_bit(MD_RECOVERY_RUNNING, &recovery)))
 			r = mddev->curr_resync_completed;
 		else
 			r = mddev->recovery_cp;
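
A small standalone illustration of the dm-raid condition above: once MD_RECOVERY_INTR is set (the resync was interrupted, e.g. because the array went degraded), progress should come from recovery_cp rather than curr_resync_completed, otherwise sync_ratio can claim the array is fully in sync and automated repair never triggers. The helper below only mirrors the boolean logic of the hunk; it is not kernel code:

#include <stdbool.h>
#include <stdio.h>

static const char *progress_source(bool intr, bool needed, bool reshape, bool running)
{
	/* Same shape as the fixed test in rs_get_progress(). */
	if (!intr && (needed || reshape || running))
		return "curr_resync_completed";
	return "recovery_cp";
}

int main(void)
{
	printf("%s\n", progress_source(false, true, false, false));	/* curr_resync_completed */
	printf("%s\n", progress_source(true,  true, false, false));	/* recovery_cp: resync interrupted */
	return 0;
}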
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 5fe7ec356c33..7eb3e2a3c07d 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -942,17 +942,12 @@ static int dm_table_determine_type(struct dm_table *t)
 
 	if (t->type != DM_TYPE_NONE) {
 		/* target already set the table's type */
-		if (t->type == DM_TYPE_BIO_BASED)
-			return 0;
-		else if (t->type == DM_TYPE_NVME_BIO_BASED) {
-			if (!dm_table_does_not_support_partial_completion(t)) {
-				DMERR("nvme bio-based is only possible with devices"
-				      " that don't support partial completion");
-				return -EINVAL;
-			}
-			/* Fallthru, also verify all devices are blk-mq */
+		if (t->type == DM_TYPE_BIO_BASED) {
+			/* possibly upgrade to a variant of bio-based */
+			goto verify_bio_based;
 		}
 		BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
+		BUG_ON(t->type == DM_TYPE_NVME_BIO_BASED);
 		goto verify_rq_based;
 	}
 
@@ -985,6 +980,7 @@ static int dm_table_determine_type(struct dm_table *t)
 	}
 
 	if (bio_based) {
+verify_bio_based:
 		/* We must use this table as bio-based */
 		t->type = DM_TYPE_BIO_BASED;
 		if (dm_table_supports_dax(t) ||
@@ -1755,7 +1751,7 @@ static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev
 	char b[BDEVNAME_SIZE];
 
 	/* For now, NVMe devices are the only devices of this class */
-	return (strncmp(bdevname(dev->bdev, b), "nvme", 3) == 0);
+	return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0);
 }
 
 static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
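
The dm-table hunk above is a one-character fix with real consequences: strncmp() with a length of 3 only compares "nvm", so any block device whose name merely starts with "nvm" would pass the NVMe test; a length of 4 requires the full "nvme" prefix. A standalone demonstration (the device names below are only examples):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *name = "nvme0n1";	/* typical NVMe block device name */
	const char *odd  = "nvmX";	/* hypothetical non-NVMe name starting with "nvm" */

	printf("%d\n", strncmp(name, "nvme", 4) == 0);	/* 1: matches, as intended */
	printf("%d\n", strncmp(odd,  "nvme", 3) == 0);	/* 1: false positive with length 3 */
	printf("%d\n", strncmp(odd,  "nvme", 4) == 0);	/* 0: rejected with length 4 */
	return 0;
}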
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 68136806d365..45328d8b2859 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -458,9 +458,11 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 	return dm_get_geometry(md, geo);
 }
 
-static int dm_grab_bdev_for_ioctl(struct mapped_device *md,
-				  struct block_device **bdev,
-				  fmode_t *mode)
+static char *_dm_claim_ptr = "I belong to device-mapper";
+
+static int dm_get_bdev_for_ioctl(struct mapped_device *md,
+				 struct block_device **bdev,
+				 fmode_t *mode)
 {
 	struct dm_target *tgt;
 	struct dm_table *map;
@@ -490,6 +492,10 @@ retry:
 		goto out;
 
 	bdgrab(*bdev);
+	r = blkdev_get(*bdev, *mode, _dm_claim_ptr);
+	if (r < 0)
+		goto out;
+
 	dm_put_live_table(md, srcu_idx);
 	return r;
 
@@ -508,7 +514,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
 	struct mapped_device *md = bdev->bd_disk->private_data;
 	int r;
 
-	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
+	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
 	if (r < 0)
 		return r;
 
@@ -528,7 +534,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
 
 	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
 out:
-	bdput(bdev);
+	blkdev_put(bdev, mode);
 	return r;
 }
 
@@ -708,14 +714,13 @@ static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
 static int open_table_device(struct table_device *td, dev_t dev,
 			     struct mapped_device *md)
 {
-	static char *_claim_ptr = "I belong to device-mapper";
 	struct block_device *bdev;
 
 	int r;
 
 	BUG_ON(td->dm_dev.bdev);
 
-	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr);
+	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
 	if (IS_ERR(bdev))
 		return PTR_ERR(bdev);
 
@@ -3011,7 +3016,7 @@ static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
 	fmode_t mode;
 	int r;
 
-	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
+	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
 	if (r < 0)
 		return r;
 
@@ -3021,7 +3026,7 @@ static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
 	else
 		r = -EOPNOTSUPP;
 
-	bdput(bdev);
+	blkdev_put(bdev, mode);
 	return r;
 }
 
@@ -3032,7 +3037,7 @@ static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
 	fmode_t mode;
 	int r;
 
-	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
+	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
 	if (r < 0)
 		return r;
 
@@ -3042,7 +3047,7 @@ static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
 	else
 		r = -EOPNOTSUPP;
 
-	bdput(bdev);
+	blkdev_put(bdev, mode);
 	return r;
 }
 
@@ -3054,7 +3059,7 @@ static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
 	fmode_t mode;
 	int r;
 
-	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
+	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
 	if (r < 0)
 		return r;
 
@@ -3064,7 +3069,7 @@ static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
 	else
 		r = -EOPNOTSUPP;
 
-	bdput(bdev);
+	blkdev_put(bdev, mode);
 	return r;
 }
 
@@ -3075,7 +3080,7 @@ static int dm_pr_clear(struct block_device *bdev, u64 key)
 	fmode_t mode;
 	int r;
 
-	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
+	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
 	if (r < 0)
 		return r;
 
@@ -3085,7 +3090,7 @@ static int dm_pr_clear(struct block_device *bdev, u64 key)
 	else
 		r = -EOPNOTSUPP;
 
-	bdput(bdev);
+	blkdev_put(bdev, mode);
 	return r;
 }
 