Diffstat (limited to 'drivers/md')

 drivers/md/bcache/super.c | 27
 drivers/md/dm-bufio.c     | 16
 drivers/md/dm-mpath.c     | 77
 drivers/md/dm-raid.c      |  7
 drivers/md/dm-table.c     | 16
 drivers/md/dm.c           | 35

 6 files changed, 91 insertions(+), 87 deletions(-)
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 4d1d8dfb2d2a..f2273143b3cb 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -963,6 +963,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
 	uint32_t rtime = cpu_to_le32(get_seconds());
 	struct uuid_entry *u;
 	char buf[BDEVNAME_SIZE];
+	struct cached_dev *exist_dc, *t;
 
 	bdevname(dc->bdev, buf);
 
@@ -987,6 +988,16 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
 		return -EINVAL;
 	}
 
+	/* Check whether already attached */
+	list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
+		if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
+			pr_err("Tried to attach %s but duplicate UUID already attached",
+				buf);
+
+			return -EINVAL;
+		}
+	}
+
 	u = uuid_find(c, dc->sb.uuid);
 
 	if (u &&
@@ -1204,7 +1215,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
 
 	return;
 err:
-	pr_notice("error opening %s: %s", bdevname(bdev, name), err);
+	pr_notice("error %s: %s", bdevname(bdev, name), err);
 	bcache_device_stop(&dc->disk);
 }
 
@@ -1883,6 +1894,8 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
 	const char *err = NULL; /* must be set for any error case */
 	int ret = 0;
 
+	bdevname(bdev, name);
+
 	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
 	ca->bdev = bdev;
 	ca->bdev->bd_holder = ca;
@@ -1891,11 +1904,12 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
 	bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page;
 	get_page(sb_page);
 
-	if (blk_queue_discard(bdev_get_queue(ca->bdev)))
+	if (blk_queue_discard(bdev_get_queue(bdev)))
 		ca->discard = CACHE_DISCARD(&ca->sb);
 
 	ret = cache_alloc(ca);
 	if (ret != 0) {
+		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 		if (ret == -ENOMEM)
 			err = "cache_alloc(): -ENOMEM";
 		else
@@ -1918,14 +1932,14 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
 		goto out;
 	}
 
-	pr_info("registered cache device %s", bdevname(bdev, name));
+	pr_info("registered cache device %s", name);
 
 out:
 	kobject_put(&ca->kobj);
 
 err:
 	if (err)
-		pr_notice("error opening %s: %s", bdevname(bdev, name), err);
+		pr_notice("error %s: %s", name, err);
 
 	return ret;
 }
@@ -2014,6 +2028,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
 	if (err)
 		goto err_close;
 
+	err = "failed to register device";
 	if (SB_IS_BDEV(sb)) {
 		struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
 		if (!dc)
@@ -2028,7 +2043,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
 			goto err_close;
 
 		if (register_cache(sb, sb_page, bdev, ca) != 0)
-			goto err_close;
+			goto err;
 	}
 out:
 	if (sb_page)
@@ -2041,7 +2056,7 @@ out:
 err_close:
 	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 err:
-	pr_info("error opening %s: %s", path, err);
+	pr_info("error %s: %s", path, err);
 	ret = -EINVAL;
 	goto out;
 }
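The hunk added to bch_cached_dev_attach() closes the case where a backing device whose superblock UUID is already attached could be attached a second time. A minimal userspace sketch of that duplicate-UUID guard; dev_entry and check_duplicate are illustrative names, not bcache's:

/*
 * Userspace sketch of the duplicate-UUID guard above; a singly linked
 * list stands in for the cache set's cached_devs list.
 */
#include <stdio.h>
#include <string.h>

struct dev_entry {
	unsigned char uuid[16];
	struct dev_entry *next;
};

/* Reject (-1) when new_uuid matches an already-attached entry. */
static int check_duplicate(const struct dev_entry *attached,
			   const unsigned char new_uuid[16])
{
	for (const struct dev_entry *d = attached; d; d = d->next) {
		if (!memcmp(new_uuid, d->uuid, 16)) {
			fprintf(stderr, "duplicate UUID already attached\n");
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct dev_entry second = { .uuid = { 0xbb } };
	struct dev_entry first = { .uuid = { 0xaa }, .next = &second };
	unsigned char dup[16] = { 0xbb };

	return check_duplicate(&first, dup) ? 1 : 0;	/* exits 1: rejected */
}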
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 414c9af54ded..aa2032fa80d4 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -386,9 +386,6 @@ static void __cache_size_refresh(void)
 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
 			       enum data_mode *data_mode)
 {
-	unsigned noio_flag;
-	void *ptr;
-
 	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
 		*data_mode = DATA_MODE_SLAB;
 		return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
@@ -412,16 +409,15 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
 	 * all allocations done by this process (including pagetables) are done
 	 * as if GFP_NOIO was specified.
 	 */
+	if (gfp_mask & __GFP_NORETRY) {
+		unsigned noio_flag = memalloc_noio_save();
+		void *ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
 
-	if (gfp_mask & __GFP_NORETRY)
-		noio_flag = memalloc_noio_save();
-
-	ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
-
-	if (gfp_mask & __GFP_NORETRY)
 		memalloc_noio_restore(noio_flag);
+		return ptr;
+	}
 
-	return ptr;
+	return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
 }
 
 /*
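The dm-bufio restructuring is purely structural: the noio save/restore pair and the result pointer now live entirely inside the __GFP_NORETRY branch, so noio_flag can never be consumed uninitialized and the flag is tested once instead of twice. A condensed sketch of the resulting idiom, assuming kernel context (alloc_noretry_aware is an illustrative name, not dm-bufio's; not a standalone program):

/*
 * Condensed sketch of the save/restore scoping established above:
 * both halves of the pair sit in the same branch, so there is no
 * path on which the flag is restored without having been saved.
 */
static void *alloc_noretry_aware(size_t size, gfp_t gfp_mask)
{
	if (gfp_mask & __GFP_NORETRY) {
		unsigned noio_flag = memalloc_noio_save();
		void *ptr = __vmalloc(size, gfp_mask, PAGE_KERNEL);

		memalloc_noio_restore(noio_flag);
		return ptr;
	}

	return __vmalloc(size, gfp_mask, PAGE_KERNEL);
}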
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 7d3e572072f5..a05a560d3cba 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -211,29 +211,27 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
 		else
 			m->queue_mode = DM_TYPE_REQUEST_BASED;
 
-	} else if (m->queue_mode == DM_TYPE_BIO_BASED ||
-		   m->queue_mode == DM_TYPE_NVME_BIO_BASED) {
+	} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
 		INIT_WORK(&m->process_queued_bios, process_queued_bios);
-
-		if (m->queue_mode == DM_TYPE_BIO_BASED) {
-			/*
-			 * bio-based doesn't support any direct scsi_dh management;
-			 * it just discovers if a scsi_dh is attached.
-			 */
-			set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
-		}
-	}
-
-	if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) {
-		set_bit(MPATHF_QUEUE_IO, &m->flags);
-		atomic_set(&m->pg_init_in_progress, 0);
-		atomic_set(&m->pg_init_count, 0);
-		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
-		init_waitqueue_head(&m->pg_init_wait);
+		/*
+		 * bio-based doesn't support any direct scsi_dh management;
+		 * it just discovers if a scsi_dh is attached.
+		 */
+		set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
 	}
 
 	dm_table_set_type(ti->table, m->queue_mode);
 
+	/*
+	 * Init fields that are only used when a scsi_dh is attached
+	 * - must do this unconditionally (really doesn't hurt non-SCSI uses)
+	 */
+	set_bit(MPATHF_QUEUE_IO, &m->flags);
+	atomic_set(&m->pg_init_in_progress, 0);
+	atomic_set(&m->pg_init_count, 0);
+	m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
+	init_waitqueue_head(&m->pg_init_wait);
+
 	return 0;
 }
 
@@ -337,9 +335,6 @@ static void __switch_pg(struct multipath *m, struct priority_group *pg)
 {
 	m->current_pg = pg;
 
-	if (m->queue_mode == DM_TYPE_NVME_BIO_BASED)
-		return;
-
 	/* Must we initialise the PG first, and queue I/O till it's ready? */
 	if (m->hw_handler_name) {
 		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
@@ -385,8 +380,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
 	unsigned bypassed = 1;
 
 	if (!atomic_read(&m->nr_valid_paths)) {
-		if (m->queue_mode != DM_TYPE_NVME_BIO_BASED)
-			clear_bit(MPATHF_QUEUE_IO, &m->flags);
+		clear_bit(MPATHF_QUEUE_IO, &m->flags);
 		goto failed;
 	}
 
@@ -599,7 +593,7 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
 	return pgpath;
 }
 
-static struct pgpath *__map_bio_nvme(struct multipath *m, struct bio *bio)
+static struct pgpath *__map_bio_fast(struct multipath *m, struct bio *bio)
 {
 	struct pgpath *pgpath;
 	unsigned long flags;
@@ -634,8 +628,8 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio,
 {
 	struct pgpath *pgpath;
 
-	if (m->queue_mode == DM_TYPE_NVME_BIO_BASED)
-		pgpath = __map_bio_nvme(m, bio);
+	if (!m->hw_handler_name)
+		pgpath = __map_bio_fast(m, bio);
 	else
 		pgpath = __map_bio(m, bio);
 
@@ -675,8 +669,7 @@ static void process_queued_io_list(struct multipath *m)
 {
 	if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
 		dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
-	else if (m->queue_mode == DM_TYPE_BIO_BASED ||
-		 m->queue_mode == DM_TYPE_NVME_BIO_BASED)
+	else if (m->queue_mode == DM_TYPE_BIO_BASED)
 		queue_work(kmultipathd, &m->process_queued_bios);
 }
 
@@ -811,15 +804,14 @@ static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
 	return 0;
 }
 
-static int setup_scsi_dh(struct block_device *bdev, struct multipath *m, char **error)
+static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
+			 const char *attached_handler_name, char **error)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
-	const char *attached_handler_name;
 	int r;
 
 	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
 retain:
-		attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
 		if (attached_handler_name) {
 			/*
 			 * Clear any hw_handler_params associated with a
@@ -873,6 +865,8 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
 	int r;
 	struct pgpath *p;
 	struct multipath *m = ti->private;
+	struct request_queue *q;
+	const char *attached_handler_name;
 
 	/* we need at least a path arg */
 	if (as->argc < 1) {
@@ -891,9 +885,11 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
 		goto bad;
 	}
 
-	if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) {
+	q = bdev_get_queue(p->path.dev->bdev);
+	attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
+	if (attached_handler_name) {
 		INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
-		r = setup_scsi_dh(p->path.dev->bdev, m, &ti->error);
+		r = setup_scsi_dh(p->path.dev->bdev, m, attached_handler_name, &ti->error);
 		if (r) {
 			dm_put_device(ti, p->path.dev);
 			goto bad;
@@ -1001,8 +997,7 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
 	if (!hw_argc)
 		return 0;
 
-	if (m->queue_mode == DM_TYPE_BIO_BASED ||
-	    m->queue_mode == DM_TYPE_NVME_BIO_BASED) {
+	if (m->queue_mode == DM_TYPE_BIO_BASED) {
 		dm_consume_args(as, hw_argc);
 		DMERR("bio-based multipath doesn't allow hardware handler args");
 		return 0;
@@ -1091,8 +1086,6 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
 
 		if (!strcasecmp(queue_mode_name, "bio"))
 			m->queue_mode = DM_TYPE_BIO_BASED;
-		else if (!strcasecmp(queue_mode_name, "nvme"))
-			m->queue_mode = DM_TYPE_NVME_BIO_BASED;
 		else if (!strcasecmp(queue_mode_name, "rq"))
 			m->queue_mode = DM_TYPE_REQUEST_BASED;
 		else if (!strcasecmp(queue_mode_name, "mq"))
@@ -1193,7 +1186,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	ti->num_discard_bios = 1;
 	ti->num_write_same_bios = 1;
 	ti->num_write_zeroes_bios = 1;
-	if (m->queue_mode == DM_TYPE_BIO_BASED || m->queue_mode == DM_TYPE_NVME_BIO_BASED)
+	if (m->queue_mode == DM_TYPE_BIO_BASED)
 		ti->per_io_data_size = multipath_per_bio_data_size();
 	else
 		ti->per_io_data_size = sizeof(struct dm_mpath_io);
@@ -1730,9 +1723,6 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
 		case DM_TYPE_BIO_BASED:
 			DMEMIT("queue_mode bio ");
 			break;
-		case DM_TYPE_NVME_BIO_BASED:
-			DMEMIT("queue_mode nvme ");
-			break;
 		case DM_TYPE_MQ_REQUEST_BASED:
 			DMEMIT("queue_mode mq ");
 			break;
@@ -2030,8 +2020,9 @@ static int multipath_busy(struct dm_target *ti)
  *---------------------------------------------------------------*/
 static struct target_type multipath_target = {
 	.name = "multipath",
-	.version = {1, 12, 0},
-	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
+	.version = {1, 13, 0},
+	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE |
+		    DM_TARGET_PASSES_INTEGRITY,
 	.module = THIS_MODULE,
 	.ctr = multipath_ctr,
 	.dtr = multipath_dtr,
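With DM_TYPE_NVME_BIO_BASED removed, nothing in the bio fast path keys off a queue_mode any more: the mapping function is chosen purely by the absence of a hardware handler. An illustrative condensation of that dispatch, assuming kernel context (map_bio_dispatch is a made-up wrapper around the two functions shown in the diff):

/*
 * Illustrative condensation: the old "queue_mode == DM_TYPE_NVME_BIO_BASED"
 * test collapses into a check for a configured hardware handler, since
 * pg_init/scsi_dh bookkeeping is the only thing the slower path adds.
 */
static struct pgpath *map_bio_dispatch(struct multipath *m, struct bio *bio)
{
	if (!m->hw_handler_name)
		/* no scsi_dh anywhere in the table: skip pg_init bookkeeping */
		return __map_bio_fast(m, bio);

	return __map_bio(m, bio);
}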
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 7ef469e902c6..c1d1034ff7b7 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3408,9 +3408,10 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
 			set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
 
 	} else {
-		if (test_bit(MD_RECOVERY_NEEDED, &recovery) ||
-		    test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
-		    test_bit(MD_RECOVERY_RUNNING, &recovery))
+		if (!test_bit(MD_RECOVERY_INTR, &recovery) &&
+		    (test_bit(MD_RECOVERY_NEEDED, &recovery) ||
+		     test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
+		     test_bit(MD_RECOVERY_RUNNING, &recovery)))
 			r = mddev->curr_resync_completed;
 		else
 			r = mddev->recovery_cp;
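The reworked dm-raid condition reads progress from curr_resync_completed only while recovery is active and has not been interrupted; an interrupted recovery now falls back to recovery_cp even if NEEDED, RESHAPE or RUNNING are still set. A standalone userspace model of just that boolean change (flag names mirror the MD_RECOVERY_* bits, but nothing here is kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Models only the predicate added above. */
static bool use_curr_resync(bool intr, bool needed, bool reshape, bool running)
{
	return !intr && (needed || reshape || running);
}

int main(void)
{
	/* interrupted but still flagged running: the old code read
	 * curr_resync_completed, the new code falls back to recovery_cp */
	printf("interrupted+running -> %d\n",
	       use_curr_resync(true, false, false, true));   /* prints 0 */
	/* a clean, active recovery still uses curr_resync_completed */
	printf("clean+running       -> %d\n",
	       use_curr_resync(false, false, false, true));  /* prints 1 */
	return 0;
}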
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 5fe7ec356c33..7eb3e2a3c07d 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -942,17 +942,12 @@ static int dm_table_determine_type(struct dm_table *t)
 
 	if (t->type != DM_TYPE_NONE) {
 		/* target already set the table's type */
-		if (t->type == DM_TYPE_BIO_BASED)
-			return 0;
-		else if (t->type == DM_TYPE_NVME_BIO_BASED) {
-			if (!dm_table_does_not_support_partial_completion(t)) {
-				DMERR("nvme bio-based is only possible with devices"
-				      " that don't support partial completion");
-				return -EINVAL;
-			}
-			/* Fallthru, also verify all devices are blk-mq */
+		if (t->type == DM_TYPE_BIO_BASED) {
+			/* possibly upgrade to a variant of bio-based */
+			goto verify_bio_based;
 		}
 		BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
+		BUG_ON(t->type == DM_TYPE_NVME_BIO_BASED);
 		goto verify_rq_based;
 	}
 
@@ -985,6 +980,7 @@ static int dm_table_determine_type(struct dm_table *t)
 	}
 
 	if (bio_based) {
+verify_bio_based:
 		/* We must use this table as bio-based */
 		t->type = DM_TYPE_BIO_BASED;
 		if (dm_table_supports_dax(t) ||
@@ -1755,7 +1751,7 @@ static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev
 	char b[BDEVNAME_SIZE];
 
 	/* For now, NVMe devices are the only devices of this class */
-	return (strncmp(bdevname(dev->bdev, b), "nvme", 3) == 0);
+	return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0);
 }
 
 static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
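The one-character change to device_no_partial_completion() is easy to miss: strncmp() with n = 3 compares only "nvm", so any block device whose name merely begins with those three letters was classified as NVMe. A standalone demonstration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *names[] = { "nvme0n1", "nvm0", "sda" };

	/* n=3 gives a false positive on "nvm0"; n=4 requires the full prefix */
	for (int i = 0; i < 3; i++)
		printf("%-8s n=3 matches:%d  n=4 matches:%d\n", names[i],
		       strncmp(names[i], "nvme", 3) == 0,
		       strncmp(names[i], "nvme", 4) == 0);
	return 0;
}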
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 68136806d365..45328d8b2859 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -458,9 +458,11 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 	return dm_get_geometry(md, geo);
 }
 
-static int dm_grab_bdev_for_ioctl(struct mapped_device *md,
-				  struct block_device **bdev,
-				  fmode_t *mode)
+static char *_dm_claim_ptr = "I belong to device-mapper";
+
+static int dm_get_bdev_for_ioctl(struct mapped_device *md,
+				 struct block_device **bdev,
+				 fmode_t *mode)
 {
 	struct dm_target *tgt;
 	struct dm_table *map;
@@ -490,6 +492,10 @@ retry:
 		goto out;
 
 	bdgrab(*bdev);
+	r = blkdev_get(*bdev, *mode, _dm_claim_ptr);
+	if (r < 0)
+		goto out;
+
 	dm_put_live_table(md, srcu_idx);
 	return r;
 
@@ -508,7 +514,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
 	struct mapped_device *md = bdev->bd_disk->private_data;
 	int r;
 
-	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
+	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
 	if (r < 0)
 		return r;
 
@@ -528,7 +534,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
 
 	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
 out:
-	bdput(bdev);
+	blkdev_put(bdev, mode);
 	return r;
 }
 
@@ -708,14 +714,13 @@ static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
 static int open_table_device(struct table_device *td, dev_t dev,
 			     struct mapped_device *md)
 {
-	static char *_claim_ptr = "I belong to device-mapper";
 	struct block_device *bdev;
 
 	int r;
 
 	BUG_ON(td->dm_dev.bdev);
 
-	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr);
+	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
 	if (IS_ERR(bdev))
 		return PTR_ERR(bdev);
 
@@ -3011,7 +3016,7 @@ static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
 	fmode_t mode;
 	int r;
 
-	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
+	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
 	if (r < 0)
 		return r;
 
@@ -3021,7 +3026,7 @@ static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
 	else
 		r = -EOPNOTSUPP;
 
-	bdput(bdev);
+	blkdev_put(bdev, mode);
 	return r;
 }
 
@@ -3032,7 +3037,7 @@ static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
 	fmode_t mode;
 	int r;
 
-	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
+	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
 	if (r < 0)
 		return r;
 
@@ -3042,7 +3047,7 @@ static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
 	else
 		r = -EOPNOTSUPP;
 
-	bdput(bdev);
+	blkdev_put(bdev, mode);
 	return r;
 }
 
@@ -3054,7 +3059,7 @@ static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
 	fmode_t mode;
 	int r;
 
-	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
+	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
 	if (r < 0)
 		return r;
 
@@ -3064,7 +3069,7 @@ static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
 	else
 		r = -EOPNOTSUPP;
 
-	bdput(bdev);
+	blkdev_put(bdev, mode);
 	return r;
 }
 
@@ -3075,7 +3080,7 @@ static int dm_pr_clear(struct block_device *bdev, u64 key)
 	fmode_t mode;
 	int r;
 
-	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
+	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
 	if (r < 0)
 		return r;
 
@@ -3085,7 +3090,7 @@ static int dm_pr_clear(struct block_device *bdev, u64 key)
 	else
 		r = -EOPNOTSUPP;
 
-	bdput(bdev);
+	blkdev_put(bdev, mode);
 	return r;
 }
 
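Taken together, the dm.c hunks change the lifetime rule on the ioctl and persistent-reservation paths: dm_get_bdev_for_ioctl() now takes a real blkdev_get() reference claimed with _dm_claim_ptr, and every caller balances it with blkdev_put(bdev, mode) instead of the weaker bdput(). A skeleton of the pattern each dm_pr_* function follows after this patch, assuming kernel context with the op body trimmed (dm_pr_op_skeleton is a made-up stand-in name):

/*
 * Sketch only: the acquire/release pairing established above, common to
 * dm_pr_reserve(), dm_pr_release(), dm_pr_preempt() and dm_pr_clear().
 */
static int dm_pr_op_skeleton(struct mapped_device *md)
{
	struct block_device *bdev;
	fmode_t mode;
	int r;

	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);	/* takes a blkdev_get() ref */
	if (r < 0)
		return r;

	/* ... perform the pr_ops call on bdev, or set r = -EOPNOTSUPP ... */

	blkdev_put(bdev, mode);				/* drops that same ref */
	return r;
}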