author    Linus Torvalds <torvalds@linux-foundation.org>  2019-04-05 21:34:33 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-04-05 21:34:33 -0400
commit    4f1cbe078546914538d8aabba04db984da68dcbf
tree      097f4448e0e990862706f3a72fbbc131955ce171 /drivers
parent    3e28fb0fcb69dbedfe254939143198b46d83bfa1
parent    4ed319c6ac08e9a28fca7ac188181ac122f4de84
Merge tag 'for-5.1/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper fixes from Mike Snitzer:
- Two queue_limits stacking fixes: disable discards if the underlying
  driver no longer supports them, and propagate BDI_CAP_STABLE_WRITES
  to fix sporadic checksum errors (see the sketch after the shortlog
  below).
- A revert of a DM core max bio size limit that wasn't needed, given
  that dm-crypt was already updated to impose an equivalent limit.
- Fix dm-init to properly establish 'const' for its __initconst array.
- Fix a deadlock in the DM integrity target that occurs when
  overlapping I/O is issued to it, plus two smaller fixes to the DM
  integrity target.
* tag 'for-5.1/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
dm integrity: fix deadlock with overlapping I/O
dm: disable DISCARD if the underlying storage no longer supports it
dm table: propagate BDI_CAP_STABLE_WRITES to fix sporadic checksum errors
dm: revert 8f50e358153d ("dm: limit the max bio size as BIO_MAX_PAGES * PAGE_SIZE")
dm init: fix const confusion for dm_allowed_targets array
dm integrity: make dm_integrity_init and dm_integrity_exit static
dm integrity: change memcmp to strncmp in dm_integrity_ctr
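
A note on the stable-pages fix: targets such as dm-integrity checksum data themselves, so if an upper layer can redirty a page while its write is still in flight, the checksum computed before the transfer no longer matches the bytes that reach the media. Below is a toy user-space sketch of that failure mode, not DM code: the checksum() helper is hypothetical and memcpy() stands in for the DMA transfer.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy checksum standing in for a real integrity hash. */
static uint32_t checksum(const uint8_t *buf, size_t len)
{
        uint32_t sum = 0;
        for (size_t i = 0; i < len; i++)
                sum = sum * 31 + buf[i];
        return sum;
}

int main(void)
{
        uint8_t page[4096] = { 0 };
        uint8_t on_disk[4096];

        /* The driver computes the checksum it will store with the data... */
        uint32_t stored = checksum(page, sizeof(page));

        /* ...but without stable pages the writer may redirty the page
         * before the transfer completes. */
        page[0] = 0xff;

        /* The "DMA" then captures the mutated contents. */
        memcpy(on_disk, page, sizeof(on_disk));

        printf("stored %08x, on-disk %08x -> %s\n",
               stored, checksum(on_disk, sizeof(on_disk)),
               stored == checksum(on_disk, sizeof(on_disk)) ? "ok" : "MISMATCH");
        return 0;
}

With BDI_CAP_STABLE_WRITES set, writers are instead made to wait until the page has been written out, so the stored and recomputed sums always agree.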
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/md/dm-core.h      |  1
-rw-r--r--  drivers/md/dm-init.c      |  2
-rw-r--r--  drivers/md/dm-integrity.c | 16
-rw-r--r--  drivers/md/dm-rq.c        | 11
-rw-r--r--  drivers/md/dm-table.c     | 39
-rw-r--r--  drivers/md/dm.c           | 30
6 files changed, 72 insertions, 27 deletions
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 95c6d86ab5e8..c4ef1fceead6 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -115,6 +115,7 @@ struct mapped_device {
 	struct srcu_struct io_barrier;
 };
 
+void disable_discard(struct mapped_device *md);
 void disable_write_same(struct mapped_device *md);
 void disable_write_zeroes(struct mapped_device *md);
 
diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c
index b53f30f16b4d..4b76f84424c3 100644
--- a/drivers/md/dm-init.c
+++ b/drivers/md/dm-init.c
@@ -36,7 +36,7 @@ struct dm_device {
 	struct list_head list;
 };
 
-const char *dm_allowed_targets[] __initconst = {
+const char * const dm_allowed_targets[] __initconst = {
 	"crypt",
 	"delay",
 	"linear",
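
Why the dm-init change matters: const char *dm_allowed_targets[] declares a mutable array of pointers to const char, so the object itself is writable and conflicts with __initconst, which places it in a read-only init section; adding the second const makes the array itself read-only. A minimal user-space sketch of the distinction (hypothetical names, no kernel section attributes):

/* Array of pointers to const char: the strings are read-only, but the
 * pointer slots themselves may be reassigned. */
static const char *mutable_slots[] = { "crypt", "delay", "linear" };

/* Const array of const pointers: fully read-only, eligible for a
 * read-only data section. */
static const char * const readonly_slots[] = { "crypt", "delay", "linear" };

int main(void)
{
        mutable_slots[0] = "zero";     /* compiles: only the pointees are const */
        /* readonly_slots[0] = "zero";    error: assignment of read-only location */
        (void)readonly_slots;
        return 0;
}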
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index d57d997a52c8..7c678f50aaa3 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -913,7 +913,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsig
 static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
 {
 	return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
-	       range2->logical_sector + range2->n_sectors > range2->logical_sector;
+	       range1->logical_sector + range1->n_sectors > range2->logical_sector;
 }
 
 static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
@@ -959,8 +959,6 @@ static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity
 		struct dm_integrity_range *last_range =
 			list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
 		struct task_struct *last_range_task;
-		if (!ranges_overlap(range, last_range))
-			break;
 		last_range_task = last_range->task;
 		list_del(&last_range->wait_entry);
 		if (!add_new_range(ic, last_range, false)) {
@@ -3185,7 +3183,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
 			journal_watermark = val;
 		else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
 			sync_msec = val;
-		else if (!memcmp(opt_string, "meta_device:", strlen("meta_device:"))) {
+		else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
 			if (ic->meta_dev) {
 				dm_put_device(ti, ic->meta_dev);
 				ic->meta_dev = NULL;
@@ -3204,17 +3202,17 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
 				goto bad;
 			}
 			ic->sectors_per_block = val >> SECTOR_SHIFT;
-		} else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
+		} else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
 			r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
 					    "Invalid internal_hash argument");
 			if (r)
 				goto bad;
-		} else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
+		} else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
 			r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
 					    "Invalid journal_crypt argument");
 			if (r)
 				goto bad;
-		} else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
+		} else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
 			r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
 					    "Invalid journal_mac argument");
 			if (r)
@@ -3616,7 +3614,7 @@ static struct target_type integrity_target = {
 	.io_hints	= dm_integrity_io_hints,
 };
 
-int __init dm_integrity_init(void)
+static int __init dm_integrity_init(void)
 {
 	int r;
 
@@ -3635,7 +3633,7 @@ int __init dm_integrity_init(void)
 	return r;
 }
 
-void dm_integrity_exit(void)
+static void __exit dm_integrity_exit(void)
 {
 	dm_unregister_target(&integrity_target);
 	kmem_cache_destroy(journal_io_cache);
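
The first dm-integrity hunk above is the heart of the deadlock fix: the old predicate's second comparison referenced only range2, making it vacuously true for any non-empty range, so the test collapsed to "range1 starts before range2 ends" and misreported disjoint ranges as overlapping. A standalone sketch of both predicates, with a simplified struct range standing in for dm_integrity_range:

#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long start, n; };  /* half-open: [start, start + n) */

/* Old predicate: the second comparison never involves r1, so for any
 * r2 with n > 0 it is always true. */
static bool overlap_buggy(const struct range *r1, const struct range *r2)
{
        return r1->start < r2->start + r2->n &&
               r2->start + r2->n > r2->start;
}

/* Fixed predicate: a genuine interval-intersection test. */
static bool overlap_fixed(const struct range *r1, const struct range *r2)
{
        return r1->start < r2->start + r2->n &&
               r1->start + r1->n > r2->start;
}

int main(void)
{
        struct range a = { 0, 8 };     /* sectors [0, 8) */
        struct range b = { 100, 8 };   /* sectors [100, 108), disjoint from a */

        /* Prints "buggy: 1 fixed: 0" -- the old test flags disjoint ranges. */
        printf("buggy: %d fixed: %d\n",
               overlap_buggy(&a, &b), overlap_fixed(&a, &b));
        return 0;
}

As the second hunk reads, the removed early break in remove_range_unlocked stopped scanning the wait list at the first non-overlapping waiter, so waiters queued behind it were never retried.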
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 09773636602d..b66745bd08bb 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -222,11 +222,14 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
 	}
 
 	if (unlikely(error == BLK_STS_TARGET)) {
-		if (req_op(clone) == REQ_OP_WRITE_SAME &&
-		    !clone->q->limits.max_write_same_sectors)
+		if (req_op(clone) == REQ_OP_DISCARD &&
+		    !clone->q->limits.max_discard_sectors)
+			disable_discard(tio->md);
+		else if (req_op(clone) == REQ_OP_WRITE_SAME &&
+			 !clone->q->limits.max_write_same_sectors)
 			disable_write_same(tio->md);
-		if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
-		    !clone->q->limits.max_write_zeroes_sectors)
+		else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
+			 !clone->q->limits.max_write_zeroes_sectors)
 			disable_write_zeroes(tio->md);
 	}
 
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index ba9481f1bf3c..cde3b49b2a91 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1844,6 +1844,36 @@ static bool dm_table_supports_secure_erase(struct dm_table *t)
 	return true;
 }
 
+static int device_requires_stable_pages(struct dm_target *ti,
+					struct dm_dev *dev, sector_t start,
+					sector_t len, void *data)
+{
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+
+	return q && bdi_cap_stable_pages_required(q->backing_dev_info);
+}
+
+/*
+ * If any underlying device requires stable pages, a table must require
+ * them as well. Only targets that support iterate_devices are considered:
+ * don't want error, zero, etc to require stable pages.
+ */
+static bool dm_table_requires_stable_pages(struct dm_table *t)
+{
+	struct dm_target *ti;
+	unsigned i;
+
+	for (i = 0; i < dm_table_get_num_targets(t); i++) {
+		ti = dm_table_get_target(t, i);
+
+		if (ti->type->iterate_devices &&
+		    ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
+			return true;
+	}
+
+	return false;
+}
+
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 			       struct queue_limits *limits)
 {
@@ -1897,6 +1927,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	dm_table_verify_integrity(t);
 
 	/*
+	 * Some devices don't use blk_integrity but still want stable pages
+	 * because they do their own checksumming.
+	 */
+	if (dm_table_requires_stable_pages(t))
+		q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
+	else
+		q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
+
+	/*
 	 * Determine whether or not this queue's I/O timings contribute
 	 * to the entropy pool, Only request-based targets use this.
 	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
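
The new dm-table.c helpers show DM's usual capability-stacking shape: a table-level property is the logical OR of a per-device predicate, walked through each target's iterate_devices hook. A simplified user-space model of that pattern follows; the struct and function names are hypothetical, not the kernel API.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct dev {
        bool stable_pages_required;
};

struct target {
        struct dev *devs;
        size_t n_devs;
};

/* Per-device predicate, analogous to device_requires_stable_pages(). */
static bool dev_requires_stable_pages(const struct dev *d)
{
        return d->stable_pages_required;
}

/* Table-level property as the OR over all devices of all targets,
 * analogous to dm_table_requires_stable_pages(). */
static bool table_requires_stable_pages(const struct target *targets, size_t n)
{
        for (size_t i = 0; i < n; i++)
                for (size_t j = 0; j < targets[i].n_devs; j++)
                        if (dev_requires_stable_pages(&targets[i].devs[j]))
                                return true;
        return false;
}

int main(void)
{
        struct dev plain = { .stable_pages_required = false };
        struct dev checksummed = { .stable_pages_required = true };
        struct dev linear_devs[] = { plain };
        struct dev striped_devs[] = { plain, checksummed };
        struct target table[] = {
                { .devs = linear_devs,  .n_devs = 1 },
                { .devs = striped_devs, .n_devs = 2 },
        };

        /* One device needing stable pages forces the whole table: prints 1. */
        printf("table requires stable pages: %d\n",
               table_requires_stable_pages(table, 2));
        return 0;
}

Other capability checks in dm-table.c follow the same iterate_devices shape; some invert the logic and require every device to agree rather than any.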
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 68d24056d0b1..043f0761e4a0 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -945,6 +945,15 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
 	}
 }
 
+void disable_discard(struct mapped_device *md)
+{
+	struct queue_limits *limits = dm_get_queue_limits(md);
+
+	/* device doesn't really support DISCARD, disable it */
+	limits->max_discard_sectors = 0;
+	blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
+}
+
 void disable_write_same(struct mapped_device *md)
 {
 	struct queue_limits *limits = dm_get_queue_limits(md);
@@ -970,11 +979,14 @@ static void clone_endio(struct bio *bio)
 	dm_endio_fn endio = tio->ti->type->end_io;
 
 	if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
-		if (bio_op(bio) == REQ_OP_WRITE_SAME &&
-		    !bio->bi_disk->queue->limits.max_write_same_sectors)
+		if (bio_op(bio) == REQ_OP_DISCARD &&
+		    !bio->bi_disk->queue->limits.max_discard_sectors)
+			disable_discard(md);
+		else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
+			 !bio->bi_disk->queue->limits.max_write_same_sectors)
 			disable_write_same(md);
-		if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
-		    !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
+		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
+			 !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
 			disable_write_zeroes(md);
 	}
 
@@ -1042,15 +1054,7 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
 		return -EINVAL;
 	}
 
-	/*
-	 * BIO based queue uses its own splitting. When multipage bvecs
-	 * is switched on, size of the incoming bio may be too big to
-	 * be handled in some targets, such as crypt.
-	 *
-	 * When these targets are ready for the big bio, we can remove
-	 * the limit.
-	 */
-	ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE);
+	ti->max_io_len = (uint32_t) len;
 
 	return 0;
 }
