author    Linus Torvalds <torvalds@linux-foundation.org>  2014-05-30 15:04:56 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-05-30 15:04:56 -0400
commit    24e19d279f9e289e965b4bc4710fbccab824c4c4
tree      e069aec1702532120fdc8a817aab25c87c461410
parent    6538b8ea886e472f4431db8ca1d60478f838d14b
parent    63d832c30142cdceb478b1cac7d943d83b95b2dc
Merge tag 'dm-3.15-fixes-3' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device-mapper fixes from Mike Snitzer:

 "A dm-cache stable fix to split discards on cache block boundaries
  because dm-cache cannot yet handle discards that span cache blocks.

  Really fix a dm-mpath lockdep warning that was introduced in -rc1.

  Add a 'no_space_timeout' control to dm-thinp to restore the ability
  to queue IO indefinitely when no data space is available.  This fixes
  a change in behavior that was introduced in -rc6 where the timeout
  couldn't be disabled"
* tag 'dm-3.15-fixes-3' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
dm mpath: really fix lockdep warning
dm cache: always split discards on cache block boundaries
dm thin: add 'no_space_timeout' dm-thin-pool module param
 Documentation/device-mapper/thin-provisioning.txt |  5 ++++-
 drivers/md/dm-cache-target.c                      |  2 ++
 drivers/md/dm-mpath.c                             | 14 ++++++++------
 drivers/md/dm-thin.c                              | 12 +++++++++---
 4 files changed, 23 insertions(+), 10 deletions(-)
diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
index 05a27e9442bd..2f5173500bd9 100644
--- a/Documentation/device-mapper/thin-provisioning.txt
+++ b/Documentation/device-mapper/thin-provisioning.txt
@@ -309,7 +309,10 @@ ii) Status
     error_if_no_space|queue_if_no_space
 	If the pool runs out of data or metadata space, the pool will
 	either queue or error the IO destined to the data device.  The
-	default is to queue the IO until more space is added.
+	default is to queue the IO until more space is added or the
+	'no_space_timeout' expires.  The 'no_space_timeout' dm-thin-pool
+	module parameter can be used to change this timeout -- it
+	defaults to 60 seconds but may be disabled using a value of 0.
 
 iii) Messages
 
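As a usage note on the documentation text above: the parameter can be set at module load time, or, because it is registered below with S_IWUSR, changed at runtime through sysfs. A minimal userspace sketch of the runtime route, assuming the standard /sys/module path for the dm_thin_pool module (this helper is illustrative, not part of the patch):

#include <stdio.h>

int main(void)
{
	/* The parameter file exists because module_param_named() below is
	 * given S_IWUSR; writing 0 disables the timeout, so IO destined
	 * for the data device queues indefinitely when space runs out. */
	FILE *f = fopen("/sys/module/dm_thin_pool/parameters/no_space_timeout", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("0\n", f);
	fclose(f);
	return 0;
}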
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 9380be7b1895..5f054c44b485 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2178,6 +2178,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 	ti->num_discard_bios = 1;
 	ti->discards_supported = true;
 	ti->discard_zeroes_data_unsupported = true;
+	/* Discard bios must be split on a block boundary */
+	ti->split_discard_bios = true;
 
 	cache->features = ca->features;
 	ti->per_bio_data_size = get_per_bio_data_size(cache);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index fa0f6cbd6a41..ebfa411d1a7d 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -445,11 +445,11 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
 	else
 		m->saved_queue_if_no_path = queue_if_no_path;
 	m->queue_if_no_path = queue_if_no_path;
-	if (!m->queue_if_no_path)
-		dm_table_run_md_queue_async(m->ti->table);
-
 	spin_unlock_irqrestore(&m->lock, flags);
 
+	if (!queue_if_no_path)
+		dm_table_run_md_queue_async(m->ti->table);
+
 	return 0;
 }
 
@@ -954,7 +954,7 @@ out:
  */
 static int reinstate_path(struct pgpath *pgpath)
 {
-	int r = 0;
+	int r = 0, run_queue = 0;
 	unsigned long flags;
 	struct multipath *m = pgpath->pg->m;
 
@@ -978,7 +978,7 @@ static int reinstate_path(struct pgpath *pgpath)
 
 	if (!m->nr_valid_paths++) {
 		m->current_pgpath = NULL;
-		dm_table_run_md_queue_async(m->ti->table);
+		run_queue = 1;
 	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
 		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
 			m->pg_init_in_progress++;
@@ -991,6 +991,8 @@ static int reinstate_path(struct pgpath *pgpath)
 
 out:
 	spin_unlock_irqrestore(&m->lock, flags);
+	if (run_queue)
+		dm_table_run_md_queue_async(m->ti->table);
 
 	return r;
 }
@@ -1566,8 +1568,8 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
 		}
 		if (m->pg_init_required)
 			__pg_init_all_paths(m);
-		dm_table_run_md_queue_async(m->ti->table);
 		spin_unlock_irqrestore(&m->lock, flags);
+		dm_table_run_md_queue_async(m->ti->table);
 	}
 
 	return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
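The common thread in the hunks above is the lockdep fix itself: dm_table_run_md_queue_async() may take other locks, so every call site is moved out from under m->lock, with a local flag (run_queue) recording the decision made while the lock was held. A minimal userspace analogue of that decide-under-lock, act-after-unlock pattern, sketched with pthreads (all names here are illustrative, not kernel code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int queue_if_no_path;

/* Stand-in for dm_table_run_md_queue_async(): may acquire other locks,
 * so it must never be called with state_lock held. */
static void run_queue_async(void)
{
	printf("kicking the request queue\n");
}

static void set_queue_if_no_path(int enable)
{
	int run_queue;

	pthread_mutex_lock(&state_lock);
	queue_if_no_path = enable;
	run_queue = !enable;              /* decide under the lock ... */
	pthread_mutex_unlock(&state_lock);

	if (run_queue)                    /* ... act after dropping it */
		run_queue_async();
}

int main(void)
{
	set_queue_if_no_path(0);
	return 0;
}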
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 2e71de8e0048..242ac2ea5f29 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -27,7 +27,9 @@
 #define MAPPING_POOL_SIZE 1024
 #define PRISON_CELLS 1024
 #define COMMIT_PERIOD HZ
-#define NO_SPACE_TIMEOUT (HZ * 60)
+#define NO_SPACE_TIMEOUT_SECS 60
+
+static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;
 
 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
 		"A percentage of time allocated for copy on write");
@@ -1670,6 +1672,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 	struct pool_c *pt = pool->ti->private;
 	bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
 	enum pool_mode old_mode = get_pool_mode(pool);
+	unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
 
 	/*
 	 * Never allow the pool to transition to PM_WRITE mode if user
@@ -1732,8 +1735,8 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 		pool->process_prepared_mapping = process_prepared_mapping;
 		pool->process_prepared_discard = process_prepared_discard_passdown;
 
-		if (!pool->pf.error_if_no_space)
-			queue_delayed_work(pool->wq, &pool->no_space_timeout, NO_SPACE_TIMEOUT);
+		if (!pool->pf.error_if_no_space && no_space_timeout)
+			queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
 		break;
 
 	case PM_WRITE:
@@ -3508,6 +3511,9 @@ static void dm_thin_exit(void)
 module_init(dm_thin_init);
 module_exit(dm_thin_exit);
 
+module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");
+
 MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
 MODULE_LICENSE("GPL");
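One detail worth noting above: since the parameter is writable at runtime, set_pool_mode() reads it through ACCESS_ONCE() exactly once, so the zero test and the queue_delayed_work() delay are derived from one consistent snapshot. A hedged userspace analogue of that pattern (the macro mirrors the kernel's ACCESS_ONCE definition; the surrounding names are illustrative):

#include <stdio.h>

/* Userspace stand-in for the kernel's ACCESS_ONCE(): force exactly one
 * load of a variable that another thread may update concurrently. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

#define HZ 250                               /* illustrative tick rate */

static unsigned no_space_timeout_secs = 60;  /* the writable tunable */

int main(void)
{
	/* Snapshot once, then derive everything from the snapshot, so a
	 * concurrent write cannot make the test and the use disagree. */
	unsigned long no_space_timeout =
		(unsigned long)ACCESS_ONCE(no_space_timeout_secs) * HZ;

	if (no_space_timeout)
		printf("would queue delayed work for %lu jiffies\n",
		       no_space_timeout);
	else
		printf("timeout disabled: queue IO indefinitely\n");
	return 0;
}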