diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-12-22 17:47:17 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-12-22 17:47:17 -0500 |
commit | aa39477b5692611b91ac9455ae588738852b3f60 (patch) | |
tree | ab956e5718a8bd7d34d5e988351ec7ee4cf4b6e1 | |
parent | 48ec833b7851438f02164ea846852ce4696f09ad (diff) | |
parent | 5164bece1673cdf04782f8ed3fba70743700f5da (diff) |
Merge tag 'dm-3.19-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper fixes from Mike Snitzer:
"Three stable fixes and one fix for a regression introduced during 3.19
merge:
- Fix inability to discard used space when the thin-pool target is in
out-of-data-space mode and also transition the thin-pool back to
write mode once free space is made available.
- Fix DM core bio-based end_io bug that prevented proper
post-processing of the error code returned from the block layer.
- Fix crash in DM thin-pool due to thin device being added to the
pool's active_thins list before properly initializing the thin
device's refcount"
* tag 'dm-3.19-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
dm: fix missed error code if .end_io isn't implemented by target_type
dm thin: fix crash by initializing thin device's refcount and completion earlier
dm thin: fix missing out-of-data-space to write mode transition if blocks are released
dm thin: fix inability to discard blocks when in out-of-data-space mode
-rw-r--r-- | drivers/md/dm-thin.c | 29 | ||||
-rw-r--r-- | drivers/md/dm.c | 2 |
2 files changed, 24 insertions, 7 deletions
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 8735543eacdb..493478989dbd 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
@@ -1127,6 +1127,24 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block, | |||
1127 | schedule_zero(tc, virt_block, data_dest, cell, bio); | 1127 | schedule_zero(tc, virt_block, data_dest, cell, bio); |
1128 | } | 1128 | } |
1129 | 1129 | ||
1130 | static void set_pool_mode(struct pool *pool, enum pool_mode new_mode); | ||
1131 | |||
1132 | static void check_for_space(struct pool *pool) | ||
1133 | { | ||
1134 | int r; | ||
1135 | dm_block_t nr_free; | ||
1136 | |||
1137 | if (get_pool_mode(pool) != PM_OUT_OF_DATA_SPACE) | ||
1138 | return; | ||
1139 | |||
1140 | r = dm_pool_get_free_block_count(pool->pmd, &nr_free); | ||
1141 | if (r) | ||
1142 | return; | ||
1143 | |||
1144 | if (nr_free) | ||
1145 | set_pool_mode(pool, PM_WRITE); | ||
1146 | } | ||
1147 | |||
1130 | /* | 1148 | /* |
1131 | * A non-zero return indicates read_only or fail_io mode. | 1149 | * A non-zero return indicates read_only or fail_io mode. |
1132 | * Many callers don't care about the return value. | 1150 | * Many callers don't care about the return value. |
@@ -1141,6 +1159,8 @@ static int commit(struct pool *pool) | |||
1141 | r = dm_pool_commit_metadata(pool->pmd); | 1159 | r = dm_pool_commit_metadata(pool->pmd); |
1142 | if (r) | 1160 | if (r) |
1143 | metadata_operation_failed(pool, "dm_pool_commit_metadata", r); | 1161 | metadata_operation_failed(pool, "dm_pool_commit_metadata", r); |
1162 | else | ||
1163 | check_for_space(pool); | ||
1144 | 1164 | ||
1145 | return r; | 1165 | return r; |
1146 | } | 1166 | } |
@@ -1159,8 +1179,6 @@ static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks) | |||
1159 | } | 1179 | } |
1160 | } | 1180 | } |
1161 | 1181 | ||
1162 | static void set_pool_mode(struct pool *pool, enum pool_mode new_mode); | ||
1163 | |||
1164 | static int alloc_data_block(struct thin_c *tc, dm_block_t *result) | 1182 | static int alloc_data_block(struct thin_c *tc, dm_block_t *result) |
1165 | { | 1183 | { |
1166 | int r; | 1184 | int r; |
@@ -2155,7 +2173,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) | |||
2155 | pool->process_cell = process_cell_read_only; | 2173 | pool->process_cell = process_cell_read_only; |
2156 | pool->process_discard_cell = process_discard_cell; | 2174 | pool->process_discard_cell = process_discard_cell; |
2157 | pool->process_prepared_mapping = process_prepared_mapping; | 2175 | pool->process_prepared_mapping = process_prepared_mapping; |
2158 | pool->process_prepared_discard = process_prepared_discard_passdown; | 2176 | pool->process_prepared_discard = process_prepared_discard; |
2159 | 2177 | ||
2160 | if (!pool->pf.error_if_no_space && no_space_timeout) | 2178 | if (!pool->pf.error_if_no_space && no_space_timeout) |
2161 | queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout); | 2179 | queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout); |
@@ -3814,6 +3832,8 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv) | |||
3814 | r = -EINVAL; | 3832 | r = -EINVAL; |
3815 | goto bad; | 3833 | goto bad; |
3816 | } | 3834 | } |
3835 | atomic_set(&tc->refcount, 1); | ||
3836 | init_completion(&tc->can_destroy); | ||
3817 | list_add_tail_rcu(&tc->list, &tc->pool->active_thins); | 3837 | list_add_tail_rcu(&tc->list, &tc->pool->active_thins); |
3818 | spin_unlock_irqrestore(&tc->pool->lock, flags); | 3838 | spin_unlock_irqrestore(&tc->pool->lock, flags); |
3819 | /* | 3839 | /* |
@@ -3826,9 +3846,6 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv) | |||
3826 | 3846 | ||
3827 | dm_put(pool_md); | 3847 | dm_put(pool_md); |
3828 | 3848 | ||
3829 | atomic_set(&tc->refcount, 1); | ||
3830 | init_completion(&tc->can_destroy); | ||
3831 | |||
3832 | return 0; | 3849 | return 0; |
3833 | 3850 | ||
3834 | bad: | 3851 | bad: |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 4c06585bf165..b98cd9d84435 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -899,7 +899,7 @@ static void disable_write_same(struct mapped_device *md) | |||
899 | 899 | ||
900 | static void clone_endio(struct bio *bio, int error) | 900 | static void clone_endio(struct bio *bio, int error) |
901 | { | 901 | { |
902 | int r = 0; | 902 | int r = error; |
903 | struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); | 903 | struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); |
904 | struct dm_io *io = tio->io; | 904 | struct dm_io *io = tio->io; |
905 | struct mapped_device *md = tio->io->md; | 905 | struct mapped_device *md = tio->io->md; |