author     Linus Torvalds <torvalds@linux-foundation.org>  2015-07-17 23:53:57 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-07-17 23:53:57 -0400
commit     3f8476fe892c58c0583f98f17e98fd67c3fec466 (patch)
tree       0787ca7ae13ef1937498c88de0ed0d09613087b5
parent     eb254374a30cc53f976f2302f2198813a3b687ea (diff)
parent     665022d72f9b5762f21b5ea02fa0503d04802849 (diff)
Merge tag 'dm-4.2-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper fixes from Mike Snitzer:
- revert a request-based DM core change that caused IO latency to
increase and adversely impact both throughput and system load
- fix for a use-after-free bug in DM core's device cleanup
- a couple of DM btree removal fixes (the btree code is used by dm-thinp)
- a DM thinp fix for an order-5 allocation failure
- a DM thinp fix to not degrade to read-only metadata mode when in
out-of-data-space mode for longer than the 'no_space_timeout'
- fix a long-standing oversight in both dm-thinp and dm-cache by now
exporting 'needs_check' in status if it was set in metadata
- fix an embarrassing dm-cache busy-loop that caused worker threads to
eat CPU even when no IO was actively being issued to the cache device
* tag 'dm-4.2-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
dm cache: avoid calls to prealloc_free_structs() if possible
dm cache: avoid preallocation if no work in writeback_some_dirty_blocks()
dm cache: do not wake_worker() in free_migration()
dm cache: display 'needs_check' in status if it is set
dm thin: display 'needs_check' in status if it is set
dm thin: stay in out-of-data-space mode once no_space_timeout expires
dm: fix use after free crash due to incorrect cleanup sequence
Revert "dm: only run the queue on completion if congested or no requests pending"
dm btree: silence lockdep lock inversion in dm_btree_del()
dm thin: allocate the cell_sort_array dynamically
dm btree remove: fix bug in redistribute3
-rw-r--r--  Documentation/device-mapper/cache.txt              |  6
-rw-r--r--  Documentation/device-mapper/thin-provisioning.txt  |  9
-rw-r--r--  drivers/md/dm-cache-target.c                       | 38
-rw-r--r--  drivers/md/dm-thin.c                               | 44
-rw-r--r--  drivers/md/dm.c                                    | 12
-rw-r--r--  drivers/md/persistent-data/dm-btree-remove.c       |  6
-rw-r--r--  drivers/md/persistent-data/dm-btree.c              |  2
7 files changed, 82 insertions, 35 deletions
diff --git a/Documentation/device-mapper/cache.txt b/Documentation/device-mapper/cache.txt
index 82960cffbad3..785eab87aa71 100644
--- a/Documentation/device-mapper/cache.txt
+++ b/Documentation/device-mapper/cache.txt
@@ -258,6 +258,12 @@ cache metadata mode : ro if read-only, rw if read-write
 no further I/O will be permitted and the status will just
 contain the string 'Fail'. The userspace recovery tools
 should then be used.
+needs_check : 'needs_check' if set, '-' if not set
+A metadata operation has failed, resulting in the needs_check
+flag being set in the metadata's superblock. The metadata
+device must be deactivated and checked/repaired before the
+cache can be made fully operational again. '-' indicates
+needs_check is not set.
 
 Messages
 --------
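With this change the cache status line gains one field, emitted right after the metadata mode. As a purely hypothetical illustration (the counters and field values are invented, only the field order comes from the cache_status() hunk further down), the tail of a dmsetup status line would read "... rw -" on a healthy device and "... ro needs_check" once a metadata operation has failed.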
diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
index 4f67578b2954..1699a55b7b70 100644
--- a/Documentation/device-mapper/thin-provisioning.txt
+++ b/Documentation/device-mapper/thin-provisioning.txt
@@ -296,7 +296,7 @@ ii) Status
 underlying device. When this is enabled when loading the table,
 it can get disabled if the underlying device doesn't support it.
 
-ro|rw
+ro|rw|out_of_data_space
 If the pool encounters certain types of device failures it will
 drop into a read-only metadata mode in which no changes to
 the pool metadata (like allocating new blocks) are permitted.
@@ -314,6 +314,13 @@ ii) Status
 module parameter can be used to change this timeout -- it
 defaults to 60 seconds but may be disabled using a value of 0.
 
+needs_check
+A metadata operation has failed, resulting in the needs_check
+flag being set in the metadata's superblock. The metadata
+device must be deactivated and checked/repaired before the
+thin-pool can be made fully operational again. '-' indicates
+needs_check is not set.
+
 iii) Messages
 
 create_thin <dev id>
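Read together with the no_space_timeout change in the dm-thin.c diff below: a pool that exhausts data space and sits there past the timeout now stays in out_of_data_space mode but flips to erroring IO instead of degrading to ro. As a hypothetical example of the documented status tail (field values invented, token names taken from pool_status()), that transition would show up as "... rw discard_passdown queue_if_no_space -" becoming "... out_of_data_space discard_passdown error_if_no_space -".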
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1b4e1756b169..b680da5d7b93 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -424,7 +424,6 @@ static void free_migration(struct dm_cache_migration *mg)
 	wake_up(&cache->migration_wait);
 
 	mempool_free(mg, cache->migration_pool);
-	wake_worker(cache);
 }
 
 static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
@@ -1947,6 +1946,7 @@ static int commit_if_needed(struct cache *cache)
 
 static void process_deferred_bios(struct cache *cache)
 {
+	bool prealloc_used = false;
 	unsigned long flags;
 	struct bio_list bios;
 	struct bio *bio;
@@ -1981,13 +1981,16 @@ static void process_deferred_bios(struct cache *cache)
 			process_discard_bio(cache, &structs, bio);
 		else
 			process_bio(cache, &structs, bio);
+		prealloc_used = true;
 	}
 
-	prealloc_free_structs(cache, &structs);
+	if (prealloc_used)
+		prealloc_free_structs(cache, &structs);
 }
 
 static void process_deferred_cells(struct cache *cache)
 {
+	bool prealloc_used = false;
 	unsigned long flags;
 	struct dm_bio_prison_cell *cell, *tmp;
 	struct list_head cells;
@@ -2015,9 +2018,11 @@ static void process_deferred_cells(struct cache *cache)
 		}
 
 		process_cell(cache, &structs, cell);
+		prealloc_used = true;
 	}
 
-	prealloc_free_structs(cache, &structs);
+	if (prealloc_used)
+		prealloc_free_structs(cache, &structs);
 }
 
 static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
@@ -2062,7 +2067,7 @@ static void process_deferred_writethrough_bios(struct cache *cache)
 
 static void writeback_some_dirty_blocks(struct cache *cache)
 {
-	int r = 0;
+	bool prealloc_used = false;
 	dm_oblock_t oblock;
 	dm_cblock_t cblock;
 	struct prealloc structs;
@@ -2072,23 +2077,21 @@ static void writeback_some_dirty_blocks(struct cache *cache)
 	memset(&structs, 0, sizeof(structs));
 
 	while (spare_migration_bandwidth(cache)) {
-		if (prealloc_data_structs(cache, &structs))
-			break;
+		if (policy_writeback_work(cache->policy, &oblock, &cblock, busy))
+			break; /* no work to do */
 
-		r = policy_writeback_work(cache->policy, &oblock, &cblock, busy);
-		if (r)
-			break;
-
-		r = get_cell(cache, oblock, &structs, &old_ocell);
-		if (r) {
+		if (prealloc_data_structs(cache, &structs) ||
+		    get_cell(cache, oblock, &structs, &old_ocell)) {
 			policy_set_dirty(cache->policy, oblock);
 			break;
 		}
 
 		writeback(cache, &structs, oblock, cblock, old_ocell);
+		prealloc_used = true;
 	}
 
-	prealloc_free_structs(cache, &structs);
+	if (prealloc_used)
+		prealloc_free_structs(cache, &structs);
 }
 
 /*----------------------------------------------------------------
@@ -3496,7 +3499,7 @@ static void cache_resume(struct dm_target *ti)
  * <#demotions> <#promotions> <#dirty>
  * <#features> <features>*
  * <#core args> <core args>
- * <policy name> <#policy args> <policy args>* <cache metadata mode>
+ * <policy name> <#policy args> <policy args>* <cache metadata mode> <needs_check>
  */
 static void cache_status(struct dm_target *ti, status_type_t type,
 			 unsigned status_flags, char *result, unsigned maxlen)
@@ -3582,6 +3585,11 @@ static void cache_status(struct dm_target *ti, status_type_t type,
 		else
 			DMEMIT("rw ");
 
+		if (dm_cache_metadata_needs_check(cache->cmd))
+			DMEMIT("needs_check ");
+		else
+			DMEMIT("- ");
+
 		break;
 
 	case STATUSTYPE_TABLE:
@@ -3820,7 +3828,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type cache_target = {
	.name = "cache",
-	.version = {1, 7, 0},
+	.version = {1, 8, 0},
	.module = THIS_MODULE,
	.ctr = cache_ctr,
	.dtr = cache_dtr,
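The three busy-loop hunks above share one pattern: ask the policy whether there is work before preallocating migration structures, and only free (and, with wake_worker() gone from free_migration(), only re-wake) when something was actually allocated. The old order meant an idle cache allocated, found no work, freed, woke the worker from the free, and went around again forever. A standalone userspace sketch of the corrected pattern follows; every name in it is invented for illustration, nothing is kernel API.

/* "Check for work before preallocating": idle callers do no
 * alloc/free churn at all, busy callers pay for it once. */
#include <stdbool.h>
#include <stdlib.h>

struct prealloc { void *buf; };

static bool policy_has_work(int queue_len)
{
	return queue_len > 0;
}

static void worker(int queue_len)
{
	bool prealloc_used = false;
	struct prealloc structs = { NULL };

	while (policy_has_work(queue_len)) {	/* 1. work first... */
		if (!structs.buf) {
			structs.buf = malloc(4096);	/* 2. ...alloc second */
			if (!structs.buf)
				break;
		}
		queue_len--;			/* consume one unit of work */
		prealloc_used = true;
	}

	if (prealloc_used)	/* free only what was really taken */
		free(structs.buf);
}

int main(void)
{
	worker(3);	/* busy device: one alloc, one free */
	worker(0);	/* idle device: no churn, nothing to re-wake */
	return 0;
}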
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index c33f61a4cc28..1c50c580215c 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -18,6 +18,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <linux/sort.h>
 #include <linux/rbtree.h>
 
@@ -268,7 +269,7 @@ struct pool {
	process_mapping_fn process_prepared_mapping;
	process_mapping_fn process_prepared_discard;
 
-	struct dm_bio_prison_cell *cell_sort_array[CELL_SORT_ARRAY_SIZE];
+	struct dm_bio_prison_cell **cell_sort_array;
 };
 
 static enum pool_mode get_pool_mode(struct pool *pool);
@@ -2281,18 +2282,23 @@ static void do_waker(struct work_struct *ws)
	queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
 }
 
+static void notify_of_pool_mode_change_to_oods(struct pool *pool);
+
 /*
  * We're holding onto IO to allow userland time to react. After the
  * timeout either the pool will have been resized (and thus back in
- * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO.
+ * PM_WRITE mode), or we degrade to PM_OUT_OF_DATA_SPACE w/ error_if_no_space.
  */
 static void do_no_space_timeout(struct work_struct *ws)
 {
	struct pool *pool = container_of(to_delayed_work(ws), struct pool,
					 no_space_timeout);
 
-	if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space)
-		set_pool_mode(pool, PM_READ_ONLY);
+	if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
+		pool->pf.error_if_no_space = true;
+		notify_of_pool_mode_change_to_oods(pool);
+		error_retry_list(pool);
+	}
 }
 
 /*----------------------------------------------------------------*/
@@ -2370,6 +2376,14 @@ static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
	       dm_device_name(pool->pool_md), new_mode);
 }
 
+static void notify_of_pool_mode_change_to_oods(struct pool *pool)
+{
+	if (!pool->pf.error_if_no_space)
+		notify_of_pool_mode_change(pool, "out-of-data-space (queue IO)");
+	else
+		notify_of_pool_mode_change(pool, "out-of-data-space (error IO)");
+}
+
 static bool passdown_enabled(struct pool_c *pt)
 {
	return pt->adjusted_pf.discard_passdown;
@@ -2454,7 +2468,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
	 * frequently seeing this mode.
	 */
	if (old_mode != new_mode)
-		notify_of_pool_mode_change(pool, "out-of-data-space");
+		notify_of_pool_mode_change_to_oods(pool);
	pool->process_bio = process_bio_read_only;
	pool->process_discard = process_discard_bio;
	pool->process_cell = process_cell_read_only;
@@ -2777,6 +2791,7 @@ static void __pool_destroy(struct pool *pool)
 {
	__pool_table_remove(pool);
 
+	vfree(pool->cell_sort_array);
	if (dm_pool_metadata_close(pool->pmd) < 0)
		DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
 
@@ -2889,6 +2904,13 @@ static struct pool *pool_create(struct mapped_device *pool_md,
		goto bad_mapping_pool;
	}
 
+	pool->cell_sort_array = vmalloc(sizeof(*pool->cell_sort_array) * CELL_SORT_ARRAY_SIZE);
+	if (!pool->cell_sort_array) {
+		*error = "Error allocating cell sort array";
+		err_p = ERR_PTR(-ENOMEM);
+		goto bad_sort_array;
+	}
+
	pool->ref_count = 1;
	pool->last_commit_jiffies = jiffies;
	pool->pool_md = pool_md;
@@ -2897,6 +2919,8 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 
	return pool;
 
+bad_sort_array:
+	mempool_destroy(pool->mapping_pool);
 bad_mapping_pool:
	dm_deferred_set_destroy(pool->all_io_ds);
 bad_all_io_ds:
@@ -3714,6 +3738,7 @@ static void emit_flags(struct pool_features *pf, char *result,
  * Status line is:
  * <transaction id> <used metadata sectors>/<total metadata sectors>
  * <used data sectors>/<total data sectors> <held metadata root>
+ * <pool mode> <discard config> <no space config> <needs_check>
  */
 static void pool_status(struct dm_target *ti, status_type_t type,
			unsigned status_flags, char *result, unsigned maxlen)
@@ -3815,6 +3840,11 @@ static void pool_status(struct dm_target *ti, status_type_t type,
		else
			DMEMIT("queue_if_no_space ");
 
+		if (dm_pool_metadata_needs_check(pool->pmd))
+			DMEMIT("needs_check ");
+		else
+			DMEMIT("- ");
+
		break;
 
	case STATUSTYPE_TABLE:
@@ -3918,7 +3948,7 @@ static struct target_type pool_target = {
	.name = "thin-pool",
	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
		    DM_TARGET_IMMUTABLE,
-	.version = {1, 15, 0},
+	.version = {1, 16, 0},
	.module = THIS_MODULE,
	.ctr = pool_ctr,
	.dtr = pool_dtr,
@@ -4305,7 +4335,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type thin_target = {
	.name = "thin",
-	.version = {1, 15, 0},
+	.version = {1, 16, 0},
	.module = THIS_MODULE,
	.ctr = thin_ctr,
	.dtr = thin_dtr,
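The cell_sort_array change is mostly sizing arithmetic. Assuming CELL_SORT_ARRAY_SIZE is 8192 (its value in this era of the driver; the constant's definition is outside these hunks) and 8-byte pointers, the embedded array alone is 8192 * 8 = 65536 bytes, so struct pool lands just past 64 KiB and its kmalloc() rounds up to a 128 KiB contiguous request -- the order-5 allocation from the summary, which fails easily on a fragmented system. vmalloc() only needs individual pages and maps them virtually contiguous, at the cost of the matching vfree() added to __pool_destroy().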
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index f331d888e7f5..ab37ae114e94 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1067,13 +1067,10 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
  */
 static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 {
-	int nr_requests_pending;
-
	atomic_dec(&md->pending[rw]);
 
	/* nudge anyone waiting on suspend queue */
-	nr_requests_pending = md_in_flight(md);
-	if (!nr_requests_pending)
+	if (!md_in_flight(md))
		wake_up(&md->wait);
 
	/*
@@ -1085,8 +1082,7 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
	if (run_queue) {
		if (md->queue->mq_ops)
			blk_mq_run_hw_queues(md->queue, true);
-		else if (!nr_requests_pending ||
-			 (nr_requests_pending >= md->queue->nr_congestion_on))
+		else
			blk_run_queue_async(md->queue);
	}
 
@@ -2281,8 +2277,6 @@ static void dm_init_old_md_queue(struct mapped_device *md)
 
 static void cleanup_mapped_device(struct mapped_device *md)
 {
-	cleanup_srcu_struct(&md->io_barrier);
-
	if (md->wq)
		destroy_workqueue(md->wq);
	if (md->kworker_task)
@@ -2294,6 +2288,8 @@ static void cleanup_mapped_device(struct mapped_device *md)
	if (md->bs)
		bioset_free(md->bs);
 
+	cleanup_srcu_struct(&md->io_barrier);
+
	if (md->disk) {
		spin_lock(&_minor_lock);
		md->disk->private_data = NULL;
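The first two hunks are the straight revert named in the summary: running the queue on completion only when congested or idle turned out to add IO latency and hurt throughput, so completion goes back to kicking the queue unconditionally. The cleanup_mapped_device() reordering is the use-after-free fix: work items running off md->wq (and the kworker thread) enter SRCU read-side sections on md->io_barrier -- dm_get_live_table() takes srcu_read_lock(&md->io_barrier) -- so calling cleanup_srcu_struct() before destroy_workqueue() has flushed them lets a reader race against torn-down SRCU state. Moving the SRCU cleanup after the workqueue and thread teardown closes that window; this reading of the ordering is inferred from the hunks rather than stated in them.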
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index e04cfd2d60ef..9836c0ae897c 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -309,8 +309,8 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
 
		if (s < 0 && nr_center < -s) {
			/* not enough in central node */
-			shift(left, center, nr_center);
-			s = nr_center - target;
+			shift(left, center, -nr_center);
+			s += nr_center;
			shift(left, right, s);
			nr_right += s;
		} else
@@ -323,7 +323,7 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
		if (s > 0 && nr_center < s) {
			/* not enough in central node */
			shift(center, right, nr_center);
-			s = target - nr_center;
+			s -= nr_center;
			shift(left, right, s);
			nr_left -= s;
		} else
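A worked example makes the first hunk concrete. Take entry counts left/center/right = 2/2/11, so target = 15/3 = 5 and s = nr_left - target = -3; with nr_center = 2 < 3 the patched branch is taken. The old shift(left, center, nr_center) used a positive count, i.e. it pushed entries out of the already-short left node. The sketch below replays just the entry-count arithmetic of the fixed code; it assumes, as the surrounding calls suggest, that a positive shift count moves entries from the first node to the second and a negative count the reverse.

/* Standalone replay of the fixed redistribute3() counting for the
 * "not enough in central node" path; counts only, no keys/values. */
#include <assert.h>
#include <stdio.h>

static void shift(int *a, int *b, int count)	/* count > 0: a -> b */
{
	*a -= count;
	*b += count;
}

int main(void)
{
	int left = 2, center = 2, right = 11;
	int nr_center = center, nr_right = right;
	int target = (left + center + right) / 3;	/* 5 */
	int s = left - target;				/* -3: left is 3 short */

	assert(s < 0 && nr_center < -s);	/* the patched branch */
	shift(&left, &center, -nr_center);	/* drain center into left: 4/0/11 */
	s += nr_center;				/* -1 still owed to left */
	shift(&left, &right, s);		/* take 1 from right: 5/0/10 */
	nr_right += s;				/* 10 */
	shift(&center, &right, target - nr_right);	/* refill center: 5/5/5 */

	printf("%d/%d/%d\n", left, center, right);
	assert(left == 5 && center == 5 && right == 5);
	return 0;
}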
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 200ac12a1d40..fdd3793e22f9 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -255,7 +255,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
	int r;
	struct del_stack *s;
 
-	s = kmalloc(sizeof(*s), GFP_KERNEL);
+	s = kmalloc(sizeof(*s), GFP_NOIO);
+	if (!s)
		return -ENOMEM;
	s->info = info;
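The likely shape of the inversion lockdep reported here, inferred from the one-line fix rather than shown in the hunk: dm_btree_del() can run with pool metadata locks held on paths that memory reclaim also reaches, and a GFP_KERNEL allocation may recurse into reclaim and issue I/O back into the same stack. GFP_NOIO forbids that re-entry, so the one-word flag change is the minimal, easily backportable fix.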