diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2015-03-21 14:15:13 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-03-21 14:15:13 -0400 |
| commit | da6b9a2049b46efdf8555eadbb22364f70eac848 (patch) | |
| tree | b6f6541efa66c67e55b17eaedc0457fd9c1567c0 | |
| parent | 521d474631310e8aafef7953a8a7f7d1efd42da6 (diff) | |
| parent | e5db29806b99ce2b2640d2e4d4fcb983cea115c5 (diff) | |
Merge tag 'dm-4.0-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull devicemapper fixes from Mike Snitzer:
"A handful of stable fixes for DM:
- fix thin target to always zero-fill reads to unprovisioned blocks
- fix to interlock device destruction's suspend from internal
suspends
- fix 2 snapshot exception store handover bugs
- fix dm-io to cope with DISCARD and WRITE_SAME capabilities changing"
* tag 'dm-4.0-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
dm io: deal with wandering queue limits when handling REQ_DISCARD and REQ_WRITE_SAME
dm snapshot: suspend merging snapshot when doing exception handover
dm snapshot: suspend origin when doing exception handover
dm: hold suspend_lock while suspending device during device deletion
dm thin: fix to consistently zero-fill reads to unprovisioned blocks
| -rw-r--r-- | drivers/md/dm-io.c | 15 | ||||
| -rw-r--r-- | drivers/md/dm-snap.c | 120 | ||||
| -rw-r--r-- | drivers/md/dm-thin.c | 11 | ||||
| -rw-r--r-- | drivers/md/dm.c | 21 | ||||
| -rw-r--r-- | include/linux/device-mapper.h | 1 |
5 files changed, 142 insertions(+), 26 deletions(-)
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 37de0173b6d2..74adcd2c967e 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c | |||
| @@ -289,9 +289,16 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, | |||
| 289 | struct request_queue *q = bdev_get_queue(where->bdev); | 289 | struct request_queue *q = bdev_get_queue(where->bdev); |
| 290 | unsigned short logical_block_size = queue_logical_block_size(q); | 290 | unsigned short logical_block_size = queue_logical_block_size(q); |
| 291 | sector_t num_sectors; | 291 | sector_t num_sectors; |
| 292 | unsigned int uninitialized_var(special_cmd_max_sectors); | ||
| 292 | 293 | ||
| 293 | /* Reject unsupported discard requests */ | 294 | /* |
| 294 | if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) { | 295 | * Reject unsupported discard and write same requests. |
| 296 | */ | ||
| 297 | if (rw & REQ_DISCARD) | ||
| 298 | special_cmd_max_sectors = q->limits.max_discard_sectors; | ||
| 299 | else if (rw & REQ_WRITE_SAME) | ||
| 300 | special_cmd_max_sectors = q->limits.max_write_same_sectors; | ||
| 301 | if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) { | ||
| 295 | dec_count(io, region, -EOPNOTSUPP); | 302 | dec_count(io, region, -EOPNOTSUPP); |
| 296 | return; | 303 | return; |
| 297 | } | 304 | } |
| @@ -317,7 +324,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, | |||
| 317 | store_io_and_region_in_bio(bio, io, region); | 324 | store_io_and_region_in_bio(bio, io, region); |
| 318 | 325 | ||
| 319 | if (rw & REQ_DISCARD) { | 326 | if (rw & REQ_DISCARD) { |
| 320 | num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining); | 327 | num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining); |
| 321 | bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; | 328 | bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; |
| 322 | remaining -= num_sectors; | 329 | remaining -= num_sectors; |
| 323 | } else if (rw & REQ_WRITE_SAME) { | 330 | } else if (rw & REQ_WRITE_SAME) { |
| @@ -326,7 +333,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, | |||
| 326 | */ | 333 | */ |
| 327 | dp->get_page(dp, &page, &len, &offset); | 334 | dp->get_page(dp, &page, &len, &offset); |
| 328 | bio_add_page(bio, page, logical_block_size, offset); | 335 | bio_add_page(bio, page, logical_block_size, offset); |
| 329 | num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining); | 336 | num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining); |
| 330 | bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; | 337 | bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; |
| 331 | 338 | ||
| 332 | offset = 0; | 339 | offset = 0; |
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 8b204ae216ab..f83a0f3fc365 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c | |||
| @@ -20,6 +20,8 @@ | |||
| 20 | #include <linux/log2.h> | 20 | #include <linux/log2.h> |
| 21 | #include <linux/dm-kcopyd.h> | 21 | #include <linux/dm-kcopyd.h> |
| 22 | 22 | ||
| 23 | #include "dm.h" | ||
| 24 | |||
| 23 | #include "dm-exception-store.h" | 25 | #include "dm-exception-store.h" |
| 24 | 26 | ||
| 25 | #define DM_MSG_PREFIX "snapshots" | 27 | #define DM_MSG_PREFIX "snapshots" |
| @@ -291,12 +293,23 @@ struct origin { | |||
| 291 | }; | 293 | }; |
| 292 | 294 | ||
| 293 | /* | 295 | /* |
| 296 | * This structure is allocated for each origin target | ||
| 297 | */ | ||
| 298 | struct dm_origin { | ||
| 299 | struct dm_dev *dev; | ||
| 300 | struct dm_target *ti; | ||
| 301 | unsigned split_boundary; | ||
| 302 | struct list_head hash_list; | ||
| 303 | }; | ||
| 304 | |||
| 305 | /* | ||
| 294 | * Size of the hash table for origin volumes. If we make this | 306 | * Size of the hash table for origin volumes. If we make this |
| 295 | * the size of the minors list then it should be nearly perfect | 307 | * the size of the minors list then it should be nearly perfect |
| 296 | */ | 308 | */ |
| 297 | #define ORIGIN_HASH_SIZE 256 | 309 | #define ORIGIN_HASH_SIZE 256 |
| 298 | #define ORIGIN_MASK 0xFF | 310 | #define ORIGIN_MASK 0xFF |
| 299 | static struct list_head *_origins; | 311 | static struct list_head *_origins; |
| 312 | static struct list_head *_dm_origins; | ||
| 300 | static struct rw_semaphore _origins_lock; | 313 | static struct rw_semaphore _origins_lock; |
| 301 | 314 | ||
| 302 | static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done); | 315 | static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done); |
| @@ -310,12 +323,22 @@ static int init_origin_hash(void) | |||
| 310 | _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head), | 323 | _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head), |
| 311 | GFP_KERNEL); | 324 | GFP_KERNEL); |
| 312 | if (!_origins) { | 325 | if (!_origins) { |
| 313 | DMERR("unable to allocate memory"); | 326 | DMERR("unable to allocate memory for _origins"); |
| 314 | return -ENOMEM; | 327 | return -ENOMEM; |
| 315 | } | 328 | } |
| 316 | |||
| 317 | for (i = 0; i < ORIGIN_HASH_SIZE; i++) | 329 | for (i = 0; i < ORIGIN_HASH_SIZE; i++) |
| 318 | INIT_LIST_HEAD(_origins + i); | 330 | INIT_LIST_HEAD(_origins + i); |
| 331 | |||
| 332 | _dm_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head), | ||
| 333 | GFP_KERNEL); | ||
| 334 | if (!_dm_origins) { | ||
| 335 | DMERR("unable to allocate memory for _dm_origins"); | ||
| 336 | kfree(_origins); | ||
| 337 | return -ENOMEM; | ||
| 338 | } | ||
| 339 | for (i = 0; i < ORIGIN_HASH_SIZE; i++) | ||
| 340 | INIT_LIST_HEAD(_dm_origins + i); | ||
| 341 | |||
| 319 | init_rwsem(&_origins_lock); | 342 | init_rwsem(&_origins_lock); |
| 320 | 343 | ||
| 321 | return 0; | 344 | return 0; |
| @@ -324,6 +347,7 @@ static int init_origin_hash(void) | |||
| 324 | static void exit_origin_hash(void) | 347 | static void exit_origin_hash(void) |
| 325 | { | 348 | { |
| 326 | kfree(_origins); | 349 | kfree(_origins); |
| 350 | kfree(_dm_origins); | ||
| 327 | } | 351 | } |
| 328 | 352 | ||
| 329 | static unsigned origin_hash(struct block_device *bdev) | 353 | static unsigned origin_hash(struct block_device *bdev) |
| @@ -350,6 +374,30 @@ static void __insert_origin(struct origin *o) | |||
| 350 | list_add_tail(&o->hash_list, sl); | 374 | list_add_tail(&o->hash_list, sl); |
| 351 | } | 375 | } |
| 352 | 376 | ||
| 377 | static struct dm_origin *__lookup_dm_origin(struct block_device *origin) | ||
| 378 | { | ||
| 379 | struct list_head *ol; | ||
| 380 | struct dm_origin *o; | ||
| 381 | |||
| 382 | ol = &_dm_origins[origin_hash(origin)]; | ||
| 383 | list_for_each_entry (o, ol, hash_list) | ||
| 384 | if (bdev_equal(o->dev->bdev, origin)) | ||
| 385 | return o; | ||
| 386 | |||
| 387 | return NULL; | ||
| 388 | } | ||
| 389 | |||
| 390 | static void __insert_dm_origin(struct dm_origin *o) | ||
| 391 | { | ||
| 392 | struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)]; | ||
| 393 | list_add_tail(&o->hash_list, sl); | ||
| 394 | } | ||
| 395 | |||
| 396 | static void __remove_dm_origin(struct dm_origin *o) | ||
| 397 | { | ||
| 398 | list_del(&o->hash_list); | ||
| 399 | } | ||
| 400 | |||
| 353 | /* | 401 | /* |
| 354 | * _origins_lock must be held when calling this function. | 402 | * _origins_lock must be held when calling this function. |
| 355 | * Returns number of snapshots registered using the supplied cow device, plus: | 403 | * Returns number of snapshots registered using the supplied cow device, plus: |
| @@ -1840,9 +1888,40 @@ static int snapshot_preresume(struct dm_target *ti) | |||
| 1840 | static void snapshot_resume(struct dm_target *ti) | 1888 | static void snapshot_resume(struct dm_target *ti) |
| 1841 | { | 1889 | { |
| 1842 | struct dm_snapshot *s = ti->private; | 1890 | struct dm_snapshot *s = ti->private; |
| 1843 | struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; | 1891 | struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL; |
| 1892 | struct dm_origin *o; | ||
| 1893 | struct mapped_device *origin_md = NULL; | ||
| 1894 | bool must_restart_merging = false; | ||
| 1844 | 1895 | ||
| 1845 | down_read(&_origins_lock); | 1896 | down_read(&_origins_lock); |
| 1897 | |||
| 1898 | o = __lookup_dm_origin(s->origin->bdev); | ||
| 1899 | if (o) | ||
| 1900 | origin_md = dm_table_get_md(o->ti->table); | ||
| 1901 | if (!origin_md) { | ||
| 1902 | (void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging); | ||
| 1903 | if (snap_merging) | ||
| 1904 | origin_md = dm_table_get_md(snap_merging->ti->table); | ||
| 1905 | } | ||
| 1906 | if (origin_md == dm_table_get_md(ti->table)) | ||
| 1907 | origin_md = NULL; | ||
| 1908 | if (origin_md) { | ||
| 1909 | if (dm_hold(origin_md)) | ||
| 1910 | origin_md = NULL; | ||
| 1911 | } | ||
| 1912 | |||
| 1913 | up_read(&_origins_lock); | ||
| 1914 | |||
| 1915 | if (origin_md) { | ||
| 1916 | dm_internal_suspend_fast(origin_md); | ||
| 1917 | if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) { | ||
| 1918 | must_restart_merging = true; | ||
| 1919 | stop_merge(snap_merging); | ||
| 1920 | } | ||
| 1921 | } | ||
| 1922 | |||
| 1923 | down_read(&_origins_lock); | ||
| 1924 | |||
| 1846 | (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); | 1925 | (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); |
| 1847 | if (snap_src && snap_dest) { | 1926 | if (snap_src && snap_dest) { |
| 1848 | down_write(&snap_src->lock); | 1927 | down_write(&snap_src->lock); |
| @@ -1851,8 +1930,16 @@ static void snapshot_resume(struct dm_target *ti) | |||
| 1851 | up_write(&snap_dest->lock); | 1930 | up_write(&snap_dest->lock); |
| 1852 | up_write(&snap_src->lock); | 1931 | up_write(&snap_src->lock); |
| 1853 | } | 1932 | } |
| 1933 | |||
| 1854 | up_read(&_origins_lock); | 1934 | up_read(&_origins_lock); |
| 1855 | 1935 | ||
| 1936 | if (origin_md) { | ||
| 1937 | if (must_restart_merging) | ||
| 1938 | start_merge(snap_merging); | ||
| 1939 | dm_internal_resume_fast(origin_md); | ||
| 1940 | dm_put(origin_md); | ||
| 1941 | } | ||
| 1942 | |||
| 1856 | /* Now we have correct chunk size, reregister */ | 1943 | /* Now we have correct chunk size, reregister */ |
| 1857 | reregister_snapshot(s); | 1944 | reregister_snapshot(s); |
| 1858 | 1945 | ||
| @@ -2133,11 +2220,6 @@ static int origin_write_extent(struct dm_snapshot *merging_snap, | |||
| 2133 | * Origin: maps a linear range of a device, with hooks for snapshotting. | 2220 | * Origin: maps a linear range of a device, with hooks for snapshotting. |
| 2134 | */ | 2221 | */ |
| 2135 | 2222 | ||
| 2136 | struct dm_origin { | ||
| 2137 | struct dm_dev *dev; | ||
| 2138 | unsigned split_boundary; | ||
| 2139 | }; | ||
| 2140 | |||
| 2141 | /* | 2223 | /* |
| 2142 | * Construct an origin mapping: <dev_path> | 2224 | * Construct an origin mapping: <dev_path> |
| 2143 | * The context for an origin is merely a 'struct dm_dev *' | 2225 | * The context for an origin is merely a 'struct dm_dev *' |
| @@ -2166,6 +2248,7 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
| 2166 | goto bad_open; | 2248 | goto bad_open; |
| 2167 | } | 2249 | } |
| 2168 | 2250 | ||
| 2251 | o->ti = ti; | ||
| 2169 | ti->private = o; | 2252 | ti->private = o; |
| 2170 | ti->num_flush_bios = 1; | 2253 | ti->num_flush_bios = 1; |
| 2171 | 2254 | ||
| @@ -2180,6 +2263,7 @@ bad_alloc: | |||
| 2180 | static void origin_dtr(struct dm_target *ti) | 2263 | static void origin_dtr(struct dm_target *ti) |
| 2181 | { | 2264 | { |
| 2182 | struct dm_origin *o = ti->private; | 2265 | struct dm_origin *o = ti->private; |
| 2266 | |||
| 2183 | dm_put_device(ti, o->dev); | 2267 | dm_put_device(ti, o->dev); |
| 2184 | kfree(o); | 2268 | kfree(o); |
| 2185 | } | 2269 | } |
| @@ -2216,6 +2300,19 @@ static void origin_resume(struct dm_target *ti) | |||
| 2216 | struct dm_origin *o = ti->private; | 2300 | struct dm_origin *o = ti->private; |
| 2217 | 2301 | ||
| 2218 | o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev); | 2302 | o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev); |
| 2303 | |||
| 2304 | down_write(&_origins_lock); | ||
| 2305 | __insert_dm_origin(o); | ||
| 2306 | up_write(&_origins_lock); | ||
| 2307 | } | ||
| 2308 | |||
| 2309 | static void origin_postsuspend(struct dm_target *ti) | ||
| 2310 | { | ||
| 2311 | struct dm_origin *o = ti->private; | ||
| 2312 | |||
| 2313 | down_write(&_origins_lock); | ||
| 2314 | __remove_dm_origin(o); | ||
| 2315 | up_write(&_origins_lock); | ||
| 2219 | } | 2316 | } |
| 2220 | 2317 | ||
| 2221 | static void origin_status(struct dm_target *ti, status_type_t type, | 2318 | static void origin_status(struct dm_target *ti, status_type_t type, |
| @@ -2258,12 +2355,13 @@ static int origin_iterate_devices(struct dm_target *ti, | |||
| 2258 | 2355 | ||
| 2259 | static struct target_type origin_target = { | 2356 | static struct target_type origin_target = { |
| 2260 | .name = "snapshot-origin", | 2357 | .name = "snapshot-origin", |
| 2261 | .version = {1, 8, 1}, | 2358 | .version = {1, 9, 0}, |
| 2262 | .module = THIS_MODULE, | 2359 | .module = THIS_MODULE, |
| 2263 | .ctr = origin_ctr, | 2360 | .ctr = origin_ctr, |
| 2264 | .dtr = origin_dtr, | 2361 | .dtr = origin_dtr, |
| 2265 | .map = origin_map, | 2362 | .map = origin_map, |
| 2266 | .resume = origin_resume, | 2363 | .resume = origin_resume, |
| 2364 | .postsuspend = origin_postsuspend, | ||
| 2267 | .status = origin_status, | 2365 | .status = origin_status, |
| 2268 | .merge = origin_merge, | 2366 | .merge = origin_merge, |
| 2269 | .iterate_devices = origin_iterate_devices, | 2367 | .iterate_devices = origin_iterate_devices, |
| @@ -2271,7 +2369,7 @@ static struct target_type origin_target = { | |||
| 2271 | 2369 | ||
| 2272 | static struct target_type snapshot_target = { | 2370 | static struct target_type snapshot_target = { |
| 2273 | .name = "snapshot", | 2371 | .name = "snapshot", |
| 2274 | .version = {1, 12, 0}, | 2372 | .version = {1, 13, 0}, |
| 2275 | .module = THIS_MODULE, | 2373 | .module = THIS_MODULE, |
| 2276 | .ctr = snapshot_ctr, | 2374 | .ctr = snapshot_ctr, |
| 2277 | .dtr = snapshot_dtr, | 2375 | .dtr = snapshot_dtr, |
| @@ -2285,7 +2383,7 @@ static struct target_type snapshot_target = { | |||
| 2285 | 2383 | ||
| 2286 | static struct target_type merge_target = { | 2384 | static struct target_type merge_target = { |
| 2287 | .name = dm_snapshot_merge_target_name, | 2385 | .name = dm_snapshot_merge_target_name, |
| 2288 | .version = {1, 2, 0}, | 2386 | .version = {1, 3, 0}, |
| 2289 | .module = THIS_MODULE, | 2387 | .module = THIS_MODULE, |
| 2290 | .ctr = snapshot_ctr, | 2388 | .ctr = snapshot_ctr, |
| 2291 | .dtr = snapshot_dtr, | 2389 | .dtr = snapshot_dtr, |
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 654773cb1eee..921aafd12aee 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
| @@ -2358,17 +2358,6 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) | |||
| 2358 | return DM_MAPIO_REMAPPED; | 2358 | return DM_MAPIO_REMAPPED; |
| 2359 | 2359 | ||
| 2360 | case -ENODATA: | 2360 | case -ENODATA: |
| 2361 | if (get_pool_mode(tc->pool) == PM_READ_ONLY) { | ||
| 2362 | /* | ||
| 2363 | * This block isn't provisioned, and we have no way | ||
| 2364 | * of doing so. | ||
| 2365 | */ | ||
| 2366 | handle_unserviceable_bio(tc->pool, bio); | ||
| 2367 | cell_defer_no_holder(tc, virt_cell); | ||
| 2368 | return DM_MAPIO_SUBMITTED; | ||
| 2369 | } | ||
| 2370 | /* fall through */ | ||
| 2371 | |||
| 2372 | case -EWOULDBLOCK: | 2361 | case -EWOULDBLOCK: |
| 2373 | thin_defer_cell(tc, virt_cell); | 2362 | thin_defer_cell(tc, virt_cell); |
| 2374 | return DM_MAPIO_SUBMITTED; | 2363 | return DM_MAPIO_SUBMITTED; |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 73f28802dc7a..9b641b38b857 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
| @@ -2616,6 +2616,19 @@ void dm_get(struct mapped_device *md) | |||
| 2616 | BUG_ON(test_bit(DMF_FREEING, &md->flags)); | 2616 | BUG_ON(test_bit(DMF_FREEING, &md->flags)); |
| 2617 | } | 2617 | } |
| 2618 | 2618 | ||
| 2619 | int dm_hold(struct mapped_device *md) | ||
| 2620 | { | ||
| 2621 | spin_lock(&_minor_lock); | ||
| 2622 | if (test_bit(DMF_FREEING, &md->flags)) { | ||
| 2623 | spin_unlock(&_minor_lock); | ||
| 2624 | return -EBUSY; | ||
| 2625 | } | ||
| 2626 | dm_get(md); | ||
| 2627 | spin_unlock(&_minor_lock); | ||
| 2628 | return 0; | ||
| 2629 | } | ||
| 2630 | EXPORT_SYMBOL_GPL(dm_hold); | ||
| 2631 | |||
| 2619 | const char *dm_device_name(struct mapped_device *md) | 2632 | const char *dm_device_name(struct mapped_device *md) |
| 2620 | { | 2633 | { |
| 2621 | return md->name; | 2634 | return md->name; |
| @@ -2638,10 +2651,16 @@ static void __dm_destroy(struct mapped_device *md, bool wait) | |||
| 2638 | if (dm_request_based(md)) | 2651 | if (dm_request_based(md)) |
| 2639 | flush_kthread_worker(&md->kworker); | 2652 | flush_kthread_worker(&md->kworker); |
| 2640 | 2653 | ||
| 2654 | /* | ||
| 2655 | * Take suspend_lock so that presuspend and postsuspend methods | ||
| 2656 | * do not race with internal suspend. | ||
| 2657 | */ | ||
| 2658 | mutex_lock(&md->suspend_lock); | ||
| 2641 | if (!dm_suspended_md(md)) { | 2659 | if (!dm_suspended_md(md)) { |
| 2642 | dm_table_presuspend_targets(map); | 2660 | dm_table_presuspend_targets(map); |
| 2643 | dm_table_postsuspend_targets(map); | 2661 | dm_table_postsuspend_targets(map); |
| 2644 | } | 2662 | } |
| 2663 | mutex_unlock(&md->suspend_lock); | ||
| 2645 | 2664 | ||
| 2646 | /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ | 2665 | /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ |
| 2647 | dm_put_live_table(md, srcu_idx); | 2666 | dm_put_live_table(md, srcu_idx); |
| @@ -3115,6 +3134,7 @@ void dm_internal_suspend_fast(struct mapped_device *md) | |||
| 3115 | flush_workqueue(md->wq); | 3134 | flush_workqueue(md->wq); |
| 3116 | dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); | 3135 | dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); |
| 3117 | } | 3136 | } |
| 3137 | EXPORT_SYMBOL_GPL(dm_internal_suspend_fast); | ||
| 3118 | 3138 | ||
| 3119 | void dm_internal_resume_fast(struct mapped_device *md) | 3139 | void dm_internal_resume_fast(struct mapped_device *md) |
| 3120 | { | 3140 | { |
| @@ -3126,6 +3146,7 @@ void dm_internal_resume_fast(struct mapped_device *md) | |||
| 3126 | done: | 3146 | done: |
| 3127 | mutex_unlock(&md->suspend_lock); | 3147 | mutex_unlock(&md->suspend_lock); |
| 3128 | } | 3148 | } |
| 3149 | EXPORT_SYMBOL_GPL(dm_internal_resume_fast); | ||
| 3129 | 3150 | ||
| 3130 | /*----------------------------------------------------------------- | 3151 | /*----------------------------------------------------------------- |
| 3131 | * Event notification. | 3152 | * Event notification. |
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 2646aed1d3fe..fd23978d93fe 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h | |||
| @@ -375,6 +375,7 @@ int dm_create(int minor, struct mapped_device **md); | |||
| 375 | */ | 375 | */ |
| 376 | struct mapped_device *dm_get_md(dev_t dev); | 376 | struct mapped_device *dm_get_md(dev_t dev); |
| 377 | void dm_get(struct mapped_device *md); | 377 | void dm_get(struct mapped_device *md); |
| 378 | int dm_hold(struct mapped_device *md); | ||
| 378 | void dm_put(struct mapped_device *md); | 379 | void dm_put(struct mapped_device *md); |
| 379 | 380 | ||
| 380 | /* | 381 | /* |
