Diffstat (limited to 'drivers/md/dm.c')
 drivers/md/dm.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3de8d6d5b0b..233a2e9156a 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -397,7 +397,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
 			unsigned int cmd, unsigned long arg)
 {
 	struct mapped_device *md = bdev->bd_disk->private_data;
-	struct dm_table *map = dm_get_table(md);
+	struct dm_table *map = dm_get_live_table(md);
 	struct dm_target *tgt;
 	int r = -ENOTTY;
 
@@ -528,7 +528,7 @@ static void queue_io(struct mapped_device *md, struct bio *bio)
  * function to access the md->map field, and make sure they call
  * dm_table_put() when finished.
  */
-struct dm_table *dm_get_table(struct mapped_device *md)
+struct dm_table *dm_get_live_table(struct mapped_device *md)
 {
 	struct dm_table *t;
 	unsigned long flags;
@@ -1294,7 +1294,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
 	struct clone_info ci;
 	int error = 0;
 
-	ci.map = dm_get_table(md);
+	ci.map = dm_get_live_table(md);
 	if (unlikely(!ci.map)) {
 		if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
 			bio_io_error(bio);
@@ -1335,7 +1335,7 @@ static int dm_merge_bvec(struct request_queue *q,
 			 struct bio_vec *biovec)
 {
 	struct mapped_device *md = q->queuedata;
-	struct dm_table *map = dm_get_table(md);
+	struct dm_table *map = dm_get_live_table(md);
 	struct dm_target *ti;
 	sector_t max_sectors;
 	int max_size = 0;
@@ -1638,7 +1638,7 @@ static void map_request(struct dm_target *ti, struct request *clone,
 static void dm_request_fn(struct request_queue *q)
 {
 	struct mapped_device *md = q->queuedata;
-	struct dm_table *map = dm_get_table(md);
+	struct dm_table *map = dm_get_live_table(md);
 	struct dm_target *ti;
 	struct request *rq, *clone;
 
@@ -1697,7 +1697,7 @@ static int dm_lld_busy(struct request_queue *q)
 {
 	int r;
 	struct mapped_device *md = q->queuedata;
-	struct dm_table *map = dm_get_table(md);
+	struct dm_table *map = dm_get_live_table(md);
 
 	if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
 		r = 1;
@@ -1712,7 +1712,7 @@ static int dm_lld_busy(struct request_queue *q)
 static void dm_unplug_all(struct request_queue *q)
 {
 	struct mapped_device *md = q->queuedata;
-	struct dm_table *map = dm_get_table(md);
+	struct dm_table *map = dm_get_live_table(md);
 
 	if (map) {
 		if (dm_request_based(md))
@@ -1730,7 +1730,7 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
 	struct dm_table *map;
 
 	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
-		map = dm_get_table(md);
+		map = dm_get_live_table(md);
 		if (map) {
 			/*
 			 * Request-based dm cares about only own queue for
@@ -2166,7 +2166,7 @@ void dm_put(struct mapped_device *md)
 	BUG_ON(test_bit(DMF_FREEING, &md->flags));
 
 	if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
-		map = dm_get_table(md);
+		map = dm_get_live_table(md);
 		idr_replace(&_minor_idr, MINOR_ALLOCED,
 			    MINOR(disk_devt(dm_disk(md))));
 		set_bit(DMF_FREEING, &md->flags);
@@ -2302,7 +2302,7 @@ static void dm_rq_set_flush_nr(struct request *clone, unsigned flush_nr)
 static int dm_rq_barrier(struct mapped_device *md)
 {
 	int i, j;
-	struct dm_table *map = dm_get_table(md);
+	struct dm_table *map = dm_get_live_table(md);
 	unsigned num_targets = dm_table_get_num_targets(map);
 	struct dm_target *ti;
 	struct request *clone;
@@ -2453,7 +2453,7 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
 		goto out_unlock;
 	}
 
-	map = dm_get_table(md);
+	map = dm_get_live_table(md);
 
 	/*
 	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
@@ -2558,7 +2558,7 @@ int dm_resume(struct mapped_device *md)
 	if (!dm_suspended(md))
 		goto out;
 
-	map = dm_get_table(md);
+	map = dm_get_live_table(md);
 	if (!map || !dm_table_get_size(map))
 		goto out;
 
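
The comment shown in the queue_io hunk states that callers of dm_get_live_table() must call dm_table_put() when finished with the table. As a minimal sketch of that calling pattern, the hypothetical caller below (example_inspect_live_table is not part of this patch) pairs the get with a put; it assumes the declarations from <linux/device-mapper.h>.

/*
 * Illustrative only -- not part of this patch. A hypothetical caller
 * showing the reference-counted access pattern described in the comment
 * above dm_get_live_table(): every successful get must be paired with
 * a dm_table_put().
 */
#include <linux/device-mapper.h>

static void example_inspect_live_table(struct mapped_device *md)
{
	struct dm_table *map = dm_get_live_table(md);	/* takes a reference on the live table */

	if (!map)
		return;			/* no table has been loaded yet */

	/* ... safely read the table here, e.g. dm_table_get_num_targets(map) ... */

	dm_table_put(map);		/* release the reference when finished */
}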