Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r--    drivers/md/dm.c    34
1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 255b75c61544..c254c6cf69a1 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2033,9 +2033,13 @@ static void __set_size(struct mapped_device *md, sector_t size)
 	mutex_unlock(&md->bdev->bd_inode->i_mutex);
 }
 
-static int __bind(struct mapped_device *md, struct dm_table *t,
-		  struct queue_limits *limits)
+/*
+ * Returns old map, which caller must destroy.
+ */
+static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
+			       struct queue_limits *limits)
 {
+	struct dm_table *old_map;
 	struct request_queue *q = md->queue;
 	sector_t size;
 	unsigned long flags;
@@ -2050,11 +2054,6 @@ static int __bind(struct mapped_device *md, struct dm_table *t,
 
 	__set_size(md, size);
 
-	if (!size) {
-		dm_table_destroy(t);
-		return 0;
-	}
-
 	dm_table_event_callback(t, event_callback, md);
 
 	/*
@@ -2070,11 +2069,12 @@ static int __bind(struct mapped_device *md, struct dm_table *t,
 	__bind_mempools(md, t);
 
 	write_lock_irqsave(&md->map_lock, flags);
+	old_map = md->map;
 	md->map = t;
 	dm_table_set_restrictions(t, q, limits);
 	write_unlock_irqrestore(&md->map_lock, flags);
 
-	return 0;
+	return old_map;
 }
 
 /*
@@ -2368,13 +2368,13 @@ static void dm_rq_barrier_work(struct work_struct *work)
 }
 
 /*
- * Swap in a new table (destroying old one).
+ * Swap in a new table, returning the old one for the caller to destroy.
  */
-int dm_swap_table(struct mapped_device *md, struct dm_table *table)
+struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
 {
-	struct dm_table *map;
+	struct dm_table *map = ERR_PTR(-EINVAL);
 	struct queue_limits limits;
-	int r = -EINVAL;
+	int r;
 
 	mutex_lock(&md->suspend_lock);
 
@@ -2383,8 +2383,10 @@ int dm_swap_table(struct mapped_device *md, struct dm_table *table)
 		goto out;
 
 	r = dm_calculate_queue_limits(table, &limits);
-	if (r)
+	if (r) {
+		map = ERR_PTR(r);
 		goto out;
+	}
 
 	/* cannot change the device type, once a table is bound */
 	if (md->map &&
@@ -2393,13 +2395,11 @@ int dm_swap_table(struct mapped_device *md, struct dm_table *table)
 		goto out;
 	}
 
-	map = __unbind(md);
-	r = __bind(md, table, &limits);
-	dm_table_destroy(map);
+	map = __bind(md, table, &limits);
 
 out:
 	mutex_unlock(&md->suspend_lock);
-	return r;
+	return map;
 }
 
 /*
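
With this change the responsibility for destroying the previous table moves to the caller of dm_swap_table(). The sketch below shows roughly how a caller might consume the new return value; it is illustrative only and not part of this commit. The hypothetical helper load_new_table() and its error handling are assumptions, while dm_swap_table(), dm_table_destroy() and the ERR_PTR()/IS_ERR()/PTR_ERR() convention come from the diff above and the standard kernel error-pointer helpers.

#include <linux/err.h>          /* IS_ERR(), PTR_ERR() */

#include "dm.h"                 /* dm_swap_table(), dm_table_destroy() */

/*
 * Hypothetical caller, for illustration only: bind @new_map to @md and
 * dispose of whichever table was previously bound.  With the change
 * above, dm_swap_table() returns the old map (or an ERR_PTR() value on
 * failure) instead of destroying it internally.
 */
static int load_new_table(struct mapped_device *md, struct dm_table *new_map)
{
        struct dm_table *old_map;

        old_map = dm_swap_table(md, new_map);
        if (IS_ERR(old_map)) {
                /* The new table was never bound, so it is still ours to free. */
                dm_table_destroy(new_map);
                return PTR_ERR(old_map);
        }

        /* A device that had no table bound returns NULL here. */
        if (old_map)
                dm_table_destroy(old_map);

        return 0;
}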