author     Alasdair G Kergon <agk@redhat.com>    2009-12-10 18:52:23 -0500
committer  Alasdair G Kergon <agk@redhat.com>    2009-12-10 18:52:23 -0500
commit     a794015597a2d9b437470c7692aac77e5fc08cd2 (patch)
tree       d25dd56edd53f8f1890ed005b5531ccfa07c28ee /drivers/md/dm.c
parent     1d0f3ce83200edc5d43723c77c62b09ad6560294 (diff)
dm: bind new table before destroying old
When replacing a mapped device's table during a 'resume', delay the
destruction of the old table until the new one is successfully in place.
This will make it easier for a later patch to transfer internal state
information from the old table to the new one (something we do not currently
support) while giving us more options for reversion if a later part
of the operation fails.
Devices are always in the suspended state during dm_swap_table().
This patch reinforces the requirement that all I/O must have been
flushed from the table targets while in this state (including any in
workqueues). In the case of 'noflush' suspending, unprocessed
I/O should have been 'pushed back' to the dm core prior to this point,
for resubmission after the new table is in place.
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
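
In short, the patch splits "unbind" from "destroy": __unbind() now hands the old table back to its caller instead of freeing it, and the caller destroys it only after the new table has been bound. A minimal sketch of the resulting calling sequence in dm_swap_table(), condensed from the diff below (locking and error handling omitted):

    struct dm_table *old_map;
    int r;

    old_map = __unbind(md);            /* detach the old table but keep it alive */
    r = __bind(md, table, &limits);    /* install the new table in its place */
    dm_table_destroy(old_map);         /* only now free the old table */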
Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r--    drivers/md/dm.c    16
1 files changed, 11 insertions, 5 deletions
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 16f759fe04fe..255b75c61544 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2077,19 +2077,23 @@ static int __bind(struct mapped_device *md, struct dm_table *t,
 	return 0;
 }
 
-static void __unbind(struct mapped_device *md)
+/*
+ * Returns unbound table for the caller to free.
+ */
+static struct dm_table *__unbind(struct mapped_device *md)
 {
 	struct dm_table *map = md->map;
 	unsigned long flags;
 
 	if (!map)
-		return;
+		return NULL;
 
 	dm_table_event_callback(map, NULL, NULL);
 	write_lock_irqsave(&md->map_lock, flags);
 	md->map = NULL;
 	write_unlock_irqrestore(&md->map_lock, flags);
-	dm_table_destroy(map);
+
+	return map;
 }
 
 /*
@@ -2182,7 +2186,7 @@ void dm_put(struct mapped_device *md)
 		}
 		dm_sysfs_exit(md);
 		dm_table_put(map);
-		__unbind(md);
+		dm_table_destroy(__unbind(md));
 		free_dev(md);
 	}
 }
@@ -2368,6 +2372,7 @@ static void dm_rq_barrier_work(struct work_struct *work)
  */
 int dm_swap_table(struct mapped_device *md, struct dm_table *table)
 {
+	struct dm_table *map;
 	struct queue_limits limits;
 	int r = -EINVAL;
 
@@ -2388,8 +2393,9 @@ int dm_swap_table(struct mapped_device *md, struct dm_table *table)
 		goto out;
 	}
 
-	__unbind(md);
+	map = __unbind(md);
 	r = __bind(md, table, &limits);
+	dm_table_destroy(map);
 
 out:
 	mutex_unlock(&md->suspend_lock);