author	Mikulas Patocka <mpatocka@redhat.com>	2013-07-10 18:41:18 -0400
committer	Alasdair G Kergon <agk@redhat.com>	2013-07-10 18:41:18 -0400
commit	83d5e5b0af907d46d241a86d9e44003b3f0accbd
tree	46349d07c2090da15c250af3bac40833eb96e9f0 /drivers
parent	2480945cd44b50ba8b1646544eec2db21f064f12
dm: optimize use SRCU and RCU
This patch removes "io_lock" and "map_lock" in struct mapped_device and
"holders" in struct dm_table and replaces these mechanisms with
sleepable RCU (SRCU).
Previously, the code called "dm_get_live_table" and "dm_table_put" to
get and release the table. Now it calls "dm_get_live_table" and
"dm_put_live_table": dm_get_live_table enters the SRCU read-side
critical section and dm_put_live_table leaves it.
dm_get_live_table_fast/dm_put_live_table_fast can be used instead of
dm_get_live_table/dm_put_live_table. These *_fast functions use
non-sleepable RCU, so the caller must not block between them.
Any code that changes the active or inactive dm table must call
dm_sync_table before destroying the old table.
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
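For readers unfamiliar with the new API, here is a minimal sketch of the reader-side pattern the commit message describes. dm_get_live_table, dm_put_live_table and dm_sync_table are the functions added in drivers/md/dm.c below; the wrapper function and do_something_sleepy() are hypothetical, for illustration only:

	/*
	 * Sketch only: how a caller is expected to use the SRCU accessors.
	 * The caller may sleep between get and put; the *_fast variants
	 * (plain RCU) may not.
	 */
	static void example_reader(struct mapped_device *md)
	{
		int srcu_idx;
		struct dm_table *map;

		map = dm_get_live_table(md, &srcu_idx);	/* srcu_read_lock() */
		if (map)
			do_something_sleepy(map);	/* sleeping allowed here */
		dm_put_live_table(md, srcu_idx);	/* srcu_read_unlock() */
	}

The srcu_idx token returned by the get must be passed back to the matching put; that is why every converted call site in the diff below gains an "int srcu_idx" local.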
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/md/dm-ioctl.c	122
-rw-r--r--	drivers/md/dm-table.c	35
-rw-r--r--	drivers/md/dm.c	160
3 files changed, 175 insertions, 142 deletions
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 12f04868e899..f1b758675ec7 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -36,6 +36,14 @@ struct hash_cell {
 	struct dm_table *new_map;
 };
 
+/*
+ * A dummy definition to make RCU happy.
+ * struct dm_table should never be dereferenced in this file.
+ */
+struct dm_table {
+	int undefined__;
+};
+
 struct vers_iter {
 	size_t param_size;
 	struct dm_target_versions *vers, *old_vers;
@@ -242,9 +250,10 @@ static int dm_hash_insert(const char *name, const char *uuid, struct mapped_devi
 	return -EBUSY;
 }
 
-static void __hash_remove(struct hash_cell *hc)
+static struct dm_table *__hash_remove(struct hash_cell *hc)
 {
 	struct dm_table *table;
+	int srcu_idx;
 
 	/* remove from the dev hash */
 	list_del(&hc->uuid_list);
@@ -253,16 +262,18 @@ static void __hash_remove(struct hash_cell *hc)
 	dm_set_mdptr(hc->md, NULL);
 	mutex_unlock(&dm_hash_cells_mutex);
 
-	table = dm_get_live_table(hc->md);
-	if (table) {
+	table = dm_get_live_table(hc->md, &srcu_idx);
+	if (table)
 		dm_table_event(table);
-		dm_table_put(table);
-	}
+	dm_put_live_table(hc->md, srcu_idx);
 
+	table = NULL;
 	if (hc->new_map)
-		dm_table_destroy(hc->new_map);
+		table = hc->new_map;
 	dm_put(hc->md);
 	free_cell(hc);
+
+	return table;
 }
 
 static void dm_hash_remove_all(int keep_open_devices)
@@ -270,6 +281,7 @@ static void dm_hash_remove_all(int keep_open_devices)
 	int i, dev_skipped;
 	struct hash_cell *hc;
 	struct mapped_device *md;
+	struct dm_table *t;
 
 retry:
 	dev_skipped = 0;
@@ -287,10 +299,14 @@ retry:
 				continue;
 			}
 
-			__hash_remove(hc);
+			t = __hash_remove(hc);
 
 			up_write(&_hash_lock);
 
+			if (t) {
+				dm_sync_table(md);
+				dm_table_destroy(t);
+			}
 			dm_put(md);
 			if (likely(keep_open_devices))
 				dm_destroy(md);
@@ -356,6 +372,7 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
 	struct dm_table *table;
 	struct mapped_device *md;
 	unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
+	int srcu_idx;
 
 	/*
 	 * duplicate new.
@@ -418,11 +435,10 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
 	/*
	 * Wake up any dm event waiters.
	 */
-	table = dm_get_live_table(hc->md);
-	if (table) {
+	table = dm_get_live_table(hc->md, &srcu_idx);
+	if (table)
 		dm_table_event(table);
-		dm_table_put(table);
-	}
+	dm_put_live_table(hc->md, srcu_idx);
 
 	if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, param->event_nr))
 		param->flags |= DM_UEVENT_GENERATED_FLAG;
@@ -620,11 +636,14 @@ static int check_name(const char *name)
  * _hash_lock without first calling dm_table_put, because dm_table_destroy
  * waits for this dm_table_put and could be called under this lock.
  */
-static struct dm_table *dm_get_inactive_table(struct mapped_device *md)
+static struct dm_table *dm_get_inactive_table(struct mapped_device *md, int *srcu_idx)
 {
 	struct hash_cell *hc;
 	struct dm_table *table = NULL;
 
+	/* increment rcu count, we don't care about the table pointer */
+	dm_get_live_table(md, srcu_idx);
+
 	down_read(&_hash_lock);
 	hc = dm_get_mdptr(md);
 	if (!hc || hc->md != md) {
@@ -633,8 +652,6 @@ static struct dm_table *dm_get_inactive_table(struct mapped_device *md)
 	}
 
 	table = hc->new_map;
-	if (table)
-		dm_table_get(table);
 
 out:
 	up_read(&_hash_lock);
@@ -643,10 +660,11 @@ out:
 }
 
 static struct dm_table *dm_get_live_or_inactive_table(struct mapped_device *md,
-						      struct dm_ioctl *param)
+						      struct dm_ioctl *param,
+						      int *srcu_idx)
 {
 	return (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) ?
-		dm_get_inactive_table(md) : dm_get_live_table(md);
+		dm_get_inactive_table(md, srcu_idx) : dm_get_live_table(md, srcu_idx);
 }
 
 /*
@@ -657,6 +675,7 @@ static void __dev_status(struct mapped_device *md, struct dm_ioctl *param)
 {
 	struct gendisk *disk = dm_disk(md);
 	struct dm_table *table;
+	int srcu_idx;
 
 	param->flags &= ~(DM_SUSPEND_FLAG | DM_READONLY_FLAG |
 			  DM_ACTIVE_PRESENT_FLAG);
@@ -676,26 +695,27 @@ static void __dev_status(struct mapped_device *md, struct dm_ioctl *param)
 	param->event_nr = dm_get_event_nr(md);
 	param->target_count = 0;
 
-	table = dm_get_live_table(md);
+	table = dm_get_live_table(md, &srcu_idx);
 	if (table) {
 		if (!(param->flags & DM_QUERY_INACTIVE_TABLE_FLAG)) {
 			if (get_disk_ro(disk))
 				param->flags |= DM_READONLY_FLAG;
 			param->target_count = dm_table_get_num_targets(table);
 		}
-		dm_table_put(table);
 
 		param->flags |= DM_ACTIVE_PRESENT_FLAG;
 	}
+	dm_put_live_table(md, srcu_idx);
 
 	if (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) {
-		table = dm_get_inactive_table(md);
+		int srcu_idx;
+		table = dm_get_inactive_table(md, &srcu_idx);
 		if (table) {
 			if (!(dm_table_get_mode(table) & FMODE_WRITE))
 				param->flags |= DM_READONLY_FLAG;
 			param->target_count = dm_table_get_num_targets(table);
-			dm_table_put(table);
 		}
+		dm_put_live_table(md, srcu_idx);
 	}
 }
 
@@ -796,6 +816,7 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size)
 	struct hash_cell *hc;
 	struct mapped_device *md;
 	int r;
+	struct dm_table *t;
 
 	down_write(&_hash_lock);
 	hc = __find_device_hash_cell(param);
@@ -819,9 +840,14 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size)
 		return r;
 	}
 
-	__hash_remove(hc);
+	t = __hash_remove(hc);
 	up_write(&_hash_lock);
 
+	if (t) {
+		dm_sync_table(md);
+		dm_table_destroy(t);
+	}
+
 	if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr))
 		param->flags |= DM_UEVENT_GENERATED_FLAG;
 
@@ -986,6 +1012,7 @@ static int do_resume(struct dm_ioctl *param)
 
 		old_map = dm_swap_table(md, new_map);
 		if (IS_ERR(old_map)) {
+			dm_sync_table(md);
 			dm_table_destroy(new_map);
 			dm_put(md);
 			return PTR_ERR(old_map);
@@ -1003,6 +1030,10 @@ static int do_resume(struct dm_ioctl *param)
 			param->flags |= DM_UEVENT_GENERATED_FLAG;
 	}
 
+	/*
+	 * Since dm_swap_table synchronizes RCU, nobody should be in
+	 * read-side critical section already.
+	 */
 	if (old_map)
 		dm_table_destroy(old_map);
 
@@ -1125,6 +1156,7 @@ static int dev_wait(struct dm_ioctl *param, size_t param_size)
 	int r = 0;
 	struct mapped_device *md;
 	struct dm_table *table;
+	int srcu_idx;
 
 	md = find_device(param);
 	if (!md)
@@ -1145,11 +1177,10 @@ static int dev_wait(struct dm_ioctl *param, size_t param_size)
 	 */
 	__dev_status(md, param);
 
-	table = dm_get_live_or_inactive_table(md, param);
-	if (table) {
+	table = dm_get_live_or_inactive_table(md, param, &srcu_idx);
+	if (table)
 		retrieve_status(table, param, param_size);
-		dm_table_put(table);
-	}
+	dm_put_live_table(md, srcu_idx);
 
 out:
 	dm_put(md);
@@ -1221,7 +1252,7 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
 {
 	int r;
 	struct hash_cell *hc;
-	struct dm_table *t;
+	struct dm_table *t, *old_map = NULL;
 	struct mapped_device *md;
 	struct target_type *immutable_target_type;
 
@@ -1277,14 +1308,14 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
 	hc = dm_get_mdptr(md);
 	if (!hc || hc->md != md) {
 		DMWARN("device has been removed from the dev hash table.");
-		dm_table_destroy(t);
 		up_write(&_hash_lock);
+		dm_table_destroy(t);
 		r = -ENXIO;
 		goto out;
 	}
 
 	if (hc->new_map)
-		dm_table_destroy(hc->new_map);
+		old_map = hc->new_map;
 	hc->new_map = t;
 	up_write(&_hash_lock);
 
@@ -1292,6 +1323,11 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
 	__dev_status(md, param);
 
 out:
+	if (old_map) {
+		dm_sync_table(md);
+		dm_table_destroy(old_map);
+	}
+
 	dm_put(md);
 
 	return r;
@@ -1301,6 +1337,7 @@ static int table_clear(struct dm_ioctl *param, size_t param_size)
 {
 	struct hash_cell *hc;
 	struct mapped_device *md;
+	struct dm_table *old_map = NULL;
 
 	down_write(&_hash_lock);
 
@@ -1312,7 +1349,7 @@ static int table_clear(struct dm_ioctl *param, size_t param_size)
 	}
 
 	if (hc->new_map) {
-		dm_table_destroy(hc->new_map);
+		old_map = hc->new_map;
 		hc->new_map = NULL;
 	}
 
@@ -1321,6 +1358,10 @@ static int table_clear(struct dm_ioctl *param, size_t param_size)
 	__dev_status(hc->md, param);
 	md = hc->md;
 	up_write(&_hash_lock);
+	if (old_map) {
+		dm_sync_table(md);
+		dm_table_destroy(old_map);
+	}
 	dm_put(md);
 
 	return 0;
@@ -1370,6 +1411,7 @@ static int table_deps(struct dm_ioctl *param, size_t param_size)
 {
 	struct mapped_device *md;
 	struct dm_table *table;
+	int srcu_idx;
 
 	md = find_device(param);
 	if (!md)
@@ -1377,11 +1419,10 @@ static int table_deps(struct dm_ioctl *param, size_t param_size)
 
 	__dev_status(md, param);
 
-	table = dm_get_live_or_inactive_table(md, param);
-	if (table) {
+	table = dm_get_live_or_inactive_table(md, param, &srcu_idx);
+	if (table)
 		retrieve_deps(table, param, param_size);
-		dm_table_put(table);
-	}
+	dm_put_live_table(md, srcu_idx);
 
 	dm_put(md);
 
@@ -1396,6 +1437,7 @@ static int table_status(struct dm_ioctl *param, size_t param_size)
 {
 	struct mapped_device *md;
 	struct dm_table *table;
+	int srcu_idx;
 
 	md = find_device(param);
 	if (!md)
@@ -1403,11 +1445,10 @@ static int table_status(struct dm_ioctl *param, size_t param_size)
 
 	__dev_status(md, param);
 
-	table = dm_get_live_or_inactive_table(md, param);
-	if (table) {
+	table = dm_get_live_or_inactive_table(md, param, &srcu_idx);
+	if (table)
 		retrieve_status(table, param, param_size);
-		dm_table_put(table);
-	}
+	dm_put_live_table(md, srcu_idx);
 
 	dm_put(md);
 
@@ -1443,6 +1484,7 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
 	struct dm_target_msg *tmsg = (void *) param + param->data_start;
 	size_t maxlen;
 	char *result = get_result_buffer(param, param_size, &maxlen);
+	int srcu_idx;
 
 	md = find_device(param);
 	if (!md)
@@ -1470,9 +1512,9 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
 	if (r <= 1)
 		goto out_argv;
 
-	table = dm_get_live_table(md);
+	table = dm_get_live_table(md, &srcu_idx);
 	if (!table)
-		goto out_argv;
+		goto out_table;
 
 	if (dm_deleting_md(md)) {
 		r = -ENXIO;
@@ -1491,7 +1533,7 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
 	}
 
 out_table:
-	dm_table_put(table);
+	dm_put_live_table(md, srcu_idx);
 out_argv:
 	kfree(argv);
 out:
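The dm-ioctl.c hunks above all follow one shape: detach the table from its owner while holding _hash_lock, drop the lock, then call dm_sync_table before dm_table_destroy so no SRCU/RCU reader can still see the table being freed. A condensed sketch of that sequence, modelled on table_clear above (the example_clear() wrapper is hypothetical; the locks and dm_* calls are the real ones from this file):

	/* Sketch only: the deferred-destroy pattern used throughout dm-ioctl.c. */
	static void example_clear(struct hash_cell *hc, struct mapped_device *md)
	{
		struct dm_table *old_map = NULL;

		down_write(&_hash_lock);
		if (hc->new_map) {
			old_map = hc->new_map;	/* detach; never destroy under the lock */
			hc->new_map = NULL;
		}
		up_write(&_hash_lock);

		if (old_map) {
			dm_sync_table(md);	/* wait for all readers to drain */
			dm_table_destroy(old_map);	/* now safe to free */
		}
	}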
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 1ff252ab7d46..f221812b7dbc 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -26,22 +26,8 @@
 #define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
 #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
 
-/*
- * The table has always exactly one reference from either mapped_device->map
- * or hash_cell->new_map. This reference is not counted in table->holders.
- * A pair of dm_create_table/dm_destroy_table functions is used for table
- * creation/destruction.
- *
- * Temporary references from the other code increase table->holders. A pair
- * of dm_table_get/dm_table_put functions is used to manipulate it.
- *
- * When the table is about to be destroyed, we wait for table->holders to
- * drop to zero.
- */
-
 struct dm_table {
 	struct mapped_device *md;
-	atomic_t holders;
 	unsigned type;
 
 	/* btree table */
@@ -208,7 +194,6 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
 
 	INIT_LIST_HEAD(&t->devices);
 	INIT_LIST_HEAD(&t->target_callbacks);
-	atomic_set(&t->holders, 0);
 
 	if (!num_targets)
 		num_targets = KEYS_PER_NODE;
@@ -246,10 +231,6 @@ void dm_table_destroy(struct dm_table *t)
 	if (!t)
 		return;
 
-	while (atomic_read(&t->holders))
-		msleep(1);
-	smp_mb();
-
 	/* free the indexes */
 	if (t->depth >= 2)
 		vfree(t->index[t->depth - 2]);
@@ -274,22 +255,6 @@
 	kfree(t);
 }
 
-void dm_table_get(struct dm_table *t)
-{
-	atomic_inc(&t->holders);
-}
-EXPORT_SYMBOL(dm_table_get);
-
-void dm_table_put(struct dm_table *t)
-{
-	if (!t)
-		return;
-
-	smp_mb__before_atomic_dec();
-	atomic_dec(&t->holders);
-}
-EXPORT_SYMBOL(dm_table_put);
-
 /*
  * Checks to see if we need to extend highs or targets.
  */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 33f20103d8d5..ecff83f5b53a 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -117,12 +117,19 @@ EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
 #define DMF_MERGE_IS_OPTIONAL 6
 
 /*
+ * A dummy definition to make RCU happy.
+ * struct dm_table should never be dereferenced in this file.
+ */
+struct dm_table {
+	int undefined__;
+};
+
+/*
  * Work processed by per-device workqueue.
  */
 struct mapped_device {
-	struct rw_semaphore io_lock;
+	struct srcu_struct io_barrier;
 	struct mutex suspend_lock;
-	rwlock_t map_lock;
 	atomic_t holders;
 	atomic_t open_count;
 
@@ -156,6 +163,8 @@ struct mapped_device {
 
 	/*
	 * The current mapping.
+	 * Use dm_get_live_table{_fast} or take suspend_lock for
+	 * dereference.
	 */
 	struct dm_table *map;
 
@@ -386,12 +395,14 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
 			unsigned int cmd, unsigned long arg)
 {
 	struct mapped_device *md = bdev->bd_disk->private_data;
+	int srcu_idx;
 	struct dm_table *map;
 	struct dm_target *tgt;
 	int r = -ENOTTY;
 
retry:
-	map = dm_get_live_table(md);
+	map = dm_get_live_table(md, &srcu_idx);
+
 	if (!map || !dm_table_get_size(map))
 		goto out;
 
@@ -410,7 +421,7 @@ retry:
 		r = tgt->type->ioctl(tgt, cmd, arg);
 
out:
-	dm_table_put(map);
+	dm_put_live_table(md, srcu_idx);
 
 	if (r == -ENOTCONN) {
 		msleep(10);
@@ -509,20 +520,39 @@ static void queue_io(struct mapped_device *md, struct bio *bio)
 /*
  * Everyone (including functions in this file), should use this
  * function to access the md->map field, and make sure they call
- * dm_table_put() when finished.
+ * dm_put_live_table() when finished.
  */
-struct dm_table *dm_get_live_table(struct mapped_device *md)
+struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
 {
-	struct dm_table *t;
-	unsigned long flags;
+	*srcu_idx = srcu_read_lock(&md->io_barrier);
 
-	read_lock_irqsave(&md->map_lock, flags);
-	t = md->map;
-	if (t)
-		dm_table_get(t);
-	read_unlock_irqrestore(&md->map_lock, flags);
+	return srcu_dereference(md->map, &md->io_barrier);
+}
 
-	return t;
+void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
+{
+	srcu_read_unlock(&md->io_barrier, srcu_idx);
+}
+
+void dm_sync_table(struct mapped_device *md)
+{
+	synchronize_srcu(&md->io_barrier);
+	synchronize_rcu_expedited();
+}
+
+/*
+ * A fast alternative to dm_get_live_table/dm_put_live_table.
+ * The caller must not block between these two functions.
+ */
+static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
+{
+	rcu_read_lock();
+	return rcu_dereference(md->map);
+}
+
+static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
+{
+	rcu_read_unlock();
 }
 
 /*
@@ -1356,17 +1386,18 @@ static int __split_and_process_non_flush(struct clone_info *ci)
 /*
  * Entry point to split a bio into clones and submit them to the targets.
  */
-static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
+static void __split_and_process_bio(struct mapped_device *md,
+				    struct dm_table *map, struct bio *bio)
 {
 	struct clone_info ci;
 	int error = 0;
 
-	ci.map = dm_get_live_table(md);
-	if (unlikely(!ci.map)) {
+	if (unlikely(!map)) {
 		bio_io_error(bio);
 		return;
 	}
 
+	ci.map = map;
 	ci.md = md;
 	ci.io = alloc_io(md);
 	ci.io->error = 0;
@@ -1393,7 +1424,6 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
 
 	/* drop the extra reference count */
 	dec_pending(ci.io, error);
-	dm_table_put(ci.map);
 }
 /*-----------------------------------------------------------------
  * CRUD END
@@ -1404,7 +1434,7 @@ static int dm_merge_bvec(struct request_queue *q,
 			 struct bio_vec *biovec)
 {
 	struct mapped_device *md = q->queuedata;
-	struct dm_table *map = dm_get_live_table(md);
+	struct dm_table *map = dm_get_live_table_fast(md);
 	struct dm_target *ti;
 	sector_t max_sectors;
 	int max_size = 0;
@@ -1414,7 +1444,7 @@ static int dm_merge_bvec(struct request_queue *q,
 
 	ti = dm_table_find_target(map, bvm->bi_sector);
 	if (!dm_target_is_valid(ti))
-		goto out_table;
+		goto out;
 
 	/*
	 * Find maximum amount of I/O that won't need splitting
@@ -1443,10 +1473,8 @@ static int dm_merge_bvec(struct request_queue *q,
 
 	max_size = 0;
 
-out_table:
-	dm_table_put(map);
-
out:
+	dm_put_live_table_fast(md);
 	/*
	 * Always allow an entire first page
	 */
@@ -1465,8 +1493,10 @@ static void _dm_request(struct request_queue *q, struct bio *bio)
 	int rw = bio_data_dir(bio);
 	struct mapped_device *md = q->queuedata;
 	int cpu;
+	int srcu_idx;
+	struct dm_table *map;
 
-	down_read(&md->io_lock);
+	map = dm_get_live_table(md, &srcu_idx);
 
 	cpu = part_stat_lock();
 	part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
@@ -1475,7 +1505,7 @@ static void _dm_request(struct request_queue *q, struct bio *bio)
 
 	/* if we're suspended, we have to queue this io for later */
 	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
-		up_read(&md->io_lock);
+		dm_put_live_table(md, srcu_idx);
 
 		if (bio_rw(bio) != READA)
 			queue_io(md, bio);
@@ -1484,8 +1514,8 @@ static void _dm_request(struct request_queue *q, struct bio *bio)
 		return;
 	}
 
-	__split_and_process_bio(md, bio);
-	up_read(&md->io_lock);
+	__split_and_process_bio(md, map, bio);
+	dm_put_live_table(md, srcu_idx);
 	return;
 }
 
@@ -1671,7 +1701,8 @@ static struct request *dm_start_request(struct mapped_device *md, struct request
 static void dm_request_fn(struct request_queue *q)
 {
 	struct mapped_device *md = q->queuedata;
-	struct dm_table *map = dm_get_live_table(md);
+	int srcu_idx;
+	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 	struct dm_target *ti;
 	struct request *rq, *clone;
 	sector_t pos;
@@ -1726,7 +1757,7 @@ requeued:
delay_and_out:
 	blk_delay_queue(q, HZ / 10);
out:
-	dm_table_put(map);
+	dm_put_live_table(md, srcu_idx);
 }
 
 int dm_underlying_device_busy(struct request_queue *q)
@@ -1739,14 +1770,14 @@ static int dm_lld_busy(struct request_queue *q)
 {
 	int r;
 	struct mapped_device *md = q->queuedata;
-	struct dm_table *map = dm_get_live_table(md);
+	struct dm_table *map = dm_get_live_table_fast(md);
 
 	if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
 		r = 1;
 	else
 		r = dm_table_any_busy_target(map);
 
-	dm_table_put(map);
+	dm_put_live_table_fast(md);
 
 	return r;
 }
@@ -1758,7 +1789,7 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
 	struct dm_table *map;
 
 	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
-		map = dm_get_live_table(md);
+		map = dm_get_live_table_fast(md);
 		if (map) {
 			/*
			 * Request-based dm cares about only own queue for
@@ -1769,9 +1800,8 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
 				    bdi_bits;
 			else
 				r = dm_table_any_congested(map, bdi_bits);
-
-			dm_table_put(map);
 		}
+		dm_put_live_table_fast(md);
 	}
 
 	return r;
@@ -1876,12 +1906,14 @@ static struct mapped_device *alloc_dev(int minor)
 	if (r < 0)
 		goto bad_minor;
 
+	r = init_srcu_struct(&md->io_barrier);
+	if (r < 0)
+		goto bad_io_barrier;
+
 	md->type = DM_TYPE_NONE;
-	init_rwsem(&md->io_lock);
 	mutex_init(&md->suspend_lock);
 	mutex_init(&md->type_lock);
 	spin_lock_init(&md->deferred_lock);
-	rwlock_init(&md->map_lock);
 	atomic_set(&md->holders, 1);
 	atomic_set(&md->open_count, 0);
 	atomic_set(&md->event_nr, 0);
@@ -1944,6 +1976,8 @@ bad_thread:
bad_disk:
 	blk_cleanup_queue(md->queue);
bad_queue:
+	cleanup_srcu_struct(&md->io_barrier);
+bad_io_barrier:
 	free_minor(minor);
bad_minor:
 	module_put(THIS_MODULE);
@@ -1967,6 +2001,7 @@ static void free_dev(struct mapped_device *md)
 	bioset_free(md->bs);
 	blk_integrity_unregister(md->disk);
 	del_gendisk(md->disk);
+	cleanup_srcu_struct(&md->io_barrier);
 	free_minor(minor);
 
 	spin_lock(&_minor_lock);
@@ -2109,7 +2144,6 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
 	struct dm_table *old_map;
 	struct request_queue *q = md->queue;
 	sector_t size;
-	unsigned long flags;
 	int merge_is_optional;
 
 	size = dm_table_get_size(t);
@@ -2138,9 +2172,8 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
 
 	merge_is_optional = dm_table_merge_is_optional(t);
 
-	write_lock_irqsave(&md->map_lock, flags);
 	old_map = md->map;
-	md->map = t;
+	rcu_assign_pointer(md->map, t);
 	md->immutable_target_type = dm_table_get_immutable_target_type(t);
 
 	dm_table_set_restrictions(t, q, limits);
@@ -2148,7 +2181,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
 		set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
 	else
 		clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
-	write_unlock_irqrestore(&md->map_lock, flags);
+	dm_sync_table(md);
 
 	return old_map;
 }
@@ -2159,15 +2192,13 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
 static struct dm_table *__unbind(struct mapped_device *md)
 {
 	struct dm_table *map = md->map;
-	unsigned long flags;
 
 	if (!map)
 		return NULL;
 
 	dm_table_event_callback(map, NULL, NULL);
-	write_lock_irqsave(&md->map_lock, flags);
-	md->map = NULL;
-	write_unlock_irqrestore(&md->map_lock, flags);
+	rcu_assign_pointer(md->map, NULL);
+	dm_sync_table(md);
 
 	return map;
 }
@@ -2319,11 +2350,12 @@ EXPORT_SYMBOL_GPL(dm_device_name);
 static void __dm_destroy(struct mapped_device *md, bool wait)
 {
 	struct dm_table *map;
+	int srcu_idx;
 
 	might_sleep();
 
 	spin_lock(&_minor_lock);
-	map = dm_get_live_table(md);
+	map = dm_get_live_table(md, &srcu_idx);
 	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
 	set_bit(DMF_FREEING, &md->flags);
 	spin_unlock(&_minor_lock);
@@ -2333,6 +2365,9 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
 		dm_table_postsuspend_targets(map);
 	}
 
+	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
+	dm_put_live_table(md, srcu_idx);
+
 	/*
	 * Rare, but there may be I/O requests still going to complete,
	 * for example. Wait for all references to disappear.
@@ -2347,7 +2382,6 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
 			       dm_device_name(md), atomic_read(&md->holders));
 
 	dm_sysfs_exit(md);
-	dm_table_put(map);
 	dm_table_destroy(__unbind(md));
 	free_dev(md);
 }
@@ -2404,8 +2438,10 @@ static void dm_wq_work(struct work_struct *work)
 	struct mapped_device *md = container_of(work, struct mapped_device,
 						work);
 	struct bio *c;
+	int srcu_idx;
+	struct dm_table *map;
 
-	down_read(&md->io_lock);
+	map = dm_get_live_table(md, &srcu_idx);
 
 	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
 		spin_lock_irq(&md->deferred_lock);
@@ -2415,17 +2451,13 @@ static void dm_wq_work(struct work_struct *work)
 		if (!c)
 			break;
 
-		up_read(&md->io_lock);
-
 		if (dm_request_based(md))
 			generic_make_request(c);
 		else
-			__split_and_process_bio(md, c);
-
-		down_read(&md->io_lock);
+			__split_and_process_bio(md, map, c);
 	}
 
-	up_read(&md->io_lock);
+	dm_put_live_table(md, srcu_idx);
 }
 
 static void dm_queue_flush(struct mapped_device *md)
@@ -2457,10 +2489,10 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
 	 * reappear.
	 */
 	if (dm_table_has_no_data_devices(table)) {
-		live_map = dm_get_live_table(md);
+		live_map = dm_get_live_table_fast(md);
 		if (live_map)
 			limits = md->queue->limits;
-		dm_table_put(live_map);
+		dm_put_live_table_fast(md);
 	}
 
 	if (!live_map) {
@@ -2540,7 +2572,7 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
 		goto out_unlock;
 	}
 
-	map = dm_get_live_table(md);
+	map = md->map;
 
 	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
@@ -2561,7 +2593,7 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
 	if (!noflush && do_lockfs) {
 		r = lock_fs(md);
 		if (r)
-			goto out;
+			goto out_unlock;
 	}
 
 	/*
@@ -2576,9 +2608,8 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
 	 * (dm_wq_work), we set BMF_BLOCK_IO_FOR_SUSPEND and call
	 * flush_workqueue(md->wq).
	 */
-	down_write(&md->io_lock);
 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
-	up_write(&md->io_lock);
+	synchronize_srcu(&md->io_barrier);
 
 	/*
	 * Stop md->queue before flushing md->wq in case request-based
@@ -2596,10 +2627,9 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
	 */
 	r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);
 
-	down_write(&md->io_lock);
 	if (noflush)
 		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
-	up_write(&md->io_lock);
+	synchronize_srcu(&md->io_barrier);
 
 	/* were we interrupted ? */
 	if (r < 0) {
@@ -2609,7 +2639,7 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
 		start_queue(md->queue);
 
 		unlock_fs(md);
-		goto out; /* pushback list is already flushed, so skip flush */
+		goto out_unlock; /* pushback list is already flushed, so skip flush */
 	}
 
 	/*
@@ -2622,9 +2652,6 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
 
 	dm_table_postsuspend_targets(map);
 
-out:
-	dm_table_put(map);
-
out_unlock:
 	mutex_unlock(&md->suspend_lock);
 	return r;
@@ -2639,7 +2666,7 @@ int dm_resume(struct mapped_device *md)
 	if (!dm_suspended_md(md))
 		goto out;
 
-	map = dm_get_live_table(md);
+	map = md->map;
 	if (!map || !dm_table_get_size(map))
 		goto out;
 
@@ -2663,7 +2690,6 @@ int dm_resume(struct mapped_device *md)
 
 	r = 0;
out:
-	dm_table_put(map);
 	mutex_unlock(&md->suspend_lock);
 
 	return r;
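Taken together, the writer side of the scheme in drivers/md/dm.c reduces to publish-then-synchronize: rcu_assign_pointer makes the new table visible to readers, dm_sync_table waits out both SRCU and plain-RCU readers, and only then may the old table be destroyed. A minimal sketch of that sequence as performed by __bind/__unbind and do_resume above (the example_swap() wrapper is hypothetical and assumes the caller holds md->suspend_lock, as the real callers do):

	/* Sketch only: the writer-side publish/retire sequence. */
	static void example_swap(struct mapped_device *md, struct dm_table *t)
	{
		struct dm_table *old_map = md->map;	/* stable under suspend_lock */

		rcu_assign_pointer(md->map, t);		/* readers start seeing t */
		dm_sync_table(md);	/* synchronize_srcu + synchronize_rcu_expedited */

		if (old_map)
			dm_table_destroy(old_map);	/* no reader can still hold it */
	}

This is what lets dm_table_destroy drop the old busy-wait on table->holders: once dm_sync_table returns, every dm_get_live_table/dm_get_live_table_fast reader that could have seen the old pointer has already exited its critical section.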