| author | Joe Thornber <ejt@redhat.com> | 2015-12-07 09:48:04 -0500 |
|---|---|---|
| committer | Mike Snitzer <snitzer@redhat.com> | 2015-12-10 10:38:55 -0500 |
| commit | 086fbbbda9b743b149b0294e0feec257e2c311d5 | |
| tree | 5a08a70b85eed4495d73ff469060ad7fd1754443 | |
| parent | 3d5f67332ad9a500857a45397b69a27198720410 | |
dm thin metadata: make dm_thin_find_mapped_range() atomic
Refactor dm_thin_find_mapped_range() so that it takes the read lock on the
metadata's root_lock itself, rather than relying on the finer-grained locking
pushed down inside dm_thin_find_next_mapped_block() and dm_thin_find_block().
Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
-rw-r--r-- | drivers/md/dm-thin-metadata.c | 64 |
1 file changed, 43 insertions(+), 21 deletions(-)
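The change follows a common lock-scoping pattern: the fail_io check and the root_lock acquisition are hoisted out of the lookup helpers into the exported entry points, so the multi-block walk in __find_mapped_range() runs inside a single read-side critical section instead of taking and dropping the lock once per block. The sketch below shows that shape in plain userspace C; it is not the driver's code: a pthread_rwlock_t stands in for the kernel rwsem, and struct metadata, __find_one(), and md_find_range() are invented names used only for illustration.

```c
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_BLOCKS 16

struct metadata {
	pthread_rwlock_t root_lock;   /* stands in for pmd->root_lock */
	bool fail_io;                 /* stands in for pmd->fail_io */
	int mapping[NR_BLOCKS];       /* toy mapping: block -> data block, -1 = unmapped */
};

/* Caller must hold md->root_lock; mirrors the new __find_block()/__find_next_mapped_block(). */
static int __find_one(struct metadata *md, int block, int *data_block)
{
	if (block < 0 || block >= NR_BLOCKS || md->mapping[block] < 0)
		return -ENODATA;
	*data_block = md->mapping[block];
	return 0;
}

/*
 * Exported entry point: one read-lock critical section covers the whole
 * range walk, the same shape as dm_thin_find_mapped_range() after this patch.
 */
static int md_find_range(struct metadata *md, int begin, int end, int *first_data)
{
	int r = -EINVAL;

	pthread_rwlock_rdlock(&md->root_lock);
	if (!md->fail_io) {
		r = -ENODATA;
		for (; begin < end; begin++) {
			r = __find_one(md, begin, first_data);
			if (r != -ENODATA)
				break;   /* found a mapping, or hit a real error */
		}
	}
	pthread_rwlock_unlock(&md->root_lock);

	return r;
}

int main(void)
{
	struct metadata md = { .root_lock = PTHREAD_RWLOCK_INITIALIZER, .fail_io = false };
	int data;

	for (int i = 0; i < NR_BLOCKS; i++)
		md.mapping[i] = -1;
	md.mapping[5] = 42;   /* thin block 5 -> data block 42 */

	if (md_find_range(&md, 0, NR_BLOCKS, &data) == 0)
		printf("first mapped data block: %d\n", data);
	return 0;
}
```

Built with `cc -pthread`, this prints the first mapped block in the range. As in the patch, the double-underscore helpers assume the caller already holds the lock, matching the convention the file already uses for __insert() (visible at the end of the diff below).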
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 7547315ff18a..f962d6453afd 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1408,8 +1408,8 @@ static void unpack_lookup_result(struct dm_thin_device *td, __le64 value,
         result->shared = __snapshotted_since(td, exception_time);
 }
 
-int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block,
-                       int can_issue_io, struct dm_thin_lookup_result *result)
+static int __find_block(struct dm_thin_device *td, dm_block_t block,
+                        int can_issue_io, struct dm_thin_lookup_result *result)
 {
         int r;
         __le64 value;
@@ -1417,12 +1417,6 @@ int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block,
         dm_block_t keys[2] = { td->id, block };
         struct dm_btree_info *info;
 
-        down_read(&pmd->root_lock);
-        if (pmd->fail_io) {
-                up_read(&pmd->root_lock);
-                return -EINVAL;
-        }
-
         if (can_issue_io) {
                 info = &pmd->info;
         } else
@@ -1432,18 +1426,14 @@ int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block,
         if (!r)
                 unpack_lookup_result(td, value, result);
 
-        up_read(&pmd->root_lock);
         return r;
 }
 
-static int dm_thin_find_next_mapped_block(struct dm_thin_device *td, dm_block_t block,
-                                          dm_block_t *vblock,
-                                          struct dm_thin_lookup_result *result)
+int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block,
+                       int can_issue_io, struct dm_thin_lookup_result *result)
 {
         int r;
-        __le64 value;
         struct dm_pool_metadata *pmd = td->pmd;
-        dm_block_t keys[2] = { td->id, block };
 
         down_read(&pmd->root_lock);
         if (pmd->fail_io) {
@@ -1451,18 +1441,32 @@ static int dm_thin_find_next_mapped_block(struct dm_thin_device *td, dm_block_t
                 return -EINVAL;
         }
 
+        r = __find_block(td, block, can_issue_io, result);
+
+        up_read(&pmd->root_lock);
+        return r;
+}
+
+static int __find_next_mapped_block(struct dm_thin_device *td, dm_block_t block,
+                                    dm_block_t *vblock,
+                                    struct dm_thin_lookup_result *result)
+{
+        int r;
+        __le64 value;
+        struct dm_pool_metadata *pmd = td->pmd;
+        dm_block_t keys[2] = { td->id, block };
+
         r = dm_btree_lookup_next(&pmd->info, pmd->root, keys, vblock, &value);
         if (!r)
                 unpack_lookup_result(td, value, result);
 
-        up_read(&pmd->root_lock);
         return r;
 }
 
-int dm_thin_find_mapped_range(struct dm_thin_device *td,
-                              dm_block_t begin, dm_block_t end,
-                              dm_block_t *thin_begin, dm_block_t *thin_end,
-                              dm_block_t *pool_begin, bool *maybe_shared)
+static int __find_mapped_range(struct dm_thin_device *td,
+                               dm_block_t begin, dm_block_t end,
+                               dm_block_t *thin_begin, dm_block_t *thin_end,
+                               dm_block_t *pool_begin, bool *maybe_shared)
 {
         int r;
         dm_block_t pool_end;
@@ -1471,7 +1475,7 @@ int dm_thin_find_mapped_range(struct dm_thin_device *td,
         if (end < begin)
                 return -ENODATA;
 
-        r = dm_thin_find_next_mapped_block(td, begin, &begin, &lookup);
+        r = __find_next_mapped_block(td, begin, &begin, &lookup);
         if (r)
                 return r;
 
@@ -1485,7 +1489,7 @@ int dm_thin_find_mapped_range(struct dm_thin_device *td,
         begin++;
         pool_end = *pool_begin + 1;
         while (begin != end) {
-                r = dm_thin_find_block(td, begin, true, &lookup);
+                r = __find_block(td, begin, true, &lookup);
                 if (r) {
                         if (r == -ENODATA)
                                 break;
@@ -1505,6 +1509,24 @@ int dm_thin_find_mapped_range(struct dm_thin_device *td,
         return 0;
 }
 
+int dm_thin_find_mapped_range(struct dm_thin_device *td,
+                              dm_block_t begin, dm_block_t end,
+                              dm_block_t *thin_begin, dm_block_t *thin_end,
+                              dm_block_t *pool_begin, bool *maybe_shared)
+{
+        int r = -EINVAL;
+        struct dm_pool_metadata *pmd = td->pmd;
+
+        down_read(&pmd->root_lock);
+        if (!pmd->fail_io) {
+                r = __find_mapped_range(td, begin, end, thin_begin, thin_end,
+                                        pool_begin, maybe_shared);
+        }
+        up_read(&pmd->root_lock);
+
+        return r;
+}
+
 static int __insert(struct dm_thin_device *td, dm_block_t block,
                     dm_block_t data_block)
 {