diff options
author | Mikulas Patocka <mpatocka@redhat.com> | 2009-04-08 19:27:15 -0400 |
---|---|---|
committer | Alasdair G Kergon <agk@redhat.com> | 2009-04-08 19:27:15 -0400 |
commit | 3b00b2036fac7a7667d0676a0f80eee575b8c32b (patch) | |
tree | cd893a75900ee81550230711ea4a0005a6f2c2ce /drivers/md | |
parent | 54d9a1b4513b96cbd835ca6866c6a604d194b2ae (diff) |
dm: rework queueing and suspension
Rework shutting down on suspend and document the associated rules.
Drop write lock in __split_and_process_bio to allow more processing
concurrency.
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Diffstat (limited to 'drivers/md')
-rw-r--r-- | drivers/md/dm.c | 40 |
1 file changed, 32 insertions(+), 8 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index bb97ec8d6644..9746c1eb9ef7 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -1434,18 +1434,21 @@ static void dm_wq_work(struct work_struct *work) | |||
1434 | 1434 | ||
1435 | down_write(&md->io_lock); | 1435 | down_write(&md->io_lock); |
1436 | 1436 | ||
1437 | while (1) { | 1437 | while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { |
1438 | spin_lock_irq(&md->deferred_lock); | 1438 | spin_lock_irq(&md->deferred_lock); |
1439 | c = bio_list_pop(&md->deferred); | 1439 | c = bio_list_pop(&md->deferred); |
1440 | spin_unlock_irq(&md->deferred_lock); | 1440 | spin_unlock_irq(&md->deferred_lock); |
1441 | 1441 | ||
1442 | if (!c) { | 1442 | if (!c) { |
1443 | clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); | ||
1444 | clear_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags); | 1443 | clear_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags); |
1445 | break; | 1444 | break; |
1446 | } | 1445 | } |
1447 | 1446 | ||
1447 | up_write(&md->io_lock); | ||
1448 | |||
1448 | __split_and_process_bio(md, c); | 1449 | __split_and_process_bio(md, c); |
1450 | |||
1451 | down_write(&md->io_lock); | ||
1449 | } | 1452 | } |
1450 | 1453 | ||
1451 | up_write(&md->io_lock); | 1454 | up_write(&md->io_lock); |
@@ -1453,8 +1456,9 @@ static void dm_wq_work(struct work_struct *work) | |||
1453 | 1456 | ||
1454 | static void dm_queue_flush(struct mapped_device *md) | 1457 | static void dm_queue_flush(struct mapped_device *md) |
1455 | { | 1458 | { |
1459 | clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); | ||
1460 | smp_mb__after_clear_bit(); | ||
1456 | queue_work(md->wq, &md->work); | 1461 | queue_work(md->wq, &md->work); |
1457 | flush_workqueue(md->wq); | ||
1458 | } | 1462 | } |
1459 | 1463 | ||
1460 | /* | 1464 | /* |
@@ -1572,22 +1576,36 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) | |||
1572 | } | 1576 | } |
1573 | 1577 | ||
1574 | /* | 1578 | /* |
1575 | * First we set the DMF_QUEUE_IO_TO_THREAD flag so no more ios | 1579 | * Here we must make sure that no processes are submitting requests |
1576 | * will be mapped. | 1580 | * to target drivers i.e. no one may be executing |
1581 | * __split_and_process_bio. This is called from dm_request and | ||
1582 | * dm_wq_work. | ||
1583 | * | ||
1584 | * To get all processes out of __split_and_process_bio in dm_request, | ||
1585 | * we take the write lock. To prevent any process from reentering | ||
1586 | * __split_and_process_bio from dm_request, we set | ||
1587 | * DMF_QUEUE_IO_TO_THREAD. | ||
1588 | * | ||
1589 | * To quiesce the thread (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND | ||
1590 | * and call flush_workqueue(md->wq). flush_workqueue will wait until | ||
1591 | * dm_wq_work exits and DMF_BLOCK_IO_FOR_SUSPEND will prevent any | ||
1592 | * further calls to __split_and_process_bio from dm_wq_work. | ||
1577 | */ | 1593 | */ |
1578 | down_write(&md->io_lock); | 1594 | down_write(&md->io_lock); |
1579 | set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); | 1595 | set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); |
1580 | set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags); | 1596 | set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags); |
1581 | |||
1582 | up_write(&md->io_lock); | 1597 | up_write(&md->io_lock); |
1583 | 1598 | ||
1599 | flush_workqueue(md->wq); | ||
1600 | |||
1584 | /* | 1601 | /* |
1585 | * Wait for the already-mapped ios to complete. | 1602 | * At this point no more requests are entering target request routines. |
1603 | * We call dm_wait_for_completion to wait for all existing requests | ||
1604 | * to finish. | ||
1586 | */ | 1605 | */ |
1587 | r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE); | 1606 | r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE); |
1588 | 1607 | ||
1589 | down_write(&md->io_lock); | 1608 | down_write(&md->io_lock); |
1590 | |||
1591 | if (noflush) | 1609 | if (noflush) |
1592 | clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); | 1610 | clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); |
1593 | up_write(&md->io_lock); | 1611 | up_write(&md->io_lock); |
@@ -1600,6 +1618,12 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) | |||
1600 | goto out; /* pushback list is already flushed, so skip flush */ | 1618 | goto out; /* pushback list is already flushed, so skip flush */ |
1601 | } | 1619 | } |
1602 | 1620 | ||
1621 | /* | ||
1622 | * If dm_wait_for_completion returned 0, the device is completely | ||
1623 | * quiescent now. There is no request-processing activity. All new | ||
1624 | * requests are being added to md->deferred list. | ||
1625 | */ | ||
1626 | |||
1603 | dm_table_postsuspend_targets(map); | 1627 | dm_table_postsuspend_targets(map); |
1604 | 1628 | ||
1605 | set_bit(DMF_SUSPENDED, &md->flags); | 1629 | set_bit(DMF_SUSPENDED, &md->flags); |