author    Joe Thornber <ejt@redhat.com>    2014-05-13 16:14:14 -0400
committer Mike Snitzer <snitzer@redhat.com>    2014-06-03 13:44:07 -0400
commit    e7a3e871d8954c636b6cd2db7c7ece7ffe405986
tree      9c4a1864d9bc1e37c4abf155230574a97e3d734f    /drivers/md/dm-thin.c
parent    298eaa89b02e88dc9081f8761a957f7cd5e8b201
dm thin: cleanup noflush_work to use a proper completion
Factor out a pool_work interface that noflush_work makes use of to wait for and complete work items (in terms of a proper completion struct).  Allows discontinuing the use of a custom completion in terms of atomic_t and wait_event.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
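In other words, the patch replaces an open-coded handshake (an atomic_t flag woken via wake_up()/wait_event()) with the kernel's completion API. Below is a minimal, simplified sketch of that pattern, not the patch itself: the sync_work/run_work_and_wait names are illustrative only; the actual helpers introduced here are pool_work, pool_work_complete() and pool_work_wait() in the diff.

#include <linux/workqueue.h>
#include <linux/completion.h>

/* Hypothetical on-stack work item that the submitter waits on. */
struct sync_work {
	struct work_struct worker;
	struct completion complete;
};

static void sync_work_fn(struct work_struct *ws)
{
	struct sync_work *sw = container_of(ws, struct sync_work, worker);

	/* ... do the actual work here ... */

	complete(&sw->complete);		/* wake the waiter */
}

static void run_work_and_wait(struct workqueue_struct *wq)
{
	struct sync_work sw;

	INIT_WORK_ONSTACK(&sw.worker, sync_work_fn);
	init_completion(&sw.complete);
	queue_work(wq, &sw.worker);
	wait_for_completion(&sw.complete);	/* replaces wait_event() on an atomic_t */
}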
Diffstat (limited to 'drivers/md/dm-thin.c')
-rw-r--r--  drivers/md/dm-thin.c  |  52
1 file changed, 34 insertions(+), 18 deletions(-)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 242ac2ea5f29..7694988fb806 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1610,47 +1610,63 @@ static void do_no_space_timeout(struct work_struct *ws)
 
 /*----------------------------------------------------------------*/
 
-struct noflush_work {
+struct pool_work {
 	struct work_struct worker;
-	struct thin_c *tc;
+	struct completion complete;
+};
+
+static struct pool_work *to_pool_work(struct work_struct *ws)
+{
+	return container_of(ws, struct pool_work, worker);
+}
+
+static void pool_work_complete(struct pool_work *pw)
+{
+	complete(&pw->complete);
+}
 
-	atomic_t complete;
-	wait_queue_head_t wait;
+static void pool_work_wait(struct pool_work *pw, struct pool *pool,
+			   void (*fn)(struct work_struct *))
+{
+	INIT_WORK_ONSTACK(&pw->worker, fn);
+	init_completion(&pw->complete);
+	queue_work(pool->wq, &pw->worker);
+	wait_for_completion(&pw->complete);
+}
+
+/*----------------------------------------------------------------*/
+
+struct noflush_work {
+	struct pool_work pw;
+	struct thin_c *tc;
 };
 
-static void complete_noflush_work(struct noflush_work *w)
+static struct noflush_work *to_noflush(struct work_struct *ws)
 {
-	atomic_set(&w->complete, 1);
-	wake_up(&w->wait);
+	return container_of(to_pool_work(ws), struct noflush_work, pw);
 }
 
 static void do_noflush_start(struct work_struct *ws)
 {
-	struct noflush_work *w = container_of(ws, struct noflush_work, worker);
+	struct noflush_work *w = to_noflush(ws);
 	w->tc->requeue_mode = true;
 	requeue_io(w->tc);
-	complete_noflush_work(w);
+	pool_work_complete(&w->pw);
 }
 
 static void do_noflush_stop(struct work_struct *ws)
 {
-	struct noflush_work *w = container_of(ws, struct noflush_work, worker);
+	struct noflush_work *w = to_noflush(ws);
 	w->tc->requeue_mode = false;
-	complete_noflush_work(w);
+	pool_work_complete(&w->pw);
 }
 
 static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
 {
 	struct noflush_work w;
 
-	INIT_WORK_ONSTACK(&w.worker, fn);
 	w.tc = tc;
-	atomic_set(&w.complete, 0);
-	init_waitqueue_head(&w.wait);
-
-	queue_work(tc->pool->wq, &w.worker);
-
-	wait_event(w.wait, atomic_read(&w.complete));
+	pool_work_wait(&w.pw, tc->pool, fn);
 }
 
 /*----------------------------------------------------------------*/
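For context, a hedged sketch of how a caller drives the reworked interface; the wrapper function names below are hypothetical, while the real callers are the thin target's suspend hooks elsewhere in dm-thin.c, which this patch does not touch.

/*
 * Sketch only: callers pass the work function they want run on the pool
 * workqueue; noflush_work() blocks until pool_work_complete() fires the
 * completion embedded in the on-stack pool_work.
 */
static void example_enter_requeue_mode(struct thin_c *tc)
{
	noflush_work(tc, do_noflush_start);	/* sets tc->requeue_mode and requeues I/O */
}

static void example_leave_requeue_mode(struct thin_c *tc)
{
	noflush_work(tc, do_noflush_stop);	/* clears tc->requeue_mode */
}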