Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r--   drivers/md/dm.c   23
1 file changed, 6 insertions(+), 17 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 8d40f27cce89..bbc7ecf6cf04 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -100,9 +100,6 @@ union map_info *dm_get_mapinfo(struct bio *bio)
  * Work processed by per-device workqueue.
  */
 struct dm_wq_req {
-        enum {
-                DM_WQ_FLUSH_DEFERRED,
-        } type;
         struct work_struct work;
         struct mapped_device *md;
         void *context;
@@ -1434,32 +1431,24 @@ static void dm_wq_work(struct work_struct *work)
         struct mapped_device *md = req->md;
 
         down_write(&md->io_lock);
-        switch (req->type) {
-        case DM_WQ_FLUSH_DEFERRED:
-                __flush_deferred_io(md);
-                break;
-        default:
-                DMERR("dm_wq_work: unrecognised work type %d", req->type);
-                BUG();
-        }
+        __flush_deferred_io(md);
         up_write(&md->io_lock);
 }
 
-static void dm_wq_queue(struct mapped_device *md, int type, void *context,
+static void dm_wq_queue(struct mapped_device *md, void *context,
                         struct dm_wq_req *req)
 {
-        req->type = type;
         req->md = md;
         req->context = context;
         INIT_WORK(&req->work, dm_wq_work);
         queue_work(md->wq, &req->work);
 }
 
-static void dm_queue_flush(struct mapped_device *md, int type, void *context)
+static void dm_queue_flush(struct mapped_device *md, void *context)
 {
         struct dm_wq_req req;
 
-        dm_wq_queue(md, type, context, &req);
+        dm_wq_queue(md, context, &req);
         flush_workqueue(md->wq);
 }
 
@@ -1605,7 +1594,7 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
 
         /* were we interrupted ? */
         if (r < 0) {
-                dm_queue_flush(md, DM_WQ_FLUSH_DEFERRED, NULL);
+                dm_queue_flush(md, NULL);
 
                 unlock_fs(md);
                 goto out; /* pushback list is already flushed, so skip flush */
@@ -1645,7 +1634,7 @@ int dm_resume(struct mapped_device *md)
         if (r)
                 goto out;
 
-        dm_queue_flush(md, DM_WQ_FLUSH_DEFERRED, NULL);
+        dm_queue_flush(md, NULL);
 
         unlock_fs(md);
 
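
With the single-member enum removed, dm_queue_flush() reduces to the standard synchronous-flush workqueue idiom: initialize an on-stack work item, queue it on the device's workqueue, then flush_workqueue() so the caller blocks until the handler has run. Below is a minimal self-contained sketch of that idiom; the names (my_req, my_work_fn, my_queue_and_wait) are illustrative placeholders, not identifiers from dm.c.

/*
 * Sketch of the queue-then-flush pattern that dm_queue_flush() is left
 * with after this patch.  All "my_*" names are hypothetical.
 */
#include <linux/workqueue.h>

struct my_req {
        struct work_struct work;        /* embedded so container_of() can recover my_req */
        void *context;
};

static void my_work_fn(struct work_struct *work)
{
        struct my_req *req = container_of(work, struct my_req, work);

        /* do the one remaining kind of work, e.g. flush deferred I/O */
        (void)req->context;
}

static void my_queue_and_wait(struct workqueue_struct *wq, void *context)
{
        struct my_req req;              /* on-stack: safe only because we wait below */

        req.context = context;
        INIT_WORK(&req.work, my_work_fn);
        queue_work(wq, &req.work);
        flush_workqueue(wq);            /* returns only after my_work_fn() has completed */
}

The on-stack struct dm_wq_req in dm_queue_flush() is safe for the same reason: flush_workqueue(md->wq) guarantees the queued work has finished executing before the function returns, so the work item never outlives the caller's stack frame.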