author    Milan Broz <mbroz@redhat.com>        2008-02-07 21:11:17 -0500
committer Alasdair G Kergon <agk@redhat.com>   2008-02-07 21:11:17 -0500
commit    304f3f6a58301316da612d7bf21d9abe1369d456 (patch)
tree      aa380ca3cd0a7d8a61f17915050866876510f181 /drivers/md
parent    3a7f6c990ad04e6f576a159876c602d14d6f7fef (diff)
dm: move deferred bio flushing to workqueue
Add a single-thread workqueue for each mapped device
and move flushing of the lists of pushback and deferred bios
to this new workqueue.
Signed-off-by: Milan Broz <mbroz@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
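
The pattern at the heart of this patch is a synchronous hand-off: the caller builds a request on its own stack, queues it on the device's single-threaded workqueue, and then waits for the queue to drain, so the flush has completed by the time the call returns. A minimal self-contained sketch of that idiom (flush_req, flush_req_fn and run_flush_and_wait are illustrative names, not part of the patch):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct flush_req {
        struct work_struct work;        /* embedded so the handler can find us */
        int type;                       /* which flush to perform */
};

static void flush_req_fn(struct work_struct *work)
{
        struct flush_req *req = container_of(work, struct flush_req, work);

        /* ... perform the flush selected by req->type ... */
        pr_debug("flush type %d\n", req->type);
}

/*
 * Queue an on-stack request and wait for it to complete.  The stack
 * object stays valid because flush_workqueue() does not return until
 * the handler has run.
 */
static void run_flush_and_wait(struct workqueue_struct *wq, int type)
{
        struct flush_req req = { .type = type };

        INIT_WORK(&req.work, flush_req_fn);
        queue_work(wq, &req.work);
        flush_workqueue(wq);
}

Because the workqueue is single-threaded, flushes for a given device are serialized, and flush_workqueue() preserves the "everything flushed on return" semantics that the old inline code had.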
Diffstat (limited to 'drivers/md')
-rw-r--r--   drivers/md/dm.c   79
1 file changed, 67 insertions(+), 12 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 9ca012e639a8..6617ce4af095 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -71,6 +71,19 @@ union map_info *dm_get_mapinfo(struct bio *bio)
 #define DMF_DELETING 4
 #define DMF_NOFLUSH_SUSPENDING 5
 
+/*
+ * Work processed by per-device workqueue.
+ */
+struct dm_wq_req {
+	enum {
+		DM_WQ_FLUSH_ALL,
+		DM_WQ_FLUSH_DEFERRED,
+	} type;
+	struct work_struct work;
+	struct mapped_device *md;
+	void *context;
+};
+
 struct mapped_device {
 	struct rw_semaphore io_lock;
 	struct mutex suspend_lock;
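The new struct follows the standard workqueue idiom of embedding the work_struct so the handler can map it back to its container. In generic form (reusing the headers from the sketch above; my_req and my_handler are illustrative):

struct my_req {
        struct work_struct work;        /* embedded member */
        int payload;
};

static void my_handler(struct work_struct *work)
{
        /* container_of() recovers the wrapper from the embedded member */
        struct my_req *req = container_of(work, struct my_req, work);

        pr_debug("payload=%d\n", req->payload);
}

Here md records which device the request targets; context is stored by the queueing helper below but not read by either of the two request types this patch defines.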
@@ -96,6 +109,11 @@ struct mapped_device {
 	struct bio_list pushback;
 
 	/*
+	 * Processing queue (flush/barriers)
+	 */
+	struct workqueue_struct *wq;
+
+	/*
 	 * The current mapping.
 	 */
 	struct dm_table *map;
@@ -1044,6 +1062,10 @@ static struct mapped_device *alloc_dev(int minor)
 	add_disk(md->disk);
 	format_dev_t(md->name, MKDEV(_major, minor));
 
+	md->wq = create_singlethread_workqueue("kdmflush");
+	if (!md->wq)
+		goto bad_thread;
+
 	/* Populate the mapping, nobody knows we exist yet */
 	spin_lock(&_minor_lock);
 	old_md = idr_replace(&_minor_idr, md, minor);
@@ -1053,6 +1075,8 @@ static struct mapped_device *alloc_dev(int minor)
 
 	return md;
 
+bad_thread:
+	put_disk(md->disk);
 bad_disk:
 	bioset_free(md->bs);
 bad_no_bioset:
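alloc_dev() cleans up with the kernel's reverse-order goto ladder: each allocation that can fail jumps to a label that undoes only the steps already completed, then falls through the older labels below it. The new bad_thread label therefore sits just above bad_disk. A reduced sketch of the shape (struct example and its fields are illustrative, not the dm code):

#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example {
        struct gendisk *disk;
        struct workqueue_struct *wq;
};

static struct example *alloc_example(void)
{
        struct example *ex;

        ex = kzalloc(sizeof(*ex), GFP_KERNEL);
        if (!ex)
                return NULL;

        ex->disk = alloc_disk(1);
        if (!ex->disk)
                goto bad_disk;

        ex->wq = create_singlethread_workqueue("example");
        if (!ex->wq)
                goto bad_thread;        /* newest step fails -> newest label */

        return ex;

bad_thread:
        put_disk(ex->disk);             /* undo the step before the failure... */
bad_disk:
        kfree(ex);                      /* ...then fall through to older cleanup */
        return NULL;
}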
@@ -1080,6 +1104,7 @@ static void free_dev(struct mapped_device *md)
 		unlock_fs(md);
 		bdput(md->suspended_bdev);
 	}
+	destroy_workqueue(md->wq);
 	mempool_destroy(md->tio_pool);
 	mempool_destroy(md->io_pool);
 	bioset_free(md->bs);
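Note the placement in free_dev(): destroy_workqueue() flushes any remaining work before tearing the queue down, so calling it ahead of the mempool and bioset teardown ensures no flush work can still be running against those pools when they are destroyed.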
@@ -1308,6 +1333,44 @@ static void __merge_pushback_list(struct mapped_device *md)
 	spin_unlock_irqrestore(&md->pushback_lock, flags);
 }
 
+static void dm_wq_work(struct work_struct *work)
+{
+	struct dm_wq_req *req = container_of(work, struct dm_wq_req, work);
+	struct mapped_device *md = req->md;
+
+	down_write(&md->io_lock);
+	switch (req->type) {
+	case DM_WQ_FLUSH_ALL:
+		__merge_pushback_list(md);
+		/* pass through */
+	case DM_WQ_FLUSH_DEFERRED:
+		__flush_deferred_io(md);
+		break;
+	default:
+		DMERR("dm_wq_work: unrecognised work type %d", req->type);
+		BUG();
+	}
+	up_write(&md->io_lock);
+}
+
+static void dm_wq_queue(struct mapped_device *md, int type, void *context,
+			struct dm_wq_req *req)
+{
+	req->type = type;
+	req->md = md;
+	req->context = context;
+	INIT_WORK(&req->work, dm_wq_work);
+	queue_work(md->wq, &req->work);
+}
+
+static void dm_queue_flush(struct mapped_device *md, int type, void *context)
+{
+	struct dm_wq_req req;
+
+	dm_wq_queue(md, type, context, &req);
+	flush_workqueue(md->wq);
+}
+
 /*
  * Swap in a new table (destroying old one).
  */
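Two details of these helpers are easy to miss. The DM_WQ_FLUSH_ALL case deliberately falls through (the "pass through" comment) into DM_WQ_FLUSH_DEFERRED, so a full flush merges the pushback list and then performs the same deferred-io flush. And dm_queue_flush() keeps its dm_wq_req on the stack, which is safe only because flush_workqueue() blocks until dm_wq_work() has finished with it; a fire-and-forget caller would need a heap-allocated request instead. A hypothetical asynchronous variant, for contrast (a sketch, not part of this patch):

/* Fire-and-forget sketch: the request must outlive the caller, so it
 * is heap-allocated here and freed by the handler. */
static void dm_wq_work_async(struct work_struct *work)
{
        struct dm_wq_req *req = container_of(work, struct dm_wq_req, work);

        /* ... handle req->type as dm_wq_work() does ... */
        kfree(req);
}

static int dm_wq_queue_async(struct mapped_device *md, int type)
{
        struct dm_wq_req *req = kmalloc(sizeof(*req), GFP_NOIO);

        if (!req)
                return -ENOMEM;
        req->type = type;
        req->md = md;
        req->context = NULL;
        INIT_WORK(&req->work, dm_wq_work_async);
        queue_work(md->wq, &req->work);
        return 0;
}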
@@ -1450,9 +1513,7 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
 
 	/* were we interrupted ? */
 	if (r < 0) {
-		down_write(&md->io_lock);
-		__flush_deferred_io(md);
-		up_write(&md->io_lock);
+		dm_queue_flush(md, DM_WQ_FLUSH_DEFERRED, NULL);
 
 		unlock_fs(md);
 		goto out; /* pushback list is already flushed, so skip flush */
@@ -1463,16 +1524,12 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
 	set_bit(DMF_SUSPENDED, &md->flags);
 
 flush_and_out:
-	if (r && noflush) {
+	if (r && noflush)
 		/*
 		 * Because there may be already I/Os in the pushback list,
 		 * flush them before return.
 		 */
-		down_write(&md->io_lock);
-		__merge_pushback_list(md);
-		__flush_deferred_io(md);
-		up_write(&md->io_lock);
-	}
+		dm_queue_flush(md, DM_WQ_FLUSH_ALL, NULL);
 
 out:
 	if (r && md->suspended_bdev) {
@@ -1504,9 +1561,7 @@ int dm_resume(struct mapped_device *md)
 	if (r)
 		goto out;
 
-	down_write(&md->io_lock);
-	__flush_deferred_io(md);
-	up_write(&md->io_lock);
+	dm_queue_flush(md, DM_WQ_FLUSH_DEFERRED, NULL);
 
 	unlock_fs(md);
 
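With the three call-site hunks above, every flush collapses to a single dm_queue_flush() call and the io_lock write lock is taken in exactly one place, dm_wq_work(). Since flush_workqueue() makes the hand-off synchronous, each call site keeps the same flushed-on-return behaviour as the inline code it replaces.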