about | summary | refs | log | tree | commit | diff | stats
path: root/drivers/md
diff options
context:
space:
mode:
author    Joe Thornber <ejt@redhat.com>    2012-03-28 13:41:27 -0400
committer Alasdair G Kergon <agk@redhat.com>    2012-03-28 13:41:27 -0400
commit    905e51b39a5558706a6ed883fe104de3d417050b (patch)
tree      d8e1d9fcdba66942333f099618e6acc5be238538 /drivers/md
parent    31998ef19385c944600d9a981b96252f98204bee (diff)
dm thin: commit outstanding data every second
Commit unwritten data every second to prevent too much building up.
Released blocks don't become available until after the next commit (for
crash resilience).

Prior to this patch, commits were only triggered by a message to the
target or a REQ_{FLUSH,FUA} bio. This allowed far too big a position to
build up.

The interval is hard-coded to 1 second. This is a sensible setting. I'm
not making this user-configurable, since there isn't much to be gained
by tweaking this - and a lot lost by setting it far too high.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-thin.c | 28 ++++++++++++++++++++++++++--
 1 file changed, 26 insertions(+), 2 deletions(-)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 1791134cf477..bcb143396fe0 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -23,6 +23,7 @@
 #define DEFERRED_SET_SIZE 64
 #define MAPPING_POOL_SIZE 1024
 #define PRISON_CELLS 1024
+#define COMMIT_PERIOD HZ
 
 /*
  * The block size of the device holding pool data must be
@@ -520,8 +521,10 @@ struct pool {
 
 	struct workqueue_struct *wq;
 	struct work_struct worker;
+	struct delayed_work waker;
 
 	unsigned ref_count;
+	unsigned long last_commit_jiffies;
 
 	spinlock_t lock;
 	struct bio_list deferred_bios;
@@ -1271,6 +1274,12 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
 	}
 }
 
+static int need_commit_due_to_time(struct pool *pool)
+{
+	return jiffies < pool->last_commit_jiffies ||
+	       jiffies > pool->last_commit_jiffies + COMMIT_PERIOD;
+}
+
 static void process_deferred_bios(struct pool *pool)
 {
 	unsigned long flags;
@@ -1312,7 +1321,7 @@ static void process_deferred_bios(struct pool *pool)
 	bio_list_init(&pool->deferred_flush_bios);
 	spin_unlock_irqrestore(&pool->lock, flags);
 
-	if (bio_list_empty(&bios))
+	if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))
 		return;
 
 	r = dm_pool_commit_metadata(pool->pmd);
@@ -1323,6 +1332,7 @@ static void process_deferred_bios(struct pool *pool)
 			bio_io_error(bio);
 		return;
 	}
+	pool->last_commit_jiffies = jiffies;
 
 	while ((bio = bio_list_pop(&bios)))
 		generic_make_request(bio);
@@ -1336,6 +1346,17 @@ static void do_worker(struct work_struct *ws)
 	process_deferred_bios(pool);
 }
 
+/*
+ * We want to commit periodically so that not too much
+ * unwritten data builds up.
+ */
+static void do_waker(struct work_struct *ws)
+{
+	struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
+	wake_worker(pool);
+	queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
+}
+
 /*----------------------------------------------------------------*/
 
 /*
@@ -1545,6 +1566,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 	}
 
 	INIT_WORK(&pool->worker, do_worker);
+	INIT_DELAYED_WORK(&pool->waker, do_waker);
 	spin_lock_init(&pool->lock);
 	bio_list_init(&pool->deferred_bios);
 	bio_list_init(&pool->deferred_flush_bios);
@@ -1571,6 +1593,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 		goto bad_endio_hook_pool;
 	}
 	pool->ref_count = 1;
+	pool->last_commit_jiffies = jiffies;
 	pool->pool_md = pool_md;
 	pool->md_dev = metadata_dev;
 	__pool_table_insert(pool);
@@ -1900,7 +1923,7 @@ static void pool_resume(struct dm_target *ti)
 	__requeue_bios(pool);
 	spin_unlock_irqrestore(&pool->lock, flags);
 
-	wake_worker(pool);
+	do_waker(&pool->waker.work);
 }
 
 static void pool_postsuspend(struct dm_target *ti)
@@ -1909,6 +1932,7 @@ static void pool_postsuspend(struct dm_target *ti)
 	struct pool_c *pt = ti->private;
 	struct pool *pool = pt->pool;
 
+	cancel_delayed_work(&pool->waker);
 	flush_workqueue(pool->wq);
 
 	r = dm_pool_commit_metadata(pool->pmd);