aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/md/dm-thin.c
diff options
context:
space:
mode:
authorMike Snitzer <snitzer@redhat.com>2013-12-11 14:01:20 -0500
committerMike Snitzer <snitzer@redhat.com>2014-01-07 10:14:25 -0500
commitdaec338bbdaa96ba5b14c4777603e65ef74c769b (patch)
treea0798a5cddec99ad1bd7cda0b2b9a33164032e24 /drivers/md/dm-thin.c
parent8d30abff758b5f6c71343b7da6bb5de129a76c08 (diff)
dm thin: add mappings to end of prepared_* lists
Mappings could be processed in descending logical block order, particularly if buffered IO is used. This could adversely affect the latency of IO processing. Fix this by adding mappings to the end of the 'prepared_mappings' and 'prepared_discards' lists. Signed-off-by: Mike Snitzer <snitzer@redhat.com> Acked-by: Joe Thornber <ejt@redhat.com>
Diffstat (limited to 'drivers/md/dm-thin.c')
-rw-r--r--  drivers/md/dm-thin.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 1988019df5c9..efa3d42ac70a 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -535,7 +535,7 @@ static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
 	struct pool *pool = m->tc->pool;
 
 	if (m->quiesced && m->prepared) {
-		list_add(&m->list, &pool->prepared_mappings);
+		list_add_tail(&m->list, &pool->prepared_mappings);
 		wake_worker(pool);
 	}
 }
@@ -1058,7 +1058,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 
 	if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
 		spin_lock_irqsave(&pool->lock, flags);
-		list_add(&m->list, &pool->prepared_discards);
+		list_add_tail(&m->list, &pool->prepared_discards);
 		spin_unlock_irqrestore(&pool->lock, flags);
 		wake_worker(pool);
 	}
@@ -2919,7 +2919,7 @@ static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
 	if (!list_empty(&work)) {
 		spin_lock_irqsave(&pool->lock, flags);
 		list_for_each_entry_safe(m, tmp, &work, list)
-			list_add(&m->list, &pool->prepared_discards);
+			list_add_tail(&m->list, &pool->prepared_discards);
 		spin_unlock_irqrestore(&pool->lock, flags);
 		wake_worker(pool);
 	}