author	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2014-05-19 20:49:41 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2014-05-19 20:49:41 -0400
commit	cea35899a02226b70baeee7e179d7a7c6e814b98 (patch)
tree	19621a7ae67df5f398071791d52489a464b1b30a	/drivers/md/dm-thin.c
parent	53971a86d2db89f32859dc26bd6594b5bc665d5b (diff)
parent	d6d211db37e75de2ddc3a4f979038c40df7cc79c (diff)
Merge 3.15-rc5 into usb-next
We need these USB fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/md/dm-thin.c')
-rw-r--r--	drivers/md/dm-thin.c	77
1 file changed, 71 insertions(+), 6 deletions(-)
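For orientation before the diff: the dm-thin change picked up by this merge stops iterating pool->active_thins entirely under rcu_read_lock(), because process_thin_deferred_bios() can block. Instead, each thin_c gets a refcount and a can_destroy completion; the worker takes a reference under the RCU lock, drops the lock, does the blocking work, then moves on, and thin_dtr() drops its own reference and waits on the completion so the thin is not freed while the worker still holds it. Below is a minimal userspace sketch of that refcount-plus-completion pattern, assuming illustrative names (struct elem, elem_get/elem_put) and using C11 atomics with a pthread condvar standing in for the kernel's struct completion; it is not the kernel code in the patch.

/*
 * Userspace sketch only, not the kernel code: each element carries a
 * refcount plus a "can_destroy" signal, so a walker can pin an element,
 * drop the list lock, block while processing it, and the destroyer
 * waits until the last reference is gone.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct elem {
	atomic_int refcount;
	pthread_mutex_t lock;
	pthread_cond_t can_destroy;	/* signalled when refcount hits 0 */
};

static void elem_init(struct elem *e)
{
	atomic_init(&e->refcount, 1);		/* creator's reference */
	pthread_mutex_init(&e->lock, NULL);
	pthread_cond_init(&e->can_destroy, NULL);
}

static void elem_get(struct elem *e)
{
	atomic_fetch_add(&e->refcount, 1);
}

static void elem_put(struct elem *e)
{
	/* fetch_sub returns the old value: 1 means we dropped the last ref */
	if (atomic_fetch_sub(&e->refcount, 1) == 1) {
		pthread_mutex_lock(&e->lock);
		pthread_cond_broadcast(&e->can_destroy);	/* like complete() */
		pthread_mutex_unlock(&e->lock);
	}
}

/* Destroyer: drop our reference, then wait for all walkers to finish. */
static void elem_destroy(struct elem *e)
{
	elem_put(e);
	pthread_mutex_lock(&e->lock);
	while (atomic_load(&e->refcount) != 0)
		pthread_cond_wait(&e->can_destroy, &e->lock);
	pthread_mutex_unlock(&e->lock);
	printf("element safe to free\n");
}

int main(void)
{
	struct elem e;

	elem_init(&e);
	elem_get(&e);		/* walker pins the element ...              */
	/*			 * ... and may block while working on it    */
	elem_put(&e);		/* ... then drops its pin when done          */
	elem_destroy(&e);	/* creator waits, then tears it down         */
	return 0;
}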
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 53728be84dee..13abade76ad9 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -232,6 +232,13 @@ struct thin_c {
 	struct bio_list deferred_bio_list;
 	struct bio_list retry_on_resume_list;
 	struct rb_root sort_bio_list; /* sorted list of deferred bios */
+
+	/*
+	 * Ensures the thin is not destroyed until the worker has finished
+	 * iterating the active_thins list.
+	 */
+	atomic_t refcount;
+	struct completion can_destroy;
 };
 
 /*----------------------------------------------------------------*/
@@ -1486,6 +1493,45 @@ static void process_thin_deferred_bios(struct thin_c *tc)
 	blk_finish_plug(&plug);
 }
 
+static void thin_get(struct thin_c *tc);
+static void thin_put(struct thin_c *tc);
+
+/*
+ * We can't hold rcu_read_lock() around code that can block. So we
+ * find a thin with the rcu lock held; bump a refcount; then drop
+ * the lock.
+ */
+static struct thin_c *get_first_thin(struct pool *pool)
+{
+	struct thin_c *tc = NULL;
+
+	rcu_read_lock();
+	if (!list_empty(&pool->active_thins)) {
+		tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
+		thin_get(tc);
+	}
+	rcu_read_unlock();
+
+	return tc;
+}
+
+static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
+{
+	struct thin_c *old_tc = tc;
+
+	rcu_read_lock();
+	list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) {
+		thin_get(tc);
+		thin_put(old_tc);
+		rcu_read_unlock();
+		return tc;
+	}
+	thin_put(old_tc);
+	rcu_read_unlock();
+
+	return NULL;
+}
+
 static void process_deferred_bios(struct pool *pool)
 {
 	unsigned long flags;
@@ -1493,10 +1539,11 @@ static void process_deferred_bios(struct pool *pool)
 	struct bio_list bios;
 	struct thin_c *tc;
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(tc, &pool->active_thins, list)
-		process_thin_deferred_bios(tc);
-	rcu_read_unlock();
+	tc = get_first_thin(pool);
+	while (tc) {
+		process_thin_deferred_bios(tc);
+		tc = get_next_thin(pool, tc);
+	}
 
 	/*
 	 * If there are any deferred flush bios, we must commit
@@ -1578,7 +1625,7 @@ static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
 {
 	struct noflush_work w;
 
-	INIT_WORK(&w.worker, fn);
+	INIT_WORK_ONSTACK(&w.worker, fn);
 	w.tc = tc;
 	atomic_set(&w.complete, 0);
 	init_waitqueue_head(&w.wait);
@@ -3061,11 +3108,25 @@ static struct target_type pool_target = {
 /*----------------------------------------------------------------
  * Thin target methods
  *--------------------------------------------------------------*/
+static void thin_get(struct thin_c *tc)
+{
+	atomic_inc(&tc->refcount);
+}
+
+static void thin_put(struct thin_c *tc)
+{
+	if (atomic_dec_and_test(&tc->refcount))
+		complete(&tc->can_destroy);
+}
+
 static void thin_dtr(struct dm_target *ti)
 {
 	struct thin_c *tc = ti->private;
 	unsigned long flags;
 
+	thin_put(tc);
+	wait_for_completion(&tc->can_destroy);
+
 	spin_lock_irqsave(&tc->pool->lock, flags);
 	list_del_rcu(&tc->list);
 	spin_unlock_irqrestore(&tc->pool->lock, flags);
@@ -3101,6 +3162,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	struct thin_c *tc;
 	struct dm_dev *pool_dev, *origin_dev;
 	struct mapped_device *pool_md;
+	unsigned long flags;
 
 	mutex_lock(&dm_thin_pool_table.mutex);
 
@@ -3191,9 +3253,12 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
 	mutex_unlock(&dm_thin_pool_table.mutex);
 
-	spin_lock(&tc->pool->lock);
+	atomic_set(&tc->refcount, 1);
+	init_completion(&tc->can_destroy);
+
+	spin_lock_irqsave(&tc->pool->lock, flags);
 	list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
-	spin_unlock(&tc->pool->lock);
+	spin_unlock_irqrestore(&tc->pool->lock, flags);
 	/*
 	 * This synchronize_rcu() call is needed here otherwise we risk a
 	 * wake_worker() call finding no bios to process (because the newly