Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-cache-target.c |   3
-rw-r--r--  drivers/md/dm-crypt.c        |  61
-rw-r--r--  drivers/md/dm-mpath.c        |  12
-rw-r--r--  drivers/md/dm-thin.c         | 106
-rw-r--r--  drivers/md/dm-verity.c       |  15
-rw-r--r--  drivers/md/md.c              |   8
-rw-r--r--  drivers/md/raid10.c          |  13
7 files changed, 143 insertions(+), 75 deletions(-)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1bf4a71919ec..5f054c44b485 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2178,6 +2178,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 	ti->num_discard_bios = 1;
 	ti->discards_supported = true;
 	ti->discard_zeroes_data_unsupported = true;
+	/* Discard bios must be split on a block boundary */
+	ti->split_discard_bios = true;
 
 	cache->features = ca->features;
 	ti->per_bio_data_size = get_per_bio_data_size(cache);
@@ -2488,6 +2490,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 
 	} else {
 		inc_hit_counter(cache, bio);
+		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
 
 		if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
 		    !is_dirty(cache, lookup_result.cblock))
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 784695d22fde..53b213226c01 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -19,7 +19,6 @@
 #include <linux/crypto.h>
 #include <linux/workqueue.h>
 #include <linux/backing-dev.h>
-#include <linux/percpu.h>
 #include <linux/atomic.h>
 #include <linux/scatterlist.h>
 #include <asm/page.h>
@@ -43,6 +42,7 @@ struct convert_context {
 	struct bvec_iter iter_out;
 	sector_t cc_sector;
 	atomic_t cc_pending;
+	struct ablkcipher_request *req;
 };
 
 /*
@@ -111,15 +111,7 @@ struct iv_tcw_private {
 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
 
 /*
- * Duplicated per-CPU state for cipher.
- */
-struct crypt_cpu {
-	struct ablkcipher_request *req;
-};
-
-/*
- * The fields in here must be read only after initialization,
- * changing state should be in crypt_cpu.
+ * The fields in here must be read only after initialization.
  */
 struct crypt_config {
 	struct dm_dev *dev;
@@ -150,12 +142,6 @@ struct crypt_config {
 	sector_t iv_offset;
 	unsigned int iv_size;
 
-	/*
-	 * Duplicated per cpu state. Access through
-	 * per_cpu_ptr() only.
-	 */
-	struct crypt_cpu __percpu *cpu;
-
 	/* ESSIV: struct crypto_cipher *essiv_tfm */
 	void *iv_private;
 	struct crypto_ablkcipher **tfms;
@@ -192,11 +178,6 @@ static void clone_init(struct dm_crypt_io *, struct bio *);
 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
 static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
 
-static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
-{
-	return this_cpu_ptr(cc->cpu);
-}
-
 /*
  * Use this to access cipher attributes that are the same for each CPU.
  */
@@ -903,16 +884,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
 static void crypt_alloc_req(struct crypt_config *cc,
 			    struct convert_context *ctx)
 {
-	struct crypt_cpu *this_cc = this_crypt_config(cc);
 	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
 
-	if (!this_cc->req)
-		this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
+	if (!ctx->req)
+		ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
 
-	ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]);
-	ablkcipher_request_set_callback(this_cc->req,
+	ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
+	ablkcipher_request_set_callback(ctx->req,
 	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
-	    kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
+	    kcryptd_async_done, dmreq_of_req(cc, ctx->req));
 }
 
 /*
@@ -921,7 +901,6 @@ static void crypt_alloc_req(struct crypt_config *cc,
 static int crypt_convert(struct crypt_config *cc,
 			 struct convert_context *ctx)
 {
-	struct crypt_cpu *this_cc = this_crypt_config(cc);
 	int r;
 
 	atomic_set(&ctx->cc_pending, 1);
@@ -932,7 +911,7 @@ static int crypt_convert(struct crypt_config *cc,
 
 		atomic_inc(&ctx->cc_pending);
 
-		r = crypt_convert_block(cc, ctx, this_cc->req);
+		r = crypt_convert_block(cc, ctx, ctx->req);
 
 		switch (r) {
 		/* async */
@@ -941,7 +920,7 @@ static int crypt_convert(struct crypt_config *cc,
 			reinit_completion(&ctx->restart);
 			/* fall through*/
 		case -EINPROGRESS:
-			this_cc->req = NULL;
+			ctx->req = NULL;
 			ctx->cc_sector++;
 			continue;
 
@@ -1040,6 +1019,7 @@ static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
 	io->sector = sector;
 	io->error = 0;
 	io->base_io = NULL;
+	io->ctx.req = NULL;
 	atomic_set(&io->io_pending, 0);
 
 	return io;
@@ -1065,6 +1045,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 	if (!atomic_dec_and_test(&io->io_pending))
 		return;
 
+	if (io->ctx.req)
+		mempool_free(io->ctx.req, cc->req_pool);
 	mempool_free(io, cc->io_pool);
 
 	if (likely(!base_io))
@@ -1492,8 +1474,6 @@ static int crypt_wipe_key(struct crypt_config *cc)
 static void crypt_dtr(struct dm_target *ti)
 {
 	struct crypt_config *cc = ti->private;
-	struct crypt_cpu *cpu_cc;
-	int cpu;
 
 	ti->private = NULL;
 
@@ -1505,13 +1485,6 @@ static void crypt_dtr(struct dm_target *ti)
 	if (cc->crypt_queue)
 		destroy_workqueue(cc->crypt_queue);
 
-	if (cc->cpu)
-		for_each_possible_cpu(cpu) {
-			cpu_cc = per_cpu_ptr(cc->cpu, cpu);
-			if (cpu_cc->req)
-				mempool_free(cpu_cc->req, cc->req_pool);
-		}
-
 	crypt_free_tfms(cc);
 
 	if (cc->bs)
@@ -1530,9 +1503,6 @@ static void crypt_dtr(struct dm_target *ti)
 	if (cc->dev)
 		dm_put_device(ti, cc->dev);
 
-	if (cc->cpu)
-		free_percpu(cc->cpu);
-
 	kzfree(cc->cipher);
 	kzfree(cc->cipher_string);
 
@@ -1588,13 +1558,6 @@ static int crypt_ctr_cipher(struct dm_target *ti,
 	if (tmp)
 		DMWARN("Ignoring unexpected additional cipher options");
 
-	cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
-				 __alignof__(struct crypt_cpu));
-	if (!cc->cpu) {
-		ti->error = "Cannot allocate per cpu state";
-		goto bad_mem;
-	}
-
 	/*
 	 * For compatibility with the original dm-crypt mapping format, if
 	 * only the cipher name is supplied, use cbc-plain.
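
The dm-crypt changes above retire the duplicated per-CPU scratch state: a request allocated on one CPU could be completed or resumed on another, so the in-flight ablkcipher request now lives in the per-I/O convert_context, allocated lazily and freed once when the I/O's last reference drops. A minimal userspace sketch of that ownership pattern (illustrative names only, not the kernel API):

/*
 * Sketch of per-request scratch ownership: state belongs to the
 * request context, not to whichever CPU happened to start it.
 */
#include <stdio.h>
#include <stdlib.h>

struct cipher_req { int dummy; };      /* stand-in for ablkcipher_request */

struct convert_ctx {
	struct cipher_req *req;        /* owned by this I/O, not by a CPU */
};

static void ctx_alloc_req(struct convert_ctx *ctx)
{
	if (!ctx->req)                 /* lazy: first conversion step allocates */
		ctx->req = malloc(sizeof(*ctx->req));
}

static void ctx_complete(struct convert_ctx *ctx)
{
	free(ctx->req);                /* freed with the request, on any CPU */
	ctx->req = NULL;
}

int main(void)
{
	struct convert_ctx ctx = { .req = NULL };

	ctx_alloc_req(&ctx);           /* may run on CPU A */
	/* ... async crypto completes, possibly on CPU B ... */
	ctx_complete(&ctx);            /* still tears down the right object */
	printf("per-request scratch state freed exactly once\n");
	return 0;
}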
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index aa009e865871..ebfa411d1a7d 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -445,11 +445,11 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
 	else
 		m->saved_queue_if_no_path = queue_if_no_path;
 	m->queue_if_no_path = queue_if_no_path;
-	if (!m->queue_if_no_path)
-		dm_table_run_md_queue_async(m->ti->table);
-
 	spin_unlock_irqrestore(&m->lock, flags);
 
+	if (!queue_if_no_path)
+		dm_table_run_md_queue_async(m->ti->table);
+
 	return 0;
 }
 
@@ -954,7 +954,7 @@ out:
  */
static int reinstate_path(struct pgpath *pgpath)
 {
-	int r = 0;
+	int r = 0, run_queue = 0;
 	unsigned long flags;
 	struct multipath *m = pgpath->pg->m;
 
@@ -978,7 +978,7 @@ static int reinstate_path(struct pgpath *pgpath)
 
 	if (!m->nr_valid_paths++) {
 		m->current_pgpath = NULL;
-		dm_table_run_md_queue_async(m->ti->table);
+		run_queue = 1;
 	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
 		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
 			m->pg_init_in_progress++;
@@ -991,6 +991,8 @@ static int reinstate_path(struct pgpath *pgpath)
 
 out:
 	spin_unlock_irqrestore(&m->lock, flags);
+	if (run_queue)
+		dm_table_run_md_queue_async(m->ti->table);
 
 	return r;
 }
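
Both dm-mpath hunks apply the same idiom: decide under m->lock whether the queue needs a kick, but call dm_table_run_md_queue_async() only after the spinlock is dropped. A small pthread-based sketch of the idiom, with made-up names:

/* Decide under the lock, act after dropping it. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int nr_valid_paths;

static void run_queue_async(void)   /* stand-in for dm_table_run_md_queue_async() */
{
	printf("kicking the request queue\n");
}

static void reinstate_path(void)
{
	int run_queue = 0;

	pthread_mutex_lock(&lock);
	if (!nr_valid_paths++)      /* the decision is made under the lock... */
		run_queue = 1;
	pthread_mutex_unlock(&lock);

	if (run_queue)              /* ...the side effect happens outside it */
		run_queue_async();
}

int main(void)
{
	reinstate_path();
	return 0;
}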
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 53728be84dee..242ac2ea5f29 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -27,6 +27,9 @@
 #define MAPPING_POOL_SIZE 1024
 #define PRISON_CELLS 1024
 #define COMMIT_PERIOD HZ
+#define NO_SPACE_TIMEOUT_SECS 60
+
+static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;
 
 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
 		"A percentage of time allocated for copy on write");
@@ -175,6 +178,7 @@ struct pool {
 	struct workqueue_struct *wq;
 	struct work_struct worker;
 	struct delayed_work waker;
+	struct delayed_work no_space_timeout;
 
 	unsigned long last_commit_jiffies;
 	unsigned ref_count;
@@ -232,6 +236,13 @@ struct thin_c {
 	struct bio_list deferred_bio_list;
 	struct bio_list retry_on_resume_list;
 	struct rb_root sort_bio_list; /* sorted list of deferred bios */
+
+	/*
+	 * Ensures the thin is not destroyed until the worker has finished
+	 * iterating the active_thins list.
+	 */
+	atomic_t refcount;
+	struct completion can_destroy;
 };
 
 /*----------------------------------------------------------------*/
@@ -928,7 +939,7 @@ static int commit(struct pool *pool)
 {
 	int r;
 
-	if (get_pool_mode(pool) != PM_WRITE)
+	if (get_pool_mode(pool) >= PM_READ_ONLY)
 		return -EINVAL;
 
 	r = dm_pool_commit_metadata(pool->pmd);
@@ -1486,6 +1497,45 @@ static void process_thin_deferred_bios(struct thin_c *tc)
 	blk_finish_plug(&plug);
 }
 
+static void thin_get(struct thin_c *tc);
+static void thin_put(struct thin_c *tc);
+
+/*
+ * We can't hold rcu_read_lock() around code that can block.  So we
+ * find a thin with the rcu lock held; bump a refcount; then drop
+ * the lock.
+ */
+static struct thin_c *get_first_thin(struct pool *pool)
+{
+	struct thin_c *tc = NULL;
+
+	rcu_read_lock();
+	if (!list_empty(&pool->active_thins)) {
+		tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
+		thin_get(tc);
+	}
+	rcu_read_unlock();
+
+	return tc;
+}
+
+static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
+{
+	struct thin_c *old_tc = tc;
+
+	rcu_read_lock();
+	list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) {
+		thin_get(tc);
+		thin_put(old_tc);
+		rcu_read_unlock();
+		return tc;
+	}
+	thin_put(old_tc);
+	rcu_read_unlock();
+
+	return NULL;
+}
+
 static void process_deferred_bios(struct pool *pool)
 {
 	unsigned long flags;
@@ -1493,10 +1543,11 @@ static void process_deferred_bios(struct pool *pool)
 	struct bio_list bios;
 	struct thin_c *tc;
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(tc, &pool->active_thins, list)
-		process_thin_deferred_bios(tc);
-	rcu_read_unlock();
+	tc = get_first_thin(pool);
+	while (tc) {
+		process_thin_deferred_bios(tc);
+		tc = get_next_thin(pool, tc);
+	}
 
 	/*
 	 * If there are any deferred flush bios, we must commit
@@ -1543,6 +1594,20 @@ static void do_waker(struct work_struct *ws)
 	queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
 }
 
+/*
+ * We're holding onto IO to allow userland time to react.  After the
+ * timeout either the pool will have been resized (and thus back in
+ * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO.
+ */
+static void do_no_space_timeout(struct work_struct *ws)
+{
+	struct pool *pool = container_of(to_delayed_work(ws), struct pool,
+					 no_space_timeout);
+
+	if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space)
+		set_pool_mode(pool, PM_READ_ONLY);
+}
+
 /*----------------------------------------------------------------*/
 
 struct noflush_work {
@@ -1578,7 +1643,7 @@ static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
 {
 	struct noflush_work w;
 
-	INIT_WORK(&w.worker, fn);
+	INIT_WORK_ONSTACK(&w.worker, fn);
 	w.tc = tc;
 	atomic_set(&w.complete, 0);
 	init_waitqueue_head(&w.wait);
@@ -1607,6 +1672,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 	struct pool_c *pt = pool->ti->private;
 	bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
 	enum pool_mode old_mode = get_pool_mode(pool);
+	unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
 
 	/*
 	 * Never allow the pool to transition to PM_WRITE mode if user
@@ -1668,6 +1734,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 		pool->process_discard = process_discard;
 		pool->process_prepared_mapping = process_prepared_mapping;
 		pool->process_prepared_discard = process_prepared_discard_passdown;
+
+		if (!pool->pf.error_if_no_space && no_space_timeout)
+			queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
 		break;
 
 	case PM_WRITE:
@@ -2053,6 +2122,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 
 	INIT_WORK(&pool->worker, do_worker);
 	INIT_DELAYED_WORK(&pool->waker, do_waker);
+	INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
 	spin_lock_init(&pool->lock);
 	bio_list_init(&pool->deferred_flush_bios);
 	INIT_LIST_HEAD(&pool->prepared_mappings);
@@ -2615,6 +2685,7 @@ static void pool_postsuspend(struct dm_target *ti)
 	struct pool *pool = pt->pool;
 
 	cancel_delayed_work(&pool->waker);
+	cancel_delayed_work(&pool->no_space_timeout);
 	flush_workqueue(pool->wq);
 	(void) commit(pool);
 }
@@ -3061,11 +3132,25 @@ static struct target_type pool_target = {
 /*----------------------------------------------------------------
 * Thin target methods
 *--------------------------------------------------------------*/
+static void thin_get(struct thin_c *tc)
+{
+	atomic_inc(&tc->refcount);
+}
+
+static void thin_put(struct thin_c *tc)
+{
+	if (atomic_dec_and_test(&tc->refcount))
+		complete(&tc->can_destroy);
+}
+
 static void thin_dtr(struct dm_target *ti)
 {
 	struct thin_c *tc = ti->private;
 	unsigned long flags;
 
+	thin_put(tc);
+	wait_for_completion(&tc->can_destroy);
+
 	spin_lock_irqsave(&tc->pool->lock, flags);
 	list_del_rcu(&tc->list);
 	spin_unlock_irqrestore(&tc->pool->lock, flags);
@@ -3101,6 +3186,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	struct thin_c *tc;
 	struct dm_dev *pool_dev, *origin_dev;
 	struct mapped_device *pool_md;
+	unsigned long flags;
 
 	mutex_lock(&dm_thin_pool_table.mutex);
 
@@ -3191,9 +3277,12 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
 	mutex_unlock(&dm_thin_pool_table.mutex);
 
-	spin_lock(&tc->pool->lock);
+	atomic_set(&tc->refcount, 1);
+	init_completion(&tc->can_destroy);
+
+	spin_lock_irqsave(&tc->pool->lock, flags);
 	list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
-	spin_unlock(&tc->pool->lock);
+	spin_unlock_irqrestore(&tc->pool->lock, flags);
 	/*
 	 * This synchronize_rcu() call is needed here otherwise we risk a
 	 * wake_worker() call finding no bios to process (because the newly
@@ -3422,6 +3511,9 @@ static void dm_thin_exit(void)
 module_init(dm_thin_init);
 module_exit(dm_thin_exit);
 
+module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");
+
 MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
 MODULE_LICENSE("GPL");
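
The thin-target hunks combine three things: a no-space timeout driven by delayed work and the runtime-writable no_space_timeout parameter (S_IRUGO | S_IWUSR), a worker that walks active_thins by taking a reference per device instead of holding one long RCU read-side section, and a destructor that blocks on a completion until the last reference is gone. The teardown is the subtle part; here is a hedged userspace sketch of it, with pthread primitives standing in for kernel ones and all names illustrative:

/* Refcount-plus-completion teardown: destroy only after all holders drop. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct thin {
	atomic_int refcount;
	pthread_mutex_t mtx;
	pthread_cond_t can_destroy;    /* stand-in for struct completion */
};

static void thin_get(struct thin *tc)
{
	atomic_fetch_add(&tc->refcount, 1);
}

static void thin_put(struct thin *tc)
{
	if (atomic_fetch_sub(&tc->refcount, 1) == 1) {  /* last reference */
		pthread_mutex_lock(&tc->mtx);
		pthread_cond_signal(&tc->can_destroy);
		pthread_mutex_unlock(&tc->mtx);
	}
}

static void *worker(void *arg)
{
	struct thin *tc = arg;
	/* ... process deferred bios for tc ... */
	thin_put(tc);                  /* worker done with its reference */
	return NULL;
}

int main(void)
{
	struct thin tc = { .mtx = PTHREAD_MUTEX_INITIALIZER,
			   .can_destroy = PTHREAD_COND_INITIALIZER };
	pthread_t t;

	atomic_init(&tc.refcount, 1);  /* constructor's reference */
	thin_get(&tc);                 /* worker takes one too */
	pthread_create(&t, NULL, worker, &tc);

	/* destructor: drop our reference, then wait out the rest */
	pthread_mutex_lock(&tc.mtx);
	atomic_fetch_sub(&tc.refcount, 1);
	while (atomic_load(&tc.refcount) > 0)
		pthread_cond_wait(&tc.can_destroy, &tc.mtx);
	pthread_mutex_unlock(&tc.mtx);

	pthread_join(t, NULL);
	printf("safe to destroy\n");
	return 0;
}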
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 796007a5e0e1..7a7bab8947ae 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -330,15 +330,17 @@ test_block_hash:
 			return r;
 		}
 	}
-
 	todo = 1 << v->data_dev_block_bits;
-	while (io->iter.bi_size) {
+	do {
 		u8 *page;
+		unsigned len;
 		struct bio_vec bv = bio_iter_iovec(bio, io->iter);
 
 		page = kmap_atomic(bv.bv_page);
-		r = crypto_shash_update(desc, page + bv.bv_offset,
-					bv.bv_len);
+		len = bv.bv_len;
+		if (likely(len >= todo))
+			len = todo;
+		r = crypto_shash_update(desc, page + bv.bv_offset, len);
 		kunmap_atomic(page);
 
 		if (r < 0) {
@@ -346,8 +348,9 @@ test_block_hash:
 			return r;
 		}
 
-		bio_advance_iter(bio, &io->iter, bv.bv_len);
-	}
+		bio_advance_iter(bio, &io->iter, len);
+		todo -= len;
+	} while (todo);
 
 	if (!v->version) {
 		r = crypto_shash_update(desc, v->salt, v->salt_size);
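
The dm-verity change replaces the "hash whatever the biovec holds" loop with one that counts down todo, a full data block, clamping each step to the block boundary so a biovec spanning two blocks no longer feeds the next block's bytes into this block's digest. A tiny standalone sketch of the clamping arithmetic (made-up sizes):

/* Hash exactly one 4096-byte block from biovecs that may straddle it. */
#include <stdio.h>

int main(void)
{
	unsigned todo = 4096;                 /* 1 << data_dev_block_bits */
	unsigned bv_lens[] = { 3000, 3000 };  /* biovecs may cross the boundary */
	unsigned i = 0;

	do {
		unsigned len = bv_lens[i++];
		if (len >= todo)
			len = todo;           /* clamp to the block boundary */
		printf("hash %u bytes\n", len);   /* 3000, then 1096 */
		todo -= len;
	} while (todo);

	return 0;
}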
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8fda38d23e38..2382cfc9bb3f 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -7381,8 +7381,10 @@ void md_do_sync(struct md_thread *thread)
 	/* just incase thread restarts... */
 	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
 		return;
-	if (mddev->ro) /* never try to sync a read-only array */
+	if (mddev->ro) {/* never try to sync a read-only array */
+		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 		return;
+	}
 
 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
 		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
@@ -7824,6 +7826,7 @@ void md_check_recovery(struct mddev *mddev)
 			/* There is no thread, but we need to call
 			 * ->spare_active and clear saved_raid_disk
 			 */
+			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 			md_reap_sync_thread(mddev);
 			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 			goto unlock;
@@ -8516,7 +8519,8 @@ static int md_notify_reboot(struct notifier_block *this,
 		if (mddev_trylock(mddev)) {
 			if (mddev->pers)
 				__md_stop_writes(mddev);
-			mddev->safemode = 2;
+			if (mddev->persistent)
+				mddev->safemode = 2;
 			mddev_unlock(mddev);
 		}
 		need_delay = 1;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 33fc408e5eac..cb882aae9e20 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1172,6 +1172,13 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
 	int max_sectors;
 	int sectors;
 
+	/*
+	 * Register the new request and wait if the reconstruction
+	 * thread has put up a bar for new requests.
+	 * Continue immediately if no resync is active currently.
+	 */
+	wait_barrier(conf);
+
 	sectors = bio_sectors(bio);
 	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
 	    bio->bi_iter.bi_sector < conf->reshape_progress &&
@@ -1552,12 +1559,6 @@ static void make_request(struct mddev *mddev, struct bio *bio)
 
 	md_write_start(mddev, bio);
 
-	/*
-	 * Register the new request and wait if the reconstruction
-	 * thread has put up a bar for new requests.
-	 * Continue immediately if no resync is active currently.
-	 */
-	wait_barrier(conf);
 
 	do {
 