author		Linus Torvalds <torvalds@linux-foundation.org>	2014-05-21 04:57:31 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-05-21 04:57:31 -0400
commit		23de4a7af7bc3d687f783a612b8e824865b7e6ce (patch)
tree		57a8f5788107e8e712fe06231c0ddef278269ec8 /drivers/md
parent		31a3fcab118f169613777c913f6a35b1ab138591 (diff)
parent		4cdd2ad78098244c1bc9ec4374ea1c225fd1cd6f (diff)
Merge tag 'dm-3.15-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper fixes from Mike Snitzer:
"A dm-crypt fix for a cpu hotplug crash that switches from using
per-cpu data to a mempool allocation (which offers allocation with cpu
locality, and there is no inter-cpu communication on slab allocation).
A couple dm-thinp stable fixes to address "out-of-data-space" issues.
A dm-multipath fix for a LOCKDEP warning introduced in 3.15-rc1"
* tag 'dm-3.15-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
dm mpath: fix lock order inconsistency in multipath_ioctl
dm thin: add timeout to stop out-of-data-space mode holding IO forever
dm thin: allow metadata commit if pool is in PM_OUT_OF_DATA_SPACE mode
dm crypt: fix cpu hotplug crash by removing per-cpu structure
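A note on the dm-crypt fix for readers skimming the diff below: the old code cached an in-flight ablkcipher_request in per-CPU storage, which CPU hot-unplug could invalidate while the request was still pending. The patch moves that pointer into the per-request convert_context and backs it with the pre-existing request mempool. A minimal sketch of the resulting allocation pattern follows; the mempool_* calls are the real <linux/mempool.h> API, but everything named demo_* is a hypothetical stand-in, not dm-crypt's actual code:

#include <linux/crypto.h>
#include <linux/gfp.h>
#include <linux/mempool.h>

/* demo_ctx stands in for dm-crypt's convert_context: the request now
 * travels with the I/O it belongs to instead of with the CPU it
 * happened to start on. */
struct demo_ctx {
	struct ablkcipher_request *req;	/* lazily allocated, may stay NULL */
};

static void demo_alloc_req(mempool_t *req_pool, struct demo_ctx *ctx)
{
	/* Lazy allocation, as in crypt_alloc_req(): GFP_NOIO may sleep,
	 * but a mempool guarantees forward progress under memory
	 * pressure, and the same request is reused for every block of
	 * this I/O. */
	if (!ctx->req)
		ctx->req = mempool_alloc(req_pool, GFP_NOIO);
}

static void demo_free_req(mempool_t *req_pool, struct demo_ctx *ctx)
{
	/* As in crypt_dec_pending(): free only if the async path did not
	 * already take ownership (see the note after the dm-crypt diff). */
	if (ctx->req)
		mempool_free(ctx->req, req_pool);
}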
Diffstat (limited to 'drivers/md')
 drivers/md/dm-crypt.c | 61
 drivers/md/dm-mpath.c |  2
 drivers/md/dm-thin.c  | 23
 3 files changed, 35 insertions(+), 51 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 784695d22fde..53b213226c01 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -19,7 +19,6 @@
 #include <linux/crypto.h>
 #include <linux/workqueue.h>
 #include <linux/backing-dev.h>
-#include <linux/percpu.h>
 #include <linux/atomic.h>
 #include <linux/scatterlist.h>
 #include <asm/page.h>
@@ -43,6 +42,7 @@ struct convert_context {
 	struct bvec_iter iter_out;
 	sector_t cc_sector;
 	atomic_t cc_pending;
+	struct ablkcipher_request *req;
 };
 
 /*
@@ -111,15 +111,7 @@ struct iv_tcw_private {
 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
 
 /*
- * Duplicated per-CPU state for cipher.
- */
-struct crypt_cpu {
-	struct ablkcipher_request *req;
-};
-
-/*
- * The fields in here must be read only after initialization,
- * changing state should be in crypt_cpu.
+ * The fields in here must be read only after initialization.
  */
 struct crypt_config {
 	struct dm_dev *dev;
@@ -150,12 +142,6 @@ struct crypt_config {
 	sector_t iv_offset;
 	unsigned int iv_size;
 
-	/*
-	 * Duplicated per cpu state. Access through
-	 * per_cpu_ptr() only.
-	 */
-	struct crypt_cpu __percpu *cpu;
-
 	/* ESSIV: struct crypto_cipher *essiv_tfm */
 	void *iv_private;
 	struct crypto_ablkcipher **tfms;
@@ -192,11 +178,6 @@ static void clone_init(struct dm_crypt_io *, struct bio *);
 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
 static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
 
-static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
-{
-	return this_cpu_ptr(cc->cpu);
-}
-
 /*
  * Use this to access cipher attributes that are the same for each CPU.
  */
@@ -903,16 +884,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
 static void crypt_alloc_req(struct crypt_config *cc,
 			    struct convert_context *ctx)
 {
-	struct crypt_cpu *this_cc = this_crypt_config(cc);
 	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
 
-	if (!this_cc->req)
-		this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
+	if (!ctx->req)
+		ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
 
-	ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]);
-	ablkcipher_request_set_callback(this_cc->req,
+	ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
+	ablkcipher_request_set_callback(ctx->req,
 	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
-	    kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
+	    kcryptd_async_done, dmreq_of_req(cc, ctx->req));
 }
 
 /*
@@ -921,7 +901,6 @@ static void crypt_alloc_req(struct crypt_config *cc,
 static int crypt_convert(struct crypt_config *cc,
 			 struct convert_context *ctx)
 {
-	struct crypt_cpu *this_cc = this_crypt_config(cc);
 	int r;
 
 	atomic_set(&ctx->cc_pending, 1);
@@ -932,7 +911,7 @@ static int crypt_convert(struct crypt_config *cc,
 
 		atomic_inc(&ctx->cc_pending);
 
-		r = crypt_convert_block(cc, ctx, this_cc->req);
+		r = crypt_convert_block(cc, ctx, ctx->req);
 
 		switch (r) {
 		/* async */
@@ -941,7 +920,7 @@ static int crypt_convert(struct crypt_config *cc,
 			reinit_completion(&ctx->restart);
 			/* fall through*/
 		case -EINPROGRESS:
-			this_cc->req = NULL;
+			ctx->req = NULL;
 			ctx->cc_sector++;
 			continue;
 
@@ -1040,6 +1019,7 @@ static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
 	io->sector = sector;
 	io->error = 0;
 	io->base_io = NULL;
+	io->ctx.req = NULL;
 	atomic_set(&io->io_pending, 0);
 
 	return io;
@@ -1065,6 +1045,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 	if (!atomic_dec_and_test(&io->io_pending))
 		return;
 
+	if (io->ctx.req)
+		mempool_free(io->ctx.req, cc->req_pool);
 	mempool_free(io, cc->io_pool);
 
 	if (likely(!base_io))
@@ -1492,8 +1474,6 @@ static int crypt_wipe_key(struct crypt_config *cc)
 static void crypt_dtr(struct dm_target *ti)
 {
 	struct crypt_config *cc = ti->private;
-	struct crypt_cpu *cpu_cc;
-	int cpu;
 
 	ti->private = NULL;
 
@@ -1505,13 +1485,6 @@ static void crypt_dtr(struct dm_target *ti)
 	if (cc->crypt_queue)
 		destroy_workqueue(cc->crypt_queue);
 
-	if (cc->cpu)
-		for_each_possible_cpu(cpu) {
-			cpu_cc = per_cpu_ptr(cc->cpu, cpu);
-			if (cpu_cc->req)
-				mempool_free(cpu_cc->req, cc->req_pool);
-		}
-
 	crypt_free_tfms(cc);
 
 	if (cc->bs)
@@ -1530,9 +1503,6 @@ static void crypt_dtr(struct dm_target *ti)
 	if (cc->dev)
 		dm_put_device(ti, cc->dev);
 
-	if (cc->cpu)
-		free_percpu(cc->cpu);
-
 	kzfree(cc->cipher);
 	kzfree(cc->cipher_string);
 
@@ -1588,13 +1558,6 @@ static int crypt_ctr_cipher(struct dm_target *ti,
 	if (tmp)
 		DMWARN("Ignoring unexpected additional cipher options");
 
-	cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
-				 __alignof__(struct crypt_cpu));
-	if (!cc->cpu) {
-		ti->error = "Cannot allocate per cpu state";
-		goto bad_mem;
-	}
-
 	/*
 	 * For compatibility with the original dm-crypt mapping format, if
 	 * only the cipher name is supplied, use cbc-plain.
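One subtlety in the dm-crypt hunks above: when crypt_convert_block() returns -EBUSY or -EINPROGRESS, the request has been handed to the async crypto path, so crypt_convert() clears ctx->req rather than freeing it, and crypt_dec_pending() later frees only what the submitter still owns. A hedged sketch of that handoff, reusing the hypothetical demo_ctx from the note after the pull message (demo_crypto_submit stands in for crypt_convert_block; the real -EBUSY path also waits on a completion before continuing, which is elided here):

#include <linux/errno.h>

/* Stand-in for crypt_convert_block(): returns 0 on synchronous
 * completion, -EINPROGRESS or -EBUSY when the request is in flight. */
int demo_crypto_submit(struct ablkcipher_request *req);

static int demo_convert(struct demo_ctx *ctx)
{
	int r = demo_crypto_submit(ctx->req);

	switch (r) {
	case -EBUSY:		/* backlogged: treated like in-flight here */
	case -EINPROGRESS:
		/* The completion callback owns the request now; forgetting
		 * our pointer keeps demo_free_req() from double-freeing. */
		ctx->req = NULL;
		return 0;
	case 0:
		return 0;	/* completed synchronously; we still own req */
	default:
		return r;	/* hard error */
	}
}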
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index aa009e865871..fa0f6cbd6a41 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1566,8 +1566,8 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
 		}
 		if (m->pg_init_required)
 			__pg_init_all_paths(m);
-		spin_unlock_irqrestore(&m->lock, flags);
 		dm_table_run_md_queue_async(m->ti->table);
+		spin_unlock_irqrestore(&m->lock, flags);
 	}
 
 	return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
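The dm-mpath hunk is a two-line move: dm_table_run_md_queue_async() now runs before m->lock is dropped, plausibly matching the nesting used elsewhere in dm-mpath; the commit subject calls the bug a lock order inconsistency. The general rule LOCKDEP enforces is that any pair of locks must always be acquired in one global order. A self-contained toy example of the inconsistency it reports (these locks and functions are ours, not dm-mpath's):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(a);
static DEFINE_SPINLOCK(b);

static void demo_path_one(void)
{
	spin_lock(&a);
	spin_lock(&b);		/* observed order: a -> b */
	spin_unlock(&b);
	spin_unlock(&a);
}

static void demo_path_two_buggy(void)
{
	spin_lock(&b);
	spin_lock(&a);		/* observed order: b -> a */
	spin_unlock(&a);
	spin_unlock(&b);
}

/* Once both paths have run, LOCKDEP reports the a/b order inconsistency,
 * because the two paths can deadlock if they race. The fix is always the
 * same: pick one order and make every path follow it, which is what the
 * multipath_ioctl() change does for its pair of locks. */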
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 13abade76ad9..2e71de8e0048 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -27,6 +27,7 @@
 #define MAPPING_POOL_SIZE 1024
 #define PRISON_CELLS 1024
 #define COMMIT_PERIOD HZ
+#define NO_SPACE_TIMEOUT (HZ * 60)
 
 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
 		"A percentage of time allocated for copy on write");
@@ -175,6 +176,7 @@ struct pool {
 	struct workqueue_struct *wq;
 	struct work_struct worker;
 	struct delayed_work waker;
+	struct delayed_work no_space_timeout;
 
 	unsigned long last_commit_jiffies;
 	unsigned ref_count;
@@ -935,7 +937,7 @@ static int commit(struct pool *pool)
 {
 	int r;
 
-	if (get_pool_mode(pool) != PM_WRITE)
+	if (get_pool_mode(pool) >= PM_READ_ONLY)
 		return -EINVAL;
 
 	r = dm_pool_commit_metadata(pool->pmd);
@@ -1590,6 +1592,20 @@ static void do_waker(struct work_struct *ws)
 	queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
 }
 
+/*
+ * We're holding onto IO to allow userland time to react.  After the
+ * timeout either the pool will have been resized (and thus back in
+ * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO.
+ */
+static void do_no_space_timeout(struct work_struct *ws)
+{
+	struct pool *pool = container_of(to_delayed_work(ws), struct pool,
+					 no_space_timeout);
+
+	if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space)
+		set_pool_mode(pool, PM_READ_ONLY);
+}
+
 /*----------------------------------------------------------------*/
 
 struct noflush_work {
@@ -1715,6 +1731,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 		pool->process_discard = process_discard;
 		pool->process_prepared_mapping = process_prepared_mapping;
 		pool->process_prepared_discard = process_prepared_discard_passdown;
+
+		if (!pool->pf.error_if_no_space)
+			queue_delayed_work(pool->wq, &pool->no_space_timeout, NO_SPACE_TIMEOUT);
 		break;
 
 	case PM_WRITE:
@@ -2100,6 +2119,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 
 	INIT_WORK(&pool->worker, do_worker);
 	INIT_DELAYED_WORK(&pool->waker, do_waker);
+	INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
 	spin_lock_init(&pool->lock);
 	bio_list_init(&pool->deferred_flush_bios);
 	INIT_LIST_HEAD(&pool->prepared_mappings);
@@ -2662,6 +2682,7 @@ static void pool_postsuspend(struct dm_target *ti)
 	struct pool *pool = pt->pool;
 
 	cancel_delayed_work(&pool->waker);
+	cancel_delayed_work(&pool->no_space_timeout);
 	flush_workqueue(pool->wq);
 	(void) commit(pool);
 }
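The shape of the dm-thin change is a standard delayed-work timeout: arm the work when entering the degraded mode, have the handler re-check state before acting (userspace may have resized the pool in the meantime), and cancel it on suspend so it cannot fire across a suspend/resume cycle. A hedged, self-contained sketch of that pattern with our own demo_pool type; the workqueue calls are the real <linux/workqueue.h> API, but nothing here is dm-thin's actual code:

#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

#define DEMO_NO_SPACE_TIMEOUT (HZ * 60)	/* 60 seconds, as in the patch */

struct demo_pool {
	struct workqueue_struct *wq;
	struct delayed_work no_space_timeout;
	bool out_of_space;		/* stands in for get_pool_mode() */
};

static void demo_no_space_timeout(struct work_struct *ws)
{
	struct demo_pool *p = container_of(to_delayed_work(ws),
					   struct demo_pool, no_space_timeout);

	/* Re-check: the condition may have cleared while we waited. */
	if (p->out_of_space)
		pr_warn("demo: still out of space after 60s, degrading to read-only\n");
}

static void demo_pool_init(struct demo_pool *p, struct workqueue_struct *wq)
{
	p->wq = wq;
	p->out_of_space = false;
	INIT_DELAYED_WORK(&p->no_space_timeout, demo_no_space_timeout);
}

static void demo_enter_no_space(struct demo_pool *p)
{
	p->out_of_space = true;
	/* Hold IO and give userspace one timeout period to react. */
	queue_delayed_work(p->wq, &p->no_space_timeout, DEMO_NO_SPACE_TIMEOUT);
}

static void demo_postsuspend(struct demo_pool *p)
{
	/* Mirrors pool_postsuspend(): a pending timeout must not outlive
	 * the suspend. */
	cancel_delayed_work(&p->no_space_timeout);
	flush_workqueue(p->wq);
}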