author	Milan Broz <mbroz@redhat.com>	2011-01-13 14:59:54 -0500
committer	Alasdair G Kergon <agk@redhat.com>	2011-01-13 14:59:54 -0500
commit	d1f9642381847e2b94caa34c3533211cf36ffcf4
tree	32ae1cc4b0946247384fe1a31e257658f734b3f7 /drivers/md
parent	2dc5327d3acb3340ab6fa3981401b076b78a51f4
dm crypt: add multi key capability
This patch adds generic multi-key handling, to be used
in a following patch for Loop-AES mode compatibility.

It extends the mapping table with an optional <keycount>
parameter and implements a generic multi-key capability
(an example table line is shown below).

When more than one key is defined, the <key> string is divided
into <keycount> sections of equal length and one tfm is keyed
with each section.

The tfm is selected according to the sector offset
(sector 0 -> tfms[0], sector 1 -> tfms[1], ..., sector N -> tfms[N mod keycount]);
only power-of-two values are supported for <keycount>.

Because the tfms are allocated per-cpu, this mode can take
a lot of memory on large SMP systems.
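
For example, with <keycount> 2, a mapping table line could look like
this (the device, sizes and key material below are placeholders; the
key is the two subkeys concatenated in hex):

    # <start> <length> crypt <cipher>[:<keycount>]-<mode>-<iv> <key> <iv_offset> <device> <offset>
    0 409600 crypt aes:2-cbc-essiv:sha256 <subkey1hex><subkey2hex> 0 /dev/sdb 0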
Signed-off-by: Milan Broz <mbroz@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Cc: Max Vozeler <max@hinterhof.net>
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/dm-crypt.c	85
1 file changed, 64 insertions(+), 21 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index e0ebe685be6a..b8b9267c4dbb 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -101,10 +101,9 @@ enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
  */
 struct crypt_cpu {
 	struct ablkcipher_request *req;
-	struct crypto_ablkcipher *tfm;
-
 	/* ESSIV: struct crypto_cipher *essiv_tfm */
 	void *iv_private;
+	struct crypto_ablkcipher *tfms[0];
 };
 
 /*
@@ -143,6 +142,7 @@ struct crypt_config {
 	 * per_cpu_ptr() only.
 	 */
 	struct crypt_cpu __percpu *cpu;
+	unsigned tfms_count;
 
 	/*
 	 * Layout of each crypto request:
@@ -161,6 +161,7 @@ struct crypt_config {
 
 	unsigned long flags;
 	unsigned int key_size;
+	unsigned int key_parts;
 	u8 key[0];
 };
 
@@ -184,7 +185,7 @@ static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
  */
 static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
 {
-	return __this_cpu_ptr(cc->cpu)->tfm;
+	return __this_cpu_ptr(cc->cpu)->tfms[0];
 }
 
 /*
@@ -567,11 +568,12 @@ static void crypt_alloc_req(struct crypt_config *cc,
 			    struct convert_context *ctx)
 {
 	struct crypt_cpu *this_cc = this_crypt_config(cc);
+	unsigned key_index = ctx->sector & (cc->tfms_count - 1);
 
 	if (!this_cc->req)
 		this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
 
-	ablkcipher_request_set_tfm(this_cc->req, this_cc->tfm);
+	ablkcipher_request_set_tfm(this_cc->req, this_cc->tfms[key_index]);
 	ablkcipher_request_set_callback(this_cc->req,
 	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
 	    kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
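
The new key_index computation relies on cc->tfms_count being a power of
two, so the AND with (tfms_count - 1) is equivalent to a modulo and
implements the sector N -> tfms[N mod keycount] rule described above.
A minimal user-space sketch of that equivalence (values are
hypothetical, not kernel code):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		const uint64_t tfms_count = 64;	/* must be a power of two */
		uint64_t sector;

		/* mask and modulo agree for every sector */
		for (sector = 0; sector < 1000000; sector++)
			assert((sector & (tfms_count - 1)) == (sector % tfms_count));
		return 0;
	}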
@@ -1097,15 +1099,48 @@ static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
 	}
 }
 
+static void crypt_free_tfms(struct crypt_config *cc, int cpu)
+{
+	struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
+	unsigned i;
+
+	for (i = 0; i < cc->tfms_count; i++)
+		if (cpu_cc->tfms[i] && !IS_ERR(cpu_cc->tfms[i])) {
+			crypto_free_ablkcipher(cpu_cc->tfms[i]);
+			cpu_cc->tfms[i] = NULL;
+		}
+}
+
+static int crypt_alloc_tfms(struct crypt_config *cc, int cpu, char *ciphermode)
+{
+	struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
+	unsigned i;
+	int err;
+
+	for (i = 0; i < cc->tfms_count; i++) {
+		cpu_cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
+		if (IS_ERR(cpu_cc->tfms[i])) {
+			err = PTR_ERR(cpu_cc->tfms[i]);
+			crypt_free_tfms(cc, cpu);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
 static int crypt_setkey_allcpus(struct crypt_config *cc)
 {
-	int cpu, err = 0, r;
+	unsigned subkey_size = cc->key_size >> ilog2(cc->tfms_count);
+	int cpu, err = 0, i, r;
 
 	for_each_possible_cpu(cpu) {
-		r = crypto_ablkcipher_setkey(per_cpu_ptr(cc->cpu, cpu)->tfm,
-					     cc->key, cc->key_size);
-		if (r)
-			err = r;
+		for (i = 0; i < cc->tfms_count; i++) {
+			r = crypto_ablkcipher_setkey(per_cpu_ptr(cc->cpu, cpu)->tfms[i],
+						     cc->key + (i * subkey_size), subkey_size);
+			if (r)
+				err = r;
+		}
 	}
 
 	return err;
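
To illustrate the subkey split in crypt_setkey_allcpus() with
hypothetical numbers: a 128-byte <key> with tfms_count = 4 gives
subkey_size = 128 >> ilog2(4) = 32, so tfms[i] is keyed with the 32
bytes starting at cc->key + i * 32. A user-space sketch of the same
arithmetic:

	#include <stdio.h>

	/* stand-in for the kernel's ilog2() on a power of two */
	static unsigned int ilog2_pow2(unsigned int v)
	{
		unsigned int r = 0;

		while (v >>= 1)
			r++;
		return r;
	}

	int main(void)
	{
		unsigned int key_size = 128, tfms_count = 4;	/* hypothetical */
		unsigned int subkey_size = key_size >> ilog2_pow2(tfms_count);
		unsigned int i;

		for (i = 0; i < tfms_count; i++)
			printf("tfms[%u] <- key bytes %u..%u\n",
			       i, i * subkey_size, (i + 1) * subkey_size - 1);
		return 0;
	}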
@@ -1158,8 +1193,7 @@ static void crypt_dtr(struct dm_target *ti)
 		cpu_cc = per_cpu_ptr(cc->cpu, cpu);
 		if (cpu_cc->req)
 			mempool_free(cpu_cc->req, cc->req_pool);
-		if (cpu_cc->tfm)
-			crypto_free_ablkcipher(cpu_cc->tfm);
+		crypt_free_tfms(cc, cpu);
 	}
 
 	if (cc->bs)
@@ -1192,8 +1226,7 @@ static int crypt_ctr_cipher(struct dm_target *ti,
 			    char *cipher_in, char *key)
 {
 	struct crypt_config *cc = ti->private;
-	struct crypto_ablkcipher *tfm;
-	char *tmp, *cipher, *chainmode, *ivmode, *ivopts;
+	char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
 	char *cipher_api = NULL;
 	int cpu, ret = -EINVAL;
 
@@ -1209,10 +1242,20 @@ static int crypt_ctr_cipher(struct dm_target *ti,
 
 	/*
 	 * Legacy dm-crypt cipher specification
-	 * cipher-mode-iv:ivopts
+	 * cipher[:keycount]-mode-iv:ivopts
	 */
 	tmp = cipher_in;
-	cipher = strsep(&tmp, "-");
+	keycount = strsep(&tmp, "-");
+	cipher = strsep(&keycount, ":");
+
+	if (!keycount)
+		cc->tfms_count = 1;
+	else if (sscanf(keycount, "%u", &cc->tfms_count) != 1 ||
+		 !is_power_of_2(cc->tfms_count)) {
+		ti->error = "Bad cipher key count specification";
+		return -EINVAL;
+	}
+	cc->key_parts = cc->tfms_count;
 
 	cc->cipher = kstrdup(cipher, GFP_KERNEL);
 	if (!cc->cipher)
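
The two strsep() calls above split a spec such as
"aes:64-cbc-essiv:sha256" into cipher "aes" and keycount "64", leaving
"cbc-essiv:sha256" for the chainmode/ivmode parsing that follows;
keycount stays NULL when no ':' is present. A user-space sketch (the
spec string is a made-up example):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char spec[] = "aes:64-cbc-essiv:sha256";
		char *tmp = spec;
		char *keycount, *cipher;

		keycount = strsep(&tmp, "-");	/* "aes:64", tmp = "cbc-essiv:sha256" */
		cipher = strsep(&keycount, ":");	/* "aes", keycount = "64" (or NULL) */

		printf("cipher=%s keycount=%s rest=%s\n",
		       cipher, keycount ? keycount : "(none)", tmp);
		return 0;
	}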
@@ -1225,7 +1268,9 @@ static int crypt_ctr_cipher(struct dm_target *ti,
 	if (tmp)
 		DMWARN("Ignoring unexpected additional cipher options");
 
-	cc->cpu = alloc_percpu(struct crypt_cpu);
+	cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)) +
+				 cc->tfms_count * sizeof(*(cc->cpu->tfms)),
+				 __alignof__(struct crypt_cpu));
 	if (!cc->cpu) {
 		ti->error = "Cannot allocate per cpu state";
 		goto bad_mem;
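
The __alloc_percpu() call sizes each per-CPU slot as the fixed struct
plus tfms_count trailing pointers for the new tfms[0] flexible array
member; the real memory cost on large SMP systems is the tfms_count
allocated tfms behind those pointers, per CPU, as the description
warns. A sketch of the size computation with a hypothetical stand-in
struct:

	#include <stdio.h>

	struct cpu_state {		/* stand-in for struct crypt_cpu */
		void *req;
		void *iv_private;
		void *tfms[];		/* tfms_count pointers follow the struct */
	};

	int main(void)
	{
		unsigned int tfms_count = 64;	/* hypothetical keycount */
		size_t size = sizeof(struct cpu_state) +
			      tfms_count * sizeof(void *);

		printf("per-CPU slot size: %zu bytes\n", size);
		return 0;
	}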
@@ -1258,13 +1303,11 @@ static int crypt_ctr_cipher(struct dm_target *ti,
 
 	/* Allocate cipher */
 	for_each_possible_cpu(cpu) {
-		tfm = crypto_alloc_ablkcipher(cipher_api, 0, 0);
-		if (IS_ERR(tfm)) {
-			ret = PTR_ERR(tfm);
+		ret = crypt_alloc_tfms(cc, cpu, cipher_api);
+		if (ret < 0) {
 			ti->error = "Error allocating crypto tfm";
 			goto bad;
 		}
-		per_cpu_ptr(cc->cpu, cpu)->tfm = tfm;
 	}
 
 	/* Initialize and set key */
@@ -1587,7 +1630,7 @@ static int crypt_iterate_devices(struct dm_target *ti,
 
 static struct target_type crypt_target = {
 	.name   = "crypt",
-	.version = {1, 9, 0},
+	.version = {1, 10, 0},
 	.module = THIS_MODULE,
 	.ctr    = crypt_ctr,
 	.dtr    = crypt_dtr,