author		Mikulas Patocka <mpatocka@redhat.com>	2012-07-27 10:08:05 -0400
committer	Alasdair G Kergon <agk@redhat.com>	2012-07-27 10:08:05 -0400
commit		fd2d231faf3ca25584d2320fdcd5a8b202342e46 (patch)
tree		6ab9522f76d84c6c65e51ac3007b7435015314e7 /drivers/md
parent		40b6229b69211e24b46868675d4db1dab1a51d96 (diff)
dm crypt: move cipher data out of per_cpu struct
Move static dm-crypt cipher data out of per-cpu structure.
Cipher information is static, so it does not have to be in a per-cpu
structure.
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
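
The gist of the change is easiest to see as a before/after sketch of the two
structures involved. This is a simplified illustration, not the literal
dm-crypt declarations: the _old/_new suffixes and the trimmed field lists are
ours, added for contrast.

        /* Before this patch: every possible CPU carried its own copy of the
         * static cipher data, appended to the per-cpu struct as a flexible
         * array sized at __alloc_percpu() time. */
        struct crypt_cpu_old {
                struct ablkcipher_request *req;     /* genuinely per-cpu state */
                void *iv_private;                   /* static, duplicated per cpu */
                struct crypto_ablkcipher *tfms[0];  /* static, duplicated per cpu */
        };

        /* After this patch: only the in-flight request stays per-cpu... */
        struct crypt_cpu_new {
                struct ablkcipher_request *req;
        };

        /* ...and the read-only cipher data lives exactly once, in the
         * shared crypt_config. */
        struct crypt_config_new {
                struct crypt_cpu_new __percpu *cpu;
                void *iv_private;                   /* ESSIV: struct crypto_cipher * */
                struct crypto_ablkcipher **tfms;    /* kmalloc'd, tfms_count entries */
                unsigned tfms_count;
                /* ...remaining fields unchanged... */
        };

Besides saving memory, this lets every for_each_possible_cpu() loop over the
cipher data collapse into straight-line code, as the hunks below show.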
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/dm-crypt.c	134
1 file changed, 65 insertions(+), 69 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index d7f0e0191c01..711e0ac58665 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -109,9 +109,6 @@ enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
  */
 struct crypt_cpu {
        struct ablkcipher_request *req;
-       /* ESSIV: struct crypto_cipher *essiv_tfm */
-       void *iv_private;
-       struct crypto_ablkcipher *tfms[0];
 };
 
 /*
@@ -151,6 +148,10 @@ struct crypt_config {
         * per_cpu_ptr() only.
         */
        struct crypt_cpu __percpu *cpu;
+
+       /* ESSIV: struct crypto_cipher *essiv_tfm */
+       void *iv_private;
+       struct crypto_ablkcipher **tfms;
        unsigned tfms_count;
 
        /*
@@ -193,7 +194,7 @@ static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
  */
 static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
 {
-       return __this_cpu_ptr(cc->cpu)->tfms[0];
+       return cc->tfms[0];
 }
 
 /*
@@ -258,7 +259,7 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
        struct hash_desc desc;
        struct scatterlist sg;
        struct crypto_cipher *essiv_tfm;
-       int err, cpu;
+       int err;
 
        sg_init_one(&sg, cc->key, cc->key_size);
        desc.tfm = essiv->hash_tfm;
@@ -268,14 +269,12 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
        if (err)
                return err;
 
-       for_each_possible_cpu(cpu) {
-               essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private,
+       essiv_tfm = cc->iv_private;
 
        err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
                                   crypto_hash_digestsize(essiv->hash_tfm));
        if (err)
                return err;
-       }
 
        return 0;
 }
@@ -286,16 +285,14 @@ static int crypt_iv_essiv_wipe(struct crypt_config *cc)
        struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
        unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
        struct crypto_cipher *essiv_tfm;
-       int cpu, r, err = 0;
+       int r, err = 0;
 
        memset(essiv->salt, 0, salt_size);
 
-       for_each_possible_cpu(cpu) {
-               essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;
-               r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
-               if (r)
-                       err = r;
-       }
+       essiv_tfm = cc->iv_private;
+       r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
+       if (r)
+               err = r;
 
        return err;
 }
@@ -335,8 +332,6 @@ static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
 
 static void crypt_iv_essiv_dtr(struct crypt_config *cc)
 {
-       int cpu;
-       struct crypt_cpu *cpu_cc;
        struct crypto_cipher *essiv_tfm;
        struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
 
@@ -346,15 +341,12 @@ static void crypt_iv_essiv_dtr(struct crypt_config *cc)
        kzfree(essiv->salt);
        essiv->salt = NULL;
 
-       for_each_possible_cpu(cpu) {
-               cpu_cc = per_cpu_ptr(cc->cpu, cpu);
-               essiv_tfm = cpu_cc->iv_private;
+       essiv_tfm = cc->iv_private;
 
        if (essiv_tfm)
                crypto_free_cipher(essiv_tfm);
 
-               cpu_cc->iv_private = NULL;
-       }
+       cc->iv_private = NULL;
 }
 
 static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
@@ -363,7 +355,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
        struct crypto_cipher *essiv_tfm = NULL;
        struct crypto_hash *hash_tfm = NULL;
        u8 *salt = NULL;
-       int err, cpu;
+       int err;
 
        if (!opts) {
                ti->error = "Digest algorithm missing for ESSIV mode";
@@ -388,15 +380,13 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
        cc->iv_gen_private.essiv.salt = salt;
        cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
 
-       for_each_possible_cpu(cpu) {
-               essiv_tfm = setup_essiv_cpu(cc, ti, salt,
-                                       crypto_hash_digestsize(hash_tfm));
-               if (IS_ERR(essiv_tfm)) {
-                       crypt_iv_essiv_dtr(cc);
-                       return PTR_ERR(essiv_tfm);
-               }
-               per_cpu_ptr(cc->cpu, cpu)->iv_private = essiv_tfm;
-       }
+       essiv_tfm = setup_essiv_cpu(cc, ti, salt,
+                                   crypto_hash_digestsize(hash_tfm));
+       if (IS_ERR(essiv_tfm)) {
+               crypt_iv_essiv_dtr(cc);
+               return PTR_ERR(essiv_tfm);
+       }
+       cc->iv_private = essiv_tfm;
 
        return 0;
 
@@ -410,7 +400,7 @@ bad:
 static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
                              struct dm_crypt_request *dmreq)
 {
-       struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private;
+       struct crypto_cipher *essiv_tfm = cc->iv_private;
 
        memset(iv, 0, cc->iv_size);
        *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
@@ -754,7 +744,7 @@ static void crypt_alloc_req(struct crypt_config *cc,
        if (!this_cc->req)
                this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
 
-       ablkcipher_request_set_tfm(this_cc->req, this_cc->tfms[key_index]);
+       ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]);
        ablkcipher_request_set_callback(this_cc->req,
            CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
            kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
@@ -1273,29 +1263,38 @@ static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
        }
 }
 
-static void crypt_free_tfms(struct crypt_config *cc, int cpu)
+static void crypt_free_tfms(struct crypt_config *cc)
 {
-       struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
        unsigned i;
 
+       if (!cc->tfms)
+               return;
+
        for (i = 0; i < cc->tfms_count; i++)
-               if (cpu_cc->tfms[i] && !IS_ERR(cpu_cc->tfms[i])) {
-                       crypto_free_ablkcipher(cpu_cc->tfms[i]);
-                       cpu_cc->tfms[i] = NULL;
+               if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
+                       crypto_free_ablkcipher(cc->tfms[i]);
+                       cc->tfms[i] = NULL;
                }
+
+       kfree(cc->tfms);
+       cc->tfms = NULL;
 }
 
-static int crypt_alloc_tfms(struct crypt_config *cc, int cpu, char *ciphermode)
+static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
 {
-       struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
        unsigned i;
        int err;
 
+       cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
+                          GFP_KERNEL);
+       if (!cc->tfms)
+               return -ENOMEM;
+
        for (i = 0; i < cc->tfms_count; i++) {
-               cpu_cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
-               if (IS_ERR(cpu_cc->tfms[i])) {
-                       err = PTR_ERR(cpu_cc->tfms[i]);
-                       crypt_free_tfms(cc, cpu);
+               cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
+               if (IS_ERR(cc->tfms[i])) {
+                       err = PTR_ERR(cc->tfms[i]);
+                       crypt_free_tfms(cc);
                        return err;
                }
        }
@@ -1306,15 +1305,14 @@ static int crypt_alloc_tfms(struct crypt_config *cc, int cpu, char *ciphermode)
 static int crypt_setkey_allcpus(struct crypt_config *cc)
 {
        unsigned subkey_size = cc->key_size >> ilog2(cc->tfms_count);
-       int cpu, err = 0, i, r;
+       int err = 0, i, r;
 
-       for_each_possible_cpu(cpu) {
-               for (i = 0; i < cc->tfms_count; i++) {
-                       r = crypto_ablkcipher_setkey(per_cpu_ptr(cc->cpu, cpu)->tfms[i],
-                                                    cc->key + (i * subkey_size), subkey_size);
+       for (i = 0; i < cc->tfms_count; i++) {
+               r = crypto_ablkcipher_setkey(cc->tfms[i],
+                                            cc->key + (i * subkey_size),
+                                            subkey_size);
                if (r)
                        err = r;
-               }
        }
 
        return err;
@@ -1376,9 +1374,10 @@ static void crypt_dtr(struct dm_target *ti)
                cpu_cc = per_cpu_ptr(cc->cpu, cpu);
                if (cpu_cc->req)
                        mempool_free(cpu_cc->req, cc->req_pool);
-               crypt_free_tfms(cc, cpu);
        }
 
+       crypt_free_tfms(cc);
+
        if (cc->bs)
                bioset_free(cc->bs);
 
@@ -1411,7 +1410,7 @@ static int crypt_ctr_cipher(struct dm_target *ti,
        struct crypt_config *cc = ti->private;
        char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
        char *cipher_api = NULL;
-       int cpu, ret = -EINVAL;
+       int ret = -EINVAL;
        char dummy;
 
        /* Convert to crypto api definition? */
@@ -1452,8 +1451,7 @@ static int crypt_ctr_cipher(struct dm_target *ti,
        if (tmp)
                DMWARN("Ignoring unexpected additional cipher options");
 
-       cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)) +
-                                cc->tfms_count * sizeof(*(cc->cpu->tfms)),
+       cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
                                 __alignof__(struct crypt_cpu));
        if (!cc->cpu) {
                ti->error = "Cannot allocate per cpu state";
@@ -1486,12 +1484,10 @@ static int crypt_ctr_cipher(struct dm_target *ti,
        }
 
        /* Allocate cipher */
-       for_each_possible_cpu(cpu) {
-               ret = crypt_alloc_tfms(cc, cpu, cipher_api);
-               if (ret < 0) {
-                       ti->error = "Error allocating crypto tfm";
-                       goto bad;
-               }
+       ret = crypt_alloc_tfms(cc, cipher_api);
+       if (ret < 0) {
+               ti->error = "Error allocating crypto tfm";
+               goto bad;
        }
 
        /* Initialize and set key */
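
A side effect of moving the tfms array into crypt_config is that the new
crypt_free_tfms() follows the usual kernel teardown idiom: it tolerates a
never-allocated array and clears the pointer after freeing, so callers such as
crypt_dtr() and the failure branch inside crypt_alloc_tfms() itself can invoke
it unconditionally. A minimal standalone sketch of that pattern, in userspace
C with generic names of our own choosing rather than dm-crypt code:

        #include <stdlib.h>

        struct ctx {
                void **items;       /* NULL until allocated */
                unsigned count;
        };

        /* Safe to call at any point after ctx is zero-initialized:
         * a NULL array is a no-op, and the pointer is cleared after
         * freeing so a repeated call cannot double-free. */
        static void ctx_free_items(struct ctx *c)
        {
                unsigned i;

                if (!c->items)
                        return;

                for (i = 0; i < c->count; i++)
                        free(c->items[i]);

                free(c->items);
                c->items = NULL;
        }

This is the same reason the patch can drop the per-call bookkeeping: error
paths no longer need to know how far allocation got before failing.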