Diffstat (limited to 'drivers/md/dm-crypt.c')
-rw-r--r--  drivers/md/dm-crypt.c  977
1 file changed, 977 insertions, 0 deletions
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
new file mode 100644
index 000000000000..77619a56e2bf
--- /dev/null
+++ b/drivers/md/dm-crypt.c
@@ -0,0 +1,977 @@
1 | /* | ||
2 | * Copyright (C) 2003 Christophe Saout <christophe@saout.de> | ||
3 | * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org> | ||
4 | * | ||
5 | * This file is released under the GPL. | ||
6 | */ | ||
7 | |||
8 | #include <linux/module.h> | ||
9 | #include <linux/init.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/bio.h> | ||
12 | #include <linux/blkdev.h> | ||
13 | #include <linux/mempool.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/crypto.h> | ||
16 | #include <linux/workqueue.h> | ||
17 | #include <asm/atomic.h> | ||
18 | #include <asm/scatterlist.h> | ||
19 | #include <asm/page.h> | ||
20 | |||
21 | #include "dm.h" | ||
22 | |||
23 | #define PFX "crypt: " | ||
24 | |||
25 | /* | ||
26 | * per bio private data | ||
27 | */ | ||
28 | struct crypt_io { | ||
29 | struct dm_target *target; | ||
30 | struct bio *bio; | ||
31 | struct bio *first_clone; | ||
32 | struct work_struct work; | ||
33 | atomic_t pending; | ||
34 | int error; | ||
35 | }; | ||
36 | |||
37 | /* | ||
38 | * context holding the current state of a multi-part conversion | ||
39 | */ | ||
40 | struct convert_context { | ||
41 | struct bio *bio_in; | ||
42 | struct bio *bio_out; | ||
43 | unsigned int offset_in; | ||
44 | unsigned int offset_out; | ||
45 | unsigned int idx_in; | ||
46 | unsigned int idx_out; | ||
47 | sector_t sector; | ||
48 | int write; | ||
49 | }; | ||
50 | |||
51 | struct crypt_config; | ||
52 | |||
53 | struct crypt_iv_operations { | ||
54 | int (*ctr)(struct crypt_config *cc, struct dm_target *ti, | ||
55 | const char *opts); | ||
56 | void (*dtr)(struct crypt_config *cc); | ||
57 | const char *(*status)(struct crypt_config *cc); | ||
58 | int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector); | ||
59 | }; | ||
60 | |||
61 | /* | ||
62 | * Crypt: maps a linear range of a block device | ||
63 | * and encrypts / decrypts at the same time. | ||
64 | */ | ||
65 | struct crypt_config { | ||
66 | struct dm_dev *dev; | ||
67 | sector_t start; | ||
68 | |||
69 | /* | ||
70 | * pool for per bio private data and | ||
71 | * for encryption buffer pages | ||
72 | */ | ||
73 | mempool_t *io_pool; | ||
74 | mempool_t *page_pool; | ||
75 | |||
76 | /* | ||
77 | * crypto related data | ||
78 | */ | ||
79 | struct crypt_iv_operations *iv_gen_ops; | ||
80 | char *iv_mode; | ||
81 | void *iv_gen_private; | ||
82 | sector_t iv_offset; | ||
83 | unsigned int iv_size; | ||
84 | |||
85 | struct crypto_tfm *tfm; | ||
86 | unsigned int key_size; | ||
87 | u8 key[0]; | ||
88 | }; | ||
89 | |||
90 | #define MIN_IOS 256 | ||
91 | #define MIN_POOL_PAGES 32 | ||
92 | #define MIN_BIO_PAGES 8 | ||
93 | |||
94 | static kmem_cache_t *_crypt_io_pool; | ||
95 | |||
96 | /* | ||
97 | * Mempool alloc and free functions for the page pool | ||
98 | */ | ||
99 | static void *mempool_alloc_page(unsigned int __nocast gfp_mask, void *data) | ||
100 | { | ||
101 | return alloc_page(gfp_mask); | ||
102 | } | ||
103 | |||
104 | static void mempool_free_page(void *page, void *data) | ||
105 | { | ||
106 | __free_page(page); | ||
107 | } | ||
108 | |||
109 | |||
110 | /* | ||
111 | * Different IV generation algorithms: | ||
112 | * | ||
113 | * plain: the initial vector is the 32-bit little-endian version of the sector | ||
114 | * number, padded with zeros if necessary. | ||
115 | * | ||
116 | * essiv: "encrypted sector|salt initial vector", the sector number is | ||
117 | * encrypted with the bulk cipher using a salt as key. The salt | ||
118 | * should be derived from the bulk cipher's key via hashing. | ||
119 | * | ||
120 | * plumb: unimplemented, see: | ||
121 | * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454 | ||
122 | */ | ||
123 | |||
124 | static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector) | ||
125 | { | ||
126 | memset(iv, 0, cc->iv_size); | ||
127 | *(u32 *)iv = cpu_to_le32(sector & 0xffffffff); | ||
128 | |||
129 | return 0; | ||
130 | } | ||
131 | |||
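(As an illustration, not part of the patch: a minimal userspace sketch of what the "plain" generator produces. For a 16-byte IV and sector 0x112345678, only the low 32 bits survive, stored little-endian; the rest stays zero.)

    /* Illustrative only: mirrors crypt_iv_plain_gen() in userspace. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static void plain_iv(uint8_t *iv, size_t iv_size, uint64_t sector)
    {
        uint32_t low = (uint32_t)(sector & 0xffffffff); /* low 32 bits only */

        memset(iv, 0, iv_size);
        /* assumes a little-endian host; the kernel uses cpu_to_le32() */
        memcpy(iv, &low, sizeof(low));
    }

    int main(void)
    {
        uint8_t iv[16];
        size_t i;

        plain_iv(iv, sizeof(iv), 0x112345678ULL);
        for (i = 0; i < sizeof(iv); i++)
            printf("%02x ", iv[i]);     /* 78 56 34 12 00 00 ... 00 */
        printf("\n");
        return 0;
    }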
132 | static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, | ||
133 | const char *opts) | ||
134 | { | ||
135 | struct crypto_tfm *essiv_tfm; | ||
136 | struct crypto_tfm *hash_tfm; | ||
137 | struct scatterlist sg; | ||
138 | unsigned int saltsize; | ||
139 | u8 *salt; | ||
140 | |||
141 | if (opts == NULL) { | ||
142 | ti->error = PFX "Digest algorithm missing for ESSIV mode"; | ||
143 | return -EINVAL; | ||
144 | } | ||
145 | |||
146 | /* Hash the cipher key with the given hash algorithm */ | ||
147 | hash_tfm = crypto_alloc_tfm(opts, 0); | ||
148 | if (hash_tfm == NULL) { | ||
149 | ti->error = PFX "Error initializing ESSIV hash"; | ||
150 | return -EINVAL; | ||
151 | } | ||
152 | |||
153 | if (crypto_tfm_alg_type(hash_tfm) != CRYPTO_ALG_TYPE_DIGEST) { | ||
154 | ti->error = PFX "Expected digest algorithm for ESSIV hash"; | ||
155 | crypto_free_tfm(hash_tfm); | ||
156 | return -EINVAL; | ||
157 | } | ||
158 | |||
159 | saltsize = crypto_tfm_alg_digestsize(hash_tfm); | ||
160 | salt = kmalloc(saltsize, GFP_KERNEL); | ||
161 | if (salt == NULL) { | ||
162 | ti->error = PFX "Error kmallocing salt storage in ESSIV"; | ||
163 | crypto_free_tfm(hash_tfm); | ||
164 | return -ENOMEM; | ||
165 | } | ||
166 | |||
167 | sg.page = virt_to_page(cc->key); | ||
168 | sg.offset = offset_in_page(cc->key); | ||
169 | sg.length = cc->key_size; | ||
170 | crypto_digest_digest(hash_tfm, &sg, 1, salt); | ||
171 | crypto_free_tfm(hash_tfm); | ||
172 | |||
173 | /* Set up the essiv_tfm with the given salt */ | ||
174 | essiv_tfm = crypto_alloc_tfm(crypto_tfm_alg_name(cc->tfm), | ||
175 | CRYPTO_TFM_MODE_ECB); | ||
176 | if (essiv_tfm == NULL) { | ||
177 | ti->error = PFX "Error allocating crypto tfm for ESSIV"; | ||
178 | kfree(salt); | ||
179 | return -EINVAL; | ||
180 | } | ||
181 | if (crypto_tfm_alg_blocksize(essiv_tfm) | ||
182 | != crypto_tfm_alg_ivsize(cc->tfm)) { | ||
183 | ti->error = PFX "Block size of ESSIV cipher does " | ||
184 | "not match IV size of block cipher"; | ||
185 | crypto_free_tfm(essiv_tfm); | ||
186 | kfree(salt); | ||
187 | return -EINVAL; | ||
188 | } | ||
189 | if (crypto_cipher_setkey(essiv_tfm, salt, saltsize) < 0) { | ||
190 | ti->error = PFX "Failed to set key for ESSIV cipher"; | ||
191 | crypto_free_tfm(essiv_tfm); | ||
192 | kfree(salt); | ||
193 | return -EINVAL; | ||
194 | } | ||
195 | kfree(salt); | ||
196 | |||
197 | cc->iv_gen_private = (void *)essiv_tfm; | ||
198 | return 0; | ||
199 | } | ||
200 | |||
201 | static void crypt_iv_essiv_dtr(struct crypt_config *cc) | ||
202 | { | ||
203 | crypto_free_tfm((struct crypto_tfm *)cc->iv_gen_private); | ||
204 | cc->iv_gen_private = NULL; | ||
205 | } | ||
206 | |||
207 | static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector) | ||
208 | { | ||
209 | struct scatterlist sg = { NULL, }; | ||
210 | |||
211 | memset(iv, 0, cc->iv_size); | ||
212 | *(u64 *)iv = cpu_to_le64(sector); | ||
213 | |||
214 | sg.page = virt_to_page(iv); | ||
215 | sg.offset = offset_in_page(iv); | ||
216 | sg.length = cc->iv_size; | ||
217 | crypto_cipher_encrypt((struct crypto_tfm *)cc->iv_gen_private, | ||
218 | &sg, &sg, cc->iv_size); | ||
219 | |||
220 | return 0; | ||
221 | } | ||
222 | |||
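(Taken together, the two ESSIV routines above implement what the earlier comment block describes: the constructor derives a salt by hashing the bulk key with the digest named in "essiv:<hash>" and keys an ECB instance of the same cipher with it; the generator then encrypts the little-endian sector number. Schematically:)

    salt       = H(key)
    IV(sector) = E_salt(le64(sector)), zero-padded to iv_size

(Because the salt depends on the secret key, these IVs are not predictable by an attacker, which is the point of ESSIV over the plain mode.)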
223 | static struct crypt_iv_operations crypt_iv_plain_ops = { | ||
224 | .generator = crypt_iv_plain_gen | ||
225 | }; | ||
226 | |||
227 | static struct crypt_iv_operations crypt_iv_essiv_ops = { | ||
228 | .ctr = crypt_iv_essiv_ctr, | ||
229 | .dtr = crypt_iv_essiv_dtr, | ||
230 | .generator = crypt_iv_essiv_gen | ||
231 | }; | ||
232 | |||
233 | |||
234 | static inline int | ||
235 | crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out, | ||
236 | struct scatterlist *in, unsigned int length, | ||
237 | int write, sector_t sector) | ||
238 | { | ||
239 | u8 iv[cc->iv_size]; | ||
240 | int r; | ||
241 | |||
242 | if (cc->iv_gen_ops) { | ||
243 | r = cc->iv_gen_ops->generator(cc, iv, sector); | ||
244 | if (r < 0) | ||
245 | return r; | ||
246 | |||
247 | if (write) | ||
248 | r = crypto_cipher_encrypt_iv(cc->tfm, out, in, length, iv); | ||
249 | else | ||
250 | r = crypto_cipher_decrypt_iv(cc->tfm, out, in, length, iv); | ||
251 | } else { | ||
252 | if (write) | ||
253 | r = crypto_cipher_encrypt(cc->tfm, out, in, length); | ||
254 | else | ||
255 | r = crypto_cipher_decrypt(cc->tfm, out, in, length); | ||
256 | } | ||
257 | |||
258 | return r; | ||
259 | } | ||
260 | |||
261 | static void | ||
262 | crypt_convert_init(struct crypt_config *cc, struct convert_context *ctx, | ||
263 | struct bio *bio_out, struct bio *bio_in, | ||
264 | sector_t sector, int write) | ||
265 | { | ||
266 | ctx->bio_in = bio_in; | ||
267 | ctx->bio_out = bio_out; | ||
268 | ctx->offset_in = 0; | ||
269 | ctx->offset_out = 0; | ||
270 | ctx->idx_in = bio_in ? bio_in->bi_idx : 0; | ||
271 | ctx->idx_out = bio_out ? bio_out->bi_idx : 0; | ||
272 | ctx->sector = sector + cc->iv_offset; | ||
273 | ctx->write = write; | ||
274 | } | ||
275 | |||
276 | /* | ||
277 | * Encrypt / decrypt data from one bio to another (which may be the same bio) | ||
278 | */ | ||
279 | static int crypt_convert(struct crypt_config *cc, | ||
280 | struct convert_context *ctx) | ||
281 | { | ||
282 | int r = 0; | ||
283 | |||
284 | while(ctx->idx_in < ctx->bio_in->bi_vcnt && | ||
285 | ctx->idx_out < ctx->bio_out->bi_vcnt) { | ||
286 | struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in); | ||
287 | struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out); | ||
288 | struct scatterlist sg_in = { | ||
289 | .page = bv_in->bv_page, | ||
290 | .offset = bv_in->bv_offset + ctx->offset_in, | ||
291 | .length = 1 << SECTOR_SHIFT | ||
292 | }; | ||
293 | struct scatterlist sg_out = { | ||
294 | .page = bv_out->bv_page, | ||
295 | .offset = bv_out->bv_offset + ctx->offset_out, | ||
296 | .length = 1 << SECTOR_SHIFT | ||
297 | }; | ||
298 | |||
299 | ctx->offset_in += sg_in.length; | ||
300 | if (ctx->offset_in >= bv_in->bv_len) { | ||
301 | ctx->offset_in = 0; | ||
302 | ctx->idx_in++; | ||
303 | } | ||
304 | |||
305 | ctx->offset_out += sg_out.length; | ||
306 | if (ctx->offset_out >= bv_out->bv_len) { | ||
307 | ctx->offset_out = 0; | ||
308 | ctx->idx_out++; | ||
309 | } | ||
310 | |||
311 | r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length, | ||
312 | ctx->write, ctx->sector); | ||
313 | if (r < 0) | ||
314 | break; | ||
315 | |||
316 | ctx->sector++; | ||
317 | } | ||
318 | |||
319 | return r; | ||
320 | } | ||
321 | |||
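(Each pass through the loop above encrypts exactly one 512-byte sector, 1 << SECTOR_SHIFT bytes, and advances ctx->sector, so a single 4096-byte bio_vec is consumed as eight consecutive sectors, each with its own IV. A rough standalone sketch of that bookkeeping, with hypothetical values and PAGE_SIZE assumed to be 4096:)

    #include <stdio.h>

    #define SECTOR_SHIFT 9
    #define PAGE_SIZE    4096

    int main(void)
    {
        unsigned int offset = 0;
        unsigned long long sector = 8;  /* hypothetical starting sector */

        /* one bio_vec covering a whole page */
        while (offset < PAGE_SIZE) {
            printf("encrypt bytes %u..%u as sector %llu\n",
                   offset, offset + (1 << SECTOR_SHIFT) - 1, sector);
            offset += 1 << SECTOR_SHIFT;
            sector++;
        }
        return 0;   /* prints eight chunks covering sectors 8..15 */
    }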
322 | /* | ||
323 | * Generate a new unfragmented bio of the given size. | ||
324 | * This should never violate the device limitations. | ||
325 | * It may return a smaller bio when running out of pages. | ||
326 | */ | ||
327 | static struct bio * | ||
328 | crypt_alloc_buffer(struct crypt_config *cc, unsigned int size, | ||
329 | struct bio *base_bio, unsigned int *bio_vec_idx) | ||
330 | { | ||
331 | struct bio *bio; | ||
332 | unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; | ||
333 | int gfp_mask = GFP_NOIO | __GFP_HIGHMEM; | ||
334 | unsigned long flags = current->flags; | ||
335 | unsigned int i; | ||
336 | |||
337 | /* | ||
338 | * Tell VM to act less aggressively and fail earlier. | ||
339 | * This is not necessary but increases throughput. | ||
340 | * FIXME: Is this really intelligent? | ||
341 | */ | ||
342 | current->flags &= ~PF_MEMALLOC; | ||
343 | |||
344 | if (base_bio) | ||
345 | bio = bio_clone(base_bio, GFP_NOIO); | ||
346 | else | ||
347 | bio = bio_alloc(GFP_NOIO, nr_iovecs); | ||
348 | if (!bio) { | ||
349 | if (flags & PF_MEMALLOC) | ||
350 | current->flags |= PF_MEMALLOC; | ||
351 | return NULL; | ||
352 | } | ||
353 | |||
354 | /* if the last bio was not complete, continue where that one ended */ | ||
355 | bio->bi_idx = *bio_vec_idx; | ||
356 | bio->bi_vcnt = *bio_vec_idx; | ||
357 | bio->bi_size = 0; | ||
358 | bio->bi_flags &= ~(1 << BIO_SEG_VALID); | ||
359 | |||
360 | /* bio->bi_idx pages have already been allocated */ | ||
361 | size -= bio->bi_idx * PAGE_SIZE; | ||
362 | |||
363 | for(i = bio->bi_idx; i < nr_iovecs; i++) { | ||
364 | struct bio_vec *bv = bio_iovec_idx(bio, i); | ||
365 | |||
366 | bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask); | ||
367 | if (!bv->bv_page) | ||
368 | break; | ||
369 | |||
370 | /* | ||
371 | * if additional pages cannot be allocated without waiting, | ||
372 | * return a partially allocated bio; the caller will then try | ||
373 | * to allocate additional bios while submitting this partial bio | ||
374 | */ | ||
375 | if ((i - bio->bi_idx) == (MIN_BIO_PAGES - 1)) | ||
376 | gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT; | ||
377 | |||
378 | bv->bv_offset = 0; | ||
379 | if (size > PAGE_SIZE) | ||
380 | bv->bv_len = PAGE_SIZE; | ||
381 | else | ||
382 | bv->bv_len = size; | ||
383 | |||
384 | bio->bi_size += bv->bv_len; | ||
385 | bio->bi_vcnt++; | ||
386 | size -= bv->bv_len; | ||
387 | } | ||
388 | |||
389 | if (flags & PF_MEMALLOC) | ||
390 | current->flags |= PF_MEMALLOC; | ||
391 | |||
392 | if (!bio->bi_size) { | ||
393 | bio_put(bio); | ||
394 | return NULL; | ||
395 | } | ||
396 | |||
397 | /* | ||
398 | * Remember the last bio_vec allocated to be able | ||
399 | * to correctly continue after the splitting. | ||
400 | */ | ||
401 | *bio_vec_idx = bio->bi_vcnt; | ||
402 | |||
403 | return bio; | ||
404 | } | ||
405 | |||
406 | static void crypt_free_buffer_pages(struct crypt_config *cc, | ||
407 | struct bio *bio, unsigned int bytes) | ||
408 | { | ||
409 | unsigned int i, start, end; | ||
410 | struct bio_vec *bv; | ||
411 | |||
412 | /* | ||
413 | * This is ugly, but Jens Axboe thinks that using bi_idx in the | ||
414 | * endio function is too dangerous at the moment, so I calculate the | ||
415 | * correct position using bi_vcnt and bi_size. | ||
416 | * The bv_offset and bv_len fields might already be modified but we | ||
417 | * know that we always allocated whole pages. | ||
418 | * A fix to the bi_idx issue in the kernel is in the works, so | ||
419 | * we will hopefully be able to revert to the cleaner solution soon. | ||
420 | */ | ||
421 | i = bio->bi_vcnt - 1; | ||
422 | bv = bio_iovec_idx(bio, i); | ||
423 | end = (i << PAGE_SHIFT) + (bv->bv_offset + bv->bv_len) - bio->bi_size; | ||
424 | start = end - bytes; | ||
425 | |||
426 | start >>= PAGE_SHIFT; | ||
427 | if (!bio->bi_size) | ||
428 | end = bio->bi_vcnt; | ||
429 | else | ||
430 | end >>= PAGE_SHIFT; | ||
431 | |||
432 | for(i = start; i < end; i++) { | ||
433 | bv = bio_iovec_idx(bio, i); | ||
434 | BUG_ON(!bv->bv_page); | ||
435 | mempool_free(bv->bv_page, cc->page_pool); | ||
436 | bv->bv_page = NULL; | ||
437 | } | ||
438 | } | ||
439 | |||
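(To see the position arithmetic in action, take a hypothetical 8-page, 32 KiB write clone whose completion reports bytes = 8192 while 16 KiB is still outstanding (bi_size = 16384); the calculation then frees exactly pages 2 and 3. A userspace re-run of the same arithmetic:)

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1u << PAGE_SHIFT)

    int main(void)
    {
        /* hypothetical values for one crypt_endio() call on a write clone */
        unsigned int bi_vcnt = 8;       /* clone holds 8 whole pages      */
        unsigned int bi_size = 16384;   /* bytes still outstanding        */
        unsigned int bytes   = 8192;    /* bytes completed in this call   */
        unsigned int i = bi_vcnt - 1;
        unsigned int bv_offset = 0, bv_len = PAGE_SIZE; /* whole pages    */
        unsigned int start, end;

        end   = (i << PAGE_SHIFT) + (bv_offset + bv_len) - bi_size;
        start = (end - bytes) >> PAGE_SHIFT;
        end   = bi_size ? end >> PAGE_SHIFT : bi_vcnt;

        printf("free pages [%u, %u)\n", start, end);  /* free pages [2, 4) */
        return 0;
    }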
440 | /* | ||
441 | * One of the bios was finished. Check for completion of | ||
442 | * the whole request and correctly clean up the buffer. | ||
443 | */ | ||
444 | static void dec_pending(struct crypt_io *io, int error) | ||
445 | { | ||
446 | struct crypt_config *cc = (struct crypt_config *) io->target->private; | ||
447 | |||
448 | if (error < 0) | ||
449 | io->error = error; | ||
450 | |||
451 | if (!atomic_dec_and_test(&io->pending)) | ||
452 | return; | ||
453 | |||
454 | if (io->first_clone) | ||
455 | bio_put(io->first_clone); | ||
456 | |||
457 | bio_endio(io->bio, io->bio->bi_size, io->error); | ||
458 | |||
459 | mempool_free(io, cc->io_pool); | ||
460 | } | ||
461 | |||
462 | /* | ||
463 | * kcryptd: | ||
464 | * | ||
465 | * Needed because it would be very unwise to do decryption in an | ||
466 | * interrupt context, so bios returning from read requests get | ||
467 | * queued here. | ||
468 | */ | ||
469 | static struct workqueue_struct *_kcryptd_workqueue; | ||
470 | |||
471 | static void kcryptd_do_work(void *data) | ||
472 | { | ||
473 | struct crypt_io *io = (struct crypt_io *) data; | ||
474 | struct crypt_config *cc = (struct crypt_config *) io->target->private; | ||
475 | struct convert_context ctx; | ||
476 | int r; | ||
477 | |||
478 | crypt_convert_init(cc, &ctx, io->bio, io->bio, | ||
479 | io->bio->bi_sector - io->target->begin, 0); | ||
480 | r = crypt_convert(cc, &ctx); | ||
481 | |||
482 | dec_pending(io, r); | ||
483 | } | ||
484 | |||
485 | static void kcryptd_queue_io(struct crypt_io *io) | ||
486 | { | ||
487 | INIT_WORK(&io->work, kcryptd_do_work, io); | ||
488 | queue_work(_kcryptd_workqueue, &io->work); | ||
489 | } | ||
490 | |||
491 | /* | ||
492 | * Decode key from its hex representation | ||
493 | */ | ||
494 | static int crypt_decode_key(u8 *key, char *hex, unsigned int size) | ||
495 | { | ||
496 | char buffer[3]; | ||
497 | char *endp; | ||
498 | unsigned int i; | ||
499 | |||
500 | buffer[2] = '\0'; | ||
501 | |||
502 | for(i = 0; i < size; i++) { | ||
503 | buffer[0] = *hex++; | ||
504 | buffer[1] = *hex++; | ||
505 | |||
506 | key[i] = (u8)simple_strtoul(buffer, &endp, 16); | ||
507 | |||
508 | if (endp != &buffer[2]) | ||
509 | return -EINVAL; | ||
510 | } | ||
511 | |||
512 | if (*hex != '\0') | ||
513 | return -EINVAL; | ||
514 | |||
515 | return 0; | ||
516 | } | ||
517 | |||
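(As a quick illustration, the key string "cafebabe" decodes to the four bytes ca fe ba be, with key_size = strlen(key) / 2 as computed later in crypt_ctr(). The same two-hex-digits-per-byte loop in plain userspace C, with a hypothetical helper name:)

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int decode_key(unsigned char *key, const char *hex, unsigned int size)
    {
        char buf[3] = { 0, 0, 0 };
        char *end;
        unsigned int i;

        for (i = 0; i < size; i++) {
            buf[0] = *hex++;
            buf[1] = *hex++;
            key[i] = (unsigned char)strtoul(buf, &end, 16);
            if (end != &buf[2])
                return -1;      /* not a hex digit pair */
        }
        return *hex ? -1 : 0;   /* trailing garbage -> error */
    }

    int main(void)
    {
        unsigned char key[4];
        const char *hex = "cafebabe";

        if (decode_key(key, hex, strlen(hex) / 2) == 0)
            printf("%02x %02x %02x %02x\n", key[0], key[1], key[2], key[3]);
        return 0;
    }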
518 | /* | ||
519 | * Encode key into its hex representation | ||
520 | */ | ||
521 | static void crypt_encode_key(char *hex, u8 *key, unsigned int size) | ||
522 | { | ||
523 | unsigned int i; | ||
524 | |||
525 | for(i = 0; i < size; i++) { | ||
526 | sprintf(hex, "%02x", *key); | ||
527 | hex += 2; | ||
528 | key++; | ||
529 | } | ||
530 | } | ||
531 | |||
532 | /* | ||
533 | * Construct an encryption mapping: | ||
534 | * <cipher> <key> <iv_offset> <dev_path> <start> | ||
535 | */ | ||
536 | static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) | ||
537 | { | ||
538 | struct crypt_config *cc; | ||
539 | struct crypto_tfm *tfm; | ||
540 | char *tmp; | ||
541 | char *cipher; | ||
542 | char *chainmode; | ||
543 | char *ivmode; | ||
544 | char *ivopts; | ||
545 | unsigned int crypto_flags; | ||
546 | unsigned int key_size; | ||
547 | |||
548 | if (argc != 5) { | ||
549 | ti->error = PFX "Not enough arguments"; | ||
550 | return -EINVAL; | ||
551 | } | ||
552 | |||
553 | tmp = argv[0]; | ||
554 | cipher = strsep(&tmp, "-"); | ||
555 | chainmode = strsep(&tmp, "-"); | ||
556 | ivopts = strsep(&tmp, "-"); | ||
557 | ivmode = strsep(&ivopts, ":"); | ||
558 | |||
559 | if (tmp) | ||
560 | DMWARN(PFX "Unexpected additional cipher options"); | ||
561 | |||
562 | key_size = strlen(argv[1]) >> 1; | ||
563 | |||
564 | cc = kmalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL); | ||
565 | if (cc == NULL) { | ||
566 | ti->error = | ||
567 | PFX "Cannot allocate transparent encryption context"; | ||
568 | return -ENOMEM; | ||
569 | } | ||
570 | |||
571 | cc->key_size = key_size; | ||
572 | if ((!key_size && strcmp(argv[1], "-") != 0) || | ||
573 | (key_size && crypt_decode_key(cc->key, argv[1], key_size) < 0)) { | ||
574 | ti->error = PFX "Error decoding key"; | ||
575 | goto bad1; | ||
576 | } | ||
577 | |||
578 | /* Compatibility mode for old dm-crypt cipher strings */ | ||
579 | if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) { | ||
580 | chainmode = "cbc"; | ||
581 | ivmode = "plain"; | ||
582 | } | ||
583 | |||
584 | /* Choose crypto_flags according to chainmode */ | ||
585 | if (strcmp(chainmode, "cbc") == 0) | ||
586 | crypto_flags = CRYPTO_TFM_MODE_CBC; | ||
587 | else if (strcmp(chainmode, "ecb") == 0) | ||
588 | crypto_flags = CRYPTO_TFM_MODE_ECB; | ||
589 | else { | ||
590 | ti->error = PFX "Unknown chaining mode"; | ||
591 | goto bad1; | ||
592 | } | ||
593 | |||
594 | if (crypto_flags != CRYPTO_TFM_MODE_ECB && !ivmode) { | ||
595 | ti->error = PFX "This chaining mode requires an IV mechanism"; | ||
596 | goto bad1; | ||
597 | } | ||
598 | |||
599 | tfm = crypto_alloc_tfm(cipher, crypto_flags); | ||
600 | if (!tfm) { | ||
601 | ti->error = PFX "Error allocating crypto tfm"; | ||
602 | goto bad1; | ||
603 | } | ||
604 | if (crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER) { | ||
605 | ti->error = PFX "Expected cipher algorithm"; | ||
606 | goto bad2; | ||
607 | } | ||
608 | |||
609 | cc->tfm = tfm; | ||
610 | |||
611 | /* | ||
612 | * Choose ivmode. Valid modes: "plain", "essiv:<esshash>". | ||
613 | * See the comments above the IV generation code. | ||
614 | */ | ||
615 | |||
616 | if (ivmode == NULL) | ||
617 | cc->iv_gen_ops = NULL; | ||
618 | else if (strcmp(ivmode, "plain") == 0) | ||
619 | cc->iv_gen_ops = &crypt_iv_plain_ops; | ||
620 | else if (strcmp(ivmode, "essiv") == 0) | ||
621 | cc->iv_gen_ops = &crypt_iv_essiv_ops; | ||
622 | else { | ||
623 | ti->error = PFX "Invalid IV mode"; | ||
624 | goto bad2; | ||
625 | } | ||
626 | |||
627 | if (cc->iv_gen_ops && cc->iv_gen_ops->ctr && | ||
628 | cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0) | ||
629 | goto bad2; | ||
630 | |||
631 | if (tfm->crt_cipher.cit_decrypt_iv && tfm->crt_cipher.cit_encrypt_iv) | ||
632 | /* at least a 64 bit sector number should fit in our buffer */ | ||
633 | cc->iv_size = max(crypto_tfm_alg_ivsize(tfm), | ||
634 | (unsigned int)(sizeof(u64) / sizeof(u8))); | ||
635 | else { | ||
636 | cc->iv_size = 0; | ||
637 | if (cc->iv_gen_ops) { | ||
638 | DMWARN(PFX "Selected cipher does not support IVs"); | ||
639 | if (cc->iv_gen_ops->dtr) | ||
640 | cc->iv_gen_ops->dtr(cc); | ||
641 | cc->iv_gen_ops = NULL; | ||
642 | } | ||
643 | } | ||
644 | |||
645 | cc->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab, | ||
646 | mempool_free_slab, _crypt_io_pool); | ||
647 | if (!cc->io_pool) { | ||
648 | ti->error = PFX "Cannot allocate crypt io mempool"; | ||
649 | goto bad3; | ||
650 | } | ||
651 | |||
652 | cc->page_pool = mempool_create(MIN_POOL_PAGES, mempool_alloc_page, | ||
653 | mempool_free_page, NULL); | ||
654 | if (!cc->page_pool) { | ||
655 | ti->error = PFX "Cannot allocate page mempool"; | ||
656 | goto bad4; | ||
657 | } | ||
658 | |||
659 | if (tfm->crt_cipher.cit_setkey(tfm, cc->key, key_size) < 0) { | ||
660 | ti->error = PFX "Error setting key"; | ||
661 | goto bad5; | ||
662 | } | ||
663 | |||
664 | if (sscanf(argv[2], SECTOR_FORMAT, &cc->iv_offset) != 1) { | ||
665 | ti->error = PFX "Invalid iv_offset sector"; | ||
666 | goto bad5; | ||
667 | } | ||
668 | |||
669 | if (sscanf(argv[4], SECTOR_FORMAT, &cc->start) != 1) { | ||
670 | ti->error = PFX "Invalid device sector"; | ||
671 | goto bad5; | ||
672 | } | ||
673 | |||
674 | if (dm_get_device(ti, argv[3], cc->start, ti->len, | ||
675 | dm_table_get_mode(ti->table), &cc->dev)) { | ||
676 | ti->error = PFX "Device lookup failed"; | ||
677 | goto bad5; | ||
678 | } | ||
679 | |||
680 | if (ivmode && cc->iv_gen_ops) { | ||
681 | if (ivopts) | ||
682 | *(ivopts - 1) = ':'; | ||
683 | cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL); | ||
684 | if (!cc->iv_mode) { | ||
685 | ti->error = PFX "Error kmallocing iv_mode string"; | ||
686 | goto bad5; | ||
687 | } | ||
688 | strcpy(cc->iv_mode, ivmode); | ||
689 | } else | ||
690 | cc->iv_mode = NULL; | ||
691 | |||
692 | ti->private = cc; | ||
693 | return 0; | ||
694 | |||
695 | bad5: | ||
696 | mempool_destroy(cc->page_pool); | ||
697 | bad4: | ||
698 | mempool_destroy(cc->io_pool); | ||
699 | bad3: | ||
700 | if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) | ||
701 | cc->iv_gen_ops->dtr(cc); | ||
702 | bad2: | ||
703 | crypto_free_tfm(tfm); | ||
704 | bad1: | ||
705 | kfree(cc); | ||
706 | return -EINVAL; | ||
707 | } | ||
708 | |||
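(To make the argv[0] parsing concrete: a hypothetical mapping line such as

    aes-cbc-essiv:sha256 <hex key> 0 /dev/sdb1 0

where the device path and sector offsets are invented for illustration, is split by the strsep() calls into cipher "aes", chainmode "cbc", ivmode "essiv" and ivopts "sha256". The same split, runnable in userspace:)

    #define _GNU_SOURCE     /* for strsep() */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char spec[] = "aes-cbc-essiv:sha256";  /* hypothetical argv[0] */
        char *tmp = spec;
        char *cipher, *chainmode, *ivopts, *ivmode;

        cipher    = strsep(&tmp, "-");
        chainmode = strsep(&tmp, "-");
        ivopts    = strsep(&tmp, "-");
        ivmode    = strsep(&ivopts, ":");

        printf("cipher=%s chainmode=%s ivmode=%s ivopts=%s\n",
               cipher, chainmode, ivmode, ivopts);
        /* prints: cipher=aes chainmode=cbc ivmode=essiv ivopts=sha256 */
        return 0;
    }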
709 | static void crypt_dtr(struct dm_target *ti) | ||
710 | { | ||
711 | struct crypt_config *cc = (struct crypt_config *) ti->private; | ||
712 | |||
713 | mempool_destroy(cc->page_pool); | ||
714 | mempool_destroy(cc->io_pool); | ||
715 | |||
716 | if (cc->iv_mode) | ||
717 | kfree(cc->iv_mode); | ||
718 | if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) | ||
719 | cc->iv_gen_ops->dtr(cc); | ||
720 | crypto_free_tfm(cc->tfm); | ||
721 | dm_put_device(ti, cc->dev); | ||
722 | kfree(cc); | ||
723 | } | ||
724 | |||
725 | static int crypt_endio(struct bio *bio, unsigned int done, int error) | ||
726 | { | ||
727 | struct crypt_io *io = (struct crypt_io *) bio->bi_private; | ||
728 | struct crypt_config *cc = (struct crypt_config *) io->target->private; | ||
729 | |||
730 | if (bio_data_dir(bio) == WRITE) { | ||
731 | /* | ||
732 | * free the processed pages, even if | ||
733 | * it's only a partially completed write | ||
734 | */ | ||
735 | crypt_free_buffer_pages(cc, bio, done); | ||
736 | } | ||
737 | |||
738 | if (bio->bi_size) | ||
739 | return 1; | ||
740 | |||
741 | bio_put(bio); | ||
742 | |||
743 | /* | ||
744 | * successful reads are decrypted by the worker thread | ||
745 | */ | ||
746 | if ((bio_data_dir(bio) == READ) | ||
747 | && bio_flagged(bio, BIO_UPTODATE)) { | ||
748 | kcryptd_queue_io(io); | ||
749 | return 0; | ||
750 | } | ||
751 | |||
752 | dec_pending(io, error); | ||
753 | return error; | ||
754 | } | ||
755 | |||
756 | static inline struct bio * | ||
757 | crypt_clone(struct crypt_config *cc, struct crypt_io *io, struct bio *bio, | ||
758 | sector_t sector, unsigned int *bvec_idx, | ||
759 | struct convert_context *ctx) | ||
760 | { | ||
761 | struct bio *clone; | ||
762 | |||
763 | if (bio_data_dir(bio) == WRITE) { | ||
764 | clone = crypt_alloc_buffer(cc, bio->bi_size, | ||
765 | io->first_clone, bvec_idx); | ||
766 | if (clone) { | ||
767 | ctx->bio_out = clone; | ||
768 | if (crypt_convert(cc, ctx) < 0) { | ||
769 | crypt_free_buffer_pages(cc, clone, | ||
770 | clone->bi_size); | ||
771 | bio_put(clone); | ||
772 | return NULL; | ||
773 | } | ||
774 | } | ||
775 | } else { | ||
776 | /* | ||
777 | * The block layer might modify the bvec array, so always | ||
778 | * copy the required bvecs because we need the original | ||
779 | * one in order to decrypt the whole bio data *afterwards*. | ||
780 | */ | ||
781 | clone = bio_alloc(GFP_NOIO, bio_segments(bio)); | ||
782 | if (clone) { | ||
783 | clone->bi_idx = 0; | ||
784 | clone->bi_vcnt = bio_segments(bio); | ||
785 | clone->bi_size = bio->bi_size; | ||
786 | memcpy(clone->bi_io_vec, bio_iovec(bio), | ||
787 | sizeof(struct bio_vec) * clone->bi_vcnt); | ||
788 | } | ||
789 | } | ||
790 | |||
791 | if (!clone) | ||
792 | return NULL; | ||
793 | |||
794 | clone->bi_private = io; | ||
795 | clone->bi_end_io = crypt_endio; | ||
796 | clone->bi_bdev = cc->dev->bdev; | ||
797 | clone->bi_sector = cc->start + sector; | ||
798 | clone->bi_rw = bio->bi_rw; | ||
799 | |||
800 | return clone; | ||
801 | } | ||
802 | |||
803 | static int crypt_map(struct dm_target *ti, struct bio *bio, | ||
804 | union map_info *map_context) | ||
805 | { | ||
806 | struct crypt_config *cc = (struct crypt_config *) ti->private; | ||
807 | struct crypt_io *io = mempool_alloc(cc->io_pool, GFP_NOIO); | ||
808 | struct convert_context ctx; | ||
809 | struct bio *clone; | ||
810 | unsigned int remaining = bio->bi_size; | ||
811 | sector_t sector = bio->bi_sector - ti->begin; | ||
812 | unsigned int bvec_idx = 0; | ||
813 | |||
814 | io->target = ti; | ||
815 | io->bio = bio; | ||
816 | io->first_clone = NULL; | ||
817 | io->error = 0; | ||
818 | atomic_set(&io->pending, 1); /* hold a reference */ | ||
819 | |||
820 | if (bio_data_dir(bio) == WRITE) | ||
821 | crypt_convert_init(cc, &ctx, NULL, bio, sector, 1); | ||
822 | |||
823 | /* | ||
824 | * The allocated buffers can be smaller than the whole bio, | ||
825 | * so repeat the whole process until all the data can be handled. | ||
826 | */ | ||
827 | while (remaining) { | ||
828 | clone = crypt_clone(cc, io, bio, sector, &bvec_idx, &ctx); | ||
829 | if (!clone) | ||
830 | goto cleanup; | ||
831 | |||
832 | if (!io->first_clone) { | ||
833 | /* | ||
834 | * hold a reference to the first clone, because it | ||
835 | * holds the bio_vec array and that can't be freed | ||
836 | * before all other clones are released | ||
837 | */ | ||
838 | bio_get(clone); | ||
839 | io->first_clone = clone; | ||
840 | } | ||
841 | atomic_inc(&io->pending); | ||
842 | |||
843 | remaining -= clone->bi_size; | ||
844 | sector += bio_sectors(clone); | ||
845 | |||
846 | generic_make_request(clone); | ||
847 | |||
848 | /* out of memory -> run queues */ | ||
849 | if (remaining) | ||
850 | blk_congestion_wait(bio_data_dir(clone), HZ/100); | ||
851 | } | ||
852 | |||
853 | /* drop reference, clones could have returned before we reach this */ | ||
854 | dec_pending(io, 0); | ||
855 | return 0; | ||
856 | |||
857 | cleanup: | ||
858 | if (io->first_clone) { | ||
859 | dec_pending(io, -ENOMEM); | ||
860 | return 0; | ||
861 | } | ||
862 | |||
863 | /* if no bio has been dispatched yet, we can directly return the error */ | ||
864 | mempool_free(io, cc->io_pool); | ||
865 | return -ENOMEM; | ||
866 | } | ||
867 | |||
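(The io->pending counter implements a small completion protocol: it starts at 1 for the submitter, gains one reference per clone issued, and dec_pending() only ends the original bio once the count reaches zero, so clones that finish early cannot complete the request while crypt_map() is still splitting it. A toy model of just the counting, not dm code:)

    #include <stdio.h>

    struct io { int pending; int error; };

    static void dec_pending(struct io *io, int error)
    {
        if (error < 0)
            io->error = error;
        if (--io->pending)
            return;
        printf("whole request completed, error=%d\n", io->error);
    }

    int main(void)
    {
        struct io io = { .pending = 1, .error = 0 };    /* submitter's ref */
        int clones = 3, i;

        for (i = 0; i < clones; i++)
            io.pending++;           /* one reference per clone submitted */
        for (i = 0; i < clones; i++)
            dec_pending(&io, 0);    /* each clone's completion           */
        dec_pending(&io, 0);        /* submitter drops its own reference */
        return 0;
    }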
868 | static int crypt_status(struct dm_target *ti, status_type_t type, | ||
869 | char *result, unsigned int maxlen) | ||
870 | { | ||
871 | struct crypt_config *cc = (struct crypt_config *) ti->private; | ||
872 | const char *cipher; | ||
873 | const char *chainmode = NULL; | ||
874 | unsigned int sz = 0; | ||
875 | |||
876 | switch (type) { | ||
877 | case STATUSTYPE_INFO: | ||
878 | result[0] = '\0'; | ||
879 | break; | ||
880 | |||
881 | case STATUSTYPE_TABLE: | ||
882 | cipher = crypto_tfm_alg_name(cc->tfm); | ||
883 | |||
884 | switch(cc->tfm->crt_cipher.cit_mode) { | ||
885 | case CRYPTO_TFM_MODE_CBC: | ||
886 | chainmode = "cbc"; | ||
887 | break; | ||
888 | case CRYPTO_TFM_MODE_ECB: | ||
889 | chainmode = "ecb"; | ||
890 | break; | ||
891 | default: | ||
892 | BUG(); | ||
893 | } | ||
894 | |||
895 | if (cc->iv_mode) | ||
896 | DMEMIT("%s-%s-%s ", cipher, chainmode, cc->iv_mode); | ||
897 | else | ||
898 | DMEMIT("%s-%s ", cipher, chainmode); | ||
899 | |||
900 | if (cc->key_size > 0) { | ||
901 | if ((maxlen - sz) < ((cc->key_size << 1) + 1)) | ||
902 | return -ENOMEM; | ||
903 | |||
904 | crypt_encode_key(result + sz, cc->key, cc->key_size); | ||
905 | sz += cc->key_size << 1; | ||
906 | } else { | ||
907 | if (sz >= maxlen) | ||
908 | return -ENOMEM; | ||
909 | result[sz++] = '-'; | ||
910 | } | ||
911 | |||
912 | DMEMIT(" " SECTOR_FORMAT " %s " SECTOR_FORMAT, | ||
913 | cc->iv_offset, cc->dev->name, cc->start); | ||
914 | break; | ||
915 | } | ||
916 | return 0; | ||
917 | } | ||
918 | |||
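(So for a mapping built with an aes-cbc-essiv:sha256 cipher spec, the STATUSTYPE_TABLE branch would report something along the lines of

    aes-cbc-essiv:sha256 00112233445566778899aabbccddeeff 0 8:17 0

where the hex key and the device field are invented here; the device is printed exactly as dm stores it in cc->dev->name, and a zero-length key is shown as a single "-".)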
919 | static struct target_type crypt_target = { | ||
920 | .name = "crypt", | ||
921 | .version= {1, 1, 0}, | ||
922 | .module = THIS_MODULE, | ||
923 | .ctr = crypt_ctr, | ||
924 | .dtr = crypt_dtr, | ||
925 | .map = crypt_map, | ||
926 | .status = crypt_status, | ||
927 | }; | ||
928 | |||
929 | static int __init dm_crypt_init(void) | ||
930 | { | ||
931 | int r; | ||
932 | |||
933 | _crypt_io_pool = kmem_cache_create("dm-crypt_io", | ||
934 | sizeof(struct crypt_io), | ||
935 | 0, 0, NULL, NULL); | ||
936 | if (!_crypt_io_pool) | ||
937 | return -ENOMEM; | ||
938 | |||
939 | _kcryptd_workqueue = create_workqueue("kcryptd"); | ||
940 | if (!_kcryptd_workqueue) { | ||
941 | r = -ENOMEM; | ||
942 | DMERR(PFX "couldn't create kcryptd"); | ||
943 | goto bad1; | ||
944 | } | ||
945 | |||
946 | r = dm_register_target(&crypt_target); | ||
947 | if (r < 0) { | ||
948 | DMERR(PFX "register failed %d", r); | ||
949 | goto bad2; | ||
950 | } | ||
951 | |||
952 | return 0; | ||
953 | |||
954 | bad2: | ||
955 | destroy_workqueue(_kcryptd_workqueue); | ||
956 | bad1: | ||
957 | kmem_cache_destroy(_crypt_io_pool); | ||
958 | return r; | ||
959 | } | ||
960 | |||
961 | static void __exit dm_crypt_exit(void) | ||
962 | { | ||
963 | int r = dm_unregister_target(&crypt_target); | ||
964 | |||
965 | if (r < 0) | ||
966 | DMERR(PFX "unregister failed %d", r); | ||
967 | |||
968 | destroy_workqueue(_kcryptd_workqueue); | ||
969 | kmem_cache_destroy(_crypt_io_pool); | ||
970 | } | ||
971 | |||
972 | module_init(dm_crypt_init); | ||
973 | module_exit(dm_crypt_exit); | ||
974 | |||
975 | MODULE_AUTHOR("Christophe Saout <christophe@saout.de>"); | ||
976 | MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption"); | ||
977 | MODULE_LICENSE("GPL"); | ||