Diffstat (limited to 'fs/crypto/crypto.c')
-rw-r--r--	fs/crypto/crypto.c	157
1 file changed, 16 insertions, 141 deletions
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index ac8e4f6a3773..02a7a9286449 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -24,7 +24,6 @@
 #include <linux/module.h>
 #include <linux/scatterlist.h>
 #include <linux/ratelimit.h>
-#include <linux/bio.h>
 #include <linux/dcache.h>
 #include <linux/namei.h>
 #include "fscrypt_private.h"
@@ -44,7 +43,7 @@ static mempool_t *fscrypt_bounce_page_pool = NULL;
 static LIST_HEAD(fscrypt_free_ctxs);
 static DEFINE_SPINLOCK(fscrypt_ctx_lock);
 
-static struct workqueue_struct *fscrypt_read_workqueue;
+struct workqueue_struct *fscrypt_read_workqueue;
 static DEFINE_MUTEX(fscrypt_init_mutex);
 
 static struct kmem_cache *fscrypt_ctx_cachep;
@@ -141,16 +140,10 @@ static void page_crypt_complete(struct crypto_async_request *req, int res)
 	complete(&ecr->completion);
 }
 
-typedef enum {
-	FS_DECRYPT = 0,
-	FS_ENCRYPT,
-} fscrypt_direction_t;
-
-static int do_page_crypto(const struct inode *inode,
-			fscrypt_direction_t rw, u64 lblk_num,
-			struct page *src_page, struct page *dest_page,
-			unsigned int len, unsigned int offs,
-			gfp_t gfp_flags)
+int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
+			   u64 lblk_num, struct page *src_page,
+			   struct page *dest_page, unsigned int len,
+			   unsigned int offs, gfp_t gfp_flags)
 {
 	struct {
 		__le64 index;
@@ -205,7 +198,8 @@ static int do_page_crypto(const struct inode *inode,
 	return 0;
 }
 
-static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx, gfp_t gfp_flags)
+struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
+				       gfp_t gfp_flags)
 {
 	ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
 	if (ctx->w.bounce_page == NULL)
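This hunk, together with the two above it, un-statics do_page_crypto() and alloc_bounce_page() under fscrypt_* names and exposes fscrypt_read_workqueue, while the fscrypt_direction_t typedef is deleted from this file even though the new signature still uses it. Matching declarations therefore have to land in the shared internal header; that hunk lies outside this diffstat, so the following fscrypt_private.h sketch is inferred from the renames rather than quoted:

	/* Sketch only: inferred from the renames above, not taken from this diff. */
	typedef enum {
		FS_DECRYPT = 0,
		FS_ENCRYPT,
	} fscrypt_direction_t;

	/* crypto.c */
	extern struct workqueue_struct *fscrypt_read_workqueue;
	extern int fscrypt_do_page_crypto(const struct inode *inode,
					  fscrypt_direction_t rw, u64 lblk_num,
					  struct page *src_page,
					  struct page *dest_page,
					  unsigned int len, unsigned int offs,
					  gfp_t gfp_flags);
	extern struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
						      gfp_t gfp_flags);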
@@ -260,9 +254,9 @@ struct page *fscrypt_encrypt_page(const struct inode *inode,
 
 	if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) {
 		/* with inplace-encryption we just encrypt the page */
-		err = do_page_crypto(inode, FS_ENCRYPT, lblk_num,
-					page, ciphertext_page,
-					len, offs, gfp_flags);
+		err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num, page,
+					     ciphertext_page, len, offs,
+					     gfp_flags);
 		if (err)
 			return ERR_PTR(err);
 
@@ -276,14 +270,14 @@ struct page *fscrypt_encrypt_page(const struct inode *inode,
 		return (struct page *)ctx;
 
 	/* The encryption operation will require a bounce page. */
-	ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
+	ciphertext_page = fscrypt_alloc_bounce_page(ctx, gfp_flags);
 	if (IS_ERR(ciphertext_page))
 		goto errout;
 
 	ctx->w.control_page = page;
-	err = do_page_crypto(inode, FS_ENCRYPT, lblk_num,
-					page, ciphertext_page,
-					len, offs, gfp_flags);
+	err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num,
+				     page, ciphertext_page, len, offs,
+				     gfp_flags);
 	if (err) {
 		ciphertext_page = ERR_PTR(err);
 		goto errout;
@@ -320,72 +314,11 @@ int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
 	if (!(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES))
 		BUG_ON(!PageLocked(page));
 
-	return do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page, len,
-			offs, GFP_NOFS);
+	return fscrypt_do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page,
+				      len, offs, GFP_NOFS);
 }
 EXPORT_SYMBOL(fscrypt_decrypt_page);
 
-int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
-				sector_t pblk, unsigned int len)
-{
-	struct fscrypt_ctx *ctx;
-	struct page *ciphertext_page = NULL;
-	struct bio *bio;
-	int ret, err = 0;
-
-	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
-
-	ctx = fscrypt_get_ctx(inode, GFP_NOFS);
-	if (IS_ERR(ctx))
-		return PTR_ERR(ctx);
-
-	ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
-	if (IS_ERR(ciphertext_page)) {
-		err = PTR_ERR(ciphertext_page);
-		goto errout;
-	}
-
-	while (len--) {
-		err = do_page_crypto(inode, FS_ENCRYPT, lblk,
-					ZERO_PAGE(0), ciphertext_page,
-					PAGE_SIZE, 0, GFP_NOFS);
-		if (err)
-			goto errout;
-
-		bio = bio_alloc(GFP_NOWAIT, 1);
-		if (!bio) {
-			err = -ENOMEM;
-			goto errout;
-		}
-		bio->bi_bdev = inode->i_sb->s_bdev;
-		bio->bi_iter.bi_sector =
-			pblk << (inode->i_sb->s_blocksize_bits - 9);
-		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-		ret = bio_add_page(bio, ciphertext_page,
-					inode->i_sb->s_blocksize, 0);
-		if (ret != inode->i_sb->s_blocksize) {
-			/* should never happen! */
-			WARN_ON(1);
-			bio_put(bio);
-			err = -EIO;
-			goto errout;
-		}
-		err = submit_bio_wait(bio);
-		if ((err == 0) && bio->bi_error)
-			err = -EIO;
-		bio_put(bio);
-		if (err)
-			goto errout;
-		lblk++;
-		pblk++;
-	}
-	err = 0;
-errout:
-	fscrypt_release_ctx(ctx);
-	return err;
-}
-EXPORT_SYMBOL(fscrypt_zeroout_range);
-
 /*
  * Validate dentries for encrypted directories to make sure we aren't
  * potentially caching stale data after a key has been added or
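fscrypt_zeroout_range() and its bio plumbing leave crypto.c entirely in the hunk above, matching the <linux/bio.h> include dropped in the first hunk; presumably the function moves verbatim to a separate block-layer file (e.g. a new fs/crypto/bio.c) wired into the build alongside crypto.o. Because the symbol stays exported, existing filesystem callers keep working unchanged, as in this hypothetical sketch:

	#include <linux/fscrypto.h>	/* fscrypt_zeroout_range(); header name of this era is an assumption */

	/* Hypothetical caller sketch (not part of this diff): zeroing one
	 * encrypted block on disk still goes through the exported helper,
	 * wherever its definition now lives. */
	static int zero_one_encrypted_block(struct inode *inode, pgoff_t lblk,
					    sector_t pblk)
	{
		return fscrypt_zeroout_range(inode, lblk, pblk, 1);
	}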
@@ -442,64 +375,6 @@ const struct dentry_operations fscrypt_d_ops = {
 };
 EXPORT_SYMBOL(fscrypt_d_ops);
 
-/*
- * Call fscrypt_decrypt_page on every single page, reusing the encryption
- * context.
- */
-static void completion_pages(struct work_struct *work)
-{
-	struct fscrypt_ctx *ctx =
-		container_of(work, struct fscrypt_ctx, r.work);
-	struct bio *bio = ctx->r.bio;
-	struct bio_vec *bv;
-	int i;
-
-	bio_for_each_segment_all(bv, bio, i) {
-		struct page *page = bv->bv_page;
-		int ret = fscrypt_decrypt_page(page->mapping->host, page,
-				PAGE_SIZE, 0, page->index);
-
-		if (ret) {
-			WARN_ON_ONCE(1);
-			SetPageError(page);
-		} else {
-			SetPageUptodate(page);
-		}
-		unlock_page(page);
-	}
-	fscrypt_release_ctx(ctx);
-	bio_put(bio);
-}
-
-void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio)
-{
-	INIT_WORK(&ctx->r.work, completion_pages);
-	ctx->r.bio = bio;
-	queue_work(fscrypt_read_workqueue, &ctx->r.work);
-}
-EXPORT_SYMBOL(fscrypt_decrypt_bio_pages);
-
-void fscrypt_pullback_bio_page(struct page **page, bool restore)
-{
-	struct fscrypt_ctx *ctx;
-	struct page *bounce_page;
-
-	/* The bounce data pages are unmapped. */
-	if ((*page)->mapping)
-		return;
-
-	/* The bounce data page is unmapped. */
-	bounce_page = *page;
-	ctx = (struct fscrypt_ctx *)page_private(bounce_page);
-
-	/* restore control page */
-	*page = ctx->w.control_page;
-
-	if (restore)
-		fscrypt_restore_control_page(bounce_page);
-}
-EXPORT_SYMBOL(fscrypt_pullback_bio_page);
-
 void fscrypt_restore_control_page(struct page *page)
 {
 	struct fscrypt_ctx *ctx;
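The read-completion pair deleted in the final hunk is self-contained, so it can move verbatim as well. Assuming the same destination file (fs/crypto/bio.c is an assumption; only the removal is visible in this diff), the relocated read path would look like:

	/* fs/crypto/bio.c -- sketch; bodies copied from the hunks removed above,
	 * only the file placement is assumed. */
	#include <linux/module.h>
	#include <linux/bio.h>
	#include <linux/pagemap.h>
	#include "fscrypt_private.h"

	/*
	 * Call fscrypt_decrypt_page on every single page, reusing the encryption
	 * context.
	 */
	static void completion_pages(struct work_struct *work)
	{
		struct fscrypt_ctx *ctx =
			container_of(work, struct fscrypt_ctx, r.work);
		struct bio *bio = ctx->r.bio;
		struct bio_vec *bv;
		int i;

		bio_for_each_segment_all(bv, bio, i) {
			struct page *page = bv->bv_page;
			int ret = fscrypt_decrypt_page(page->mapping->host, page,
					PAGE_SIZE, 0, page->index);

			if (ret) {
				WARN_ON_ONCE(1);
				SetPageError(page);
			} else {
				SetPageUptodate(page);
			}
			unlock_page(page);
		}
		fscrypt_release_ctx(ctx);
		bio_put(bio);
	}

	void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio)
	{
		INIT_WORK(&ctx->r.work, completion_pages);
		ctx->r.bio = bio;
		queue_work(fscrypt_read_workqueue, &ctx->r.work);
	}
	EXPORT_SYMBOL(fscrypt_decrypt_bio_pages);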