-rw-r--r--   Documentation/filesystems/fscrypt.rst   |  43
-rw-r--r--   fs/crypto/Kconfig                       |   1
-rw-r--r--   fs/crypto/bio.c                         |  73
-rw-r--r--   fs/crypto/crypto.c                      | 299
-rw-r--r--   fs/crypto/fname.c                       |   1
-rw-r--r--   fs/crypto/fscrypt_private.h             |  15
-rw-r--r--   fs/crypto/hooks.c                       |   1
-rw-r--r--   fs/crypto/keyinfo.c                     |   1
-rw-r--r--   fs/crypto/policy.c                      |   2
-rw-r--r--   fs/ext4/inode.c                         |  37
-rw-r--r--   fs/ext4/page-io.c                       |  44
-rw-r--r--   fs/f2fs/data.c                          |  17
-rw-r--r--   fs/ubifs/crypto.c                       |  19
-rw-r--r--   include/linux/fscrypt.h                 |  96
14 files changed, 363 insertions, 286 deletions
diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst
index 08c23b60e016..82efa41b0e6c 100644
--- a/Documentation/filesystems/fscrypt.rst
+++ b/Documentation/filesystems/fscrypt.rst
@@ -191,7 +191,9 @@ Currently, the following pairs of encryption modes are supported:
 If unsure, you should use the (AES-256-XTS, AES-256-CTS-CBC) pair.
 
 AES-128-CBC was added only for low-powered embedded devices with
-crypto accelerators such as CAAM or CESA that do not support XTS.
+crypto accelerators such as CAAM or CESA that do not support XTS. To
+use AES-128-CBC, CONFIG_CRYPTO_SHA256 (or another SHA-256
+implementation) must be enabled so that ESSIV can be used.
 
 Adiantum is a (primarily) stream cipher-based mode that is fast even
 on CPUs without dedicated crypto instructions. It's also a true
@@ -647,3 +649,42 @@ Note that the precise way that filenames are presented to userspace
 without the key is subject to change in the future. It is only meant
 as a way to temporarily present valid filenames so that commands like
 ``rm -r`` work as expected on encrypted directories.
+
+Tests
+=====
+
+To test fscrypt, use xfstests, which is Linux's de facto standard
+filesystem test suite. First, run all the tests in the "encrypt"
+group on the relevant filesystem(s). For example, to test ext4 and
+f2fs encryption using `kvm-xfstests
+<https://github.com/tytso/xfstests-bld/blob/master/Documentation/kvm-quickstart.md>`_::
+
+    kvm-xfstests -c ext4,f2fs -g encrypt
+
+UBIFS encryption can also be tested this way, but it should be done in
+a separate command, and it takes some time for kvm-xfstests to set up
+emulated UBI volumes::
+
+    kvm-xfstests -c ubifs -g encrypt
+
+No tests should fail. However, tests that use non-default encryption
+modes (e.g. generic/549 and generic/550) will be skipped if the needed
+algorithms were not built into the kernel's crypto API. Also, tests
+that access the raw block device (e.g. generic/399, generic/548,
+generic/549, generic/550) will be skipped on UBIFS.
+
+Besides running the "encrypt" group tests, for ext4 and f2fs it's also
+possible to run most xfstests with the "test_dummy_encryption" mount
+option. This option causes all new files to be automatically
+encrypted with a dummy key, without having to make any API calls.
+This tests the encrypted I/O paths more thoroughly. To do this with
+kvm-xfstests, use the "encrypt" filesystem configuration::
+
+    kvm-xfstests -c ext4/encrypt,f2fs/encrypt -g auto
+
+Because this runs many more tests than "-g encrypt" does, it takes
+much longer to run; so also consider using `gce-xfstests
+<https://github.com/tytso/xfstests-bld/blob/master/Documentation/gce-xfstests.md>`_
+instead of kvm-xfstests::
+
+    gce-xfstests -c ext4/encrypt,f2fs/encrypt -g auto
diff --git a/fs/crypto/Kconfig b/fs/crypto/Kconfig
index 24ed99e2eca0..5fdf24877c17 100644
--- a/fs/crypto/Kconfig
+++ b/fs/crypto/Kconfig
@@ -7,7 +7,6 @@ config FS_ENCRYPTION
 	select CRYPTO_ECB
 	select CRYPTO_XTS
 	select CRYPTO_CTS
-	select CRYPTO_SHA256
 	select KEYS
 	help
 	  Enable encryption of files and directories. This
diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index b46021ebde85..82da2510721f 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -33,9 +33,8 @@ static void __fscrypt_decrypt_bio(struct bio *bio, bool done)
 
 	bio_for_each_segment_all(bv, bio, iter_all) {
 		struct page *page = bv->bv_page;
-		int ret = fscrypt_decrypt_page(page->mapping->host, page,
-				PAGE_SIZE, 0, page->index);
-
+		int ret = fscrypt_decrypt_pagecache_blocks(page, bv->bv_len,
+							   bv->bv_offset);
 		if (ret)
 			SetPageError(page);
 		else if (done)
@@ -53,9 +52,8 @@ EXPORT_SYMBOL(fscrypt_decrypt_bio);
 
 static void completion_pages(struct work_struct *work)
 {
-	struct fscrypt_ctx *ctx =
-		container_of(work, struct fscrypt_ctx, r.work);
-	struct bio *bio = ctx->r.bio;
+	struct fscrypt_ctx *ctx = container_of(work, struct fscrypt_ctx, work);
+	struct bio *bio = ctx->bio;
 
 	__fscrypt_decrypt_bio(bio, true);
 	fscrypt_release_ctx(ctx);
@@ -64,57 +62,29 @@ static void completion_pages(struct work_struct *work)
 
 void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, struct bio *bio)
 {
-	INIT_WORK(&ctx->r.work, completion_pages);
-	ctx->r.bio = bio;
-	fscrypt_enqueue_decrypt_work(&ctx->r.work);
+	INIT_WORK(&ctx->work, completion_pages);
+	ctx->bio = bio;
+	fscrypt_enqueue_decrypt_work(&ctx->work);
 }
 EXPORT_SYMBOL(fscrypt_enqueue_decrypt_bio);
 
-void fscrypt_pullback_bio_page(struct page **page, bool restore)
-{
-	struct fscrypt_ctx *ctx;
-	struct page *bounce_page;
-
-	/* The bounce data pages are unmapped. */
-	if ((*page)->mapping)
-		return;
-
-	/* The bounce data page is unmapped. */
-	bounce_page = *page;
-	ctx = (struct fscrypt_ctx *)page_private(bounce_page);
-
-	/* restore control page */
-	*page = ctx->w.control_page;
-
-	if (restore)
-		fscrypt_restore_control_page(bounce_page);
-}
-EXPORT_SYMBOL(fscrypt_pullback_bio_page);
-
 int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
 			  sector_t pblk, unsigned int len)
 {
-	struct fscrypt_ctx *ctx;
-	struct page *ciphertext_page = NULL;
+	const unsigned int blockbits = inode->i_blkbits;
+	const unsigned int blocksize = 1 << blockbits;
+	struct page *ciphertext_page;
 	struct bio *bio;
 	int ret, err = 0;
 
-	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
-
-	ctx = fscrypt_get_ctx(GFP_NOFS);
-	if (IS_ERR(ctx))
-		return PTR_ERR(ctx);
-
-	ciphertext_page = fscrypt_alloc_bounce_page(ctx, GFP_NOWAIT);
-	if (IS_ERR(ciphertext_page)) {
-		err = PTR_ERR(ciphertext_page);
-		goto errout;
-	}
+	ciphertext_page = fscrypt_alloc_bounce_page(GFP_NOWAIT);
+	if (!ciphertext_page)
+		return -ENOMEM;
 
 	while (len--) {
-		err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk,
-					     ZERO_PAGE(0), ciphertext_page,
-					     PAGE_SIZE, 0, GFP_NOFS);
+		err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk,
+					  ZERO_PAGE(0), ciphertext_page,
+					  blocksize, 0, GFP_NOFS);
 		if (err)
 			goto errout;
 
@@ -124,14 +94,11 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
 			goto errout;
 		}
 		bio_set_dev(bio, inode->i_sb->s_bdev);
-		bio->bi_iter.bi_sector =
-			pblk << (inode->i_sb->s_blocksize_bits - 9);
+		bio->bi_iter.bi_sector = pblk << (blockbits - 9);
 		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-		ret = bio_add_page(bio, ciphertext_page,
-					inode->i_sb->s_blocksize, 0);
-		if (ret != inode->i_sb->s_blocksize) {
+		ret = bio_add_page(bio, ciphertext_page, blocksize, 0);
+		if (WARN_ON(ret != blocksize)) {
 			/* should never happen! */
-			WARN_ON(1);
 			bio_put(bio);
 			err = -EIO;
 			goto errout;
@@ -147,7 +114,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
 	}
 	err = 0;
 errout:
-	fscrypt_release_ctx(ctx);
+	fscrypt_free_bounce_page(ciphertext_page);
 	return err;
 }
 EXPORT_SYMBOL(fscrypt_zeroout_range);
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 335a362ee446..45c3d0427fb2 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -59,23 +59,16 @@ void fscrypt_enqueue_decrypt_work(struct work_struct *work)
 EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);
 
 /**
- * fscrypt_release_ctx() - Releases an encryption context
- * @ctx: The encryption context to release.
+ * fscrypt_release_ctx() - Release a decryption context
+ * @ctx: The decryption context to release.
  *
- * If the encryption context was allocated from the pre-allocated pool, returns
- * it to that pool. Else, frees it.
- *
- * If there's a bounce page in the context, this frees that.
+ * If the decryption context was allocated from the pre-allocated pool, return
+ * it to that pool. Else, free it.
  */
 void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
 {
 	unsigned long flags;
 
-	if (ctx->flags & FS_CTX_HAS_BOUNCE_BUFFER_FL && ctx->w.bounce_page) {
-		mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
-		ctx->w.bounce_page = NULL;
-	}
-	ctx->w.control_page = NULL;
 	if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
 		kmem_cache_free(fscrypt_ctx_cachep, ctx);
 	} else {
@@ -87,12 +80,12 @@ void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
 EXPORT_SYMBOL(fscrypt_release_ctx);
 
 /**
- * fscrypt_get_ctx() - Gets an encryption context
+ * fscrypt_get_ctx() - Get a decryption context
  * @gfp_flags: The gfp flag for memory allocation
  *
- * Allocates and initializes an encryption context.
+ * Allocate and initialize a decryption context.
  *
- * Return: A new encryption context on success; an ERR_PTR() otherwise.
+ * Return: A new decryption context on success; an ERR_PTR() otherwise.
  */
 struct fscrypt_ctx *fscrypt_get_ctx(gfp_t gfp_flags)
 {
@@ -100,14 +93,8 @@ struct fscrypt_ctx *fscrypt_get_ctx(gfp_t gfp_flags)
 	unsigned long flags;
 
 	/*
-	 * We first try getting the ctx from a free list because in
-	 * the common case the ctx will have an allocated and
-	 * initialized crypto tfm, so it's probably a worthwhile
-	 * optimization. For the bounce page, we first try getting it
-	 * from the kernel allocator because that's just about as fast
-	 * as getting it from a list and because a cache of free pages
-	 * should generally be a "last resort" option for a filesystem
-	 * to be able to do its job.
+	 * First try getting a ctx from the free list so that we don't have to
+	 * call into the slab allocator.
 	 */
 	spin_lock_irqsave(&fscrypt_ctx_lock, flags);
 	ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
@@ -123,11 +110,31 @@ struct fscrypt_ctx *fscrypt_get_ctx(gfp_t gfp_flags)
 	} else {
 		ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
 	}
-	ctx->flags &= ~FS_CTX_HAS_BOUNCE_BUFFER_FL;
 	return ctx;
 }
 EXPORT_SYMBOL(fscrypt_get_ctx);
 
+struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags)
+{
+	return mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
+}
+
+/**
+ * fscrypt_free_bounce_page() - free a ciphertext bounce page
+ *
+ * Free a bounce page that was allocated by fscrypt_encrypt_pagecache_blocks(),
+ * or by fscrypt_alloc_bounce_page() directly.
+ */
+void fscrypt_free_bounce_page(struct page *bounce_page)
+{
+	if (!bounce_page)
+		return;
+	set_page_private(bounce_page, (unsigned long)NULL);
+	ClearPagePrivate(bounce_page);
+	mempool_free(bounce_page, fscrypt_bounce_page_pool);
+}
+EXPORT_SYMBOL(fscrypt_free_bounce_page);
+
 void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
 			 const struct fscrypt_info *ci)
 {
@@ -141,10 +148,11 @@ void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
 	crypto_cipher_encrypt_one(ci->ci_essiv_tfm, iv->raw, iv->raw);
 }
 
-int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
-			   u64 lblk_num, struct page *src_page,
-			   struct page *dest_page, unsigned int len,
-			   unsigned int offs, gfp_t gfp_flags)
+/* Encrypt or decrypt a single filesystem block of file contents */
+int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
+			u64 lblk_num, struct page *src_page,
+			struct page *dest_page, unsigned int len,
+			unsigned int offs, gfp_t gfp_flags)
 {
 	union fscrypt_iv iv;
 	struct skcipher_request *req = NULL;
@@ -154,7 +162,10 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
 	struct crypto_skcipher *tfm = ci->ci_ctfm;
 	int res = 0;
 
-	BUG_ON(len == 0);
+	if (WARN_ON_ONCE(len <= 0))
+		return -EINVAL;
+	if (WARN_ON_ONCE(len % FS_CRYPTO_BLOCK_SIZE != 0))
+		return -EINVAL;
 
 	fscrypt_generate_iv(&iv, lblk_num, ci);
 
@@ -186,126 +197,158 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
 	return 0;
 }
 
-struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
-				       gfp_t gfp_flags)
-{
-	ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
-	if (ctx->w.bounce_page == NULL)
-		return ERR_PTR(-ENOMEM);
-	ctx->flags |= FS_CTX_HAS_BOUNCE_BUFFER_FL;
-	return ctx->w.bounce_page;
-}
-
 /**
- * fscypt_encrypt_page() - Encrypts a page
- * @inode: The inode for which the encryption should take place
- * @page: The page to encrypt. Must be locked for bounce-page
- *        encryption.
- * @len: Length of data to encrypt in @page and encrypted
- *       data in returned page.
- * @offs: Offset of data within @page and returned
- *        page holding encrypted data.
- * @lblk_num: Logical block number. This must be unique for multiple
- *            calls with same inode, except when overwriting
- *            previously written data.
- * @gfp_flags: The gfp flag for memory allocation
- *
- * Encrypts @page using the ctx encryption context. Performs encryption
- * either in-place or into a newly allocated bounce page.
- * Called on the page write path.
+ * fscrypt_encrypt_pagecache_blocks() - Encrypt filesystem blocks from a pagecache page
+ * @page: The locked pagecache page containing the block(s) to encrypt
+ * @len: Total size of the block(s) to encrypt. Must be a nonzero
+ *       multiple of the filesystem's block size.
+ * @offs: Byte offset within @page of the first block to encrypt. Must be
+ *        a multiple of the filesystem's block size.
+ * @gfp_flags: Memory allocation flags
  *
- * Bounce page allocation is the default.
- * In this case, the contents of @page are encrypted and stored in an
- * allocated bounce page. @page has to be locked and the caller must call
- * fscrypt_restore_control_page() on the returned ciphertext page to
- * release the bounce buffer and the encryption context.
+ * A new bounce page is allocated, and the specified block(s) are encrypted into
+ * it. In the bounce page, the ciphertext block(s) will be located at the same
+ * offsets at which the plaintext block(s) were located in the source page; any
+ * other parts of the bounce page will be left uninitialized. However, normally
+ * blocksize == PAGE_SIZE and the whole page is encrypted at once.
  *
- * In-place encryption is used by setting the FS_CFLG_OWN_PAGES flag in
- * fscrypt_operations. Here, the input-page is returned with its content
- * encrypted.
+ * This is for use by the filesystem's ->writepages() method.
  *
- * Return: A page with the encrypted content on success. Else, an
- * error value or NULL.
+ * Return: the new encrypted bounce page on success; an ERR_PTR() on failure
  */
-struct page *fscrypt_encrypt_page(const struct inode *inode,
-				  struct page *page,
-				  unsigned int len,
-				  unsigned int offs,
-				  u64 lblk_num, gfp_t gfp_flags)
+struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
+					      unsigned int len,
+					      unsigned int offs,
+					      gfp_t gfp_flags)
 
 {
-	struct fscrypt_ctx *ctx;
-	struct page *ciphertext_page = page;
+	const struct inode *inode = page->mapping->host;
+	const unsigned int blockbits = inode->i_blkbits;
+	const unsigned int blocksize = 1 << blockbits;
+	struct page *ciphertext_page;
+	u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
+		       (offs >> blockbits);
+	unsigned int i;
 	int err;
 
-	BUG_ON(len % FS_CRYPTO_BLOCK_SIZE != 0);
+	if (WARN_ON_ONCE(!PageLocked(page)))
+		return ERR_PTR(-EINVAL);
 
-	if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) {
-		/* with inplace-encryption we just encrypt the page */
-		err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num, page,
-					     ciphertext_page, len, offs,
-					     gfp_flags);
-		if (err)
-			return ERR_PTR(err);
+	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
+		return ERR_PTR(-EINVAL);
 
-		return ciphertext_page;
-	}
-
-	BUG_ON(!PageLocked(page));
-
-	ctx = fscrypt_get_ctx(gfp_flags);
-	if (IS_ERR(ctx))
-		return ERR_CAST(ctx);
-
-	/* The encryption operation will require a bounce page. */
-	ciphertext_page = fscrypt_alloc_bounce_page(ctx, gfp_flags);
-	if (IS_ERR(ciphertext_page))
-		goto errout;
+	ciphertext_page = fscrypt_alloc_bounce_page(gfp_flags);
+	if (!ciphertext_page)
+		return ERR_PTR(-ENOMEM);
 
-	ctx->w.control_page = page;
-	err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num,
-				     page, ciphertext_page, len, offs,
-				     gfp_flags);
-	if (err) {
-		ciphertext_page = ERR_PTR(err);
-		goto errout;
+	for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
+		err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num,
+					  page, ciphertext_page,
+					  blocksize, i, gfp_flags);
+		if (err) {
+			fscrypt_free_bounce_page(ciphertext_page);
+			return ERR_PTR(err);
+		}
 	}
 	SetPagePrivate(ciphertext_page);
-	set_page_private(ciphertext_page, (unsigned long)ctx);
-	lock_page(ciphertext_page);
+	set_page_private(ciphertext_page, (unsigned long)page);
 	return ciphertext_page;
+}
+EXPORT_SYMBOL(fscrypt_encrypt_pagecache_blocks);
 
-errout:
-	fscrypt_release_ctx(ctx);
-	return ciphertext_page;
+/**
+ * fscrypt_encrypt_block_inplace() - Encrypt a filesystem block in-place
+ * @inode: The inode to which this block belongs
+ * @page: The page containing the block to encrypt
+ * @len: Size of block to encrypt. Doesn't need to be a multiple of the
+ *       fs block size, but must be a multiple of FS_CRYPTO_BLOCK_SIZE.
+ * @offs: Byte offset within @page at which the block to encrypt begins
+ * @lblk_num: Filesystem logical block number of the block, i.e. the 0-based
+ *            number of the block within the file
+ * @gfp_flags: Memory allocation flags
+ *
+ * Encrypt a possibly-compressed filesystem block that is located in an
+ * arbitrary page, not necessarily in the original pagecache page. The @inode
+ * and @lblk_num must be specified, as they can't be determined from @page.
+ *
+ * Return: 0 on success; -errno on failure
+ */
+int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
+				  unsigned int len, unsigned int offs,
+				  u64 lblk_num, gfp_t gfp_flags)
+{
+	return fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num, page, page,
+				   len, offs, gfp_flags);
 }
-EXPORT_SYMBOL(fscrypt_encrypt_page);
+EXPORT_SYMBOL(fscrypt_encrypt_block_inplace);
 
 /**
- * fscrypt_decrypt_page() - Decrypts a page in-place
- * @inode: The corresponding inode for the page to decrypt.
- * @page: The page to decrypt. Must be locked in case
- *        it is a writeback page (FS_CFLG_OWN_PAGES unset).
- * @len: Number of bytes in @page to be decrypted.
- * @offs: Start of data in @page.
- * @lblk_num: Logical block number.
+ * fscrypt_decrypt_pagecache_blocks() - Decrypt filesystem blocks in a pagecache page
+ * @page: The locked pagecache page containing the block(s) to decrypt
+ * @len: Total size of the block(s) to decrypt. Must be a nonzero
+ *       multiple of the filesystem's block size.
+ * @offs: Byte offset within @page of the first block to decrypt. Must be
+ *        a multiple of the filesystem's block size.
  *
- * Decrypts page in-place using the ctx encryption context.
+ * The specified block(s) are decrypted in-place within the pagecache page,
+ * which must still be locked and not uptodate. Normally, blocksize ==
+ * PAGE_SIZE and the whole page is decrypted at once.
  *
- * Called from the read completion callback.
+ * This is for use by the filesystem's ->readpages() method.
  *
- * Return: Zero on success, non-zero otherwise.
+ * Return: 0 on success; -errno on failure
  */
-int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
-			 unsigned int len, unsigned int offs, u64 lblk_num)
+int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len,
+				     unsigned int offs)
 {
-	if (!(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES))
-		BUG_ON(!PageLocked(page));
+	const struct inode *inode = page->mapping->host;
+	const unsigned int blockbits = inode->i_blkbits;
+	const unsigned int blocksize = 1 << blockbits;
+	u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
+		       (offs >> blockbits);
+	unsigned int i;
+	int err;
+
+	if (WARN_ON_ONCE(!PageLocked(page)))
+		return -EINVAL;
+
+	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
+		return -EINVAL;
+
+	for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
+		err = fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page,
+					  page, blocksize, i, GFP_NOFS);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(fscrypt_decrypt_pagecache_blocks);
 
-	return fscrypt_do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page,
-				      len, offs, GFP_NOFS);
+/**
+ * fscrypt_decrypt_block_inplace() - Decrypt a filesystem block in-place
+ * @inode: The inode to which this block belongs
+ * @page: The page containing the block to decrypt
+ * @len: Size of block to decrypt. Doesn't need to be a multiple of the
+ *       fs block size, but must be a multiple of FS_CRYPTO_BLOCK_SIZE.
+ * @offs: Byte offset within @page at which the block to decrypt begins
+ * @lblk_num: Filesystem logical block number of the block, i.e. the 0-based
+ *            number of the block within the file
+ *
+ * Decrypt a possibly-compressed filesystem block that is located in an
+ * arbitrary page, not necessarily in the original pagecache page. The @inode
+ * and @lblk_num must be specified, as they can't be determined from @page.
+ *
+ * Return: 0 on success; -errno on failure
+ */
+int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page,
+				  unsigned int len, unsigned int offs,
+				  u64 lblk_num)
+{
+	return fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page, page,
+				   len, offs, GFP_NOFS);
 }
-EXPORT_SYMBOL(fscrypt_decrypt_page);
+EXPORT_SYMBOL(fscrypt_decrypt_block_inplace);
 
 /*
  * Validate dentries in encrypted directories to make sure we aren't potentially
@@ -355,18 +398,6 @@ const struct dentry_operations fscrypt_d_ops = {
 	.d_revalidate = fscrypt_d_revalidate,
 };
 
-void fscrypt_restore_control_page(struct page *page)
-{
-	struct fscrypt_ctx *ctx;
-
-	ctx = (struct fscrypt_ctx *)page_private(page);
-	set_page_private(page, (unsigned long)NULL);
-	ClearPagePrivate(page);
-	unlock_page(page);
-	fscrypt_release_ctx(ctx);
-}
-EXPORT_SYMBOL(fscrypt_restore_control_page);
-
 static void fscrypt_destroy(void)
 {
 	struct fscrypt_ctx *pos, *n;
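The net effect of the crypto.c changes is that bounce-page handling no longer goes through a struct fscrypt_ctx: a filesystem encrypts the blocks of a locked pagecache page into a standalone bounce page, submits that bounce page for I/O, and frees it when the write completes. Below is a minimal sketch of that calling convention; fs_write_encrypted_page() and its submit callback are hypothetical names, and it assumes blocksize == PAGE_SIZE, the common case called out in the kerneldoc above.

/*
 * Hypothetical writeback helper (not part of this patch): encrypt one
 * locked pagecache page with the new API and hand the bounce page to I/O.
 * Assumes blocksize == PAGE_SIZE, so the whole page is encrypted at once.
 */
static int fs_write_encrypted_page(struct page *page,
				   int (*submit)(struct page *bounce_page))
{
	struct page *bounce_page;
	int err;

	/* Encrypt the page's contents into a newly allocated bounce page. */
	bounce_page = fscrypt_encrypt_pagecache_blocks(page, PAGE_SIZE, 0,
						       GFP_NOFS);
	if (IS_ERR(bounce_page))
		return PTR_ERR(bounce_page);

	/* The bounce page, not the pagecache page, is what goes to disk. */
	err = submit(bounce_page);

	/* On error (or at write completion) the bounce page must be freed. */
	if (err)
		fscrypt_free_bounce_page(bounce_page);
	return err;
}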
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index eccea3d8f923..00d150ff3033 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -12,7 +12,6 @@
  */
 
 #include <linux/scatterlist.h>
-#include <linux/ratelimit.h>
 #include <crypto/skcipher.h>
 #include "fscrypt_private.h"
 
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index 7da276159593..8978eec9d766 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -94,7 +94,6 @@ typedef enum {
 } fscrypt_direction_t;
 
 #define FS_CTX_REQUIRES_FREE_ENCRYPT_FL		0x00000001
-#define FS_CTX_HAS_BOUNCE_BUFFER_FL		0x00000002
 
 static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
 					   u32 filenames_mode)
@@ -117,14 +116,12 @@ static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
 /* crypto.c */
 extern struct kmem_cache *fscrypt_info_cachep;
 extern int fscrypt_initialize(unsigned int cop_flags);
-extern int fscrypt_do_page_crypto(const struct inode *inode,
-				  fscrypt_direction_t rw, u64 lblk_num,
-				  struct page *src_page,
-				  struct page *dest_page,
-				  unsigned int len, unsigned int offs,
-				  gfp_t gfp_flags);
-extern struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
-					      gfp_t gfp_flags);
+extern int fscrypt_crypt_block(const struct inode *inode,
+			       fscrypt_direction_t rw, u64 lblk_num,
+			       struct page *src_page, struct page *dest_page,
+			       unsigned int len, unsigned int offs,
+			       gfp_t gfp_flags);
+extern struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags);
 extern const struct dentry_operations fscrypt_d_ops;
 
 extern void __printf(3, 4) __cold
diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c
index bd525f7573a4..c1d6715d88e9 100644
--- a/fs/crypto/hooks.c
+++ b/fs/crypto/hooks.c
@@ -5,7 +5,6 @@
  * Encryption hooks for higher-level filesystem operations.
  */
 
-#include <linux/ratelimit.h>
 #include "fscrypt_private.h"
 
 /**
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index dcd91a3fbe49..207ebed918c1 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -12,7 +12,6 @@
 #include <keys/user-type.h>
 #include <linux/hashtable.h>
 #include <linux/scatterlist.h>
-#include <linux/ratelimit.h>
 #include <crypto/aes.h>
 #include <crypto/algapi.h>
 #include <crypto/sha.h>
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index d536889ac31b..4941fe8471ce 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -81,6 +81,8 @@ int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg)
 	if (ret == -ENODATA) {
 		if (!S_ISDIR(inode->i_mode))
 			ret = -ENOTDIR;
+		else if (IS_DEADDIR(inode))
+			ret = -ENOENT;
 		else if (!inode->i_sb->s_cop->empty_dir(inode))
 			ret = -ENOTEMPTY;
 		else
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index c7f77c643008..f65357735a1a 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1164,8 +1164,9 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
 	int err = 0;
 	unsigned blocksize = inode->i_sb->s_blocksize;
 	unsigned bbits;
-	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
-	bool decrypt = false;
+	struct buffer_head *bh, *head, *wait[2];
+	int nr_wait = 0;
+	int i;
 
 	BUG_ON(!PageLocked(page));
 	BUG_ON(from > PAGE_SIZE);
@@ -1217,23 +1218,32 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
 		    !buffer_unwritten(bh) &&
 		    (block_start < from || block_end > to)) {
 			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
-			*wait_bh++ = bh;
-			decrypt = IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
+			wait[nr_wait++] = bh;
 		}
 	}
 	/*
 	 * If we issued read requests, let them complete.
 	 */
-	while (wait_bh > wait) {
-		wait_on_buffer(*--wait_bh);
-		if (!buffer_uptodate(*wait_bh))
+	for (i = 0; i < nr_wait; i++) {
+		wait_on_buffer(wait[i]);
+		if (!buffer_uptodate(wait[i]))
 			err = -EIO;
 	}
-	if (unlikely(err))
+	if (unlikely(err)) {
 		page_zero_new_buffers(page, from, to);
-	else if (decrypt)
-		err = fscrypt_decrypt_page(page->mapping->host, page,
-				PAGE_SIZE, 0, page->index);
+	} else if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) {
+		for (i = 0; i < nr_wait; i++) {
+			int err2;
+
+			err2 = fscrypt_decrypt_pagecache_blocks(page, blocksize,
+								bh_offset(wait[i]));
+			if (err2) {
+				clear_buffer_uptodate(wait[i]);
+				err = err2;
+			}
+		}
+	}
+
 	return err;
 }
 #endif
@@ -4065,9 +4075,8 @@ static int __ext4_block_zero_page_range(handle_t *handle,
 		if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode)) {
 			/* We expect the key to be set. */
 			BUG_ON(!fscrypt_has_encryption_key(inode));
-			BUG_ON(blocksize != PAGE_SIZE);
-			WARN_ON_ONCE(fscrypt_decrypt_page(page->mapping->host,
-						page, PAGE_SIZE, 0, page->index));
+			WARN_ON_ONCE(fscrypt_decrypt_pagecache_blocks(
+						page, blocksize, bh_offset(bh)));
 		}
 	}
 	if (ext4_should_journal_data(inode)) {
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 4690618a92e9..a18a47a2a1d1 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -66,9 +66,7 @@ static void ext4_finish_bio(struct bio *bio)
 
 	bio_for_each_segment_all(bvec, bio, iter_all) {
 		struct page *page = bvec->bv_page;
-#ifdef CONFIG_FS_ENCRYPTION
-		struct page *data_page = NULL;
-#endif
+		struct page *bounce_page = NULL;
 		struct buffer_head *bh, *head;
 		unsigned bio_start = bvec->bv_offset;
 		unsigned bio_end = bio_start + bvec->bv_len;
@@ -78,13 +76,10 @@ static void ext4_finish_bio(struct bio *bio)
 		if (!page)
 			continue;
 
-#ifdef CONFIG_FS_ENCRYPTION
-		if (!page->mapping) {
-			/* The bounce data pages are unmapped. */
-			data_page = page;
-			fscrypt_pullback_bio_page(&page, false);
+		if (fscrypt_is_bounce_page(page)) {
+			bounce_page = page;
+			page = fscrypt_pagecache_page(bounce_page);
 		}
-#endif
 
 		if (bio->bi_status) {
 			SetPageError(page);
@@ -111,10 +106,7 @@ static void ext4_finish_bio(struct bio *bio)
 		bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
 		local_irq_restore(flags);
 		if (!under_io) {
-#ifdef CONFIG_FS_ENCRYPTION
-			if (data_page)
-				fscrypt_restore_control_page(data_page);
-#endif
+			fscrypt_free_bounce_page(bounce_page);
 			end_page_writeback(page);
 		}
 	}
@@ -415,7 +407,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 			struct writeback_control *wbc,
 			bool keep_towrite)
 {
-	struct page *data_page = NULL;
+	struct page *bounce_page = NULL;
 	struct inode *inode = page->mapping->host;
 	unsigned block_start;
 	struct buffer_head *bh, *head;
@@ -475,14 +467,22 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 
 	bh = head = page_buffers(page);
 
+	/*
+	 * If any blocks are being written to an encrypted file, encrypt them
+	 * into a bounce page. For simplicity, just encrypt until the last
+	 * block which might be needed. This may cause some unneeded blocks
+	 * (e.g. holes) to be unnecessarily encrypted, but this is rare and
+	 * can't happen in the common case of blocksize == PAGE_SIZE.
+	 */
 	if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) && nr_to_submit) {
 		gfp_t gfp_flags = GFP_NOFS;
+		unsigned int enc_bytes = round_up(len, i_blocksize(inode));
 
 	retry_encrypt:
-		data_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0,
-						page->index, gfp_flags);
-		if (IS_ERR(data_page)) {
-			ret = PTR_ERR(data_page);
+		bounce_page = fscrypt_encrypt_pagecache_blocks(page, enc_bytes,
+							       0, gfp_flags);
+		if (IS_ERR(bounce_page)) {
+			ret = PTR_ERR(bounce_page);
 			if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
 				if (io->io_bio) {
 					ext4_io_submit(io);
@@ -491,7 +491,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 				gfp_flags |= __GFP_NOFAIL;
 				goto retry_encrypt;
 			}
-			data_page = NULL;
+			bounce_page = NULL;
 			goto out;
 		}
 	}
@@ -500,8 +500,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 	do {
 		if (!buffer_async_write(bh))
 			continue;
-		ret = io_submit_add_bh(io, inode,
-				data_page ? data_page : page, bh);
+		ret = io_submit_add_bh(io, inode, bounce_page ?: page, bh);
 		if (ret) {
 			/*
 			 * We only get here on ENOMEM. Not much else
@@ -517,8 +516,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 	/* Error stopped previous loop? Clean up buffers... */
 	if (ret) {
 	out:
-		if (data_page)
-			fscrypt_restore_control_page(data_page);
+		fscrypt_free_bounce_page(bounce_page);
 		printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
 		redirty_page_for_writepage(wbc, page);
 		do {
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index eda4181d2092..a546ac8685ea 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -185,7 +185,7 @@ static void f2fs_write_end_io(struct bio *bio)
 			continue;
 		}
 
-		fscrypt_pullback_bio_page(&page, true);
+		fscrypt_finalize_bounce_page(&page);
 
 		if (unlikely(bio->bi_status)) {
 			mapping_set_error(page->mapping, -EIO);
@@ -362,10 +362,9 @@ static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
 
 	bio_for_each_segment_all(bvec, io->bio, iter_all) {
 
-		if (bvec->bv_page->mapping)
-			target = bvec->bv_page;
-		else
-			target = fscrypt_control_page(bvec->bv_page);
+		target = bvec->bv_page;
+		if (fscrypt_is_bounce_page(target))
+			target = fscrypt_pagecache_page(target);
 
 		if (inode && inode == target->mapping->host)
 			return true;
@@ -1727,8 +1726,9 @@ static int encrypt_one_page(struct f2fs_io_info *fio)
 	f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
 
 retry_encrypt:
-	fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
-			PAGE_SIZE, 0, fio->page->index, gfp_flags);
+	fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(fio->page,
+								PAGE_SIZE, 0,
+								gfp_flags);
 	if (IS_ERR(fio->encrypted_page)) {
 		/* flush pending IOs and wait for a while in the ENOMEM case */
 		if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
@@ -1900,8 +1900,7 @@ got_it:
 		err = f2fs_inplace_write_data(fio);
 		if (err) {
 			if (f2fs_encrypted_file(inode))
-				fscrypt_pullback_bio_page(&fio->encrypted_page,
-									true);
+				fscrypt_finalize_bounce_page(&fio->encrypted_page);
 			if (PageWriteback(page))
 				end_page_writeback(page);
 		} else {
diff --git a/fs/ubifs/crypto.c b/fs/ubifs/crypto.c
index 4aaedf2d7f44..22be7aeb96c4 100644
--- a/fs/ubifs/crypto.c
+++ b/fs/ubifs/crypto.c
@@ -29,8 +29,8 @@ int ubifs_encrypt(const struct inode *inode, struct ubifs_data_node *dn,
 {
 	struct ubifs_info *c = inode->i_sb->s_fs_info;
 	void *p = &dn->data;
-	struct page *ret;
 	unsigned int pad_len = round_up(in_len, UBIFS_CIPHER_BLOCK_SIZE);
+	int err;
 
 	ubifs_assert(c, pad_len <= *out_len);
 	dn->compr_size = cpu_to_le16(in_len);
@@ -39,11 +39,11 @@ int ubifs_encrypt(const struct inode *inode, struct ubifs_data_node *dn,
 	if (pad_len != in_len)
 		memset(p + in_len, 0, pad_len - in_len);
 
-	ret = fscrypt_encrypt_page(inode, virt_to_page(&dn->data), pad_len,
-				   offset_in_page(&dn->data), block, GFP_NOFS);
-	if (IS_ERR(ret)) {
-		ubifs_err(c, "fscrypt_encrypt_page failed: %ld", PTR_ERR(ret));
-		return PTR_ERR(ret);
+	err = fscrypt_encrypt_block_inplace(inode, virt_to_page(p), pad_len,
+					    offset_in_page(p), block, GFP_NOFS);
+	if (err) {
+		ubifs_err(c, "fscrypt_encrypt_block_inplace() failed: %d", err);
+		return err;
 	}
 	*out_len = pad_len;
 
@@ -64,10 +64,11 @@ int ubifs_decrypt(const struct inode *inode, struct ubifs_data_node *dn,
 	}
 
 	ubifs_assert(c, dlen <= UBIFS_BLOCK_SIZE);
-	err = fscrypt_decrypt_page(inode, virt_to_page(&dn->data), dlen,
-			offset_in_page(&dn->data), block);
+	err = fscrypt_decrypt_block_inplace(inode, virt_to_page(&dn->data),
+					    dlen, offset_in_page(&dn->data),
+					    block);
 	if (err) {
-		ubifs_err(c, "fscrypt_decrypt_page failed: %i", err);
+		ubifs_err(c, "fscrypt_decrypt_block_inplace() failed: %d", err);
 		return err;
 	}
 	*out_len = clen;
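Where the data being encrypted does not sit at its pagecache position (UBIFS data nodes above, or compressed blocks more generally), the *_block_inplace() variants are used instead, with the inode and logical block number passed explicitly. The round-trip helper below is a hypothetical sketch of that calling convention only; it assumes the buffer lives in a directly-mapped (non-vmalloc) page and that len is a multiple of FS_CRYPTO_BLOCK_SIZE.

/*
 * Illustrative only: encrypt then decrypt one block held in an arbitrary
 * buffer, passing inode and logical block number explicitly because they
 * cannot be derived from the page itself.
 */
static int fs_crypt_roundtrip(const struct inode *inode, void *buf,
			      unsigned int len, u64 lblk_num)
{
	struct page *page = virt_to_page(buf);
	unsigned int offs = offset_in_page(buf);
	int err;

	err = fscrypt_encrypt_block_inplace(inode, page, len, offs,
					    lblk_num, GFP_NOFS);
	if (err)
		return err;
	return fscrypt_decrypt_block_inplace(inode, page, len, offs, lblk_num);
}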
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index f7680ef1abd2..bd8f207a2fb6 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -63,16 +63,13 @@ struct fscrypt_operations {
 	unsigned int max_namelen;
 };
 
+/* Decryption work */
 struct fscrypt_ctx {
 	union {
 		struct {
-			struct page *bounce_page;	/* Ciphertext page */
-			struct page *control_page;	/* Original page */
-		} w;
-		struct {
 			struct bio *bio;
 			struct work_struct work;
-		} r;
+		};
 		struct list_head free_list;	/* Free list */
 	};
 	u8 flags;				/* Flags */
@@ -106,18 +103,33 @@ static inline void fscrypt_handle_d_move(struct dentry *dentry)
 extern void fscrypt_enqueue_decrypt_work(struct work_struct *);
 extern struct fscrypt_ctx *fscrypt_get_ctx(gfp_t);
 extern void fscrypt_release_ctx(struct fscrypt_ctx *);
-extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
-					 unsigned int, unsigned int,
-					 u64, gfp_t);
-extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int,
-				unsigned int, u64);
 
-static inline struct page *fscrypt_control_page(struct page *page)
+extern struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
+						     unsigned int len,
+						     unsigned int offs,
+						     gfp_t gfp_flags);
+extern int fscrypt_encrypt_block_inplace(const struct inode *inode,
+					 struct page *page, unsigned int len,
+					 unsigned int offs, u64 lblk_num,
+					 gfp_t gfp_flags);
+
+extern int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len,
+					    unsigned int offs);
+extern int fscrypt_decrypt_block_inplace(const struct inode *inode,
+					 struct page *page, unsigned int len,
+					 unsigned int offs, u64 lblk_num);
+
+static inline bool fscrypt_is_bounce_page(struct page *page)
+{
+	return page->mapping == NULL;
+}
+
+static inline struct page *fscrypt_pagecache_page(struct page *bounce_page)
 {
-	return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
+	return (struct page *)page_private(bounce_page);
 }
 
-extern void fscrypt_restore_control_page(struct page *);
+extern void fscrypt_free_bounce_page(struct page *bounce_page);
 
 /* policy.c */
 extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
@@ -223,7 +235,6 @@ static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
 extern void fscrypt_decrypt_bio(struct bio *);
 extern void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
 					struct bio *bio);
-extern void fscrypt_pullback_bio_page(struct page **, bool);
 extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
 				 unsigned int);
 
@@ -283,32 +294,51 @@ static inline void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
 	return;
 }
 
-static inline struct page *fscrypt_encrypt_page(const struct inode *inode,
+static inline struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
+							     unsigned int len,
+							     unsigned int offs,
+							     gfp_t gfp_flags)
+{
+	return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int fscrypt_encrypt_block_inplace(const struct inode *inode,
 						struct page *page,
 						unsigned int len,
-						unsigned int offs,
-						u64 lblk_num, gfp_t gfp_flags)
+						unsigned int offs, u64 lblk_num,
+						gfp_t gfp_flags)
 {
-	return ERR_PTR(-EOPNOTSUPP);
+	return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_decrypt_pagecache_blocks(struct page *page,
+						   unsigned int len,
+						   unsigned int offs)
+{
+	return -EOPNOTSUPP;
 }
 
-static inline int fscrypt_decrypt_page(const struct inode *inode,
+static inline int fscrypt_decrypt_block_inplace(const struct inode *inode,
 					struct page *page,
-					unsigned int len, unsigned int offs,
-					u64 lblk_num)
+					unsigned int len,
+					unsigned int offs, u64 lblk_num)
 {
 	return -EOPNOTSUPP;
 }
 
-static inline struct page *fscrypt_control_page(struct page *page)
+static inline bool fscrypt_is_bounce_page(struct page *page)
+{
+	return false;
+}
+
+static inline struct page *fscrypt_pagecache_page(struct page *bounce_page)
 {
 	WARN_ON_ONCE(1);
 	return ERR_PTR(-EINVAL);
 }
 
-static inline void fscrypt_restore_control_page(struct page *page)
+static inline void fscrypt_free_bounce_page(struct page *bounce_page)
 {
-	return;
 }
 
 /* policy.c */
@@ -410,11 +440,6 @@ static inline void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
 {
 }
 
-static inline void fscrypt_pullback_bio_page(struct page **page, bool restore)
-{
-	return;
-}
-
 static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
 					 sector_t pblk, unsigned int len)
 {
@@ -692,4 +717,15 @@ static inline int fscrypt_encrypt_symlink(struct inode *inode,
 	return 0;
 }
 
+/* If *pagep is a bounce page, free it and set *pagep to the pagecache page */
+static inline void fscrypt_finalize_bounce_page(struct page **pagep)
+{
+	struct page *page = *pagep;
+
+	if (fscrypt_is_bounce_page(page)) {
+		*pagep = fscrypt_pagecache_page(page);
+		fscrypt_free_bounce_page(page);
+	}
+}
+
 #endif /* _LINUX_FSCRYPT_H */
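On the write-completion side, the header now lets a filesystem map a bounce page back to its pagecache page without carrying a private context, as the f2fs and ext4 hunks above do. A simplified, illustrative completion handler (not part of this patch, with the end_page_writeback() placement assumed for the example) might look like:

/*
 * Simplified write-completion walk (illustrative): replace each bounce page
 * with its pagecache page, free the bounce page, and finish writeback.
 */
static void fs_write_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		/* No-op for ordinary pagecache pages. */
		fscrypt_finalize_bounce_page(&page);

		if (bio->bi_status)
			SetPageError(page);
		end_page_writeback(page);
	}
	bio_put(bio);
}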