diff options
author | Michael Halcrow <mhalcrow@google.com> | 2015-04-12 00:43:56 -0400 |
---|---|---|
committer | Theodore Ts'o <tytso@mit.edu> | 2015-04-12 00:43:56 -0400 |
commit | b30ab0e03407d2aa2d9316cba199c757e4bfc8ad (patch) | |
tree | 900754eb3fe069f90bb5ea2c1df1ed88cf701eb5 /fs | |
parent | 9bd8212f981ea6375911fe055382ad7529be5b28 (diff) |
ext4 crypto: add ext4 encryption facilities
On encrypt, we will re-assign the buffer_heads to point to a bounce
page rather than the control_page (which is the original page to write
that contains the plaintext). The block I/O occurs against the bounce
page. On write completion, we re-assign the buffer_heads to the
original plaintext page.
On decrypt, we will attach a read completion callback to the bio
struct. This read completion will decrypt the read contents in-place
prior to setting the page up-to-date.
The current encryption mode, AES-256-XTS, lacks cryptographic
integrity. AES-256-GCM is in-plan, but we will need to devise a
mechanism for handling the integrity data.
Signed-off-by: Michael Halcrow <mhalcrow@google.com>
Signed-off-by: Ildar Muslukhov <ildarm@google.com>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Diffstat (limited to 'fs')
-rw-r--r-- | fs/ext4/Makefile | 2 | ||||
-rw-r--r-- | fs/ext4/crypto.c | 558 | ||||
-rw-r--r-- | fs/ext4/crypto_policy.c | 8 | ||||
-rw-r--r-- | fs/ext4/ext4.h | 52 | ||||
-rw-r--r-- | fs/ext4/ext4_crypto.h | 55 | ||||
-rw-r--r-- | fs/ext4/super.c | 8 |
6 files changed, 682 insertions, 1 deletions
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile index 3886ee45f556..1b1c5619523d 100644 --- a/fs/ext4/Makefile +++ b/fs/ext4/Makefile | |||
@@ -12,4 +12,4 @@ ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \ | |||
12 | 12 | ||
13 | ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o | 13 | ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o |
14 | ext4-$(CONFIG_EXT4_FS_SECURITY) += xattr_security.o | 14 | ext4-$(CONFIG_EXT4_FS_SECURITY) += xattr_security.o |
15 | ext4-$(CONFIG_EXT4_FS_ENCRYPTION) += crypto_policy.o | 15 | ext4-$(CONFIG_EXT4_FS_ENCRYPTION) += crypto_policy.o crypto.o |
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c new file mode 100644 index 000000000000..8ff15273ab0c --- /dev/null +++ b/fs/ext4/crypto.c | |||
@@ -0,0 +1,558 @@ | |||
1 | /* | ||
2 | * linux/fs/ext4/crypto.c | ||
3 | * | ||
4 | * Copyright (C) 2015, Google, Inc. | ||
5 | * | ||
6 | * This contains encryption functions for ext4 | ||
7 | * | ||
8 | * Written by Michael Halcrow, 2014. | ||
9 | * | ||
10 | * Filename encryption additions | ||
11 | * Uday Savagaonkar, 2014 | ||
12 | * Encryption policy handling additions | ||
13 | * Ildar Muslukhov, 2014 | ||
14 | * | ||
15 | * This has not yet undergone a rigorous security audit. | ||
16 | * | ||
17 | * The usage of AES-XTS should conform to recommendations in NIST | ||
18 | * Special Publication 800-38E and IEEE P1619/D16. | ||
19 | */ | ||
20 | |||
21 | #include <crypto/hash.h> | ||
22 | #include <crypto/sha.h> | ||
23 | #include <keys/user-type.h> | ||
24 | #include <keys/encrypted-type.h> | ||
25 | #include <linux/crypto.h> | ||
26 | #include <linux/ecryptfs.h> | ||
27 | #include <linux/gfp.h> | ||
28 | #include <linux/kernel.h> | ||
29 | #include <linux/key.h> | ||
30 | #include <linux/list.h> | ||
31 | #include <linux/mempool.h> | ||
32 | #include <linux/module.h> | ||
33 | #include <linux/mutex.h> | ||
34 | #include <linux/random.h> | ||
35 | #include <linux/scatterlist.h> | ||
36 | #include <linux/spinlock_types.h> | ||
37 | |||
38 | #include "ext4_extents.h" | ||
39 | #include "xattr.h" | ||
40 | |||
41 | /* Encryption added and removed here! (L: */ | ||
42 | |||
43 | static unsigned int num_prealloc_crypto_pages = 32; | ||
44 | static unsigned int num_prealloc_crypto_ctxs = 128; | ||
45 | |||
46 | module_param(num_prealloc_crypto_pages, uint, 0444); | ||
47 | MODULE_PARM_DESC(num_prealloc_crypto_pages, | ||
48 | "Number of crypto pages to preallocate"); | ||
49 | module_param(num_prealloc_crypto_ctxs, uint, 0444); | ||
50 | MODULE_PARM_DESC(num_prealloc_crypto_ctxs, | ||
51 | "Number of crypto contexts to preallocate"); | ||
52 | |||
53 | static mempool_t *ext4_bounce_page_pool; | ||
54 | |||
55 | static LIST_HEAD(ext4_free_crypto_ctxs); | ||
56 | static DEFINE_SPINLOCK(ext4_crypto_ctx_lock); | ||
57 | |||
58 | /** | ||
59 | * ext4_release_crypto_ctx() - Releases an encryption context | ||
60 | * @ctx: The encryption context to release. | ||
61 | * | ||
62 | * If the encryption context was allocated from the pre-allocated pool, returns | ||
63 | * it to that pool. Else, frees it. | ||
64 | * | ||
65 | * If there's a bounce page in the context, this frees that. | ||
66 | */ | ||
67 | void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx) | ||
68 | { | ||
69 | unsigned long flags; | ||
70 | |||
71 | if (ctx->bounce_page) { | ||
72 | if (ctx->flags & EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL) | ||
73 | __free_page(ctx->bounce_page); | ||
74 | else | ||
75 | mempool_free(ctx->bounce_page, ext4_bounce_page_pool); | ||
76 | ctx->bounce_page = NULL; | ||
77 | } | ||
78 | ctx->control_page = NULL; | ||
79 | if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) { | ||
80 | if (ctx->tfm) | ||
81 | crypto_free_tfm(ctx->tfm); | ||
82 | kfree(ctx); | ||
83 | } else { | ||
84 | spin_lock_irqsave(&ext4_crypto_ctx_lock, flags); | ||
85 | list_add(&ctx->free_list, &ext4_free_crypto_ctxs); | ||
86 | spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags); | ||
87 | } | ||
88 | } | ||
89 | |||
90 | /** | ||
91 | * ext4_alloc_and_init_crypto_ctx() - Allocates and inits an encryption context | ||
92 | * @mask: The allocation mask. | ||
93 | * | ||
94 | * Return: An allocated and initialized encryption context on success. An error | ||
95 | * value or NULL otherwise. | ||
96 | */ | ||
97 | static struct ext4_crypto_ctx *ext4_alloc_and_init_crypto_ctx(gfp_t mask) | ||
98 | { | ||
99 | struct ext4_crypto_ctx *ctx = kzalloc(sizeof(struct ext4_crypto_ctx), | ||
100 | mask); | ||
101 | |||
102 | if (!ctx) | ||
103 | return ERR_PTR(-ENOMEM); | ||
104 | return ctx; | ||
105 | } | ||
106 | |||
107 | /** | ||
108 | * ext4_get_crypto_ctx() - Gets an encryption context | ||
109 | * @inode: The inode for which we are doing the crypto | ||
110 | * | ||
111 | * Allocates and initializes an encryption context. | ||
112 | * | ||
113 | * Return: An allocated and initialized encryption context on success; error | ||
114 | * value or NULL otherwise. | ||
115 | */ | ||
116 | struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode) | ||
117 | { | ||
118 | struct ext4_crypto_ctx *ctx = NULL; | ||
119 | int res = 0; | ||
120 | unsigned long flags; | ||
121 | struct ext4_encryption_key *key = &EXT4_I(inode)->i_encryption_key; | ||
122 | |||
123 | if (!ext4_read_workqueue) | ||
124 | ext4_init_crypto(); | ||
125 | |||
126 | /* | ||
127 | * We first try getting the ctx from a free list because in | ||
128 | * the common case the ctx will have an allocated and | ||
129 | * initialized crypto tfm, so it's probably a worthwhile | ||
130 | * optimization. For the bounce page, we first try getting it | ||
131 | * from the kernel allocator because that's just about as fast | ||
132 | * as getting it from a list and because a cache of free pages | ||
133 | * should generally be a "last resort" option for a filesystem | ||
134 | * to be able to do its job. | ||
135 | */ | ||
136 | spin_lock_irqsave(&ext4_crypto_ctx_lock, flags); | ||
137 | ctx = list_first_entry_or_null(&ext4_free_crypto_ctxs, | ||
138 | struct ext4_crypto_ctx, free_list); | ||
139 | if (ctx) | ||
140 | list_del(&ctx->free_list); | ||
141 | spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags); | ||
142 | if (!ctx) { | ||
143 | ctx = ext4_alloc_and_init_crypto_ctx(GFP_NOFS); | ||
144 | if (IS_ERR(ctx)) { | ||
145 | res = PTR_ERR(ctx); | ||
146 | goto out; | ||
147 | } | ||
148 | ctx->flags |= EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL; | ||
149 | } else { | ||
150 | ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL; | ||
151 | } | ||
152 | |||
153 | /* Allocate a new Crypto API context if we don't already have | ||
154 | * one or if it isn't the right mode. */ | ||
155 | BUG_ON(key->mode == EXT4_ENCRYPTION_MODE_INVALID); | ||
156 | if (ctx->tfm && (ctx->mode != key->mode)) { | ||
157 | crypto_free_tfm(ctx->tfm); | ||
158 | ctx->tfm = NULL; | ||
159 | ctx->mode = EXT4_ENCRYPTION_MODE_INVALID; | ||
160 | } | ||
161 | if (!ctx->tfm) { | ||
162 | switch (key->mode) { | ||
163 | case EXT4_ENCRYPTION_MODE_AES_256_XTS: | ||
164 | ctx->tfm = crypto_ablkcipher_tfm( | ||
165 | crypto_alloc_ablkcipher("xts(aes)", 0, 0)); | ||
166 | break; | ||
167 | case EXT4_ENCRYPTION_MODE_AES_256_GCM: | ||
168 | /* TODO(mhalcrow): AEAD w/ gcm(aes); | ||
169 | * crypto_aead_setauthsize() */ | ||
170 | ctx->tfm = ERR_PTR(-ENOTSUPP); | ||
171 | break; | ||
172 | default: | ||
173 | BUG(); | ||
174 | } | ||
175 | if (IS_ERR_OR_NULL(ctx->tfm)) { | ||
176 | res = PTR_ERR(ctx->tfm); | ||
177 | ctx->tfm = NULL; | ||
178 | goto out; | ||
179 | } | ||
180 | ctx->mode = key->mode; | ||
181 | } | ||
182 | BUG_ON(key->size != ext4_encryption_key_size(key->mode)); | ||
183 | |||
184 | /* There shouldn't be a bounce page attached to the crypto | ||
185 | * context at this point. */ | ||
186 | BUG_ON(ctx->bounce_page); | ||
187 | |||
188 | out: | ||
189 | if (res) { | ||
190 | if (!IS_ERR_OR_NULL(ctx)) | ||
191 | ext4_release_crypto_ctx(ctx); | ||
192 | ctx = ERR_PTR(res); | ||
193 | } | ||
194 | return ctx; | ||
195 | } | ||
196 | |||
197 | struct workqueue_struct *ext4_read_workqueue; | ||
198 | static DEFINE_MUTEX(crypto_init); | ||
199 | |||
200 | /** | ||
201 | * ext4_exit_crypto() - Shutdown the ext4 encryption system | ||
202 | */ | ||
203 | void ext4_exit_crypto(void) | ||
204 | { | ||
205 | struct ext4_crypto_ctx *pos, *n; | ||
206 | |||
207 | list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list) { | ||
208 | if (pos->bounce_page) { | ||
209 | if (pos->flags & | ||
210 | EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL) { | ||
211 | __free_page(pos->bounce_page); | ||
212 | } else { | ||
213 | mempool_free(pos->bounce_page, | ||
214 | ext4_bounce_page_pool); | ||
215 | } | ||
216 | } | ||
217 | if (pos->tfm) | ||
218 | crypto_free_tfm(pos->tfm); | ||
219 | kfree(pos); | ||
220 | } | ||
221 | INIT_LIST_HEAD(&ext4_free_crypto_ctxs); | ||
222 | if (ext4_bounce_page_pool) | ||
223 | mempool_destroy(ext4_bounce_page_pool); | ||
224 | ext4_bounce_page_pool = NULL; | ||
225 | if (ext4_read_workqueue) | ||
226 | destroy_workqueue(ext4_read_workqueue); | ||
227 | ext4_read_workqueue = NULL; | ||
228 | } | ||
229 | |||
230 | /** | ||
231 | * ext4_init_crypto() - Set up for ext4 encryption. | ||
232 | * | ||
233 | * We only call this when we start accessing encrypted files, since it | ||
234 | * results in memory getting allocated that wouldn't otherwise be used. | ||
235 | * | ||
236 | * Return: Zero on success, non-zero otherwise. | ||
237 | */ | ||
238 | int ext4_init_crypto(void) | ||
239 | { | ||
240 | int i, res; | ||
241 | |||
242 | mutex_lock(&crypto_init); | ||
243 | if (ext4_read_workqueue) | ||
244 | goto already_initialized; | ||
245 | ext4_read_workqueue = alloc_workqueue("ext4_crypto", WQ_HIGHPRI, 0); | ||
246 | if (!ext4_read_workqueue) { | ||
247 | res = -ENOMEM; | ||
248 | goto fail; | ||
249 | } | ||
250 | |||
251 | for (i = 0; i < num_prealloc_crypto_ctxs; i++) { | ||
252 | struct ext4_crypto_ctx *ctx; | ||
253 | |||
254 | ctx = ext4_alloc_and_init_crypto_ctx(GFP_KERNEL); | ||
255 | if (IS_ERR(ctx)) { | ||
256 | res = PTR_ERR(ctx); | ||
257 | goto fail; | ||
258 | } | ||
259 | list_add(&ctx->free_list, &ext4_free_crypto_ctxs); | ||
260 | } | ||
261 | |||
262 | ext4_bounce_page_pool = | ||
263 | mempool_create_page_pool(num_prealloc_crypto_pages, 0); | ||
264 | if (!ext4_bounce_page_pool) { | ||
265 | res = -ENOMEM; | ||
266 | goto fail; | ||
267 | } | ||
268 | already_initialized: | ||
269 | mutex_unlock(&crypto_init); | ||
270 | return 0; | ||
271 | fail: | ||
272 | ext4_exit_crypto(); | ||
273 | mutex_unlock(&crypto_init); | ||
274 | return res; | ||
275 | } | ||
276 | |||
277 | void ext4_restore_control_page(struct page *data_page) | ||
278 | { | ||
279 | struct ext4_crypto_ctx *ctx = | ||
280 | (struct ext4_crypto_ctx *)page_private(data_page); | ||
281 | |||
282 | set_page_private(data_page, (unsigned long)NULL); | ||
283 | ClearPagePrivate(data_page); | ||
284 | unlock_page(data_page); | ||
285 | ext4_release_crypto_ctx(ctx); | ||
286 | } | ||
287 | |||
288 | /** | ||
289 | * ext4_crypt_complete() - The completion callback for page encryption | ||
290 | * @req: The asynchronous encryption request context | ||
291 | * @res: The result of the encryption operation | ||
292 | */ | ||
293 | static void ext4_crypt_complete(struct crypto_async_request *req, int res) | ||
294 | { | ||
295 | struct ext4_completion_result *ecr = req->data; | ||
296 | |||
297 | if (res == -EINPROGRESS) | ||
298 | return; | ||
299 | ecr->res = res; | ||
300 | complete(&ecr->completion); | ||
301 | } | ||
302 | |||
303 | typedef enum { | ||
304 | EXT4_DECRYPT = 0, | ||
305 | EXT4_ENCRYPT, | ||
306 | } ext4_direction_t; | ||
307 | |||
308 | static int ext4_page_crypto(struct ext4_crypto_ctx *ctx, | ||
309 | struct inode *inode, | ||
310 | ext4_direction_t rw, | ||
311 | pgoff_t index, | ||
312 | struct page *src_page, | ||
313 | struct page *dest_page) | ||
314 | |||
315 | { | ||
316 | u8 xts_tweak[EXT4_XTS_TWEAK_SIZE]; | ||
317 | struct ablkcipher_request *req = NULL; | ||
318 | DECLARE_EXT4_COMPLETION_RESULT(ecr); | ||
319 | struct scatterlist dst, src; | ||
320 | struct ext4_inode_info *ei = EXT4_I(inode); | ||
321 | struct crypto_ablkcipher *atfm = __crypto_ablkcipher_cast(ctx->tfm); | ||
322 | int res = 0; | ||
323 | |||
324 | BUG_ON(!ctx->tfm); | ||
325 | BUG_ON(ctx->mode != ei->i_encryption_key.mode); | ||
326 | |||
327 | if (ctx->mode != EXT4_ENCRYPTION_MODE_AES_256_XTS) { | ||
328 | printk_ratelimited(KERN_ERR | ||
329 | "%s: unsupported crypto algorithm: %d\n", | ||
330 | __func__, ctx->mode); | ||
331 | return -ENOTSUPP; | ||
332 | } | ||
333 | |||
334 | crypto_ablkcipher_clear_flags(atfm, ~0); | ||
335 | crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_REQ_WEAK_KEY); | ||
336 | |||
337 | res = crypto_ablkcipher_setkey(atfm, ei->i_encryption_key.raw, | ||
338 | ei->i_encryption_key.size); | ||
339 | if (res) { | ||
340 | printk_ratelimited(KERN_ERR | ||
341 | "%s: crypto_ablkcipher_setkey() failed\n", | ||
342 | __func__); | ||
343 | return res; | ||
344 | } | ||
345 | req = ablkcipher_request_alloc(atfm, GFP_NOFS); | ||
346 | if (!req) { | ||
347 | printk_ratelimited(KERN_ERR | ||
348 | "%s: crypto_request_alloc() failed\n", | ||
349 | __func__); | ||
350 | return -ENOMEM; | ||
351 | } | ||
352 | ablkcipher_request_set_callback( | ||
353 | req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, | ||
354 | ext4_crypt_complete, &ecr); | ||
355 | |||
356 | BUILD_BUG_ON(EXT4_XTS_TWEAK_SIZE < sizeof(index)); | ||
357 | memcpy(xts_tweak, &index, sizeof(index)); | ||
358 | memset(&xts_tweak[sizeof(index)], 0, | ||
359 | EXT4_XTS_TWEAK_SIZE - sizeof(index)); | ||
360 | |||
361 | sg_init_table(&dst, 1); | ||
362 | sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0); | ||
363 | sg_init_table(&src, 1); | ||
364 | sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0); | ||
365 | ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE, | ||
366 | xts_tweak); | ||
367 | if (rw == EXT4_DECRYPT) | ||
368 | res = crypto_ablkcipher_decrypt(req); | ||
369 | else | ||
370 | res = crypto_ablkcipher_encrypt(req); | ||
371 | if (res == -EINPROGRESS || res == -EBUSY) { | ||
372 | BUG_ON(req->base.data != &ecr); | ||
373 | wait_for_completion(&ecr.completion); | ||
374 | res = ecr.res; | ||
375 | } | ||
376 | ablkcipher_request_free(req); | ||
377 | if (res) { | ||
378 | printk_ratelimited( | ||
379 | KERN_ERR | ||
380 | "%s: crypto_ablkcipher_encrypt() returned %d\n", | ||
381 | __func__, res); | ||
382 | return res; | ||
383 | } | ||
384 | return 0; | ||
385 | } | ||
386 | |||
387 | /** | ||
388 | * ext4_encrypt() - Encrypts a page | ||
389 | * @inode: The inode for which the encryption should take place | ||
390 | * @plaintext_page: The page to encrypt. Must be locked. | ||
391 | * | ||
392 | * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx | ||
393 | * encryption context. | ||
394 | * | ||
395 | * Called on the page write path. The caller must call | ||
396 | * ext4_restore_control_page() on the returned ciphertext page to | ||
397 | * release the bounce buffer and the encryption context. | ||
398 | * | ||
399 | * Return: An allocated page with the encrypted content on success. Else, an | ||
400 | * error value or NULL. | ||
401 | */ | ||
402 | struct page *ext4_encrypt(struct inode *inode, | ||
403 | struct page *plaintext_page) | ||
404 | { | ||
405 | struct ext4_crypto_ctx *ctx; | ||
406 | struct page *ciphertext_page = NULL; | ||
407 | int err; | ||
408 | |||
409 | BUG_ON(!PageLocked(plaintext_page)); | ||
410 | |||
411 | ctx = ext4_get_crypto_ctx(inode); | ||
412 | if (IS_ERR(ctx)) | ||
413 | return (struct page *) ctx; | ||
414 | |||
415 | /* The encryption operation will require a bounce page. */ | ||
416 | ciphertext_page = alloc_page(GFP_NOFS); | ||
417 | if (!ciphertext_page) { | ||
418 | /* This is a potential bottleneck, but at least we'll have | ||
419 | * forward progress. */ | ||
420 | ciphertext_page = mempool_alloc(ext4_bounce_page_pool, | ||
421 | GFP_NOFS); | ||
422 | if (WARN_ON_ONCE(!ciphertext_page)) { | ||
423 | ciphertext_page = mempool_alloc(ext4_bounce_page_pool, | ||
424 | GFP_NOFS | __GFP_WAIT); | ||
425 | } | ||
426 | ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL; | ||
427 | } else { | ||
428 | ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL; | ||
429 | } | ||
430 | ctx->bounce_page = ciphertext_page; | ||
431 | ctx->control_page = plaintext_page; | ||
432 | err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, plaintext_page->index, | ||
433 | plaintext_page, ciphertext_page); | ||
434 | if (err) { | ||
435 | ext4_release_crypto_ctx(ctx); | ||
436 | return ERR_PTR(err); | ||
437 | } | ||
438 | SetPagePrivate(ciphertext_page); | ||
439 | set_page_private(ciphertext_page, (unsigned long)ctx); | ||
440 | lock_page(ciphertext_page); | ||
441 | return ciphertext_page; | ||
442 | } | ||
443 | |||
444 | /** | ||
445 | * ext4_decrypt() - Decrypts a page in-place | ||
446 | * @ctx: The encryption context. | ||
447 | * @page: The page to decrypt. Must be locked. | ||
448 | * | ||
449 | * Decrypts page in-place using the ctx encryption context. | ||
450 | * | ||
451 | * Called from the read completion callback. | ||
452 | * | ||
453 | * Return: Zero on success, non-zero otherwise. | ||
454 | */ | ||
455 | int ext4_decrypt(struct ext4_crypto_ctx *ctx, struct page *page) | ||
456 | { | ||
457 | BUG_ON(!PageLocked(page)); | ||
458 | |||
459 | return ext4_page_crypto(ctx, page->mapping->host, | ||
460 | EXT4_DECRYPT, page->index, page, page); | ||
461 | } | ||
462 | |||
/*
 * Convenience function which takes care of allocating and
 * deallocating the encryption context
 *
 * Return: Zero on success, negative errno otherwise.
 */
int ext4_decrypt_one(struct inode *inode, struct page *page)
{
	int ret;

	struct ext4_crypto_ctx *ctx = ext4_get_crypto_ctx(inode);

	/*
	 * ext4_get_crypto_ctx() reports failure via ERR_PTR(), never
	 * NULL, so the error must be caught with IS_ERR(); a plain NULL
	 * check would miss the failure and pass an ERR_PTR on to
	 * ext4_decrypt().
	 */
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	ret = ext4_decrypt(ctx, page);
	ext4_release_crypto_ctx(ctx);
	return ret;
}
479 | |||
480 | int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex) | ||
481 | { | ||
482 | struct ext4_crypto_ctx *ctx; | ||
483 | struct page *ciphertext_page = NULL; | ||
484 | struct bio *bio; | ||
485 | ext4_lblk_t lblk = ex->ee_block; | ||
486 | ext4_fsblk_t pblk = ext4_ext_pblock(ex); | ||
487 | unsigned int len = ext4_ext_get_actual_len(ex); | ||
488 | int err = 0; | ||
489 | |||
490 | BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE); | ||
491 | |||
492 | ctx = ext4_get_crypto_ctx(inode); | ||
493 | if (IS_ERR(ctx)) | ||
494 | return PTR_ERR(ctx); | ||
495 | |||
496 | ciphertext_page = alloc_page(GFP_NOFS); | ||
497 | if (!ciphertext_page) { | ||
498 | /* This is a potential bottleneck, but at least we'll have | ||
499 | * forward progress. */ | ||
500 | ciphertext_page = mempool_alloc(ext4_bounce_page_pool, | ||
501 | GFP_NOFS); | ||
502 | if (WARN_ON_ONCE(!ciphertext_page)) { | ||
503 | ciphertext_page = mempool_alloc(ext4_bounce_page_pool, | ||
504 | GFP_NOFS | __GFP_WAIT); | ||
505 | } | ||
506 | ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL; | ||
507 | } else { | ||
508 | ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL; | ||
509 | } | ||
510 | ctx->bounce_page = ciphertext_page; | ||
511 | |||
512 | while (len--) { | ||
513 | err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, lblk, | ||
514 | ZERO_PAGE(0), ciphertext_page); | ||
515 | if (err) | ||
516 | goto errout; | ||
517 | |||
518 | bio = bio_alloc(GFP_KERNEL, 1); | ||
519 | if (!bio) { | ||
520 | err = -ENOMEM; | ||
521 | goto errout; | ||
522 | } | ||
523 | bio->bi_bdev = inode->i_sb->s_bdev; | ||
524 | bio->bi_iter.bi_sector = pblk; | ||
525 | err = bio_add_page(bio, ciphertext_page, | ||
526 | inode->i_sb->s_blocksize, 0); | ||
527 | if (err) { | ||
528 | bio_put(bio); | ||
529 | goto errout; | ||
530 | } | ||
531 | err = submit_bio_wait(WRITE, bio); | ||
532 | if (err) | ||
533 | goto errout; | ||
534 | } | ||
535 | err = 0; | ||
536 | errout: | ||
537 | ext4_release_crypto_ctx(ctx); | ||
538 | return err; | ||
539 | } | ||
540 | |||
541 | bool ext4_valid_contents_enc_mode(uint32_t mode) | ||
542 | { | ||
543 | return (mode == EXT4_ENCRYPTION_MODE_AES_256_XTS); | ||
544 | } | ||
545 | |||
546 | /** | ||
547 | * ext4_validate_encryption_key_size() - Validate the encryption key size | ||
548 | * @mode: The key mode. | ||
549 | * @size: The key size to validate. | ||
550 | * | ||
551 | * Return: The validated key size for @mode. Zero if invalid. | ||
552 | */ | ||
553 | uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size) | ||
554 | { | ||
555 | if (size == ext4_encryption_key_size(mode)) | ||
556 | return size; | ||
557 | return 0; | ||
558 | } | ||
diff --git a/fs/ext4/crypto_policy.c b/fs/ext4/crypto_policy.c index 532b69c0afab..a4bf762b3ba9 100644 --- a/fs/ext4/crypto_policy.c +++ b/fs/ext4/crypto_policy.c | |||
@@ -52,6 +52,13 @@ static int ext4_create_encryption_context_from_policy( | |||
52 | ctx.format = EXT4_ENCRYPTION_CONTEXT_FORMAT_V1; | 52 | ctx.format = EXT4_ENCRYPTION_CONTEXT_FORMAT_V1; |
53 | memcpy(ctx.master_key_descriptor, policy->master_key_descriptor, | 53 | memcpy(ctx.master_key_descriptor, policy->master_key_descriptor, |
54 | EXT4_KEY_DESCRIPTOR_SIZE); | 54 | EXT4_KEY_DESCRIPTOR_SIZE); |
55 | if (!ext4_valid_contents_enc_mode(policy->contents_encryption_mode)) { | ||
56 | printk(KERN_WARNING | ||
57 | "%s: Invalid contents encryption mode %d\n", __func__, | ||
58 | policy->contents_encryption_mode); | ||
59 | res = -EINVAL; | ||
60 | goto out; | ||
61 | } | ||
55 | ctx.contents_encryption_mode = policy->contents_encryption_mode; | 62 | ctx.contents_encryption_mode = policy->contents_encryption_mode; |
56 | ctx.filenames_encryption_mode = policy->filenames_encryption_mode; | 63 | ctx.filenames_encryption_mode = policy->filenames_encryption_mode; |
57 | BUILD_BUG_ON(sizeof(ctx.nonce) != EXT4_KEY_DERIVATION_NONCE_SIZE); | 64 | BUILD_BUG_ON(sizeof(ctx.nonce) != EXT4_KEY_DERIVATION_NONCE_SIZE); |
@@ -60,6 +67,7 @@ static int ext4_create_encryption_context_from_policy( | |||
60 | res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION, | 67 | res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION, |
61 | EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx, | 68 | EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx, |
62 | sizeof(ctx), 0); | 69 | sizeof(ctx), 0); |
70 | out: | ||
63 | if (!res) | 71 | if (!res) |
64 | ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT); | 72 | ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT); |
65 | return res; | 73 | return res; |
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index ab873aa9955e..71619ef72225 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h | |||
@@ -951,6 +951,11 @@ struct ext4_inode_info { | |||
951 | 951 | ||
952 | /* Precomputed uuid+inum+igen checksum for seeding inode checksums */ | 952 | /* Precomputed uuid+inum+igen checksum for seeding inode checksums */ |
953 | __u32 i_csum_seed; | 953 | __u32 i_csum_seed; |
954 | |||
955 | #ifdef CONFIG_EXT4_FS_ENCRYPTION | ||
956 | /* Encryption params */ | ||
957 | struct ext4_encryption_key i_encryption_key; | ||
958 | #endif | ||
954 | }; | 959 | }; |
955 | 960 | ||
956 | /* | 961 | /* |
@@ -1366,6 +1371,12 @@ struct ext4_sb_info { | |||
1366 | struct ratelimit_state s_err_ratelimit_state; | 1371 | struct ratelimit_state s_err_ratelimit_state; |
1367 | struct ratelimit_state s_warning_ratelimit_state; | 1372 | struct ratelimit_state s_warning_ratelimit_state; |
1368 | struct ratelimit_state s_msg_ratelimit_state; | 1373 | struct ratelimit_state s_msg_ratelimit_state; |
1374 | |||
1375 | #ifdef CONFIG_EXT4_FS_ENCRYPTION | ||
1376 | /* Encryption */ | ||
1377 | uint32_t s_file_encryption_mode; | ||
1378 | uint32_t s_dir_encryption_mode; | ||
1379 | #endif | ||
1369 | }; | 1380 | }; |
1370 | 1381 | ||
1371 | static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb) | 1382 | static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb) |
@@ -1481,6 +1492,18 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei) | |||
1481 | #define EXT4_SB(sb) (sb) | 1492 | #define EXT4_SB(sb) (sb) |
1482 | #endif | 1493 | #endif |
1483 | 1494 | ||
/*
 * Returns true if the inode is encrypted (i.e. has the
 * EXT4_INODE_ENCRYPT flag set); always false when encryption support
 * is compiled out.
 */
static inline int ext4_encrypted_inode(struct inode *inode)
{
#ifdef CONFIG_EXT4_FS_ENCRYPTION
	return ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT);
#else
	return 0;
#endif
}
1506 | |||
1484 | #define NEXT_ORPHAN(inode) EXT4_I(inode)->i_dtime | 1507 | #define NEXT_ORPHAN(inode) EXT4_I(inode)->i_dtime |
1485 | 1508 | ||
1486 | /* | 1509 | /* |
@@ -2026,6 +2049,35 @@ int ext4_process_policy(const struct ext4_encryption_policy *policy, | |||
2026 | int ext4_get_policy(struct inode *inode, | 2049 | int ext4_get_policy(struct inode *inode, |
2027 | struct ext4_encryption_policy *policy); | 2050 | struct ext4_encryption_policy *policy); |
2028 | 2051 | ||
2052 | /* crypto.c */ | ||
2053 | bool ext4_valid_contents_enc_mode(uint32_t mode); | ||
2054 | uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size); | ||
2055 | extern struct workqueue_struct *ext4_read_workqueue; | ||
2056 | struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode); | ||
2057 | void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx); | ||
2058 | void ext4_restore_control_page(struct page *data_page); | ||
2059 | struct page *ext4_encrypt(struct inode *inode, | ||
2060 | struct page *plaintext_page); | ||
2061 | int ext4_decrypt(struct ext4_crypto_ctx *ctx, struct page *page); | ||
2062 | int ext4_decrypt_one(struct inode *inode, struct page *page); | ||
2063 | int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex); | ||
2064 | |||
2065 | #ifdef CONFIG_EXT4_FS_ENCRYPTION | ||
2066 | int ext4_init_crypto(void); | ||
2067 | void ext4_exit_crypto(void); | ||
2068 | static inline int ext4_sb_has_crypto(struct super_block *sb) | ||
2069 | { | ||
2070 | return EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_ENCRYPT); | ||
2071 | } | ||
2072 | #else | ||
2073 | static inline int ext4_init_crypto(void) { return 0; } | ||
2074 | static inline void ext4_exit_crypto(void) { } | ||
2075 | static inline int ext4_sb_has_crypto(struct super_block *sb) | ||
2076 | { | ||
2077 | return 0; | ||
2078 | } | ||
2079 | #endif | ||
2080 | |||
2029 | /* dir.c */ | 2081 | /* dir.c */ |
2030 | extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *, | 2082 | extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *, |
2031 | struct file *, | 2083 | struct file *, |
diff --git a/fs/ext4/ext4_crypto.h b/fs/ext4/ext4_crypto.h index a69d2ba54bee..9d5d2e56cc46 100644 --- a/fs/ext4/ext4_crypto.h +++ b/fs/ext4/ext4_crypto.h | |||
@@ -46,4 +46,59 @@ struct ext4_encryption_context { | |||
46 | char nonce[EXT4_KEY_DERIVATION_NONCE_SIZE]; | 46 | char nonce[EXT4_KEY_DERIVATION_NONCE_SIZE]; |
47 | } __attribute__((__packed__)); | 47 | } __attribute__((__packed__)); |
48 | 48 | ||
49 | /* Encryption parameters */ | ||
50 | #define EXT4_XTS_TWEAK_SIZE 16 | ||
51 | #define EXT4_AES_128_ECB_KEY_SIZE 16 | ||
52 | #define EXT4_AES_256_GCM_KEY_SIZE 32 | ||
53 | #define EXT4_AES_256_CBC_KEY_SIZE 32 | ||
54 | #define EXT4_AES_256_CTS_KEY_SIZE 32 | ||
55 | #define EXT4_AES_256_XTS_KEY_SIZE 64 | ||
56 | #define EXT4_MAX_KEY_SIZE 64 | ||
57 | |||
58 | struct ext4_encryption_key { | ||
59 | uint32_t mode; | ||
60 | char raw[EXT4_MAX_KEY_SIZE]; | ||
61 | uint32_t size; | ||
62 | }; | ||
63 | |||
64 | #define EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001 | ||
65 | #define EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL 0x00000002 | ||
66 | |||
67 | struct ext4_crypto_ctx { | ||
68 | struct crypto_tfm *tfm; /* Crypto API context */ | ||
69 | struct page *bounce_page; /* Ciphertext page on write path */ | ||
70 | struct page *control_page; /* Original page on write path */ | ||
71 | struct bio *bio; /* The bio for this context */ | ||
72 | struct work_struct work; /* Work queue for read complete path */ | ||
73 | struct list_head free_list; /* Free list */ | ||
74 | int flags; /* Flags */ | ||
75 | int mode; /* Encryption mode for tfm */ | ||
76 | }; | ||
77 | |||
78 | struct ext4_completion_result { | ||
79 | struct completion completion; | ||
80 | int res; | ||
81 | }; | ||
82 | |||
83 | #define DECLARE_EXT4_COMPLETION_RESULT(ecr) \ | ||
84 | struct ext4_completion_result ecr = { \ | ||
85 | COMPLETION_INITIALIZER((ecr).completion), 0 } | ||
86 | |||
87 | static inline int ext4_encryption_key_size(int mode) | ||
88 | { | ||
89 | switch (mode) { | ||
90 | case EXT4_ENCRYPTION_MODE_AES_256_XTS: | ||
91 | return EXT4_AES_256_XTS_KEY_SIZE; | ||
92 | case EXT4_ENCRYPTION_MODE_AES_256_GCM: | ||
93 | return EXT4_AES_256_GCM_KEY_SIZE; | ||
94 | case EXT4_ENCRYPTION_MODE_AES_256_CBC: | ||
95 | return EXT4_AES_256_CBC_KEY_SIZE; | ||
96 | case EXT4_ENCRYPTION_MODE_AES_256_CTS: | ||
97 | return EXT4_AES_256_CTS_KEY_SIZE; | ||
98 | default: | ||
99 | BUG(); | ||
100 | } | ||
101 | return 0; | ||
102 | } | ||
103 | |||
49 | #endif /* _EXT4_CRYPTO_H */ | 104 | #endif /* _EXT4_CRYPTO_H */ |
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index e47a552fea05..1008ca258de4 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
@@ -876,6 +876,9 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) | |||
876 | atomic_set(&ei->i_ioend_count, 0); | 876 | atomic_set(&ei->i_ioend_count, 0); |
877 | atomic_set(&ei->i_unwritten, 0); | 877 | atomic_set(&ei->i_unwritten, 0); |
878 | INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work); | 878 | INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work); |
879 | #ifdef CONFIG_EXT4_FS_ENCRYPTION | ||
880 | ei->i_encryption_key.mode = EXT4_ENCRYPTION_MODE_INVALID; | ||
881 | #endif | ||
879 | 882 | ||
880 | return &ei->vfs_inode; | 883 | return &ei->vfs_inode; |
881 | } | 884 | } |
@@ -3431,6 +3434,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
3431 | if (sb->s_bdev->bd_part) | 3434 | if (sb->s_bdev->bd_part) |
3432 | sbi->s_sectors_written_start = | 3435 | sbi->s_sectors_written_start = |
3433 | part_stat_read(sb->s_bdev->bd_part, sectors[1]); | 3436 | part_stat_read(sb->s_bdev->bd_part, sectors[1]); |
3437 | #ifdef CONFIG_EXT4_FS_ENCRYPTION | ||
3438 | /* Modes of operations for file and directory encryption. */ | ||
3439 | sbi->s_file_encryption_mode = EXT4_ENCRYPTION_MODE_AES_256_XTS; | ||
3440 | sbi->s_dir_encryption_mode = EXT4_ENCRYPTION_MODE_INVALID; | ||
3441 | #endif | ||
3434 | 3442 | ||
3435 | /* Cleanup superblock name */ | 3443 | /* Cleanup superblock name */ |
3436 | for (cp = sb->s_id; (cp = strchr(cp, '/'));) | 3444 | for (cp = sb->s_id; (cp = strchr(cp, '/'));) |