author		Linus Torvalds <torvalds@linux-foundation.org>	2015-04-19 17:26:31 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-04-19 17:26:31 -0400
commit		6162e4b0bedeb3dac2ba0a5e1b1f56db107d97ec (patch)
tree		b4ee364c3819f19acd8a63b06d455b11cd91b9ae /fs
parent		17974c054db3030b714b7108566bf5208d965a19 (diff)
parent		6ddb2447846a8ece111e316a2863c2355023682d (diff)
Merge tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4
Pull ext4 updates from Ted Ts'o:
 "A few bug fixes and add support for file-system level encryption in
  ext4"

* tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4: (31 commits)
  ext4 crypto: enable encryption feature flag
  ext4 crypto: add symlink encryption
  ext4 crypto: enable filename encryption
  ext4 crypto: filename encryption modifications
  ext4 crypto: partial update to namei.c for fname crypto
  ext4 crypto: insert encrypted filenames into a leaf directory block
  ext4 crypto: teach ext4_htree_store_dirent() to store decrypted filenames
  ext4 crypto: filename encryption facilities
  ext4 crypto: implement the ext4 decryption read path
  ext4 crypto: implement the ext4 encryption write path
  ext4 crypto: inherit encryption policies on inode and directory create
  ext4 crypto: enforce context consistency
  ext4 crypto: add encryption key management facilities
  ext4 crypto: add ext4 encryption facilities
  ext4 crypto: add encryption policy and password salt support
  ext4 crypto: add encryption xattr support
  ext4 crypto: export ext4_empty_dir()
  ext4 crypto: add ext4 encryption Kconfig
  ext4 crypto: reserve codepoints used by the ext4 encryption feature
  ext4 crypto: add ext4_mpage_readpages()
  ...
Diffstat (limited to 'fs')
-rw-r--r--   fs/ext4/Kconfig            |  17
-rw-r--r--   fs/ext4/Makefile           |   4
-rw-r--r--   fs/ext4/acl.c              |   5
-rw-r--r--   fs/ext4/balloc.c           |   3
-rw-r--r--   fs/ext4/bitmap.c           |   1
-rw-r--r--   fs/ext4/block_validity.c   |   1
-rw-r--r--   fs/ext4/crypto.c           | 558
-rw-r--r--   fs/ext4/crypto_fname.c     | 709
-rw-r--r--   fs/ext4/crypto_key.c       | 165
-rw-r--r--   fs/ext4/crypto_policy.c    | 194
-rw-r--r--   fs/ext4/dir.c              |  81
-rw-r--r--   fs/ext4/ext4.h             | 169
-rw-r--r--   fs/ext4/ext4_crypto.h      | 147
-rw-r--r--   fs/ext4/extents.c          |  81
-rw-r--r--   fs/ext4/extents_status.c   |   2
-rw-r--r--   fs/ext4/file.c             |  19
-rw-r--r--   fs/ext4/fsync.c            |   1
-rw-r--r--   fs/ext4/hash.c             |   1
-rw-r--r--   fs/ext4/ialloc.c           |  28
-rw-r--r--   fs/ext4/inline.c           |  16
-rw-r--r--   fs/ext4/inode.c            | 130
-rw-r--r--   fs/ext4/ioctl.c            |  86
-rw-r--r--   fs/ext4/namei.c            | 637
-rw-r--r--   fs/ext4/page-io.c          |  47
-rw-r--r--   fs/ext4/readpage.c         | 328
-rw-r--r--   fs/ext4/super.c            |  56
-rw-r--r--   fs/ext4/symlink.c          |  97
-rw-r--r--   fs/ext4/xattr.c            |   4
-rw-r--r--   fs/ext4/xattr.h            |   3
29 files changed, 3344 insertions, 246 deletions
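
[Annotation] The diffstat shows ioctl.c growing by 86 lines: the series appears to wire the policy handling in crypto_policy.c (further down) up to new ioctls so that userspace can attach an encryption policy to an empty directory. As a rough illustration of the userspace side, here is a minimal sketch; the struct layout, descriptor size, ioctl codepoint, and mode constants are assumptions, since ext4_crypto.h and ioctl.c are not reproduced in this excerpt.

/*
 * Hypothetical userspace sketch: attach an encryption policy to an
 * empty directory.  The fields mirror what crypto_policy.c reads
 * below; the ioctl request code, descriptor size, and mode values
 * are assumptions, not taken from this diff.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

struct ext4_encryption_policy {
	char version;
	char contents_encryption_mode;
	char filenames_encryption_mode;
	char master_key_descriptor[8];	/* assumed descriptor size */
} __attribute__((packed));

/* assumed ioctl codepoint, for illustration only */
#define EXT4_IOC_SET_ENCRYPTION_POLICY \
	_IOR('f', 19, struct ext4_encryption_policy)

int main(int argc, char **argv)
{
	struct ext4_encryption_policy policy;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <empty-dir>\n", argv[0]);
		return 1;
	}
	memset(&policy, 0, sizeof(policy));
	policy.version = 0;			/* the only version accepted below */
	policy.contents_encryption_mode = 1;	/* assumed: AES-256-XTS */
	policy.filenames_encryption_mode = 4;	/* assumed: AES-256-CTS */
	memset(policy.master_key_descriptor, 0x42,
	       sizeof(policy.master_key_descriptor));

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, EXT4_IOC_SET_ENCRYPTION_POLICY, &policy) < 0) {
		perror("EXT4_IOC_SET_ENCRYPTION_POLICY");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
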
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index efea5d5c44ce..18228c201f7f 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -64,6 +64,23 @@ config EXT4_FS_SECURITY
 	  If you are not using a security module that requires using
 	  extended attributes for file security labels, say N.
 
+config EXT4_FS_ENCRYPTION
+	bool "Ext4 Encryption"
+	depends on EXT4_FS
+	select CRYPTO_AES
+	select CRYPTO_CBC
+	select CRYPTO_ECB
+	select CRYPTO_XTS
+	select CRYPTO_CTS
+	select CRYPTO_SHA256
+	select KEYS
+	select ENCRYPTED_KEYS
+	help
+	  Enable encryption of ext4 files and directories.  This
+	  feature is similar to ecryptfs, but it is more memory
+	  efficient since it avoids caching the encrypted and
+	  decrypted pages in the page cache.
+
 config EXT4_DEBUG
 	bool "EXT4 debugging support"
 	depends on EXT4_FS
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
index 0310fec2ee3d..75285ea9aa05 100644
--- a/fs/ext4/Makefile
+++ b/fs/ext4/Makefile
@@ -8,7 +8,9 @@ ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \
 		ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
 		ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
 		mmp.o indirect.o extents_status.o xattr.o xattr_user.o \
-		xattr_trusted.o inline.o
+		xattr_trusted.o inline.o readpage.o
 
 ext4-$(CONFIG_EXT4_FS_POSIX_ACL)	+= acl.o
 ext4-$(CONFIG_EXT4_FS_SECURITY)		+= xattr_security.o
+ext4-$(CONFIG_EXT4_FS_ENCRYPTION)	+= crypto_policy.o crypto.o \
+		crypto_key.o crypto_fname.o
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index d40c8dbbb0d6..69b1e73026a5 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -4,11 +4,6 @@
  * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
  */
 
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/capability.h>
-#include <linux/fs.h>
 #include "ext4_jbd2.h"
 #include "ext4.h"
 #include "xattr.h"
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 83a6f497c4e0..955bf49a7945 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -14,7 +14,6 @@
 #include <linux/time.h>
 #include <linux/capability.h>
 #include <linux/fs.h>
-#include <linux/jbd2.h>
 #include <linux/quotaops.h>
 #include <linux/buffer_head.h>
 #include "ext4.h"
@@ -641,8 +640,6 @@ ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
 	 * fail EDQUOT for metdata, but we do account for it.
 	 */
 	if (!(*errp) && (flags & EXT4_MB_DELALLOC_RESERVED)) {
-		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
-		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 		dquot_alloc_block_nofail(inode,
 				EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
 	}
diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
index b610779a958c..4a606afb171f 100644
--- a/fs/ext4/bitmap.c
+++ b/fs/ext4/bitmap.c
@@ -8,7 +8,6 @@
  */
 
 #include <linux/buffer_head.h>
-#include <linux/jbd2.h>
 #include "ext4.h"
 
 unsigned int ext4_count_free(char *bitmap, unsigned int numchars)
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
index 41eb9dcfac7e..3522340c7a99 100644
--- a/fs/ext4/block_validity.c
+++ b/fs/ext4/block_validity.c
@@ -16,7 +16,6 @@
 #include <linux/swap.h>
 #include <linux/pagemap.h>
 #include <linux/blkdev.h>
-#include <linux/mutex.h>
 #include <linux/slab.h>
 #include "ext4.h"
 
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
new file mode 100644
index 000000000000..8ff15273ab0c
--- /dev/null
+++ b/fs/ext4/crypto.c
@@ -0,0 +1,558 @@
1/*
2 * linux/fs/ext4/crypto.c
3 *
4 * Copyright (C) 2015, Google, Inc.
5 *
6 * This contains encryption functions for ext4
7 *
8 * Written by Michael Halcrow, 2014.
9 *
10 * Filename encryption additions
11 * Uday Savagaonkar, 2014
12 * Encryption policy handling additions
13 * Ildar Muslukhov, 2014
14 *
15 * This has not yet undergone a rigorous security audit.
16 *
17 * The usage of AES-XTS should conform to recommendations in NIST
18 * Special Publication 800-38E and IEEE P1619/D16.
19 */
20
21#include <crypto/hash.h>
22#include <crypto/sha.h>
23#include <keys/user-type.h>
24#include <keys/encrypted-type.h>
25#include <linux/crypto.h>
26#include <linux/ecryptfs.h>
27#include <linux/gfp.h>
28#include <linux/kernel.h>
29#include <linux/key.h>
30#include <linux/list.h>
31#include <linux/mempool.h>
32#include <linux/module.h>
33#include <linux/mutex.h>
34#include <linux/random.h>
35#include <linux/scatterlist.h>
36#include <linux/spinlock_types.h>
37
38#include "ext4_extents.h"
39#include "xattr.h"
40
41/* Encryption added and removed here! (L: */
42
43static unsigned int num_prealloc_crypto_pages = 32;
44static unsigned int num_prealloc_crypto_ctxs = 128;
45
46module_param(num_prealloc_crypto_pages, uint, 0444);
47MODULE_PARM_DESC(num_prealloc_crypto_pages,
48 "Number of crypto pages to preallocate");
49module_param(num_prealloc_crypto_ctxs, uint, 0444);
50MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
51 "Number of crypto contexts to preallocate");
52
53static mempool_t *ext4_bounce_page_pool;
54
55static LIST_HEAD(ext4_free_crypto_ctxs);
56static DEFINE_SPINLOCK(ext4_crypto_ctx_lock);
57
58/**
59 * ext4_release_crypto_ctx() - Releases an encryption context
60 * @ctx: The encryption context to release.
61 *
62 * If the encryption context was allocated from the pre-allocated pool, returns
63 * it to that pool. Else, frees it.
64 *
65 * If there's a bounce page in the context, this frees that.
66 */
67void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
68{
69 unsigned long flags;
70
71 if (ctx->bounce_page) {
72 if (ctx->flags & EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL)
73 __free_page(ctx->bounce_page);
74 else
75 mempool_free(ctx->bounce_page, ext4_bounce_page_pool);
76 ctx->bounce_page = NULL;
77 }
78 ctx->control_page = NULL;
79 if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) {
80 if (ctx->tfm)
81 crypto_free_tfm(ctx->tfm);
82 kfree(ctx);
83 } else {
84 spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
85 list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
86 spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
87 }
88}
89
90/**
91 * ext4_alloc_and_init_crypto_ctx() - Allocates and inits an encryption context
92 * @mask: The allocation mask.
93 *
94 * Return: An allocated and initialized encryption context on success. An error
95 * value or NULL otherwise.
96 */
97static struct ext4_crypto_ctx *ext4_alloc_and_init_crypto_ctx(gfp_t mask)
98{
99 struct ext4_crypto_ctx *ctx = kzalloc(sizeof(struct ext4_crypto_ctx),
100 mask);
101
102 if (!ctx)
103 return ERR_PTR(-ENOMEM);
104 return ctx;
105}
106
107/**
108 * ext4_get_crypto_ctx() - Gets an encryption context
109 * @inode: The inode for which we are doing the crypto
110 *
111 * Allocates and initializes an encryption context.
112 *
113 * Return: An allocated and initialized encryption context on success; error
114 * value or NULL otherwise.
115 */
116struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
117{
118 struct ext4_crypto_ctx *ctx = NULL;
119 int res = 0;
120 unsigned long flags;
121 struct ext4_encryption_key *key = &EXT4_I(inode)->i_encryption_key;
122
123 if (!ext4_read_workqueue)
124 ext4_init_crypto();
125
126 /*
127 * We first try getting the ctx from a free list because in
128 * the common case the ctx will have an allocated and
129 * initialized crypto tfm, so it's probably a worthwhile
130 * optimization. For the bounce page, we first try getting it
131 * from the kernel allocator because that's just about as fast
132 * as getting it from a list and because a cache of free pages
133 * should generally be a "last resort" option for a filesystem
134 * to be able to do its job.
135 */
136 spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
137 ctx = list_first_entry_or_null(&ext4_free_crypto_ctxs,
138 struct ext4_crypto_ctx, free_list);
139 if (ctx)
140 list_del(&ctx->free_list);
141 spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
142 if (!ctx) {
143 ctx = ext4_alloc_and_init_crypto_ctx(GFP_NOFS);
144 if (IS_ERR(ctx)) {
145 res = PTR_ERR(ctx);
146 goto out;
147 }
148 ctx->flags |= EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
149 } else {
150 ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
151 }
152
153 /* Allocate a new Crypto API context if we don't already have
154 * one or if it isn't the right mode. */
155 BUG_ON(key->mode == EXT4_ENCRYPTION_MODE_INVALID);
156 if (ctx->tfm && (ctx->mode != key->mode)) {
157 crypto_free_tfm(ctx->tfm);
158 ctx->tfm = NULL;
159 ctx->mode = EXT4_ENCRYPTION_MODE_INVALID;
160 }
161 if (!ctx->tfm) {
162 switch (key->mode) {
163 case EXT4_ENCRYPTION_MODE_AES_256_XTS:
164 ctx->tfm = crypto_ablkcipher_tfm(
165 crypto_alloc_ablkcipher("xts(aes)", 0, 0));
166 break;
167 case EXT4_ENCRYPTION_MODE_AES_256_GCM:
168 /* TODO(mhalcrow): AEAD w/ gcm(aes);
169 * crypto_aead_setauthsize() */
170 ctx->tfm = ERR_PTR(-ENOTSUPP);
171 break;
172 default:
173 BUG();
174 }
175 if (IS_ERR_OR_NULL(ctx->tfm)) {
176 res = PTR_ERR(ctx->tfm);
177 ctx->tfm = NULL;
178 goto out;
179 }
180 ctx->mode = key->mode;
181 }
182 BUG_ON(key->size != ext4_encryption_key_size(key->mode));
183
184 /* There shouldn't be a bounce page attached to the crypto
185 * context at this point. */
186 BUG_ON(ctx->bounce_page);
187
188out:
189 if (res) {
190 if (!IS_ERR_OR_NULL(ctx))
191 ext4_release_crypto_ctx(ctx);
192 ctx = ERR_PTR(res);
193 }
194 return ctx;
195}
196
197struct workqueue_struct *ext4_read_workqueue;
198static DEFINE_MUTEX(crypto_init);
199
200/**
201 * ext4_exit_crypto() - Shutdown the ext4 encryption system
202 */
203void ext4_exit_crypto(void)
204{
205 struct ext4_crypto_ctx *pos, *n;
206
207 list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list) {
208 if (pos->bounce_page) {
209 if (pos->flags &
210 EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL) {
211 __free_page(pos->bounce_page);
212 } else {
213 mempool_free(pos->bounce_page,
214 ext4_bounce_page_pool);
215 }
216 }
217 if (pos->tfm)
218 crypto_free_tfm(pos->tfm);
219 kfree(pos);
220 }
221 INIT_LIST_HEAD(&ext4_free_crypto_ctxs);
222 if (ext4_bounce_page_pool)
223 mempool_destroy(ext4_bounce_page_pool);
224 ext4_bounce_page_pool = NULL;
225 if (ext4_read_workqueue)
226 destroy_workqueue(ext4_read_workqueue);
227 ext4_read_workqueue = NULL;
228}
229
230/**
231 * ext4_init_crypto() - Set up for ext4 encryption.
232 *
233 * We only call this when we start accessing encrypted files, since it
234 * results in memory getting allocated that wouldn't otherwise be used.
235 *
236 * Return: Zero on success, non-zero otherwise.
237 */
238int ext4_init_crypto(void)
239{
240 int i, res;
241
242 mutex_lock(&crypto_init);
243 if (ext4_read_workqueue)
244 goto already_initialized;
245 ext4_read_workqueue = alloc_workqueue("ext4_crypto", WQ_HIGHPRI, 0);
246 if (!ext4_read_workqueue) {
247 res = -ENOMEM;
248 goto fail;
249 }
250
251 for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
252 struct ext4_crypto_ctx *ctx;
253
254 ctx = ext4_alloc_and_init_crypto_ctx(GFP_KERNEL);
255 if (IS_ERR(ctx)) {
256 res = PTR_ERR(ctx);
257 goto fail;
258 }
259 list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
260 }
261
262 ext4_bounce_page_pool =
263 mempool_create_page_pool(num_prealloc_crypto_pages, 0);
264 if (!ext4_bounce_page_pool) {
265 res = -ENOMEM;
266 goto fail;
267 }
268already_initialized:
269 mutex_unlock(&crypto_init);
270 return 0;
271fail:
272 ext4_exit_crypto();
273 mutex_unlock(&crypto_init);
274 return res;
275}
276
277void ext4_restore_control_page(struct page *data_page)
278{
279 struct ext4_crypto_ctx *ctx =
280 (struct ext4_crypto_ctx *)page_private(data_page);
281
282 set_page_private(data_page, (unsigned long)NULL);
283 ClearPagePrivate(data_page);
284 unlock_page(data_page);
285 ext4_release_crypto_ctx(ctx);
286}
287
288/**
289 * ext4_crypt_complete() - The completion callback for page encryption
290 * @req: The asynchronous encryption request context
291 * @res: The result of the encryption operation
292 */
293static void ext4_crypt_complete(struct crypto_async_request *req, int res)
294{
295 struct ext4_completion_result *ecr = req->data;
296
297 if (res == -EINPROGRESS)
298 return;
299 ecr->res = res;
300 complete(&ecr->completion);
301}
302
303typedef enum {
304 EXT4_DECRYPT = 0,
305 EXT4_ENCRYPT,
306} ext4_direction_t;
307
308static int ext4_page_crypto(struct ext4_crypto_ctx *ctx,
309 struct inode *inode,
310 ext4_direction_t rw,
311 pgoff_t index,
312 struct page *src_page,
313 struct page *dest_page)
314
315{
316 u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
317 struct ablkcipher_request *req = NULL;
318 DECLARE_EXT4_COMPLETION_RESULT(ecr);
319 struct scatterlist dst, src;
320 struct ext4_inode_info *ei = EXT4_I(inode);
321 struct crypto_ablkcipher *atfm = __crypto_ablkcipher_cast(ctx->tfm);
322 int res = 0;
323
324 BUG_ON(!ctx->tfm);
325 BUG_ON(ctx->mode != ei->i_encryption_key.mode);
326
327 if (ctx->mode != EXT4_ENCRYPTION_MODE_AES_256_XTS) {
328 printk_ratelimited(KERN_ERR
329 "%s: unsupported crypto algorithm: %d\n",
330 __func__, ctx->mode);
331 return -ENOTSUPP;
332 }
333
334 crypto_ablkcipher_clear_flags(atfm, ~0);
335 crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_REQ_WEAK_KEY);
336
337 res = crypto_ablkcipher_setkey(atfm, ei->i_encryption_key.raw,
338 ei->i_encryption_key.size);
339 if (res) {
340 printk_ratelimited(KERN_ERR
341 "%s: crypto_ablkcipher_setkey() failed\n",
342 __func__);
343 return res;
344 }
345 req = ablkcipher_request_alloc(atfm, GFP_NOFS);
346 if (!req) {
347 printk_ratelimited(KERN_ERR
348 "%s: crypto_request_alloc() failed\n",
349 __func__);
350 return -ENOMEM;
351 }
352 ablkcipher_request_set_callback(
353 req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
354 ext4_crypt_complete, &ecr);
355
356 BUILD_BUG_ON(EXT4_XTS_TWEAK_SIZE < sizeof(index));
357 memcpy(xts_tweak, &index, sizeof(index));
358 memset(&xts_tweak[sizeof(index)], 0,
359 EXT4_XTS_TWEAK_SIZE - sizeof(index));
360
361 sg_init_table(&dst, 1);
362 sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
363 sg_init_table(&src, 1);
364 sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
365 ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
366 xts_tweak);
367 if (rw == EXT4_DECRYPT)
368 res = crypto_ablkcipher_decrypt(req);
369 else
370 res = crypto_ablkcipher_encrypt(req);
371 if (res == -EINPROGRESS || res == -EBUSY) {
372 BUG_ON(req->base.data != &ecr);
373 wait_for_completion(&ecr.completion);
374 res = ecr.res;
375 }
376 ablkcipher_request_free(req);
377 if (res) {
378 printk_ratelimited(
379 KERN_ERR
380 "%s: crypto_ablkcipher_encrypt() returned %d\n",
381 __func__, res);
382 return res;
383 }
384 return 0;
385}
386
387/**
388 * ext4_encrypt() - Encrypts a page
389 * @inode: The inode for which the encryption should take place
390 * @plaintext_page: The page to encrypt. Must be locked.
391 *
392 * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
393 * encryption context.
394 *
395 * Called on the page write path. The caller must call
396 * ext4_restore_control_page() on the returned ciphertext page to
397 * release the bounce buffer and the encryption context.
398 *
399 * Return: An allocated page with the encrypted content on success. Else, an
400 * error value or NULL.
401 */
402struct page *ext4_encrypt(struct inode *inode,
403 struct page *plaintext_page)
404{
405 struct ext4_crypto_ctx *ctx;
406 struct page *ciphertext_page = NULL;
407 int err;
408
409 BUG_ON(!PageLocked(plaintext_page));
410
411 ctx = ext4_get_crypto_ctx(inode);
412 if (IS_ERR(ctx))
413 return (struct page *) ctx;
414
415 /* The encryption operation will require a bounce page. */
416 ciphertext_page = alloc_page(GFP_NOFS);
417 if (!ciphertext_page) {
418 /* This is a potential bottleneck, but at least we'll have
419 * forward progress. */
420 ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
421 GFP_NOFS);
422 if (WARN_ON_ONCE(!ciphertext_page)) {
423 ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
424 GFP_NOFS | __GFP_WAIT);
425 }
426 ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
427 } else {
428 ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
429 }
430 ctx->bounce_page = ciphertext_page;
431 ctx->control_page = plaintext_page;
432 err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, plaintext_page->index,
433 plaintext_page, ciphertext_page);
434 if (err) {
435 ext4_release_crypto_ctx(ctx);
436 return ERR_PTR(err);
437 }
438 SetPagePrivate(ciphertext_page);
439 set_page_private(ciphertext_page, (unsigned long)ctx);
440 lock_page(ciphertext_page);
441 return ciphertext_page;
442}
443
444/**
445 * ext4_decrypt() - Decrypts a page in-place
446 * @ctx: The encryption context.
447 * @page: The page to decrypt. Must be locked.
448 *
449 * Decrypts page in-place using the ctx encryption context.
450 *
451 * Called from the read completion callback.
452 *
453 * Return: Zero on success, non-zero otherwise.
454 */
455int ext4_decrypt(struct ext4_crypto_ctx *ctx, struct page *page)
456{
457 BUG_ON(!PageLocked(page));
458
459 return ext4_page_crypto(ctx, page->mapping->host,
460 EXT4_DECRYPT, page->index, page, page);
461}
462
463/*
464 * Convenience function which takes care of allocating and
465 * deallocating the encryption context
466 */
467int ext4_decrypt_one(struct inode *inode, struct page *page)
468{
469 int ret;
470
471 struct ext4_crypto_ctx *ctx = ext4_get_crypto_ctx(inode);
472
473 if (!ctx)
474 return -ENOMEM;
475 ret = ext4_decrypt(ctx, page);
476 ext4_release_crypto_ctx(ctx);
477 return ret;
478}
479
480int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
481{
482 struct ext4_crypto_ctx *ctx;
483 struct page *ciphertext_page = NULL;
484 struct bio *bio;
485 ext4_lblk_t lblk = ex->ee_block;
486 ext4_fsblk_t pblk = ext4_ext_pblock(ex);
487 unsigned int len = ext4_ext_get_actual_len(ex);
488 int err = 0;
489
490 BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);
491
492 ctx = ext4_get_crypto_ctx(inode);
493 if (IS_ERR(ctx))
494 return PTR_ERR(ctx);
495
496 ciphertext_page = alloc_page(GFP_NOFS);
497 if (!ciphertext_page) {
498 /* This is a potential bottleneck, but at least we'll have
499 * forward progress. */
500 ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
501 GFP_NOFS);
502 if (WARN_ON_ONCE(!ciphertext_page)) {
503 ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
504 GFP_NOFS | __GFP_WAIT);
505 }
506 ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
507 } else {
508 ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
509 }
510 ctx->bounce_page = ciphertext_page;
511
512 while (len--) {
513 err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, lblk,
514 ZERO_PAGE(0), ciphertext_page);
515 if (err)
516 goto errout;
517
518 bio = bio_alloc(GFP_KERNEL, 1);
519 if (!bio) {
520 err = -ENOMEM;
521 goto errout;
522 }
523 bio->bi_bdev = inode->i_sb->s_bdev;
524 bio->bi_iter.bi_sector = pblk;
525 err = bio_add_page(bio, ciphertext_page,
526 inode->i_sb->s_blocksize, 0);
527 if (err) {
528 bio_put(bio);
529 goto errout;
530 }
531 err = submit_bio_wait(WRITE, bio);
532 if (err)
533 goto errout;
534 }
535 err = 0;
536errout:
537 ext4_release_crypto_ctx(ctx);
538 return err;
539}
540
541bool ext4_valid_contents_enc_mode(uint32_t mode)
542{
543 return (mode == EXT4_ENCRYPTION_MODE_AES_256_XTS);
544}
545
546/**
547 * ext4_validate_encryption_key_size() - Validate the encryption key size
548 * @mode: The key mode.
549 * @size: The key size to validate.
550 *
551 * Return: The validated key size for @mode. Zero if invalid.
552 */
553uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
554{
555 if (size == ext4_encryption_key_size(mode))
556 return size;
557 return 0;
558}
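
[Annotation] One detail worth pulling out of crypto.c above: ext4_page_crypto() derives the AES-XTS tweak for each page from the page index alone, copying the index into the low bytes of a 16-byte buffer and zeroing the remainder. A standalone sketch of that construction, assuming a 64-bit page index (pgoff_t width is architecture dependent):

/*
 * Sketch (not kernel code): how ext4_page_crypto() above builds the
 * 16-byte XTS tweak -- the page index in native endianness followed
 * by zero padding.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define XTS_TWEAK_SIZE 16

static void build_xts_tweak(uint64_t page_index, uint8_t tweak[XTS_TWEAK_SIZE])
{
	memcpy(tweak, &page_index, sizeof(page_index));
	memset(tweak + sizeof(page_index), 0,
	       XTS_TWEAK_SIZE - sizeof(page_index));
}

int main(void)
{
	uint8_t tweak[XTS_TWEAK_SIZE];
	int i;

	build_xts_tweak(5, tweak);	/* tweak for page index 5 */
	for (i = 0; i < XTS_TWEAK_SIZE; i++)
		printf("%02x", tweak[i]);
	printf("\n");
	return 0;
}
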
diff --git a/fs/ext4/crypto_fname.c b/fs/ext4/crypto_fname.c
new file mode 100644
index 000000000000..ca2f5948c1ac
--- /dev/null
+++ b/fs/ext4/crypto_fname.c
@@ -0,0 +1,709 @@
1/*
2 * linux/fs/ext4/crypto_fname.c
3 *
4 * Copyright (C) 2015, Google, Inc.
5 *
6 * This contains functions for filename crypto management in ext4
7 *
8 * Written by Uday Savagaonkar, 2014.
9 *
10 * This has not yet undergone a rigorous security audit.
11 *
12 */
13
14#include <crypto/hash.h>
15#include <crypto/sha.h>
16#include <keys/encrypted-type.h>
17#include <keys/user-type.h>
18#include <linux/crypto.h>
19#include <linux/gfp.h>
20#include <linux/kernel.h>
21#include <linux/key.h>
22#include <linux/key.h>
23#include <linux/list.h>
24#include <linux/mempool.h>
25#include <linux/random.h>
26#include <linux/scatterlist.h>
27#include <linux/spinlock_types.h>
28
29#include "ext4.h"
30#include "ext4_crypto.h"
31#include "xattr.h"
32
33/**
34 * ext4_dir_crypt_complete() -
35 */
36static void ext4_dir_crypt_complete(struct crypto_async_request *req, int res)
37{
38 struct ext4_completion_result *ecr = req->data;
39
40 if (res == -EINPROGRESS)
41 return;
42 ecr->res = res;
43 complete(&ecr->completion);
44}
45
46bool ext4_valid_filenames_enc_mode(uint32_t mode)
47{
48 return (mode == EXT4_ENCRYPTION_MODE_AES_256_CTS);
49}
50
51/**
52 * ext4_fname_encrypt() -
53 *
54 * This function encrypts the input filename, and returns the length of the
55 * ciphertext. Errors are returned as negative numbers. We trust the caller to
56 * allocate sufficient memory to oname string.
57 */
58static int ext4_fname_encrypt(struct ext4_fname_crypto_ctx *ctx,
59 const struct qstr *iname,
60 struct ext4_str *oname)
61{
62 u32 ciphertext_len;
63 struct ablkcipher_request *req = NULL;
64 DECLARE_EXT4_COMPLETION_RESULT(ecr);
65 struct crypto_ablkcipher *tfm = ctx->ctfm;
66 int res = 0;
67 char iv[EXT4_CRYPTO_BLOCK_SIZE];
68 struct scatterlist sg[1];
69 char *workbuf;
70
71 if (iname->len <= 0 || iname->len > ctx->lim)
72 return -EIO;
73
74 ciphertext_len = (iname->len < EXT4_CRYPTO_BLOCK_SIZE) ?
75 EXT4_CRYPTO_BLOCK_SIZE : iname->len;
76 ciphertext_len = (ciphertext_len > ctx->lim)
77 ? ctx->lim : ciphertext_len;
78
79 /* Allocate request */
80 req = ablkcipher_request_alloc(tfm, GFP_NOFS);
81 if (!req) {
82 printk_ratelimited(
83 KERN_ERR "%s: crypto_request_alloc() failed\n", __func__);
84 return -ENOMEM;
85 }
86 ablkcipher_request_set_callback(req,
87 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
88 ext4_dir_crypt_complete, &ecr);
89
90 /* Map the workpage */
91 workbuf = kmap(ctx->workpage);
92
93 /* Copy the input */
94 memcpy(workbuf, iname->name, iname->len);
95 if (iname->len < ciphertext_len)
96 memset(workbuf + iname->len, 0, ciphertext_len - iname->len);
97
98 /* Initialize IV */
99 memset(iv, 0, EXT4_CRYPTO_BLOCK_SIZE);
100
101 /* Create encryption request */
102 sg_init_table(sg, 1);
103 sg_set_page(sg, ctx->workpage, PAGE_SIZE, 0);
104 ablkcipher_request_set_crypt(req, sg, sg, iname->len, iv);
105 res = crypto_ablkcipher_encrypt(req);
106 if (res == -EINPROGRESS || res == -EBUSY) {
107 BUG_ON(req->base.data != &ecr);
108 wait_for_completion(&ecr.completion);
109 res = ecr.res;
110 }
111 if (res >= 0) {
112 /* Copy the result to output */
113 memcpy(oname->name, workbuf, ciphertext_len);
114 res = ciphertext_len;
115 }
116 kunmap(ctx->workpage);
117 ablkcipher_request_free(req);
118 if (res < 0) {
119 printk_ratelimited(
120 KERN_ERR "%s: Error (error code %d)\n", __func__, res);
121 }
122 oname->len = ciphertext_len;
123 return res;
124}
125
126/*
127 * ext4_fname_decrypt()
128 * This function decrypts the input filename, and returns
129 * the length of the plaintext.
130 * Errors are returned as negative numbers.
131 * We trust the caller to allocate sufficient memory to oname string.
132 */
133static int ext4_fname_decrypt(struct ext4_fname_crypto_ctx *ctx,
134 const struct ext4_str *iname,
135 struct ext4_str *oname)
136{
137 struct ext4_str tmp_in[2], tmp_out[1];
138 struct ablkcipher_request *req = NULL;
139 DECLARE_EXT4_COMPLETION_RESULT(ecr);
140 struct scatterlist sg[1];
141 struct crypto_ablkcipher *tfm = ctx->ctfm;
142 int res = 0;
143 char iv[EXT4_CRYPTO_BLOCK_SIZE];
144 char *workbuf;
145
146 if (iname->len <= 0 || iname->len > ctx->lim)
147 return -EIO;
148
149 tmp_in[0].name = iname->name;
150 tmp_in[0].len = iname->len;
151 tmp_out[0].name = oname->name;
152
153 /* Allocate request */
154 req = ablkcipher_request_alloc(tfm, GFP_NOFS);
155 if (!req) {
156 printk_ratelimited(
157 KERN_ERR "%s: crypto_request_alloc() failed\n", __func__);
158 return -ENOMEM;
159 }
160 ablkcipher_request_set_callback(req,
161 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
162 ext4_dir_crypt_complete, &ecr);
163
164 /* Map the workpage */
165 workbuf = kmap(ctx->workpage);
166
167 /* Copy the input */
168 memcpy(workbuf, iname->name, iname->len);
169
170 /* Initialize IV */
171 memset(iv, 0, EXT4_CRYPTO_BLOCK_SIZE);
172
173 /* Create encryption request */
174 sg_init_table(sg, 1);
175 sg_set_page(sg, ctx->workpage, PAGE_SIZE, 0);
176 ablkcipher_request_set_crypt(req, sg, sg, iname->len, iv);
177 res = crypto_ablkcipher_decrypt(req);
178 if (res == -EINPROGRESS || res == -EBUSY) {
179 BUG_ON(req->base.data != &ecr);
180 wait_for_completion(&ecr.completion);
181 res = ecr.res;
182 }
183 if (res >= 0) {
184 /* Copy the result to output */
185 memcpy(oname->name, workbuf, iname->len);
186 res = iname->len;
187 }
188 kunmap(ctx->workpage);
189 ablkcipher_request_free(req);
190 if (res < 0) {
191 printk_ratelimited(
192 KERN_ERR "%s: Error in ext4_fname_encrypt (error code %d)\n",
193 __func__, res);
194 return res;
195 }
196
197 oname->len = strnlen(oname->name, iname->len);
198 return oname->len;
199}
200
201/**
202 * ext4_fname_encode_digest() -
203 *
204 * Encodes the input digest using characters from the set [a-zA-Z0-9_+].
205 * The encoded string is roughly 4/3 times the size of the input string.
206 */
207int ext4_fname_encode_digest(char *dst, char *src, u32 len)
208{
209 static const char *lookup_table =
210 "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_+";
211 u32 current_chunk, num_chunks, i;
212 char tmp_buf[3];
213 u32 c0, c1, c2, c3;
214
215 current_chunk = 0;
216 num_chunks = len/3;
217 for (i = 0; i < num_chunks; i++) {
218 c0 = src[3*i] & 0x3f;
219 c1 = (((src[3*i]>>6)&0x3) | ((src[3*i+1] & 0xf)<<2)) & 0x3f;
220 c2 = (((src[3*i+1]>>4)&0xf) | ((src[3*i+2] & 0x3)<<4)) & 0x3f;
221 c3 = (src[3*i+2]>>2) & 0x3f;
222 dst[4*i] = lookup_table[c0];
223 dst[4*i+1] = lookup_table[c1];
224 dst[4*i+2] = lookup_table[c2];
225 dst[4*i+3] = lookup_table[c3];
226 }
227 if (i*3 < len) {
228 memset(tmp_buf, 0, 3);
229 memcpy(tmp_buf, &src[3*i], len-3*i);
230 c0 = tmp_buf[0] & 0x3f;
231 c1 = (((tmp_buf[0]>>6)&0x3) | ((tmp_buf[1] & 0xf)<<2)) & 0x3f;
232 c2 = (((tmp_buf[1]>>4)&0xf) | ((tmp_buf[2] & 0x3)<<4)) & 0x3f;
233 c3 = (tmp_buf[2]>>2) & 0x3f;
234 dst[4*i] = lookup_table[c0];
235 dst[4*i+1] = lookup_table[c1];
236 dst[4*i+2] = lookup_table[c2];
237 dst[4*i+3] = lookup_table[c3];
238 i++;
239 }
240 return (i * 4);
241}
242
243/**
244 * ext4_fname_hash() -
245 *
246 * This function computes the hash of the input filename, and sets the output
247 * buffer to the *encoded* digest. It returns the length of the digest as its
248 * return value. Errors are returned as negative numbers. We trust the caller
249 * to allocate sufficient memory to oname string.
250 */
251static int ext4_fname_hash(struct ext4_fname_crypto_ctx *ctx,
252 const struct ext4_str *iname,
253 struct ext4_str *oname)
254{
255 struct scatterlist sg;
256 struct hash_desc desc = {
257 .tfm = (struct crypto_hash *)ctx->htfm,
258 .flags = CRYPTO_TFM_REQ_MAY_SLEEP
259 };
260 int res = 0;
261
262 if (iname->len <= EXT4_FNAME_CRYPTO_DIGEST_SIZE) {
263 res = ext4_fname_encode_digest(oname->name, iname->name,
264 iname->len);
265 oname->len = res;
266 return res;
267 }
268
269 sg_init_one(&sg, iname->name, iname->len);
270 res = crypto_hash_init(&desc);
271 if (res) {
272 printk(KERN_ERR
273 "%s: Error initializing crypto hash; res = [%d]\n",
274 __func__, res);
275 goto out;
276 }
277 res = crypto_hash_update(&desc, &sg, iname->len);
278 if (res) {
279 printk(KERN_ERR
280 "%s: Error updating crypto hash; res = [%d]\n",
281 __func__, res);
282 goto out;
283 }
284 res = crypto_hash_final(&desc,
285 &oname->name[EXT4_FNAME_CRYPTO_DIGEST_SIZE]);
286 if (res) {
287 printk(KERN_ERR
288 "%s: Error finalizing crypto hash; res = [%d]\n",
289 __func__, res);
290 goto out;
291 }
292 /* Encode the digest as a printable string--this will increase the
293 * size of the digest */
294 oname->name[0] = 'I';
295 res = ext4_fname_encode_digest(oname->name+1,
296 &oname->name[EXT4_FNAME_CRYPTO_DIGEST_SIZE],
297 EXT4_FNAME_CRYPTO_DIGEST_SIZE) + 1;
298 oname->len = res;
299out:
300 return res;
301}
302
303/**
304 * ext4_free_fname_crypto_ctx() -
305 *
306 * Frees up a crypto context.
307 */
308void ext4_free_fname_crypto_ctx(struct ext4_fname_crypto_ctx *ctx)
309{
310 if (ctx == NULL || IS_ERR(ctx))
311 return;
312
313 if (ctx->ctfm && !IS_ERR(ctx->ctfm))
314 crypto_free_ablkcipher(ctx->ctfm);
315 if (ctx->htfm && !IS_ERR(ctx->htfm))
316 crypto_free_hash(ctx->htfm);
317 if (ctx->workpage && !IS_ERR(ctx->workpage))
318 __free_page(ctx->workpage);
319 kfree(ctx);
320}
321
322/**
323 * ext4_put_fname_crypto_ctx() -
324 *
325 * Return: The crypto context onto free list. If the free list is above a
326 * threshold, completely frees up the context, and returns the memory.
327 *
328 * TODO: Currently we directly free the crypto context. Eventually we should
329 * add code it to return to free list. Such an approach will increase
330 * efficiency of directory lookup.
331 */
332void ext4_put_fname_crypto_ctx(struct ext4_fname_crypto_ctx **ctx)
333{
334 if (*ctx == NULL || IS_ERR(*ctx))
335 return;
336 ext4_free_fname_crypto_ctx(*ctx);
337 *ctx = NULL;
338}
339
340/**
341 * ext4_search_fname_crypto_ctx() -
342 */
343static struct ext4_fname_crypto_ctx *ext4_search_fname_crypto_ctx(
344 const struct ext4_encryption_key *key)
345{
346 return NULL;
347}
348
349/**
350 * ext4_alloc_fname_crypto_ctx() -
351 */
352struct ext4_fname_crypto_ctx *ext4_alloc_fname_crypto_ctx(
353 const struct ext4_encryption_key *key)
354{
355 struct ext4_fname_crypto_ctx *ctx;
356
357 ctx = kmalloc(sizeof(struct ext4_fname_crypto_ctx), GFP_NOFS);
358 if (ctx == NULL)
359 return ERR_PTR(-ENOMEM);
360 if (key->mode == EXT4_ENCRYPTION_MODE_INVALID) {
361 /* This will automatically set key mode to invalid
362 * As enum for ENCRYPTION_MODE_INVALID is zero */
363 memset(&ctx->key, 0, sizeof(ctx->key));
364 } else {
365 memcpy(&ctx->key, key, sizeof(struct ext4_encryption_key));
366 }
367 ctx->has_valid_key = (EXT4_ENCRYPTION_MODE_INVALID == key->mode)
368 ? 0 : 1;
369 ctx->ctfm_key_is_ready = 0;
370 ctx->ctfm = NULL;
371 ctx->htfm = NULL;
372 ctx->workpage = NULL;
373 return ctx;
374}
375
376/**
377 * ext4_get_fname_crypto_ctx() -
378 *
379 * Allocates a free crypto context and initializes it to hold
380 * the crypto material for the inode.
381 *
382 * Return: NULL if not encrypted. Error value on error. Valid pointer otherwise.
383 */
384struct ext4_fname_crypto_ctx *ext4_get_fname_crypto_ctx(
385 struct inode *inode, u32 max_ciphertext_len)
386{
387 struct ext4_fname_crypto_ctx *ctx;
388 struct ext4_inode_info *ei = EXT4_I(inode);
389 int res;
390
391 /* Check if the crypto policy is set on the inode */
392 res = ext4_encrypted_inode(inode);
393 if (res == 0)
394 return NULL;
395
396 if (!ext4_has_encryption_key(inode))
397 ext4_generate_encryption_key(inode);
398
399 /* Get a crypto context based on the key.
400 * A new context is allocated if no context matches the requested key.
401 */
402 ctx = ext4_search_fname_crypto_ctx(&(ei->i_encryption_key));
403 if (ctx == NULL)
404 ctx = ext4_alloc_fname_crypto_ctx(&(ei->i_encryption_key));
405 if (IS_ERR(ctx))
406 return ctx;
407
408 if (ctx->has_valid_key) {
409 if (ctx->key.mode != EXT4_ENCRYPTION_MODE_AES_256_CTS) {
410 printk_once(KERN_WARNING
411 "ext4: unsupported key mode %d\n",
412 ctx->key.mode);
413 return ERR_PTR(-ENOKEY);
414 }
415
416 /* As a first cut, we will allocate new tfm in every call.
417 * later, we will keep the tfm around, in case the key gets
418 * re-used */
419 if (ctx->ctfm == NULL) {
420 ctx->ctfm = crypto_alloc_ablkcipher("cts(cbc(aes))",
421 0, 0);
422 }
423 if (IS_ERR(ctx->ctfm)) {
424 res = PTR_ERR(ctx->ctfm);
425 printk(
426 KERN_DEBUG "%s: error (%d) allocating crypto tfm\n",
427 __func__, res);
428 ctx->ctfm = NULL;
429 ext4_put_fname_crypto_ctx(&ctx);
430 return ERR_PTR(res);
431 }
432 if (ctx->ctfm == NULL) {
433 printk(
434 KERN_DEBUG "%s: could not allocate crypto tfm\n",
435 __func__);
436 ext4_put_fname_crypto_ctx(&ctx);
437 return ERR_PTR(-ENOMEM);
438 }
439 if (ctx->workpage == NULL)
440 ctx->workpage = alloc_page(GFP_NOFS);
441 if (IS_ERR(ctx->workpage)) {
442 res = PTR_ERR(ctx->workpage);
443 printk(
444 KERN_DEBUG "%s: error (%d) allocating work page\n",
445 __func__, res);
446 ctx->workpage = NULL;
447 ext4_put_fname_crypto_ctx(&ctx);
448 return ERR_PTR(res);
449 }
450 if (ctx->workpage == NULL) {
451 printk(
452 KERN_DEBUG "%s: could not allocate work page\n",
453 __func__);
454 ext4_put_fname_crypto_ctx(&ctx);
455 return ERR_PTR(-ENOMEM);
456 }
457 ctx->lim = max_ciphertext_len;
458 crypto_ablkcipher_clear_flags(ctx->ctfm, ~0);
459 crypto_tfm_set_flags(crypto_ablkcipher_tfm(ctx->ctfm),
460 CRYPTO_TFM_REQ_WEAK_KEY);
461
462 /* If we are lucky, we will get a context that is already
463 * set up with the right key. Else, we will have to
464 * set the key */
465 if (!ctx->ctfm_key_is_ready) {
466 /* Since our crypto objectives for filename encryption
467 * are pretty weak,
468 * we directly use the inode master key */
469 res = crypto_ablkcipher_setkey(ctx->ctfm,
470 ctx->key.raw, ctx->key.size);
471 if (res) {
472 ext4_put_fname_crypto_ctx(&ctx);
473 return ERR_PTR(-EIO);
474 }
475 ctx->ctfm_key_is_ready = 1;
476 } else {
477 /* In the current implementation, key should never be
478 * marked "ready" for a context that has just been
479 * allocated. So we should never reach here */
480 BUG();
481 }
482 }
483 if (ctx->htfm == NULL)
484 ctx->htfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
485 if (IS_ERR(ctx->htfm)) {
486 res = PTR_ERR(ctx->htfm);
487 printk(KERN_DEBUG "%s: error (%d) allocating hash tfm\n",
488 __func__, res);
489 ctx->htfm = NULL;
490 ext4_put_fname_crypto_ctx(&ctx);
491 return ERR_PTR(res);
492 }
493 if (ctx->htfm == NULL) {
494 printk(KERN_DEBUG "%s: could not allocate hash tfm\n",
495 __func__);
496 ext4_put_fname_crypto_ctx(&ctx);
497 return ERR_PTR(-ENOMEM);
498 }
499
500 return ctx;
501}
502
503/**
504 * ext4_fname_crypto_round_up() -
505 *
506 * Return: The next multiple of block size
507 */
508u32 ext4_fname_crypto_round_up(u32 size, u32 blksize)
509{
510 return ((size+blksize-1)/blksize)*blksize;
511}
512
513/**
514 * ext4_fname_crypto_namelen_on_disk() -
515 */
516int ext4_fname_crypto_namelen_on_disk(struct ext4_fname_crypto_ctx *ctx,
517 u32 namelen)
518{
519 u32 ciphertext_len;
520
521 if (ctx == NULL)
522 return -EIO;
523 if (!(ctx->has_valid_key))
524 return -EACCES;
525 ciphertext_len = (namelen < EXT4_CRYPTO_BLOCK_SIZE) ?
526 EXT4_CRYPTO_BLOCK_SIZE : namelen;
527 ciphertext_len = (ciphertext_len > ctx->lim)
528 ? ctx->lim : ciphertext_len;
529 return (int) ciphertext_len;
530}
531
532/**
 533 * ext4_fname_crypto_alloc_buffer() -
534 *
535 * Allocates an output buffer that is sufficient for the crypto operation
536 * specified by the context and the direction.
537 */
538int ext4_fname_crypto_alloc_buffer(struct ext4_fname_crypto_ctx *ctx,
539 u32 ilen, struct ext4_str *crypto_str)
540{
541 unsigned int olen;
542
543 if (!ctx)
544 return -EIO;
545 olen = ext4_fname_crypto_round_up(ilen, EXT4_CRYPTO_BLOCK_SIZE);
546 crypto_str->len = olen;
547 if (olen < EXT4_FNAME_CRYPTO_DIGEST_SIZE*2)
548 olen = EXT4_FNAME_CRYPTO_DIGEST_SIZE*2;
549 /* Allocated buffer can hold one more character to null-terminate the
550 * string */
551 crypto_str->name = kmalloc(olen+1, GFP_NOFS);
552 if (!(crypto_str->name))
553 return -ENOMEM;
554 return 0;
555}
556
557/**
558 * ext4_fname_crypto_free_buffer() -
559 *
560 * Frees the buffer allocated for crypto operation.
561 */
562void ext4_fname_crypto_free_buffer(struct ext4_str *crypto_str)
563{
564 if (!crypto_str)
565 return;
566 kfree(crypto_str->name);
567 crypto_str->name = NULL;
568}
569
570/**
571 * ext4_fname_disk_to_usr() - converts a filename from disk space to user space
572 */
573int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
574 const struct ext4_str *iname,
575 struct ext4_str *oname)
576{
577 if (ctx == NULL)
578 return -EIO;
579 if (iname->len < 3) {
580 /*Check for . and .. */
581 if (iname->name[0] == '.' && iname->name[iname->len-1] == '.') {
582 oname->name[0] = '.';
583 oname->name[iname->len-1] = '.';
584 oname->len = iname->len;
585 return oname->len;
586 }
587 }
588 if (ctx->has_valid_key)
589 return ext4_fname_decrypt(ctx, iname, oname);
590 else
591 return ext4_fname_hash(ctx, iname, oname);
592}
593
594int ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
595 const struct ext4_dir_entry_2 *de,
596 struct ext4_str *oname)
597{
598 struct ext4_str iname = {.name = (unsigned char *) de->name,
599 .len = de->name_len };
600
601 return _ext4_fname_disk_to_usr(ctx, &iname, oname);
602}
603
604
605/**
606 * ext4_fname_usr_to_disk() - converts a filename from user space to disk space
607 */
608int ext4_fname_usr_to_disk(struct ext4_fname_crypto_ctx *ctx,
609 const struct qstr *iname,
610 struct ext4_str *oname)
611{
612 int res;
613
614 if (ctx == NULL)
615 return -EIO;
616 if (iname->len < 3) {
617 /*Check for . and .. */
618 if (iname->name[0] == '.' &&
619 iname->name[iname->len-1] == '.') {
620 oname->name[0] = '.';
621 oname->name[iname->len-1] = '.';
622 oname->len = iname->len;
623 return oname->len;
624 }
625 }
626 if (ctx->has_valid_key) {
627 res = ext4_fname_encrypt(ctx, iname, oname);
628 return res;
629 }
630 /* Without a proper key, a user is not allowed to modify the filenames
631 * in a directory. Consequently, a user space name cannot be mapped to
632 * a disk-space name */
633 return -EACCES;
634}
635
636/*
637 * Calculate the htree hash from a filename from user space
638 */
639int ext4_fname_usr_to_hash(struct ext4_fname_crypto_ctx *ctx,
640 const struct qstr *iname,
641 struct dx_hash_info *hinfo)
642{
643 struct ext4_str tmp, tmp2;
644 int ret = 0;
645
646 if (!ctx || !ctx->has_valid_key ||
647 ((iname->name[0] == '.') &&
648 ((iname->len == 1) ||
649 ((iname->name[1] == '.') && (iname->len == 2))))) {
650 ext4fs_dirhash(iname->name, iname->len, hinfo);
651 return 0;
652 }
653
654 /* First encrypt the plaintext name */
655 ret = ext4_fname_crypto_alloc_buffer(ctx, iname->len, &tmp);
656 if (ret < 0)
657 return ret;
658
659 ret = ext4_fname_encrypt(ctx, iname, &tmp);
660 if (ret < 0)
661 goto out;
662
663 tmp2.len = (4 * ((EXT4_FNAME_CRYPTO_DIGEST_SIZE + 2) / 3)) + 1;
664 tmp2.name = kmalloc(tmp2.len + 1, GFP_KERNEL);
665 if (tmp2.name == NULL) {
666 ret = -ENOMEM;
667 goto out;
668 }
669
670 ret = ext4_fname_hash(ctx, &tmp, &tmp2);
671 if (ret > 0)
672 ext4fs_dirhash(tmp2.name, tmp2.len, hinfo);
673 ext4_fname_crypto_free_buffer(&tmp2);
674out:
675 ext4_fname_crypto_free_buffer(&tmp);
676 return ret;
677}
678
679/**
 680 * ext4_fname_disk_to_hash() - converts a filename from disk space to htree-access string
681 */
682int ext4_fname_disk_to_hash(struct ext4_fname_crypto_ctx *ctx,
683 const struct ext4_dir_entry_2 *de,
684 struct dx_hash_info *hinfo)
685{
686 struct ext4_str iname = {.name = (unsigned char *) de->name,
687 .len = de->name_len};
688 struct ext4_str tmp;
689 int ret;
690
691 if (!ctx ||
692 ((iname.name[0] == '.') &&
693 ((iname.len == 1) ||
694 ((iname.name[1] == '.') && (iname.len == 2))))) {
695 ext4fs_dirhash(iname.name, iname.len, hinfo);
696 return 0;
697 }
698
699 tmp.len = (4 * ((EXT4_FNAME_CRYPTO_DIGEST_SIZE + 2) / 3)) + 1;
700 tmp.name = kmalloc(tmp.len + 1, GFP_KERNEL);
701 if (tmp.name == NULL)
702 return -ENOMEM;
703
704 ret = ext4_fname_hash(ctx, &iname, &tmp);
705 if (ret > 0)
706 ext4fs_dirhash(tmp.name, tmp.len, hinfo);
707 ext4_fname_crypto_free_buffer(&tmp);
708 return ret;
709}
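
[Annotation] The length rules in crypto_fname.c above are easy to restate: a name shorter than one cipher block is padded up to EXT4_CRYPTO_BLOCK_SIZE before CTS encryption, the result is clamped to the context limit, and output buffers are rounded up to a block multiple. A small userspace sketch of the same arithmetic; the block size is assumed to be the 16-byte AES block size, which is defined in ext4_crypto.h and not shown in this excerpt:

/*
 * Sketch of the on-disk name length rule used by ext4_fname_encrypt()
 * and ext4_fname_crypto_namelen_on_disk() above.
 */
#include <stdint.h>
#include <stdio.h>

#define CRYPTO_BLOCK_SIZE 16	/* assumed value of EXT4_CRYPTO_BLOCK_SIZE */

static uint32_t round_up_to_block(uint32_t size, uint32_t blksize)
{
	return ((size + blksize - 1) / blksize) * blksize;
}

static uint32_t namelen_on_disk(uint32_t namelen, uint32_t lim)
{
	uint32_t len = (namelen < CRYPTO_BLOCK_SIZE) ? CRYPTO_BLOCK_SIZE : namelen;

	return (len > lim) ? lim : len;
}

int main(void)
{
	/* a 1-byte name occupies one full block; a 200-byte name keeps
	 * its length and its buffer rounds up to the next block. */
	printf("%u %u %u\n",
	       namelen_on_disk(1, 255),				/* 16 */
	       namelen_on_disk(200, 255),			/* 200 */
	       round_up_to_block(200, CRYPTO_BLOCK_SIZE));	/* 208 */
	return 0;
}
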
diff --git a/fs/ext4/crypto_key.c b/fs/ext4/crypto_key.c
new file mode 100644
index 000000000000..c8392af8abbb
--- /dev/null
+++ b/fs/ext4/crypto_key.c
@@ -0,0 +1,165 @@
1/*
2 * linux/fs/ext4/crypto_key.c
3 *
4 * Copyright (C) 2015, Google, Inc.
5 *
6 * This contains encryption key functions for ext4
7 *
8 * Written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar, 2015.
9 */
10
11#include <keys/encrypted-type.h>
12#include <keys/user-type.h>
13#include <linux/random.h>
14#include <linux/scatterlist.h>
15#include <uapi/linux/keyctl.h>
16
17#include "ext4.h"
18#include "xattr.h"
19
20static void derive_crypt_complete(struct crypto_async_request *req, int rc)
21{
22 struct ext4_completion_result *ecr = req->data;
23
24 if (rc == -EINPROGRESS)
25 return;
26
27 ecr->res = rc;
28 complete(&ecr->completion);
29}
30
31/**
32 * ext4_derive_key_aes() - Derive a key using AES-128-ECB
 33 * @deriving_key: Encryption key used for derivation.
34 * @source_key: Source key to which to apply derivation.
35 * @derived_key: Derived key.
36 *
37 * Return: Zero on success; non-zero otherwise.
38 */
39static int ext4_derive_key_aes(char deriving_key[EXT4_AES_128_ECB_KEY_SIZE],
40 char source_key[EXT4_AES_256_XTS_KEY_SIZE],
41 char derived_key[EXT4_AES_256_XTS_KEY_SIZE])
42{
43 int res = 0;
44 struct ablkcipher_request *req = NULL;
45 DECLARE_EXT4_COMPLETION_RESULT(ecr);
46 struct scatterlist src_sg, dst_sg;
47 struct crypto_ablkcipher *tfm = crypto_alloc_ablkcipher("ecb(aes)", 0,
48 0);
49
50 if (IS_ERR(tfm)) {
51 res = PTR_ERR(tfm);
52 tfm = NULL;
53 goto out;
54 }
55 crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
56 req = ablkcipher_request_alloc(tfm, GFP_NOFS);
57 if (!req) {
58 res = -ENOMEM;
59 goto out;
60 }
61 ablkcipher_request_set_callback(req,
62 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
63 derive_crypt_complete, &ecr);
64 res = crypto_ablkcipher_setkey(tfm, deriving_key,
65 EXT4_AES_128_ECB_KEY_SIZE);
66 if (res < 0)
67 goto out;
68 sg_init_one(&src_sg, source_key, EXT4_AES_256_XTS_KEY_SIZE);
69 sg_init_one(&dst_sg, derived_key, EXT4_AES_256_XTS_KEY_SIZE);
70 ablkcipher_request_set_crypt(req, &src_sg, &dst_sg,
71 EXT4_AES_256_XTS_KEY_SIZE, NULL);
72 res = crypto_ablkcipher_encrypt(req);
73 if (res == -EINPROGRESS || res == -EBUSY) {
74 BUG_ON(req->base.data != &ecr);
75 wait_for_completion(&ecr.completion);
76 res = ecr.res;
77 }
78
79out:
80 if (req)
81 ablkcipher_request_free(req);
82 if (tfm)
83 crypto_free_ablkcipher(tfm);
84 return res;
85}
86
87/**
88 * ext4_generate_encryption_key() - generates an encryption key
89 * @inode: The inode to generate the encryption key for.
90 */
91int ext4_generate_encryption_key(struct inode *inode)
92{
93 struct ext4_inode_info *ei = EXT4_I(inode);
94 struct ext4_encryption_key *crypt_key = &ei->i_encryption_key;
95 char full_key_descriptor[EXT4_KEY_DESC_PREFIX_SIZE +
96 (EXT4_KEY_DESCRIPTOR_SIZE * 2) + 1];
97 struct key *keyring_key = NULL;
98 struct ext4_encryption_key *master_key;
99 struct ext4_encryption_context ctx;
100 struct user_key_payload *ukp;
101 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
102 int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
103 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
104 &ctx, sizeof(ctx));
105
106 if (res != sizeof(ctx)) {
107 if (res > 0)
108 res = -EINVAL;
109 goto out;
110 }
111 res = 0;
112
113 if (S_ISREG(inode->i_mode))
114 crypt_key->mode = ctx.contents_encryption_mode;
115 else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
116 crypt_key->mode = ctx.filenames_encryption_mode;
117 else {
118 printk(KERN_ERR "ext4 crypto: Unsupported inode type.\n");
119 BUG();
120 }
121 crypt_key->size = ext4_encryption_key_size(crypt_key->mode);
122 BUG_ON(!crypt_key->size);
123 if (DUMMY_ENCRYPTION_ENABLED(sbi)) {
124 memset(crypt_key->raw, 0x42, EXT4_AES_256_XTS_KEY_SIZE);
125 goto out;
126 }
127 memcpy(full_key_descriptor, EXT4_KEY_DESC_PREFIX,
128 EXT4_KEY_DESC_PREFIX_SIZE);
129 sprintf(full_key_descriptor + EXT4_KEY_DESC_PREFIX_SIZE,
130 "%*phN", EXT4_KEY_DESCRIPTOR_SIZE,
131 ctx.master_key_descriptor);
132 full_key_descriptor[EXT4_KEY_DESC_PREFIX_SIZE +
133 (2 * EXT4_KEY_DESCRIPTOR_SIZE)] = '\0';
134 keyring_key = request_key(&key_type_logon, full_key_descriptor, NULL);
135 if (IS_ERR(keyring_key)) {
136 res = PTR_ERR(keyring_key);
137 keyring_key = NULL;
138 goto out;
139 }
140 BUG_ON(keyring_key->type != &key_type_logon);
141 ukp = ((struct user_key_payload *)keyring_key->payload.data);
142 if (ukp->datalen != sizeof(struct ext4_encryption_key)) {
143 res = -EINVAL;
144 goto out;
145 }
146 master_key = (struct ext4_encryption_key *)ukp->data;
147 BUILD_BUG_ON(EXT4_AES_128_ECB_KEY_SIZE !=
148 EXT4_KEY_DERIVATION_NONCE_SIZE);
149 BUG_ON(master_key->size != EXT4_AES_256_XTS_KEY_SIZE);
150 res = ext4_derive_key_aes(ctx.nonce, master_key->raw, crypt_key->raw);
151out:
152 if (keyring_key)
153 key_put(keyring_key);
154 if (res < 0)
155 crypt_key->mode = EXT4_ENCRYPTION_MODE_INVALID;
156 return res;
157}
158
159int ext4_has_encryption_key(struct inode *inode)
160{
161 struct ext4_inode_info *ei = EXT4_I(inode);
162 struct ext4_encryption_key *crypt_key = &ei->i_encryption_key;
163
164 return (crypt_key->mode != EXT4_ENCRYPTION_MODE_INVALID);
165}
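
[Annotation] ext4_derive_key_aes() above uses the 16-byte per-inode nonce from the encryption xattr as an AES-128-ECB key and encrypts the 64-byte master key with it to obtain the per-inode key. A userspace sketch of the same derivation, using OpenSSL's EVP interface purely for illustration (the kernel code above uses the in-kernel "ecb(aes)" cipher):

/*
 * Userspace sketch of the derivation in ext4_derive_key_aes():
 * derived = AES-128-ECB(key = nonce, plaintext = master key).
 * Constants mirror the names used in the diff above.
 */
#include <openssl/evp.h>
#include <stdio.h>
#include <string.h>

#define NONCE_SIZE	16	/* EXT4_KEY_DERIVATION_NONCE_SIZE */
#define XTS_KEY_SIZE	64	/* EXT4_AES_256_XTS_KEY_SIZE */

static int derive_key_aes(const unsigned char nonce[NONCE_SIZE],
			  const unsigned char master[XTS_KEY_SIZE],
			  unsigned char derived[XTS_KEY_SIZE])
{
	EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
	int outl = 0, tmpl = 0, ok;

	if (!ctx)
		return -1;
	ok = EVP_EncryptInit_ex(ctx, EVP_aes_128_ecb(), NULL, nonce, NULL) &&
	     EVP_CIPHER_CTX_set_padding(ctx, 0) &&
	     EVP_EncryptUpdate(ctx, derived, &outl, master, XTS_KEY_SIZE) &&
	     EVP_EncryptFinal_ex(ctx, derived + outl, &tmpl);
	EVP_CIPHER_CTX_free(ctx);
	return ok ? 0 : -1;
}

int main(void)
{
	unsigned char nonce[NONCE_SIZE] = { 0 };
	unsigned char master[XTS_KEY_SIZE] = { 0 };
	unsigned char derived[XTS_KEY_SIZE];
	int i;

	if (derive_key_aes(nonce, master, derived))
		return 1;
	for (i = 0; i < XTS_KEY_SIZE; i++)
		printf("%02x", derived[i]);
	printf("\n");
	return 0;
}
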
diff --git a/fs/ext4/crypto_policy.c b/fs/ext4/crypto_policy.c
new file mode 100644
index 000000000000..30eaf9e9864a
--- /dev/null
+++ b/fs/ext4/crypto_policy.c
@@ -0,0 +1,194 @@
1/*
2 * linux/fs/ext4/crypto_policy.c
3 *
4 * Copyright (C) 2015, Google, Inc.
5 *
6 * This contains encryption policy functions for ext4
7 *
8 * Written by Michael Halcrow, 2015.
9 */
10
11#include <linux/random.h>
12#include <linux/string.h>
13#include <linux/types.h>
14
15#include "ext4.h"
16#include "xattr.h"
17
18static int ext4_inode_has_encryption_context(struct inode *inode)
19{
20 int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
21 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, NULL, 0);
22 return (res > 0);
23}
24
25/*
26 * check whether the policy is consistent with the encryption context
27 * for the inode
28 */
29static int ext4_is_encryption_context_consistent_with_policy(
30 struct inode *inode, const struct ext4_encryption_policy *policy)
31{
32 struct ext4_encryption_context ctx;
33 int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
34 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx,
35 sizeof(ctx));
36 if (res != sizeof(ctx))
37 return 0;
38 return (memcmp(ctx.master_key_descriptor, policy->master_key_descriptor,
39 EXT4_KEY_DESCRIPTOR_SIZE) == 0 &&
40 (ctx.contents_encryption_mode ==
41 policy->contents_encryption_mode) &&
42 (ctx.filenames_encryption_mode ==
43 policy->filenames_encryption_mode));
44}
45
46static int ext4_create_encryption_context_from_policy(
47 struct inode *inode, const struct ext4_encryption_policy *policy)
48{
49 struct ext4_encryption_context ctx;
50 int res = 0;
51
52 ctx.format = EXT4_ENCRYPTION_CONTEXT_FORMAT_V1;
53 memcpy(ctx.master_key_descriptor, policy->master_key_descriptor,
54 EXT4_KEY_DESCRIPTOR_SIZE);
55 if (!ext4_valid_contents_enc_mode(policy->contents_encryption_mode)) {
56 printk(KERN_WARNING
57 "%s: Invalid contents encryption mode %d\n", __func__,
58 policy->contents_encryption_mode);
59 res = -EINVAL;
60 goto out;
61 }
62 if (!ext4_valid_filenames_enc_mode(policy->filenames_encryption_mode)) {
63 printk(KERN_WARNING
64 "%s: Invalid filenames encryption mode %d\n", __func__,
65 policy->filenames_encryption_mode);
66 res = -EINVAL;
67 goto out;
68 }
69 ctx.contents_encryption_mode = policy->contents_encryption_mode;
70 ctx.filenames_encryption_mode = policy->filenames_encryption_mode;
71 BUILD_BUG_ON(sizeof(ctx.nonce) != EXT4_KEY_DERIVATION_NONCE_SIZE);
72 get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE);
73
74 res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION,
75 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx,
76 sizeof(ctx), 0);
77out:
78 if (!res)
79 ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
80 return res;
81}
82
83int ext4_process_policy(const struct ext4_encryption_policy *policy,
84 struct inode *inode)
85{
86 if (policy->version != 0)
87 return -EINVAL;
88
89 if (!ext4_inode_has_encryption_context(inode)) {
90 if (!ext4_empty_dir(inode))
91 return -ENOTEMPTY;
92 return ext4_create_encryption_context_from_policy(inode,
93 policy);
94 }
95
96 if (ext4_is_encryption_context_consistent_with_policy(inode, policy))
97 return 0;
98
99 printk(KERN_WARNING "%s: Policy inconsistent with encryption context\n",
100 __func__);
101 return -EINVAL;
102}
103
104int ext4_get_policy(struct inode *inode, struct ext4_encryption_policy *policy)
105{
106 struct ext4_encryption_context ctx;
107
108 int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
109 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
110 &ctx, sizeof(ctx));
111 if (res != sizeof(ctx))
112 return -ENOENT;
113 if (ctx.format != EXT4_ENCRYPTION_CONTEXT_FORMAT_V1)
114 return -EINVAL;
115 policy->version = 0;
116 policy->contents_encryption_mode = ctx.contents_encryption_mode;
117 policy->filenames_encryption_mode = ctx.filenames_encryption_mode;
118 memcpy(&policy->master_key_descriptor, ctx.master_key_descriptor,
119 EXT4_KEY_DESCRIPTOR_SIZE);
120 return 0;
121}
122
123int ext4_is_child_context_consistent_with_parent(struct inode *parent,
124 struct inode *child)
125{
126 struct ext4_encryption_context parent_ctx, child_ctx;
127 int res;
128
129 if ((parent == NULL) || (child == NULL)) {
130 pr_err("parent %p child %p\n", parent, child);
131 BUG_ON(1);
132 }
133 /* no restrictions if the parent directory is not encrypted */
134 if (!ext4_encrypted_inode(parent))
135 return 1;
136 res = ext4_xattr_get(parent, EXT4_XATTR_INDEX_ENCRYPTION,
137 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
138 &parent_ctx, sizeof(parent_ctx));
139 if (res != sizeof(parent_ctx))
140 return 0;
141 /* if the child directory is not encrypted, this is always a problem */
142 if (!ext4_encrypted_inode(child))
143 return 0;
144 res = ext4_xattr_get(child, EXT4_XATTR_INDEX_ENCRYPTION,
145 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
146 &child_ctx, sizeof(child_ctx));
147 if (res != sizeof(child_ctx))
148 return 0;
149 return (memcmp(parent_ctx.master_key_descriptor,
150 child_ctx.master_key_descriptor,
151 EXT4_KEY_DESCRIPTOR_SIZE) == 0 &&
152 (parent_ctx.contents_encryption_mode ==
153 child_ctx.contents_encryption_mode) &&
154 (parent_ctx.filenames_encryption_mode ==
155 child_ctx.filenames_encryption_mode));
156}
157
158/**
159 * ext4_inherit_context() - Sets a child context from its parent
160 * @parent: Parent inode from which the context is inherited.
161 * @child: Child inode that inherits the context from @parent.
162 *
163 * Return: Zero on success, non-zero otherwise
164 */
165int ext4_inherit_context(struct inode *parent, struct inode *child)
166{
167 struct ext4_encryption_context ctx;
168 int res = ext4_xattr_get(parent, EXT4_XATTR_INDEX_ENCRYPTION,
169 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
170 &ctx, sizeof(ctx));
171
172 if (res != sizeof(ctx)) {
173 if (DUMMY_ENCRYPTION_ENABLED(EXT4_SB(parent->i_sb))) {
174 ctx.format = EXT4_ENCRYPTION_CONTEXT_FORMAT_V1;
175 ctx.contents_encryption_mode =
176 EXT4_ENCRYPTION_MODE_AES_256_XTS;
177 ctx.filenames_encryption_mode =
178 EXT4_ENCRYPTION_MODE_AES_256_CTS;
179 memset(ctx.master_key_descriptor, 0x42,
180 EXT4_KEY_DESCRIPTOR_SIZE);
181 res = 0;
182 } else {
183 goto out;
184 }
185 }
186 get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE);
187 res = ext4_xattr_set(child, EXT4_XATTR_INDEX_ENCRYPTION,
188 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx,
189 sizeof(ctx), 0);
190out:
191 if (!res)
192 ext4_set_inode_flag(child, EXT4_INODE_ENCRYPT);
193 return res;
194}
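
[Annotation] Both the policy code above and ext4_generate_encryption_key() in crypto_key.c assume the master key is already installed in the kernel keyring as a "logon" key whose description is a fixed prefix followed by the hex-encoded master_key_descriptor. A sketch of provisioning such a key with libkeyutils; the "ext4:" prefix and the payload layout are assumptions here, since ext4_crypto.h is not part of this excerpt:

/*
 * Sketch: install the master key that ext4_generate_encryption_key()
 * above looks up with request_key(&key_type_logon, ...).
 * Build with -lkeyutils.
 */
#include <keyutils.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ext4_encryption_key {	/* assumed payload layout */
	uint32_t mode;
	char raw[64];
	uint32_t size;
} __attribute__((packed));

int main(void)
{
	struct ext4_encryption_key key;
	key_serial_t serial;

	memset(&key, 0, sizeof(key));
	key.mode = 1;				/* assumed: AES-256-XTS */
	key.size = 64;
	memset(key.raw, 0x42, key.size);	/* toy master key */

	/* description: assumed prefix + hex of the 8-byte policy descriptor */
	serial = add_key("logon", "ext4:4242424242424242",
			 &key, sizeof(key), KEY_SPEC_SESSION_KEYRING);
	if (serial < 0) {
		perror("add_key");
		return 1;
	}
	printf("key serial %d\n", serial);
	return 0;
}
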
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index c24143ea9c08..61db51a5ce4c 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -22,10 +22,8 @@
  */
 
 #include <linux/fs.h>
-#include <linux/jbd2.h>
 #include <linux/buffer_head.h>
 #include <linux/slab.h>
-#include <linux/rbtree.h>
 #include "ext4.h"
 #include "xattr.h"
 
@@ -110,7 +108,10 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
 	int err;
 	struct inode *inode = file_inode(file);
 	struct super_block *sb = inode->i_sb;
+	struct buffer_head *bh = NULL;
 	int dir_has_error = 0;
+	struct ext4_fname_crypto_ctx *enc_ctx = NULL;
+	struct ext4_str fname_crypto_str = {.name = NULL, .len = 0};
 
 	if (is_dx_dir(inode)) {
 		err = ext4_dx_readdir(file, ctx);
@@ -127,17 +128,28 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
 
 	if (ext4_has_inline_data(inode)) {
 		int has_inline_data = 1;
-		int ret = ext4_read_inline_dir(file, ctx,
+		err = ext4_read_inline_dir(file, ctx,
 					   &has_inline_data);
 		if (has_inline_data)
-			return ret;
+			return err;
+	}
+
+	enc_ctx = ext4_get_fname_crypto_ctx(inode, EXT4_NAME_LEN);
+	if (IS_ERR(enc_ctx))
+		return PTR_ERR(enc_ctx);
+	if (enc_ctx) {
+		err = ext4_fname_crypto_alloc_buffer(enc_ctx, EXT4_NAME_LEN,
+						     &fname_crypto_str);
+		if (err < 0) {
+			ext4_put_fname_crypto_ctx(&enc_ctx);
+			return err;
+		}
 	}
 
 	offset = ctx->pos & (sb->s_blocksize - 1);
 
 	while (ctx->pos < inode->i_size) {
 		struct ext4_map_blocks map;
-		struct buffer_head *bh = NULL;
 
 		map.m_lblk = ctx->pos >> EXT4_BLOCK_SIZE_BITS(sb);
 		map.m_len = 1;
@@ -180,6 +192,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
180 (unsigned long long)ctx->pos); 192 (unsigned long long)ctx->pos);
181 ctx->pos += sb->s_blocksize - offset; 193 ctx->pos += sb->s_blocksize - offset;
182 brelse(bh); 194 brelse(bh);
195 bh = NULL;
183 continue; 196 continue;
184 } 197 }
185 set_buffer_verified(bh); 198 set_buffer_verified(bh);
@@ -226,25 +239,44 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
226 offset += ext4_rec_len_from_disk(de->rec_len, 239 offset += ext4_rec_len_from_disk(de->rec_len,
227 sb->s_blocksize); 240 sb->s_blocksize);
228 if (le32_to_cpu(de->inode)) { 241 if (le32_to_cpu(de->inode)) {
229 if (!dir_emit(ctx, de->name, 242 if (enc_ctx == NULL) {
230 de->name_len, 243 /* Directory is not encrypted */
231 le32_to_cpu(de->inode), 244 if (!dir_emit(ctx, de->name,
232 get_dtype(sb, de->file_type))) { 245 de->name_len,
233 brelse(bh); 246 le32_to_cpu(de->inode),
234 return 0; 247 get_dtype(sb, de->file_type)))
248 goto done;
249 } else {
250 /* Directory is encrypted */
251 err = ext4_fname_disk_to_usr(enc_ctx,
252 de, &fname_crypto_str);
253 if (err < 0)
254 goto errout;
255 if (!dir_emit(ctx,
256 fname_crypto_str.name, err,
257 le32_to_cpu(de->inode),
258 get_dtype(sb, de->file_type)))
259 goto done;
235 } 260 }
236 } 261 }
237 ctx->pos += ext4_rec_len_from_disk(de->rec_len, 262 ctx->pos += ext4_rec_len_from_disk(de->rec_len,
238 sb->s_blocksize); 263 sb->s_blocksize);
239 } 264 }
240 offset = 0; 265 if ((ctx->pos < inode->i_size) && !dir_relax(inode))
266 goto done;
241 brelse(bh); 267 brelse(bh);
242 if (ctx->pos < inode->i_size) { 268 bh = NULL;
243 if (!dir_relax(inode)) 269 offset = 0;
244 return 0;
245 }
246 } 270 }
247 return 0; 271done:
272 err = 0;
273errout:
274#ifdef CONFIG_EXT4_FS_ENCRYPTION
275 ext4_put_fname_crypto_ctx(&enc_ctx);
276 ext4_fname_crypto_free_buffer(&fname_crypto_str);
277#endif
278 brelse(bh);
279 return err;
248} 280}
249 281
250static inline int is_32bit_api(void) 282static inline int is_32bit_api(void)
@@ -384,10 +416,15 @@ void ext4_htree_free_dir_info(struct dir_private_info *p)
384 416
385/* 417/*
386 * Given a directory entry, enter it into the fname rb tree. 418 * Given a directory entry, enter it into the fname rb tree.
419 *
420 * When filename encryption is enabled, the dirent will hold the
421 * encrypted filename, while the htree will hold the decrypted filename.
422 * The decrypted filename is passed in via the ent_name parameter.
387 */ 423 */
388int ext4_htree_store_dirent(struct file *dir_file, __u32 hash, 424int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
389 __u32 minor_hash, 425 __u32 minor_hash,
390 struct ext4_dir_entry_2 *dirent) 426 struct ext4_dir_entry_2 *dirent,
427 struct ext4_str *ent_name)
391{ 428{
392 struct rb_node **p, *parent = NULL; 429 struct rb_node **p, *parent = NULL;
393 struct fname *fname, *new_fn; 430 struct fname *fname, *new_fn;
@@ -398,17 +435,17 @@ int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
398 p = &info->root.rb_node; 435 p = &info->root.rb_node;
399 436
400 /* Create and allocate the fname structure */ 437 /* Create and allocate the fname structure */
401 len = sizeof(struct fname) + dirent->name_len + 1; 438 len = sizeof(struct fname) + ent_name->len + 1;
402 new_fn = kzalloc(len, GFP_KERNEL); 439 new_fn = kzalloc(len, GFP_KERNEL);
403 if (!new_fn) 440 if (!new_fn)
404 return -ENOMEM; 441 return -ENOMEM;
405 new_fn->hash = hash; 442 new_fn->hash = hash;
406 new_fn->minor_hash = minor_hash; 443 new_fn->minor_hash = minor_hash;
407 new_fn->inode = le32_to_cpu(dirent->inode); 444 new_fn->inode = le32_to_cpu(dirent->inode);
408 new_fn->name_len = dirent->name_len; 445 new_fn->name_len = ent_name->len;
409 new_fn->file_type = dirent->file_type; 446 new_fn->file_type = dirent->file_type;
410 memcpy(new_fn->name, dirent->name, dirent->name_len); 447 memcpy(new_fn->name, ent_name->name, ent_name->len);
411 new_fn->name[dirent->name_len] = 0; 448 new_fn->name[ent_name->len] = 0;
412 449
413 while (*p) { 450 while (*p) {
414 parent = *p; 451 parent = *p;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index c8eb32eefc3c..ef267adce19a 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -422,7 +422,7 @@ enum {
422 EXT4_INODE_DIRTY = 8, 422 EXT4_INODE_DIRTY = 8,
423 EXT4_INODE_COMPRBLK = 9, /* One or more compressed clusters */ 423 EXT4_INODE_COMPRBLK = 9, /* One or more compressed clusters */
424 EXT4_INODE_NOCOMPR = 10, /* Don't compress */ 424 EXT4_INODE_NOCOMPR = 10, /* Don't compress */
425 EXT4_INODE_ENCRYPT = 11, /* Compression error */ 425 EXT4_INODE_ENCRYPT = 11, /* Encrypted file */
426/* End compression flags --- maybe not all used */ 426/* End compression flags --- maybe not all used */
427 EXT4_INODE_INDEX = 12, /* hash-indexed directory */ 427 EXT4_INODE_INDEX = 12, /* hash-indexed directory */
428 EXT4_INODE_IMAGIC = 13, /* AFS directory */ 428 EXT4_INODE_IMAGIC = 13, /* AFS directory */
@@ -582,6 +582,15 @@ enum {
582#define EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER 0x0010 582#define EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER 0x0010
583#define EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER 0x0020 583#define EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER 0x0020
584 584
585/* Encryption algorithms */
586#define EXT4_ENCRYPTION_MODE_INVALID 0
587#define EXT4_ENCRYPTION_MODE_AES_256_XTS 1
588#define EXT4_ENCRYPTION_MODE_AES_256_GCM 2
589#define EXT4_ENCRYPTION_MODE_AES_256_CBC 3
590#define EXT4_ENCRYPTION_MODE_AES_256_CTS 4
591
592#include "ext4_crypto.h"
593
585/* 594/*
586 * ioctl commands 595 * ioctl commands
587 */ 596 */
@@ -603,6 +612,9 @@ enum {
603#define EXT4_IOC_RESIZE_FS _IOW('f', 16, __u64) 612#define EXT4_IOC_RESIZE_FS _IOW('f', 16, __u64)
604#define EXT4_IOC_SWAP_BOOT _IO('f', 17) 613#define EXT4_IOC_SWAP_BOOT _IO('f', 17)
605#define EXT4_IOC_PRECACHE_EXTENTS _IO('f', 18) 614#define EXT4_IOC_PRECACHE_EXTENTS _IO('f', 18)
615#define EXT4_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct ext4_encryption_policy)
616#define EXT4_IOC_GET_ENCRYPTION_PWSALT _IOW('f', 20, __u8[16])
617#define EXT4_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct ext4_encryption_policy)
606 618
607#if defined(__KERNEL__) && defined(CONFIG_COMPAT) 619#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
608/* 620/*
@@ -939,6 +951,11 @@ struct ext4_inode_info {
939 951
940 /* Precomputed uuid+inum+igen checksum for seeding inode checksums */ 952 /* Precomputed uuid+inum+igen checksum for seeding inode checksums */
941 __u32 i_csum_seed; 953 __u32 i_csum_seed;
954
955#ifdef CONFIG_EXT4_FS_ENCRYPTION
956 /* Encryption params */
957 struct ext4_encryption_key i_encryption_key;
958#endif
942}; 959};
943 960
944/* 961/*
@@ -1142,7 +1159,8 @@ struct ext4_super_block {
1142 __le32 s_raid_stripe_width; /* blocks on all data disks (N*stride)*/ 1159 __le32 s_raid_stripe_width; /* blocks on all data disks (N*stride)*/
1143 __u8 s_log_groups_per_flex; /* FLEX_BG group size */ 1160 __u8 s_log_groups_per_flex; /* FLEX_BG group size */
1144 __u8 s_checksum_type; /* metadata checksum algorithm used */ 1161 __u8 s_checksum_type; /* metadata checksum algorithm used */
1145 __le16 s_reserved_pad; 1162 __u8 s_encryption_level; /* versioning level for encryption */
1163 __u8 s_reserved_pad; /* Padding to next 32bits */
1146 __le64 s_kbytes_written; /* nr of lifetime kilobytes written */ 1164 __le64 s_kbytes_written; /* nr of lifetime kilobytes written */
1147 __le32 s_snapshot_inum; /* Inode number of active snapshot */ 1165 __le32 s_snapshot_inum; /* Inode number of active snapshot */
1148 __le32 s_snapshot_id; /* sequential ID of active snapshot */ 1166 __le32 s_snapshot_id; /* sequential ID of active snapshot */
@@ -1169,7 +1187,9 @@ struct ext4_super_block {
1169 __le32 s_overhead_clusters; /* overhead blocks/clusters in fs */ 1187 __le32 s_overhead_clusters; /* overhead blocks/clusters in fs */
1170 __le32 s_backup_bgs[2]; /* groups with sparse_super2 SBs */ 1188 __le32 s_backup_bgs[2]; /* groups with sparse_super2 SBs */
1171 __u8 s_encrypt_algos[4]; /* Encryption algorithms in use */ 1189 __u8 s_encrypt_algos[4]; /* Encryption algorithms in use */
1172 __le32 s_reserved[105]; /* Padding to the end of the block */ 1190 __u8 s_encrypt_pw_salt[16]; /* Salt used for string2key algorithm */
1191 __le32 s_lpf_ino; /* Location of the lost+found inode */
1192 __le32 s_reserved[100]; /* Padding to the end of the block */
1173 __le32 s_checksum; /* crc32c(superblock) */ 1193 __le32 s_checksum; /* crc32c(superblock) */
1174}; 1194};
1175 1195
@@ -1180,8 +1200,16 @@ struct ext4_super_block {
1180/* 1200/*
1181 * run-time mount flags 1201 * run-time mount flags
1182 */ 1202 */
1183#define EXT4_MF_MNTDIR_SAMPLED 0x0001 1203#define EXT4_MF_MNTDIR_SAMPLED 0x0001
1184#define EXT4_MF_FS_ABORTED 0x0002 /* Fatal error detected */ 1204#define EXT4_MF_FS_ABORTED 0x0002 /* Fatal error detected */
1205#define EXT4_MF_TEST_DUMMY_ENCRYPTION 0x0004
1206
1207#ifdef CONFIG_EXT4_FS_ENCRYPTION
1208#define DUMMY_ENCRYPTION_ENABLED(sbi) (unlikely((sbi)->s_mount_flags & \
1209 EXT4_MF_TEST_DUMMY_ENCRYPTION))
1210#else
1211#define DUMMY_ENCRYPTION_ENABLED(sbi) (0)
1212#endif
1185 1213
1186/* Number of quota types we support */ 1214/* Number of quota types we support */
1187#define EXT4_MAXQUOTAS 2 1215#define EXT4_MAXQUOTAS 2
@@ -1351,6 +1379,12 @@ struct ext4_sb_info {
1351 struct ratelimit_state s_err_ratelimit_state; 1379 struct ratelimit_state s_err_ratelimit_state;
1352 struct ratelimit_state s_warning_ratelimit_state; 1380 struct ratelimit_state s_warning_ratelimit_state;
1353 struct ratelimit_state s_msg_ratelimit_state; 1381 struct ratelimit_state s_msg_ratelimit_state;
1382
1383#ifdef CONFIG_EXT4_FS_ENCRYPTION
1384 /* Encryption */
1385 uint32_t s_file_encryption_mode;
1386 uint32_t s_dir_encryption_mode;
1387#endif
1354}; 1388};
1355 1389
1356static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb) 1390static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
@@ -1466,6 +1500,18 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
1466#define EXT4_SB(sb) (sb) 1500#define EXT4_SB(sb) (sb)
1467#endif 1501#endif
1468 1502
1503/*
1504 * Returns true if the inode is encrypted
1505 */
1506static inline int ext4_encrypted_inode(struct inode *inode)
1507{
1508#ifdef CONFIG_EXT4_FS_ENCRYPTION
1509 return ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT);
1510#else
1511 return 0;
1512#endif
1513}
1514
1469#define NEXT_ORPHAN(inode) EXT4_I(inode)->i_dtime 1515#define NEXT_ORPHAN(inode) EXT4_I(inode)->i_dtime
1470 1516
1471/* 1517/*
@@ -1575,8 +1621,9 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
1575 EXT4_FEATURE_INCOMPAT_EXTENTS| \ 1621 EXT4_FEATURE_INCOMPAT_EXTENTS| \
1576 EXT4_FEATURE_INCOMPAT_64BIT| \ 1622 EXT4_FEATURE_INCOMPAT_64BIT| \
1577 EXT4_FEATURE_INCOMPAT_FLEX_BG| \ 1623 EXT4_FEATURE_INCOMPAT_FLEX_BG| \
1578 EXT4_FEATURE_INCOMPAT_MMP | \ 1624 EXT4_FEATURE_INCOMPAT_MMP | \
1579 EXT4_FEATURE_INCOMPAT_INLINE_DATA) 1625 EXT4_FEATURE_INCOMPAT_INLINE_DATA | \
1626 EXT4_FEATURE_INCOMPAT_ENCRYPT)
1580#define EXT4_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \ 1627#define EXT4_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
1581 EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \ 1628 EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
1582 EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \ 1629 EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \
@@ -2001,6 +2048,99 @@ extern unsigned ext4_free_clusters_after_init(struct super_block *sb,
2001 struct ext4_group_desc *gdp); 2048 struct ext4_group_desc *gdp);
2002ext4_fsblk_t ext4_inode_to_goal_block(struct inode *); 2049ext4_fsblk_t ext4_inode_to_goal_block(struct inode *);
2003 2050
2051/* crypto_policy.c */
2052int ext4_is_child_context_consistent_with_parent(struct inode *parent,
2053 struct inode *child);
2054int ext4_inherit_context(struct inode *parent, struct inode *child);
2055void ext4_to_hex(char *dst, char *src, size_t src_size);
2056int ext4_process_policy(const struct ext4_encryption_policy *policy,
2057 struct inode *inode);
2058int ext4_get_policy(struct inode *inode,
2059 struct ext4_encryption_policy *policy);
2060
2061/* crypto.c */
2062bool ext4_valid_contents_enc_mode(uint32_t mode);
2063uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size);
2064extern struct workqueue_struct *ext4_read_workqueue;
2065struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode);
2066void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx);
2067void ext4_restore_control_page(struct page *data_page);
2068struct page *ext4_encrypt(struct inode *inode,
2069 struct page *plaintext_page);
2070int ext4_decrypt(struct ext4_crypto_ctx *ctx, struct page *page);
2071int ext4_decrypt_one(struct inode *inode, struct page *page);
2072int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex);
2073
2074#ifdef CONFIG_EXT4_FS_ENCRYPTION
2075int ext4_init_crypto(void);
2076void ext4_exit_crypto(void);
2077static inline int ext4_sb_has_crypto(struct super_block *sb)
2078{
2079 return EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_ENCRYPT);
2080}
2081#else
2082static inline int ext4_init_crypto(void) { return 0; }
2083static inline void ext4_exit_crypto(void) { }
2084static inline int ext4_sb_has_crypto(struct super_block *sb)
2085{
2086 return 0;
2087}
2088#endif
2089
2090/* crypto_fname.c */
2091bool ext4_valid_filenames_enc_mode(uint32_t mode);
2092u32 ext4_fname_crypto_round_up(u32 size, u32 blksize);
2093int ext4_fname_crypto_alloc_buffer(struct ext4_fname_crypto_ctx *ctx,
2094 u32 ilen, struct ext4_str *crypto_str);
2095int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
2096 const struct ext4_str *iname,
2097 struct ext4_str *oname);
2098int ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
2099 const struct ext4_dir_entry_2 *de,
2100 struct ext4_str *oname);
2101int ext4_fname_usr_to_disk(struct ext4_fname_crypto_ctx *ctx,
2102 const struct qstr *iname,
2103 struct ext4_str *oname);
2104int ext4_fname_usr_to_hash(struct ext4_fname_crypto_ctx *ctx,
2105 const struct qstr *iname,
2106 struct dx_hash_info *hinfo);
2107int ext4_fname_disk_to_hash(struct ext4_fname_crypto_ctx *ctx,
2108 const struct ext4_dir_entry_2 *de,
2109 struct dx_hash_info *hinfo);
2110int ext4_fname_crypto_namelen_on_disk(struct ext4_fname_crypto_ctx *ctx,
2111 u32 namelen);
2112
2113#ifdef CONFIG_EXT4_FS_ENCRYPTION
2114void ext4_put_fname_crypto_ctx(struct ext4_fname_crypto_ctx **ctx);
2115struct ext4_fname_crypto_ctx *ext4_get_fname_crypto_ctx(struct inode *inode,
2116 u32 max_len);
2117void ext4_fname_crypto_free_buffer(struct ext4_str *crypto_str);
2118#else
2119static inline
2120void ext4_put_fname_crypto_ctx(struct ext4_fname_crypto_ctx **ctx) { }
2121static inline
2122struct ext4_fname_crypto_ctx *ext4_get_fname_crypto_ctx(struct inode *inode,
2123 u32 max_len)
2124{
2125 return NULL;
2126}
2127static inline void ext4_fname_crypto_free_buffer(struct ext4_str *p) { }
2128#endif
2129
2130
2131/* crypto_key.c */
2132int ext4_generate_encryption_key(struct inode *inode);
2133
2134#ifdef CONFIG_EXT4_FS_ENCRYPTION
2135int ext4_has_encryption_key(struct inode *inode);
2136#else
2137static inline int ext4_has_encryption_key(struct inode *inode)
2138{
2139 return 0;
2140}
2141#endif
2142
2143
2004/* dir.c */ 2144/* dir.c */
2005extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *, 2145extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *,
2006 struct file *, 2146 struct file *,
@@ -2011,17 +2151,20 @@ extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *,
2011 unlikely(__ext4_check_dir_entry(__func__, __LINE__, (dir), (filp), \ 2151 unlikely(__ext4_check_dir_entry(__func__, __LINE__, (dir), (filp), \
2012 (de), (bh), (buf), (size), (offset))) 2152 (de), (bh), (buf), (size), (offset)))
2013extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash, 2153extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
2014 __u32 minor_hash, 2154 __u32 minor_hash,
2015 struct ext4_dir_entry_2 *dirent); 2155 struct ext4_dir_entry_2 *dirent,
2156 struct ext4_str *ent_name);
2016extern void ext4_htree_free_dir_info(struct dir_private_info *p); 2157extern void ext4_htree_free_dir_info(struct dir_private_info *p);
2017extern int ext4_find_dest_de(struct inode *dir, struct inode *inode, 2158extern int ext4_find_dest_de(struct inode *dir, struct inode *inode,
2018 struct buffer_head *bh, 2159 struct buffer_head *bh,
2019 void *buf, int buf_size, 2160 void *buf, int buf_size,
2020 const char *name, int namelen, 2161 const char *name, int namelen,
2021 struct ext4_dir_entry_2 **dest_de); 2162 struct ext4_dir_entry_2 **dest_de);
2022void ext4_insert_dentry(struct inode *inode, 2163int ext4_insert_dentry(struct inode *dir,
2164 struct inode *inode,
2023 struct ext4_dir_entry_2 *de, 2165 struct ext4_dir_entry_2 *de,
2024 int buf_size, 2166 int buf_size,
2167 const struct qstr *iname,
2025 const char *name, int namelen); 2168 const char *name, int namelen);
2026static inline void ext4_update_dx_flag(struct inode *inode) 2169static inline void ext4_update_dx_flag(struct inode *inode)
2027{ 2170{
@@ -2099,6 +2242,7 @@ extern int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
2099extern int ext4_trim_fs(struct super_block *, struct fstrim_range *); 2242extern int ext4_trim_fs(struct super_block *, struct fstrim_range *);
2100 2243
2101/* inode.c */ 2244/* inode.c */
2245int ext4_inode_is_fast_symlink(struct inode *inode);
2102struct buffer_head *ext4_getblk(handle_t *, struct inode *, ext4_lblk_t, int); 2246struct buffer_head *ext4_getblk(handle_t *, struct inode *, ext4_lblk_t, int);
2103struct buffer_head *ext4_bread(handle_t *, struct inode *, ext4_lblk_t, int); 2247struct buffer_head *ext4_bread(handle_t *, struct inode *, ext4_lblk_t, int);
2104int ext4_get_block_write(struct inode *inode, sector_t iblock, 2248int ext4_get_block_write(struct inode *inode, sector_t iblock,
@@ -2189,6 +2333,7 @@ extern int ext4_generic_delete_entry(handle_t *handle,
2189 void *entry_buf, 2333 void *entry_buf,
2190 int buf_size, 2334 int buf_size,
2191 int csum_size); 2335 int csum_size);
2336extern int ext4_empty_dir(struct inode *inode);
2192 2337
2193/* resize.c */ 2338/* resize.c */
2194extern int ext4_group_add(struct super_block *sb, 2339extern int ext4_group_add(struct super_block *sb,
@@ -2698,6 +2843,10 @@ static inline void ext4_set_de_type(struct super_block *sb,
2698 de->file_type = ext4_type_by_mode[(mode & S_IFMT)>>S_SHIFT]; 2843 de->file_type = ext4_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
2699} 2844}
2700 2845
2846/* readpages.c */
2847extern int ext4_mpage_readpages(struct address_space *mapping,
2848 struct list_head *pages, struct page *page,
2849 unsigned nr_pages);
2701 2850
2702/* symlink.c */ 2851/* symlink.c */
2703extern const struct inode_operations ext4_symlink_inode_operations; 2852extern const struct inode_operations ext4_symlink_inode_operations;
diff --git a/fs/ext4/ext4_crypto.h b/fs/ext4/ext4_crypto.h
new file mode 100644
index 000000000000..c2ba35a914b6
--- /dev/null
+++ b/fs/ext4/ext4_crypto.h
@@ -0,0 +1,147 @@
1/*
2 * linux/fs/ext4/ext4_crypto.h
3 *
4 * Copyright (C) 2015, Google, Inc.
5 *
6 * This contains encryption header content for ext4
7 *
8 * Written by Michael Halcrow, 2015.
9 */
10
11#ifndef _EXT4_CRYPTO_H
12#define _EXT4_CRYPTO_H
13
14#include <linux/fs.h>
15
16#define EXT4_KEY_DESCRIPTOR_SIZE 8
17
18/* Policy provided via an ioctl on the topmost directory */
19struct ext4_encryption_policy {
20 char version;
21 char contents_encryption_mode;
22 char filenames_encryption_mode;
23 char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE];
24} __attribute__((__packed__));
25
26#define EXT4_ENCRYPTION_CONTEXT_FORMAT_V1 1
27#define EXT4_KEY_DERIVATION_NONCE_SIZE 16
28
29/**
30 * Encryption context for inode
31 *
32 * Protector format:
33 * 1 byte: Protector format (1 = this version)
34 * 1 byte: File contents encryption mode
35 * 1 byte: File names encryption mode
36 * 1 byte: Reserved
37 * 8 bytes: Master Key descriptor
38 * 16 bytes: Encryption Key derivation nonce
39 */
40struct ext4_encryption_context {
41 char format;
42 char contents_encryption_mode;
43 char filenames_encryption_mode;
44 char reserved;
45 char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE];
46 char nonce[EXT4_KEY_DERIVATION_NONCE_SIZE];
47} __attribute__((__packed__));
48
49/* Encryption parameters */
50#define EXT4_XTS_TWEAK_SIZE 16
51#define EXT4_AES_128_ECB_KEY_SIZE 16
52#define EXT4_AES_256_GCM_KEY_SIZE 32
53#define EXT4_AES_256_CBC_KEY_SIZE 32
54#define EXT4_AES_256_CTS_KEY_SIZE 32
55#define EXT4_AES_256_XTS_KEY_SIZE 64
56#define EXT4_MAX_KEY_SIZE 64
57
58#define EXT4_KEY_DESC_PREFIX "ext4:"
59#define EXT4_KEY_DESC_PREFIX_SIZE 5
60
61struct ext4_encryption_key {
62 uint32_t mode;
63 char raw[EXT4_MAX_KEY_SIZE];
64 uint32_t size;
65};
66
67#define EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
68#define EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL 0x00000002
69
70struct ext4_crypto_ctx {
71 struct crypto_tfm *tfm; /* Crypto API context */
72 struct page *bounce_page; /* Ciphertext page on write path */
73 struct page *control_page; /* Original page on write path */
74 struct bio *bio; /* The bio for this context */
75 struct work_struct work; /* Work queue for read complete path */
76 struct list_head free_list; /* Free list */
77 int flags; /* Flags */
78 int mode; /* Encryption mode for tfm */
79};
80
81struct ext4_completion_result {
82 struct completion completion;
83 int res;
84};
85
86#define DECLARE_EXT4_COMPLETION_RESULT(ecr) \
87 struct ext4_completion_result ecr = { \
88 COMPLETION_INITIALIZER((ecr).completion), 0 }
89
90static inline int ext4_encryption_key_size(int mode)
91{
92 switch (mode) {
93 case EXT4_ENCRYPTION_MODE_AES_256_XTS:
94 return EXT4_AES_256_XTS_KEY_SIZE;
95 case EXT4_ENCRYPTION_MODE_AES_256_GCM:
96 return EXT4_AES_256_GCM_KEY_SIZE;
97 case EXT4_ENCRYPTION_MODE_AES_256_CBC:
98 return EXT4_AES_256_CBC_KEY_SIZE;
99 case EXT4_ENCRYPTION_MODE_AES_256_CTS:
100 return EXT4_AES_256_CTS_KEY_SIZE;
101 default:
102 BUG();
103 }
104 return 0;
105}
106
107#define EXT4_FNAME_NUM_SCATTER_ENTRIES 4
108#define EXT4_CRYPTO_BLOCK_SIZE 16
109#define EXT4_FNAME_CRYPTO_DIGEST_SIZE 32
110
111struct ext4_str {
112 unsigned char *name;
113 u32 len;
114};
115
116struct ext4_fname_crypto_ctx {
117 u32 lim;
118 char tmp_buf[EXT4_CRYPTO_BLOCK_SIZE];
119 struct crypto_ablkcipher *ctfm;
120 struct crypto_hash *htfm;
121 struct page *workpage;
122 struct ext4_encryption_key key;
123 unsigned has_valid_key : 1;
124 unsigned ctfm_key_is_ready : 1;
125};
126
127/**
128 * For encrypted symlinks, the ciphertext length is stored at the beginning
129 * of the string in little-endian format.
130 */
131struct ext4_encrypted_symlink_data {
132 __le16 len;
133 char encrypted_path[1];
134} __attribute__((__packed__));
135
136/**
137 * This function is used to calculate the disk space required to
138 * store a filename of length l in encrypted symlink format.
139 */
140static inline u32 encrypted_symlink_data_len(u32 l)
141{
142 if (l < EXT4_CRYPTO_BLOCK_SIZE)
143 l = EXT4_CRYPTO_BLOCK_SIZE;
144 return (l + sizeof(struct ext4_encrypted_symlink_data) - 1);
145}
146
147#endif /* _EXT4_CRYPTO_H */
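For orientation, the on-disk sizes implied by this header: the packed ext4_encryption_context is 1 + 1 + 1 + 1 + 8 + 16 = 28 bytes, and encrypted_symlink_data_len() pads a short target up to one 16-byte crypto block before accounting for the 2-byte length prefix, so even a one-byte symlink target needs 18 bytes. A hedged self-check sketch, not part of the patch, assuming the header above is included:

/*
 * Illustrative sanity checks only. The context structure is packed, so its
 * size is exactly 28 bytes; a 1-byte symlink target still occupies one
 * EXT4_CRYPTO_BLOCK_SIZE block plus the __le16 length prefix (16 + 2 = 18).
 */
static void ext4_crypto_size_checks(void)
{
	BUILD_BUG_ON(sizeof(struct ext4_encryption_context) != 28);
	WARN_ON(encrypted_symlink_data_len(1) != 18);
}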
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index bed43081720f..973816bfe4a9 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -1717,12 +1717,6 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
1717{ 1717{
1718 unsigned short ext1_ee_len, ext2_ee_len; 1718 unsigned short ext1_ee_len, ext2_ee_len;
1719 1719
1720 /*
1721 * Make sure that both extents are initialized. We don't merge
1722 * unwritten extents so that we can be sure that end_io code has
1723 * the extent that was written properly split out and conversion to
1724 * initialized is trivial.
1725 */
1726 if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2)) 1720 if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2))
1727 return 0; 1721 return 0;
1728 1722
@@ -3128,6 +3122,9 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
3128 ee_len = ext4_ext_get_actual_len(ex); 3122 ee_len = ext4_ext_get_actual_len(ex);
3129 ee_pblock = ext4_ext_pblock(ex); 3123 ee_pblock = ext4_ext_pblock(ex);
3130 3124
3125 if (ext4_encrypted_inode(inode))
3126 return ext4_encrypted_zeroout(inode, ex);
3127
3131 ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS); 3128 ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
3132 if (ret > 0) 3129 if (ret > 0)
3133 ret = 0; 3130 ret = 0;
@@ -4535,19 +4532,7 @@ got_allocated_blocks:
4535 */ 4532 */
4536 reserved_clusters = get_reserved_cluster_alloc(inode, 4533 reserved_clusters = get_reserved_cluster_alloc(inode,
4537 map->m_lblk, allocated); 4534 map->m_lblk, allocated);
4538 if (map_from_cluster) { 4535 if (!map_from_cluster) {
4539 if (reserved_clusters) {
4540 /*
4541 * We have clusters reserved for this range.
4542 * But since we are not doing actual allocation
4543 * and are simply using blocks from previously
4544 * allocated cluster, we should release the
4545 * reservation and not claim quota.
4546 */
4547 ext4_da_update_reserve_space(inode,
4548 reserved_clusters, 0);
4549 }
4550 } else {
4551 BUG_ON(allocated_clusters < reserved_clusters); 4536 BUG_ON(allocated_clusters < reserved_clusters);
4552 if (reserved_clusters < allocated_clusters) { 4537 if (reserved_clusters < allocated_clusters) {
4553 struct ext4_inode_info *ei = EXT4_I(inode); 4538 struct ext4_inode_info *ei = EXT4_I(inode);
@@ -4803,12 +4788,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
4803 else 4788 else
4804 max_blocks -= lblk; 4789 max_blocks -= lblk;
4805 4790
4806 flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT |
4807 EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
4808 EXT4_EX_NOCACHE;
4809 if (mode & FALLOC_FL_KEEP_SIZE)
4810 flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
4811
4812 mutex_lock(&inode->i_mutex); 4791 mutex_lock(&inode->i_mutex);
4813 4792
4814 /* 4793 /*
@@ -4825,15 +4804,28 @@ static long ext4_zero_range(struct file *file, loff_t offset,
4825 ret = inode_newsize_ok(inode, new_size); 4804 ret = inode_newsize_ok(inode, new_size);
4826 if (ret) 4805 if (ret)
4827 goto out_mutex; 4806 goto out_mutex;
4828 /*
4829 * If we have a partial block after EOF we have to allocate
4830 * the entire block.
4831 */
4832 if (partial_end)
4833 max_blocks += 1;
4834 } 4807 }
4835 4808
4809 flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
4810 if (mode & FALLOC_FL_KEEP_SIZE)
4811 flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
4812
4813 /* Preallocate the range including the unaligned edges */
4814 if (partial_begin || partial_end) {
4815 ret = ext4_alloc_file_blocks(file,
4816 round_down(offset, 1 << blkbits) >> blkbits,
4817 (round_up((offset + len), 1 << blkbits) -
4818 round_down(offset, 1 << blkbits)) >> blkbits,
4819 new_size, flags, mode);
4820 if (ret)
4821 goto out_mutex;
4822
4823 }
4824
4825 /* Zero range excluding the unaligned edges */
4836 if (max_blocks > 0) { 4826 if (max_blocks > 0) {
4827 flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
4828 EXT4_EX_NOCACHE);
4837 4829
4838 /* Now release the pages and zero block aligned part of pages*/ 4830 /* Now release the pages and zero block aligned part of pages*/
4839 truncate_pagecache_range(inode, start, end - 1); 4831 truncate_pagecache_range(inode, start, end - 1);
@@ -4847,19 +4839,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
4847 flags, mode); 4839 flags, mode);
4848 if (ret) 4840 if (ret)
4849 goto out_dio; 4841 goto out_dio;
4850 /*
4851 * Remove entire range from the extent status tree.
4852 *
4853 * ext4_es_remove_extent(inode, lblk, max_blocks) is
4854 * NOT sufficient. I'm not sure why this is the case,
4855 * but let's be conservative and remove the extent
4856 * status tree for the entire inode. There should be
4857 * no outstanding delalloc extents thanks to the
4858 * filemap_write_and_wait_range() call above.
4859 */
4860 ret = ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
4861 if (ret)
4862 goto out_dio;
4863 } 4842 }
4864 if (!partial_begin && !partial_end) 4843 if (!partial_begin && !partial_end)
4865 goto out_dio; 4844 goto out_dio;
@@ -4922,6 +4901,20 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4922 ext4_lblk_t lblk; 4901 ext4_lblk_t lblk;
4923 unsigned int blkbits = inode->i_blkbits; 4902 unsigned int blkbits = inode->i_blkbits;
4924 4903
4904 /*
4905 * Encrypted inodes can't handle collapse range or insert
4906 * range since we would need to re-encrypt blocks with a
4907 * different IV or XTS tweak (which are based on the logical
4908 * block number).
4909 *
4910 * XXX It's not clear why zero range isn't working, but we'll
4911 * leave it disabled for encrypted inodes for now. This is a
4912 * bug we should fix....
4913 */
4914 if (ext4_encrypted_inode(inode) &&
4915 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE)))
4916 return -EOPNOTSUPP;
4917
4925 /* Return error if mode is not supported */ 4918 /* Return error if mode is not supported */
4926 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | 4919 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
4927 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE)) 4920 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE))
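The new guard at the top of ext4_fallocate() is visible from userspace: collapse range and zero range on an encrypted regular file are rejected before any other validation, because moving or rewriting data at a different logical block would require re-encrypting it under a different IV/XTS tweak. A hedged userspace illustration of what a caller would observe (the path below is hypothetical and not part of the patch):

/* Hedged illustration only; not part of the patch. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical file inside an encrypted directory */
	int fd = open("/mnt/encrypted/file", O_RDWR);

	if (fd < 0)
		return 1;
	if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 0, 4096) < 0 &&
	    errno == EOPNOTSUPP)
		printf("collapse range rejected on an encrypted inode\n");
	close(fd);
	return 0;
}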
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index e04d45733976..d33d5a6852b9 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -9,12 +9,10 @@
9 * 9 *
10 * Ext4 extents status tree core functions. 10 * Ext4 extents status tree core functions.
11 */ 11 */
12#include <linux/rbtree.h>
13#include <linux/list_sort.h> 12#include <linux/list_sort.h>
14#include <linux/proc_fs.h> 13#include <linux/proc_fs.h>
15#include <linux/seq_file.h> 14#include <linux/seq_file.h>
16#include "ext4.h" 15#include "ext4.h"
17#include "extents_status.h"
18 16
19#include <trace/events/ext4.h> 17#include <trace/events/ext4.h>
20 18
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index e576d682b353..0613c256c344 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -20,7 +20,6 @@
20 20
21#include <linux/time.h> 21#include <linux/time.h>
22#include <linux/fs.h> 22#include <linux/fs.h>
23#include <linux/jbd2.h>
24#include <linux/mount.h> 23#include <linux/mount.h>
25#include <linux/path.h> 24#include <linux/path.h>
26#include <linux/quotaops.h> 25#include <linux/quotaops.h>
@@ -221,6 +220,13 @@ static const struct vm_operations_struct ext4_file_vm_ops = {
221 220
222static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma) 221static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
223{ 222{
223 struct inode *inode = file->f_mapping->host;
224
225 if (ext4_encrypted_inode(inode)) {
226 int err = ext4_generate_encryption_key(inode);
227 if (err)
228 return 0;
229 }
224 file_accessed(file); 230 file_accessed(file);
225 if (IS_DAX(file_inode(file))) { 231 if (IS_DAX(file_inode(file))) {
226 vma->vm_ops = &ext4_dax_vm_ops; 232 vma->vm_ops = &ext4_dax_vm_ops;
@@ -238,6 +244,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
238 struct vfsmount *mnt = filp->f_path.mnt; 244 struct vfsmount *mnt = filp->f_path.mnt;
239 struct path path; 245 struct path path;
240 char buf[64], *cp; 246 char buf[64], *cp;
247 int ret;
241 248
242 if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) && 249 if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
243 !(sb->s_flags & MS_RDONLY))) { 250 !(sb->s_flags & MS_RDONLY))) {
@@ -276,11 +283,17 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
276 * writing and the journal is present 283 * writing and the journal is present
277 */ 284 */
278 if (filp->f_mode & FMODE_WRITE) { 285 if (filp->f_mode & FMODE_WRITE) {
279 int ret = ext4_inode_attach_jinode(inode); 286 ret = ext4_inode_attach_jinode(inode);
280 if (ret < 0) 287 if (ret < 0)
281 return ret; 288 return ret;
282 } 289 }
283 return dquot_file_open(inode, filp); 290 ret = dquot_file_open(inode, filp);
291 if (!ret && ext4_encrypted_inode(inode)) {
292 ret = ext4_generate_encryption_key(inode);
293 if (ret)
294 ret = -EACCES;
295 }
296 return ret;
284} 297}
285 298
286/* 299/*
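With the ext4_file_open() change above, opening an encrypted file now triggers key derivation, and a failure (typically because the required key is not available) is returned to userspace as -EACCES. A hedged userspace sketch of that behaviour; the path is hypothetical and not part of the patch:

/* Hedged illustration only; not part of the patch. */
#include <fcntl.h>
#include <errno.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical file inside an encrypted directory */
	int fd = open("/mnt/encrypted/secret.txt", O_RDONLY);

	if (fd < 0 && errno == EACCES)
		fprintf(stderr, "encryption key not available\n");
	return fd < 0;
}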
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index a8bc47f75fa0..e9d632e9aa4b 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -26,7 +26,6 @@
26#include <linux/fs.h> 26#include <linux/fs.h>
27#include <linux/sched.h> 27#include <linux/sched.h>
28#include <linux/writeback.h> 28#include <linux/writeback.h>
29#include <linux/jbd2.h>
30#include <linux/blkdev.h> 29#include <linux/blkdev.h>
31 30
32#include "ext4.h" 31#include "ext4.h"
diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c
index 3d586f02883e..e026aa941fd5 100644
--- a/fs/ext4/hash.c
+++ b/fs/ext4/hash.c
@@ -10,7 +10,6 @@
10 */ 10 */
11 11
12#include <linux/fs.h> 12#include <linux/fs.h>
13#include <linux/jbd2.h>
14#include <linux/cryptohash.h> 13#include <linux/cryptohash.h>
15#include "ext4.h" 14#include "ext4.h"
16 15
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index ac644c31ca67..2cf18a2d5c72 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -14,7 +14,6 @@
14 14
15#include <linux/time.h> 15#include <linux/time.h>
16#include <linux/fs.h> 16#include <linux/fs.h>
17#include <linux/jbd2.h>
18#include <linux/stat.h> 17#include <linux/stat.h>
19#include <linux/string.h> 18#include <linux/string.h>
20#include <linux/quotaops.h> 19#include <linux/quotaops.h>
@@ -997,6 +996,12 @@ got:
997 ei->i_block_group = group; 996 ei->i_block_group = group;
998 ei->i_last_alloc_group = ~0; 997 ei->i_last_alloc_group = ~0;
999 998
999	/* If the directory is encrypted, then we should encrypt the inode. */
1000 if ((S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) &&
1001 (ext4_encrypted_inode(dir) ||
1002 DUMMY_ENCRYPTION_ENABLED(sbi)))
1003 ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
1004
1000 ext4_set_inode_flags(inode); 1005 ext4_set_inode_flags(inode);
1001 if (IS_DIRSYNC(inode)) 1006 if (IS_DIRSYNC(inode))
1002 ext4_handle_sync(handle); 1007 ext4_handle_sync(handle);
@@ -1029,11 +1034,28 @@ got:
1029 ext4_set_inode_state(inode, EXT4_STATE_NEW); 1034 ext4_set_inode_state(inode, EXT4_STATE_NEW);
1030 1035
1031 ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize; 1036 ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
1032 1037#ifdef CONFIG_EXT4_FS_ENCRYPTION
1038 if ((sbi->s_file_encryption_mode == EXT4_ENCRYPTION_MODE_INVALID) &&
1039 (sbi->s_dir_encryption_mode == EXT4_ENCRYPTION_MODE_INVALID)) {
1040 ei->i_inline_off = 0;
1041 if (EXT4_HAS_INCOMPAT_FEATURE(sb,
1042 EXT4_FEATURE_INCOMPAT_INLINE_DATA))
1043 ext4_set_inode_state(inode,
1044 EXT4_STATE_MAY_INLINE_DATA);
1045 } else {
1046		/* Inline data and encryption are incompatible;
1047		 * we turn off inline data since encryption is enabled. */
1048 ei->i_inline_off = 1;
1049 if (EXT4_HAS_INCOMPAT_FEATURE(sb,
1050 EXT4_FEATURE_INCOMPAT_INLINE_DATA))
1051 ext4_clear_inode_state(inode,
1052 EXT4_STATE_MAY_INLINE_DATA);
1053 }
1054#else
1033 ei->i_inline_off = 0; 1055 ei->i_inline_off = 0;
1034 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_INLINE_DATA)) 1056 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_INLINE_DATA))
1035 ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA); 1057 ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
1036 1058#endif
1037 ret = inode; 1059 ret = inode;
1038 err = dquot_alloc_inode(inode); 1060 err = dquot_alloc_inode(inode);
1039 if (err) 1061 if (err)
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 4b143febf21f..feb2cafbeace 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -11,11 +11,13 @@
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 */ 13 */
14
15#include <linux/fiemap.h>
16
14#include "ext4_jbd2.h" 17#include "ext4_jbd2.h"
15#include "ext4.h" 18#include "ext4.h"
16#include "xattr.h" 19#include "xattr.h"
17#include "truncate.h" 20#include "truncate.h"
18#include <linux/fiemap.h>
19 21
20#define EXT4_XATTR_SYSTEM_DATA "data" 22#define EXT4_XATTR_SYSTEM_DATA "data"
21#define EXT4_MIN_INLINE_DATA_SIZE ((sizeof(__le32) * EXT4_N_BLOCKS)) 23#define EXT4_MIN_INLINE_DATA_SIZE ((sizeof(__le32) * EXT4_N_BLOCKS))
@@ -972,7 +974,7 @@ void ext4_show_inline_dir(struct inode *dir, struct buffer_head *bh,
972 offset = 0; 974 offset = 0;
973 while ((void *)de < dlimit) { 975 while ((void *)de < dlimit) {
974 de_len = ext4_rec_len_from_disk(de->rec_len, inline_size); 976 de_len = ext4_rec_len_from_disk(de->rec_len, inline_size);
975 trace_printk("de: off %u rlen %u name %*.s nlen %u ino %u\n", 977 trace_printk("de: off %u rlen %u name %.*s nlen %u ino %u\n",
976 offset, de_len, de->name_len, de->name, 978 offset, de_len, de->name_len, de->name,
977 de->name_len, le32_to_cpu(de->inode)); 979 de->name_len, le32_to_cpu(de->inode));
978 if (ext4_check_dir_entry(dir, NULL, de, bh, 980 if (ext4_check_dir_entry(dir, NULL, de, bh,
@@ -1014,7 +1016,8 @@ static int ext4_add_dirent_to_inline(handle_t *handle,
1014 err = ext4_journal_get_write_access(handle, iloc->bh); 1016 err = ext4_journal_get_write_access(handle, iloc->bh);
1015 if (err) 1017 if (err)
1016 return err; 1018 return err;
1017 ext4_insert_dentry(inode, de, inline_size, name, namelen); 1019 ext4_insert_dentry(dir, inode, de, inline_size, &dentry->d_name,
1020 name, namelen);
1018 1021
1019 ext4_show_inline_dir(dir, iloc->bh, inline_start, inline_size); 1022 ext4_show_inline_dir(dir, iloc->bh, inline_start, inline_size);
1020 1023
@@ -1327,6 +1330,7 @@ int htree_inlinedir_to_tree(struct file *dir_file,
1327 struct ext4_iloc iloc; 1330 struct ext4_iloc iloc;
1328 void *dir_buf = NULL; 1331 void *dir_buf = NULL;
1329 struct ext4_dir_entry_2 fake; 1332 struct ext4_dir_entry_2 fake;
1333 struct ext4_str tmp_str;
1330 1334
1331 ret = ext4_get_inode_loc(inode, &iloc); 1335 ret = ext4_get_inode_loc(inode, &iloc);
1332 if (ret) 1336 if (ret)
@@ -1398,8 +1402,10 @@ int htree_inlinedir_to_tree(struct file *dir_file,
1398 continue; 1402 continue;
1399 if (de->inode == 0) 1403 if (de->inode == 0)
1400 continue; 1404 continue;
1401 err = ext4_htree_store_dirent(dir_file, 1405 tmp_str.name = de->name;
1402 hinfo->hash, hinfo->minor_hash, de); 1406 tmp_str.len = de->name_len;
1407 err = ext4_htree_store_dirent(dir_file, hinfo->hash,
1408 hinfo->minor_hash, de, &tmp_str);
1403 if (err) { 1409 if (err) {
1404 count = err; 1410 count = err;
1405 goto out; 1411 goto out;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index b49cf6e59953..366476e71e10 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -20,7 +20,6 @@
20 20
21#include <linux/fs.h> 21#include <linux/fs.h>
22#include <linux/time.h> 22#include <linux/time.h>
23#include <linux/jbd2.h>
24#include <linux/highuid.h> 23#include <linux/highuid.h>
25#include <linux/pagemap.h> 24#include <linux/pagemap.h>
26#include <linux/quotaops.h> 25#include <linux/quotaops.h>
@@ -36,7 +35,6 @@
36#include <linux/kernel.h> 35#include <linux/kernel.h>
37#include <linux/printk.h> 36#include <linux/printk.h>
38#include <linux/slab.h> 37#include <linux/slab.h>
39#include <linux/ratelimit.h>
40#include <linux/bitops.h> 38#include <linux/bitops.h>
41 39
42#include "ext4_jbd2.h" 40#include "ext4_jbd2.h"
@@ -140,7 +138,7 @@ static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
140/* 138/*
141 * Test whether an inode is a fast symlink. 139 * Test whether an inode is a fast symlink.
142 */ 140 */
143static int ext4_inode_is_fast_symlink(struct inode *inode) 141int ext4_inode_is_fast_symlink(struct inode *inode)
144{ 142{
145 int ea_blocks = EXT4_I(inode)->i_file_acl ? 143 int ea_blocks = EXT4_I(inode)->i_file_acl ?
146 EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0; 144 EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
@@ -887,6 +885,95 @@ int do_journal_get_write_access(handle_t *handle,
887 885
888static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock, 886static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
889 struct buffer_head *bh_result, int create); 887 struct buffer_head *bh_result, int create);
888
889#ifdef CONFIG_EXT4_FS_ENCRYPTION
890static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
891 get_block_t *get_block)
892{
893 unsigned from = pos & (PAGE_CACHE_SIZE - 1);
894 unsigned to = from + len;
895 struct inode *inode = page->mapping->host;
896 unsigned block_start, block_end;
897 sector_t block;
898 int err = 0;
899 unsigned blocksize = inode->i_sb->s_blocksize;
900 unsigned bbits;
901 struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
902 bool decrypt = false;
903
904 BUG_ON(!PageLocked(page));
905 BUG_ON(from > PAGE_CACHE_SIZE);
906 BUG_ON(to > PAGE_CACHE_SIZE);
907 BUG_ON(from > to);
908
909 if (!page_has_buffers(page))
910 create_empty_buffers(page, blocksize, 0);
911 head = page_buffers(page);
912 bbits = ilog2(blocksize);
913 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
914
915 for (bh = head, block_start = 0; bh != head || !block_start;
916 block++, block_start = block_end, bh = bh->b_this_page) {
917 block_end = block_start + blocksize;
918 if (block_end <= from || block_start >= to) {
919 if (PageUptodate(page)) {
920 if (!buffer_uptodate(bh))
921 set_buffer_uptodate(bh);
922 }
923 continue;
924 }
925 if (buffer_new(bh))
926 clear_buffer_new(bh);
927 if (!buffer_mapped(bh)) {
928 WARN_ON(bh->b_size != blocksize);
929 err = get_block(inode, block, bh, 1);
930 if (err)
931 break;
932 if (buffer_new(bh)) {
933 unmap_underlying_metadata(bh->b_bdev,
934 bh->b_blocknr);
935 if (PageUptodate(page)) {
936 clear_buffer_new(bh);
937 set_buffer_uptodate(bh);
938 mark_buffer_dirty(bh);
939 continue;
940 }
941 if (block_end > to || block_start < from)
942 zero_user_segments(page, to, block_end,
943 block_start, from);
944 continue;
945 }
946 }
947 if (PageUptodate(page)) {
948 if (!buffer_uptodate(bh))
949 set_buffer_uptodate(bh);
950 continue;
951 }
952 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
953 !buffer_unwritten(bh) &&
954 (block_start < from || block_end > to)) {
955 ll_rw_block(READ, 1, &bh);
956 *wait_bh++ = bh;
957 decrypt = ext4_encrypted_inode(inode) &&
958 S_ISREG(inode->i_mode);
959 }
960 }
961 /*
962 * If we issued read requests, let them complete.
963 */
964 while (wait_bh > wait) {
965 wait_on_buffer(*--wait_bh);
966 if (!buffer_uptodate(*wait_bh))
967 err = -EIO;
968 }
969 if (unlikely(err))
970 page_zero_new_buffers(page, from, to);
971 else if (decrypt)
972 err = ext4_decrypt_one(inode, page);
973 return err;
974}
975#endif
976
890static int ext4_write_begin(struct file *file, struct address_space *mapping, 977static int ext4_write_begin(struct file *file, struct address_space *mapping,
891 loff_t pos, unsigned len, unsigned flags, 978 loff_t pos, unsigned len, unsigned flags,
892 struct page **pagep, void **fsdata) 979 struct page **pagep, void **fsdata)
@@ -949,11 +1036,19 @@ retry_journal:
949 /* In case writeback began while the page was unlocked */ 1036 /* In case writeback began while the page was unlocked */
950 wait_for_stable_page(page); 1037 wait_for_stable_page(page);
951 1038
1039#ifdef CONFIG_EXT4_FS_ENCRYPTION
1040 if (ext4_should_dioread_nolock(inode))
1041 ret = ext4_block_write_begin(page, pos, len,
1042 ext4_get_block_write);
1043 else
1044 ret = ext4_block_write_begin(page, pos, len,
1045 ext4_get_block);
1046#else
952 if (ext4_should_dioread_nolock(inode)) 1047 if (ext4_should_dioread_nolock(inode))
953 ret = __block_write_begin(page, pos, len, ext4_get_block_write); 1048 ret = __block_write_begin(page, pos, len, ext4_get_block_write);
954 else 1049 else
955 ret = __block_write_begin(page, pos, len, ext4_get_block); 1050 ret = __block_write_begin(page, pos, len, ext4_get_block);
956 1051#endif
957 if (!ret && ext4_should_journal_data(inode)) { 1052 if (!ret && ext4_should_journal_data(inode)) {
958 ret = ext4_walk_page_buffers(handle, page_buffers(page), 1053 ret = ext4_walk_page_buffers(handle, page_buffers(page),
959 from, to, NULL, 1054 from, to, NULL,
@@ -2575,7 +2670,12 @@ retry_journal:
2575 /* In case writeback began while the page was unlocked */ 2670 /* In case writeback began while the page was unlocked */
2576 wait_for_stable_page(page); 2671 wait_for_stable_page(page);
2577 2672
2673#ifdef CONFIG_EXT4_FS_ENCRYPTION
2674 ret = ext4_block_write_begin(page, pos, len,
2675 ext4_da_get_block_prep);
2676#else
2578 ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep); 2677 ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
2678#endif
2579 if (ret < 0) { 2679 if (ret < 0) {
2580 unlock_page(page); 2680 unlock_page(page);
2581 ext4_journal_stop(handle); 2681 ext4_journal_stop(handle);
@@ -2821,7 +2921,7 @@ static int ext4_readpage(struct file *file, struct page *page)
2821 ret = ext4_readpage_inline(inode, page); 2921 ret = ext4_readpage_inline(inode, page);
2822 2922
2823 if (ret == -EAGAIN) 2923 if (ret == -EAGAIN)
2824 return mpage_readpage(page, ext4_get_block); 2924 return ext4_mpage_readpages(page->mapping, NULL, page, 1);
2825 2925
2826 return ret; 2926 return ret;
2827} 2927}
@@ -2836,7 +2936,7 @@ ext4_readpages(struct file *file, struct address_space *mapping,
2836 if (ext4_has_inline_data(inode)) 2936 if (ext4_has_inline_data(inode))
2837 return 0; 2937 return 0;
2838 2938
2839 return mpage_readpages(mapping, pages, nr_pages, ext4_get_block); 2939 return ext4_mpage_readpages(mapping, pages, NULL, nr_pages);
2840} 2940}
2841 2941
2842static void ext4_invalidatepage(struct page *page, unsigned int offset, 2942static void ext4_invalidatepage(struct page *page, unsigned int offset,
@@ -3033,6 +3133,9 @@ static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
3033 get_block_func = ext4_get_block_write; 3133 get_block_func = ext4_get_block_write;
3034 dio_flags = DIO_LOCKING; 3134 dio_flags = DIO_LOCKING;
3035 } 3135 }
3136#ifdef CONFIG_EXT4_FS_ENCRYPTION
3137 BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
3138#endif
3036 if (IS_DAX(inode)) 3139 if (IS_DAX(inode))
3037 ret = dax_do_io(iocb, inode, iter, offset, get_block_func, 3140 ret = dax_do_io(iocb, inode, iter, offset, get_block_func,
3038 ext4_end_io_dio, dio_flags); 3141 ext4_end_io_dio, dio_flags);
@@ -3097,6 +3200,11 @@ static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
3097 size_t count = iov_iter_count(iter); 3200 size_t count = iov_iter_count(iter);
3098 ssize_t ret; 3201 ssize_t ret;
3099 3202
3203#ifdef CONFIG_EXT4_FS_ENCRYPTION
3204 if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode))
3205 return 0;
3206#endif
3207
3100 /* 3208 /*
3101 * If we are doing data journalling we don't support O_DIRECT 3209 * If we are doing data journalling we don't support O_DIRECT
3102 */ 3210 */
@@ -3261,6 +3369,13 @@ static int __ext4_block_zero_page_range(handle_t *handle,
3261 /* Uhhuh. Read error. Complain and punt. */ 3369 /* Uhhuh. Read error. Complain and punt. */
3262 if (!buffer_uptodate(bh)) 3370 if (!buffer_uptodate(bh))
3263 goto unlock; 3371 goto unlock;
3372 if (S_ISREG(inode->i_mode) &&
3373 ext4_encrypted_inode(inode)) {
3374 /* We expect the key to be set. */
3375 BUG_ON(!ext4_has_encryption_key(inode));
3376 BUG_ON(blocksize != PAGE_CACHE_SIZE);
3377 WARN_ON_ONCE(ext4_decrypt_one(inode, page));
3378 }
3264 } 3379 }
3265 if (ext4_should_journal_data(inode)) { 3380 if (ext4_should_journal_data(inode)) {
3266 BUFFER_TRACE(bh, "get write access"); 3381 BUFFER_TRACE(bh, "get write access");
@@ -4096,7 +4211,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4096 inode->i_op = &ext4_dir_inode_operations; 4211 inode->i_op = &ext4_dir_inode_operations;
4097 inode->i_fop = &ext4_dir_operations; 4212 inode->i_fop = &ext4_dir_operations;
4098 } else if (S_ISLNK(inode->i_mode)) { 4213 } else if (S_ISLNK(inode->i_mode)) {
4099 if (ext4_inode_is_fast_symlink(inode)) { 4214 if (ext4_inode_is_fast_symlink(inode) &&
4215 !ext4_encrypted_inode(inode)) {
4100 inode->i_op = &ext4_fast_symlink_inode_operations; 4216 inode->i_op = &ext4_fast_symlink_inode_operations;
4101 nd_terminate_link(ei->i_data, inode->i_size, 4217 nd_terminate_link(ei->i_data, inode->i_size,
4102 sizeof(ei->i_data) - 1); 4218 sizeof(ei->i_data) - 1);
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index f58a0d106726..2cb9e178d1c5 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -8,12 +8,12 @@
8 */ 8 */
9 9
10#include <linux/fs.h> 10#include <linux/fs.h>
11#include <linux/jbd2.h>
12#include <linux/capability.h> 11#include <linux/capability.h>
13#include <linux/time.h> 12#include <linux/time.h>
14#include <linux/compat.h> 13#include <linux/compat.h>
15#include <linux/mount.h> 14#include <linux/mount.h>
16#include <linux/file.h> 15#include <linux/file.h>
16#include <linux/random.h>
17#include <asm/uaccess.h> 17#include <asm/uaccess.h>
18#include "ext4_jbd2.h" 18#include "ext4_jbd2.h"
19#include "ext4.h" 19#include "ext4.h"
@@ -196,6 +196,16 @@ journal_err_out:
196 return err; 196 return err;
197} 197}
198 198
199static int uuid_is_zero(__u8 u[16])
200{
201 int i;
202
203 for (i = 0; i < 16; i++)
204 if (u[i])
205 return 0;
206 return 1;
207}
208
199long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 209long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
200{ 210{
201 struct inode *inode = file_inode(filp); 211 struct inode *inode = file_inode(filp);
@@ -615,7 +625,78 @@ resizefs_out:
615 } 625 }
616 case EXT4_IOC_PRECACHE_EXTENTS: 626 case EXT4_IOC_PRECACHE_EXTENTS:
617 return ext4_ext_precache(inode); 627 return ext4_ext_precache(inode);
628 case EXT4_IOC_SET_ENCRYPTION_POLICY: {
629#ifdef CONFIG_EXT4_FS_ENCRYPTION
630 struct ext4_encryption_policy policy;
631 int err = 0;
632
633 if (copy_from_user(&policy,
634 (struct ext4_encryption_policy __user *)arg,
635 sizeof(policy))) {
636 err = -EFAULT;
637 goto encryption_policy_out;
638 }
618 639
640 err = ext4_process_policy(&policy, inode);
641encryption_policy_out:
642 return err;
643#else
644 return -EOPNOTSUPP;
645#endif
646 }
647 case EXT4_IOC_GET_ENCRYPTION_PWSALT: {
648 int err, err2;
649 struct ext4_sb_info *sbi = EXT4_SB(sb);
650 handle_t *handle;
651
652 if (!ext4_sb_has_crypto(sb))
653 return -EOPNOTSUPP;
654 if (uuid_is_zero(sbi->s_es->s_encrypt_pw_salt)) {
655 err = mnt_want_write_file(filp);
656 if (err)
657 return err;
658 handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
659 if (IS_ERR(handle)) {
660 err = PTR_ERR(handle);
661 goto pwsalt_err_exit;
662 }
663 err = ext4_journal_get_write_access(handle, sbi->s_sbh);
664 if (err)
665 goto pwsalt_err_journal;
666 generate_random_uuid(sbi->s_es->s_encrypt_pw_salt);
667 err = ext4_handle_dirty_metadata(handle, NULL,
668 sbi->s_sbh);
669 pwsalt_err_journal:
670 err2 = ext4_journal_stop(handle);
671 if (err2 && !err)
672 err = err2;
673 pwsalt_err_exit:
674 mnt_drop_write_file(filp);
675 if (err)
676 return err;
677 }
678 if (copy_to_user((void *) arg, sbi->s_es->s_encrypt_pw_salt,
679 16))
680 return -EFAULT;
681 return 0;
682 }
683 case EXT4_IOC_GET_ENCRYPTION_POLICY: {
684#ifdef CONFIG_EXT4_FS_ENCRYPTION
685 struct ext4_encryption_policy policy;
686 int err = 0;
687
688 if (!ext4_encrypted_inode(inode))
689 return -ENOENT;
690 err = ext4_get_policy(inode, &policy);
691 if (err)
692 return err;
693 if (copy_to_user((void *)arg, &policy, sizeof(policy)))
694 return -EFAULT;
695 return 0;
696#else
697 return -EOPNOTSUPP;
698#endif
699 }
619 default: 700 default:
620 return -ENOTTY; 701 return -ENOTTY;
621 } 702 }
@@ -680,6 +761,9 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
680 case FITRIM: 761 case FITRIM:
681 case EXT4_IOC_RESIZE_FS: 762 case EXT4_IOC_RESIZE_FS:
682 case EXT4_IOC_PRECACHE_EXTENTS: 763 case EXT4_IOC_PRECACHE_EXTENTS:
764 case EXT4_IOC_SET_ENCRYPTION_POLICY:
765 case EXT4_IOC_GET_ENCRYPTION_PWSALT:
766 case EXT4_IOC_GET_ENCRYPTION_POLICY:
683 break; 767 break;
684 default: 768 default:
685 return -ENOIOCTLCMD; 769 return -ENOIOCTLCMD;
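To make the new ioctls concrete, here is a hedged userspace sketch that sets an encryption policy on an (empty) directory. The ioctl number and structure layout are copied from the ext4.h and ext4_crypto.h hunks above; the key descriptor bytes are arbitrary, and version 0 is assumed to be the initial policy version accepted by ext4_process_policy():

/* Hedged illustration only; not part of the patch. */
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdio.h>

#define EXT4_KEY_DESCRIPTOR_SIZE 8

struct ext4_encryption_policy {
	char version;
	char contents_encryption_mode;
	char filenames_encryption_mode;
	char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE];
} __attribute__((__packed__));

#define EXT4_IOC_SET_ENCRYPTION_POLICY \
	_IOR('f', 19, struct ext4_encryption_policy)

int main(int argc, char **argv)
{
	struct ext4_encryption_policy policy = {
		.version = 0,			/* assumed initial policy version */
		.contents_encryption_mode = 1,	/* EXT4_ENCRYPTION_MODE_AES_256_XTS */
		.filenames_encryption_mode = 4,	/* EXT4_ENCRYPTION_MODE_AES_256_CTS */
	};
	int fd;

	if (argc < 2)
		return 1;
	memcpy(policy.master_key_descriptor,
	       "\x01\x23\x45\x67\x89\xab\xcd\xef", EXT4_KEY_DESCRIPTOR_SIZE);
	fd = open(argv[1], O_RDONLY);	/* the directory to encrypt */
	if (fd < 0)
		return 1;
	if (ioctl(fd, EXT4_IOC_SET_ENCRYPTION_POLICY, &policy) < 0) {
		perror("EXT4_IOC_SET_ENCRYPTION_POLICY");
		return 1;
	}
	return 0;
}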
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 2291923dae4e..ef22cd951c0c 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -26,7 +26,6 @@
26 26
27#include <linux/fs.h> 27#include <linux/fs.h>
28#include <linux/pagemap.h> 28#include <linux/pagemap.h>
29#include <linux/jbd2.h>
30#include <linux/time.h> 29#include <linux/time.h>
31#include <linux/fcntl.h> 30#include <linux/fcntl.h>
32#include <linux/stat.h> 31#include <linux/stat.h>
@@ -254,8 +253,9 @@ static struct dx_frame *dx_probe(const struct qstr *d_name,
254 struct dx_hash_info *hinfo, 253 struct dx_hash_info *hinfo,
255 struct dx_frame *frame); 254 struct dx_frame *frame);
256static void dx_release(struct dx_frame *frames); 255static void dx_release(struct dx_frame *frames);
257static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize, 256static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
258 struct dx_hash_info *hinfo, struct dx_map_entry map[]); 257 unsigned blocksize, struct dx_hash_info *hinfo,
258 struct dx_map_entry map[]);
259static void dx_sort_map(struct dx_map_entry *map, unsigned count); 259static void dx_sort_map(struct dx_map_entry *map, unsigned count);
260static struct ext4_dir_entry_2 *dx_move_dirents(char *from, char *to, 260static struct ext4_dir_entry_2 *dx_move_dirents(char *from, char *to,
261 struct dx_map_entry *offsets, int count, unsigned blocksize); 261 struct dx_map_entry *offsets, int count, unsigned blocksize);
@@ -586,8 +586,10 @@ struct stats
586 unsigned bcount; 586 unsigned bcount;
587}; 587};
588 588
589static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext4_dir_entry_2 *de, 589static struct stats dx_show_leaf(struct inode *dir,
590 int size, int show_names) 590 struct dx_hash_info *hinfo,
591 struct ext4_dir_entry_2 *de,
592 int size, int show_names)
591{ 593{
592 unsigned names = 0, space = 0; 594 unsigned names = 0, space = 0;
593 char *base = (char *) de; 595 char *base = (char *) de;
@@ -600,12 +602,80 @@ static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext4_dir_ent
600 { 602 {
601 if (show_names) 603 if (show_names)
602 { 604 {
605#ifdef CONFIG_EXT4_FS_ENCRYPTION
606 int len;
607 char *name;
608 struct ext4_str fname_crypto_str
609 = {.name = NULL, .len = 0};
610 struct ext4_fname_crypto_ctx *ctx = NULL;
611 int res;
612
613 name = de->name;
614 len = de->name_len;
615 ctx = ext4_get_fname_crypto_ctx(dir,
616 EXT4_NAME_LEN);
617 if (IS_ERR(ctx)) {
618 printk(KERN_WARNING "Error acquiring"
619 " crypto ctxt--skipping crypto\n");
620 ctx = NULL;
621 }
622 if (ctx == NULL) {
623 /* Directory is not encrypted */
624 ext4fs_dirhash(de->name,
625 de->name_len, &h);
626				printk("%.*s:(U)%x.%u ", len,
627 name, h.hash,
628 (unsigned) ((char *) de
629 - base));
630 } else {
631 /* Directory is encrypted */
632 res = ext4_fname_crypto_alloc_buffer(
633 ctx, de->name_len,
634 &fname_crypto_str);
635 if (res < 0) {
636 printk(KERN_WARNING "Error "
637 "allocating crypto "
638 "buffer--skipping "
639 "crypto\n");
640 ext4_put_fname_crypto_ctx(&ctx);
641 ctx = NULL;
642 }
643 res = ext4_fname_disk_to_usr(ctx, de,
644 &fname_crypto_str);
645 if (res < 0) {
646 printk(KERN_WARNING "Error "
647 "converting filename "
648 "from disk to usr"
649 "\n");
650 name = "??";
651 len = 2;
652 } else {
653 name = fname_crypto_str.name;
654 len = fname_crypto_str.len;
655 }
656 res = ext4_fname_disk_to_hash(ctx, de,
657 &h);
658 if (res < 0) {
659 printk(KERN_WARNING "Error "
660 "converting filename "
661 "from disk to htree"
662 "\n");
663 h.hash = 0xDEADBEEF;
664 }
665				printk("%.*s:(E)%x.%u ", len, name,
666 h.hash, (unsigned) ((char *) de
667 - base));
668 ext4_put_fname_crypto_ctx(&ctx);
669 ext4_fname_crypto_free_buffer(
670 &fname_crypto_str);
671 }
672#else
603 int len = de->name_len; 673 int len = de->name_len;
604 char *name = de->name; 674 char *name = de->name;
605 while (len--) printk("%c", *name++);
606 ext4fs_dirhash(de->name, de->name_len, &h); 675 ext4fs_dirhash(de->name, de->name_len, &h);
607 printk(":%x.%u ", h.hash, 676 printk("%.*s:%x.%u ", len, name, h.hash,
608 (unsigned) ((char *) de - base)); 677 (unsigned) ((char *) de - base));
678#endif
609 } 679 }
610 space += EXT4_DIR_REC_LEN(de->name_len); 680 space += EXT4_DIR_REC_LEN(de->name_len);
611 names++; 681 names++;
@@ -623,7 +693,6 @@ struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
623 unsigned count = dx_get_count(entries), names = 0, space = 0, i; 693 unsigned count = dx_get_count(entries), names = 0, space = 0, i;
624 unsigned bcount = 0; 694 unsigned bcount = 0;
625 struct buffer_head *bh; 695 struct buffer_head *bh;
626 int err;
627 printk("%i indexed blocks...\n", count); 696 printk("%i indexed blocks...\n", count);
628 for (i = 0; i < count; i++, entries++) 697 for (i = 0; i < count; i++, entries++)
629 { 698 {
@@ -637,7 +706,8 @@ struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
637 continue; 706 continue;
638 stats = levels? 707 stats = levels?
639 dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1): 708 dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1):
640 dx_show_leaf(hinfo, (struct ext4_dir_entry_2 *) bh->b_data, blocksize, 0); 709 dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *)
710 bh->b_data, blocksize, 0);
641 names += stats.names; 711 names += stats.names;
642 space += stats.space; 712 space += stats.space;
643 bcount += stats.bcount; 713 bcount += stats.bcount;
@@ -687,8 +757,28 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
687 if (hinfo->hash_version <= DX_HASH_TEA) 757 if (hinfo->hash_version <= DX_HASH_TEA)
688 hinfo->hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned; 758 hinfo->hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
689 hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed; 759 hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed;
760#ifdef CONFIG_EXT4_FS_ENCRYPTION
761 if (d_name) {
762 struct ext4_fname_crypto_ctx *ctx = NULL;
763 int res;
764
765 /* Check if the directory is encrypted */
766 ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN);
767 if (IS_ERR(ctx)) {
768 ret_err = ERR_PTR(PTR_ERR(ctx));
769 goto fail;
770 }
771 res = ext4_fname_usr_to_hash(ctx, d_name, hinfo);
772 if (res < 0) {
773 ret_err = ERR_PTR(res);
774 goto fail;
775 }
776 ext4_put_fname_crypto_ctx(&ctx);
777 }
778#else
690 if (d_name) 779 if (d_name)
691 ext4fs_dirhash(d_name->name, d_name->len, hinfo); 780 ext4fs_dirhash(d_name->name, d_name->len, hinfo);
781#endif
692 hash = hinfo->hash; 782 hash = hinfo->hash;
693 783
694 if (root->info.unused_flags & 1) { 784 if (root->info.unused_flags & 1) {
@@ -773,6 +863,7 @@ fail:
773 brelse(frame->bh); 863 brelse(frame->bh);
774 frame--; 864 frame--;
775 } 865 }
866
776 if (ret_err == ERR_PTR(ERR_BAD_DX_DIR)) 867 if (ret_err == ERR_PTR(ERR_BAD_DX_DIR))
777 ext4_warning(dir->i_sb, 868 ext4_warning(dir->i_sb,
778 "Corrupt dir inode %lu, running e2fsck is " 869 "Corrupt dir inode %lu, running e2fsck is "
@@ -878,6 +969,8 @@ static int htree_dirblock_to_tree(struct file *dir_file,
878 struct buffer_head *bh; 969 struct buffer_head *bh;
879 struct ext4_dir_entry_2 *de, *top; 970 struct ext4_dir_entry_2 *de, *top;
880 int err = 0, count = 0; 971 int err = 0, count = 0;
972 struct ext4_fname_crypto_ctx *ctx = NULL;
973 struct ext4_str fname_crypto_str = {.name = NULL, .len = 0}, tmp_str;
881 974
882 dxtrace(printk(KERN_INFO "In htree dirblock_to_tree: block %lu\n", 975 dxtrace(printk(KERN_INFO "In htree dirblock_to_tree: block %lu\n",
883 (unsigned long)block)); 976 (unsigned long)block));
@@ -889,6 +982,24 @@ static int htree_dirblock_to_tree(struct file *dir_file,
889 top = (struct ext4_dir_entry_2 *) ((char *) de + 982 top = (struct ext4_dir_entry_2 *) ((char *) de +
890 dir->i_sb->s_blocksize - 983 dir->i_sb->s_blocksize -
891 EXT4_DIR_REC_LEN(0)); 984 EXT4_DIR_REC_LEN(0));
985#ifdef CONFIG_EXT4_FS_ENCRYPTION
986 /* Check if the directory is encrypted */
987 ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN);
988 if (IS_ERR(ctx)) {
989 err = PTR_ERR(ctx);
990 brelse(bh);
991 return err;
992 }
993 if (ctx != NULL) {
994 err = ext4_fname_crypto_alloc_buffer(ctx, EXT4_NAME_LEN,
995 &fname_crypto_str);
996 if (err < 0) {
997 ext4_put_fname_crypto_ctx(&ctx);
998 brelse(bh);
999 return err;
1000 }
1001 }
1002#endif
892 for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) { 1003 for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) {
893 if (ext4_check_dir_entry(dir, NULL, de, bh, 1004 if (ext4_check_dir_entry(dir, NULL, de, bh,
894 bh->b_data, bh->b_size, 1005 bh->b_data, bh->b_size,
@@ -897,21 +1008,52 @@ static int htree_dirblock_to_tree(struct file *dir_file,
897 /* silently ignore the rest of the block */ 1008 /* silently ignore the rest of the block */
898 break; 1009 break;
899 } 1010 }
1011#ifdef CONFIG_EXT4_FS_ENCRYPTION
1012 err = ext4_fname_disk_to_hash(ctx, de, hinfo);
1013 if (err < 0) {
1014 count = err;
1015 goto errout;
1016 }
1017#else
900 ext4fs_dirhash(de->name, de->name_len, hinfo); 1018 ext4fs_dirhash(de->name, de->name_len, hinfo);
1019#endif
901 if ((hinfo->hash < start_hash) || 1020 if ((hinfo->hash < start_hash) ||
902 ((hinfo->hash == start_hash) && 1021 ((hinfo->hash == start_hash) &&
903 (hinfo->minor_hash < start_minor_hash))) 1022 (hinfo->minor_hash < start_minor_hash)))
904 continue; 1023 continue;
905 if (de->inode == 0) 1024 if (de->inode == 0)
906 continue; 1025 continue;
907 if ((err = ext4_htree_store_dirent(dir_file, 1026 if (ctx == NULL) {
908 hinfo->hash, hinfo->minor_hash, de)) != 0) { 1027 /* Directory is not encrypted */
909 brelse(bh); 1028 tmp_str.name = de->name;
910 return err; 1029 tmp_str.len = de->name_len;
1030 err = ext4_htree_store_dirent(dir_file,
1031 hinfo->hash, hinfo->minor_hash, de,
1032 &tmp_str);
1033 } else {
1034 /* Directory is encrypted */
1035 err = ext4_fname_disk_to_usr(ctx, de,
1036 &fname_crypto_str);
1037 if (err < 0) {
1038 count = err;
1039 goto errout;
1040 }
1041 err = ext4_htree_store_dirent(dir_file,
1042 hinfo->hash, hinfo->minor_hash, de,
1043 &fname_crypto_str);
1044 }
1045 if (err != 0) {
1046 count = err;
1047 goto errout;
911 } 1048 }
912 count++; 1049 count++;
913 } 1050 }
1051errout:
914 brelse(bh); 1052 brelse(bh);
1053#ifdef CONFIG_EXT4_FS_ENCRYPTION
1054 ext4_put_fname_crypto_ctx(&ctx);
1055 ext4_fname_crypto_free_buffer(&fname_crypto_str);
1056#endif
915 return count; 1057 return count;
916} 1058}
917 1059
@@ -935,6 +1077,7 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
935 int count = 0; 1077 int count = 0;
936 int ret, err; 1078 int ret, err;
937 __u32 hashval; 1079 __u32 hashval;
1080 struct ext4_str tmp_str;
938 1081
939 dxtrace(printk(KERN_DEBUG "In htree_fill_tree, start hash: %x:%x\n", 1082 dxtrace(printk(KERN_DEBUG "In htree_fill_tree, start hash: %x:%x\n",
940 start_hash, start_minor_hash)); 1083 start_hash, start_minor_hash));
@@ -970,14 +1113,22 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
970 /* Add '.' and '..' from the htree header */ 1113 /* Add '.' and '..' from the htree header */
971 if (!start_hash && !start_minor_hash) { 1114 if (!start_hash && !start_minor_hash) {
972 de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data; 1115 de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
973 if ((err = ext4_htree_store_dirent(dir_file, 0, 0, de)) != 0) 1116 tmp_str.name = de->name;
1117 tmp_str.len = de->name_len;
1118 err = ext4_htree_store_dirent(dir_file, 0, 0,
1119 de, &tmp_str);
1120 if (err != 0)
974 goto errout; 1121 goto errout;
975 count++; 1122 count++;
976 } 1123 }
977 if (start_hash < 2 || (start_hash ==2 && start_minor_hash==0)) { 1124 if (start_hash < 2 || (start_hash ==2 && start_minor_hash==0)) {
978 de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data; 1125 de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
979 de = ext4_next_entry(de, dir->i_sb->s_blocksize); 1126 de = ext4_next_entry(de, dir->i_sb->s_blocksize);
980 if ((err = ext4_htree_store_dirent(dir_file, 2, 0, de)) != 0) 1127 tmp_str.name = de->name;
1128 tmp_str.len = de->name_len;
1129 err = ext4_htree_store_dirent(dir_file, 2, 0,
1130 de, &tmp_str);
1131 if (err != 0)
981 goto errout; 1132 goto errout;
982 count++; 1133 count++;
983 } 1134 }
@@ -1035,17 +1186,33 @@ static inline int search_dirblock(struct buffer_head *bh,
1035 * Create map of hash values, offsets, and sizes, stored at end of block. 1186 * Create map of hash values, offsets, and sizes, stored at end of block.
1036 * Returns number of entries mapped. 1187 * Returns number of entries mapped.
1037 */ 1188 */
1038static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize, 1189static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
1039 struct dx_hash_info *hinfo, 1190 unsigned blocksize, struct dx_hash_info *hinfo,
1040 struct dx_map_entry *map_tail) 1191 struct dx_map_entry *map_tail)
1041{ 1192{
1042 int count = 0; 1193 int count = 0;
1043 char *base = (char *) de; 1194 char *base = (char *) de;
1044 struct dx_hash_info h = *hinfo; 1195 struct dx_hash_info h = *hinfo;
1196#ifdef CONFIG_EXT4_FS_ENCRYPTION
1197 struct ext4_fname_crypto_ctx *ctx = NULL;
1198 int err;
1199
1200 ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN);
1201 if (IS_ERR(ctx))
1202 return PTR_ERR(ctx);
1203#endif
1045 1204
1046 while ((char *) de < base + blocksize) { 1205 while ((char *) de < base + blocksize) {
1047 if (de->name_len && de->inode) { 1206 if (de->name_len && de->inode) {
1207#ifdef CONFIG_EXT4_FS_ENCRYPTION
1208 err = ext4_fname_disk_to_hash(ctx, de, &h);
1209 if (err < 0) {
1210 ext4_put_fname_crypto_ctx(&ctx);
1211 return err;
1212 }
1213#else
1048 ext4fs_dirhash(de->name, de->name_len, &h); 1214 ext4fs_dirhash(de->name, de->name_len, &h);
1215#endif
1049 map_tail--; 1216 map_tail--;
1050 map_tail->hash = h.hash; 1217 map_tail->hash = h.hash;
1051 map_tail->offs = ((char *) de - base)>>2; 1218 map_tail->offs = ((char *) de - base)>>2;
@@ -1056,6 +1223,9 @@ static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
1056 /* XXX: do we need to check rec_len == 0 case? -Chris */ 1223 /* XXX: do we need to check rec_len == 0 case? -Chris */
1057 de = ext4_next_entry(de, blocksize); 1224 de = ext4_next_entry(de, blocksize);
1058 } 1225 }
1226#ifdef CONFIG_EXT4_FS_ENCRYPTION
1227 ext4_put_fname_crypto_ctx(&ctx);
1228#endif
1059 return count; 1229 return count;
1060} 1230}
1061 1231
@@ -1106,57 +1276,107 @@ static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block)
1106 * `len <= EXT4_NAME_LEN' is guaranteed by caller. 1276 * `len <= EXT4_NAME_LEN' is guaranteed by caller.
1107 * `de != NULL' is guaranteed by caller. 1277 * `de != NULL' is guaranteed by caller.
1108 */ 1278 */
1109static inline int ext4_match (int len, const char * const name, 1279static inline int ext4_match(struct ext4_fname_crypto_ctx *ctx,
1110 struct ext4_dir_entry_2 * de) 1280 struct ext4_str *fname_crypto_str,
1281 int len, const char * const name,
1282 struct ext4_dir_entry_2 *de)
1111{ 1283{
1112 if (len != de->name_len) 1284 int res;
1113 return 0; 1285
1114 if (!de->inode) 1286 if (!de->inode)
1115 return 0; 1287 return 0;
1116 return !memcmp(name, de->name, len); 1288
1289#ifdef CONFIG_EXT4_FS_ENCRYPTION
1290 if (ctx) {
1291 /* Directory is encrypted */
1292 res = ext4_fname_disk_to_usr(ctx, de, fname_crypto_str);
1293 if (res < 0)
1294 return res;
1295 if (len != res)
1296 return 0;
1297 res = memcmp(name, fname_crypto_str->name, len);
1298 return (res == 0) ? 1 : 0;
1299 }
1300#endif
1301 if (len != de->name_len)
1302 return 0;
1303 res = memcmp(name, de->name, len);
1304 return (res == 0) ? 1 : 0;
1117} 1305}
1118 1306
1119/* 1307/*
1120 * Returns 0 if not found, -1 on failure, and 1 on success 1308 * Returns 0 if not found, -1 on failure, and 1 on success
1121 */ 1309 */
1122int search_dir(struct buffer_head *bh, 1310int search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
1123 char *search_buf, 1311 struct inode *dir, const struct qstr *d_name,
1124 int buf_size, 1312 unsigned int offset, struct ext4_dir_entry_2 **res_dir)
1125 struct inode *dir,
1126 const struct qstr *d_name,
1127 unsigned int offset,
1128 struct ext4_dir_entry_2 **res_dir)
1129{ 1313{
1130 struct ext4_dir_entry_2 * de; 1314 struct ext4_dir_entry_2 * de;
1131 char * dlimit; 1315 char * dlimit;
1132 int de_len; 1316 int de_len;
1133 const char *name = d_name->name; 1317 const char *name = d_name->name;
1134 int namelen = d_name->len; 1318 int namelen = d_name->len;
1319 struct ext4_fname_crypto_ctx *ctx = NULL;
1320 struct ext4_str fname_crypto_str = {.name = NULL, .len = 0};
1321 int res;
1322
1323 ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN);
1324 if (IS_ERR(ctx))
1325 return -1;
1326
1327 if (ctx != NULL) {
1328 /* Allocate buffer to hold maximum name length */
1329 res = ext4_fname_crypto_alloc_buffer(ctx, EXT4_NAME_LEN,
1330 &fname_crypto_str);
1331 if (res < 0) {
1332 ext4_put_fname_crypto_ctx(&ctx);
1333 return -1;
1334 }
1335 }
1135 1336
1136 de = (struct ext4_dir_entry_2 *)search_buf; 1337 de = (struct ext4_dir_entry_2 *)search_buf;
1137 dlimit = search_buf + buf_size; 1338 dlimit = search_buf + buf_size;
1138 while ((char *) de < dlimit) { 1339 while ((char *) de < dlimit) {
1139 /* this code is executed quadratically often */ 1340 /* this code is executed quadratically often */
1140 /* do minimal checking `by hand' */ 1341 /* do minimal checking `by hand' */
1342 if ((char *) de + de->name_len <= dlimit) {
1343 res = ext4_match(ctx, &fname_crypto_str, namelen,
1344 name, de);
1345 if (res < 0) {
1346 res = -1;
1347 goto return_result;
1348 }
1349 if (res > 0) {
1350 /* found a match - just to be sure, do
1351 * a full check */
1352 if (ext4_check_dir_entry(dir, NULL, de, bh,
1353 bh->b_data,
1354 bh->b_size, offset)) {
1355 res = -1;
1356 goto return_result;
1357 }
1358 *res_dir = de;
1359 res = 1;
1360 goto return_result;
1361 }
1141 1362
1142 if ((char *) de + namelen <= dlimit &&
1143 ext4_match (namelen, name, de)) {
1144 /* found a match - just to be sure, do a full check */
1145 if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data,
1146 bh->b_size, offset))
1147 return -1;
1148 *res_dir = de;
1149 return 1;
1150 } 1363 }
1151 /* prevent looping on a bad block */ 1364 /* prevent looping on a bad block */
1152 de_len = ext4_rec_len_from_disk(de->rec_len, 1365 de_len = ext4_rec_len_from_disk(de->rec_len,
1153 dir->i_sb->s_blocksize); 1366 dir->i_sb->s_blocksize);
1154 if (de_len <= 0) 1367 if (de_len <= 0) {
1155 return -1; 1368 res = -1;
1369 goto return_result;
1370 }
1156 offset += de_len; 1371 offset += de_len;
1157 de = (struct ext4_dir_entry_2 *) ((char *) de + de_len); 1372 de = (struct ext4_dir_entry_2 *) ((char *) de + de_len);
1158 } 1373 }
1159 return 0; 1374
1375 res = 0;
1376return_result:
1377 ext4_put_fname_crypto_ctx(&ctx);
1378 ext4_fname_crypto_free_buffer(&fname_crypto_str);
1379 return res;
1160} 1380}
1161 1381
1162static int is_dx_internal_node(struct inode *dir, ext4_lblk_t block, 1382static int is_dx_internal_node(struct inode *dir, ext4_lblk_t block,
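Editor's note: the reworked ext4_match() above no longer returns a plain boolean. It reports a negative value when filename decryption fails, 0 for no match, and a positive value for a match, which is why both search_dir() and ext4_find_dest_de() check for res < 0 before res > 0. A tiny sketch of a caller honouring that contract (ctx, fname_crypto_str, name, namelen, de and the return_result label are assumed to exist exactly as in search_dir() above):

/* Sketch only: three-way result handling for the reworked ext4_match(). */
res = ext4_match(ctx, &fname_crypto_str, namelen, name, de);
if (res < 0)
        goto return_result;     /* crypto error, propagate it */
if (res > 0) {
        *res_dir = de;          /* match found */
        res = 1;
        goto return_result;
}
/* res == 0: no match, advance to the next directory entry */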
@@ -1345,6 +1565,9 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct q
1345 ext4_lblk_t block; 1565 ext4_lblk_t block;
1346 int retval; 1566 int retval;
1347 1567
1568#ifdef CONFIG_EXT4_FS_ENCRYPTION
1569 *res_dir = NULL;
1570#endif
1348 frame = dx_probe(d_name, dir, &hinfo, frames); 1571 frame = dx_probe(d_name, dir, &hinfo, frames);
1349 if (IS_ERR(frame)) 1572 if (IS_ERR(frame))
1350 return (struct buffer_head *) frame; 1573 return (struct buffer_head *) frame;
@@ -1417,6 +1640,18 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
1417 ino); 1640 ino);
1418 return ERR_PTR(-EIO); 1641 return ERR_PTR(-EIO);
1419 } 1642 }
1643 if (!IS_ERR(inode) && ext4_encrypted_inode(dir) &&
1644 (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1645 S_ISLNK(inode->i_mode)) &&
1646 !ext4_is_child_context_consistent_with_parent(dir,
1647 inode)) {
1648 iput(inode);
1649 ext4_warning(inode->i_sb,
1650 "Inconsistent encryption contexts: %lu/%lu\n",
1651 (unsigned long) dir->i_ino,
1652 (unsigned long) inode->i_ino);
1653 return ERR_PTR(-EPERM);
1654 }
1420 } 1655 }
1421 return d_splice_alias(inode, dentry); 1656 return d_splice_alias(inode, dentry);
1422} 1657}
@@ -1541,7 +1776,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1541 1776
1542 /* create map in the end of data2 block */ 1777 /* create map in the end of data2 block */
1543 map = (struct dx_map_entry *) (data2 + blocksize); 1778 map = (struct dx_map_entry *) (data2 + blocksize);
1544 count = dx_make_map((struct ext4_dir_entry_2 *) data1, 1779 count = dx_make_map(dir, (struct ext4_dir_entry_2 *) data1,
1545 blocksize, hinfo, map); 1780 blocksize, hinfo, map);
1546 map -= count; 1781 map -= count;
1547 dx_sort_map(map, count); 1782 dx_sort_map(map, count);
@@ -1564,7 +1799,8 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1564 hash2, split, count-split)); 1799 hash2, split, count-split));
1565 1800
1566 /* Fancy dance to stay within two buffers */ 1801 /* Fancy dance to stay within two buffers */
1567 de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize); 1802 de2 = dx_move_dirents(data1, data2, map + split, count - split,
1803 blocksize);
1568 de = dx_pack_dirents(data1, blocksize); 1804 de = dx_pack_dirents(data1, blocksize);
1569 de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) - 1805 de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
1570 (char *) de, 1806 (char *) de,
@@ -1580,8 +1816,10 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1580 initialize_dirent_tail(t, blocksize); 1816 initialize_dirent_tail(t, blocksize);
1581 } 1817 }
1582 1818
1583 dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1)); 1819 dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data1,
1584 dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1)); 1820 blocksize, 1));
1821 dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data2,
1822 blocksize, 1));
1585 1823
1586 /* Which block gets the new entry? */ 1824 /* Which block gets the new entry? */
1587 if (hinfo->hash >= hash2) { 1825 if (hinfo->hash >= hash2) {
@@ -1618,15 +1856,48 @@ int ext4_find_dest_de(struct inode *dir, struct inode *inode,
1618 int nlen, rlen; 1856 int nlen, rlen;
1619 unsigned int offset = 0; 1857 unsigned int offset = 0;
1620 char *top; 1858 char *top;
1859 struct ext4_fname_crypto_ctx *ctx = NULL;
1860 struct ext4_str fname_crypto_str = {.name = NULL, .len = 0};
1861 int res;
1862
1863 ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN);
1864 if (IS_ERR(ctx))
1865 return -1;
1866
1867 if (ctx != NULL) {
1868 /* Calculate record length needed to store the entry */
1869 res = ext4_fname_crypto_namelen_on_disk(ctx, namelen);
1870 if (res < 0) {
1871 ext4_put_fname_crypto_ctx(&ctx);
1872 return res;
1873 }
1874 reclen = EXT4_DIR_REC_LEN(res);
1875
1876 /* Allocate buffer to hold maximum name length */
1877 res = ext4_fname_crypto_alloc_buffer(ctx, EXT4_NAME_LEN,
1878 &fname_crypto_str);
1879 if (res < 0) {
1880 ext4_put_fname_crypto_ctx(&ctx);
1881 return -1;
1882 }
1883 }
1621 1884
1622 de = (struct ext4_dir_entry_2 *)buf; 1885 de = (struct ext4_dir_entry_2 *)buf;
1623 top = buf + buf_size - reclen; 1886 top = buf + buf_size - reclen;
1624 while ((char *) de <= top) { 1887 while ((char *) de <= top) {
1625 if (ext4_check_dir_entry(dir, NULL, de, bh, 1888 if (ext4_check_dir_entry(dir, NULL, de, bh,
1626 buf, buf_size, offset)) 1889 buf, buf_size, offset)) {
1627 return -EIO; 1890 res = -EIO;
1628 if (ext4_match(namelen, name, de)) 1891 goto return_result;
1629 return -EEXIST; 1892 }
1893 /* Provide crypto context and crypto buffer to ext4 match */
1894 res = ext4_match(ctx, &fname_crypto_str, namelen, name, de);
1895 if (res < 0)
1896 goto return_result;
1897 if (res > 0) {
1898 res = -EEXIST;
1899 goto return_result;
1900 }
1630 nlen = EXT4_DIR_REC_LEN(de->name_len); 1901 nlen = EXT4_DIR_REC_LEN(de->name_len);
1631 rlen = ext4_rec_len_from_disk(de->rec_len, buf_size); 1902 rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
1632 if ((de->inode ? rlen - nlen : rlen) >= reclen) 1903 if ((de->inode ? rlen - nlen : rlen) >= reclen)
@@ -1634,26 +1905,62 @@ int ext4_find_dest_de(struct inode *dir, struct inode *inode,
1634 de = (struct ext4_dir_entry_2 *)((char *)de + rlen); 1905 de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
1635 offset += rlen; 1906 offset += rlen;
1636 } 1907 }
1637 if ((char *) de > top)
1638 return -ENOSPC;
1639 1908
1640 *dest_de = de; 1909 if ((char *) de > top)
1641 return 0; 1910 res = -ENOSPC;
1911 else {
1912 *dest_de = de;
1913 res = 0;
1914 }
1915return_result:
1916 ext4_put_fname_crypto_ctx(&ctx);
1917 ext4_fname_crypto_free_buffer(&fname_crypto_str);
1918 return res;
1642} 1919}
1643 1920
1644void ext4_insert_dentry(struct inode *inode, 1921int ext4_insert_dentry(struct inode *dir,
1645 struct ext4_dir_entry_2 *de, 1922 struct inode *inode,
1646 int buf_size, 1923 struct ext4_dir_entry_2 *de,
1647 const char *name, int namelen) 1924 int buf_size,
1925 const struct qstr *iname,
1926 const char *name, int namelen)
1648{ 1927{
1649 1928
1650 int nlen, rlen; 1929 int nlen, rlen;
1930 struct ext4_fname_crypto_ctx *ctx = NULL;
1931 struct ext4_str fname_crypto_str = {.name = NULL, .len = 0};
1932 struct ext4_str tmp_str;
1933 int res;
1934
1935 ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN);
1936 if (IS_ERR(ctx))
1937 return -EIO;
1938 /* By default, the input name would be written to the disk */
1939 tmp_str.name = (unsigned char *)name;
1940 tmp_str.len = namelen;
1941 if (ctx != NULL) {
1942 /* Directory is encrypted */
1943 res = ext4_fname_crypto_alloc_buffer(ctx, EXT4_NAME_LEN,
1944 &fname_crypto_str);
1945 if (res < 0) {
1946 ext4_put_fname_crypto_ctx(&ctx);
1947 return -ENOMEM;
1948 }
1949 res = ext4_fname_usr_to_disk(ctx, iname, &fname_crypto_str);
1950 if (res < 0) {
1951 ext4_put_fname_crypto_ctx(&ctx);
1952 ext4_fname_crypto_free_buffer(&fname_crypto_str);
1953 return res;
1954 }
1955 tmp_str.name = fname_crypto_str.name;
1956 tmp_str.len = fname_crypto_str.len;
1957 }
1651 1958
1652 nlen = EXT4_DIR_REC_LEN(de->name_len); 1959 nlen = EXT4_DIR_REC_LEN(de->name_len);
1653 rlen = ext4_rec_len_from_disk(de->rec_len, buf_size); 1960 rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
1654 if (de->inode) { 1961 if (de->inode) {
1655 struct ext4_dir_entry_2 *de1 = 1962 struct ext4_dir_entry_2 *de1 =
1656 (struct ext4_dir_entry_2 *)((char *)de + nlen); 1963 (struct ext4_dir_entry_2 *)((char *)de + nlen);
1657 de1->rec_len = ext4_rec_len_to_disk(rlen - nlen, buf_size); 1964 de1->rec_len = ext4_rec_len_to_disk(rlen - nlen, buf_size);
1658 de->rec_len = ext4_rec_len_to_disk(nlen, buf_size); 1965 de->rec_len = ext4_rec_len_to_disk(nlen, buf_size);
1659 de = de1; 1966 de = de1;
@@ -1661,9 +1968,14 @@ void ext4_insert_dentry(struct inode *inode,
1661 de->file_type = EXT4_FT_UNKNOWN; 1968 de->file_type = EXT4_FT_UNKNOWN;
1662 de->inode = cpu_to_le32(inode->i_ino); 1969 de->inode = cpu_to_le32(inode->i_ino);
1663 ext4_set_de_type(inode->i_sb, de, inode->i_mode); 1970 ext4_set_de_type(inode->i_sb, de, inode->i_mode);
1664 de->name_len = namelen; 1971 de->name_len = tmp_str.len;
1665 memcpy(de->name, name, namelen); 1972
1973 memcpy(de->name, tmp_str.name, tmp_str.len);
1974 ext4_put_fname_crypto_ctx(&ctx);
1975 ext4_fname_crypto_free_buffer(&fname_crypto_str);
1976 return 0;
1666} 1977}
1978
1667/* 1979/*
1668 * Add a new entry into a directory (leaf) block. If de is non-NULL, 1980 * Add a new entry into a directory (leaf) block. If de is non-NULL,
1669 * it points to a directory entry which is guaranteed to be large 1981 * it points to a directory entry which is guaranteed to be large
@@ -1700,8 +2012,12 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
1700 return err; 2012 return err;
1701 } 2013 }
1702 2014
1703 /* By now the buffer is marked for journaling */ 2015 /* By now the buffer is marked for journaling. Due to crypto operations,
1704 ext4_insert_dentry(inode, de, blocksize, name, namelen); 2016 * the following function call may fail */
2017 err = ext4_insert_dentry(dir, inode, de, blocksize, &dentry->d_name,
2018 name, namelen);
2019 if (err < 0)
2020 return err;
1705 2021
1706 /* 2022 /*
1707 * XXX shouldn't update any times until successful 2023 * XXX shouldn't update any times until successful
@@ -1733,8 +2049,13 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
1733 struct inode *inode, struct buffer_head *bh) 2049 struct inode *inode, struct buffer_head *bh)
1734{ 2050{
1735 struct inode *dir = dentry->d_parent->d_inode; 2051 struct inode *dir = dentry->d_parent->d_inode;
2052#ifdef CONFIG_EXT4_FS_ENCRYPTION
2053 struct ext4_fname_crypto_ctx *ctx = NULL;
2054 int res;
2055#else
1736 const char *name = dentry->d_name.name; 2056 const char *name = dentry->d_name.name;
1737 int namelen = dentry->d_name.len; 2057 int namelen = dentry->d_name.len;
2058#endif
1738 struct buffer_head *bh2; 2059 struct buffer_head *bh2;
1739 struct dx_root *root; 2060 struct dx_root *root;
1740 struct dx_frame frames[2], *frame; 2061 struct dx_frame frames[2], *frame;
@@ -1748,7 +2069,13 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
1748 struct dx_hash_info hinfo; 2069 struct dx_hash_info hinfo;
1749 ext4_lblk_t block; 2070 ext4_lblk_t block;
1750 struct fake_dirent *fde; 2071 struct fake_dirent *fde;
1751 int csum_size = 0; 2072 int csum_size = 0;
2073
2074#ifdef CONFIG_EXT4_FS_ENCRYPTION
2075 ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN);
2076 if (IS_ERR(ctx))
2077 return PTR_ERR(ctx);
2078#endif
1752 2079
1753 if (ext4_has_metadata_csum(inode->i_sb)) 2080 if (ext4_has_metadata_csum(inode->i_sb))
1754 csum_size = sizeof(struct ext4_dir_entry_tail); 2081 csum_size = sizeof(struct ext4_dir_entry_tail);
@@ -1815,7 +2142,18 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
1815 if (hinfo.hash_version <= DX_HASH_TEA) 2142 if (hinfo.hash_version <= DX_HASH_TEA)
1816 hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned; 2143 hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
1817 hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed; 2144 hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
2145#ifdef CONFIG_EXT4_FS_ENCRYPTION
2146 res = ext4_fname_usr_to_hash(ctx, &dentry->d_name, &hinfo);
2147 if (res < 0) {
2148 ext4_put_fname_crypto_ctx(&ctx);
2149 ext4_mark_inode_dirty(handle, dir);
2150 brelse(bh);
2151 return res;
2152 }
2153 ext4_put_fname_crypto_ctx(&ctx);
2154#else
1818 ext4fs_dirhash(name, namelen, &hinfo); 2155 ext4fs_dirhash(name, namelen, &hinfo);
2156#endif
1819 memset(frames, 0, sizeof(frames)); 2157 memset(frames, 0, sizeof(frames));
1820 frame = frames; 2158 frame = frames;
1821 frame->entries = entries; 2159 frame->entries = entries;
@@ -1865,7 +2203,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1865 struct inode *inode) 2203 struct inode *inode)
1866{ 2204{
1867 struct inode *dir = dentry->d_parent->d_inode; 2205 struct inode *dir = dentry->d_parent->d_inode;
1868 struct buffer_head *bh; 2206 struct buffer_head *bh = NULL;
1869 struct ext4_dir_entry_2 *de; 2207 struct ext4_dir_entry_2 *de;
1870 struct ext4_dir_entry_tail *t; 2208 struct ext4_dir_entry_tail *t;
1871 struct super_block *sb; 2209 struct super_block *sb;
@@ -1889,14 +2227,14 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1889 return retval; 2227 return retval;
1890 if (retval == 1) { 2228 if (retval == 1) {
1891 retval = 0; 2229 retval = 0;
1892 return retval; 2230 goto out;
1893 } 2231 }
1894 } 2232 }
1895 2233
1896 if (is_dx(dir)) { 2234 if (is_dx(dir)) {
1897 retval = ext4_dx_add_entry(handle, dentry, inode); 2235 retval = ext4_dx_add_entry(handle, dentry, inode);
1898 if (!retval || (retval != ERR_BAD_DX_DIR)) 2236 if (!retval || (retval != ERR_BAD_DX_DIR))
1899 return retval; 2237 goto out;
1900 ext4_clear_inode_flag(dir, EXT4_INODE_INDEX); 2238 ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
1901 dx_fallback++; 2239 dx_fallback++;
1902 ext4_mark_inode_dirty(handle, dir); 2240 ext4_mark_inode_dirty(handle, dir);
@@ -1908,14 +2246,15 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1908 return PTR_ERR(bh); 2246 return PTR_ERR(bh);
1909 2247
1910 retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh); 2248 retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
1911 if (retval != -ENOSPC) { 2249 if (retval != -ENOSPC)
1912 brelse(bh); 2250 goto out;
1913 return retval;
1914 }
1915 2251
1916 if (blocks == 1 && !dx_fallback && 2252 if (blocks == 1 && !dx_fallback &&
1917 EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) 2253 EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) {
1918 return make_indexed_dir(handle, dentry, inode, bh); 2254 retval = make_indexed_dir(handle, dentry, inode, bh);
2255 bh = NULL; /* make_indexed_dir releases bh */
2256 goto out;
2257 }
1919 brelse(bh); 2258 brelse(bh);
1920 } 2259 }
1921 bh = ext4_append(handle, dir, &block); 2260 bh = ext4_append(handle, dir, &block);
@@ -1931,6 +2270,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1931 } 2270 }
1932 2271
1933 retval = add_dirent_to_buf(handle, dentry, inode, de, bh); 2272 retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
2273out:
1934 brelse(bh); 2274 brelse(bh);
1935 if (retval == 0) 2275 if (retval == 0)
1936 ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY); 2276 ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
@@ -2237,7 +2577,20 @@ retry:
2237 inode->i_op = &ext4_file_inode_operations; 2577 inode->i_op = &ext4_file_inode_operations;
2238 inode->i_fop = &ext4_file_operations; 2578 inode->i_fop = &ext4_file_operations;
2239 ext4_set_aops(inode); 2579 ext4_set_aops(inode);
2240 err = ext4_add_nondir(handle, dentry, inode); 2580 err = 0;
2581#ifdef CONFIG_EXT4_FS_ENCRYPTION
2582 if (!err && (ext4_encrypted_inode(dir) ||
2583 DUMMY_ENCRYPTION_ENABLED(EXT4_SB(dir->i_sb)))) {
2584 err = ext4_inherit_context(dir, inode);
2585 if (err) {
2586 clear_nlink(inode);
2587 unlock_new_inode(inode);
2588 iput(inode);
2589 }
2590 }
2591#endif
2592 if (!err)
2593 err = ext4_add_nondir(handle, dentry, inode);
2241 if (!err && IS_DIRSYNC(dir)) 2594 if (!err && IS_DIRSYNC(dir))
2242 ext4_handle_sync(handle); 2595 ext4_handle_sync(handle);
2243 } 2596 }
@@ -2418,6 +2771,14 @@ retry:
2418 err = ext4_init_new_dir(handle, dir, inode); 2771 err = ext4_init_new_dir(handle, dir, inode);
2419 if (err) 2772 if (err)
2420 goto out_clear_inode; 2773 goto out_clear_inode;
2774#ifdef CONFIG_EXT4_FS_ENCRYPTION
2775 if (ext4_encrypted_inode(dir) ||
2776 DUMMY_ENCRYPTION_ENABLED(EXT4_SB(dir->i_sb))) {
2777 err = ext4_inherit_context(dir, inode);
2778 if (err)
2779 goto out_clear_inode;
2780 }
2781#endif
2421 err = ext4_mark_inode_dirty(handle, inode); 2782 err = ext4_mark_inode_dirty(handle, inode);
2422 if (!err) 2783 if (!err)
2423 err = ext4_add_entry(handle, dentry, inode); 2784 err = ext4_add_entry(handle, dentry, inode);
@@ -2450,7 +2811,7 @@ out_stop:
2450/* 2811/*
2451 * routine to check that the specified directory is empty (for rmdir) 2812 * routine to check that the specified directory is empty (for rmdir)
2452 */ 2813 */
2453static int empty_dir(struct inode *inode) 2814int ext4_empty_dir(struct inode *inode)
2454{ 2815{
2455 unsigned int offset; 2816 unsigned int offset;
2456 struct buffer_head *bh; 2817 struct buffer_head *bh;
@@ -2718,7 +3079,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
2718 goto end_rmdir; 3079 goto end_rmdir;
2719 3080
2720 retval = -ENOTEMPTY; 3081 retval = -ENOTEMPTY;
2721 if (!empty_dir(inode)) 3082 if (!ext4_empty_dir(inode))
2722 goto end_rmdir; 3083 goto end_rmdir;
2723 3084
2724 handle = ext4_journal_start(dir, EXT4_HT_DIR, 3085 handle = ext4_journal_start(dir, EXT4_HT_DIR,
@@ -2828,16 +3189,25 @@ static int ext4_symlink(struct inode *dir,
2828{ 3189{
2829 handle_t *handle; 3190 handle_t *handle;
2830 struct inode *inode; 3191 struct inode *inode;
2831 int l, err, retries = 0; 3192 int err, len = strlen(symname);
2832 int credits; 3193 int credits;
2833 3194 bool encryption_required;
2834 l = strlen(symname)+1; 3195 struct ext4_str disk_link;
2835 if (l > dir->i_sb->s_blocksize) 3196 struct ext4_encrypted_symlink_data *sd = NULL;
3197
3198 disk_link.len = len + 1;
3199 disk_link.name = (char *) symname;
3200
3201 encryption_required = (ext4_encrypted_inode(dir) ||
3202 DUMMY_ENCRYPTION_ENABLED(EXT4_SB(dir->i_sb)));
3203 if (encryption_required)
3204 disk_link.len = encrypted_symlink_data_len(len) + 1;
3205 if (disk_link.len > dir->i_sb->s_blocksize)
2836 return -ENAMETOOLONG; 3206 return -ENAMETOOLONG;
2837 3207
2838 dquot_initialize(dir); 3208 dquot_initialize(dir);
2839 3209
2840 if (l > EXT4_N_BLOCKS * 4) { 3210 if ((disk_link.len > EXT4_N_BLOCKS * 4)) {
2841 /* 3211 /*
2842 * For non-fast symlinks, we just allocate inode and put it on 3212 * For non-fast symlinks, we just allocate inode and put it on
2843 * orphan list in the first transaction => we need bitmap, 3213 * orphan list in the first transaction => we need bitmap,
@@ -2856,16 +3226,49 @@ static int ext4_symlink(struct inode *dir,
2856 credits = EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + 3226 credits = EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
2857 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3; 3227 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3;
2858 } 3228 }
2859retry: 3229
2860 inode = ext4_new_inode_start_handle(dir, S_IFLNK|S_IRWXUGO, 3230 inode = ext4_new_inode_start_handle(dir, S_IFLNK|S_IRWXUGO,
2861 &dentry->d_name, 0, NULL, 3231 &dentry->d_name, 0, NULL,
2862 EXT4_HT_DIR, credits); 3232 EXT4_HT_DIR, credits);
2863 handle = ext4_journal_current_handle(); 3233 handle = ext4_journal_current_handle();
2864 err = PTR_ERR(inode); 3234 if (IS_ERR(inode)) {
2865 if (IS_ERR(inode)) 3235 if (handle)
2866 goto out_stop; 3236 ext4_journal_stop(handle);
3237 return PTR_ERR(inode);
3238 }
3239
3240 if (encryption_required) {
3241 struct ext4_fname_crypto_ctx *ctx = NULL;
3242 struct qstr istr;
3243 struct ext4_str ostr;
3244
3245 sd = kzalloc(disk_link.len, GFP_NOFS);
3246 if (!sd) {
3247 err = -ENOMEM;
3248 goto err_drop_inode;
3249 }
3250 err = ext4_inherit_context(dir, inode);
3251 if (err)
3252 goto err_drop_inode;
3253 ctx = ext4_get_fname_crypto_ctx(inode,
3254 inode->i_sb->s_blocksize);
3255 if (IS_ERR_OR_NULL(ctx)) {
3256 /* We just set the policy, so ctx should not be NULL */
3257 err = (ctx == NULL) ? -EIO : PTR_ERR(ctx);
3258 goto err_drop_inode;
3259 }
3260 istr.name = (const unsigned char *) symname;
3261 istr.len = len;
3262 ostr.name = sd->encrypted_path;
3263 err = ext4_fname_usr_to_disk(ctx, &istr, &ostr);
3264 ext4_put_fname_crypto_ctx(&ctx);
3265 if (err < 0)
3266 goto err_drop_inode;
3267 sd->len = cpu_to_le16(ostr.len);
3268 disk_link.name = (char *) sd;
3269 }
2867 3270
2868 if (l > EXT4_N_BLOCKS * 4) { 3271 if ((disk_link.len > EXT4_N_BLOCKS * 4)) {
2869 inode->i_op = &ext4_symlink_inode_operations; 3272 inode->i_op = &ext4_symlink_inode_operations;
2870 ext4_set_aops(inode); 3273 ext4_set_aops(inode);
2871 /* 3274 /*
@@ -2881,9 +3284,10 @@ retry:
2881 drop_nlink(inode); 3284 drop_nlink(inode);
2882 err = ext4_orphan_add(handle, inode); 3285 err = ext4_orphan_add(handle, inode);
2883 ext4_journal_stop(handle); 3286 ext4_journal_stop(handle);
3287 handle = NULL;
2884 if (err) 3288 if (err)
2885 goto err_drop_inode; 3289 goto err_drop_inode;
2886 err = __page_symlink(inode, symname, l, 1); 3290 err = __page_symlink(inode, disk_link.name, disk_link.len, 1);
2887 if (err) 3291 if (err)
2888 goto err_drop_inode; 3292 goto err_drop_inode;
2889 /* 3293 /*
@@ -2895,34 +3299,37 @@ retry:
2895 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 1); 3299 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 1);
2896 if (IS_ERR(handle)) { 3300 if (IS_ERR(handle)) {
2897 err = PTR_ERR(handle); 3301 err = PTR_ERR(handle);
3302 handle = NULL;
2898 goto err_drop_inode; 3303 goto err_drop_inode;
2899 } 3304 }
2900 set_nlink(inode, 1); 3305 set_nlink(inode, 1);
2901 err = ext4_orphan_del(handle, inode); 3306 err = ext4_orphan_del(handle, inode);
2902 if (err) { 3307 if (err)
2903 ext4_journal_stop(handle);
2904 clear_nlink(inode);
2905 goto err_drop_inode; 3308 goto err_drop_inode;
2906 }
2907 } else { 3309 } else {
2908 /* clear the extent format for fast symlink */ 3310 /* clear the extent format for fast symlink */
2909 ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS); 3311 ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
2910 inode->i_op = &ext4_fast_symlink_inode_operations; 3312 inode->i_op = encryption_required ?
2911 memcpy((char *)&EXT4_I(inode)->i_data, symname, l); 3313 &ext4_symlink_inode_operations :
2912 inode->i_size = l-1; 3314 &ext4_fast_symlink_inode_operations;
3315 memcpy((char *)&EXT4_I(inode)->i_data, disk_link.name,
3316 disk_link.len);
3317 inode->i_size = disk_link.len - 1;
2913 } 3318 }
2914 EXT4_I(inode)->i_disksize = inode->i_size; 3319 EXT4_I(inode)->i_disksize = inode->i_size;
2915 err = ext4_add_nondir(handle, dentry, inode); 3320 err = ext4_add_nondir(handle, dentry, inode);
2916 if (!err && IS_DIRSYNC(dir)) 3321 if (!err && IS_DIRSYNC(dir))
2917 ext4_handle_sync(handle); 3322 ext4_handle_sync(handle);
2918 3323
2919out_stop:
2920 if (handle) 3324 if (handle)
2921 ext4_journal_stop(handle); 3325 ext4_journal_stop(handle);
2922 if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) 3326 kfree(sd);
2923 goto retry;
2924 return err; 3327 return err;
2925err_drop_inode: 3328err_drop_inode:
3329 if (handle)
3330 ext4_journal_stop(handle);
3331 kfree(sd);
3332 clear_nlink(inode);
2926 unlock_new_inode(inode); 3333 unlock_new_inode(inode);
2927 iput(inode); 3334 iput(inode);
2928 return err; 3335 return err;
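Editor's note: for encrypted symlinks, ext4_symlink() above no longer stores the plain target string; it encrypts the target into an ext4_encrypted_symlink_data payload whose size comes from encrypted_symlink_data_len(). A hedged sketch of just that conversion, mirroring the hunk above with the inode and journalling handling stripped away (example_build_encrypted_target() is illustrative, not part of the patch):

/* Sketch: build the on-disk payload for an encrypted symlink target.
 * Assumes `inode` already carries an encryption context, as set up by
 * ext4_inherit_context() in the hunk above. */
static int example_build_encrypted_target(struct inode *inode,
                                          const char *symname,
                                          struct ext4_encrypted_symlink_data **out)
{
        struct ext4_fname_crypto_ctx *ctx;
        struct ext4_encrypted_symlink_data *sd;
        struct qstr istr;
        struct ext4_str ostr;
        int err;

        istr.name = (const unsigned char *)symname;
        istr.len = strlen(symname);

        sd = kzalloc(encrypted_symlink_data_len(istr.len) + 1, GFP_NOFS);
        if (!sd)
                return -ENOMEM;

        ctx = ext4_get_fname_crypto_ctx(inode, inode->i_sb->s_blocksize);
        if (IS_ERR_OR_NULL(ctx)) {
                /* The policy was just inherited, so NULL is unexpected. */
                kfree(sd);
                return ctx ? PTR_ERR(ctx) : -EIO;
        }

        ostr.name = sd->encrypted_path;
        err = ext4_fname_usr_to_disk(ctx, &istr, &ostr);
        ext4_put_fname_crypto_ctx(&ctx);
        if (err < 0) {
                kfree(sd);
                return err;
        }
        sd->len = cpu_to_le16(ostr.len);
        *out = sd;      /* caller stores this as disk_link.name */
        return 0;
}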
@@ -2937,7 +3344,9 @@ static int ext4_link(struct dentry *old_dentry,
2937 3344
2938 if (inode->i_nlink >= EXT4_LINK_MAX) 3345 if (inode->i_nlink >= EXT4_LINK_MAX)
2939 return -EMLINK; 3346 return -EMLINK;
2940 3347 if (ext4_encrypted_inode(dir) &&
3348 !ext4_is_child_context_consistent_with_parent(dir, inode))
3349 return -EPERM;
2941 dquot_initialize(dir); 3350 dquot_initialize(dir);
2942 3351
2943retry: 3352retry:
@@ -3238,6 +3647,14 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
3238 if (!old.bh || le32_to_cpu(old.de->inode) != old.inode->i_ino) 3647 if (!old.bh || le32_to_cpu(old.de->inode) != old.inode->i_ino)
3239 goto end_rename; 3648 goto end_rename;
3240 3649
3650 if ((old.dir != new.dir) &&
3651 ext4_encrypted_inode(new.dir) &&
3652 !ext4_is_child_context_consistent_with_parent(new.dir,
3653 old.inode)) {
3654 retval = -EPERM;
3655 goto end_rename;
3656 }
3657
3241 new.bh = ext4_find_entry(new.dir, &new.dentry->d_name, 3658 new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
3242 &new.de, &new.inlined); 3659 &new.de, &new.inlined);
3243 if (IS_ERR(new.bh)) { 3660 if (IS_ERR(new.bh)) {
@@ -3258,12 +3675,18 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
3258 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2); 3675 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2);
3259 if (!(flags & RENAME_WHITEOUT)) { 3676 if (!(flags & RENAME_WHITEOUT)) {
3260 handle = ext4_journal_start(old.dir, EXT4_HT_DIR, credits); 3677 handle = ext4_journal_start(old.dir, EXT4_HT_DIR, credits);
3261 if (IS_ERR(handle)) 3678 if (IS_ERR(handle)) {
3262 return PTR_ERR(handle); 3679 retval = PTR_ERR(handle);
3680 handle = NULL;
3681 goto end_rename;
3682 }
3263 } else { 3683 } else {
3264 whiteout = ext4_whiteout_for_rename(&old, credits, &handle); 3684 whiteout = ext4_whiteout_for_rename(&old, credits, &handle);
3265 if (IS_ERR(whiteout)) 3685 if (IS_ERR(whiteout)) {
3266 return PTR_ERR(whiteout); 3686 retval = PTR_ERR(whiteout);
3687 whiteout = NULL;
3688 goto end_rename;
3689 }
3267 } 3690 }
3268 3691
3269 if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir)) 3692 if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir))
@@ -3272,7 +3695,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
3272 if (S_ISDIR(old.inode->i_mode)) { 3695 if (S_ISDIR(old.inode->i_mode)) {
3273 if (new.inode) { 3696 if (new.inode) {
3274 retval = -ENOTEMPTY; 3697 retval = -ENOTEMPTY;
3275 if (!empty_dir(new.inode)) 3698 if (!ext4_empty_dir(new.inode))
3276 goto end_rename; 3699 goto end_rename;
3277 } else { 3700 } else {
3278 retval = -EMLINK; 3701 retval = -EMLINK;
@@ -3346,8 +3769,9 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
3346 3769
3347 ext4_dec_count(handle, old.dir); 3770 ext4_dec_count(handle, old.dir);
3348 if (new.inode) { 3771 if (new.inode) {
3349 /* checked empty_dir above, can't have another parent, 3772 /* checked ext4_empty_dir above, can't have another
3350 * ext4_dec_count() won't work for many-linked dirs */ 3773 * parent, ext4_dec_count() won't work for many-linked
3774 * dirs */
3351 clear_nlink(new.inode); 3775 clear_nlink(new.inode);
3352 } else { 3776 } else {
3353 ext4_inc_count(handle, new.dir); 3777 ext4_inc_count(handle, new.dir);
@@ -3427,8 +3851,11 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
3427 handle = ext4_journal_start(old.dir, EXT4_HT_DIR, 3851 handle = ext4_journal_start(old.dir, EXT4_HT_DIR,
3428 (2 * EXT4_DATA_TRANS_BLOCKS(old.dir->i_sb) + 3852 (2 * EXT4_DATA_TRANS_BLOCKS(old.dir->i_sb) +
3429 2 * EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2)); 3853 2 * EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2));
3430 if (IS_ERR(handle)) 3854 if (IS_ERR(handle)) {
3431 return PTR_ERR(handle); 3855 retval = PTR_ERR(handle);
3856 handle = NULL;
3857 goto end_rename;
3858 }
3432 3859
3433 if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir)) 3860 if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir))
3434 ext4_handle_sync(handle); 3861 ext4_handle_sync(handle);
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 464984261e69..5765f88b3904 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -8,7 +8,6 @@
8 8
9#include <linux/fs.h> 9#include <linux/fs.h>
10#include <linux/time.h> 10#include <linux/time.h>
11#include <linux/jbd2.h>
12#include <linux/highuid.h> 11#include <linux/highuid.h>
13#include <linux/pagemap.h> 12#include <linux/pagemap.h>
14#include <linux/quotaops.h> 13#include <linux/quotaops.h>
@@ -24,7 +23,6 @@
24#include <linux/kernel.h> 23#include <linux/kernel.h>
25#include <linux/slab.h> 24#include <linux/slab.h>
26#include <linux/mm.h> 25#include <linux/mm.h>
27#include <linux/ratelimit.h>
28 26
29#include "ext4_jbd2.h" 27#include "ext4_jbd2.h"
30#include "xattr.h" 28#include "xattr.h"
@@ -68,6 +66,10 @@ static void ext4_finish_bio(struct bio *bio)
68 66
69 bio_for_each_segment_all(bvec, bio, i) { 67 bio_for_each_segment_all(bvec, bio, i) {
70 struct page *page = bvec->bv_page; 68 struct page *page = bvec->bv_page;
69#ifdef CONFIG_EXT4_FS_ENCRYPTION
70 struct page *data_page = NULL;
71 struct ext4_crypto_ctx *ctx = NULL;
72#endif
71 struct buffer_head *bh, *head; 73 struct buffer_head *bh, *head;
72 unsigned bio_start = bvec->bv_offset; 74 unsigned bio_start = bvec->bv_offset;
73 unsigned bio_end = bio_start + bvec->bv_len; 75 unsigned bio_end = bio_start + bvec->bv_len;
@@ -77,6 +79,15 @@ static void ext4_finish_bio(struct bio *bio)
77 if (!page) 79 if (!page)
78 continue; 80 continue;
79 81
82#ifdef CONFIG_EXT4_FS_ENCRYPTION
83 if (!page->mapping) {
84 /* The bounce data pages are unmapped. */
85 data_page = page;
86 ctx = (struct ext4_crypto_ctx *)page_private(data_page);
87 page = ctx->control_page;
88 }
89#endif
90
80 if (error) { 91 if (error) {
81 SetPageError(page); 92 SetPageError(page);
82 set_bit(AS_EIO, &page->mapping->flags); 93 set_bit(AS_EIO, &page->mapping->flags);
@@ -101,8 +112,13 @@ static void ext4_finish_bio(struct bio *bio)
101 } while ((bh = bh->b_this_page) != head); 112 } while ((bh = bh->b_this_page) != head);
102 bit_spin_unlock(BH_Uptodate_Lock, &head->b_state); 113 bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
103 local_irq_restore(flags); 114 local_irq_restore(flags);
104 if (!under_io) 115 if (!under_io) {
116#ifdef CONFIG_EXT4_FS_ENCRYPTION
117 if (ctx)
118 ext4_restore_control_page(data_page);
119#endif
105 end_page_writeback(page); 120 end_page_writeback(page);
121 }
106 } 122 }
107} 123}
108 124
@@ -377,6 +393,7 @@ static int io_submit_init_bio(struct ext4_io_submit *io,
377 393
378static int io_submit_add_bh(struct ext4_io_submit *io, 394static int io_submit_add_bh(struct ext4_io_submit *io,
379 struct inode *inode, 395 struct inode *inode,
396 struct page *page,
380 struct buffer_head *bh) 397 struct buffer_head *bh)
381{ 398{
382 int ret; 399 int ret;
@@ -390,7 +407,7 @@ submit_and_retry:
390 if (ret) 407 if (ret)
391 return ret; 408 return ret;
392 } 409 }
393 ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh)); 410 ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
394 if (ret != bh->b_size) 411 if (ret != bh->b_size)
395 goto submit_and_retry; 412 goto submit_and_retry;
396 io->io_next_block++; 413 io->io_next_block++;
@@ -403,6 +420,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
403 struct writeback_control *wbc, 420 struct writeback_control *wbc,
404 bool keep_towrite) 421 bool keep_towrite)
405{ 422{
423 struct page *data_page = NULL;
406 struct inode *inode = page->mapping->host; 424 struct inode *inode = page->mapping->host;
407 unsigned block_start, blocksize; 425 unsigned block_start, blocksize;
408 struct buffer_head *bh, *head; 426 struct buffer_head *bh, *head;
@@ -462,19 +480,29 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
462 set_buffer_async_write(bh); 480 set_buffer_async_write(bh);
463 } while ((bh = bh->b_this_page) != head); 481 } while ((bh = bh->b_this_page) != head);
464 482
465 /* Now submit buffers to write */
466 bh = head = page_buffers(page); 483 bh = head = page_buffers(page);
484
485 if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
486 data_page = ext4_encrypt(inode, page);
487 if (IS_ERR(data_page)) {
488 ret = PTR_ERR(data_page);
489 data_page = NULL;
490 goto out;
491 }
492 }
493
494 /* Now submit buffers to write */
467 do { 495 do {
468 if (!buffer_async_write(bh)) 496 if (!buffer_async_write(bh))
469 continue; 497 continue;
470 ret = io_submit_add_bh(io, inode, bh); 498 ret = io_submit_add_bh(io, inode,
499 data_page ? data_page : page, bh);
471 if (ret) { 500 if (ret) {
472 /* 501 /*
473 * We only get here on ENOMEM. Not much else 502 * We only get here on ENOMEM. Not much else
474 * we can do but mark the page as dirty, and 503 * we can do but mark the page as dirty, and
475 * better luck next time. 504 * better luck next time.
476 */ 505 */
477 redirty_page_for_writepage(wbc, page);
478 break; 506 break;
479 } 507 }
480 nr_submitted++; 508 nr_submitted++;
@@ -483,6 +511,11 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
483 511
484 /* Error stopped previous loop? Clean up buffers... */ 512 /* Error stopped previous loop? Clean up buffers... */
485 if (ret) { 513 if (ret) {
514 out:
515 if (data_page)
516 ext4_restore_control_page(data_page);
517 printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
518 redirty_page_for_writepage(wbc, page);
486 do { 519 do {
487 clear_buffer_async_write(bh); 520 clear_buffer_async_write(bh);
488 bh = bh->b_this_page; 521 bh = bh->b_this_page;
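Editor's note: the page-io.c changes route writes for encrypted regular files through a bounce page. ext4_encrypt() returns an encrypted copy whose page_private points at the crypto context, the bio carries the bounce page, and ext4_finish_bio() (or the error path) hands it back with ext4_restore_control_page(). A condensed sketch of that ownership flow, using only calls visible in the hunks above; io, inode, page, bh and wbc are assumed to come from the surrounding writeback machinery:

/* Sketch of the encrypted write-out flow in ext4_bio_write_page() above;
 * not a drop-in function. */
struct page *data_page = NULL;
int ret;

if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
        data_page = ext4_encrypt(inode, page);  /* bounce page owns a ctx */
        if (IS_ERR(data_page))
                return PTR_ERR(data_page);
}

/* Submit the bounce page when one exists, the page cache page otherwise. */
ret = io_submit_add_bh(io, inode, data_page ? data_page : page, bh);
if (ret) {
        if (data_page)
                ext4_restore_control_page(data_page);   /* give the ctx back */
        redirty_page_for_writepage(wbc, page);
        return ret;
}
/* On success, ext4_finish_bio() finds the ctx via page_private(data_page)
 * and calls ext4_restore_control_page() once the I/O completes. */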
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
new file mode 100644
index 000000000000..171b9ac4b45e
--- /dev/null
+++ b/fs/ext4/readpage.c
@@ -0,0 +1,328 @@
1/*
2 * linux/fs/ext4/readpage.c
3 *
4 * Copyright (C) 2002, Linus Torvalds.
5 * Copyright (C) 2015, Google, Inc.
6 *
7 * This was originally taken from fs/mpage.c
8 *
 9 * The ext4_mpage_readpages() function here is intended to replace
 10 * mpage_readpages() in the general case, not just for encrypted
 11 * files. It has some limitations (see below), where it will fall
 12 * back to block_read_full_page(), but these limitations should
 13 * only be hit when page_size != block_size.
14 *
15 * This will allow us to attach a callback function to support ext4
16 * encryption.
17 *
18 * If anything unusual happens, such as:
19 *
20 * - encountering a page which has buffers
21 * - encountering a page which has a non-hole after a hole
22 * - encountering a page with non-contiguous blocks
23 *
24 * then this code just gives up and calls the buffer_head-based read function.
25 * It does handle a page which has holes at the end - that is a common case:
26 * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
27 *
28 */
29
30#include <linux/kernel.h>
31#include <linux/export.h>
32#include <linux/mm.h>
33#include <linux/kdev_t.h>
34#include <linux/gfp.h>
35#include <linux/bio.h>
36#include <linux/fs.h>
37#include <linux/buffer_head.h>
38#include <linux/blkdev.h>
39#include <linux/highmem.h>
40#include <linux/prefetch.h>
41#include <linux/mpage.h>
42#include <linux/writeback.h>
43#include <linux/backing-dev.h>
44#include <linux/pagevec.h>
45#include <linux/cleancache.h>
46
47#include "ext4.h"
48
49/*
50 * Call ext4_decrypt on every single page, reusing the encryption
51 * context.
52 */
53static void completion_pages(struct work_struct *work)
54{
55#ifdef CONFIG_EXT4_FS_ENCRYPTION
56 struct ext4_crypto_ctx *ctx =
57 container_of(work, struct ext4_crypto_ctx, work);
58 struct bio *bio = ctx->bio;
59 struct bio_vec *bv;
60 int i;
61
62 bio_for_each_segment_all(bv, bio, i) {
63 struct page *page = bv->bv_page;
64
65 int ret = ext4_decrypt(ctx, page);
66 if (ret) {
67 WARN_ON_ONCE(1);
68 SetPageError(page);
69 } else
70 SetPageUptodate(page);
71 unlock_page(page);
72 }
73 ext4_release_crypto_ctx(ctx);
74 bio_put(bio);
75#else
76 BUG();
77#endif
78}
79
80static inline bool ext4_bio_encrypted(struct bio *bio)
81{
82#ifdef CONFIG_EXT4_FS_ENCRYPTION
83 return unlikely(bio->bi_private != NULL);
84#else
85 return false;
86#endif
87}
88
89/*
90 * I/O completion handler for multipage BIOs.
91 *
92 * The mpage code never puts partial pages into a BIO (except for end-of-file).
93 * If a page does not map to a contiguous run of blocks then it simply falls
94 * back to block_read_full_page().
95 *
96 * Why is this? If a page's completion depends on a number of different BIOs
97 * which can complete in any order (or at the same time) then determining the
98 * status of that page is hard. See end_buffer_async_read() for the details.
99 * There is no point in duplicating all that complexity.
100 */
101static void mpage_end_io(struct bio *bio, int err)
102{
103 struct bio_vec *bv;
104 int i;
105
106 if (ext4_bio_encrypted(bio)) {
107 struct ext4_crypto_ctx *ctx = bio->bi_private;
108
109 if (err) {
110 ext4_release_crypto_ctx(ctx);
111 } else {
112 INIT_WORK(&ctx->work, completion_pages);
113 ctx->bio = bio;
114 queue_work(ext4_read_workqueue, &ctx->work);
115 return;
116 }
117 }
118 bio_for_each_segment_all(bv, bio, i) {
119 struct page *page = bv->bv_page;
120
121 if (!err) {
122 SetPageUptodate(page);
123 } else {
124 ClearPageUptodate(page);
125 SetPageError(page);
126 }
127 unlock_page(page);
128 }
129
130 bio_put(bio);
131}
132
133int ext4_mpage_readpages(struct address_space *mapping,
134 struct list_head *pages, struct page *page,
135 unsigned nr_pages)
136{
137 struct bio *bio = NULL;
138 unsigned page_idx;
139 sector_t last_block_in_bio = 0;
140
141 struct inode *inode = mapping->host;
142 const unsigned blkbits = inode->i_blkbits;
143 const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
144 const unsigned blocksize = 1 << blkbits;
145 sector_t block_in_file;
146 sector_t last_block;
147 sector_t last_block_in_file;
148 sector_t blocks[MAX_BUF_PER_PAGE];
149 unsigned page_block;
150 struct block_device *bdev = inode->i_sb->s_bdev;
151 int length;
152 unsigned relative_block = 0;
153 struct ext4_map_blocks map;
154
155 map.m_pblk = 0;
156 map.m_lblk = 0;
157 map.m_len = 0;
158 map.m_flags = 0;
159
160 for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {
161 int fully_mapped = 1;
162 unsigned first_hole = blocks_per_page;
163
164 prefetchw(&page->flags);
165 if (pages) {
166 page = list_entry(pages->prev, struct page, lru);
167 list_del(&page->lru);
168 if (add_to_page_cache_lru(page, mapping,
169 page->index, GFP_KERNEL))
170 goto next_page;
171 }
172
173 if (page_has_buffers(page))
174 goto confused;
175
176 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
177 last_block = block_in_file + nr_pages * blocks_per_page;
178 last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
179 if (last_block > last_block_in_file)
180 last_block = last_block_in_file;
181 page_block = 0;
182
183 /*
184 * Map blocks using the previous result first.
185 */
186 if ((map.m_flags & EXT4_MAP_MAPPED) &&
187 block_in_file > map.m_lblk &&
188 block_in_file < (map.m_lblk + map.m_len)) {
189 unsigned map_offset = block_in_file - map.m_lblk;
190 unsigned last = map.m_len - map_offset;
191
192 for (relative_block = 0; ; relative_block++) {
193 if (relative_block == last) {
194 /* needed? */
195 map.m_flags &= ~EXT4_MAP_MAPPED;
196 break;
197 }
198 if (page_block == blocks_per_page)
199 break;
200 blocks[page_block] = map.m_pblk + map_offset +
201 relative_block;
202 page_block++;
203 block_in_file++;
204 }
205 }
206
207 /*
208 * Then do more ext4_map_blocks() calls until we are
209 * done with this page.
210 */
211 while (page_block < blocks_per_page) {
212 if (block_in_file < last_block) {
213 map.m_lblk = block_in_file;
214 map.m_len = last_block - block_in_file;
215
216 if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
217 set_error_page:
218 SetPageError(page);
219 zero_user_segment(page, 0,
220 PAGE_CACHE_SIZE);
221 unlock_page(page);
222 goto next_page;
223 }
224 }
225 if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
226 fully_mapped = 0;
227 if (first_hole == blocks_per_page)
228 first_hole = page_block;
229 page_block++;
230 block_in_file++;
231 continue;
232 }
233 if (first_hole != blocks_per_page)
234 goto confused; /* hole -> non-hole */
235
236 /* Contiguous blocks? */
237 if (page_block && blocks[page_block-1] != map.m_pblk-1)
238 goto confused;
239 for (relative_block = 0; ; relative_block++) {
240 if (relative_block == map.m_len) {
241 /* needed? */
242 map.m_flags &= ~EXT4_MAP_MAPPED;
243 break;
244 } else if (page_block == blocks_per_page)
245 break;
246 blocks[page_block] = map.m_pblk+relative_block;
247 page_block++;
248 block_in_file++;
249 }
250 }
251 if (first_hole != blocks_per_page) {
252 zero_user_segment(page, first_hole << blkbits,
253 PAGE_CACHE_SIZE);
254 if (first_hole == 0) {
255 SetPageUptodate(page);
256 unlock_page(page);
257 goto next_page;
258 }
259 } else if (fully_mapped) {
260 SetPageMappedToDisk(page);
261 }
262 if (fully_mapped && blocks_per_page == 1 &&
263 !PageUptodate(page) && cleancache_get_page(page) == 0) {
264 SetPageUptodate(page);
265 goto confused;
266 }
267
268 /*
269 * This page will go to BIO. Do we need to send this
270 * BIO off first?
271 */
272 if (bio && (last_block_in_bio != blocks[0] - 1)) {
273 submit_and_realloc:
274 submit_bio(READ, bio);
275 bio = NULL;
276 }
277 if (bio == NULL) {
278 struct ext4_crypto_ctx *ctx = NULL;
279
280 if (ext4_encrypted_inode(inode) &&
281 S_ISREG(inode->i_mode)) {
282 ctx = ext4_get_crypto_ctx(inode);
283 if (IS_ERR(ctx))
284 goto set_error_page;
285 }
286 bio = bio_alloc(GFP_KERNEL,
287 min_t(int, nr_pages, bio_get_nr_vecs(bdev)));
288 if (!bio) {
289 if (ctx)
290 ext4_release_crypto_ctx(ctx);
291 goto set_error_page;
292 }
293 bio->bi_bdev = bdev;
294 bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
295 bio->bi_end_io = mpage_end_io;
296 bio->bi_private = ctx;
297 }
298
299 length = first_hole << blkbits;
300 if (bio_add_page(bio, page, length, 0) < length)
301 goto submit_and_realloc;
302
303 if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
304 (relative_block == map.m_len)) ||
305 (first_hole != blocks_per_page)) {
306 submit_bio(READ, bio);
307 bio = NULL;
308 } else
309 last_block_in_bio = blocks[blocks_per_page - 1];
310 goto next_page;
311 confused:
312 if (bio) {
313 submit_bio(READ, bio);
314 bio = NULL;
315 }
316 if (!PageUptodate(page))
317 block_read_full_page(page, ext4_get_block);
318 else
319 unlock_page(page);
320 next_page:
321 if (pages)
322 page_cache_release(page);
323 }
324 BUG_ON(pages && !list_empty(pages));
325 if (bio)
326 submit_bio(READ, bio);
327 return 0;
328}
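Editor's note: as the header comment of readpage.c explains, ext4_mpage_readpages() is meant to stand in for mpage_readpages() so the read path can attach a decryption step; mpage_end_io() defers encrypted bios to completion_pages() on ext4_read_workqueue, and pages become uptodate only after ext4_decrypt() succeeds. The wiring into the readpage/readpages address_space operations is presumably done in inode.c (changed elsewhere in this series but not shown here); a hedged sketch of what such wiring would look like:

/* Sketch only: how a readpage/readpages pair could sit on top of
 * ext4_mpage_readpages().  The real ext4 hookup lives in inode.c,
 * which this hunk does not show. */
static int example_readpage(struct file *file, struct page *page)
{
        /* Single page: pass it directly, no pages list. */
        return ext4_mpage_readpages(page->mapping, NULL, page, 1);
}

static int example_readpages(struct file *file, struct address_space *mapping,
                             struct list_head *pages, unsigned nr_pages)
{
        /* Readahead: pages come in on the list, no single page. */
        return ext4_mpage_readpages(mapping, pages, NULL, nr_pages);
}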
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index d348c7d29d80..821f22dbe825 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -21,7 +21,6 @@
21#include <linux/fs.h> 21#include <linux/fs.h>
22#include <linux/time.h> 22#include <linux/time.h>
23#include <linux/vmalloc.h> 23#include <linux/vmalloc.h>
24#include <linux/jbd2.h>
25#include <linux/slab.h> 24#include <linux/slab.h>
26#include <linux/init.h> 25#include <linux/init.h>
27#include <linux/blkdev.h> 26#include <linux/blkdev.h>
@@ -323,22 +322,6 @@ static void save_error_info(struct super_block *sb, const char *func,
323 ext4_commit_super(sb, 1); 322 ext4_commit_super(sb, 1);
324} 323}
325 324
326/*
327 * The del_gendisk() function uninitializes the disk-specific data
328 * structures, including the bdi structure, without telling anyone
329 * else. Once this happens, any attempt to call mark_buffer_dirty()
330 * (for example, by ext4_commit_super), will cause a kernel OOPS.
331 * This is a kludge to prevent these oops until we can put in a proper
332 * hook in del_gendisk() to inform the VFS and file system layers.
333 */
334static int block_device_ejected(struct super_block *sb)
335{
336 struct inode *bd_inode = sb->s_bdev->bd_inode;
337 struct backing_dev_info *bdi = inode_to_bdi(bd_inode);
338
339 return bdi->dev == NULL;
340}
341
342static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn) 325static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
343{ 326{
344 struct super_block *sb = journal->j_private; 327 struct super_block *sb = journal->j_private;
@@ -893,6 +876,9 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
893 atomic_set(&ei->i_ioend_count, 0); 876 atomic_set(&ei->i_ioend_count, 0);
894 atomic_set(&ei->i_unwritten, 0); 877 atomic_set(&ei->i_unwritten, 0);
895 INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work); 878 INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
879#ifdef CONFIG_EXT4_FS_ENCRYPTION
880 ei->i_encryption_key.mode = EXT4_ENCRYPTION_MODE_INVALID;
881#endif
896 882
897 return &ei->vfs_inode; 883 return &ei->vfs_inode;
898} 884}
@@ -1120,7 +1106,7 @@ enum {
1120 Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev, 1106 Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
1121 Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit, 1107 Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
1122 Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback, 1108 Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
1123 Opt_data_err_abort, Opt_data_err_ignore, 1109 Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
1124 Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota, 1110 Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
1125 Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota, 1111 Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
1126 Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err, 1112 Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
@@ -1211,6 +1197,7 @@ static const match_table_t tokens = {
1211 {Opt_init_itable, "init_itable"}, 1197 {Opt_init_itable, "init_itable"},
1212 {Opt_noinit_itable, "noinit_itable"}, 1198 {Opt_noinit_itable, "noinit_itable"},
1213 {Opt_max_dir_size_kb, "max_dir_size_kb=%u"}, 1199 {Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
1200 {Opt_test_dummy_encryption, "test_dummy_encryption"},
1214 {Opt_removed, "check=none"}, /* mount option from ext2/3 */ 1201 {Opt_removed, "check=none"}, /* mount option from ext2/3 */
1215 {Opt_removed, "nocheck"}, /* mount option from ext2/3 */ 1202 {Opt_removed, "nocheck"}, /* mount option from ext2/3 */
1216 {Opt_removed, "reservation"}, /* mount option from ext2/3 */ 1203 {Opt_removed, "reservation"}, /* mount option from ext2/3 */
@@ -1412,6 +1399,7 @@ static const struct mount_opts {
1412 {Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT}, 1399 {Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
1413 {Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT}, 1400 {Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
1414 {Opt_max_dir_size_kb, 0, MOPT_GTE0}, 1401 {Opt_max_dir_size_kb, 0, MOPT_GTE0},
1402 {Opt_test_dummy_encryption, 0, MOPT_GTE0},
1415 {Opt_err, 0, 0} 1403 {Opt_err, 0, 0}
1416}; 1404};
1417 1405
@@ -1588,6 +1576,15 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
1588 } 1576 }
1589 *journal_ioprio = 1577 *journal_ioprio =
1590 IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg); 1578 IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
1579 } else if (token == Opt_test_dummy_encryption) {
1580#ifdef CONFIG_EXT4_FS_ENCRYPTION
1581 sbi->s_mount_flags |= EXT4_MF_TEST_DUMMY_ENCRYPTION;
1582 ext4_msg(sb, KERN_WARNING,
1583 "Test dummy encryption mode enabled");
1584#else
1585 ext4_msg(sb, KERN_WARNING,
1586 "Test dummy encryption mount option ignored");
1587#endif
1591 } else if (m->flags & MOPT_DATAJ) { 1588 } else if (m->flags & MOPT_DATAJ) {
1592 if (is_remount) { 1589 if (is_remount) {
1593 if (!sbi->s_journal) 1590 if (!sbi->s_journal)
@@ -2685,11 +2682,13 @@ static struct attribute *ext4_attrs[] = {
2685EXT4_INFO_ATTR(lazy_itable_init); 2682EXT4_INFO_ATTR(lazy_itable_init);
2686EXT4_INFO_ATTR(batched_discard); 2683EXT4_INFO_ATTR(batched_discard);
2687EXT4_INFO_ATTR(meta_bg_resize); 2684EXT4_INFO_ATTR(meta_bg_resize);
2685EXT4_INFO_ATTR(encryption);
2688 2686
2689static struct attribute *ext4_feat_attrs[] = { 2687static struct attribute *ext4_feat_attrs[] = {
2690 ATTR_LIST(lazy_itable_init), 2688 ATTR_LIST(lazy_itable_init),
2691 ATTR_LIST(batched_discard), 2689 ATTR_LIST(batched_discard),
2692 ATTR_LIST(meta_bg_resize), 2690 ATTR_LIST(meta_bg_resize),
2691 ATTR_LIST(encryption),
2693 NULL, 2692 NULL,
2694}; 2693};
2695 2694
@@ -3448,6 +3447,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3448 if (sb->s_bdev->bd_part) 3447 if (sb->s_bdev->bd_part)
3449 sbi->s_sectors_written_start = 3448 sbi->s_sectors_written_start =
3450 part_stat_read(sb->s_bdev->bd_part, sectors[1]); 3449 part_stat_read(sb->s_bdev->bd_part, sectors[1]);
3450#ifdef CONFIG_EXT4_FS_ENCRYPTION
3451 /* Modes of operations for file and directory encryption. */
3452 sbi->s_file_encryption_mode = EXT4_ENCRYPTION_MODE_AES_256_XTS;
3453 sbi->s_dir_encryption_mode = EXT4_ENCRYPTION_MODE_INVALID;
3454#endif
3451 3455
3452 /* Cleanup superblock name */ 3456 /* Cleanup superblock name */
3453 for (cp = sb->s_id; (cp = strchr(cp, '/'));) 3457 for (cp = sb->s_id; (cp = strchr(cp, '/'));)
@@ -3692,6 +3696,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3692 } 3696 }
3693 } 3697 }
3694 3698
3699 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_ENCRYPT) &&
3700 es->s_encryption_level) {
3701 ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
3702 es->s_encryption_level);
3703 goto failed_mount;
3704 }
3705
3695 if (sb->s_blocksize != blocksize) { 3706 if (sb->s_blocksize != blocksize) {
3696 /* Validate the filesystem blocksize */ 3707 /* Validate the filesystem blocksize */
3697 if (!sb_set_blocksize(sb, blocksize)) { 3708 if (!sb_set_blocksize(sb, blocksize)) {
@@ -4054,6 +4065,13 @@ no_journal:
4054 } 4065 }
4055 } 4066 }
4056 4067
4068 if (unlikely(sbi->s_mount_flags & EXT4_MF_TEST_DUMMY_ENCRYPTION) &&
4069 !(sb->s_flags & MS_RDONLY) &&
4070 !EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_ENCRYPT)) {
4071 EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_ENCRYPT);
4072 ext4_commit_super(sb, 1);
4073 }
4074
4057 /* 4075 /*
4058 * Get the # of file system overhead blocks from the 4076 * Get the # of file system overhead blocks from the
4059 * superblock if present. 4077 * superblock if present.
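
The no_journal hunk above turns on the ENCRYPT incompat feature the first time a writable filesystem is mounted with test_dummy_encryption, then commits the superblock. As a minimal user-space illustration of that pattern (the struct, helper, and feature handling below are stand-ins, not the kernel's EXT4_HAS_INCOMPAT_FEATURE/EXT4_SET_INCOMPAT_FEATURE macros):

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in; ext4's on-disk ENCRYPT incompat bit is 0x10000. */
#define INCOMPAT_ENCRYPT	0x10000u

struct fake_super {
	uint32_t s_feature_incompat;
};

/* Mirrors the shape of the hunk above: only set the incompat bit (and
 * commit the superblock) on a writable mount with the test flag set. */
static int maybe_enable_encrypt(struct fake_super *es, int test_dummy, int readonly)
{
	if (!test_dummy || readonly)
		return 0;
	if (es->s_feature_incompat & INCOMPAT_ENCRYPT)
		return 0;
	es->s_feature_incompat |= INCOMPAT_ENCRYPT;
	return 1;	/* caller would now write the superblock back */
}

int main(void)
{
	struct fake_super es = { .s_feature_incompat = 0 };

	if (maybe_enable_encrypt(&es, 1 /* test_dummy_encryption */, 0 /* rw */))
		printf("feature enabled, incompat now 0x%x\n", es.s_feature_incompat);
	return 0;
}
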
@@ -4570,7 +4588,7 @@ static int ext4_commit_super(struct super_block *sb, int sync)
4570 struct buffer_head *sbh = EXT4_SB(sb)->s_sbh; 4588 struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
4571 int error = 0; 4589 int error = 0;
4572 4590
4573 if (!sbh || block_device_ejected(sb)) 4591 if (!sbh)
4574 return error; 4592 return error;
4575 if (buffer_write_io_error(sbh)) { 4593 if (buffer_write_io_error(sbh)) {
4576 /* 4594 /*
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index ff3711932018..136ca0e911fd 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -18,13 +18,101 @@
18 */ 18 */
19 19
20#include <linux/fs.h> 20#include <linux/fs.h>
21#include <linux/jbd2.h>
22#include <linux/namei.h> 21#include <linux/namei.h>
23#include "ext4.h" 22#include "ext4.h"
24#include "xattr.h" 23#include "xattr.h"
25 24
25#ifdef CONFIG_EXT4_FS_ENCRYPTION
26static void *ext4_follow_link(struct dentry *dentry, struct nameidata *nd) 26static void *ext4_follow_link(struct dentry *dentry, struct nameidata *nd)
27{ 27{
28 struct page *cpage = NULL;
29 char *caddr, *paddr = NULL;
30 struct ext4_str cstr, pstr;
31 struct inode *inode = dentry->d_inode;
32 struct ext4_fname_crypto_ctx *ctx = NULL;
33 struct ext4_encrypted_symlink_data *sd;
34 loff_t size = min_t(loff_t, i_size_read(inode), PAGE_SIZE - 1);
35 int res;
36 u32 plen, max_size = inode->i_sb->s_blocksize;
37
38 if (!ext4_encrypted_inode(inode))
39 return page_follow_link_light(dentry, nd);
40
41 ctx = ext4_get_fname_crypto_ctx(inode, inode->i_sb->s_blocksize);
42 if (IS_ERR(ctx))
43 return ctx;
44
45 if (ext4_inode_is_fast_symlink(inode)) {
46 caddr = (char *) EXT4_I(dentry->d_inode)->i_data;
47 max_size = sizeof(EXT4_I(dentry->d_inode)->i_data);
48 } else {
49 cpage = read_mapping_page(inode->i_mapping, 0, NULL);
50 if (IS_ERR(cpage)) {
51 ext4_put_fname_crypto_ctx(&ctx);
52 return cpage;
53 }
54 caddr = kmap(cpage);
55 caddr[size] = 0;
56 }
57
58 /* Symlink is encrypted */
59 sd = (struct ext4_encrypted_symlink_data *)caddr;
60 cstr.name = sd->encrypted_path;
61 cstr.len = le32_to_cpu(sd->len);
62 if ((cstr.len +
63 sizeof(struct ext4_encrypted_symlink_data) - 1) >
64 max_size) {
65 /* Symlink data on the disk is corrupted */
66 res = -EIO;
67 goto errout;
68 }
69 plen = (cstr.len < EXT4_FNAME_CRYPTO_DIGEST_SIZE*2) ?
70 EXT4_FNAME_CRYPTO_DIGEST_SIZE*2 : cstr.len;
71 paddr = kmalloc(plen + 1, GFP_NOFS);
72 if (!paddr) {
73 res = -ENOMEM;
74 goto errout;
75 }
76 pstr.name = paddr;
77 res = _ext4_fname_disk_to_usr(ctx, &cstr, &pstr);
78 if (res < 0)
79 goto errout;
80 /* Null-terminate the name */
81 if (res <= plen)
82 paddr[res] = '\0';
83 nd_set_link(nd, paddr);
84 ext4_put_fname_crypto_ctx(&ctx);
85 if (cpage) {
86 kunmap(cpage);
87 page_cache_release(cpage);
88 }
89 return NULL;
90errout:
91 ext4_put_fname_crypto_ctx(&ctx);
92 if (cpage) {
93 kunmap(cpage);
94 page_cache_release(cpage);
95 }
96 kfree(paddr);
97 return ERR_PTR(res);
98}
99
100static void ext4_put_link(struct dentry *dentry, struct nameidata *nd,
101 void *cookie)
102{
103 struct page *page = cookie;
104
105 if (!page) {
106 kfree(nd_get_link(nd));
107 } else {
108 kunmap(page);
109 page_cache_release(page);
110 }
111}
112#endif
113
114static void *ext4_follow_fast_link(struct dentry *dentry, struct nameidata *nd)
115{
28 struct ext4_inode_info *ei = EXT4_I(dentry->d_inode); 116 struct ext4_inode_info *ei = EXT4_I(dentry->d_inode);
29 nd_set_link(nd, (char *) ei->i_data); 117 nd_set_link(nd, (char *) ei->i_data);
30 return NULL; 118 return NULL;
@@ -32,8 +120,13 @@ static void *ext4_follow_link(struct dentry *dentry, struct nameidata *nd)
32 120
33const struct inode_operations ext4_symlink_inode_operations = { 121const struct inode_operations ext4_symlink_inode_operations = {
34 .readlink = generic_readlink, 122 .readlink = generic_readlink,
123#ifdef CONFIG_EXT4_FS_ENCRYPTION
124 .follow_link = ext4_follow_link,
125 .put_link = ext4_put_link,
126#else
35 .follow_link = page_follow_link_light, 127 .follow_link = page_follow_link_light,
36 .put_link = page_put_link, 128 .put_link = page_put_link,
129#endif
37 .setattr = ext4_setattr, 130 .setattr = ext4_setattr,
38 .setxattr = generic_setxattr, 131 .setxattr = generic_setxattr,
39 .getxattr = generic_getxattr, 132 .getxattr = generic_getxattr,
@@ -43,7 +136,7 @@ const struct inode_operations ext4_symlink_inode_operations = {
43 136
44const struct inode_operations ext4_fast_symlink_inode_operations = { 137const struct inode_operations ext4_fast_symlink_inode_operations = {
45 .readlink = generic_readlink, 138 .readlink = generic_readlink,
46 .follow_link = ext4_follow_link, 139 .follow_link = ext4_follow_fast_link,
47 .setattr = ext4_setattr, 140 .setattr = ext4_setattr,
48 .setxattr = generic_setxattr, 141 .setxattr = generic_setxattr,
49 .getxattr = generic_getxattr, 142 .getxattr = generic_getxattr,
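
For encrypted symlinks, the ext4_follow_link() added above treats the link body as a length-prefixed ciphertext (struct ext4_encrypted_symlink_data) and reports corruption when the declared length plus the header cannot fit in the space the link occupies: the inode's i_data for fast symlinks, one block otherwise. The user-space sketch below mirrors just that sanity check; the struct layout and the 60-byte fast-symlink size are illustrative assumptions, not the on-disk format definition.

#include <stdio.h>
#include <stdint.h>

/* Illustrative layout of the length-prefixed encrypted link payload. */
struct enc_symlink {
	uint16_t len;			/* bytes of ciphertext that follow */
	unsigned char encrypted_path[1];
};

/* Same shape as the corruption check above: the header plus the declared
 * ciphertext length must fit in the space the symlink occupies. */
static int symlink_payload_ok(uint16_t len, size_t max_size)
{
	return (size_t)len + sizeof(struct enc_symlink) - 1 <= max_size;
}

int main(void)
{
	/* max_size is i_data (60 bytes) for a fast symlink, or one block. */
	printf("len 40 in 60 bytes:  %s\n", symlink_payload_ok(40, 60) ? "ok" : "corrupt");
	printf("len 200 in 60 bytes: %s\n", symlink_payload_ok(200, 60) ? "ok" : "corrupt");
	return 0;
}
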
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 1e09fc77395c..759842ff8af0 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -55,7 +55,6 @@
55#include <linux/slab.h> 55#include <linux/slab.h>
56#include <linux/mbcache.h> 56#include <linux/mbcache.h>
57#include <linux/quotaops.h> 57#include <linux/quotaops.h>
58#include <linux/rwsem.h>
59#include "ext4_jbd2.h" 58#include "ext4_jbd2.h"
60#include "ext4.h" 59#include "ext4.h"
61#include "xattr.h" 60#include "xattr.h"
@@ -639,8 +638,7 @@ ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
639 free += EXT4_XATTR_LEN(name_len); 638 free += EXT4_XATTR_LEN(name_len);
640 } 639 }
641 if (i->value) { 640 if (i->value) {
642 if (free < EXT4_XATTR_SIZE(i->value_len) || 641 if (free < EXT4_XATTR_LEN(name_len) +
643 free < EXT4_XATTR_LEN(name_len) +
644 EXT4_XATTR_SIZE(i->value_len)) 642 EXT4_XATTR_SIZE(i->value_len))
645 return -ENOSPC; 643 return -ENOSPC;
646 } 644 }
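
The xattr.c hunk above collapses the free-space test in ext4_xattr_set_entry() to a single condition: because EXT4_XATTR_LEN(name_len) is always positive, requiring room for the padded entry plus the padded value already implies requiring room for the value alone, so the first comparison was redundant. The sketch below uses illustrative stand-ins for those macros (the 16-byte entry header and 4-byte rounding are assumptions) to show that the combined requirement is the stricter one.

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-ins for ext4's xattr size macros: the entry
 * (16-byte header + name) and the value are each rounded up to a
 * 4-byte multiple.  Header size and rounding are assumptions here. */
#define XATTR_ROUND		((size_t)3)
#define ENTRY_HEADER		((size_t)16)
#define XATTR_LEN(nlen)		(((nlen) + ENTRY_HEADER + XATTR_ROUND) & ~XATTR_ROUND)
#define XATTR_SIZE(vlen)	(((vlen) + XATTR_ROUND) & ~XATTR_ROUND)

int main(void)
{
	size_t name_len = 9, value_len = 30, free_space = 48;
	size_t value_only = XATTR_SIZE(value_len);		/* 32 */
	size_t combined = XATTR_LEN(name_len) + value_only;	/* 28 + 32 */

	/* The combined requirement is always at least as strict, so the
	 * single test kept by the hunk above is sufficient on its own. */
	printf("value-only: need %zu of %zu -> %s\n", value_only, free_space,
	       value_only <= free_space ? "fits" : "no space");
	printf("combined:   need %zu of %zu -> %s\n", combined, free_space,
	       combined <= free_space ? "fits" : "no space");
	return 0;
}
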
diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
index 29bedf5589f6..ddc0957760ba 100644
--- a/fs/ext4/xattr.h
+++ b/fs/ext4/xattr.h
@@ -23,6 +23,7 @@
23#define EXT4_XATTR_INDEX_SECURITY 6 23#define EXT4_XATTR_INDEX_SECURITY 6
24#define EXT4_XATTR_INDEX_SYSTEM 7 24#define EXT4_XATTR_INDEX_SYSTEM 7
25#define EXT4_XATTR_INDEX_RICHACL 8 25#define EXT4_XATTR_INDEX_RICHACL 8
26#define EXT4_XATTR_INDEX_ENCRYPTION 9
26 27
27struct ext4_xattr_header { 28struct ext4_xattr_header {
28 __le32 h_magic; /* magic number for identification */ 29 __le32 h_magic; /* magic number for identification */
@@ -98,6 +99,8 @@ extern const struct xattr_handler ext4_xattr_user_handler;
98extern const struct xattr_handler ext4_xattr_trusted_handler; 99extern const struct xattr_handler ext4_xattr_trusted_handler;
99extern const struct xattr_handler ext4_xattr_security_handler; 100extern const struct xattr_handler ext4_xattr_security_handler;
100 101
102#define EXT4_XATTR_NAME_ENCRYPTION_CONTEXT "c"
103
101extern ssize_t ext4_listxattr(struct dentry *, char *, size_t); 104extern ssize_t ext4_listxattr(struct dentry *, char *, size_t);
102 105
103extern int ext4_xattr_get(struct inode *, int, const char *, void *, size_t); 106extern int ext4_xattr_get(struct inode *, int, const char *, void *, size_t);