author     Eric Biggers <ebiggers@google.com>    2019-07-22 12:26:24 -0400
committer  Eric Biggers <ebiggers@google.com>    2019-08-12 22:33:51 -0400
commit     22cfe4b48ccb5a3dbb92d6dcb88f396e0f400f74 (patch)
tree       a542a34c4ff891e8b361b18c55602304e6487371 /fs/ext4
parent     c93d8f88580921c84d2213161ef3c22560511b84 (diff)
ext4: add fs-verity read support
Make ext4_mpage_readpages() verify data as it is read from fs-verity files,
using the helper functions from fs/verity/.

To support both encryption and verity simultaneously, this required
refactoring the decryption workflow into a generic "post-read processing"
workflow which can do decryption, verification, or both.

The case where the ext4 block size is not equal to the PAGE_SIZE is not
supported yet, since in that case ext4_mpage_readpages() sometimes falls back
to block_read_full_page(), which does not support fs-verity yet.

Co-developed-by: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Eric Biggers <ebiggers@google.com>
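As a rough illustration of the "post-read processing" model described above, the
following is a minimal user-space sketch: a per-bio context carries a bitmask of
enabled steps and a current-step counter, and a dispatcher walks the enabled steps
in order. The enum and field names mirror the patch, but do_decrypt(), do_verify()
and finish() are illustrative stubs, not kernel, fscrypt, or fsverity APIs, and the
real code defers each step to a workqueue and re-enters the dispatcher afterwards
rather than running everything inline.

/*
 * Illustrative sketch only: models the step-chaining idea from
 * bio_post_read_processing() with synchronous stubs instead of the
 * kernel's workqueue-based fscrypt/fsverity helpers.
 */
#include <stdio.h>

enum post_read_step {
        STEP_INITIAL = 0,
        STEP_DECRYPT,
        STEP_VERITY,
};

struct post_read_ctx {
        unsigned int cur_step;          /* last step completed (STEP_INITIAL before any) */
        unsigned int enabled_steps;     /* bitmask of steps to run for this bio */
};

static void do_decrypt(void) { puts("decrypt bio contents"); }
static void do_verify(void)  { puts("verify bio against the Merkle tree"); }
static void finish(void)     { puts("mark pages up to date and unlock them"); }

/* Run each enabled step in order, then complete the read. */
static void post_read_processing(struct post_read_ctx *ctx)
{
        switch (++ctx->cur_step) {
        case STEP_DECRYPT:
                if (ctx->enabled_steps & (1 << STEP_DECRYPT))
                        do_decrypt();   /* the kernel enqueues this and returns */
                /* fall through */
        case STEP_VERITY:
                if (ctx->enabled_steps & (1 << STEP_VERITY))
                        do_verify();    /* likewise deferred to a workqueue */
                /* fall through */
        default:
                finish();
        }
}

int main(void)
{
        /* An encrypted verity file enables both steps; a plain file enables none. */
        struct post_read_ctx ctx = {
                .cur_step = STEP_INITIAL,
                .enabled_steps = (1 << STEP_DECRYPT) | (1 << STEP_VERITY),
        };

        post_read_processing(&ctx);
        return 0;
}

Compiled and run as an ordinary C program, this prints the three stages in order
for a file with both features enabled; the actual chain in the patch runs
asynchronously, re-entering bio_post_read_processing() after each workqueue item
completes.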
Diffstat (limited to 'fs/ext4')
-rw-r--r--   fs/ext4/ext4.h     |   2
-rw-r--r--   fs/ext4/inode.c    |   2
-rw-r--r--   fs/ext4/readpage.c | 211
-rw-r--r--   fs/ext4/super.c    |   9
4 files changed, 188 insertions(+), 36 deletions(-)
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 736972f46ea6..9c7f4036021b 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -3191,6 +3191,8 @@ static inline void ext4_set_de_type(struct super_block *sb,
 extern int ext4_mpage_readpages(struct address_space *mapping,
                                 struct list_head *pages, struct page *page,
                                 unsigned nr_pages, bool is_readahead);
+extern int __init ext4_init_post_read_processing(void);
+extern void ext4_exit_post_read_processing(void);
 
 /* symlink.c */
 extern const struct inode_operations ext4_encrypted_symlink_inode_operations;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 6de3d4ba28f3..cf0fce1173a4 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3912,6 +3912,8 @@ static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
         if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
                 return 0;
 #endif
+        if (fsverity_active(inode))
+                return 0;
 
         /*
          * If we are doing data journalling we don't support O_DIRECT
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index c916017db334..a30b203fa461 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -47,13 +47,103 @@
 
 #include "ext4.h"
 
-static inline bool ext4_bio_encrypted(struct bio *bio)
+#define NUM_PREALLOC_POST_READ_CTXS    128
+
+static struct kmem_cache *bio_post_read_ctx_cache;
+static mempool_t *bio_post_read_ctx_pool;
+
+/* postprocessing steps for read bios */
+enum bio_post_read_step {
+        STEP_INITIAL = 0,
+        STEP_DECRYPT,
+        STEP_VERITY,
+};
+
+struct bio_post_read_ctx {
+        struct bio *bio;
+        struct work_struct work;
+        unsigned int cur_step;
+        unsigned int enabled_steps;
+};
+
+static void __read_end_io(struct bio *bio)
 {
-#ifdef CONFIG_FS_ENCRYPTION
-        return unlikely(bio->bi_private != NULL);
-#else
-        return false;
-#endif
+        struct page *page;
+        struct bio_vec *bv;
+        struct bvec_iter_all iter_all;
+
+        bio_for_each_segment_all(bv, bio, iter_all) {
+                page = bv->bv_page;
+
+                /* PG_error was set if any post_read step failed */
+                if (bio->bi_status || PageError(page)) {
+                        ClearPageUptodate(page);
+                        /* will re-read again later */
+                        ClearPageError(page);
+                } else {
+                        SetPageUptodate(page);
+                }
+                unlock_page(page);
+        }
+        if (bio->bi_private)
+                mempool_free(bio->bi_private, bio_post_read_ctx_pool);
+        bio_put(bio);
+}
+
+static void bio_post_read_processing(struct bio_post_read_ctx *ctx);
+
+static void decrypt_work(struct work_struct *work)
+{
+        struct bio_post_read_ctx *ctx =
+                container_of(work, struct bio_post_read_ctx, work);
+
+        fscrypt_decrypt_bio(ctx->bio);
+
+        bio_post_read_processing(ctx);
+}
+
+static void verity_work(struct work_struct *work)
+{
+        struct bio_post_read_ctx *ctx =
+                container_of(work, struct bio_post_read_ctx, work);
+
+        fsverity_verify_bio(ctx->bio);
+
+        bio_post_read_processing(ctx);
+}
+
+static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
+{
+        /*
+         * We use different work queues for decryption and for verity because
+         * verity may require reading metadata pages that need decryption, and
+         * we shouldn't recurse to the same workqueue.
+         */
+        switch (++ctx->cur_step) {
+        case STEP_DECRYPT:
+                if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
+                        INIT_WORK(&ctx->work, decrypt_work);
+                        fscrypt_enqueue_decrypt_work(&ctx->work);
+                        return;
+                }
+                ctx->cur_step++;
+                /* fall-through */
+        case STEP_VERITY:
+                if (ctx->enabled_steps & (1 << STEP_VERITY)) {
+                        INIT_WORK(&ctx->work, verity_work);
+                        fsverity_enqueue_verify_work(&ctx->work);
+                        return;
+                }
+                ctx->cur_step++;
+                /* fall-through */
+        default:
+                __read_end_io(ctx->bio);
+        }
+}
+
+static bool bio_post_read_required(struct bio *bio)
+{
+        return bio->bi_private && !bio->bi_status;
 }
 
 /*
@@ -70,30 +160,53 @@ static inline bool ext4_bio_encrypted(struct bio *bio)
  */
 static void mpage_end_io(struct bio *bio)
 {
-        struct bio_vec *bv;
-        struct bvec_iter_all iter_all;
+        if (bio_post_read_required(bio)) {
+                struct bio_post_read_ctx *ctx = bio->bi_private;
 
-        if (ext4_bio_encrypted(bio)) {
-                if (bio->bi_status) {
-                        fscrypt_release_ctx(bio->bi_private);
-                } else {
-                        fscrypt_enqueue_decrypt_bio(bio->bi_private, bio);
-                        return;
-                }
+                ctx->cur_step = STEP_INITIAL;
+                bio_post_read_processing(ctx);
+                return;
         }
-        bio_for_each_segment_all(bv, bio, iter_all) {
-                struct page *page = bv->bv_page;
+        __read_end_io(bio);
+}
 
-                if (!bio->bi_status) {
-                        SetPageUptodate(page);
-                } else {
-                        ClearPageUptodate(page);
-                        SetPageError(page);
-                }
-                unlock_page(page);
+static inline bool ext4_need_verity(const struct inode *inode, pgoff_t idx)
+{
+        return fsverity_active(inode) &&
+               idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
+}
+
+static struct bio_post_read_ctx *get_bio_post_read_ctx(struct inode *inode,
+                                                       struct bio *bio,
+                                                       pgoff_t first_idx)
+{
+        unsigned int post_read_steps = 0;
+        struct bio_post_read_ctx *ctx = NULL;
+
+        if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
+                post_read_steps |= 1 << STEP_DECRYPT;
+
+        if (ext4_need_verity(inode, first_idx))
+                post_read_steps |= 1 << STEP_VERITY;
+
+        if (post_read_steps) {
+                ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
+                if (!ctx)
+                        return ERR_PTR(-ENOMEM);
+                ctx->bio = bio;
+                ctx->enabled_steps = post_read_steps;
+                bio->bi_private = ctx;
         }
+        return ctx;
+}
 
-        bio_put(bio);
+static inline loff_t ext4_readpage_limit(struct inode *inode)
+{
+        if (IS_ENABLED(CONFIG_FS_VERITY) &&
+            (IS_VERITY(inode) || ext4_verity_in_progress(inode)))
+                return inode->i_sb->s_maxbytes;
+
+        return i_size_read(inode);
 }
 
 int ext4_mpage_readpages(struct address_space *mapping,
@@ -141,7 +254,8 @@ int ext4_mpage_readpages(struct address_space *mapping,
 
                 block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
                 last_block = block_in_file + nr_pages * blocks_per_page;
-                last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
+                last_block_in_file = (ext4_readpage_limit(inode) +
+                                      blocksize - 1) >> blkbits;
                 if (last_block > last_block_in_file)
                         last_block = last_block_in_file;
                 page_block = 0;
@@ -218,6 +332,9 @@ int ext4_mpage_readpages(struct address_space *mapping,
                         zero_user_segment(page, first_hole << blkbits,
                                           PAGE_SIZE);
                         if (first_hole == 0) {
+                                if (ext4_need_verity(inode, page->index) &&
+                                    !fsverity_verify_page(page))
+                                        goto set_error_page;
                                 SetPageUptodate(page);
                                 unlock_page(page);
                                 goto next_page;
@@ -241,18 +358,16 @@ int ext4_mpage_readpages(struct address_space *mapping,
                         bio = NULL;
                 }
                 if (bio == NULL) {
-                        struct fscrypt_ctx *ctx = NULL;
+                        struct bio_post_read_ctx *ctx;
 
-                        if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) {
-                                ctx = fscrypt_get_ctx(GFP_NOFS);
-                                if (IS_ERR(ctx))
-                                        goto set_error_page;
-                        }
                         bio = bio_alloc(GFP_KERNEL,
                                 min_t(int, nr_pages, BIO_MAX_PAGES));
-                        if (!bio) {
-                                if (ctx)
-                                        fscrypt_release_ctx(ctx);
+                        if (!bio)
+                                goto set_error_page;
+                        ctx = get_bio_post_read_ctx(inode, bio, page->index);
+                        if (IS_ERR(ctx)) {
+                                bio_put(bio);
+                                bio = NULL;
                                 goto set_error_page;
                         }
                         bio_set_dev(bio, bdev);
@@ -293,3 +408,29 @@ int ext4_mpage_readpages(struct address_space *mapping,
         submit_bio(bio);
         return 0;
 }
+
+int __init ext4_init_post_read_processing(void)
+{
+        bio_post_read_ctx_cache =
+                kmem_cache_create("ext4_bio_post_read_ctx",
+                                  sizeof(struct bio_post_read_ctx), 0, 0, NULL);
+        if (!bio_post_read_ctx_cache)
+                goto fail;
+        bio_post_read_ctx_pool =
+                mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
+                                         bio_post_read_ctx_cache);
+        if (!bio_post_read_ctx_pool)
+                goto fail_free_cache;
+        return 0;
+
+fail_free_cache:
+        kmem_cache_destroy(bio_post_read_ctx_cache);
+fail:
+        return -ENOMEM;
+}
+
+void ext4_exit_post_read_processing(void)
+{
+        mempool_destroy(bio_post_read_ctx_pool);
+        kmem_cache_destroy(bio_post_read_ctx_cache);
+}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 05a9874687c3..23e7acd43e4e 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -6104,6 +6104,10 @@ static int __init ext4_init_fs(void)
 
         err = ext4_init_pending();
         if (err)
+                goto out7;
+
+        err = ext4_init_post_read_processing();
+        if (err)
                 goto out6;
 
         err = ext4_init_pageio();
@@ -6144,8 +6148,10 @@ out3:
 out4:
         ext4_exit_pageio();
 out5:
-        ext4_exit_pending();
+        ext4_exit_post_read_processing();
 out6:
+        ext4_exit_pending();
+out7:
         ext4_exit_es();
 
         return err;
@@ -6162,6 +6168,7 @@ static void __exit ext4_exit_fs(void)
         ext4_exit_sysfs();
         ext4_exit_system_zone();
         ext4_exit_pageio();
+        ext4_exit_post_read_processing();
         ext4_exit_es();
         ext4_exit_pending();
 }