 fs/ext4/file.c     | 18
 fs/ext4/inode.c    |  7
 fs/ext4/readpage.c | 66
 3 files changed, 88 insertions(+), 3 deletions(-)
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index fcc6c1349186..b132a3c6b6fb 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -218,6 +218,13 @@ static const struct vm_operations_struct ext4_file_vm_ops = {
 
 static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
+	struct inode *inode = file->f_mapping->host;
+
+	if (ext4_encrypted_inode(inode)) {
+		int err = ext4_generate_encryption_key(inode);
+		if (err)
+			return 0;
+	}
 	file_accessed(file);
 	if (IS_DAX(file_inode(file))) {
 		vma->vm_ops = &ext4_dax_vm_ops;
@@ -235,6 +242,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
 	struct vfsmount *mnt = filp->f_path.mnt;
 	struct path path;
 	char buf[64], *cp;
+	int ret;
 
 	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
 		     !(sb->s_flags & MS_RDONLY))) {
@@ -273,11 +281,17 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
 	 * writing and the journal is present
 	 */
 	if (filp->f_mode & FMODE_WRITE) {
-		int ret = ext4_inode_attach_jinode(inode);
+		ret = ext4_inode_attach_jinode(inode);
 		if (ret < 0)
 			return ret;
 	}
-	return dquot_file_open(inode, filp);
+	ret = dquot_file_open(inode, filp);
+	if (!ret && ext4_encrypted_inode(inode)) {
+		ret = ext4_generate_encryption_key(inode);
+		if (ret)
+			ret = -EACCES;
+	}
+	return ret;
 }
 
 /*
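Taken together, the file.c hunks make both open(2) and mmap(2) set up the per-inode encryption key before file data can be read: ext4_file_open() now maps a key-setup failure to -EACCES, while ext4_file_mmap() returns 0 even when ext4_generate_encryption_key() fails, leaving the failure to surface later on the read path. A minimal userspace sketch of the visible effect on open(2) (the path below is hypothetical):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical ext4 mount with an encrypted file and no key loaded. */
	int fd = open("/mnt/ext4/secret.txt", O_RDONLY);

	if (fd < 0 && errno == EACCES)
		printf("open denied: encryption key unavailable\n");
	return 0;
}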
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 7c4527e10ae4..8b4fe626919a 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3370,6 +3370,13 @@ static int __ext4_block_zero_page_range(handle_t *handle,
 		/* Uhhuh. Read error. Complain and punt. */
 		if (!buffer_uptodate(bh))
 			goto unlock;
+		if (S_ISREG(inode->i_mode) &&
+		    ext4_encrypted_inode(inode)) {
+			/* We expect the key to be set. */
+			BUG_ON(!ext4_has_encryption_key(inode));
+			BUG_ON(blocksize != PAGE_CACHE_SIZE);
+			WARN_ON_ONCE(ext4_decrypt_one(inode, page));
+		}
 	}
 	if (ext4_should_journal_data(inode)) {
 		BUFFER_TRACE(bh, "get write access");
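This hunk covers partial-block zeroing (truncate and punch-hole) of an encrypted regular file: the page just read from disk holds ciphertext, so it is decrypted in place before the requested range is zeroed. The BUG_ON(blocksize != PAGE_CACHE_SIZE) reflects that decryption here operates on whole pages. Zeroing ciphertext without decrypting first would produce garbage plaintext later, as this toy standalone program shows (the XOR "cipher" is only a stand-in for the real per-page encryption):

#include <stdio.h>

int main(void)
{
	unsigned char key = 0x5a;
	unsigned char plain = 'A';
	unsigned char cipher = plain ^ key;	/* the "on-disk" byte */

	cipher = 0;	/* zero the ciphertext without decrypting first */
	/* Decrypting now yields 0x5a, not the 0x00 the caller wanted. */
	printf("decrypted byte: 0x%02x\n", (unsigned)(cipher ^ key));
	return 0;
}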
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index fff9fe6aacf8..171b9ac4b45e 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -47,6 +47,46 @@
 #include "ext4.h"
 
 /*
+ * Call ext4_decrypt on every single page, reusing the encryption
+ * context.
+ */
+static void completion_pages(struct work_struct *work)
+{
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+	struct ext4_crypto_ctx *ctx =
+		container_of(work, struct ext4_crypto_ctx, work);
+	struct bio *bio = ctx->bio;
+	struct bio_vec *bv;
+	int i;
+
+	bio_for_each_segment_all(bv, bio, i) {
+		struct page *page = bv->bv_page;
+
+		int ret = ext4_decrypt(ctx, page);
+		if (ret) {
+			WARN_ON_ONCE(1);
+			SetPageError(page);
+		} else
+			SetPageUptodate(page);
+		unlock_page(page);
+	}
+	ext4_release_crypto_ctx(ctx);
+	bio_put(bio);
+#else
+	BUG();
+#endif
+}
+
+static inline bool ext4_bio_encrypted(struct bio *bio)
+{
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+	return unlikely(bio->bi_private != NULL);
+#else
+	return false;
+#endif
+}
+
+/*
  * I/O completion handler for multipage BIOs.
  *
  * The mpage code never puts partial pages into a BIO (except for end-of-file).
@@ -63,6 +103,18 @@ static void mpage_end_io(struct bio *bio, int err)
 	struct bio_vec *bv;
 	int i;
 
+	if (ext4_bio_encrypted(bio)) {
+		struct ext4_crypto_ctx *ctx = bio->bi_private;
+
+		if (err) {
+			ext4_release_crypto_ctx(ctx);
+		} else {
+			INIT_WORK(&ctx->work, completion_pages);
+			ctx->bio = bio;
+			queue_work(ext4_read_workqueue, &ctx->work);
+			return;
+		}
+	}
 	bio_for_each_segment_all(bv, bio, i) {
 		struct page *page = bv->bv_page;
 
@@ -223,13 +275,25 @@ int ext4_mpage_readpages(struct address_space *mapping,
 			bio = NULL;
 		}
 		if (bio == NULL) {
+			struct ext4_crypto_ctx *ctx = NULL;
+
+			if (ext4_encrypted_inode(inode) &&
+			    S_ISREG(inode->i_mode)) {
+				ctx = ext4_get_crypto_ctx(inode);
+				if (IS_ERR(ctx))
+					goto set_error_page;
+			}
 			bio = bio_alloc(GFP_KERNEL,
 				min_t(int, nr_pages, bio_get_nr_vecs(bdev)));
-			if (!bio)
+			if (!bio) {
+				if (ctx)
+					ext4_release_crypto_ctx(ctx);
 				goto set_error_page;
+			}
 			bio->bi_bdev = bdev;
 			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
 			bio->bi_end_io = mpage_end_io;
+			bio->bi_private = ctx;
 		}
 
 		length = first_hole << blkbits;
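The readpage.c changes split read completion in two. mpage_end_io() runs in bio completion context, where sleeping is not allowed, so for an encrypted bio it only queues completion_pages() on ext4_read_workqueue and returns; the worker then decrypts, marks up to date, and unlocks each page. ext4_bio_encrypted() keys off bio->bi_private, which ext4_mpage_readpages() sets only when it attached a crypto context. Below is a minimal sketch of the same punt-to-workqueue pattern; the names my_ctx, my_work_fn, and my_complete are hypothetical, and a kernel-module context is assumed:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_ctx {
	struct work_struct work;
	void *payload;		/* e.g. the bio being completed */
};

static void my_work_fn(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, work);

	/* Process context: sleeping (e.g. crypto operations) is allowed here. */
	kfree(ctx);
}

/* Called from the completion path: defer the heavy work and return fast. */
static void my_complete(struct my_ctx *ctx)
{
	INIT_WORK(&ctx->work, my_work_fn);
	schedule_work(&ctx->work);
}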
