about summary refs log tree commit diff stats
path: root/fs/ext4/crypto.c
diff options
context:
space:
mode:
author: Theodore Ts'o <tytso@mit.edu> 2015-05-31 13:34:24 -0400
committer: Theodore Ts'o <tytso@mit.edu> 2015-05-31 13:34:24 -0400
commit 95ea68b4c7105179f507d31f7bf571623373aa0b (patch)
tree 5aead7a970083ed83e40f57c7a25323075de792e /fs/ext4/crypto.c
parent c936e1ec2879e43599d801dfa6fe58e7ccfee433 (diff)
ext4 crypto: fix memory leaks in ext4_encrypted_zeroout
ext4_encrypted_zeroout() could end up leaking a bio and bounce page. Fortunately it's not used much. While we're fixing things up, refactor out common code into the static function alloc_bounce_page() and fix up error handling if mempool_alloc() fails. Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Diffstat (limited to 'fs/ext4/crypto.c')
-rw-r--r-- fs/ext4/crypto.c | 62
1 file changed, 31 insertions(+), 31 deletions(-)
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index c3a9b08309db..1c9a8c499369 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -314,6 +314,26 @@ static int ext4_page_crypto(struct ext4_crypto_ctx *ctx,
314 return 0; 314 return 0;
315} 315}
316 316
317static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx)
318{
319 struct page *ciphertext_page = alloc_page(GFP_NOFS);
320
321 if (!ciphertext_page) {
322 /* This is a potential bottleneck, but at least we'll have
323 * forward progress. */
324 ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
325 GFP_NOFS);
326 if (ciphertext_page == NULL)
327 return ERR_PTR(-ENOMEM);
328 ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
329 } else {
330 ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
331 }
332 ctx->flags |= EXT4_WRITE_PATH_FL;
333 ctx->w.bounce_page = ciphertext_page;
334 return ciphertext_page;
335}
336
317/** 337/**
318 * ext4_encrypt() - Encrypts a page 338 * ext4_encrypt() - Encrypts a page
319 * @inode: The inode for which the encryption should take place 339 * @inode: The inode for which the encryption should take place
@@ -343,28 +363,17 @@ struct page *ext4_encrypt(struct inode *inode,
343 return (struct page *) ctx; 363 return (struct page *) ctx;
344 364
345 /* The encryption operation will require a bounce page. */ 365 /* The encryption operation will require a bounce page. */
346 ciphertext_page = alloc_page(GFP_NOFS); 366 ciphertext_page = alloc_bounce_page(ctx);
347 if (!ciphertext_page) { 367 if (IS_ERR(ciphertext_page))
348 /* This is a potential bottleneck, but at least we'll have 368 goto errout;
349 * forward progress. */
350 ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
351 GFP_NOFS);
352 if (WARN_ON_ONCE(!ciphertext_page)) {
353 ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
354 GFP_NOFS | __GFP_WAIT);
355 }
356 ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
357 } else {
358 ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
359 }
360 ctx->flags |= EXT4_WRITE_PATH_FL;
361 ctx->w.bounce_page = ciphertext_page;
362 ctx->w.control_page = plaintext_page; 369 ctx->w.control_page = plaintext_page;
363 err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, plaintext_page->index, 370 err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, plaintext_page->index,
364 plaintext_page, ciphertext_page); 371 plaintext_page, ciphertext_page);
365 if (err) { 372 if (err) {
373 ciphertext_page = ERR_PTR(err);
374 errout:
366 ext4_release_crypto_ctx(ctx); 375 ext4_release_crypto_ctx(ctx);
367 return ERR_PTR(err); 376 return ciphertext_page;
368 } 377 }
369 SetPagePrivate(ciphertext_page); 378 SetPagePrivate(ciphertext_page);
370 set_page_private(ciphertext_page, (unsigned long)ctx); 379 set_page_private(ciphertext_page, (unsigned long)ctx);
@@ -424,21 +433,11 @@ int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
424 if (IS_ERR(ctx)) 433 if (IS_ERR(ctx))
425 return PTR_ERR(ctx); 434 return PTR_ERR(ctx);
426 435
427 ciphertext_page = alloc_page(GFP_NOFS); 436 ciphertext_page = alloc_bounce_page(ctx);
428 if (!ciphertext_page) { 437 if (IS_ERR(ciphertext_page)) {
429 /* This is a potential bottleneck, but at least we'll have 438 err = PTR_ERR(ciphertext_page);
430 * forward progress. */ 439 goto errout;
431 ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
432 GFP_NOFS);
433 if (WARN_ON_ONCE(!ciphertext_page)) {
434 ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
435 GFP_NOFS | __GFP_WAIT);
436 }
437 ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
438 } else {
439 ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
440 } 440 }
441 ctx->w.bounce_page = ciphertext_page;
442 441
443 while (len--) { 442 while (len--) {
444 err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, lblk, 443 err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, lblk,
@@ -460,6 +459,7 @@ int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
460 goto errout; 459 goto errout;
461 } 460 }
462 err = submit_bio_wait(WRITE, bio); 461 err = submit_bio_wait(WRITE, bio);
462 bio_put(bio);
463 if (err) 463 if (err)
464 goto errout; 464 goto errout;
465 } 465 }