author     Eric Sandeen <sandeen@redhat.com>              2008-07-28 18:46:39 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org> 2008-07-28 19:30:21 -0400
commit     7fcba054373d5dfc43d26e243a5c9b92069972ee
tree       3503fba122a654946b5455bc95fa3978cbc4f68b
parent     25947d5ac56004378d8c2d31ebf22600d5bc0c02
eCryptfs: use page_alloc not kmalloc to get a page of memory
With SLUB debugging turned on in 2.6.26, I was getting memory corruption
when testing eCryptfs. The root cause turned out to be that eCryptfs was
doing kmalloc(PAGE_CACHE_SIZE); virt_to_page() and treating that as a nice
page-aligned chunk of memory. But at least with SLUB debugging on, this
is not always true, and the page we get from virt_to_page does not
necessarily match the PAGE_CACHE_SIZE worth of memory we got from kmalloc.
My simple testcase was 2 loops doing "rm -f fileX; cp /tmp/fileX ." for 2
different multi-megabyte files. With this change I no longer see the
corruption.
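
To illustrate the problem described above, here is a minimal sketch of the pre-patch allocation pattern (this is not code from the patch; the function name is made up for illustration):

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>

/* Hypothetical example of the pre-patch allocation pattern. */
static int broken_alloc_pattern(void)
{
        char *virt;
        struct page *pg;

        virt = kmalloc(PAGE_CACHE_SIZE, GFP_USER);
        if (!virt)
                return -ENOMEM;

        /*
         * virt_to_page() returns the page that contains 'virt', but
         * kmalloc() does not promise that 'virt' starts at the top of
         * that page.  With SLUB debugging enabled, redzones and
         * tracking metadata sit around the object, so treating 'pg'
         * as a private PAGE_CACHE_SIZE region can touch neighbouring
         * objects and corrupt memory.
         */
        pg = virt_to_page(virt);
        (void)pg;

        kfree(virt);
        return 0;
}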
Signed-off-by: Eric Sandeen <sandeen@redhat.com>
Acked-by: Michael Halcrow <mhalcrow@us.ibm.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: <stable@kernel.org> [2.6.25.x, 2.6.26.x]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 fs/ecryptfs/crypto.c | 30 ++++++++++++++++++------------
 1 file changed, 18 insertions(+), 12 deletions(-)
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 7b99917ffadc..06db79d05c12 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -475,8 +475,8 @@ int ecryptfs_encrypt_page(struct page *page)
 {
         struct inode *ecryptfs_inode;
         struct ecryptfs_crypt_stat *crypt_stat;
-        char *enc_extent_virt = NULL;
-        struct page *enc_extent_page;
+        char *enc_extent_virt;
+        struct page *enc_extent_page = NULL;
         loff_t extent_offset;
         int rc = 0;
 
@@ -492,14 +492,14 @@ int ecryptfs_encrypt_page(struct page *page)
                         page->index);
                 goto out;
         }
-        enc_extent_virt = kmalloc(PAGE_CACHE_SIZE, GFP_USER);
-        if (!enc_extent_virt) {
+        enc_extent_page = alloc_page(GFP_USER);
+        if (!enc_extent_page) {
                 rc = -ENOMEM;
                 ecryptfs_printk(KERN_ERR, "Error allocating memory for "
                                 "encrypted extent\n");
                 goto out;
         }
-        enc_extent_page = virt_to_page(enc_extent_virt);
+        enc_extent_virt = kmap(enc_extent_page);
         for (extent_offset = 0;
              extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size);
              extent_offset++) {
@@ -527,7 +527,10 @@ int ecryptfs_encrypt_page(struct page *page)
                 }
         }
 out:
-        kfree(enc_extent_virt);
+        if (enc_extent_page) {
+                kunmap(enc_extent_page);
+                __free_page(enc_extent_page);
+        }
         return rc;
 }
 
@@ -609,8 +612,8 @@ int ecryptfs_decrypt_page(struct page *page)
 {
         struct inode *ecryptfs_inode;
         struct ecryptfs_crypt_stat *crypt_stat;
-        char *enc_extent_virt = NULL;
-        struct page *enc_extent_page;
+        char *enc_extent_virt;
+        struct page *enc_extent_page = NULL;
         unsigned long extent_offset;
         int rc = 0;
 
@@ -627,14 +630,14 @@ int ecryptfs_decrypt_page(struct page *page)
                         page->index);
                 goto out;
         }
-        enc_extent_virt = kmalloc(PAGE_CACHE_SIZE, GFP_USER);
-        if (!enc_extent_virt) {
+        enc_extent_page = alloc_page(GFP_USER);
+        if (!enc_extent_page) {
                 rc = -ENOMEM;
                 ecryptfs_printk(KERN_ERR, "Error allocating memory for "
                                 "encrypted extent\n");
                 goto out;
         }
-        enc_extent_page = virt_to_page(enc_extent_virt);
+        enc_extent_virt = kmap(enc_extent_page);
         for (extent_offset = 0;
              extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size);
              extent_offset++) {
@@ -662,7 +665,10 @@ int ecryptfs_decrypt_page(struct page *page)
                 }
         }
 out:
-        kfree(enc_extent_virt);
+        if (enc_extent_page) {
+                kunmap(enc_extent_page);
+                __free_page(enc_extent_page);
+        }
         return rc;
 }
 
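
Distilled from the diff above, the replacement pattern looks roughly like this (hypothetical function name; a sketch only, assuming the usual kernel headers): the page itself is the allocation unit, so it is page-aligned by construction, and the virtual address is derived from it by mapping.

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/string.h>

/* Hypothetical example of the post-patch allocation pattern. */
static int fixed_alloc_pattern(void)
{
        struct page *pg;
        char *virt;

        pg = alloc_page(GFP_USER);      /* a whole, page-aligned page */
        if (!pg)
                return -ENOMEM;

        virt = kmap(pg);                /* paired with kunmap() below */

        /* ... use exactly PAGE_CACHE_SIZE bytes at 'virt' ... */
        memset(virt, 0, PAGE_CACHE_SIZE);

        kunmap(pg);
        __free_page(pg);
        return 0;
}

Because the cleanup path must only run when the allocation succeeded, the patch also initializes enc_extent_page to NULL and guards the kunmap()/__free_page() pair at the out: label.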