Diffstat (limited to 'mm')
 mm/shmem.c    | 45
 mm/truncate.c | 10
 2 files changed, 28 insertions, 27 deletions
diff --git a/mm/shmem.c b/mm/shmem.c
index 855eaf5b8d5b..7e5030ae18ff 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -727,10 +727,11 @@ done2:
 	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
 		/*
 		 * Call truncate_inode_pages again: racing shmem_unuse_inode
-		 * may have swizzled a page in from swap since vmtruncate or
-		 * generic_delete_inode did it, before we lowered next_index.
-		 * Also, though shmem_getpage checks i_size before adding to
-		 * cache, no recheck after: so fix the narrow window there too.
+		 * may have swizzled a page in from swap since
+		 * truncate_pagecache or generic_delete_inode did it, before we
+		 * lowered next_index. Also, though shmem_getpage checks
+		 * i_size before adding to cache, no recheck after: so fix the
+		 * narrow window there too.
 		 *
 		 * Recalling truncate_inode_pages_range and unmap_mapping_range
 		 * every time for punch_hole (which never got a chance to clear
@@ -760,19 +761,16 @@ done2:
 	}
 }
 
-static void shmem_truncate(struct inode *inode)
-{
-	shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
-}
-
 static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
 {
 	struct inode *inode = dentry->d_inode;
-	struct page *page = NULL;
 	int error;
 
 	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
-		if (attr->ia_size < inode->i_size) {
+		loff_t newsize = attr->ia_size;
+		struct page *page = NULL;
+
+		if (newsize < inode->i_size) {
 			/*
 			 * If truncating down to a partial page, then
 			 * if that page is already allocated, hold it
@@ -780,9 +778,9 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
 			 * truncate_partial_page cannnot miss it were
 			 * it assigned to swap.
 			 */
-			if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
+			if (newsize & (PAGE_CACHE_SIZE-1)) {
 				(void) shmem_getpage(inode,
-					attr->ia_size>>PAGE_CACHE_SHIFT,
+					newsize >> PAGE_CACHE_SHIFT,
 						&page, SGP_READ, NULL);
 				if (page)
 					unlock_page(page);
@@ -794,24 +792,29 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
 			 * if it's being fully truncated to zero-length: the
 			 * nrpages check is efficient enough in that case.
 			 */
-			if (attr->ia_size) {
+			if (newsize) {
 				struct shmem_inode_info *info = SHMEM_I(inode);
 				spin_lock(&info->lock);
 				info->flags &= ~SHMEM_PAGEIN;
 				spin_unlock(&info->lock);
 			}
 		}
+
+		error = simple_setsize(inode, newsize);
+		if (page)
+			page_cache_release(page);
+		if (error)
+			return error;
+		shmem_truncate_range(inode, newsize, (loff_t)-1);
 	}
 
 	error = inode_change_ok(inode, attr);
 	if (!error)
-		error = inode_setattr(inode, attr);
+		generic_setattr(inode, attr);
 #ifdef CONFIG_TMPFS_POSIX_ACL
 	if (!error && (attr->ia_valid & ATTR_MODE))
 		error = generic_acl_chmod(inode);
 #endif
-	if (page)
-		page_cache_release(page);
 	return error;
 }
 
@@ -819,11 +822,11 @@ static void shmem_delete_inode(struct inode *inode)
 {
 	struct shmem_inode_info *info = SHMEM_I(inode);
 
-	if (inode->i_op->truncate == shmem_truncate) {
+	if (inode->i_mapping->a_ops == &shmem_aops) {
 		truncate_inode_pages(inode->i_mapping, 0);
 		shmem_unacct_size(info->flags, inode->i_size);
 		inode->i_size = 0;
-		shmem_truncate(inode);
+		shmem_truncate_range(inode, 0, (loff_t)-1);
 		if (!list_empty(&info->swaplist)) {
 			mutex_lock(&shmem_swaplist_mutex);
 			list_del_init(&info->swaplist);
@@ -2022,7 +2025,6 @@ static const struct inode_operations shmem_symlink_inline_operations = {
 };
 
 static const struct inode_operations shmem_symlink_inode_operations = {
-	.truncate	= shmem_truncate,
 	.readlink	= generic_readlink,
 	.follow_link	= shmem_follow_link,
 	.put_link	= shmem_put_link,
@@ -2433,14 +2435,13 @@ static const struct file_operations shmem_file_operations = {
 	.write		= do_sync_write,
 	.aio_read	= shmem_file_aio_read,
 	.aio_write	= generic_file_aio_write,
-	.fsync		= simple_sync_file,
+	.fsync		= noop_fsync,
 	.splice_read	= generic_file_splice_read,
 	.splice_write	= generic_file_splice_write,
 #endif
 };
 
 static const struct inode_operations shmem_inode_operations = {
-	.truncate	= shmem_truncate,
 	.setattr	= shmem_notify_change,
 	.truncate_range	= shmem_truncate_range,
 #ifdef CONFIG_TMPFS_POSIX_ACL
diff --git a/mm/truncate.c b/mm/truncate.c
index f42675a3615d..937571b8b233 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -548,18 +548,18 @@ EXPORT_SYMBOL(truncate_pagecache);
  * NOTE! We have to be ready to update the memory sharing
  * between the file and the memory map for a potential last
  * incomplete page. Ugly, but necessary.
+ *
+ * This function is deprecated and simple_setsize or truncate_pagecache
+ * should be used instead.
  */
 int vmtruncate(struct inode *inode, loff_t offset)
 {
-	loff_t oldsize;
 	int error;
 
-	error = inode_newsize_ok(inode, offset);
+	error = simple_setsize(inode, offset);
 	if (error)
 		return error;
-	oldsize = inode->i_size;
-	i_size_write(inode, offset);
-	truncate_pagecache(inode, oldsize, offset);
+
 	if (inode->i_op->truncate)
 		inode->i_op->truncate(inode);
 
