path: root/mm/shmem.c
Diffstat (limited to 'mm/shmem.c')
-rw-r--r--  mm/shmem.c | 74
1 file changed, 52 insertions(+), 22 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index d221a1cfd7b1..fcedf5464eb7 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -539,7 +539,7 @@ static void shmem_free_pages(struct list_head *next)
 	} while (next);
 }
 
-static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
+void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
 {
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	unsigned long idx;
@@ -562,6 +562,8 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
 	spinlock_t *punch_lock;
 	unsigned long upper_limit;
 
+	truncate_inode_pages_range(inode->i_mapping, start, end);
+
 	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
 	idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 	if (idx >= info->next_index)
@@ -738,16 +740,8 @@ done2:
 		 * lowered next_index. Also, though shmem_getpage checks
 		 * i_size before adding to cache, no recheck after: so fix the
 		 * narrow window there too.
-		 *
-		 * Recalling truncate_inode_pages_range and unmap_mapping_range
-		 * every time for punch_hole (which never got a chance to clear
-		 * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
-		 * yet hardly ever necessary: try to optimize them out later.
 		 */
 		truncate_inode_pages_range(inode->i_mapping, start, end);
-		if (punch_hole)
-			unmap_mapping_range(inode->i_mapping, start,
-						end - start, 1);
 	}
 
 	spin_lock(&info->lock);
@@ -766,22 +760,23 @@ done2:
 		shmem_free_pages(pages_to_free.next);
 	}
 }
+EXPORT_SYMBOL_GPL(shmem_truncate_range);
 
-static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
+static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
 {
 	struct inode *inode = dentry->d_inode;
-	loff_t newsize = attr->ia_size;
 	int error;
 
 	error = inode_change_ok(inode, attr);
 	if (error)
 		return error;
 
-	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)
-	    && newsize != inode->i_size) {
+	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
+		loff_t oldsize = inode->i_size;
+		loff_t newsize = attr->ia_size;
 		struct page *page = NULL;
 
-		if (newsize < inode->i_size) {
+		if (newsize < oldsize) {
 			/*
 			 * If truncating down to a partial page, then
 			 * if that page is already allocated, hold it
@@ -810,12 +805,19 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
 				spin_unlock(&info->lock);
 			}
 		}
-
-		/* XXX(truncate): truncate_setsize should be called last */
-		truncate_setsize(inode, newsize);
+		if (newsize != oldsize) {
+			i_size_write(inode, newsize);
+			inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+		}
+		if (newsize < oldsize) {
+			loff_t holebegin = round_up(newsize, PAGE_SIZE);
+			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
+			shmem_truncate_range(inode, newsize, (loff_t)-1);
+			/* unmap again to remove racily COWed private pages */
+			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
+		}
 		if (page)
 			page_cache_release(page);
-		shmem_truncate_range(inode, newsize, (loff_t)-1);
 	}
 
 	setattr_copy(inode, attr);
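
For illustration, a minimal userspace sketch (not part of this patch) of the semantics the reordered shmem_setattr enforces: i_size is lowered first, then the unmap/truncate/unmap sequence removes every page beyond the new EOF from every mapping, so a MAP_SHARED access past EOF raises SIGBUS. The /dev/shm path is an assumption (tmpfs on most systems); error handling is elided.

	/* hypothetical test program, not from the patch */
	#include <fcntl.h>
	#include <signal.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	static void on_sigbus(int sig)
	{
		/* reached when the truncated page was unmapped, as expected */
		printf("SIGBUS past new EOF: truncated pages were unmapped\n");
		exit(0);
	}

	int main(void)
	{
		char path[] = "/dev/shm/shmem-trunc-XXXXXX";	/* assumes tmpfs here */
		long pg = sysconf(_SC_PAGESIZE);
		int fd = mkstemp(path);
		char *map;

		signal(SIGBUS, on_sigbus);
		unlink(path);
		ftruncate(fd, 2 * pg);			/* two pages */
		map = mmap(NULL, 2 * pg, PROT_READ | PROT_WRITE,
			   MAP_SHARED, fd, 0);
		memset(map, 0xaa, 2 * pg);		/* fault both pages in */
		ftruncate(fd, pg);			/* shrink to one page */
		map[pg] = 1;				/* access past EOF: SIGBUS */
		printf("unexpected: page past new EOF still mapped\n");
		return 1;
	}
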
@@ -832,7 +834,6 @@ static void shmem_evict_inode(struct inode *inode)
 	struct shmem_xattr *xattr, *nxattr;
 
 	if (inode->i_mapping->a_ops == &shmem_aops) {
-		truncate_inode_pages(inode->i_mapping, 0);
 		shmem_unacct_size(info->flags, inode->i_size);
 		inode->i_size = 0;
 		shmem_truncate_range(inode, 0, (loff_t)-1);
@@ -2706,7 +2707,7 @@ static const struct file_operations shmem_file_operations = {
 };
 
 static const struct inode_operations shmem_inode_operations = {
-	.setattr = shmem_notify_change,
+	.setattr = shmem_setattr,
 	.truncate_range = shmem_truncate_range,
 #ifdef CONFIG_TMPFS_XATTR
 	.setxattr = shmem_setxattr,
@@ -2739,7 +2740,7 @@ static const struct inode_operations shmem_dir_inode_operations = {
 	.removexattr = shmem_removexattr,
 #endif
 #ifdef CONFIG_TMPFS_POSIX_ACL
-	.setattr = shmem_notify_change,
+	.setattr = shmem_setattr,
 	.check_acl = generic_check_acl,
 #endif
 };
@@ -2752,7 +2753,7 @@ static const struct inode_operations shmem_special_inode_operations = {
 	.removexattr = shmem_removexattr,
 #endif
 #ifdef CONFIG_TMPFS_POSIX_ACL
-	.setattr = shmem_notify_change,
+	.setattr = shmem_setattr,
 	.check_acl = generic_check_acl,
 #endif
 };
@@ -2908,6 +2909,12 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
 	return 0;
 }
 
+void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
+{
+	truncate_inode_pages_range(inode->i_mapping, start, end);
+}
+EXPORT_SYMBOL_GPL(shmem_truncate_range);
+
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 /**
  * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
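
This second definition serves the CONFIG_SHMEM=n build, where tmpfs falls back to ramfs and plain page-cache truncation suffices. Either variant is reached through the inode's truncate_range operation; a simplified caller-side sketch, modelled loosely on vmtruncate_range() in mm/truncate.c of this era (the function name here is hypothetical, and the real caller also unmaps the range and serializes against concurrent truncation):

	#include <linux/fs.h>

	/* hypothetical, simplified hole-punch caller */
	static int example_punch_hole(struct inode *inode, loff_t start, loff_t end)
	{
		if (!inode->i_op->truncate_range)
			return -ENOSYS;

		mutex_lock(&inode->i_mutex);
		inode->i_op->truncate_range(inode, start, end);
		mutex_unlock(&inode->i_mutex);
		return 0;
	}
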
@@ -3028,3 +3035,26 @@ int shmem_zero_setup(struct vm_area_struct *vma)
 	vma->vm_flags |= VM_CAN_NONLINEAR;
 	return 0;
 }
+
+/**
+ * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
+ * @mapping:	the page's address_space
+ * @index:	the page index
+ * @gfp:	the page allocator flags to use if allocating
+ *
+ * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
+ * with any new page allocations done using the specified allocation flags.
+ * But read_cache_page_gfp() uses the ->readpage() method: which does not
+ * suit tmpfs, since it may have pages in swapcache, and needs to find those
+ * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
+ *
+ * Provide a stub for those callers to start using now, then later
+ * flesh it out to call shmem_getpage() with additional gfp mask, when
+ * shmem_file_splice_read() is added and shmem_readpage() is removed.
+ */
+struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
+					 pgoff_t index, gfp_t gfp)
+{
+	return read_cache_page_gfp(mapping, index, gfp);
+}
+EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
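
A hedged sketch of the caller side this stub targets: a GEM-style driver populating its page array from a shmem-backed file, along the lines of what i915 and ttm do. The object type and field names below are illustrative, not taken from either driver or from this patch:

	#include <linux/err.h>
	#include <linux/fs.h>
	#include <linux/pagemap.h>
	#include <linux/shmem_fs.h>

	/* hypothetical object type, not a real driver structure */
	struct example_gem_object {
		struct file *filp;	/* shmem-backed file */
		struct page **pages;
		int num_pages;
	};

	static int example_get_pages(struct example_gem_object *obj)
	{
		struct address_space *mapping =
			obj->filp->f_path.dentry->d_inode->i_mapping;
		struct page *page;
		int i;

		for (i = 0; i < obj->num_pages; i++) {
			/* allocate with the mapping's own gfp mask, as the
			 * GPU drivers of this era do */
			page = shmem_read_mapping_page_gfp(mapping, i,
						mapping_gfp_mask(mapping));
			if (IS_ERR(page))
				goto err;
			obj->pages[i] = page;
		}
		return 0;
	err:
		while (i--)
			page_cache_release(obj->pages[i]);
		return PTR_ERR(page);
	}

Pages obtained this way are held with an elevated refcount and must be released with page_cache_release() when the object is torn down, as the error path above does for the pages gathered so far.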