path: root/mm/shmem.c
Diffstat (limited to 'mm/shmem.c')
-rw-r--r--	mm/shmem.c	495
1 file changed, 227 insertions(+), 268 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 51b3d6ccddab..0f246c44a574 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -78,11 +78,10 @@
 
 /* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
 enum sgp_type {
-	SGP_QUICK,	/* don't try more than file page cache lookup */
 	SGP_READ,	/* don't exceed i_size, don't allocate page */
 	SGP_CACHE,	/* don't exceed i_size, may allocate page */
+	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
 	SGP_WRITE,	/* may exceed i_size, may allocate page */
-	SGP_FAULT,	/* same as SGP_CACHE, return with page locked */
 };
 
 static int shmem_getpage(struct inode *inode, unsigned long idx,
@@ -194,7 +193,7 @@ static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
 };
 
 static LIST_HEAD(shmem_swaplist);
-static DEFINE_SPINLOCK(shmem_swaplist_lock);
+static DEFINE_MUTEX(shmem_swaplist_mutex);
 
 static void shmem_free_blocks(struct inode *inode, long pages)
 {
@@ -207,6 +206,31 @@ static void shmem_free_blocks(struct inode *inode, long pages)
 	}
 }
 
+static int shmem_reserve_inode(struct super_block *sb)
+{
+	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
+	if (sbinfo->max_inodes) {
+		spin_lock(&sbinfo->stat_lock);
+		if (!sbinfo->free_inodes) {
+			spin_unlock(&sbinfo->stat_lock);
+			return -ENOSPC;
+		}
+		sbinfo->free_inodes--;
+		spin_unlock(&sbinfo->stat_lock);
+	}
+	return 0;
+}
+
+static void shmem_free_inode(struct super_block *sb)
+{
+	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
+	if (sbinfo->max_inodes) {
+		spin_lock(&sbinfo->stat_lock);
+		sbinfo->free_inodes++;
+		spin_unlock(&sbinfo->stat_lock);
+	}
+}
+
 /*
  * shmem_recalc_inode - recalculate the size of an inode
  *
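The two helpers added above factor out the free_inodes accounting that the rest of this diff removes from its former call sites. As a rough usage sketch (illustrative only; example_create is hypothetical, the real callers are shmem_get_inode, shmem_link, shmem_unlink and shmem_delete_inode further down), the expected discipline is reserve-before-create, free-on-failure-or-removal:

	static int example_create(struct super_block *sb)
	{
		struct inode *inode;
		int error;

		error = shmem_reserve_inode(sb);	/* -ENOSPC once the mount's inode limit is reached */
		if (error)
			return error;

		inode = new_inode(sb);
		if (!inode) {
			shmem_free_inode(sb);		/* hand the reservation back on failure */
			return -ENOMEM;
		}
		/* ... initialize and instantiate the inode ... */
		return 0;
	}
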
@@ -731,6 +755,8 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
 			(void) shmem_getpage(inode,
 					attr->ia_size>>PAGE_CACHE_SHIFT,
 						&page, SGP_READ, NULL);
+			if (page)
+				unlock_page(page);
 		}
 		/*
 		 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
@@ -762,7 +788,6 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
 
 static void shmem_delete_inode(struct inode *inode)
 {
-	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
 	struct shmem_inode_info *info = SHMEM_I(inode);
 
 	if (inode->i_op->truncate == shmem_truncate) {
@@ -771,17 +796,13 @@ static void shmem_delete_inode(struct inode *inode)
 		inode->i_size = 0;
 		shmem_truncate(inode);
 		if (!list_empty(&info->swaplist)) {
-			spin_lock(&shmem_swaplist_lock);
+			mutex_lock(&shmem_swaplist_mutex);
 			list_del_init(&info->swaplist);
-			spin_unlock(&shmem_swaplist_lock);
+			mutex_unlock(&shmem_swaplist_mutex);
 		}
 	}
 	BUG_ON(inode->i_blocks);
-	if (sbinfo->max_inodes) {
-		spin_lock(&sbinfo->stat_lock);
-		sbinfo->free_inodes++;
-		spin_unlock(&sbinfo->stat_lock);
-	}
+	shmem_free_inode(inode->i_sb);
 	clear_inode(inode);
 }
 
@@ -807,19 +828,22 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
 	struct page *subdir;
 	swp_entry_t *ptr;
 	int offset;
+	int error;
 
 	idx = 0;
 	ptr = info->i_direct;
 	spin_lock(&info->lock);
+	if (!info->swapped) {
+		list_del_init(&info->swaplist);
+		goto lost2;
+	}
 	limit = info->next_index;
 	size = limit;
 	if (size > SHMEM_NR_DIRECT)
 		size = SHMEM_NR_DIRECT;
 	offset = shmem_find_swp(entry, ptr, ptr+size);
-	if (offset >= 0) {
-		shmem_swp_balance_unmap();
+	if (offset >= 0)
 		goto found;
-	}
 	if (!info->i_indirect)
 		goto lost2;
 
@@ -829,6 +853,14 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
 	for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
 		if (unlikely(idx == stage)) {
 			shmem_dir_unmap(dir-1);
+			if (cond_resched_lock(&info->lock)) {
+				/* check it has not been truncated */
+				if (limit > info->next_index) {
+					limit = info->next_index;
+					if (idx >= limit)
+						goto lost2;
+				}
+			}
 			dir = shmem_dir_map(info->i_indirect) +
 				ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
 			while (!*dir) {
@@ -849,11 +881,11 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
 			if (size > ENTRIES_PER_PAGE)
 				size = ENTRIES_PER_PAGE;
 			offset = shmem_find_swp(entry, ptr, ptr+size);
+			shmem_swp_unmap(ptr);
 			if (offset >= 0) {
 				shmem_dir_unmap(dir);
 				goto found;
 			}
-			shmem_swp_unmap(ptr);
 		}
 	}
 lost1:
@@ -863,19 +895,63 @@ lost2:
 	return 0;
 found:
 	idx += offset;
-	inode = &info->vfs_inode;
-	if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) {
-		info->flags |= SHMEM_PAGEIN;
-		shmem_swp_set(info, ptr + offset, 0);
-	}
-	shmem_swp_unmap(ptr);
+	inode = igrab(&info->vfs_inode);
 	spin_unlock(&info->lock);
+
 	/*
-	 * Decrement swap count even when the entry is left behind:
-	 * try_to_unuse will skip over mms, then reincrement count.
+	 * Move _head_ to start search for next from here.
+	 * But be careful: shmem_delete_inode checks list_empty without taking
+	 * mutex, and there's an instant in list_move_tail when info->swaplist
+	 * would appear empty, if it were the only one on shmem_swaplist.  We
+	 * could avoid doing it if inode NULL; or use this minor optimization.
 	 */
-	swap_free(entry);
-	return 1;
+	if (shmem_swaplist.next != &info->swaplist)
+		list_move_tail(&shmem_swaplist, &info->swaplist);
+	mutex_unlock(&shmem_swaplist_mutex);
+
+	error = 1;
+	if (!inode)
+		goto out;
+	error = radix_tree_preload(GFP_KERNEL);
+	if (error)
+		goto out;
+	error = 1;
+
+	spin_lock(&info->lock);
+	ptr = shmem_swp_entry(info, idx, NULL);
+	if (ptr && ptr->val == entry.val)
+		error = add_to_page_cache(page, inode->i_mapping,
+						idx, GFP_NOWAIT);
+	if (error == -EEXIST) {
+		struct page *filepage = find_get_page(inode->i_mapping, idx);
+		error = 1;
+		if (filepage) {
+			/*
+			 * There might be a more uptodate page coming down
+			 * from a stacked writepage: forget our swappage if so.
+			 */
+			if (PageUptodate(filepage))
+				error = 0;
+			page_cache_release(filepage);
+		}
+	}
+	if (!error) {
+		delete_from_swap_cache(page);
+		set_page_dirty(page);
+		info->flags |= SHMEM_PAGEIN;
+		shmem_swp_set(info, ptr, 0);
+		swap_free(entry);
+		error = 1;	/* not an error, but entry was found */
+	}
+	if (ptr)
+		shmem_swp_unmap(ptr);
+	spin_unlock(&info->lock);
+	radix_tree_preload_end();
+out:
+	unlock_page(page);
+	page_cache_release(page);
+	iput(inode);		/* allows for NULL */
+	return error;
 }
 
 /*
@@ -887,20 +963,16 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
 	struct shmem_inode_info *info;
 	int found = 0;
 
-	spin_lock(&shmem_swaplist_lock);
+	mutex_lock(&shmem_swaplist_mutex);
 	list_for_each_safe(p, next, &shmem_swaplist) {
 		info = list_entry(p, struct shmem_inode_info, swaplist);
-		if (!info->swapped)
-			list_del_init(&info->swaplist);
-		else if (shmem_unuse_inode(info, entry, page)) {
-			/* move head to start search for next from here */
-			list_move_tail(&shmem_swaplist, &info->swaplist);
-			found = 1;
-			break;
-		}
+		found = shmem_unuse_inode(info, entry, page);
+		cond_resched();
+		if (found)
+			goto out;
 	}
-	spin_unlock(&shmem_swaplist_lock);
-	return found;
+	mutex_unlock(&shmem_swaplist_mutex);
+out:	return found;		/* 0 or 1 or -ENOMEM */
 }
 
 /*
@@ -915,54 +987,65 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 	struct inode *inode;
 
 	BUG_ON(!PageLocked(page));
-	/*
-	 * shmem_backing_dev_info's capabilities prevent regular writeback or
-	 * sync from ever calling shmem_writepage; but a stacking filesystem
-	 * may use the ->writepage of its underlying filesystem, in which case
-	 * we want to do nothing when that underlying filesystem is tmpfs
-	 * (writing out to swap is useful as a response to memory pressure, but
-	 * of no use to stabilize the data) - just redirty the page, unlock it
-	 * and claim success in this case.  AOP_WRITEPAGE_ACTIVATE, and the
-	 * page_mapped check below, must be avoided unless we're in reclaim.
-	 */
-	if (!wbc->for_reclaim) {
-		set_page_dirty(page);
-		unlock_page(page);
-		return 0;
-	}
-	BUG_ON(page_mapped(page));
-
 	mapping = page->mapping;
 	index = page->index;
 	inode = mapping->host;
 	info = SHMEM_I(inode);
 	if (info->flags & VM_LOCKED)
 		goto redirty;
-	swap = get_swap_page();
-	if (!swap.val)
+	if (!total_swap_pages)
 		goto redirty;
 
+	/*
+	 * shmem_backing_dev_info's capabilities prevent regular writeback or
+	 * sync from ever calling shmem_writepage; but a stacking filesystem
+	 * may use the ->writepage of its underlying filesystem, in which case
+	 * tmpfs should write out to swap only in response to memory pressure,
+	 * and not for pdflush or sync.  However, in those cases, we do still
+	 * want to check if there's a redundant swappage to be discarded.
+	 */
+	if (wbc->for_reclaim)
+		swap = get_swap_page();
+	else
+		swap.val = 0;
+
 	spin_lock(&info->lock);
-	shmem_recalc_inode(inode);
 	if (index >= info->next_index) {
 		BUG_ON(!(info->flags & SHMEM_TRUNCATE));
 		goto unlock;
 	}
 	entry = shmem_swp_entry(info, index, NULL);
-	BUG_ON(!entry);
-	BUG_ON(entry->val);
+	if (entry->val) {
+		/*
+		 * The more uptodate page coming down from a stacked
+		 * writepage should replace our old swappage.
+		 */
+		free_swap_and_cache(*entry);
+		shmem_swp_set(info, entry, 0);
+	}
+	shmem_recalc_inode(inode);
 
-	if (move_to_swap_cache(page, swap) == 0) {
+	if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
+		remove_from_page_cache(page);
 		shmem_swp_set(info, entry, swap.val);
 		shmem_swp_unmap(entry);
+		if (list_empty(&info->swaplist))
+			inode = igrab(inode);
+		else
+			inode = NULL;
 		spin_unlock(&info->lock);
-		if (list_empty(&info->swaplist)) {
-			spin_lock(&shmem_swaplist_lock);
+		swap_duplicate(swap);
+		BUG_ON(page_mapped(page));
+		page_cache_release(page);	/* pagecache ref */
+		set_page_dirty(page);
+		unlock_page(page);
+		if (inode) {
+			mutex_lock(&shmem_swaplist_mutex);
 			/* move instead of add in case we're racing */
 			list_move_tail(&info->swaplist, &shmem_swaplist);
-			spin_unlock(&shmem_swaplist_lock);
+			mutex_unlock(&shmem_swaplist_mutex);
+			iput(inode);
 		}
-		unlock_page(page);
 		return 0;
 	}
 
@@ -972,7 +1055,10 @@ unlock:
 	swap_free(swap);
 redirty:
 	set_page_dirty(page);
-	return AOP_WRITEPAGE_ACTIVATE;	/* Return with the page locked */
+	if (wbc->for_reclaim)
+		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
+	unlock_page(page);
+	return 0;
 }
 
 #ifdef CONFIG_NUMA
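Taken together, the two shmem_writepage() hunks above change its contract: swap is only allocated when the call comes from reclaim (wbc->for_reclaim), and a non-reclaim caller, such as a stacked filesystem's sync or pdflush path, gets the page redirtied and unlocked rather than AOP_WRITEPAGE_ACTIVATE, which only the VM knows how to handle. A hedged sketch of that shape for a swap-backed ->writepage (write_to_swap is a stand-in helper, not a real kernel function):

	static int example_writepage(struct page *page, struct writeback_control *wbc)
	{
		swp_entry_t swap = { .val = 0 };

		if (wbc->for_reclaim)
			swap = get_swap_page();		/* only reclaim should consume swap */

		if (swap.val) {
			if (write_to_swap(page, swap))	/* stand-in for the real hand-off to swap */
				return 0;
			swap_free(swap);		/* hand-off failed: give the entry back */
		}

		set_page_dirty(page);
		if (wbc->for_reclaim)
			return AOP_WRITEPAGE_ACTIVATE;	/* VM caller expects the page still locked */
		unlock_page(page);			/* sync/pdflush caller: redirty and claim success */
		return 0;
	}
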
@@ -1025,53 +1111,33 @@ out:
 	return err;
 }
 
-static struct page *shmem_swapin_async(struct shared_policy *p,
-			swp_entry_t entry, unsigned long idx)
+static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
+			struct shmem_inode_info *info, unsigned long idx)
 {
-	struct page *page;
 	struct vm_area_struct pvma;
+	struct page *page;
 
 	/* Create a pseudo vma that just contains the policy */
-	memset(&pvma, 0, sizeof(struct vm_area_struct));
-	pvma.vm_end = PAGE_SIZE;
+	pvma.vm_start = 0;
 	pvma.vm_pgoff = idx;
-	pvma.vm_policy = mpol_shared_policy_lookup(p, idx);
-	page = read_swap_cache_async(entry, &pvma, 0);
+	pvma.vm_ops = NULL;
+	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
+	page = swapin_readahead(entry, gfp, &pvma, 0);
 	mpol_free(pvma.vm_policy);
 	return page;
 }
 
-static struct page *shmem_swapin(struct shmem_inode_info *info,
-			swp_entry_t entry, unsigned long idx)
-{
-	struct shared_policy *p = &info->policy;
-	int i, num;
-	struct page *page;
-	unsigned long offset;
-
-	num = valid_swaphandles(entry, &offset);
-	for (i = 0; i < num; offset++, i++) {
-		page = shmem_swapin_async(p,
-				swp_entry(swp_type(entry), offset), idx);
-		if (!page)
-			break;
-		page_cache_release(page);
-	}
-	lru_add_drain();	/* Push any new pages onto the LRU now */
-	return shmem_swapin_async(p, entry, idx);
-}
-
-static struct page *
-shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
-		 unsigned long idx)
+static struct page *shmem_alloc_page(gfp_t gfp,
+			struct shmem_inode_info *info, unsigned long idx)
 {
 	struct vm_area_struct pvma;
 	struct page *page;
 
-	memset(&pvma, 0, sizeof(struct vm_area_struct));
-	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
+	/* Create a pseudo vma that just contains the policy */
+	pvma.vm_start = 0;
 	pvma.vm_pgoff = idx;
-	pvma.vm_end = PAGE_SIZE;
+	pvma.vm_ops = NULL;
+	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
 	page = alloc_page_vma(gfp, &pvma, 0);
 	mpol_free(pvma.vm_policy);
 	return page;
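Both NUMA helpers above now build the pseudo-vma field by field instead of memset()ing the whole vm_area_struct: only the members that alloc_page_vma()/swapin_readahead() and the mempolicy lookup actually read need to be valid. A sketch of the same idiom against an arbitrary shared policy (alloc_with_policy is illustrative, not part of the patch):

	static struct page *alloc_with_policy(struct shared_policy *sp,
					      gfp_t gfp, unsigned long idx)
	{
		struct vm_area_struct pvma;
		struct page *page;

		/* set only the fields consumed by the policy lookup and allocator */
		pvma.vm_start = 0;
		pvma.vm_pgoff = idx;
		pvma.vm_ops = NULL;
		pvma.vm_policy = mpol_shared_policy_lookup(sp, idx);

		page = alloc_page_vma(gfp, &pvma, 0);
		mpol_free(pvma.vm_policy);	/* drop the reference taken by the lookup */
		return page;
	}
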
@@ -1083,15 +1149,14 @@ static inline int shmem_parse_mpol(char *value, int *policy,
 	return 1;
 }
 
-static inline struct page *
-shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
+static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
+			struct shmem_inode_info *info, unsigned long idx)
 {
-	swapin_readahead(entry, 0, NULL);
-	return read_swap_cache_async(entry, NULL, 0);
+	return swapin_readahead(entry, gfp, NULL, 0);
 }
 
-static inline struct page *
-shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx)
+static inline struct page *shmem_alloc_page(gfp_t gfp,
+			struct shmem_inode_info *info, unsigned long idx)
 {
 	return alloc_page(gfp);
 }
@@ -1114,6 +1179,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
 	struct page *swappage;
 	swp_entry_t *entry;
 	swp_entry_t swap;
+	gfp_t gfp;
 	int error;
 
 	if (idx >= SHMEM_MAX_INDEX)
@@ -1126,7 +1192,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
 	 * Normally, filepage is NULL on entry, and either found
 	 * uptodate immediately, or allocated and zeroed, or read
 	 * in under swappage, which is then assigned to filepage.
-	 * But shmem_readpage and shmem_write_begin pass in a locked
+	 * But shmem_readpage (required for splice) passes in a locked
 	 * filepage, which may be found not uptodate by other callers
 	 * too, and may need to be copied from the swappage read in.
 	 */
@@ -1136,8 +1202,17 @@ repeat:
 	if (filepage && PageUptodate(filepage))
 		goto done;
 	error = 0;
-	if (sgp == SGP_QUICK)
-		goto failed;
+	gfp = mapping_gfp_mask(mapping);
+	if (!filepage) {
+		/*
+		 * Try to preload while we can wait, to not make a habit of
+		 * draining atomic reserves; but don't latch on to this cpu.
+		 */
+		error = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
+		if (error)
+			goto failed;
+		radix_tree_preload_end();
+	}
 
 	spin_lock(&info->lock);
 	shmem_recalc_inode(inode);
@@ -1160,7 +1235,7 @@ repeat:
 				*type |= VM_FAULT_MAJOR;
 			}
 			spin_unlock(&info->lock);
-			swappage = shmem_swapin(info, swap, idx);
+			swappage = shmem_swapin(swap, gfp, info, idx);
 			if (!swappage) {
 				spin_lock(&info->lock);
 				entry = shmem_swp_alloc(info, idx, sgp);
@@ -1218,23 +1293,21 @@ repeat:
 				SetPageUptodate(filepage);
 				set_page_dirty(filepage);
 				swap_free(swap);
-			} else if (!(error = move_from_swap_cache(
-					swappage, idx, mapping))) {
+			} else if (!(error = add_to_page_cache(
+					swappage, mapping, idx, GFP_NOWAIT))) {
 				info->flags |= SHMEM_PAGEIN;
 				shmem_swp_set(info, entry, 0);
 				shmem_swp_unmap(entry);
+				delete_from_swap_cache(swappage);
 				spin_unlock(&info->lock);
 				filepage = swappage;
+				set_page_dirty(filepage);
 				swap_free(swap);
 			} else {
 				shmem_swp_unmap(entry);
 				spin_unlock(&info->lock);
 				unlock_page(swappage);
 				page_cache_release(swappage);
-				if (error == -ENOMEM) {
-					/* let kswapd refresh zone for GFP_ATOMICs */
-					congestion_wait(WRITE, HZ/50);
-				}
 				goto repeat;
 			}
 		} else if (sgp == SGP_READ && !filepage) {
@@ -1272,9 +1345,7 @@ repeat:
 
 		if (!filepage) {
 			spin_unlock(&info->lock);
-			filepage = shmem_alloc_page(mapping_gfp_mask(mapping),
-						    info,
-						    idx);
+			filepage = shmem_alloc_page(gfp, info, idx);
 			if (!filepage) {
 				shmem_unacct_blocks(info->flags, 1);
 				shmem_free_blocks(inode, 1);
@@ -1291,7 +1362,7 @@ repeat:
 				shmem_swp_unmap(entry);
 			}
 			if (error || swap.val || 0 != add_to_page_cache_lru(
-					filepage, mapping, idx, GFP_ATOMIC)) {
+					filepage, mapping, idx, GFP_NOWAIT)) {
 				spin_unlock(&info->lock);
 				page_cache_release(filepage);
 				shmem_unacct_blocks(info->flags, 1);
@@ -1309,14 +1380,11 @@ repeat:
 			clear_highpage(filepage);
 			flush_dcache_page(filepage);
 			SetPageUptodate(filepage);
+			if (sgp == SGP_DIRTY)
+				set_page_dirty(filepage);
 		}
 done:
-	if (*pagep != filepage) {
-		*pagep = filepage;
-		if (sgp != SGP_FAULT)
-			unlock_page(filepage);
-
-	}
+	*pagep = filepage;
 	return 0;
 
 failed:
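With SGP_QUICK and SGP_FAULT gone and the unlock removed from the done: path, shmem_getpage() now appears to hand *pagep back locked (and referenced) in every case; the unlock_page() calls added to shmem_notify_change(), do_shmem_file_read(), shmem_symlink() and shmem_follow_link() elsewhere in this diff follow from that. A sketch of the resulting caller pattern (read_one_page is illustrative only):

	static int read_one_page(struct inode *inode, unsigned long idx)
	{
		struct page *page = NULL;
		int error;

		error = shmem_getpage(inode, idx, &page, SGP_READ, NULL);
		if (error)
			return error;
		if (page) {			/* SGP_READ may return no page for a hole */
			/* ... page is locked and uptodate here ... */
			unlock_page(page);
			page_cache_release(page);
		}
		return 0;
	}
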
@@ -1336,7 +1404,7 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
 		return VM_FAULT_SIGBUS;
 
-	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_FAULT, &ret);
+	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
 	if (error)
 		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
 
@@ -1399,15 +1467,8 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
 	struct shmem_inode_info *info;
 	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
 
-	if (sbinfo->max_inodes) {
-		spin_lock(&sbinfo->stat_lock);
-		if (!sbinfo->free_inodes) {
-			spin_unlock(&sbinfo->stat_lock);
-			return NULL;
-		}
-		sbinfo->free_inodes--;
-		spin_unlock(&sbinfo->stat_lock);
-	}
+	if (shmem_reserve_inode(sb))
+		return NULL;
 
 	inode = new_inode(sb);
 	if (inode) {
@@ -1451,11 +1512,8 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
 						NULL);
 			break;
 		}
-	} else if (sbinfo->max_inodes) {
-		spin_lock(&sbinfo->stat_lock);
-		sbinfo->free_inodes++;
-		spin_unlock(&sbinfo->stat_lock);
-	}
+	} else
+		shmem_free_inode(sb);
 	return inode;
 }
 
@@ -1494,123 +1552,30 @@ shmem_write_end(struct file *file, struct address_space *mapping,
 {
 	struct inode *inode = mapping->host;
 
+	if (pos + copied > inode->i_size)
+		i_size_write(inode, pos + copied);
+
+	unlock_page(page);
 	set_page_dirty(page);
 	page_cache_release(page);
 
-	if (pos+copied > inode->i_size)
-		i_size_write(inode, pos+copied);
-
 	return copied;
 }
 
-static ssize_t
-shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
-{
-	struct inode *inode = file->f_path.dentry->d_inode;
-	loff_t pos;
-	unsigned long written;
-	ssize_t err;
-
-	if ((ssize_t) count < 0)
-		return -EINVAL;
-
-	if (!access_ok(VERIFY_READ, buf, count))
-		return -EFAULT;
-
-	mutex_lock(&inode->i_mutex);
-
-	pos = *ppos;
-	written = 0;
-
-	err = generic_write_checks(file, &pos, &count, 0);
-	if (err || !count)
-		goto out;
-
-	err = remove_suid(file->f_path.dentry);
-	if (err)
-		goto out;
-
-	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
-
-	do {
-		struct page *page = NULL;
-		unsigned long bytes, index, offset;
-		char *kaddr;
-		int left;
-
-		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
-		index = pos >> PAGE_CACHE_SHIFT;
-		bytes = PAGE_CACHE_SIZE - offset;
-		if (bytes > count)
-			bytes = count;
-
-		/*
-		 * We don't hold page lock across copy from user -
-		 * what would it guard against? - so no deadlock here.
-		 * But it still may be a good idea to prefault below.
-		 */
-
-		err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
-		if (err)
-			break;
-
-		left = bytes;
-		if (PageHighMem(page)) {
-			volatile unsigned char dummy;
-			__get_user(dummy, buf);
-			__get_user(dummy, buf + bytes - 1);
-
-			kaddr = kmap_atomic(page, KM_USER0);
-			left = __copy_from_user_inatomic(kaddr + offset,
-							buf, bytes);
-			kunmap_atomic(kaddr, KM_USER0);
-		}
-		if (left) {
-			kaddr = kmap(page);
-			left = __copy_from_user(kaddr + offset, buf, bytes);
-			kunmap(page);
-		}
-
-		written += bytes;
-		count -= bytes;
-		pos += bytes;
-		buf += bytes;
-		if (pos > inode->i_size)
-			i_size_write(inode, pos);
-
-		flush_dcache_page(page);
-		set_page_dirty(page);
-		mark_page_accessed(page);
-		page_cache_release(page);
-
-		if (left) {
-			pos -= left;
-			written -= left;
-			err = -EFAULT;
-			break;
-		}
-
-		/*
-		 * Our dirty pages are not counted in nr_dirty,
-		 * and we do not attempt to balance dirty pages.
-		 */
-
-		cond_resched();
-	} while (count);
-
-	*ppos = pos;
-	if (written)
-		err = written;
-out:
-	mutex_unlock(&inode->i_mutex);
-	return err;
-}
-
 static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
 {
 	struct inode *inode = filp->f_path.dentry->d_inode;
 	struct address_space *mapping = inode->i_mapping;
 	unsigned long index, offset;
+	enum sgp_type sgp = SGP_READ;
+
+	/*
+	 * Might this read be for a stacking filesystem?  Then when reading
+	 * holes of a sparse file, we actually need to allocate those pages,
+	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
+	 */
+	if (segment_eq(get_fs(), KERNEL_DS))
+		sgp = SGP_DIRTY;
 
 	index = *ppos >> PAGE_CACHE_SHIFT;
 	offset = *ppos & ~PAGE_CACHE_MASK;
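With shmem_file_write() deleted, writes reach tmpfs through generic_file_aio_write() and the write_begin/write_end address_space operations; the amended shmem_write_end() above updates i_size, then unlocks and releases the page that write_begin returned locked. The write_begin side is outside this hunk, so the following is only a sketch of its expected shape, inferred from the shmem_getpage() contract (example_write_begin is not code from the patch):

	static int example_write_begin(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
	{
		struct inode *inode = mapping->host;

		*pagep = NULL;	/* let shmem_getpage() find or allocate the page, returned locked */
		return shmem_getpage(inode, pos >> PAGE_CACHE_SHIFT, pagep, SGP_WRITE, NULL);
	}
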
@@ -1629,12 +1594,14 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
 				break;
 		}
 
-		desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
+		desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
 		if (desc->error) {
 			if (desc->error == -EINVAL)
 				desc->error = 0;
 			break;
 		}
+		if (page)
+			unlock_page(page);
 
 		/*
 		 * We must evaluate after, since reads (unlike writes)
@@ -1798,22 +1765,16 @@ static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
 {
 	struct inode *inode = old_dentry->d_inode;
-	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+	int ret;
 
 	/*
 	 * No ordinary (disk based) filesystem counts links as inodes;
 	 * but each new link needs a new dentry, pinning lowmem, and
 	 * tmpfs dentries cannot be pruned until they are unlinked.
 	 */
-	if (sbinfo->max_inodes) {
-		spin_lock(&sbinfo->stat_lock);
-		if (!sbinfo->free_inodes) {
-			spin_unlock(&sbinfo->stat_lock);
-			return -ENOSPC;
-		}
-		sbinfo->free_inodes--;
-		spin_unlock(&sbinfo->stat_lock);
-	}
+	ret = shmem_reserve_inode(inode->i_sb);
+	if (ret)
+		goto out;
 
 	dir->i_size += BOGO_DIRENT_SIZE;
 	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
@@ -1821,21 +1782,16 @@ static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
 	atomic_inc(&inode->i_count);	/* New dentry reference */
 	dget(dentry);		/* Extra pinning count for the created dentry */
 	d_instantiate(dentry, inode);
-	return 0;
+out:
+	return ret;
 }
 
 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
 {
 	struct inode *inode = dentry->d_inode;
 
-	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) {
-		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
-		if (sbinfo->max_inodes) {
-			spin_lock(&sbinfo->stat_lock);
-			sbinfo->free_inodes++;
-			spin_unlock(&sbinfo->stat_lock);
-		}
-	}
+	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
+		shmem_free_inode(inode->i_sb);
 
 	dir->i_size -= BOGO_DIRENT_SIZE;
 	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
@@ -1924,6 +1880,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
 			iput(inode);
 			return error;
 		}
+		unlock_page(page);
 		inode->i_op = &shmem_symlink_inode_operations;
 		kaddr = kmap_atomic(page, KM_USER0);
 		memcpy(kaddr, symname, len);
@@ -1951,6 +1908,8 @@ static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
 	struct page *page = NULL;
 	int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
 	nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
+	if (page)
+		unlock_page(page);
 	return page;
 }
 
@@ -1996,8 +1955,7 @@ static int shmem_xattr_security_get(struct inode *inode, const char *name,
 {
 	if (strcmp(name, "") == 0)
 		return -EINVAL;
-	return security_inode_getsecurity(inode, name, buffer, size,
-					  -EOPNOTSUPP);
+	return xattr_getsecurity(inode, name, buffer, size);
 }
 
 static int shmem_xattr_security_set(struct inode *inode, const char *name,
@@ -2138,7 +2096,7 @@ static int shmem_parse_options(char *options, int *mode, uid_t *uid,
 			}
 			if (*rest)
 				goto bad_val;
-			*blocks = size >> PAGE_CACHE_SHIFT;
+			*blocks = DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
 		} else if (!strcmp(this_char,"nr_blocks")) {
 			*blocks = memparse(value,&rest);
 			if (*rest)
@@ -2375,7 +2333,8 @@ static const struct file_operations shmem_file_operations = {
 #ifdef CONFIG_TMPFS
 	.llseek		= generic_file_llseek,
 	.read		= shmem_file_read,
-	.write		= shmem_file_write,
+	.write		= do_sync_write,
+	.aio_write	= generic_file_aio_write,
 	.fsync		= simple_sync_file,
 	.splice_read	= generic_file_splice_read,
 	.splice_write	= generic_file_splice_write,
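The .write entry now routes ordinary write(2) calls through do_sync_write(), which packages the request as a kiocb and calls the .aio_write method, here generic_file_aio_write(), so tmpfs no longer carries a private write loop. A minimal illustration of wiring a file_operations table this way (example_file_operations is hypothetical, not the table from this patch):

	static const struct file_operations example_file_operations = {
		.llseek		= generic_file_llseek,
		.read		= do_sync_read,			/* synchronous read over .aio_read */
		.aio_read	= generic_file_aio_read,
		.write		= do_sync_write,		/* synchronous write over .aio_write */
		.aio_write	= generic_file_aio_write,
		.mmap		= generic_file_mmap,
	};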