diff options
Diffstat (limited to 'mm/shmem.c')
-rw-r--r-- | mm/shmem.c | 44 |
1 files changed, 31 insertions, 13 deletions
diff --git a/mm/shmem.c b/mm/shmem.c
index 67afba5117f2..50c5b8f3a359 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -643,7 +643,7 @@ static void shmem_evict_inode(struct inode *inode)
 		kfree(info->symlink);
 
 	simple_xattrs_free(&info->xattrs);
-	BUG_ON(inode->i_blocks);
+	WARN_ON(inode->i_blocks);
 	shmem_free_inode(inode->i_sb);
 	clear_inode(inode);
 }
@@ -910,25 +910,29 @@ static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
 			struct shmem_inode_info *info, pgoff_t index)
 {
-	struct mempolicy mpol, *spol;
 	struct vm_area_struct pvma;
-
-	spol = mpol_cond_copy(&mpol,
-			mpol_shared_policy_lookup(&info->policy, index));
+	struct page *page;
 
 	/* Create a pseudo vma that just contains the policy */
 	pvma.vm_start = 0;
 	/* Bias interleave by inode number to distribute better across nodes */
 	pvma.vm_pgoff = index + info->vfs_inode.i_ino;
 	pvma.vm_ops = NULL;
-	pvma.vm_policy = spol;
-	return swapin_readahead(swap, gfp, &pvma, 0);
+	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
+
+	page = swapin_readahead(swap, gfp, &pvma, 0);
+
+	/* Drop reference taken by mpol_shared_policy_lookup() */
+	mpol_cond_put(pvma.vm_policy);
+
+	return page;
 }
 
 static struct page *shmem_alloc_page(gfp_t gfp,
 			struct shmem_inode_info *info, pgoff_t index)
 {
 	struct vm_area_struct pvma;
+	struct page *page;
 
 	/* Create a pseudo vma that just contains the policy */
 	pvma.vm_start = 0;
@@ -937,10 +941,12 @@ static struct page *shmem_alloc_page(gfp_t gfp,
 	pvma.vm_ops = NULL;
 	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
 
-	/*
-	 * alloc_page_vma() will drop the shared policy reference
-	 */
-	return alloc_page_vma(gfp, &pvma, 0);
+	page = alloc_page_vma(gfp, &pvma, 0);
+
+	/* Drop reference taken by mpol_shared_policy_lookup() */
+	mpol_cond_put(pvma.vm_policy);
+
+	return page;
 }
 #else /* !CONFIG_NUMA */
 #ifdef CONFIG_TMPFS
@@ -1145,8 +1151,20 @@ repeat:
 		if (!error) {
 			error = shmem_add_to_page_cache(page, mapping, index,
 						gfp, swp_to_radix_entry(swap));
-			/* We already confirmed swap, and make no allocation */
-			VM_BUG_ON(error);
+			/*
+			 * We already confirmed swap under page lock, and make
+			 * no memory allocation here, so usually no possibility
+			 * of error; but free_swap_and_cache() only trylocks a
+			 * page, so it is just possible that the entry has been
+			 * truncated or holepunched since swap was confirmed.
+			 * shmem_undo_range() will have done some of the
+			 * unaccounting, now delete_from_swap_cache() will do
+			 * the rest (including mem_cgroup_uncharge_swapcache).
+			 * Reset swap.val? No, leave it so "failed" goes back to
+			 * "repeat": reading a hole and writing should succeed.
+			 */
+			if (error)
+				delete_from_swap_cache(page);
 		}
 		if (error)
 			goto failed;