Diffstat (limited to 'mm/shmem.c')
-rw-r--r--  mm/shmem.c | 35 ++++++++++++++++++++++++-----------
1 file changed, 24 insertions(+), 11 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 9ffbea9b79e1..d58305e8a484 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -922,20 +922,26 @@ found:
 	error = 1;
 	if (!inode)
 		goto out;
-	/* Precharge page while we can wait, compensate afterwards */
+	/* Precharge page using GFP_KERNEL while we can wait */
 	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
 	if (error)
 		goto out;
 	error = radix_tree_preload(GFP_KERNEL);
-	if (error)
-		goto uncharge;
+	if (error) {
+		mem_cgroup_uncharge_cache_page(page);
+		goto out;
+	}
 	error = 1;
 
 	spin_lock(&info->lock);
 	ptr = shmem_swp_entry(info, idx, NULL);
-	if (ptr && ptr->val == entry.val)
+	if (ptr && ptr->val == entry.val) {
 		error = add_to_page_cache(page, inode->i_mapping,
 						idx, GFP_NOWAIT);
+		/* does mem_cgroup_uncharge_cache_page on error */
+	} else	/* we must compensate for our precharge above */
+		mem_cgroup_uncharge_cache_page(page);
+
 	if (error == -EEXIST) {
 		struct page *filepage = find_get_page(inode->i_mapping, idx);
 		error = 1;
@@ -961,8 +967,6 @@ found:
 	shmem_swp_unmap(ptr);
 	spin_unlock(&info->lock);
 	radix_tree_preload_end();
-uncharge:
-	mem_cgroup_uncharge_page(page);
 out:
 	unlock_page(page);
 	page_cache_release(page);
@@ -1319,7 +1323,7 @@ repeat:
 				page_cache_release(swappage);
 				goto failed;
 			}
-			mem_cgroup_uncharge_page(swappage);
+			mem_cgroup_uncharge_cache_page(swappage);
 		}
 		page_cache_release(swappage);
 		goto repeat;
@@ -1358,6 +1362,8 @@ repeat:
 		}
 
 		if (!filepage) {
+			int ret;
+
 			spin_unlock(&info->lock);
 			filepage = shmem_alloc_page(gfp, info, idx);
 			if (!filepage) {
@@ -1386,10 +1392,18 @@ repeat:
 				swap = *entry;
 				shmem_swp_unmap(entry);
 			}
-			if (error || swap.val || 0 != add_to_page_cache_lru(
-					filepage, mapping, idx, GFP_NOWAIT)) {
+			ret = error || swap.val;
+			if (ret)
+				mem_cgroup_uncharge_cache_page(filepage);
+			else
+				ret = add_to_page_cache_lru(filepage, mapping,
+						idx, GFP_NOWAIT);
+			/*
+			 * At add_to_page_cache_lru() failure, uncharge will
+			 * be done automatically.
+			 */
+			if (ret) {
 				spin_unlock(&info->lock);
-				mem_cgroup_uncharge_page(filepage);
 				page_cache_release(filepage);
 				shmem_unacct_blocks(info->flags, 1);
 				shmem_free_blocks(inode, 1);
@@ -1398,7 +1412,6 @@ repeat:
 				goto failed;
 			goto repeat;
 		}
-		mem_cgroup_uncharge_page(filepage);
 		info->flags |= SHMEM_PAGEIN;
 	}
 
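The patch converges on a single ownership rule for the memcg charge: the caller precharges with mem_cgroup_cache_charge(), add_to_page_cache()/add_to_page_cache_lru() drop the charge themselves when they fail (per the comments added above), and every other early exit must compensate with mem_cgroup_uncharge_cache_page() exactly once. Below is a minimal userspace sketch of that balancing protocol; the names (cache_charge, cache_uncharge, add_to_cache, insert_page) are hypothetical stand-ins for illustration, not kernel APIs.

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-ins for the memcg charge/uncharge calls. */
static int charged;

static int cache_charge(void)    { charged++; return 0; }
static void cache_uncharge(void) { charged--; }

/* Models add_to_page_cache(): on failure it drops the charge itself. */
static int add_to_cache(bool fail)
{
	if (fail) {
		cache_uncharge();
		return -1;
	}
	return 0;
}

/*
 * The pattern from the patch: precharge up front, then make every
 * exit path account for the charge exactly once.
 */
static int insert_page(bool preload_fails, bool insert_fails)
{
	int error;

	error = cache_charge();		/* precharge while we can wait */
	if (error)
		return error;

	if (preload_fails) {		/* e.g. radix_tree_preload() */
		cache_uncharge();	/* compensate for the precharge */
		return -1;
	}

	error = add_to_cache(insert_fails);
	/* on failure, add_to_cache() has already uncharged for us */
	return error;
}

int main(void)
{
	insert_page(false, false);
	cache_uncharge();	/* a successful insert is uncharged on removal */
	insert_page(true, false);
	insert_page(false, true);
	printf("outstanding charges: %d\n", charged);	/* expect 0 */
	return 0;
}

Running the sketch leaves the counter at zero on every path, which is the balance the patch establishes once the shared uncharge: label is removed and each failure path handles its own compensation.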