aboutsummaryrefslogtreecommitdiffstats
path: root/mm/shmem.c
diff options
context:
space:
mode:
author Hugh Dickins <hughd@google.com> 2011-07-25 20:12:35 -0400
committer Linus Torvalds <torvalds@linux-foundation.org> 2011-07-25 23:57:11 -0400
commit e83c32e8f92724a06a22a3b42f3afc07db93e131 (patch)
tree eeefe97f26e7b4faf672eba777a12749bfdad024 /mm/shmem.c
parent 9276aad6c898dbcc31d095f2934dedd5cbb2e93e (diff)
tmpfs: simplify prealloc_page
The prealloc_page handling in shmem_getpage_gfp() is unnecessarily complicated: first simplify that before going on to filepage/swappage. That's right, don't report ENOMEM when the preallocation fails: we may or may not need the page. But simply report ENOMEM once we find we do need it, instead of dropping lock, repeating allocation, unwinding on failure etc. And leave the out label on the fast path, don't goto. Fix something that looks like a bug but turns out not to be: set PageSwapBacked on prealloc_page before its mem_cgroup_cache_charge(), as the removed case was doing. That's important before adding to LRU (determines which LRU the page goes on), and does affect which path it takes through memcontrol.c, but in the end MEM_CGROUP_CHANGE_TYPE_SHMEM is handled no differently from CACHE. Signed-off-by: Hugh Dickins <hughd@google.com> Acked-by: Shaohua Li <shaohua.li@intel.com> Cc: "Zhang, Yanmin" <yanmin.zhang@intel.com> Cc: Tim Chen <tim.c.chen@linux.intel.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/shmem.c')
-rw-r--r-- mm/shmem.c | 60
1 file changed, 16 insertions, 44 deletions
diff --git a/mm/shmem.c b/mm/shmem.c
index ff6713a2579e..8f8534f35476 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1269,9 +1269,9 @@ repeat:
1269 goto failed; 1269 goto failed;
1270 radix_tree_preload_end(); 1270 radix_tree_preload_end();
1271 if (sgp != SGP_READ && !prealloc_page) { 1271 if (sgp != SGP_READ && !prealloc_page) {
1272 /* We don't care if this fails */
1273 prealloc_page = shmem_alloc_page(gfp, info, idx); 1272 prealloc_page = shmem_alloc_page(gfp, info, idx);
1274 if (prealloc_page) { 1273 if (prealloc_page) {
1274 SetPageSwapBacked(prealloc_page);
1275 if (mem_cgroup_cache_charge(prealloc_page, 1275 if (mem_cgroup_cache_charge(prealloc_page,
1276 current->mm, GFP_KERNEL)) { 1276 current->mm, GFP_KERNEL)) {
1277 page_cache_release(prealloc_page); 1277 page_cache_release(prealloc_page);
@@ -1403,7 +1403,8 @@ repeat:
1403 goto repeat; 1403 goto repeat;
1404 } 1404 }
1405 spin_unlock(&info->lock); 1405 spin_unlock(&info->lock);
1406 } else { 1406
1407 } else if (prealloc_page) {
1407 shmem_swp_unmap(entry); 1408 shmem_swp_unmap(entry);
1408 sbinfo = SHMEM_SB(inode->i_sb); 1409 sbinfo = SHMEM_SB(inode->i_sb);
1409 if (sbinfo->max_blocks) { 1410 if (sbinfo->max_blocks) {
@@ -1419,41 +1420,8 @@ repeat:
1419 if (!filepage) { 1420 if (!filepage) {
1420 int ret; 1421 int ret;
1421 1422
1422 if (!prealloc_page) { 1423 filepage = prealloc_page;
1423 spin_unlock(&info->lock); 1424 prealloc_page = NULL;
1424 filepage = shmem_alloc_page(gfp, info, idx);
1425 if (!filepage) {
1426 spin_lock(&info->lock);
1427 shmem_unacct_blocks(info->flags, 1);
1428 shmem_free_blocks(inode, 1);
1429 spin_unlock(&info->lock);
1430 error = -ENOMEM;
1431 goto failed;
1432 }
1433 SetPageSwapBacked(filepage);
1434
1435 /*
1436 * Precharge page while we can wait, compensate
1437 * after
1438 */
1439 error = mem_cgroup_cache_charge(filepage,
1440 current->mm, GFP_KERNEL);
1441 if (error) {
1442 page_cache_release(filepage);
1443 spin_lock(&info->lock);
1444 shmem_unacct_blocks(info->flags, 1);
1445 shmem_free_blocks(inode, 1);
1446 spin_unlock(&info->lock);
1447 filepage = NULL;
1448 goto failed;
1449 }
1450
1451 spin_lock(&info->lock);
1452 } else {
1453 filepage = prealloc_page;
1454 prealloc_page = NULL;
1455 SetPageSwapBacked(filepage);
1456 }
1457 1425
1458 entry = shmem_swp_alloc(info, idx, sgp, gfp); 1426 entry = shmem_swp_alloc(info, idx, sgp, gfp);
1459 if (IS_ERR(entry)) 1427 if (IS_ERR(entry))
@@ -1492,11 +1460,20 @@ repeat:
1492 SetPageUptodate(filepage); 1460 SetPageUptodate(filepage);
1493 if (sgp == SGP_DIRTY) 1461 if (sgp == SGP_DIRTY)
1494 set_page_dirty(filepage); 1462 set_page_dirty(filepage);
1463 } else {
1464 spin_unlock(&info->lock);
1465 error = -ENOMEM;
1466 goto out;
1495 } 1467 }
1496done: 1468done:
1497 *pagep = filepage; 1469 *pagep = filepage;
1498 error = 0; 1470 error = 0;
1499 goto out; 1471out:
1472 if (prealloc_page) {
1473 mem_cgroup_uncharge_cache_page(prealloc_page);
1474 page_cache_release(prealloc_page);
1475 }
1476 return error;
1500 1477
1501nospace: 1478nospace:
1502 /* 1479 /*
@@ -1520,12 +1497,7 @@ failed:
1520 unlock_page(filepage); 1497 unlock_page(filepage);
1521 page_cache_release(filepage); 1498 page_cache_release(filepage);
1522 } 1499 }
1523out: 1500 goto out;
1524 if (prealloc_page) {
1525 mem_cgroup_uncharge_cache_page(prealloc_page);
1526 page_cache_release(prealloc_page);
1527 }
1528 return error;
1529} 1501}
1530 1502
1531static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 1503static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)