author		Shaohua Li <shaohua.li@intel.com>	2010-08-09 20:19:06 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-08-09 23:44:58 -0400
commit		ff36b801624d02a876bb7deded6ab860ea3503f2 (patch)
tree		a3f44d01de629234d70ad6211b9fd73fd705a76c /mm/shmem.c
parent		7e496299d4d2ad8083effed6c5a18313a919edc6 (diff)
shmem: reduce pagefault lock contention
I'm running a shmem pagefault test case (see attached file) on a 64-CPU system. Profiling shows that shmem_inode_info->lock is heavily contended: 100% of CPU time is spent trying to acquire the lock. In the pagefault (no-swap) case, shmem_getpage takes the lock twice; the second acquisition is avoidable if we preallocate a page, saving one round of locking. That is what this patch does. Test-case results:

2.6.35-rc3: ~20s
2.6.35-rc3 + patch: ~12s

so this is a 40% improvement.

One might argue that shmem should get better locking in general. But even if shmem itself were lockless, the pagefault path would soon contend heavily on the pagecache lock, because shmem must add each new page to the pagecache. So until we have better locking for the pagecache, improving shmem's locking doesn't buy much. For comparison, a similar pagefault test against a ramfs file takes ~10.5s.

[akpm@linux-foundation.org: fix comment, clean up code layout, eliminate code duplication]
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: "Zhang, Yanmin" <yanmin.zhang@intel.com>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
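[Editor's note] To make the locking change concrete, here is a minimal user-space C analogue of the idea, not kernel code: the function names are hypothetical and a pthread mutex stands in for shmem_inode_info->lock. The "before" path takes the contended lock, drops it to allocate, and takes it again; the "after" path allocates speculatively and enters the critical section exactly once.

/*
 * Illustration only: a user-space analogue of the patch, not kernel
 * code. get_page_locked_twice() and get_page_prealloc() are made-up
 * names; info_lock stands in for shmem_inode_info->lock.
 */
#include <pthread.h>
#include <stdlib.h>

struct page { char data[4096]; };

static pthread_mutex_t info_lock = PTHREAD_MUTEX_INITIALIZER;

/* Before: the lock is taken, dropped for the allocation, then retaken. */
static struct page *get_page_locked_twice(void)
{
	struct page *page;

	pthread_mutex_lock(&info_lock);
	/* ... lookup misses: we need a new page ... */
	pthread_mutex_unlock(&info_lock);

	page = malloc(sizeof(*page));		/* allocation may block */
	if (!page)
		return NULL;

	pthread_mutex_lock(&info_lock);		/* second acquisition */
	/* ... insert the page into the cache ... */
	pthread_mutex_unlock(&info_lock);
	return page;
}

/* After: allocate speculatively, then take the lock exactly once. */
static struct page *get_page_prealloc(void)
{
	struct page *prealloc = malloc(sizeof(*prealloc));
	struct page *page = NULL;

	pthread_mutex_lock(&info_lock);
	/* ... lookup misses: consume the preallocated page ... */
	page = prealloc;
	prealloc = NULL;
	/* ... insert the page into the cache ... */
	pthread_mutex_unlock(&info_lock);

	/* No-op on this path; in the real patch the cleanup at out:
	 * releases the preallocation when the lookup hit instead. */
	free(prealloc);
	return page;
}

If the speculative allocation turns out to be unneeded (another thread instantiated the page first, or the allocation itself failed), the code simply falls back or releases it, mirroring the prealloc_page cleanup at the patch's out: label below.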
Diffstat (limited to 'mm/shmem.c')
-rw-r--r--	mm/shmem.c	70
1 file changed, 49 insertions(+), 21 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 0618fdad406c..566f9a481e64 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1222,6 +1222,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
 	struct shmem_sb_info *sbinfo;
 	struct page *filepage = *pagep;
 	struct page *swappage;
+	struct page *prealloc_page = NULL;
 	swp_entry_t *entry;
 	swp_entry_t swap;
 	gfp_t gfp;
@@ -1246,7 +1247,6 @@ repeat:
 	filepage = find_lock_page(mapping, idx);
 	if (filepage && PageUptodate(filepage))
 		goto done;
-	error = 0;
 	gfp = mapping_gfp_mask(mapping);
 	if (!filepage) {
 		/*
@@ -1257,7 +1257,19 @@ repeat:
 		if (error)
 			goto failed;
 		radix_tree_preload_end();
+		if (sgp != SGP_READ && !prealloc_page) {
+			/* We don't care if this fails */
+			prealloc_page = shmem_alloc_page(gfp, info, idx);
+			if (prealloc_page) {
+				if (mem_cgroup_cache_charge(prealloc_page,
+						current->mm, GFP_KERNEL)) {
+					page_cache_release(prealloc_page);
+					prealloc_page = NULL;
+				}
+			}
+		}
 	}
+	error = 0;
 
 	spin_lock(&info->lock);
 	shmem_recalc_inode(inode);
@@ -1405,28 +1417,38 @@ repeat:
 	if (!filepage) {
 		int ret;
 
-		spin_unlock(&info->lock);
-		filepage = shmem_alloc_page(gfp, info, idx);
-		if (!filepage) {
-			shmem_unacct_blocks(info->flags, 1);
-			shmem_free_blocks(inode, 1);
-			error = -ENOMEM;
-			goto failed;
-		}
-		SetPageSwapBacked(filepage);
+		if (!prealloc_page) {
+			spin_unlock(&info->lock);
+			filepage = shmem_alloc_page(gfp, info, idx);
+			if (!filepage) {
+				shmem_unacct_blocks(info->flags, 1);
+				shmem_free_blocks(inode, 1);
+				error = -ENOMEM;
+				goto failed;
+			}
+			SetPageSwapBacked(filepage);
 
-		/* Precharge page while we can wait, compensate after */
-		error = mem_cgroup_cache_charge(filepage, current->mm,
-				GFP_KERNEL);
-		if (error) {
-			page_cache_release(filepage);
-			shmem_unacct_blocks(info->flags, 1);
-			shmem_free_blocks(inode, 1);
-			filepage = NULL;
-			goto failed;
+			/*
+			 * Precharge page while we can wait, compensate
+			 * after
+			 */
+			error = mem_cgroup_cache_charge(filepage,
+					current->mm, GFP_KERNEL);
+			if (error) {
+				page_cache_release(filepage);
+				shmem_unacct_blocks(info->flags, 1);
+				shmem_free_blocks(inode, 1);
+				filepage = NULL;
+				goto failed;
+			}
+
+			spin_lock(&info->lock);
+		} else {
+			filepage = prealloc_page;
+			prealloc_page = NULL;
+			SetPageSwapBacked(filepage);
 		}
 
-		spin_lock(&info->lock);
 		entry = shmem_swp_alloc(info, idx, sgp);
 		if (IS_ERR(entry))
 			error = PTR_ERR(entry);
@@ -1467,13 +1489,19 @@ repeat:
 	}
 done:
 	*pagep = filepage;
-	return 0;
+	error = 0;
+	goto out;
 
 failed:
 	if (*pagep != filepage) {
 		unlock_page(filepage);
 		page_cache_release(filepage);
 	}
+out:
+	if (prealloc_page) {
+		mem_cgroup_uncharge_cache_page(prealloc_page);
+		page_cache_release(prealloc_page);
+	}
 	return error;
 }
 
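[Editor's note] The author's attached test case is not reproduced on this page. As a rough, hypothetical approximation (thread count, slice size, and all names are assumptions, not the original benchmark): every thread write-faults its own slice of one large MAP_SHARED|MAP_ANONYMOUS mapping. Such memory is backed by shmem, so every fault goes through shmem_getpage against the same inode and contends on the same shmem_inode_info->lock.

/*
 * Hypothetical shmem pagefault microbenchmark sketch (NOT the attached
 * test case). Build with: gcc -O2 -pthread; time with time(1).
 */
#include <pthread.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define NTHREADS	64		/* assumption: one thread per CPU */
#define PER_THREAD	(64UL << 20)	/* assumption: 64 MB slice per thread */

static char *base;
static size_t pagesz;

/* Write one byte per page of this thread's slice, forcing a pagefault
 * (and hence a shmem_getpage call) for every page. */
static void *fault_in(void *arg)
{
	char *slice = base + (long)arg * PER_THREAD;
	size_t off;

	for (off = 0; off < PER_THREAD; off += pagesz)
		slice[off] = 1;
	return NULL;
}

int main(void)
{
	pthread_t tids[NTHREADS];
	long i;

	pagesz = sysconf(_SC_PAGESIZE);
	/* One shared anonymous mapping => one shmem inode => one lock. */
	base = mmap(NULL, NTHREADS * PER_THREAD, PROT_READ | PROT_WRITE,
		    MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (base == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	for (i = 0; i < NTHREADS; i++)
		pthread_create(&tids[i], NULL, fault_in, (void *)i);
	for (i = 0; i < NTHREADS; i++)
		pthread_join(tids[i], NULL);
	return 0;
}

Timing a run like this before and after the patch should show the kind of gap reported in the changelog, though the exact numbers depend on CPU count, memory size, and kernel configuration.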