diff options
author | Hugh Dickins <hughd@google.com> | 2011-08-03 19:21:24 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-08-03 20:25:24 -0400 |
commit | aa3b189551ad8e5cc1d9c663735c131650238278 (patch) | |
tree | c789ab5c7f890a6065811d8bba15a784caf9e859 /mm | |
parent | 54af60421822bb9cb664dd5cd7aac46c01ccfcf8 (diff) |
tmpfs: convert mem_cgroup shmem to radix-swap
Remove mem_cgroup_shmem_charge_fallback(): it was only required when we
had to move swappage to filecache with GFP_NOWAIT.
Remove the GFP_NOWAIT special case from mem_cgroup_cache_charge(), by
moving its call out from shmem_add_to_page_cache() to two of its three
callers. But leave it doing mem_cgroup_uncharge_cache_page() on error:
although asymmetrical, it's easier for all 3 callers to handle.
These two changes would also be appropriate if anyone were to start
using shmem_read_mapping_page_gfp() with GFP_NOWAIT.
Remove mem_cgroup_get_shmem_target(): mc_handle_file_pte() can test
radix_tree_exceptional_entry() to get what it needs for itself.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/memcontrol.c | 66 | ||||
-rw-r--r-- | mm/shmem.c | 83 |
2 files changed, 20 insertions, 129 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 5f84d2351ddb..f4ec4e7ca4cd 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -35,7 +35,6 @@ | |||
35 | #include <linux/limits.h> | 35 | #include <linux/limits.h> |
36 | #include <linux/mutex.h> | 36 | #include <linux/mutex.h> |
37 | #include <linux/rbtree.h> | 37 | #include <linux/rbtree.h> |
38 | #include <linux/shmem_fs.h> | ||
39 | #include <linux/slab.h> | 38 | #include <linux/slab.h> |
40 | #include <linux/swap.h> | 39 | #include <linux/swap.h> |
41 | #include <linux/swapops.h> | 40 | #include <linux/swapops.h> |
@@ -2873,30 +2872,6 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, | |||
2873 | return 0; | 2872 | return 0; |
2874 | if (PageCompound(page)) | 2873 | if (PageCompound(page)) |
2875 | return 0; | 2874 | return 0; |
2876 | /* | ||
2877 | * Corner case handling. This is called from add_to_page_cache() | ||
2878 | * in usual. But some FS (shmem) precharges this page before calling it | ||
2879 | * and call add_to_page_cache() with GFP_NOWAIT. | ||
2880 | * | ||
2881 | * For GFP_NOWAIT case, the page may be pre-charged before calling | ||
2882 | * add_to_page_cache(). (See shmem.c) check it here and avoid to call | ||
2883 | * charge twice. (It works but has to pay a bit larger cost.) | ||
2884 | * And when the page is SwapCache, it should take swap information | ||
2885 | * into account. This is under lock_page() now. | ||
2886 | */ | ||
2887 | if (!(gfp_mask & __GFP_WAIT)) { | ||
2888 | struct page_cgroup *pc; | ||
2889 | |||
2890 | pc = lookup_page_cgroup(page); | ||
2891 | if (!pc) | ||
2892 | return 0; | ||
2893 | lock_page_cgroup(pc); | ||
2894 | if (PageCgroupUsed(pc)) { | ||
2895 | unlock_page_cgroup(pc); | ||
2896 | return 0; | ||
2897 | } | ||
2898 | unlock_page_cgroup(pc); | ||
2899 | } | ||
2900 | 2875 | ||
2901 | if (unlikely(!mm)) | 2876 | if (unlikely(!mm)) |
2902 | mm = &init_mm; | 2877 | mm = &init_mm; |
@@ -3486,31 +3461,6 @@ void mem_cgroup_end_migration(struct mem_cgroup *mem, | |||
3486 | cgroup_release_and_wakeup_rmdir(&mem->css); | 3461 | cgroup_release_and_wakeup_rmdir(&mem->css); |
3487 | } | 3462 | } |
3488 | 3463 | ||
3489 | /* | ||
3490 | * A call to try to shrink memory usage on charge failure at shmem's swapin. | ||
3491 | * Calling hierarchical_reclaim is not enough because we should update | ||
3492 | * last_oom_jiffies to prevent pagefault_out_of_memory from invoking global OOM. | ||
3493 | * Moreover considering hierarchy, we should reclaim from the mem_over_limit, | ||
3494 | * not from the memcg which this page would be charged to. | ||
3495 | * try_charge_swapin does all of these works properly. | ||
3496 | */ | ||
3497 | int mem_cgroup_shmem_charge_fallback(struct page *page, | ||
3498 | struct mm_struct *mm, | ||
3499 | gfp_t gfp_mask) | ||
3500 | { | ||
3501 | struct mem_cgroup *mem; | ||
3502 | int ret; | ||
3503 | |||
3504 | if (mem_cgroup_disabled()) | ||
3505 | return 0; | ||
3506 | |||
3507 | ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem); | ||
3508 | if (!ret) | ||
3509 | mem_cgroup_cancel_charge_swapin(mem); /* it does !mem check */ | ||
3510 | |||
3511 | return ret; | ||
3512 | } | ||
3513 | |||
3514 | #ifdef CONFIG_DEBUG_VM | 3464 | #ifdef CONFIG_DEBUG_VM |
3515 | static struct page_cgroup *lookup_page_cgroup_used(struct page *page) | 3465 | static struct page_cgroup *lookup_page_cgroup_used(struct page *page) |
3516 | { | 3466 | { |
@@ -5330,15 +5280,17 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma, | |||
5330 | pgoff = pte_to_pgoff(ptent); | 5280 | pgoff = pte_to_pgoff(ptent); |
5331 | 5281 | ||
5332 | /* page is moved even if it's not RSS of this task(page-faulted). */ | 5282 | /* page is moved even if it's not RSS of this task(page-faulted). */ |
5333 | if (!mapping_cap_swap_backed(mapping)) { /* normal file */ | 5283 | page = find_get_page(mapping, pgoff); |
5334 | page = find_get_page(mapping, pgoff); | 5284 | |
5335 | } else { /* shmem/tmpfs file. we should take account of swap too. */ | 5285 | #ifdef CONFIG_SWAP |
5336 | swp_entry_t ent; | 5286 | /* shmem/tmpfs may report page out on swap: account for that too. */ |
5337 | mem_cgroup_get_shmem_target(inode, pgoff, &page, &ent); | 5287 | if (radix_tree_exceptional_entry(page)) { |
5288 | swp_entry_t swap = radix_to_swp_entry(page); | ||
5338 | if (do_swap_account) | 5289 | if (do_swap_account) |
5339 | entry->val = ent.val; | 5290 | *entry = swap; |
5291 | page = find_get_page(&swapper_space, swap.val); | ||
5340 | } | 5292 | } |
5341 | 5293 | #endif | |
5342 | return page; | 5294 | return page; |
5343 | } | 5295 | } |
5344 | 5296 | ||
diff --git a/mm/shmem.c b/mm/shmem.c index 92f01d7cc150..13ef2d7e912d 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -262,15 +262,11 @@ static int shmem_add_to_page_cache(struct page *page, | |||
262 | struct address_space *mapping, | 262 | struct address_space *mapping, |
263 | pgoff_t index, gfp_t gfp, void *expected) | 263 | pgoff_t index, gfp_t gfp, void *expected) |
264 | { | 264 | { |
265 | int error; | 265 | int error = 0; |
266 | 266 | ||
267 | VM_BUG_ON(!PageLocked(page)); | 267 | VM_BUG_ON(!PageLocked(page)); |
268 | VM_BUG_ON(!PageSwapBacked(page)); | 268 | VM_BUG_ON(!PageSwapBacked(page)); |
269 | 269 | ||
270 | error = mem_cgroup_cache_charge(page, current->mm, | ||
271 | gfp & GFP_RECLAIM_MASK); | ||
272 | if (error) | ||
273 | goto out; | ||
274 | if (!expected) | 270 | if (!expected) |
275 | error = radix_tree_preload(gfp & GFP_RECLAIM_MASK); | 271 | error = radix_tree_preload(gfp & GFP_RECLAIM_MASK); |
276 | if (!error) { | 272 | if (!error) { |
@@ -300,7 +296,6 @@ static int shmem_add_to_page_cache(struct page *page, | |||
300 | } | 296 | } |
301 | if (error) | 297 | if (error) |
302 | mem_cgroup_uncharge_cache_page(page); | 298 | mem_cgroup_uncharge_cache_page(page); |
303 | out: | ||
304 | return error; | 299 | return error; |
305 | } | 300 | } |
306 | 301 | ||
@@ -660,7 +655,6 @@ int shmem_unuse(swp_entry_t swap, struct page *page) | |||
660 | * Charge page using GFP_KERNEL while we can wait, before taking | 655 | * Charge page using GFP_KERNEL while we can wait, before taking |
661 | * the shmem_swaplist_mutex which might hold up shmem_writepage(). | 656 | * the shmem_swaplist_mutex which might hold up shmem_writepage(). |
662 | * Charged back to the user (not to caller) when swap account is used. | 657 | * Charged back to the user (not to caller) when swap account is used. |
663 | * shmem_add_to_page_cache() will be called with GFP_NOWAIT. | ||
664 | */ | 658 | */ |
665 | error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL); | 659 | error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL); |
666 | if (error) | 660 | if (error) |
@@ -954,8 +948,11 @@ repeat: | |||
954 | goto failed; | 948 | goto failed; |
955 | } | 949 | } |
956 | 950 | ||
957 | error = shmem_add_to_page_cache(page, mapping, index, | 951 | error = mem_cgroup_cache_charge(page, current->mm, |
958 | gfp, swp_to_radix_entry(swap)); | 952 | gfp & GFP_RECLAIM_MASK); |
953 | if (!error) | ||
954 | error = shmem_add_to_page_cache(page, mapping, index, | ||
955 | gfp, swp_to_radix_entry(swap)); | ||
959 | if (error) | 956 | if (error) |
960 | goto failed; | 957 | goto failed; |
961 | 958 | ||
@@ -990,8 +987,11 @@ repeat: | |||
990 | 987 | ||
991 | SetPageSwapBacked(page); | 988 | SetPageSwapBacked(page); |
992 | __set_page_locked(page); | 989 | __set_page_locked(page); |
993 | error = shmem_add_to_page_cache(page, mapping, index, | 990 | error = mem_cgroup_cache_charge(page, current->mm, |
994 | gfp, NULL); | 991 | gfp & GFP_RECLAIM_MASK); |
992 | if (!error) | ||
993 | error = shmem_add_to_page_cache(page, mapping, index, | ||
994 | gfp, NULL); | ||
995 | if (error) | 995 | if (error) |
996 | goto decused; | 996 | goto decused; |
997 | lru_cache_add_anon(page); | 997 | lru_cache_add_anon(page); |
@@ -2442,42 +2442,6 @@ out4: | |||
2442 | return error; | 2442 | return error; |
2443 | } | 2443 | } |
2444 | 2444 | ||
2445 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR | ||
2446 | /** | ||
2447 | * mem_cgroup_get_shmem_target - find page or swap assigned to the shmem file | ||
2448 | * @inode: the inode to be searched | ||
2449 | * @index: the page offset to be searched | ||
2450 | * @pagep: the pointer for the found page to be stored | ||
2451 | * @swapp: the pointer for the found swap entry to be stored | ||
2452 | * | ||
2453 | * If a page is found, refcount of it is incremented. Callers should handle | ||
2454 | * these refcount. | ||
2455 | */ | ||
2456 | void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t index, | ||
2457 | struct page **pagep, swp_entry_t *swapp) | ||
2458 | { | ||
2459 | struct shmem_inode_info *info = SHMEM_I(inode); | ||
2460 | struct page *page = NULL; | ||
2461 | swp_entry_t swap = {0}; | ||
2462 | |||
2463 | if ((index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) | ||
2464 | goto out; | ||
2465 | |||
2466 | spin_lock(&info->lock); | ||
2467 | #ifdef CONFIG_SWAP | ||
2468 | swap = shmem_get_swap(info, index); | ||
2469 | if (swap.val) | ||
2470 | page = find_get_page(&swapper_space, swap.val); | ||
2471 | else | ||
2472 | #endif | ||
2473 | page = find_get_page(inode->i_mapping, index); | ||
2474 | spin_unlock(&info->lock); | ||
2475 | out: | ||
2476 | *pagep = page; | ||
2477 | *swapp = swap; | ||
2478 | } | ||
2479 | #endif | ||
2480 | |||
2481 | #else /* !CONFIG_SHMEM */ | 2445 | #else /* !CONFIG_SHMEM */ |
2482 | 2446 | ||
2483 | /* | 2447 | /* |
@@ -2523,31 +2487,6 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) | |||
2523 | } | 2487 | } |
2524 | EXPORT_SYMBOL_GPL(shmem_truncate_range); | 2488 | EXPORT_SYMBOL_GPL(shmem_truncate_range); |
2525 | 2489 | ||
2526 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR | ||
2527 | /** | ||
2528 | * mem_cgroup_get_shmem_target - find page or swap assigned to the shmem file | ||
2529 | * @inode: the inode to be searched | ||
2530 | * @index: the page offset to be searched | ||
2531 | * @pagep: the pointer for the found page to be stored | ||
2532 | * @swapp: the pointer for the found swap entry to be stored | ||
2533 | * | ||
2534 | * If a page is found, refcount of it is incremented. Callers should handle | ||
2535 | * these refcount. | ||
2536 | */ | ||
2537 | void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t index, | ||
2538 | struct page **pagep, swp_entry_t *swapp) | ||
2539 | { | ||
2540 | struct page *page = NULL; | ||
2541 | |||
2542 | if ((index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) | ||
2543 | goto out; | ||
2544 | page = find_get_page(inode->i_mapping, index); | ||
2545 | out: | ||
2546 | *pagep = page; | ||
2547 | *swapp = (swp_entry_t){0}; | ||
2548 | } | ||
2549 | #endif | ||
2550 | |||
2551 | #define shmem_vm_ops generic_file_vm_ops | 2490 | #define shmem_vm_ops generic_file_vm_ops |
2552 | #define shmem_file_operations ramfs_file_operations | 2491 | #define shmem_file_operations ramfs_file_operations |
2553 | #define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev) | 2492 | #define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev) |