author		Hugh Dickins <hughd@google.com>			2011-08-03 19:21:24 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-08-03 20:25:24 -0400
commit		aa3b189551ad8e5cc1d9c663735c131650238278 (patch)
tree		c789ab5c7f890a6065811d8bba15a784caf9e859 /mm/shmem.c
parent		54af60421822bb9cb664dd5cd7aac46c01ccfcf8 (diff)
tmpfs: convert mem_cgroup shmem to radix-swap
Remove mem_cgroup_shmem_charge_fallback(): it was only required when we
had to move swappage to filecache with GFP_NOWAIT.
Remove the GFP_NOWAIT special case from mem_cgroup_cache_charge(), by
moving its call out from shmem_add_to_page_cache() to two of its three
callers. But leave it doing mem_cgroup_uncharge_cache_page() on error:
although asymmetrical, it's easier for all 3 callers to handle.
These two changes would also be appropriate if anyone were to start
using shmem_read_mapping_page_gfp() with GFP_NOWAIT.
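In shape, both affected callers in shmem_getpage_gfp() now charge before
inserting, while shmem_add_to_page_cache() still uncharges when its own
insertion fails, so no charge is left outstanding on either failure path.
Condensed from the two hunks below:

	/* Charge while gfp still permits reclaim; callee uncharges on error */
	error = mem_cgroup_cache_charge(page, current->mm,
					gfp & GFP_RECLAIM_MASK);
	if (!error)
		error = shmem_add_to_page_cache(page, mapping, index,
					gfp, swp_to_radix_entry(swap));
	if (error)
		goto failed;	/* no charge left behind in either case */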
Remove mem_cgroup_get_shmem_target(): mc_handle_file_pte() can test
radix_tree_exceptional_entry() to get what it needs for itself.
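The matching mm/memcontrol.c hunk is not shown here (the diffstat below is
limited to mm/shmem.c). As a sketch only, assuming the lookup and decode
helpers from earlier in this radix-swap series, mc_handle_file_pte() can
now do roughly the following rather than calling into shmem:

	/*
	 * Sketch, not the verbatim hunk: a shmem swap entry is stored in
	 * the page cache radix tree as an exceptional entry, so a plain
	 * lookup returns either a real page or a value that decodes to
	 * a swp_entry_t.
	 */
	page = find_get_page(mapping, pgoff);
	if (radix_tree_exceptional_entry(page)) {
		swp_entry_t swap = radix_to_swp_entry(page);
		if (do_swap_account)
			*entry = swap;
		page = find_get_page(&swapper_space, swap.val);
	}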
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/shmem.c')
-rw-r--r--	mm/shmem.c	83
1 file changed, 11 insertions(+), 72 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 92f01d7cc150..13ef2d7e912d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -262,15 +262,11 @@ static int shmem_add_to_page_cache(struct page *page,
 				   struct address_space *mapping,
 				   pgoff_t index, gfp_t gfp, void *expected)
 {
-	int error;
+	int error = 0;
 
 	VM_BUG_ON(!PageLocked(page));
 	VM_BUG_ON(!PageSwapBacked(page));
 
-	error = mem_cgroup_cache_charge(page, current->mm,
-					gfp & GFP_RECLAIM_MASK);
-	if (error)
-		goto out;
 	if (!expected)
 		error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
 	if (!error) {
@@ -300,7 +296,6 @@ static int shmem_add_to_page_cache(struct page *page,
 	}
 	if (error)
 		mem_cgroup_uncharge_cache_page(page);
-out:
 	return error;
 }
 
@@ -660,7 +655,6 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
 	 * Charge page using GFP_KERNEL while we can wait, before taking
 	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
 	 * Charged back to the user (not to caller) when swap account is used.
-	 * shmem_add_to_page_cache() will be called with GFP_NOWAIT.
 	 */
 	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
 	if (error)
@@ -954,8 +948,11 @@ repeat:
 			goto failed;
 		}
 
-		error = shmem_add_to_page_cache(page, mapping, index,
-					gfp, swp_to_radix_entry(swap));
+		error = mem_cgroup_cache_charge(page, current->mm,
+						gfp & GFP_RECLAIM_MASK);
+		if (!error)
+			error = shmem_add_to_page_cache(page, mapping, index,
+						gfp, swp_to_radix_entry(swap));
 		if (error)
 			goto failed;
 
@@ -990,8 +987,11 @@ repeat:
 
 		SetPageSwapBacked(page);
 		__set_page_locked(page);
-		error = shmem_add_to_page_cache(page, mapping, index,
-					gfp, NULL);
+		error = mem_cgroup_cache_charge(page, current->mm,
+						gfp & GFP_RECLAIM_MASK);
+		if (!error)
+			error = shmem_add_to_page_cache(page, mapping, index,
+						gfp, NULL);
 		if (error)
 			goto decused;
 		lru_cache_add_anon(page);
@@ -2442,42 +2442,6 @@ out4:
 	return error;
 }
 
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-/**
- * mem_cgroup_get_shmem_target - find page or swap assigned to the shmem file
- * @inode: the inode to be searched
- * @index: the page offset to be searched
- * @pagep: the pointer for the found page to be stored
- * @swapp: the pointer for the found swap entry to be stored
- *
- * If a page is found, refcount of it is incremented. Callers should handle
- * these refcount.
- */
-void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t index,
-				struct page **pagep, swp_entry_t *swapp)
-{
-	struct shmem_inode_info *info = SHMEM_I(inode);
-	struct page *page = NULL;
-	swp_entry_t swap = {0};
-
-	if ((index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
-		goto out;
-
-	spin_lock(&info->lock);
-#ifdef CONFIG_SWAP
-	swap = shmem_get_swap(info, index);
-	if (swap.val)
-		page = find_get_page(&swapper_space, swap.val);
-	else
-#endif
-		page = find_get_page(inode->i_mapping, index);
-	spin_unlock(&info->lock);
-out:
-	*pagep = page;
-	*swapp = swap;
-}
-#endif
-
 #else /* !CONFIG_SHMEM */
 
 /*
@@ -2523,31 +2487,6 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 }
 EXPORT_SYMBOL_GPL(shmem_truncate_range);
 
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-/**
- * mem_cgroup_get_shmem_target - find page or swap assigned to the shmem file
- * @inode: the inode to be searched
- * @index: the page offset to be searched
- * @pagep: the pointer for the found page to be stored
- * @swapp: the pointer for the found swap entry to be stored
- *
- * If a page is found, refcount of it is incremented. Callers should handle
- * these refcount.
- */
-void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t index,
-				struct page **pagep, swp_entry_t *swapp)
-{
-	struct page *page = NULL;
-
-	if ((index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
-		goto out;
-	page = find_get_page(inode->i_mapping, index);
-out:
-	*pagep = page;
-	*swapp = (swp_entry_t){0};
-}
-#endif
-
 #define shmem_vm_ops				generic_file_vm_ops
 #define shmem_file_operations			ramfs_file_operations
 #define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
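For reference, the swp_to_radix_entry() call in the hunks above relies on
the encoding helpers added earlier in this series in
include/linux/swapops.h; approximately:

	/*
	 * A swap entry is shifted up and tagged with the exceptional-entry
	 * bit, so it can never be mistaken for a struct page pointer when
	 * it is stored in the page cache radix tree.
	 */
	static inline void *swp_to_radix_entry(swp_entry_t entry)
	{
		unsigned long value = entry.val << RADIX_TREE_EXCEPTIONAL_SHIFT;

		return (void *)(value | RADIX_TREE_EXCEPTIONAL_ENTRY);
	}

	static inline swp_entry_t radix_to_swp_entry(void *arg)
	{
		swp_entry_t entry;

		entry.val = (unsigned long)arg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
		return entry;
	}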