author      Hugh Dickins <hugh@veritas.com>    2008-02-07 03:14:13 -0500
committer   Linus Torvalds <torvalds@woody.linux-foundation.org>    2008-02-07 11:42:20 -0500
commit      fa1de9008c9bcce8ab5122529dd19b24c273eba2 (patch)
tree        5afd94b03265ae4d9173ef471b6d5a30ed6d4ff3 /mm/swap_state.c
parent      436c6541b13a73790646eb11429bdc8ee50eec41 (diff)
memcgroup: revert swap_state mods
If we're charging rss and we're charging cache, it seems obvious that we should be charging swapcache - as has been done. But in practice that doesn't work out so well: both swapin readahead and swapoff leave the majority of pages charged to the wrong cgroup (the cgroup that happened to read them in, rather than the cgroup to which they belong).

(Which is why unuse_pte's GFP_KERNEL while holding pte lock never showed up as a problem: no allocation was ever done there, every page read being already charged to the cgroup which initiated the swapoff.)

It all works rather better if we leave the charging to do_swap_page and unuse_pte, and do nothing for swapcache itself: revert mm/swap_state.c to what it was before the memory-controller patches.

This also significantly speeds up a contained process working at its limit, because it no longer needs to keep waiting for swap writeback to complete.

Is it unfair that swap pages become uncharged once they're unmapped, even though they're still clearly private to particular cgroups? For a short while, yes; but PageReclaim arranges for those pages to go to the end of the inactive list and be reclaimed soon if necessary.

shmem/tmpfs pages are a distinct case: their charging also benefits from this change, but their second life on the lists as swapcache pages may prove more unfair - that I need to check next.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: Pavel Emelianov <xemul@openvz.org>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Paul Menage <menage@google.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Kirill Korotaev <dev@sw.ru>
Cc: Herbert Poetzl <herbert@13thfloor.at>
Cc: David Rientjes <rientjes@google.com>
Cc: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
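To illustrate the accounting change described above, here is a small userspace toy model. It is not kernel code: the cgroup struct, the two policy functions and the page counts are invented for illustration. It contrasts the old policy of charging at swap-cache insertion, where swapin readahead bills every page to whichever cgroup happened to trigger the read, with the reverted behaviour of charging at map time (do_swap_page / unuse_pte), where each page is billed to the cgroup that actually faults it in.

/*
 * Toy model of the accounting problem described in the commit message.
 * Not kernel code: the struct, the two policy functions and the page
 * counts below are invented for illustration only.
 */
#include <stdio.h>

struct cgroup {
	const char *name;
	long charged;		/* pages billed to this cgroup */
};

/* Old policy: charging at add_to_swap_cache() time bills whichever
 * cgroup performed the read-in (e.g. the one whose fault triggered
 * swapin readahead), regardless of who owns the page. */
static void charge_at_swapcache_add(struct cgroup *reader)
{
	reader->charged++;
}

/* Reverted policy: charging at map time (do_swap_page / unuse_pte)
 * bills the cgroup that actually maps the page. */
static void charge_at_map(struct cgroup *owner)
{
	owner->charged++;
}

int main(void)
{
	struct cgroup a = { "A", 0 };	/* triggered the readahead */
	struct cgroup b = { "B", 0 };	/* owns most of the pages */
	int i;

	/* A faults one of its own pages; readahead drags in 7 of B's. */
	for (i = 0; i < 8; i++)
		charge_at_swapcache_add(&a);	/* all 8 billed to A */
	printf("charge at swap-cache add: A=%ld B=%ld\n",
	       a.charged, b.charged);

	a.charged = b.charged = 0;
	charge_at_map(&a);			/* the one page A maps */
	for (i = 0; i < 7; i++)
		charge_at_map(&b);		/* B's pages, when B faults them */
	printf("charge at map time:       A=%ld B=%ld\n",
	       a.charged, b.charged);
	return 0;
}

Built as an ordinary C program this prints A=8 B=0 for the old policy and A=1 B=7 for the reverted one - the misattribution, and its fix, that the readahead paragraph above describes.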
Diffstat (limited to 'mm/swap_state.c')
-rw-r--r--    mm/swap_state.c    13
1 file changed, 1 insertion, 12 deletions
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 6ce0669acedc..ec42f01a8d02 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -17,7 +17,6 @@
 #include <linux/backing-dev.h>
 #include <linux/pagevec.h>
 #include <linux/migrate.h>
-#include <linux/memcontrol.h>
 
 #include <asm/pgtable.h>
 
@@ -75,11 +74,6 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
 	BUG_ON(!PageLocked(page));
 	BUG_ON(PageSwapCache(page));
 	BUG_ON(PagePrivate(page));
-
-	error = mem_cgroup_cache_charge(page, current->mm, gfp_mask);
-	if (error)
-		goto out;
-
 	error = radix_tree_preload(gfp_mask);
 	if (!error) {
 		write_lock_irq(&swapper_space.tree_lock);
@@ -92,14 +86,10 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
 			total_swapcache_pages++;
 			__inc_zone_page_state(page, NR_FILE_PAGES);
 			INC_CACHE_INFO(add_total);
-		} else {
-			mem_cgroup_uncharge_page(page);
 		}
 		write_unlock_irq(&swapper_space.tree_lock);
 		radix_tree_preload_end();
-	} else
-		mem_cgroup_uncharge_page(page);
-out:
+	}
 	return error;
 }
 
@@ -114,7 +104,6 @@ void __delete_from_swap_cache(struct page *page)
 	BUG_ON(PageWriteback(page));
 	BUG_ON(PagePrivate(page));
 
-	mem_cgroup_uncharge_page(page);
 	radix_tree_delete(&swapper_space.page_tree, page_private(page));
 	set_page_private(page, 0);
 	ClearPageSwapCache(page);