author	KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>	2009-01-07 21:07:56 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-01-08 11:31:05 -0500
commit	d13d144309d2e5a3e6ad978b16c1d0226ddc9231 (patch)
tree	37c19902b527823956db969d9428737081b2a94d /include
parent	c1e862c1f5ad34771b6d0a528cf681e0dcad7c86 (diff)
memcg: handle swap caches
SwapCache support for the memory resource controller (memcg)
Before the mem+swap controller, memcg itself should handle SwapCache in a
proper way; this patch is cut out from that work.
In the current memcg, SwapCache is simply not accounted, so a user can create
tons of SwapCache. This is an accounting leak and should be handled.
SwapCache accounting is done as follows (the anon rules are illustrated by the
sketch below).
charge (anon)
- charged when the page is mapped.
  (because of readahead, charging at add_to_swap_cache() is not sane)
uncharge (anon)
- uncharged when the page is dropped from swapcache and fully unmapped,
  i.e. it is not uncharged at unmap time.
  Note: deletion from swap cache at swap-in is done after rmap information
  is established.
charge (shmem)
- charged at swap-in; this prevents charging at add_to_page_cache().
uncharge (shmem)
- uncharged when the page is dropped from swapcache and is not on shmem's
  radix-tree.
At migration, the check against the 'old page' is modified to handle shmem.
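The anon rules above can be modeled by a small, self-contained userspace
sketch (illustrative only, not kernel code; struct model_page and the helper
names are hypothetical): a page is charged when it is first mapped, and
uncharged only once it is both out of the swap cache and fully unmapped.

/*
 * Illustrative userspace model of the anon SwapCache accounting rules.
 * charge: on first map.  uncharge: only when dropped from swapcache AND
 * mapcount has reached zero.  All names here are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

struct model_page {
	bool in_swapcache;	/* still held by the swap cache? */
	int  mapcount;		/* number of ptes mapping the page */
	bool charged;		/* currently accounted to the memcg? */
};

static void charge_if_needed(struct model_page *p)
{
	if (!p->charged) {
		p->charged = true;
		printf("charge\n");
	}
}

/* Uncharge only when neither the swap cache nor any mapping holds the page. */
static void maybe_uncharge(struct model_page *p)
{
	if (p->charged && !p->in_swapcache && p->mapcount == 0) {
		p->charged = false;
		printf("uncharge\n");
	}
}

static void map_page(struct model_page *p)		{ p->mapcount++; charge_if_needed(p); }
static void unmap_page(struct model_page *p)		{ p->mapcount--; maybe_uncharge(p); }
static void add_to_swapcache(struct model_page *p)	{ p->in_swapcache = true; }	/* no charge: readahead */
static void delete_from_swapcache(struct model_page *p){ p->in_swapcache = false; maybe_uncharge(p); }

int main(void)
{
	struct model_page p = { false, 0, false };

	add_to_swapcache(&p);		/* readahead: enters swap cache, no charge */
	map_page(&p);			/* swap-in fault maps it -> charge */
	delete_from_swapcache(&p);	/* still mapped -> stays charged */
	unmap_page(&p);			/* last unmap, not in swapcache -> uncharge */
	return 0;
}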
Compared with the old version that was discussed (and caused trouble), this
one has the advantages of
- the PCG_USED bit.
- simpler migration handling.
So the situation should be much easier than it was several months ago.
[hugh@veritas.com: memcg: handle swap caches build fix]
Reviewed-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Tested-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include')
-rw-r--r--	include/linux/swap.h	22
1 file changed, 22 insertions, 0 deletions
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 91dee50fe260..f8f3907533f0 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -333,6 +333,22 @@ static inline void disable_swap_token(void)
 	put_swap_token(swap_token_mm);
 }
 
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+extern int mem_cgroup_cache_charge_swapin(struct page *page,
+			struct mm_struct *mm, gfp_t mask, bool locked);
+extern void mem_cgroup_uncharge_swapcache(struct page *page);
+#else
+static inline
+int mem_cgroup_cache_charge_swapin(struct page *page,
+			struct mm_struct *mm, gfp_t mask, bool locked)
+{
+	return 0;
+}
+static inline void mem_cgroup_uncharge_swapcache(struct page *page)
+{
+}
+#endif
+
 #else /* CONFIG_SWAP */
 
 #define nr_swap_pages				0L
@@ -409,6 +425,12 @@ static inline swp_entry_t get_swap_page(void)
 #define has_swap_token(x) 0
 #define disable_swap_token() do { } while(0)
 
+static inline int mem_cgroup_cache_charge_swapin(struct page *page,
+			struct mm_struct *mm, gfp_t mask, bool locked)
+{
+	return 0;
+}
+
 #endif /* CONFIG_SWAP */
 #endif /* __KERNEL__*/
 #endif /* _LINUX_SWAP_H */
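The stubs added by these hunks follow the usual kernel pattern: when
CONFIG_CGROUP_MEM_RES_CTLR (or CONFIG_SWAP) is disabled, static inline no-ops
stand in for the real functions so call sites need no #ifdefs. A minimal,
self-contained userspace sketch of that pattern follows; the struct page /
struct mm_struct stand-ins and the main() driver are illustrative, not kernel
code, and only the #ifdef structure mirrors swap.h.

/* Sketch of the config-off stub pattern used by the hunks above. */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int gfp_t;		/* stand-in for the kernel's gfp_t */
struct page { int dummy; };		/* stand-in for struct page */
struct mm_struct { int dummy; };	/* stand-in for struct mm_struct */

/* CONFIG_CGROUP_MEM_RES_CTLR is left undefined here to model memcg=n. */

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/* With memcg compiled in, the real implementations live elsewhere. */
extern int mem_cgroup_cache_charge_swapin(struct page *page,
			struct mm_struct *mm, gfp_t mask, bool locked);
extern void mem_cgroup_uncharge_swapcache(struct page *page);
#else
/* With memcg compiled out, these stubs make every call a no-op. */
static inline int mem_cgroup_cache_charge_swapin(struct page *page,
			struct mm_struct *mm, gfp_t mask, bool locked)
{
	return 0;	/* "charge succeeded": nothing to account */
}
static inline void mem_cgroup_uncharge_swapcache(struct page *page)
{
}
#endif

int main(void)
{
	struct page p = { 0 };
	struct mm_struct mm = { 0 };

	/* A swap-in path may call the charge hook unconditionally ... */
	if (mem_cgroup_cache_charge_swapin(&p, &mm, 0, true))
		return 1;	/* charge failed: caller backs out of the swap-in */

	/* ... and the swapcache-removal path may uncharge unconditionally. */
	mem_cgroup_uncharge_swapcache(&p);
	printf("hooks compiled to no-ops (memcg stubbed out)\n");
	return 0;
}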