author      Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>    2009-12-15 19:47:11 -0500
committer   Linus Torvalds <torvalds@linux-foundation.org>     2009-12-16 10:20:07 -0500
commit      57f9fd7d25ac9a0d7e3a4ced580e780ab4524e3b (patch)
tree        ff95e62e7326ba7e77903f7f767e9650c0d9b3dd /include/linux/page_cgroup.h
parent      a3032a2c15c6967f9f0c0c28375b1a5c833a3112 (diff)
memcg: cleanup mem_cgroup_move_parent()
mem_cgroup_move_parent() calls try_charge first and cancel_charge on
failure. IMHO, charge/uncharge (especially charge) is a high-cost operation,
so we should avoid it as far as possible.
This patch delays try_charge in mem_cgroup_move_parent() by re-ordering the
checks it does, so the cheaper validity checks run before the charge.
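A minimal sketch of that reordering idea, in kernel-style C with hypothetical
helpers (charge(), cancel_charge(), page_is_movable(), do_move()) standing in
for the real mm/memcontrol.c code:

#include <linux/errno.h>

struct page;
int charge(struct page *page);           /* stand-in for try_charge       */
void cancel_charge(struct page *page);   /* stand-in for cancel_charge    */
int page_is_movable(struct page *page);  /* stand-in for the cheap checks */
int do_move(struct page *page);          /* stand-in for the actual move  */

/* Before: charge up front, and pay for cancel_charge whenever a later,
 * cheaper check fails.
 */
static int move_old(struct page *page)
{
	if (charge(page))			/* expensive */
		return -ENOMEM;
	if (!page_is_movable(page)) {		/* cheap check */
		cancel_charge(page);		/* wasted charge/uncharge */
		return -EBUSY;
	}
	return do_move(page);
}

/* After: run the cheap checks first; charge only when the move will
 * actually be attempted.
 */
static int move_new(struct page *page)
{
	if (!page_is_movable(page))		/* cheap check first */
		return -EBUSY;
	if (charge(page))			/* expensive, no longer wasted */
		return -ENOMEM;
	return do_move(page);
}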
This patch also renames mem_cgroup_move_account() to
__mem_cgroup_move_account(), changes the return value of
__mem_cgroup_move_account() from int to void, and adds a new
wrapper (mem_cgroup_move_account()), which checks whether @pc is valid
for moving the account and, if so, calls __mem_cgroup_move_account().
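A rough sketch of the wrapper shape described above (not the literal
mm/memcontrol.c code; the PageCgroupUsed()/pc->mem_cgroup checks are an
assumption about what "valid for moving account" means):

/* Sketch of the check-then-call wrapper: validate @pc under the
 * page_cgroup lock and only then hand off to the void helper.
 */
static int mem_cgroup_move_account(struct page_cgroup *pc,
				   struct mem_cgroup *from,
				   struct mem_cgroup *to)
{
	int ret = -EINVAL;

	lock_page_cgroup(pc);
	/* @pc must still be in use and still owned by @from. */
	if (PageCgroupUsed(pc) && pc->mem_cgroup == from) {
		__mem_cgroup_move_account(pc, from, to);	/* now returns void */
		ret = 0;
	}
	unlock_page_cgroup(pc);

	return ret;
}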
This patch removes the last caller of trylock_page_cgroup(), so its
definition is removed as well.
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/page_cgroup.h')
-rw-r--r--   include/linux/page_cgroup.h   7
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index 4b938d4f3ac2..b0e4eb126236 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -57,6 +57,8 @@ static inline void ClearPageCgroup##uname(struct page_cgroup *pc)	\
 static inline int TestClearPageCgroup##uname(struct page_cgroup *pc)	\
 	{ return test_and_clear_bit(PCG_##lname, &pc->flags); }
 
+TESTPCGFLAG(Locked, LOCK)
+
 /* Cache flag is set only once (at allocation) */
 TESTPCGFLAG(Cache, CACHE)
 CLEARPCGFLAG(Cache, CACHE)
@@ -86,11 +88,6 @@ static inline void lock_page_cgroup(struct page_cgroup *pc)
 	bit_spin_lock(PCG_LOCK, &pc->flags);
 }
 
-static inline int trylock_page_cgroup(struct page_cgroup *pc)
-{
-	return bit_spin_trylock(PCG_LOCK, &pc->flags);
-}
-
 static inline void unlock_page_cgroup(struct page_cgroup *pc)
 {
 	bit_spin_unlock(PCG_LOCK, &pc->flags);
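For context, TESTPCGFLAG() follows the same pattern as the
TestClearPageCgroup##uname helper visible in the hunk above, so the added
TESTPCGFLAG(Locked, LOCK) presumably expands to a non-destructive test of the
lock bit, roughly:

/* Assumed expansion of TESTPCGFLAG(Locked, LOCK), by analogy with the
 * TestClearPageCgroup##uname pattern above (not quoted from the header):
 */
static inline int PageCgroupLocked(struct page_cgroup *pc)
	{ return test_bit(PCG_LOCK, &pc->flags); }

This gives callers a way to check (for example in a VM_BUG_ON()) that the
page_cgroup lock is held, which is all that remains needed once
trylock_page_cgroup() loses its last caller.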