author     KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>   2012-03-21 19:34:24 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>       2012-03-21 20:55:01 -0400
commit     312734c04e2fecc58429aec98194e4ff12d8f7d6
tree       c1195cd46733b6a3909c11b2b5abcdee4412b99b   /include/linux/page_cgroup.h
parent     619d094b5872a5af153f1af77a8b7f7326faf0d0
memcg: remove PCG_MOVE_LOCK flag from page_cgroup
PCG_MOVE_LOCK is used as a per-page bit spinlock to avoid the race between
overwriting pc->mem_cgroup and per-memcg page statistics accounting. The
lock does close the race, but the race itself is very rare because moving
tasks between cgroups is not a usual job. So spending one bit per page on
it seems too costly.
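For illustration, the race window looks roughly like this (a sketch only,
assuming the usual mem_cgroup_update_page_stat() pattern; the field names
are illustrative, not taken from this patch):

	/*
	 * Sketch of the race the lock closes: a statistics updater
	 * reads pc->mem_cgroup and then bumps a per-cpu counter on
	 * that memcg.
	 */
	struct mem_cgroup *memcg = pc->mem_cgroup;
	/*
	 * Without a lock, mem_cgroup_move_account() may rewrite
	 * pc->mem_cgroup right here...
	 */
	this_cpu_add(memcg->stat->count[idx], val);
	/* ...and the update lands on the old (wrong) memcg. */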
This patch turns the lock into a per-memcg spinlock and removes
PCG_MOVE_LOCK.
If a finer-grained lock turns out to be required, hashed locks can be
added later, but I'd like to start with this.
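Only the include/linux/page_cgroup.h half of the change is shown below.
On the mm/memcontrol.c side the replacement, in sketch form (assuming a
spinlock_t move_lock member embedded in struct mem_cgroup), keeps the same
IRQ-safe discipline as the removed helpers:

	/*
	 * Sketch of the per-memcg replacement: one spinlock_t in
	 * struct mem_cgroup instead of one flag bit in every
	 * page_cgroup.
	 */
	static void move_lock_mem_cgroup(struct mem_cgroup *memcg,
					 unsigned long *flags)
	{
		spin_lock_irqsave(&memcg->move_lock, *flags);
	}

	static void move_unlock_mem_cgroup(struct mem_cgroup *memcg,
					   unsigned long *flags)
	{
		spin_unlock_irqrestore(&memcg->move_lock, *flags);
	}

IRQs must still be disabled while the lock is held because, as the removed
comment notes, statistics updates arrive from both normal and IRQ context.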
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Greg Thelen <gthelen@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Ying Han <yinghan@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/page_cgroup.h')
-rw-r--r--	include/linux/page_cgroup.h	19
1 file changed, 0 insertions(+), 19 deletions(-)
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index 106029243ff4..7a3af748f32b 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -7,7 +7,6 @@ enum {
 	PCG_USED, /* this object is in use. */
 	PCG_MIGRATION, /* under page migration */
 	/* flags for mem_cgroup and file and I/O status */
-	PCG_MOVE_LOCK, /* For race between move_account v.s. following bits */
 	PCG_FILE_MAPPED, /* page is accounted as "mapped" */
 	__NR_PCG_FLAGS,
 };
@@ -89,24 +88,6 @@ static inline void unlock_page_cgroup(struct page_cgroup *pc)
 	bit_spin_unlock(PCG_LOCK, &pc->flags);
 }
 
-static inline void move_lock_page_cgroup(struct page_cgroup *pc,
-					unsigned long *flags)
-{
-	/*
-	 * We know updates to pc->flags of page cache's stats are from both of
-	 * usual context or IRQ context. Disable IRQ to avoid deadlock.
-	 */
-	local_irq_save(*flags);
-	bit_spin_lock(PCG_MOVE_LOCK, &pc->flags);
-}
-
-static inline void move_unlock_page_cgroup(struct page_cgroup *pc,
-					unsigned long *flags)
-{
-	bit_spin_unlock(PCG_MOVE_LOCK, &pc->flags);
-	local_irq_restore(*flags);
-}
-
 #else /* CONFIG_CGROUP_MEM_RES_CTLR */
 struct page_cgroup;
 