diff options
author | Andrew Bresticker <abrestic@google.com> | 2011-11-02 16:40:29 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-11-02 19:07:03 -0400 |
commit | c1e2ee2dc436574880758b3836fc96935b774c32 (patch) | |
tree | aa496a9ba20e06749194faa4dbb14b6046e6b06b /kernel | |
parent | 080d676de095a14ecba14c0b9a91acb5bbb634df (diff) |
memcg: replace ss->id_lock with a rwlock
While back-porting Johannes Weiner's patch "mm: memcg-aware global
reclaim" for an internal effort, we noticed a significant performance
regression during page-reclaim heavy workloads due to high contention of
the ss->id_lock. This lock protects the idr map, and serializes calls to
idr_get_next() in css_get_next() (which is used during the memcg hierarchy
walk).
Since idr_get_next() is just doing a look up, we need only serialize it
with respect to idr_remove()/idr_get_new(). By making the ss->id_lock a
rwlock, contention is greatly reduced and performance improves.
Tested: cat a 256m file from a ramdisk in a 128m container 50 times on
each core (one file + container per core) in parallel on a NUMA machine.
Result is the time for the test to complete in 1 of the containers.
Both kernels included Johannes' memcg-aware global reclaim patches.
Before rwlock patch: 1710.778s
After rwlock patch: 152.227s
Signed-off-by: Andrew Bresticker <abrestic@google.com>
Cc: Paul Menage <menage@gmail.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Ying Han <yinghan@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/cgroup.c | 18 |
1 files changed, 9 insertions, 9 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 8386b21224ef..d9d5648f3cdc 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
@@ -4883,9 +4883,9 @@ void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css) | |||
4883 | 4883 | ||
4884 | rcu_assign_pointer(id->css, NULL); | 4884 | rcu_assign_pointer(id->css, NULL); |
4885 | rcu_assign_pointer(css->id, NULL); | 4885 | rcu_assign_pointer(css->id, NULL); |
4886 | spin_lock(&ss->id_lock); | 4886 | write_lock(&ss->id_lock); |
4887 | idr_remove(&ss->idr, id->id); | 4887 | idr_remove(&ss->idr, id->id); |
4888 | spin_unlock(&ss->id_lock); | 4888 | write_unlock(&ss->id_lock); |
4889 | kfree_rcu(id, rcu_head); | 4889 | kfree_rcu(id, rcu_head); |
4890 | } | 4890 | } |
4891 | EXPORT_SYMBOL_GPL(free_css_id); | 4891 | EXPORT_SYMBOL_GPL(free_css_id); |
@@ -4911,10 +4911,10 @@ static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth) | |||
4911 | error = -ENOMEM; | 4911 | error = -ENOMEM; |
4912 | goto err_out; | 4912 | goto err_out; |
4913 | } | 4913 | } |
4914 | spin_lock(&ss->id_lock); | 4914 | write_lock(&ss->id_lock); |
4915 | /* Don't use 0. allocates an ID of 1-65535 */ | 4915 | /* Don't use 0. allocates an ID of 1-65535 */ |
4916 | error = idr_get_new_above(&ss->idr, newid, 1, &myid); | 4916 | error = idr_get_new_above(&ss->idr, newid, 1, &myid); |
4917 | spin_unlock(&ss->id_lock); | 4917 | write_unlock(&ss->id_lock); |
4918 | 4918 | ||
4919 | /* Returns error when there are no free spaces for new ID.*/ | 4919 | /* Returns error when there are no free spaces for new ID.*/ |
4920 | if (error) { | 4920 | if (error) { |
@@ -4929,9 +4929,9 @@ static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth) | |||
4929 | return newid; | 4929 | return newid; |
4930 | remove_idr: | 4930 | remove_idr: |
4931 | error = -ENOSPC; | 4931 | error = -ENOSPC; |
4932 | spin_lock(&ss->id_lock); | 4932 | write_lock(&ss->id_lock); |
4933 | idr_remove(&ss->idr, myid); | 4933 | idr_remove(&ss->idr, myid); |
4934 | spin_unlock(&ss->id_lock); | 4934 | write_unlock(&ss->id_lock); |
4935 | err_out: | 4935 | err_out: |
4936 | kfree(newid); | 4936 | kfree(newid); |
4937 | return ERR_PTR(error); | 4937 | return ERR_PTR(error); |
@@ -4943,7 +4943,7 @@ static int __init_or_module cgroup_init_idr(struct cgroup_subsys *ss, | |||
4943 | { | 4943 | { |
4944 | struct css_id *newid; | 4944 | struct css_id *newid; |
4945 | 4945 | ||
4946 | spin_lock_init(&ss->id_lock); | 4946 | rwlock_init(&ss->id_lock); |
4947 | idr_init(&ss->idr); | 4947 | idr_init(&ss->idr); |
4948 | 4948 | ||
4949 | newid = get_new_cssid(ss, 0); | 4949 | newid = get_new_cssid(ss, 0); |
@@ -5038,9 +5038,9 @@ css_get_next(struct cgroup_subsys *ss, int id, | |||
5038 | * scan next entry from bitmap(tree), tmpid is updated after | 5038 | * scan next entry from bitmap(tree), tmpid is updated after |
5039 | * idr_get_next(). | 5039 | * idr_get_next(). |
5040 | */ | 5040 | */ |
5041 | spin_lock(&ss->id_lock); | 5041 | read_lock(&ss->id_lock); |
5042 | tmp = idr_get_next(&ss->idr, &tmpid); | 5042 | tmp = idr_get_next(&ss->idr, &tmpid); |
5043 | spin_unlock(&ss->id_lock); | 5043 | read_unlock(&ss->id_lock); |
5044 | 5044 | ||
5045 | if (!tmp) | 5045 | if (!tmp) |
5046 | break; | 5046 | break; |