aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorFrederic Weisbecker <fweisbec@gmail.com>2012-05-29 18:07:03 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-05-29 19:22:27 -0400
commit2bb2ba9d51a8044a71a29608d2c4ef8f5b2d57a2 (patch)
tree5e58869c606c541d41a9bfa62aa6e8bc42cae5ac
parentf9be23d6da035241b7687b25e64401171986dcef (diff)
rescounters: add res_counter_uncharge_until()
When killing a res_counter which is a child of other counter, we need to do res_counter_uncharge(child, xxx) res_counter_charge(parent, xxx) This is not atomic and wastes CPU. This patch adds res_counter_uncharge_until(). This function's uncharge propagates to ancestors until specified res_counter. res_counter_uncharge_until(child, parent, xxx) Now the operation is atomic and efficient. Signed-off-by: Frederic Weisbecker <fweisbec@redhat.com> Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> Cc: Michal Hocko <mhocko@suse.cz> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Ying Han <yinghan@google.com> Cc: Glauber Costa <glommer@parallels.com> Reviewed-by: Tejun Heo <tj@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--Documentation/cgroups/resource_counter.txt8
-rw-r--r--include/linux/res_counter.h3
-rw-r--r--kernel/res_counter.c10
3 files changed, 19 insertions, 2 deletions
diff --git a/Documentation/cgroups/resource_counter.txt b/Documentation/cgroups/resource_counter.txt
index f3c4ec3626a2..0c4a344e78fa 100644
--- a/Documentation/cgroups/resource_counter.txt
+++ b/Documentation/cgroups/resource_counter.txt
@@ -92,6 +92,14 @@ to work with it.
92 92
93 The _locked routines imply that the res_counter->lock is taken. 93 The _locked routines imply that the res_counter->lock is taken.
94 94
95 f. void res_counter_uncharge_until
96 (struct res_counter *rc, struct res_counter *top,
97 unsigned long val)
98
99 Almost same as res_counter_uncharge() but propagation of uncharge
100 stops when rc == top. This is useful when killing a res_counter in
101 child cgroup.
102
95 2.1 Other accounting routines 103 2.1 Other accounting routines
96 104
97 There are more routines that may help you with common needs, like 105 There are more routines that may help you with common needs, like
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index fb201896a8b0..5de7a146ead9 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -135,6 +135,9 @@ int __must_check res_counter_charge_nofail(struct res_counter *counter,
135void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val); 135void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
136void res_counter_uncharge(struct res_counter *counter, unsigned long val); 136void res_counter_uncharge(struct res_counter *counter, unsigned long val);
137 137
138void res_counter_uncharge_until(struct res_counter *counter,
139 struct res_counter *top,
140 unsigned long val);
138/** 141/**
139 * res_counter_margin - calculate chargeable space of a counter 142 * res_counter_margin - calculate chargeable space of a counter
140 * @cnt: the counter 143 * @cnt: the counter
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index bebe2b170d49..ad581aa2369a 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -94,13 +94,15 @@ void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
94 counter->usage -= val; 94 counter->usage -= val;
95} 95}
96 96
97void res_counter_uncharge(struct res_counter *counter, unsigned long val) 97void res_counter_uncharge_until(struct res_counter *counter,
98 struct res_counter *top,
99 unsigned long val)
98{ 100{
99 unsigned long flags; 101 unsigned long flags;
100 struct res_counter *c; 102 struct res_counter *c;
101 103
102 local_irq_save(flags); 104 local_irq_save(flags);
103 for (c = counter; c != NULL; c = c->parent) { 105 for (c = counter; c != top; c = c->parent) {
104 spin_lock(&c->lock); 106 spin_lock(&c->lock);
105 res_counter_uncharge_locked(c, val); 107 res_counter_uncharge_locked(c, val);
106 spin_unlock(&c->lock); 108 spin_unlock(&c->lock);
@@ -108,6 +110,10 @@ void res_counter_uncharge(struct res_counter *counter, unsigned long val)
108 local_irq_restore(flags); 110 local_irq_restore(flags);
109} 111}
110 112
113void res_counter_uncharge(struct res_counter *counter, unsigned long val)
114{
115 res_counter_uncharge_until(counter, NULL, val);
116}
111 117
112static inline unsigned long long * 118static inline unsigned long long *
113res_counter_member(struct res_counter *counter, int member) 119res_counter_member(struct res_counter *counter, int member)