author     Glauber Costa <glommer@parallels.com>            2012-12-18 17:22:04 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-12-18 18:02:12 -0500
commit     50bdd430c20566b13d8bc59946184b08f5875de6 (patch)
tree       15ccc558002367c3d2c6b092ccc1d49e6d14acf1
parent     6a1a0d3b625a4091e7a0eb249aefc6a644385149 (diff)
res_counter: return amount of charges after res_counter_uncharge()
It is useful to know how many charges are still left after a call to
res_counter_uncharge.  While it is possible to issue a res_counter_read
after uncharge, this can be racy.

If we need, for instance, to take some action when the counters drop
down to 0, only one of the callers should see it.  This is the same
semantics as the atomic variables in the kernel.

Since the current return value is void, we don't need to worry about
anything breaking due to this change: nobody relied on that, and only
users appearing from now on will be checking this value.

Signed-off-by: Glauber Costa <glommer@parallels.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Kamezawa Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Suleiman Souhlal <suleiman@google.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Frederic Weisbecker <fweisbec@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: JoonSoo Kim <js1304@gmail.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
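A minimal userspace sketch of the semantics described above (not part of the patch; the demo_counter and demo_uncharge names are invented for illustration, and all locking is omitted). It only mirrors the clamp-and-return behaviour added to res_counter_uncharge_locked(), to show why returning the remaining charges lets exactly one uncharger observe the drop to zero instead of relying on a racy read after the uncharge:

#include <stdio.h>

/* Toy stand-in for struct res_counter: only the usage field matters here. */
struct demo_counter {
	unsigned long long usage;
};

/* Mirrors the patched uncharge path: clamp the uncharge at the current
 * usage and report how much is left afterwards. */
static unsigned long long demo_uncharge(struct demo_counter *c,
					unsigned long val)
{
	if (c->usage < val)
		val = c->usage;
	c->usage -= val;
	return c->usage;
}

int main(void)
{
	struct demo_counter c = { .usage = 2 };

	/* Two releases of one charge each: only the caller that actually
	 * brings usage to 0 sees 0, so an "empty counter" action can run
	 * exactly once. */
	if (demo_uncharge(&c, 1) == 0)
		printf("caller 1 saw the counter drop to zero\n");
	if (demo_uncharge(&c, 1) == 0)
		printf("caller 2 saw the counter drop to zero\n");

	return 0;
}

Only the second call prints, since only it drives usage to zero; this is the behaviour the changelog compares to the kernel's atomic variables (much like the atomic_dec_and_test() pattern).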
-rw-r--r--  Documentation/cgroups/resource_counter.txt  |  7
-rw-r--r--  include/linux/res_counter.h                 | 12
-rw-r--r--  kernel/res_counter.c                        | 20
3 files changed, 24 insertions, 15 deletions
diff --git a/Documentation/cgroups/resource_counter.txt b/Documentation/cgroups/resource_counter.txt
index 0c4a344e78fa..c4d99ed0b418 100644
--- a/Documentation/cgroups/resource_counter.txt
+++ b/Documentation/cgroups/resource_counter.txt
@@ -83,16 +83,17 @@ to work with it.
 	res_counter->lock internally (it must be called with res_counter->lock
 	held). The force parameter indicates whether we can bypass the limit.
 
- e. void res_counter_uncharge[_locked]
+ e. u64 res_counter_uncharge[_locked]
 			(struct res_counter *rc, unsigned long val)
 
 	When a resource is released (freed) it should be de-accounted
 	from the resource counter it was accounted to. This is called
-	"uncharging".
+	"uncharging". The return value of this function indicate the amount
+	of charges still present in the counter.
 
 	The _locked routines imply that the res_counter->lock is taken.
 
- f. void res_counter_uncharge_until
+ f. u64 res_counter_uncharge_until
 		(struct res_counter *rc, struct res_counter *top,
 		 unsinged long val)
 
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index 6f54e40fa218..5ae8456d9670 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -125,14 +125,16 @@ int res_counter_charge_nofail(struct res_counter *counter,
  *
  * these calls check for usage underflow and show a warning on the console
  * _locked call expects the counter->lock to be taken
+ *
+ * returns the total charges still present in @counter.
  */
 
-void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
-void res_counter_uncharge(struct res_counter *counter, unsigned long val);
+u64 res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
+u64 res_counter_uncharge(struct res_counter *counter, unsigned long val);
 
-void res_counter_uncharge_until(struct res_counter *counter,
-				struct res_counter *top,
-				unsigned long val);
+u64 res_counter_uncharge_until(struct res_counter *counter,
+			       struct res_counter *top,
+			       unsigned long val);
 /**
  * res_counter_margin - calculate chargeable space of a counter
  * @cnt: the counter
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index 3920d593e63c..ff55247e7049 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -86,33 +86,39 @@ int res_counter_charge_nofail(struct res_counter *counter, unsigned long val,
 	return __res_counter_charge(counter, val, limit_fail_at, true);
 }
 
-void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
+u64 res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
 {
 	if (WARN_ON(counter->usage < val))
 		val = counter->usage;
 
 	counter->usage -= val;
+	return counter->usage;
 }
 
-void res_counter_uncharge_until(struct res_counter *counter,
-				struct res_counter *top,
-				unsigned long val)
+u64 res_counter_uncharge_until(struct res_counter *counter,
+			       struct res_counter *top,
+			       unsigned long val)
 {
 	unsigned long flags;
 	struct res_counter *c;
+	u64 ret = 0;
 
 	local_irq_save(flags);
 	for (c = counter; c != top; c = c->parent) {
+		u64 r;
 		spin_lock(&c->lock);
-		res_counter_uncharge_locked(c, val);
+		r = res_counter_uncharge_locked(c, val);
+		if (c == counter)
+			ret = r;
 		spin_unlock(&c->lock);
 	}
 	local_irq_restore(flags);
+	return ret;
 }
 
-void res_counter_uncharge(struct res_counter *counter, unsigned long val)
+u64 res_counter_uncharge(struct res_counter *counter, unsigned long val)
 {
-	res_counter_uncharge_until(counter, NULL, val);
+	return res_counter_uncharge_until(counter, NULL, val);
 }
 
 static inline unsigned long long *
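For reference, a small userspace model of the hierarchical walk in the hunk above. It is not kernel code, the demo_* names are invented, and spinlocks/IRQ handling are omitted; it only illustrates that res_counter_uncharge_until() uncharges every level from the given counter up to (but not including) top, that the value it reports is the remaining usage of the counter passed in rather than of any ancestor, and that res_counter_uncharge() is the same walk with top == NULL:

#include <stddef.h>
#include <stdio.h>

/* Toy model of a hierarchical counter; only the fields used here. */
struct demo_counter {
	unsigned long long usage;
	struct demo_counter *parent;
};

static unsigned long long demo_uncharge_locked(struct demo_counter *c,
					       unsigned long val)
{
	if (c->usage < val)
		val = c->usage;
	c->usage -= val;
	return c->usage;
}

/* Walk from @counter up to (but not including) @top, uncharging each
 * level, and report the usage left in @counter itself. */
static unsigned long long demo_uncharge_until(struct demo_counter *counter,
					      struct demo_counter *top,
					      unsigned long val)
{
	unsigned long long ret = 0;
	struct demo_counter *c;

	for (c = counter; c != top; c = c->parent) {
		unsigned long long r = demo_uncharge_locked(c, val);

		if (c == counter)
			ret = r;
	}
	return ret;
}

int main(void)
{
	struct demo_counter root  = { .usage = 10, .parent = NULL  };
	struct demo_counter child = { .usage = 4,  .parent = &root };

	/* Uncharge 4 from the child and every ancestor up to the root. */
	unsigned long long left = demo_uncharge_until(&child, NULL, 4);

	/* Prints "child has 0 left, root has 6 left": the return value is
	 * the child's remaining usage, even though the root still holds
	 * charges from other children. */
	printf("child has %llu left, root has %llu left\n",
	       left, root.usage);
	return 0;
}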