Diffstat (limited to 'include/linux/res_counter.h')
-rw-r--r--  include/linux/res_counter.h  72
1 files changed, 14 insertions, 58 deletions
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index a5930cb66145..c9d625ca659e 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -129,20 +129,22 @@ int __must_check res_counter_charge(struct res_counter *counter,
 void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
 void res_counter_uncharge(struct res_counter *counter, unsigned long val);
 
-static inline bool res_counter_limit_check_locked(struct res_counter *cnt)
-{
-	if (cnt->usage < cnt->limit)
-		return true;
-
-	return false;
-}
-
-static inline bool res_counter_soft_limit_check_locked(struct res_counter *cnt)
+/**
+ * res_counter_margin - calculate chargeable space of a counter
+ * @cnt: the counter
+ *
+ * Returns the difference between the hard limit and the current usage
+ * of resource counter @cnt.
+ */
+static inline unsigned long long res_counter_margin(struct res_counter *cnt)
 {
-	if (cnt->usage < cnt->soft_limit)
-		return true;
+	unsigned long long margin;
+	unsigned long flags;
 
-	return false;
+	spin_lock_irqsave(&cnt->lock, flags);
+	margin = cnt->limit - cnt->usage;
+	spin_unlock_irqrestore(&cnt->lock, flags);
+	return margin;
 }
 
 /**
@@ -167,52 +169,6 @@ res_counter_soft_limit_excess(struct res_counter *cnt)
 	return excess;
 }
 
-/*
- * Helper function to detect if the cgroup is within it's limit or
- * not. It's currently called from cgroup_rss_prepare()
- */
-static inline bool res_counter_check_under_limit(struct res_counter *cnt)
-{
-	bool ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&cnt->lock, flags);
-	ret = res_counter_limit_check_locked(cnt);
-	spin_unlock_irqrestore(&cnt->lock, flags);
-	return ret;
-}
-
-/**
- * res_counter_check_margin - check if the counter allows charging
- * @cnt: the resource counter to check
- * @bytes: the number of bytes to check the remaining space against
- *
- * Returns a boolean value on whether the counter can be charged
- * @bytes or whether this would exceed the limit.
- */
-static inline bool res_counter_check_margin(struct res_counter *cnt,
-					    unsigned long bytes)
-{
-	bool ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&cnt->lock, flags);
-	ret = cnt->limit - cnt->usage >= bytes;
-	spin_unlock_irqrestore(&cnt->lock, flags);
-	return ret;
-}
-
-static inline bool res_counter_check_under_soft_limit(struct res_counter *cnt)
-{
-	bool ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(&cnt->lock, flags);
-	ret = res_counter_soft_limit_check_locked(cnt);
-	spin_unlock_irqrestore(&cnt->lock, flags);
-	return ret;
-}
-
 static inline void res_counter_reset_max(struct res_counter *cnt)
 {
 	unsigned long flags;
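
Note on usage (not part of the patch above): the new res_counter_margin() replaces the removed boolean helpers with a single locked read of the remaining headroom, so a caller that used to ask res_counter_check_margin(cnt, bytes) can now compare the returned margin against the charge size itself. The sketch below is purely illustrative user-space code; it mocks the usage/limit fields of struct res_counter, omits the spinlock, and exists only to show the margin arithmetic and the caller-side comparison.

/* Illustrative user-space mock -- NOT kernel code. Field names mirror the
 * usage/limit members touched by this patch; locking is deliberately omitted. */
#include <stdbool.h>
#include <stdio.h>

struct mock_res_counter {
	unsigned long long usage;	/* current consumption, in bytes */
	unsigned long long limit;	/* hard limit, in bytes */
};

/* Same arithmetic as the new res_counter_margin(): remaining chargeable space. */
static unsigned long long mock_margin(const struct mock_res_counter *cnt)
{
	return cnt->limit - cnt->usage;
}

/* The question the removed res_counter_check_margin() answered, now expressed
 * by the caller as a comparison against the margin. */
static bool can_charge(const struct mock_res_counter *cnt, unsigned long bytes)
{
	return mock_margin(cnt) >= bytes;
}

int main(void)
{
	struct mock_res_counter cnt = { .usage = 3UL << 20, .limit = 4UL << 20 };

	printf("margin: %llu bytes\n", mock_margin(&cnt));
	printf("charging 512 KiB %s\n",
	       can_charge(&cnt, 512UL << 10) ? "fits" : "would exceed the limit");
	return 0;
}

Returning the amount of free space rather than a yes/no answer lets one helper serve both the "can I charge N bytes?" check and callers that want to know how much room is left, which is what allows the three removed helpers to be consolidated away.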