Diffstat (limited to 'include/linux/res_counter.h')
 -rw-r--r--   include/linux/res_counter.h   64
 1 files changed, 62 insertions, 2 deletions
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index 511f42fc681..731af71cddc 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -35,6 +35,10 @@ struct res_counter {
 	 */
 	unsigned long long limit;
 	/*
+	 * the limit that usage can be exceeded
+	 */
+	unsigned long long soft_limit;
+	/*
 	 * the number of unsuccessful attempts to consume the resource
 	 */
 	unsigned long long failcnt;
@@ -87,6 +91,7 @@ enum {
 	RES_MAX_USAGE,
 	RES_LIMIT,
 	RES_FAILCNT,
+	RES_SOFT_LIMIT,
 };
 
 /*
@@ -109,7 +114,8 @@ void res_counter_init(struct res_counter *counter, struct res_counter *parent);
 int __must_check res_counter_charge_locked(struct res_counter *counter,
 		unsigned long val);
 int __must_check res_counter_charge(struct res_counter *counter,
-		unsigned long val, struct res_counter **limit_fail_at);
+		unsigned long val, struct res_counter **limit_fail_at,
+		struct res_counter **soft_limit_at);
 
 /*
  * uncharge - tell that some portion of the resource is released
@@ -122,7 +128,8 @@ int __must_check res_counter_charge(struct res_counter *counter,
  */
 
 void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
-void res_counter_uncharge(struct res_counter *counter, unsigned long val);
+void res_counter_uncharge(struct res_counter *counter, unsigned long val,
+		bool *was_soft_limit_excess);
 
 static inline bool res_counter_limit_check_locked(struct res_counter *cnt)
 {
@@ -132,6 +139,36 @@ static inline bool res_counter_limit_check_locked(struct res_counter *cnt)
 	return false;
 }
 
+static inline bool res_counter_soft_limit_check_locked(struct res_counter *cnt)
+{
+	if (cnt->usage < cnt->soft_limit)
+		return true;
+
+	return false;
+}
+
+/**
+ * Get the difference between the usage and the soft limit
+ * @cnt: The counter
+ *
+ * Returns 0 if usage is less than or equal to the soft limit;
+ * otherwise, returns the difference between usage and the soft limit.
+ */
+static inline unsigned long long
+res_counter_soft_limit_excess(struct res_counter *cnt)
+{
+	unsigned long long excess;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cnt->lock, flags);
+	if (cnt->usage <= cnt->soft_limit)
+		excess = 0;
+	else
+		excess = cnt->usage - cnt->soft_limit;
+	spin_unlock_irqrestore(&cnt->lock, flags);
+	return excess;
+}
+
 /*
  * Helper function to detect if the cgroup is within it's limit or
  * not. It's currently called from cgroup_rss_prepare()
@@ -147,6 +184,17 @@ static inline bool res_counter_check_under_limit(struct res_counter *cnt)
 	return ret;
 }
 
+static inline bool res_counter_check_under_soft_limit(struct res_counter *cnt)
+{
+	bool ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cnt->lock, flags);
+	ret = res_counter_soft_limit_check_locked(cnt);
+	spin_unlock_irqrestore(&cnt->lock, flags);
+	return ret;
+}
+
 static inline void res_counter_reset_max(struct res_counter *cnt)
 {
 	unsigned long flags;
@@ -180,4 +228,16 @@ static inline int res_counter_set_limit(struct res_counter *cnt,
 	return ret;
 }
 
+static inline int
+res_counter_set_soft_limit(struct res_counter *cnt,
+				unsigned long long soft_limit)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cnt->lock, flags);
+	cnt->soft_limit = soft_limit;
+	spin_unlock_irqrestore(&cnt->lock, flags);
+	return 0;
+}
+
 #endif
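
For reference, here is a minimal, hypothetical sketch of how a controller might drive the soft-limit interface declared above. The counter and helper names (my_counter, my_counter_init, my_charge, my_uncharge) are illustrative only and are not part of this patch; the exact semantics of the soft_limit_at and was_soft_limit_excess out-parameters are defined by the res_counter implementation, not by this header alone.

/* Hypothetical usage sketch -- illustrative only, not part of this patch. */
#include <linux/kernel.h>
#include <linux/res_counter.h>

static struct res_counter my_counter;

static void my_counter_init(void)
{
	res_counter_init(&my_counter, NULL);	/* no parent counter */
	/* usage may grow past 256 MiB; only the hard limit blocks charges */
	res_counter_set_soft_limit(&my_counter, 256ULL << 20);
}

static int my_charge(unsigned long nr_bytes)
{
	struct res_counter *fail_at = NULL;
	struct res_counter *soft_fail_at = NULL;
	int ret;

	/* The charge only fails against the hard limit; soft_fail_at is
	 * presumably filled in when a counter is over its soft limit. */
	ret = res_counter_charge(&my_counter, nr_bytes, &fail_at, &soft_fail_at);
	if (ret)
		return ret;		/* hard limit hit at *fail_at */

	if (soft_fail_at)
		pr_debug("soft limit exceeded by %llu bytes\n",
			 res_counter_soft_limit_excess(soft_fail_at));
	return 0;
}

static void my_uncharge(unsigned long nr_bytes)
{
	bool was_over_soft;

	res_counter_uncharge(&my_counter, nr_bytes, &was_over_soft);
	if (was_over_soft)
		pr_debug("counter was over its soft limit before this uncharge\n");
}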