author     Tejun Heo <tj@kernel.org>    2018-04-26 17:29:05 -0400
committer  Tejun Heo <tj@kernel.org>    2018-04-26 17:29:05 -0400
commit     6162cef0f741c70eb0c7ac7e6142f85808d8abc4
tree       355935993063eb4aa28ff6e8491d40380a83db7f  /kernel/cgroup
parent     a17556f8d9798e8feff9e34d746e489e78ee1dab
cgroup: Factor out and expose cgroup_rstat_*() interface functions
cgroup_rstat is being generalized so that controllers can use it too.
This patch factors out and exposes the following interface functions.
* cgroup_rstat_updated(): Renamed from cgroup_rstat_cpu_updated() for
consistency.
* cgroup_rstat_flush_hold/release(): Factored out from base stat
implementation.
* cgroup_rstat_flush(): Verbatim expose.
While at it, drop assert on cgroup_rstat_mutex in
cgroup_base_stat_flush() as it crosses layers and make a minor comment
update.
v2: Added EXPORT_SYMBOL_GPL(cgroup_rstat_updated) to fix a build bug.
Signed-off-by: Tejun Heo <tj@kernel.org>
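
For context, a minimal sketch of how a controller might drive the interface exposed above: cgroup_rstat_updated() on the per-cpu update path, and cgroup_rstat_flush_hold()/cgroup_rstat_flush_release() around a read, mirroring what cgroup_base_stat_cputime_show() does after this patch. The foo_* names and the per-cpu counter are hypothetical; only the cgroup_rstat_*() calls come from this change, and the per-cpu-to-subtree aggregation would live in a controller flush hook that this patch does not add.

/*
 * Hypothetical controller-side usage of the exposed rstat interface.
 * Everything named foo_* is illustrative; only the cgroup_rstat_*()
 * calls are factored out/exposed by this patch (presumably declared
 * in <linux/cgroup.h> once the declarations leave cgroup-internal.h).
 */
#include <linux/cgroup.h>
#include <linux/percpu.h>
#include <linux/smp.h>

struct foo_stat {
	u64 events;			/* subtree-wide value, filled in at flush time */
	u64 __percpu *pcpu_events;	/* cheap per-cpu pending counter */
};

/* Hot path: bump the per-cpu counter and mark this cgroup/cpu as having
 * pending stats.  No global lock is taken here. */
static void foo_account_event(struct cgroup *cgrp, struct foo_stat *stat)
{
	int cpu = get_cpu();		/* pin the CPU so the id stays valid */

	this_cpu_inc(*stat->pcpu_events);
	cgroup_rstat_updated(cgrp, cpu);
	put_cpu();
}

/* Read path: flush the subtree, then read the aggregated value while
 * holding further flushes off. */
static u64 foo_read_events(struct cgroup *cgrp, struct foo_stat *stat)
{
	u64 val;

	cgroup_rstat_flush_hold(cgrp);
	val = stat->events;	/* assumed to be aggregated by a flush callback */
	cgroup_rstat_flush_release();

	return val;
}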
Diffstat (limited to 'kernel/cgroup')
-rw-r--r--  kernel/cgroup/cgroup-internal.h |  1
-rw-r--r--  kernel/cgroup/rstat.c           | 42
2 files changed, 30 insertions(+), 13 deletions(-)
diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
index 2bf6fb417588..b68e1a7c146c 100644
--- a/kernel/cgroup/cgroup-internal.h
+++ b/kernel/cgroup/cgroup-internal.h
@@ -203,7 +203,6 @@ int cgroup_task_count(const struct cgroup *cgrp);
 /*
  * rstat.c
  */
-void cgroup_rstat_flush(struct cgroup *cgrp);
 int cgroup_rstat_init(struct cgroup *cgrp);
 void cgroup_rstat_exit(struct cgroup *cgrp);
 void cgroup_rstat_boot(void);
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index 87d7252769e7..d49bf92ac3d4 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -13,7 +13,7 @@ static struct cgroup_rstat_cpu *cgroup_rstat_cpu(struct cgroup *cgrp, int cpu)
 }
 
 /**
- * cgroup_rstat_cpu_updated - keep track of updated rstat_cpu
+ * cgroup_rstat_updated - keep track of updated rstat_cpu
  * @cgrp: target cgroup
  * @cpu: cpu on which rstat_cpu was updated
  *
@@ -21,7 +21,7 @@ static struct cgroup_rstat_cpu *cgroup_rstat_cpu(struct cgroup *cgrp, int cpu)
  * rstat_cpu->updated_children list. See the comment on top of
  * cgroup_rstat_cpu definition for details.
  */
-static void cgroup_rstat_cpu_updated(struct cgroup *cgrp, int cpu)
+void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
 {
 	raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
 	struct cgroup *parent;
@@ -59,6 +59,7 @@ static void cgroup_rstat_cpu_updated(struct cgroup *cgrp, int cpu)
 
 	raw_spin_unlock_irqrestore(cpu_lock, flags);
 }
+EXPORT_SYMBOL_GPL(cgroup_rstat_updated);
 
 /**
  * cgroup_rstat_cpu_pop_updated - iterate and dismantle rstat_cpu updated tree
@@ -167,6 +168,29 @@ void cgroup_rstat_flush(struct cgroup *cgrp)
 	mutex_unlock(&cgroup_rstat_mutex);
 }
 
+/**
+ * cgroup_rstat_flush_begin - flush stats in @cgrp's subtree and hold
+ * @cgrp: target cgroup
+ *
+ * Flush stats in @cgrp's subtree and prevent further flushes. Must be
+ * paired with cgroup_rstat_flush_release().
+ */
+void cgroup_rstat_flush_hold(struct cgroup *cgrp)
+	__acquires(&cgroup_rstat_mutex)
+{
+	mutex_lock(&cgroup_rstat_mutex);
+	cgroup_rstat_flush_locked(cgrp);
+}
+
+/**
+ * cgroup_rstat_flush_release - release cgroup_rstat_flush_hold()
+ */
+void cgroup_rstat_flush_release(void)
+	__releases(&cgroup_rstat_mutex)
+{
+	mutex_unlock(&cgroup_rstat_mutex);
+}
+
 int cgroup_rstat_init(struct cgroup *cgrp)
 {
 	int cpu;
@@ -239,15 +263,13 @@ static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
 	struct cgroup_base_stat delta;
 	unsigned seq;
 
-	lockdep_assert_held(&cgroup_rstat_mutex);
-
 	/* fetch the current per-cpu values */
 	do {
 		seq = __u64_stats_fetch_begin(&rstatc->bsync);
 		cputime = rstatc->bstat.cputime;
 	} while (__u64_stats_fetch_retry(&rstatc->bsync, seq));
 
-	/* accumulate the deltas to propgate */
+	/* calculate the delta to propgate */
 	delta.cputime.utime = cputime.utime - last_cputime->utime;
 	delta.cputime.stime = cputime.stime - last_cputime->stime;
 	delta.cputime.sum_exec_runtime = cputime.sum_exec_runtime -
@@ -278,7 +300,7 @@ static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp,
 						 struct cgroup_rstat_cpu *rstatc)
 {
 	u64_stats_update_end(&rstatc->bsync);
-	cgroup_rstat_cpu_updated(cgrp, smp_processor_id());
+	cgroup_rstat_updated(cgrp, smp_processor_id());
 	put_cpu_ptr(rstatc);
 }
 
@@ -323,14 +345,10 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq)
 	if (!cgroup_parent(cgrp))
 		return;
 
-	mutex_lock(&cgroup_rstat_mutex);
-
-	cgroup_rstat_flush_locked(cgrp);
-
+	cgroup_rstat_flush_hold(cgrp);
 	usage = cgrp->bstat.cputime.sum_exec_runtime;
 	cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime, &utime, &stime);
-
-	mutex_unlock(&cgroup_rstat_mutex);
+	cgroup_rstat_flush_release();
 
 	do_div(usage, NSEC_PER_USEC);
 	do_div(utime, NSEC_PER_USEC);