path: root/kernel/cgroup
author		Tejun Heo <tj@kernel.org>	2018-04-26 17:29:05 -0400
committer	Tejun Heo <tj@kernel.org>	2018-04-26 17:29:05 -0400
commit		a17556f8d9798e8feff9e34d746e489e78ee1dab (patch)
tree		458150f17e299ac8180dae93faa42f9c9a44c8c5 /kernel/cgroup
parent		d4ff749b5e0f1e2d4d69a3e4ea81cdeaeb4904d2 (diff)
cgroup: Reorganize kernel/cgroup/rstat.c
Currently, rstat.c has rstat and base stat implementations intermixed.
Collect the base stat implementation at the end of the file.  Also,
reorder the prototypes.

This patch doesn't make any functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
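The reorganization works because the generic rstat flushing code only needs a forward declaration of cgroup_base_stat_flush(); its definition can then sit at the bottom of the file with the rest of the base-stat code. Below is a minimal stand-alone sketch of that top/bottom split, using stand-in names and bodies rather than the kernel code:

/* layout_sketch.c - illustrates the file layout the patch creates in
 * rstat.c: generic flush code first, base-stat code collected at the end,
 * tied together by a forward declaration.  Names and bodies are stand-ins. */
#include <stdio.h>

#define NR_CPUS 4

/* --- generic rstat layer (top of file) --- */

/* forward declaration: lets the generic flusher call the base-stat
 * flusher even though its definition now lives at the end of the file */
static void base_stat_flush(int cpu);

static void rstat_flush(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		base_stat_flush(cpu);
}

/* --- base-stat layer (bottom of file) --- */

static void base_stat_flush(int cpu)
{
	printf("folding per-cpu base stats for cpu %d\n", cpu);
}

int main(void)
{
	rstat_flush();
	return 0;
}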
Diffstat (limited to 'kernel/cgroup')
-rw-r--r--	kernel/cgroup/cgroup-internal.h	|   2
-rw-r--r--	kernel/cgroup/rstat.c			| 182
2 files changed, 95 insertions(+), 89 deletions(-)
diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
index aab4d0a09670..2bf6fb417588 100644
--- a/kernel/cgroup/cgroup-internal.h
+++ b/kernel/cgroup/cgroup-internal.h
@@ -206,8 +206,8 @@ int cgroup_task_count(const struct cgroup *cgrp);
 void cgroup_rstat_flush(struct cgroup *cgrp);
 int cgroup_rstat_init(struct cgroup *cgrp);
 void cgroup_rstat_exit(struct cgroup *cgrp);
-void cgroup_base_stat_cputime_show(struct seq_file *seq);
 void cgroup_rstat_boot(void);
+void cgroup_base_stat_cputime_show(struct seq_file *seq);
 
 /*
  * namespace.c
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index 7670191fa776..87d7252769e7 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -5,6 +5,8 @@
 static DEFINE_MUTEX(cgroup_rstat_mutex);
 static DEFINE_PER_CPU(raw_spinlock_t, cgroup_rstat_cpu_lock);
 
+static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu);
+
 static struct cgroup_rstat_cpu *cgroup_rstat_cpu(struct cgroup *cgrp, int cpu)
 {
 	return per_cpu_ptr(cgrp->rstat_cpu, cpu);
@@ -128,6 +130,98 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
 	return pos;
 }
 
+/* see cgroup_rstat_flush() */
+static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
+{
+	int cpu;
+
+	lockdep_assert_held(&cgroup_rstat_mutex);
+
+	for_each_possible_cpu(cpu) {
+		raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
+						       cpu);
+		struct cgroup *pos = NULL;
+
+		raw_spin_lock_irq(cpu_lock);
+		while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu)))
+			cgroup_base_stat_flush(pos, cpu);
+		raw_spin_unlock_irq(cpu_lock);
+	}
+}
+
+/**
+ * cgroup_rstat_flush - flush stats in @cgrp's subtree
+ * @cgrp: target cgroup
+ *
+ * Collect all per-cpu stats in @cgrp's subtree into the global counters
+ * and propagate them upwards.  After this function returns, all cgroups in
+ * the subtree have up-to-date ->stat.
+ *
+ * This also gets all cgroups in the subtree including @cgrp off the
+ * ->updated_children lists.
+ */
+void cgroup_rstat_flush(struct cgroup *cgrp)
+{
+	mutex_lock(&cgroup_rstat_mutex);
+	cgroup_rstat_flush_locked(cgrp);
+	mutex_unlock(&cgroup_rstat_mutex);
+}
+
+int cgroup_rstat_init(struct cgroup *cgrp)
+{
+	int cpu;
+
+	/* the root cgrp has rstat_cpu preallocated */
+	if (!cgrp->rstat_cpu) {
+		cgrp->rstat_cpu = alloc_percpu(struct cgroup_rstat_cpu);
+		if (!cgrp->rstat_cpu)
+			return -ENOMEM;
+	}
+
+	/* ->updated_children list is self terminated */
+	for_each_possible_cpu(cpu) {
+		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
+
+		rstatc->updated_children = cgrp;
+		u64_stats_init(&rstatc->bsync);
+	}
+
+	return 0;
+}
+
+void cgroup_rstat_exit(struct cgroup *cgrp)
+{
+	int cpu;
+
+	cgroup_rstat_flush(cgrp);
+
+	/* sanity check */
+	for_each_possible_cpu(cpu) {
+		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
+
+		if (WARN_ON_ONCE(rstatc->updated_children != cgrp) ||
+		    WARN_ON_ONCE(rstatc->updated_next))
+			return;
+	}
+
+	free_percpu(cgrp->rstat_cpu);
+	cgrp->rstat_cpu = NULL;
+}
+
+void __init cgroup_rstat_boot(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		raw_spin_lock_init(per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu));
+
+	BUG_ON(cgroup_rstat_init(&cgrp_dfl_root.cgrp));
+}
+
+/*
+ * Functions for cgroup basic resource statistics implemented on top of
+ * rstat.
+ */
 static void cgroup_base_stat_accumulate(struct cgroup_base_stat *dst_bstat,
 					struct cgroup_base_stat *src_bstat)
 {
@@ -170,43 +264,6 @@ static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
 		cgroup_base_stat_accumulate(&parent->pending_bstat, &delta);
 }
 
-/* see cgroup_rstat_flush() */
-static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
-{
-	int cpu;
-
-	lockdep_assert_held(&cgroup_rstat_mutex);
-
-	for_each_possible_cpu(cpu) {
-		raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
-						       cpu);
-		struct cgroup *pos = NULL;
-
-		raw_spin_lock_irq(cpu_lock);
-		while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu)))
-			cgroup_base_stat_flush(pos, cpu);
-		raw_spin_unlock_irq(cpu_lock);
-	}
-}
-
-/**
- * cgroup_rstat_flush - flush stats in @cgrp's subtree
- * @cgrp: target cgroup
- *
- * Collect all per-cpu stats in @cgrp's subtree into the global counters
- * and propagate them upwards.  After this function returns, all cgroups in
- * the subtree have up-to-date ->stat.
- *
- * This also gets all cgroups in the subtree including @cgrp off the
- * ->updated_children lists.
- */
-void cgroup_rstat_flush(struct cgroup *cgrp)
-{
-	mutex_lock(&cgroup_rstat_mutex);
-	cgroup_rstat_flush_locked(cgrp);
-	mutex_unlock(&cgroup_rstat_mutex);
-}
-
 static struct cgroup_rstat_cpu *
 cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp)
 {
@@ -284,54 +341,3 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq)
284 "system_usec %llu\n", 341 "system_usec %llu\n",
285 usage, utime, stime); 342 usage, utime, stime);
286} 343}
287
288int cgroup_rstat_init(struct cgroup *cgrp)
289{
290 int cpu;
291
292 /* the root cgrp has rstat_cpu preallocated */
293 if (!cgrp->rstat_cpu) {
294 cgrp->rstat_cpu = alloc_percpu(struct cgroup_rstat_cpu);
295 if (!cgrp->rstat_cpu)
296 return -ENOMEM;
297 }
298
299 /* ->updated_children list is self terminated */
300 for_each_possible_cpu(cpu) {
301 struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
302
303 rstatc->updated_children = cgrp;
304 u64_stats_init(&rstatc->bsync);
305 }
306
307 return 0;
308}
309
310void cgroup_rstat_exit(struct cgroup *cgrp)
311{
312 int cpu;
313
314 cgroup_rstat_flush(cgrp);
315
316 /* sanity check */
317 for_each_possible_cpu(cpu) {
318 struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
319
320 if (WARN_ON_ONCE(rstatc->updated_children != cgrp) ||
321 WARN_ON_ONCE(rstatc->updated_next))
322 return;
323 }
324
325 free_percpu(cgrp->rstat_cpu);
326 cgrp->rstat_cpu = NULL;
327}
328
329void __init cgroup_rstat_boot(void)
330{
331 int cpu;
332
333 for_each_possible_cpu(cpu)
334 raw_spin_lock_init(per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu));
335
336 BUG_ON(cgroup_rstat_init(&cgrp_dfl_root.cgrp));
337}
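Taken together, the moved functions implement a two-level locking scheme: cgroup_rstat_mutex serializes flushers, while the per-CPU cgroup_rstat_cpu_lock (taken with IRQs disabled) protects each CPU's pending-update state against concurrent updaters. The following is a toy userspace model of that accumulate-then-flush pattern, with simplified names and pthread locks standing in for the kernel primitives; it is an illustration of the idea, not the kernel implementation:

/* model.c - per-CPU deltas are accumulated cheaply under a per-CPU lock and
 * folded into a global total by a single serialized flusher. */
#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 4

static pthread_mutex_t flush_mutex = PTHREAD_MUTEX_INITIALIZER; /* stands in for cgroup_rstat_mutex */
static pthread_spinlock_t cpu_lock[NR_CPUS];                    /* stands in for cgroup_rstat_cpu_lock */
static unsigned long long cpu_delta[NR_CPUS];                   /* per-cpu pending counters */
static unsigned long long global_total;                         /* aggregated counter */

static void account(int cpu, unsigned long long n)
{
	/* hot path: touch only this CPU's counter */
	pthread_spin_lock(&cpu_lock[cpu]);
	cpu_delta[cpu] += n;
	pthread_spin_unlock(&cpu_lock[cpu]);
}

static void flush(void)
{
	/* one flusher at a time; fold every CPU's delta into the total */
	pthread_mutex_lock(&flush_mutex);
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		pthread_spin_lock(&cpu_lock[cpu]);
		global_total += cpu_delta[cpu];
		cpu_delta[cpu] = 0;
		pthread_spin_unlock(&cpu_lock[cpu]);
	}
	pthread_mutex_unlock(&flush_mutex);
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		pthread_spin_init(&cpu_lock[cpu], PTHREAD_PROCESS_PRIVATE);
	account(0, 10);
	account(1, 32);
	flush();
	printf("total after flush: %llu\n", global_total);
	return 0;
}

The kernel version additionally keeps per-CPU ->updated_children lists (walked via cgroup_rstat_cpu_pop_updated()) so the flusher only visits cgroups with pending per-CPU deltas; the toy model above omits that optimization.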