author    Tejun Heo <tj@kernel.org>	2018-04-26 17:29:04 -0400
committer Tejun Heo <tj@kernel.org>	2018-04-26 17:29:04 -0400
commit    c58632b3631cb222da41d9dc0dd39e106c1eafd0 (patch)
tree      bd8be17287250d1e79e7076ca310ca7912c854a7 /kernel/cgroup
parent    a5c2b93f79ef7d746a3cd2c1bd66833286f9be70 (diff)
cgroup: Rename stat to rstat
stat is too generic a name and ends up causing subtle confusions. It'll
be made generic so that controllers can plug into it, which will make
the problem worse. Let's rename it to something more specific -
cgroup_rstat for cgroup recursive stat.

This patch does the following renames. No other changes.

* cpu_stat	-> rstat_cpu
* stat		-> rstat
* ?cstat	-> ?rstatc

Note that the renames are selective. The unrenamed are the ones which
implement basic resource statistics on top of rstat. This will be
further cleaned up in the following patches.

Signed-off-by: Tejun Heo <tj@kernel.org>
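[Editor's note] The rename is mechanical, but the structure it touches is worth keeping in mind while reading the diff: each cgroup carries a per-cpu cgroup_rstat_cpu with a self-terminated ->updated_children list and an ->updated_next link, updates are linked into the ancestors' lists bottom-up, and a flush visits children before their parents. Below is a minimal, self-contained userspace sketch of that bookkeeping under the new naming; the simplified struct, the rstat_updated()/rstat_flush() helpers, and the recursive flush are illustrative assumptions, not kernel code (the real logic is cgroup_rstat_cpu_updated() and cgroup_rstat_cpu_pop_updated() in kernel/cgroup/rstat.c below, which operate per CPU under cgroup_rstat_cpu_lock).

/*
 * Minimal userspace model of the "updated" tree that rstat maintains.
 * Names and simplifications are illustrative only; the kernel keeps one
 * such tree per CPU and takes a per-cpu lock around list manipulation.
 */
#include <stdio.h>

struct cgroup {
        const char *name;
        struct cgroup *parent;
        struct cgroup *updated_children;  /* self-terminated when empty */
        struct cgroup *updated_next;      /* NULL when not on a list */
};

/* mark @cgrp as updated: link it and any unlinked ancestors bottom-up */
static void rstat_updated(struct cgroup *cgrp)
{
        struct cgroup *parent;

        if (cgrp->updated_next)
                return;         /* already linked, so all ancestors are too */

        for (parent = cgrp->parent; parent;
             cgrp = parent, parent = cgrp->parent) {
                if (cgrp->updated_next)
                        break;
                cgrp->updated_next = parent->updated_children;
                parent->updated_children = cgrp;
        }
}

/* flush @cgrp's subtree, visiting children before parents and unlinking */
static void rstat_flush(struct cgroup *cgrp)
{
        while (cgrp->updated_children != cgrp) {
                struct cgroup *child = cgrp->updated_children;

                rstat_flush(child);
                printf("flush %s into %s\n", child->name, cgrp->name);

                cgrp->updated_children = child->updated_next;
                child->updated_next = NULL;
        }
}

int main(void)
{
        struct cgroup root = { .name = "root", .updated_children = &root };
        struct cgroup a = { .name = "a", .parent = &root, .updated_children = &a };
        struct cgroup b = { .name = "b", .parent = &a, .updated_children = &b };

        rstat_updated(&b);      /* links b under a, then a under root */
        rstat_flush(&root);     /* prints "flush b into a", "flush a into root" */
        return 0;
}

The kernel flattens this recursion into the iterator cgroup_rstat_cpu_pop_updated(), which unlinks each cgroup as it is returned while preserving the same guarantee that a child is visited before its parent.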
Diffstat (limited to 'kernel/cgroup')
 kernel/cgroup/cgroup-internal.h |  10
 kernel/cgroup/cgroup.c          |  14
 kernel/cgroup/rstat.c           | 180
 3 files changed, 103 insertions(+), 101 deletions(-)
diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
index b928b27050c6..092711114a1f 100644
--- a/kernel/cgroup/cgroup-internal.h
+++ b/kernel/cgroup/cgroup-internal.h
@@ -201,13 +201,13 @@ int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
 int cgroup_task_count(const struct cgroup *cgrp);
 
 /*
- * stat.c
+ * rstat.c
  */
-void cgroup_stat_flush(struct cgroup *cgrp);
-int cgroup_stat_init(struct cgroup *cgrp);
-void cgroup_stat_exit(struct cgroup *cgrp);
+void cgroup_rstat_flush(struct cgroup *cgrp);
+int cgroup_rstat_init(struct cgroup *cgrp);
+void cgroup_rstat_exit(struct cgroup *cgrp);
 void cgroup_stat_show_cputime(struct seq_file *seq);
-void cgroup_stat_boot(void);
+void cgroup_rstat_boot(void);
 
 /*
  * namespace.c
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index fdb7a582f8fc..32eb7ce0ad71 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -144,14 +144,14 @@ static struct static_key_true *cgroup_subsys_on_dfl_key[] = {
 };
 #undef SUBSYS
 
-static DEFINE_PER_CPU(struct cgroup_cpu_stat, cgrp_dfl_root_cpu_stat);
+static DEFINE_PER_CPU(struct cgroup_rstat_cpu, cgrp_dfl_root_rstat_cpu);
 
 /*
  * The default hierarchy, reserved for the subsystems that are otherwise
  * unattached - it never has more than a single cgroup, and all tasks are
  * part of that cgroup.
  */
-struct cgroup_root cgrp_dfl_root = { .cgrp.cpu_stat = &cgrp_dfl_root_cpu_stat };
+struct cgroup_root cgrp_dfl_root = { .cgrp.rstat_cpu = &cgrp_dfl_root_rstat_cpu };
 EXPORT_SYMBOL_GPL(cgrp_dfl_root);
 
 /*
@@ -4592,7 +4592,7 @@ static void css_free_rwork_fn(struct work_struct *work)
                         cgroup_put(cgroup_parent(cgrp));
                         kernfs_put(cgrp->kn);
                         if (cgroup_on_dfl(cgrp))
-                                cgroup_stat_exit(cgrp);
+                                cgroup_rstat_exit(cgrp);
                         kfree(cgrp);
                 } else {
                         /*
@@ -4629,7 +4629,7 @@ static void css_release_work_fn(struct work_struct *work)
                 trace_cgroup_release(cgrp);
 
                 if (cgroup_on_dfl(cgrp))
-                        cgroup_stat_flush(cgrp);
+                        cgroup_rstat_flush(cgrp);
 
                 for (tcgrp = cgroup_parent(cgrp); tcgrp;
                      tcgrp = cgroup_parent(tcgrp))
@@ -4817,7 +4817,7 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
                 goto out_free_cgrp;
 
         if (cgroup_on_dfl(parent)) {
-                ret = cgroup_stat_init(cgrp);
+                ret = cgroup_rstat_init(cgrp);
                 if (ret)
                         goto out_cancel_ref;
         }
@@ -4882,7 +4882,7 @@ out_idr_free:
         cgroup_idr_remove(&root->cgroup_idr, cgrp->id);
 out_stat_exit:
         if (cgroup_on_dfl(parent))
-                cgroup_stat_exit(cgrp);
+                cgroup_rstat_exit(cgrp);
 out_cancel_ref:
         percpu_ref_exit(&cgrp->self.refcnt);
 out_free_cgrp:
@@ -5275,7 +5275,7 @@ int __init cgroup_init(void)
         BUG_ON(cgroup_init_cftypes(NULL, cgroup_base_files));
         BUG_ON(cgroup_init_cftypes(NULL, cgroup1_base_files));
 
-        cgroup_stat_boot();
+        cgroup_rstat_boot();
 
         /*
          * The latency of the synchronize_sched() is too high for cgroups,
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index 1e111dd455c4..6824047b57a9 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -2,26 +2,26 @@
 
 #include <linux/sched/cputime.h>
 
-static DEFINE_MUTEX(cgroup_stat_mutex);
-static DEFINE_PER_CPU(raw_spinlock_t, cgroup_cpu_stat_lock);
+static DEFINE_MUTEX(cgroup_rstat_mutex);
+static DEFINE_PER_CPU(raw_spinlock_t, cgroup_rstat_cpu_lock);
 
-static struct cgroup_cpu_stat *cgroup_cpu_stat(struct cgroup *cgrp, int cpu)
+static struct cgroup_rstat_cpu *cgroup_rstat_cpu(struct cgroup *cgrp, int cpu)
 {
-        return per_cpu_ptr(cgrp->cpu_stat, cpu);
+        return per_cpu_ptr(cgrp->rstat_cpu, cpu);
 }
 
 /**
- * cgroup_cpu_stat_updated - keep track of updated cpu_stat
+ * cgroup_rstat_cpu_updated - keep track of updated rstat_cpu
  * @cgrp: target cgroup
- * @cpu: cpu on which cpu_stat was updated
+ * @cpu: cpu on which rstat_cpu was updated
  *
- * @cgrp's cpu_stat on @cpu was updated. Put it on the parent's matching
- * cpu_stat->updated_children list. See the comment on top of
- * cgroup_cpu_stat definition for details.
+ * @cgrp's rstat_cpu on @cpu was updated. Put it on the parent's matching
+ * rstat_cpu->updated_children list. See the comment on top of
+ * cgroup_rstat_cpu definition for details.
  */
-static void cgroup_cpu_stat_updated(struct cgroup *cgrp, int cpu)
+static void cgroup_rstat_cpu_updated(struct cgroup *cgrp, int cpu)
 {
-        raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_cpu_stat_lock, cpu);
+        raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
         struct cgroup *parent;
         unsigned long flags;
 
@@ -33,7 +33,7 @@ static void cgroup_cpu_stat_updated(struct cgroup *cgrp, int cpu)
          * instead of NULL, we can tell whether @cgrp is on the list by
          * testing the next pointer for NULL.
          */
-        if (cgroup_cpu_stat(cgrp, cpu)->updated_next)
+        if (cgroup_rstat_cpu(cgrp, cpu)->updated_next)
                 return;
 
         raw_spin_lock_irqsave(cpu_lock, flags);
@@ -41,42 +41,42 @@ static void cgroup_cpu_stat_updated(struct cgroup *cgrp, int cpu)
         /* put @cgrp and all ancestors on the corresponding updated lists */
         for (parent = cgroup_parent(cgrp); parent;
              cgrp = parent, parent = cgroup_parent(cgrp)) {
-                struct cgroup_cpu_stat *cstat = cgroup_cpu_stat(cgrp, cpu);
-                struct cgroup_cpu_stat *pcstat = cgroup_cpu_stat(parent, cpu);
+                struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
+                struct cgroup_rstat_cpu *prstatc = cgroup_rstat_cpu(parent, cpu);
 
                 /*
                  * Both additions and removals are bottom-up. If a cgroup
                  * is already in the tree, all ancestors are.
                  */
-                if (cstat->updated_next)
+                if (rstatc->updated_next)
                         break;
 
-                cstat->updated_next = pcstat->updated_children;
-                pcstat->updated_children = cgrp;
+                rstatc->updated_next = prstatc->updated_children;
+                prstatc->updated_children = cgrp;
         }
 
         raw_spin_unlock_irqrestore(cpu_lock, flags);
 }
 
 /**
- * cgroup_cpu_stat_pop_updated - iterate and dismantle cpu_stat updated tree
+ * cgroup_rstat_cpu_pop_updated - iterate and dismantle rstat_cpu updated tree
  * @pos: current position
  * @root: root of the tree to traversal
  * @cpu: target cpu
  *
- * Walks the udpated cpu_stat tree on @cpu from @root. %NULL @pos starts
+ * Walks the udpated rstat_cpu tree on @cpu from @root. %NULL @pos starts
  * the traversal and %NULL return indicates the end. During traversal,
  * each returned cgroup is unlinked from the tree. Must be called with the
- * matching cgroup_cpu_stat_lock held.
+ * matching cgroup_rstat_cpu_lock held.
  *
  * The only ordering guarantee is that, for a parent and a child pair
  * covered by a given traversal, if a child is visited, its parent is
  * guaranteed to be visited afterwards.
  */
-static struct cgroup *cgroup_cpu_stat_pop_updated(struct cgroup *pos,
-                                                  struct cgroup *root, int cpu)
+static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
+                                                   struct cgroup *root, int cpu)
 {
-        struct cgroup_cpu_stat *cstat;
+        struct cgroup_rstat_cpu *rstatc;
         struct cgroup *parent;
 
         if (pos == root)
@@ -93,10 +93,10 @@ static struct cgroup *cgroup_cpu_stat_pop_updated(struct cgroup *pos,
 
         /* walk down to the first leaf */
         while (true) {
-                cstat = cgroup_cpu_stat(pos, cpu);
-                if (cstat->updated_children == pos)
+                rstatc = cgroup_rstat_cpu(pos, cpu);
+                if (rstatc->updated_children == pos)
                         break;
-                pos = cstat->updated_children;
+                pos = rstatc->updated_children;
         }
 
         /*
@@ -106,23 +106,23 @@ static struct cgroup *cgroup_cpu_stat_pop_updated(struct cgroup *pos,
          * child in most cases. The only exception is @root.
          */
         parent = cgroup_parent(pos);
-        if (parent && cstat->updated_next) {
-                struct cgroup_cpu_stat *pcstat = cgroup_cpu_stat(parent, cpu);
-                struct cgroup_cpu_stat *ncstat;
+        if (parent && rstatc->updated_next) {
+                struct cgroup_rstat_cpu *prstatc = cgroup_rstat_cpu(parent, cpu);
+                struct cgroup_rstat_cpu *nrstatc;
                 struct cgroup **nextp;
 
-                nextp = &pcstat->updated_children;
+                nextp = &prstatc->updated_children;
                 while (true) {
-                        ncstat = cgroup_cpu_stat(*nextp, cpu);
+                        nrstatc = cgroup_rstat_cpu(*nextp, cpu);
                         if (*nextp == pos)
                                 break;
 
                         WARN_ON_ONCE(*nextp == parent);
-                        nextp = &ncstat->updated_next;
+                        nextp = &nrstatc->updated_next;
                 }
 
-                *nextp = cstat->updated_next;
-                cstat->updated_next = NULL;
+                *nextp = rstatc->updated_next;
+                rstatc->updated_next = NULL;
         }
 
         return pos;
@@ -139,19 +139,19 @@ static void cgroup_stat_accumulate(struct cgroup_stat *dst_stat,
 static void cgroup_cpu_stat_flush_one(struct cgroup *cgrp, int cpu)
 {
         struct cgroup *parent = cgroup_parent(cgrp);
-        struct cgroup_cpu_stat *cstat = cgroup_cpu_stat(cgrp, cpu);
-        struct task_cputime *last_cputime = &cstat->last_cputime;
+        struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
+        struct task_cputime *last_cputime = &rstatc->last_cputime;
         struct task_cputime cputime;
         struct cgroup_stat delta;
         unsigned seq;
 
-        lockdep_assert_held(&cgroup_stat_mutex);
+        lockdep_assert_held(&cgroup_rstat_mutex);
 
         /* fetch the current per-cpu values */
         do {
-                seq = __u64_stats_fetch_begin(&cstat->sync);
-                cputime = cstat->cputime;
-        } while (__u64_stats_fetch_retry(&cstat->sync, seq));
+                seq = __u64_stats_fetch_begin(&rstatc->sync);
+                cputime = rstatc->cputime;
+        } while (__u64_stats_fetch_retry(&rstatc->sync, seq));
 
         /* accumulate the deltas to propgate */
         delta.cputime.utime = cputime.utime - last_cputime->utime;
@@ -170,26 +170,27 @@ static void cgroup_cpu_stat_flush_one(struct cgroup *cgrp, int cpu)
                 cgroup_stat_accumulate(&parent->pending_stat, &delta);
 }
 
-/* see cgroup_stat_flush() */
-static void cgroup_stat_flush_locked(struct cgroup *cgrp)
+/* see cgroup_rstat_flush() */
+static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
 {
         int cpu;
 
-        lockdep_assert_held(&cgroup_stat_mutex);
+        lockdep_assert_held(&cgroup_rstat_mutex);
 
         for_each_possible_cpu(cpu) {
-                raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_cpu_stat_lock, cpu);
+                raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
+                                                       cpu);
                 struct cgroup *pos = NULL;
 
                 raw_spin_lock_irq(cpu_lock);
-                while ((pos = cgroup_cpu_stat_pop_updated(pos, cgrp, cpu)))
+                while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu)))
                         cgroup_cpu_stat_flush_one(pos, cpu);
                 raw_spin_unlock_irq(cpu_lock);
         }
 }
 
 /**
- * cgroup_stat_flush - flush stats in @cgrp's subtree
+ * cgroup_rstat_flush - flush stats in @cgrp's subtree
  * @cgrp: target cgroup
  *
  * Collect all per-cpu stats in @cgrp's subtree into the global counters
@@ -199,61 +200,62 @@ static void cgroup_stat_flush_locked(struct cgroup *cgrp)
  * This also gets all cgroups in the subtree including @cgrp off the
  * ->updated_children lists.
  */
-void cgroup_stat_flush(struct cgroup *cgrp)
+void cgroup_rstat_flush(struct cgroup *cgrp)
 {
-        mutex_lock(&cgroup_stat_mutex);
-        cgroup_stat_flush_locked(cgrp);
-        mutex_unlock(&cgroup_stat_mutex);
+        mutex_lock(&cgroup_rstat_mutex);
+        cgroup_rstat_flush_locked(cgrp);
+        mutex_unlock(&cgroup_rstat_mutex);
 }
 
-static struct cgroup_cpu_stat *cgroup_cpu_stat_account_begin(struct cgroup *cgrp)
+static struct cgroup_rstat_cpu *
+cgroup_cpu_stat_account_begin(struct cgroup *cgrp)
 {
-        struct cgroup_cpu_stat *cstat;
+        struct cgroup_rstat_cpu *rstatc;
 
-        cstat = get_cpu_ptr(cgrp->cpu_stat);
-        u64_stats_update_begin(&cstat->sync);
-        return cstat;
+        rstatc = get_cpu_ptr(cgrp->rstat_cpu);
+        u64_stats_update_begin(&rstatc->sync);
+        return rstatc;
 }
 
 static void cgroup_cpu_stat_account_end(struct cgroup *cgrp,
-                                        struct cgroup_cpu_stat *cstat)
+                                        struct cgroup_rstat_cpu *rstatc)
 {
-        u64_stats_update_end(&cstat->sync);
-        cgroup_cpu_stat_updated(cgrp, smp_processor_id());
-        put_cpu_ptr(cstat);
+        u64_stats_update_end(&rstatc->sync);
+        cgroup_rstat_cpu_updated(cgrp, smp_processor_id());
+        put_cpu_ptr(rstatc);
 }
 
 void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec)
 {
-        struct cgroup_cpu_stat *cstat;
+        struct cgroup_rstat_cpu *rstatc;
 
-        cstat = cgroup_cpu_stat_account_begin(cgrp);
-        cstat->cputime.sum_exec_runtime += delta_exec;
-        cgroup_cpu_stat_account_end(cgrp, cstat);
+        rstatc = cgroup_cpu_stat_account_begin(cgrp);
+        rstatc->cputime.sum_exec_runtime += delta_exec;
+        cgroup_cpu_stat_account_end(cgrp, rstatc);
 }
 
 void __cgroup_account_cputime_field(struct cgroup *cgrp,
                                     enum cpu_usage_stat index, u64 delta_exec)
 {
-        struct cgroup_cpu_stat *cstat;
+        struct cgroup_rstat_cpu *rstatc;
 
-        cstat = cgroup_cpu_stat_account_begin(cgrp);
+        rstatc = cgroup_cpu_stat_account_begin(cgrp);
 
         switch (index) {
         case CPUTIME_USER:
         case CPUTIME_NICE:
-                cstat->cputime.utime += delta_exec;
+                rstatc->cputime.utime += delta_exec;
                 break;
         case CPUTIME_SYSTEM:
         case CPUTIME_IRQ:
         case CPUTIME_SOFTIRQ:
-                cstat->cputime.stime += delta_exec;
+                rstatc->cputime.stime += delta_exec;
                 break;
         default:
                 break;
         }
 
-        cgroup_cpu_stat_account_end(cgrp, cstat);
+        cgroup_cpu_stat_account_end(cgrp, rstatc);
 }
 
 void cgroup_stat_show_cputime(struct seq_file *seq)
@@ -264,15 +266,15 @@ void cgroup_stat_show_cputime(struct seq_file *seq)
         if (!cgroup_parent(cgrp))
                 return;
 
-        mutex_lock(&cgroup_stat_mutex);
+        mutex_lock(&cgroup_rstat_mutex);
 
-        cgroup_stat_flush_locked(cgrp);
+        cgroup_rstat_flush_locked(cgrp);
 
         usage = cgrp->stat.cputime.sum_exec_runtime;
         cputime_adjust(&cgrp->stat.cputime, &cgrp->stat.prev_cputime,
                        &utime, &stime);
 
-        mutex_unlock(&cgroup_stat_mutex);
+        mutex_unlock(&cgroup_rstat_mutex);
 
         do_div(usage, NSEC_PER_USEC);
         do_div(utime, NSEC_PER_USEC);
@@ -284,23 +286,23 @@ void cgroup_stat_show_cputime(struct seq_file *seq)
                    usage, utime, stime);
 }
 
-int cgroup_stat_init(struct cgroup *cgrp)
+int cgroup_rstat_init(struct cgroup *cgrp)
 {
         int cpu;
 
-        /* the root cgrp has cpu_stat preallocated */
-        if (!cgrp->cpu_stat) {
-                cgrp->cpu_stat = alloc_percpu(struct cgroup_cpu_stat);
-                if (!cgrp->cpu_stat)
+        /* the root cgrp has rstat_cpu preallocated */
+        if (!cgrp->rstat_cpu) {
+                cgrp->rstat_cpu = alloc_percpu(struct cgroup_rstat_cpu);
+                if (!cgrp->rstat_cpu)
                         return -ENOMEM;
         }
 
         /* ->updated_children list is self terminated */
         for_each_possible_cpu(cpu) {
-                struct cgroup_cpu_stat *cstat = cgroup_cpu_stat(cgrp, cpu);
+                struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
 
-                cstat->updated_children = cgrp;
-                u64_stats_init(&cstat->sync);
+                rstatc->updated_children = cgrp;
+                u64_stats_init(&rstatc->sync);
         }
 
         prev_cputime_init(&cgrp->stat.prev_cputime);
@@ -308,31 +310,31 @@ int cgroup_stat_init(struct cgroup *cgrp)
         return 0;
 }
 
-void cgroup_stat_exit(struct cgroup *cgrp)
+void cgroup_rstat_exit(struct cgroup *cgrp)
 {
         int cpu;
 
-        cgroup_stat_flush(cgrp);
+        cgroup_rstat_flush(cgrp);
 
         /* sanity check */
         for_each_possible_cpu(cpu) {
-                struct cgroup_cpu_stat *cstat = cgroup_cpu_stat(cgrp, cpu);
+                struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
 
-                if (WARN_ON_ONCE(cstat->updated_children != cgrp) ||
-                    WARN_ON_ONCE(cstat->updated_next))
+                if (WARN_ON_ONCE(rstatc->updated_children != cgrp) ||
+                    WARN_ON_ONCE(rstatc->updated_next))
                         return;
         }
 
-        free_percpu(cgrp->cpu_stat);
-        cgrp->cpu_stat = NULL;
+        free_percpu(cgrp->rstat_cpu);
+        cgrp->rstat_cpu = NULL;
 }
 
-void __init cgroup_stat_boot(void)
+void __init cgroup_rstat_boot(void)
 {
         int cpu;
 
         for_each_possible_cpu(cpu)
-                raw_spin_lock_init(per_cpu_ptr(&cgroup_cpu_stat_lock, cpu));
+                raw_spin_lock_init(per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu));
 
-        BUG_ON(cgroup_stat_init(&cgrp_dfl_root.cgrp));
+        BUG_ON(cgroup_rstat_init(&cgrp_dfl_root.cgrp));
 }