-rw-r--r--  include/linux/cgroup.h |  1
-rw-r--r--  kernel/cgroup/rstat.c  | 58
2 files changed, 46 insertions(+), 13 deletions(-)
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 5c6018fef5aa..c9fdf6f57913 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -696,6 +696,7 @@ static inline void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
  */
 void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
 void cgroup_rstat_flush(struct cgroup *cgrp);
+void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp);
 void cgroup_rstat_flush_hold(struct cgroup *cgrp);
 void cgroup_rstat_flush_release(void);
 
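For reference, a minimal caller-side sketch of how the two flush entry points are meant to be chosen after this change. The wrapper functions below are hypothetical and exist only to illustrate the intended calling contexts:

#include <linux/cgroup.h>

/* Sleepable context: the plain flush may block and yield under contention. */
static void read_stats_sleepable(struct cgroup *cgrp)
{
	cgroup_rstat_flush(cgrp);		/* may block */
	/* ... read the freshly flushed counters ... */
}

/* Any context, including with IRQs disabled: use the irqsafe variant. */
static void read_stats_atomic(struct cgroup *cgrp)
{
	cgroup_rstat_flush_irqsafe(cgrp);	/* never sleeps */
	/* ... read the freshly flushed counters ... */
}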
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index d49bf92ac3d4..3386fb251a9e 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -2,7 +2,7 @@
 
 #include <linux/sched/cputime.h>
 
-static DEFINE_MUTEX(cgroup_rstat_mutex);
+static DEFINE_SPINLOCK(cgroup_rstat_lock);
 static DEFINE_PER_CPU(raw_spinlock_t, cgroup_rstat_cpu_lock);
 
 static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu);
@@ -132,21 +132,31 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
 }
 
 /* see cgroup_rstat_flush() */
-static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
+static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
+	__releases(&cgroup_rstat_lock) __acquires(&cgroup_rstat_lock)
 {
 	int cpu;
 
-	lockdep_assert_held(&cgroup_rstat_mutex);
+	lockdep_assert_held(&cgroup_rstat_lock);
 
 	for_each_possible_cpu(cpu) {
 		raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
 						       cpu);
 		struct cgroup *pos = NULL;
 
-		raw_spin_lock_irq(cpu_lock);
+		raw_spin_lock(cpu_lock);
 		while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu)))
 			cgroup_base_stat_flush(pos, cpu);
-		raw_spin_unlock_irq(cpu_lock);
+		raw_spin_unlock(cpu_lock);
+
+		/* if @may_sleep, play nice and yield if necessary */
+		if (may_sleep && (need_resched() ||
+				  spin_needbreak(&cgroup_rstat_lock))) {
+			spin_unlock_irq(&cgroup_rstat_lock);
+			if (!cond_resched())
+				cpu_relax();
+			spin_lock_irq(&cgroup_rstat_lock);
+		}
 	}
 }
 
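The new tail of the loop is a lock-break: when the caller may sleep, the flush drops cgroup_rstat_lock between CPUs if the scheduler wants the CPU back (need_resched()) or another CPU is spinning on the lock (spin_needbreak()), so a flush over many CPUs cannot monopolize the lock or keep IRQs disabled for long stretches. A minimal sketch of the same pattern in isolation, using a hypothetical demo_lock and loop body:

#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* hypothetical, for illustration only */

static void demo_flush_all(bool may_sleep)
{
	int cpu;

	spin_lock_irq(&demo_lock);
	for_each_possible_cpu(cpu) {
		/* ... per-cpu work that must not sleep ... */

		/* lock-break: let waiters and the scheduler in between CPUs */
		if (may_sleep && (need_resched() || spin_needbreak(&demo_lock))) {
			spin_unlock_irq(&demo_lock);
			if (!cond_resched())
				cpu_relax();
			spin_lock_irq(&demo_lock);
		}
	}
	spin_unlock_irq(&demo_lock);
}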
@@ -160,12 +170,31 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
  *
  * This also gets all cgroups in the subtree including @cgrp off the
  * ->updated_children lists.
+ *
+ * This function may block.
  */
 void cgroup_rstat_flush(struct cgroup *cgrp)
 {
-	mutex_lock(&cgroup_rstat_mutex);
-	cgroup_rstat_flush_locked(cgrp);
-	mutex_unlock(&cgroup_rstat_mutex);
+	might_sleep();
+
+	spin_lock_irq(&cgroup_rstat_lock);
+	cgroup_rstat_flush_locked(cgrp, true);
+	spin_unlock_irq(&cgroup_rstat_lock);
+}
+
+/**
+ * cgroup_rstat_flush_irqsafe - irqsafe version of cgroup_rstat_flush()
+ * @cgrp: target cgroup
+ *
+ * This function can be called from any context.
+ */
+void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cgroup_rstat_lock, flags);
+	cgroup_rstat_flush_locked(cgrp, false);
+	spin_unlock_irqrestore(&cgroup_rstat_lock, flags);
 }
 
 /**
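The two entry points differ only in how they disable interrupts: cgroup_rstat_flush() is documented to block, so IRQs are known to be enabled on entry and spin_lock_irq()/spin_unlock_irq() can toggle them unconditionally, while cgroup_rstat_flush_irqsafe() may be called with IRQs already off and therefore must save and restore the previous state with spin_lock_irqsave()/spin_unlock_irqrestore(). A short sketch of that general rule with a hypothetical stat_lock:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(stat_lock);	/* hypothetical lock, illustration only */

/* Caller guarantees IRQs are enabled (sleepable path). */
static void stat_sync_sleepable(void)
{
	spin_lock_irq(&stat_lock);	/* unconditionally disables IRQs */
	/* ... do the flush ... */
	spin_unlock_irq(&stat_lock);	/* unconditionally re-enables IRQs */
}

/* Caller may already have IRQs disabled (any context). */
static void stat_sync_any_context(void)
{
	unsigned long flags;

	spin_lock_irqsave(&stat_lock, flags);	/* remembers prior IRQ state */
	/* ... do the flush ... */
	spin_unlock_irqrestore(&stat_lock, flags);
}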
@@ -174,21 +203,24 @@ void cgroup_rstat_flush(struct cgroup *cgrp)
  *
  * Flush stats in @cgrp's subtree and prevent further flushes. Must be
  * paired with cgroup_rstat_flush_release().
+ *
+ * This function may block.
  */
 void cgroup_rstat_flush_hold(struct cgroup *cgrp)
-	__acquires(&cgroup_rstat_mutex)
+	__acquires(&cgroup_rstat_lock)
 {
-	mutex_lock(&cgroup_rstat_mutex);
-	cgroup_rstat_flush_locked(cgrp);
+	might_sleep();
+	spin_lock_irq(&cgroup_rstat_lock);
+	cgroup_rstat_flush_locked(cgrp, true);
 }
 
 /**
  * cgroup_rstat_flush_release - release cgroup_rstat_flush_hold()
  */
 void cgroup_rstat_flush_release(void)
-	__releases(&cgroup_rstat_mutex)
+	__releases(&cgroup_rstat_lock)
 {
-	mutex_unlock(&cgroup_rstat_mutex);
+	spin_unlock_irq(&cgroup_rstat_lock);
 }
 
 int cgroup_rstat_init(struct cgroup *cgrp)
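With the mutex gone, cgroup_rstat_flush_hold() now returns with cgroup_rstat_lock held and IRQs disabled, so the section between hold and release must not sleep. A minimal sketch of a hypothetical reader using the pair (the function name and the read step are illustrative only):

#include <linux/cgroup.h>

static void read_subtree_stats(struct cgroup *cgrp)
{
	cgroup_rstat_flush_hold(cgrp);	/* may block; returns with lock held, IRQs off */
	/* ... copy the flushed counters; no sleeping allowed here ... */
	cgroup_rstat_flush_release();	/* drops cgroup_rstat_lock, re-enables IRQs */
}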