author		Tejun Heo <tj@kernel.org>	2018-04-26 17:29:05 -0400
committer	Tejun Heo <tj@kernel.org>	2018-04-26 17:29:05 -0400
commit		0fa294fb1985c06c4e3325e30e759d4ca580f59a (patch)
tree		41bc9abc473d8df21ab0cb1dca6c1b8746abcacb /kernel/cgroup
parent		6162cef0f741c70eb0c7ac7e6142f85808d8abc4 (diff)
cgroup: Replace cgroup_rstat_mutex with a spinlock
Currently, the rstat flush path is protected with a mutex, which is fine as all the existing users are on the interface file show path. However, rstat is being generalized for use by controllers, and flushing from atomic contexts will be necessary.

This patch replaces cgroup_rstat_mutex with a spinlock and adds an irq-safe flush function - cgroup_rstat_flush_irqsafe(). Explicit yield handling is added to the flush path so that the sleepable flush functions can yield to other threads and flushers.

Signed-off-by: Tejun Heo <tj@kernel.org>
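To illustrate the split this introduces, here is a minimal, hypothetical caller-side sketch. The example_show_path()/example_atomic_path() names are illustrative and not part of the patch; only the two flush calls come from the new interface, and the declarations are assumed to be visible via linux/cgroup.h.

/*
 * Hypothetical callers (not part of the patch) showing which flush
 * variant fits which context after this change.
 */
#include <linux/cgroup.h>

/* Interface-file show path: process context, sleeping is allowed. */
static void example_show_path(struct cgroup *cgrp)
{
	/* May block; drops cgroup_rstat_lock to yield while flushing. */
	cgroup_rstat_flush(cgrp);
}

/* Controller path that may run with IRQs disabled. */
static void example_atomic_path(struct cgroup *cgrp)
{
	/* Safe from any context; never sleeps or yields the lock. */
	cgroup_rstat_flush_irqsafe(cgrp);
}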
Diffstat (limited to 'kernel/cgroup')
-rw-r--r--	kernel/cgroup/rstat.c	| 58
1 file changed, 45 insertions(+), 13 deletions(-)
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index d49bf92ac3d4..3386fb251a9e 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -2,7 +2,7 @@
 
 #include <linux/sched/cputime.h>
 
-static DEFINE_MUTEX(cgroup_rstat_mutex);
+static DEFINE_SPINLOCK(cgroup_rstat_lock);
 static DEFINE_PER_CPU(raw_spinlock_t, cgroup_rstat_cpu_lock);
 
 static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu);
@@ -132,21 +132,31 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
 }
 
 /* see cgroup_rstat_flush() */
-static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
+static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
+	__releases(&cgroup_rstat_lock) __acquires(&cgroup_rstat_lock)
 {
 	int cpu;
 
-	lockdep_assert_held(&cgroup_rstat_mutex);
+	lockdep_assert_held(&cgroup_rstat_lock);
 
 	for_each_possible_cpu(cpu) {
 		raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
 						       cpu);
 		struct cgroup *pos = NULL;
 
-		raw_spin_lock_irq(cpu_lock);
+		raw_spin_lock(cpu_lock);
 		while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu)))
 			cgroup_base_stat_flush(pos, cpu);
-		raw_spin_unlock_irq(cpu_lock);
+		raw_spin_unlock(cpu_lock);
+
+		/* if @may_sleep, play nice and yield if necessary */
+		if (may_sleep && (need_resched() ||
+				  spin_needbreak(&cgroup_rstat_lock))) {
+			spin_unlock_irq(&cgroup_rstat_lock);
+			if (!cond_resched())
+				cpu_relax();
+			spin_lock_irq(&cgroup_rstat_lock);
+		}
 	}
 }
 
@@ -160,12 +170,31 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
  *
  * This also gets all cgroups in the subtree including @cgrp off the
  * ->updated_children lists.
+ *
+ * This function may block.
  */
 void cgroup_rstat_flush(struct cgroup *cgrp)
 {
-	mutex_lock(&cgroup_rstat_mutex);
-	cgroup_rstat_flush_locked(cgrp);
-	mutex_unlock(&cgroup_rstat_mutex);
+	might_sleep();
+
+	spin_lock_irq(&cgroup_rstat_lock);
+	cgroup_rstat_flush_locked(cgrp, true);
+	spin_unlock_irq(&cgroup_rstat_lock);
+}
+
+/**
+ * cgroup_rstat_flush_irqsafe - irqsafe version of cgroup_rstat_flush()
+ * @cgrp: target cgroup
+ *
+ * This function can be called from any context.
+ */
+void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cgroup_rstat_lock, flags);
+	cgroup_rstat_flush_locked(cgrp, false);
+	spin_unlock_irqrestore(&cgroup_rstat_lock, flags);
 }
 
 /**
@@ -174,21 +203,24 @@ void cgroup_rstat_flush(struct cgroup *cgrp)
  *
  * Flush stats in @cgrp's subtree and prevent further flushes. Must be
  * paired with cgroup_rstat_flush_release().
+ *
+ * This function may block.
  */
 void cgroup_rstat_flush_hold(struct cgroup *cgrp)
-	__acquires(&cgroup_rstat_mutex)
+	__acquires(&cgroup_rstat_lock)
 {
-	mutex_lock(&cgroup_rstat_mutex);
-	cgroup_rstat_flush_locked(cgrp);
+	might_sleep();
+	spin_lock_irq(&cgroup_rstat_lock);
+	cgroup_rstat_flush_locked(cgrp, true);
 }
 
 /**
  * cgroup_rstat_flush_release - release cgroup_rstat_flush_hold()
  */
 void cgroup_rstat_flush_release(void)
-	__releases(&cgroup_rstat_mutex)
+	__releases(&cgroup_rstat_lock)
 {
-	mutex_unlock(&cgroup_rstat_mutex);
+	spin_unlock_irq(&cgroup_rstat_lock);
 }
 
 int cgroup_rstat_init(struct cgroup *cgrp)
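The hold/release pair above keeps cgroup_rstat_lock held with IRQs disabled between the two calls, so a reader can consume the freshly flushed stats without them changing underneath. A minimal sketch follows, loosely modeled on the base-stat show path; the field accessed (cgrp->bstat.cputime.sum_exec_runtime) and the function name are assumptions for illustration, not taken from this diff.

/*
 * Hypothetical reader sketch: flush and hold, read, release.
 * The bstat field layout below is assumed and not part of this patch.
 */
static u64 example_read_cputime(struct cgroup *cgrp)
{
	u64 usage;

	cgroup_rstat_flush_hold(cgrp);		/* may block */
	usage = cgrp->bstat.cputime.sum_exec_runtime;
	cgroup_rstat_flush_release();		/* drops cgroup_rstat_lock */
	return usage;
}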