author	Linus Torvalds <torvalds@linux-foundation.org>	2015-09-17 13:49:42 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-09-17 13:49:42 -0400
commit	c2ea72fd869145130969d6b07273c479cf2a22f5 (patch)
tree	da226ab4151c20ae259249f8fc301744eeb461c6
parent	a706797febf4ff60ad61f855a01707be9fc3cf4c (diff)
parent	5473e0cc37c03c576adbda7591a6cc8e37c1bb7f (diff)
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
 "A migrate_tasks() locking fix, and a late-coming nohz change plus a
  nohz debug check"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched: 'Annotate' migrate_tasks()
  nohz: Assert existing housekeepers when nohz full enabled
  nohz: Affine unpinned timers to housekeepers
-rw-r--r--	include/linux/tick.h	 9
-rw-r--r--	kernel/sched/core.c	36
-rw-r--r--	kernel/time/tick-sched.c	15
3 files changed, 51 insertions, 9 deletions
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 48d901f83f92..e312219ff823 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -147,11 +147,20 @@ static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
 	cpumask_or(mask, mask, tick_nohz_full_mask);
 }
 
+static inline int housekeeping_any_cpu(void)
+{
+	return cpumask_any_and(housekeeping_mask, cpu_online_mask);
+}
+
 extern void tick_nohz_full_kick(void);
 extern void tick_nohz_full_kick_cpu(int cpu);
 extern void tick_nohz_full_kick_all(void);
 extern void __tick_nohz_task_switch(void);
 #else
+static inline int housekeeping_any_cpu(void)
+{
+	return smp_processor_id();
+}
 static inline bool tick_nohz_full_enabled(void) { return false; }
 static inline bool tick_nohz_full_cpu(int cpu) { return false; }
 static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
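
For intuition, here is a minimal userspace model of the helper added above: cpumask_any_and() picks some CPU present in both masks (modeled here as the first set bit), so housekeeping_any_cpu() yields an online housekeeping CPU. The flat bitmasks and the example mask values are invented stand-ins for the kernel's struct cpumask, not kernel API.

/* Userspace model of the housekeeping_any_cpu() fallback. CPU masks are
 * plain bitmasks; the values are made-up examples, not real kernel state. */
#include <stdio.h>

/* Hypothetical 8-CPU box: CPUs 1-6 are nohz_full, 0 and 7 do housekeeping. */
static unsigned int housekeeping_mask = 0x81; /* CPUs 0 and 7 */
static unsigned int cpu_online_mask   = 0xfe; /* CPU 0 is offline */

/* Model of cpumask_any_and(): index of the first bit set in both masks. */
static int any_and(unsigned int a, unsigned int b)
{
	unsigned int both = a & b;
	return both ? __builtin_ctz(both) : -1;
}

static int housekeeping_any_cpu(void)
{
	return any_and(housekeeping_mask, cpu_online_mask);
}

int main(void)
{
	/* CPU 0 is offline, so the first online housekeeper is CPU 7. */
	printf("housekeeping_any_cpu() -> %d\n", housekeeping_any_cpu());
	return 0;
}

The #else branch in the hunk covers kernels without CONFIG_NO_HZ_FULL, where every CPU does housekeeping and the current CPU is always a valid answer.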
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3595403921bd..97d276ff1edb 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -621,18 +621,21 @@ int get_nohz_timer_target(void)
 	int i, cpu = smp_processor_id();
 	struct sched_domain *sd;
 
-	if (!idle_cpu(cpu))
+	if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
 		return cpu;
 
 	rcu_read_lock();
 	for_each_domain(cpu, sd) {
 		for_each_cpu(i, sched_domain_span(sd)) {
-			if (!idle_cpu(i)) {
+			if (!idle_cpu(i) && is_housekeeping_cpu(cpu)) {
 				cpu = i;
 				goto unlock;
 			}
 		}
 	}
+
+	if (!is_housekeeping_cpu(cpu))
+		cpu = housekeeping_any_cpu();
 unlock:
 	rcu_read_unlock();
 	return cpu;
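
Taken together, the hunk above keeps unpinned timers off nohz_full CPUs: the current CPU is used only when it is a busy housekeeper, the domain scan supplies a busy alternative, and a final fallback forces a housekeeping CPU. The following toy model (invented idle/housekeeping state, sched domains flattened into one linear scan) mirrors that flow, including the detail that the in-loop housekeeping test checks @cpu, exactly as in the patch:

/* Toy model of the revised get_nohz_timer_target() flow. Not kernel code:
 * masks and the idle[] array are example data, and the kernel function
 * takes no argument (it uses smp_processor_id()). */
#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS 4
static bool idle[NR_CPUS] = { false, true, true, false }; /* example state */
static unsigned int housekeeping_mask = 0x1; /* only CPU 0 is a housekeeper */

static bool is_housekeeping_cpu(int cpu)
{
	return housekeeping_mask & (1u << cpu);
}

static int housekeeping_any_cpu(void)
{
	return __builtin_ctz(housekeeping_mask); /* assume one is online */
}

static int get_nohz_timer_target(int this_cpu)
{
	int i, cpu = this_cpu;

	if (!idle[cpu] && is_housekeeping_cpu(cpu))
		return cpu;

	for (i = 0; i < NR_CPUS; i++) {
		/* Mirrors the hunk above: the test checks @cpu, so when
		 * called from a nohz_full CPU this scan never matches and
		 * we fall through to the fallback below. */
		if (!idle[i] && is_housekeeping_cpu(cpu))
			return i;
	}

	if (!is_housekeeping_cpu(cpu))
		cpu = housekeeping_any_cpu();
	return cpu;
}

int main(void)
{
	/* Running on busy nohz_full CPU 3: the timer goes to housekeeper 0. */
	printf("target from CPU 3 -> %d\n", get_nohz_timer_target(3));
	return 0;
}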
@@ -5178,24 +5181,47 @@ static void migrate_tasks(struct rq *dead_rq)
 			break;
 
 		/*
-		 * Ensure rq->lock covers the entire task selection
-		 * until the migration.
+		 * pick_next_task assumes pinned rq->lock.
 		 */
 		lockdep_pin_lock(&rq->lock);
 		next = pick_next_task(rq, &fake_task);
 		BUG_ON(!next);
 		next->sched_class->put_prev_task(rq, next);
 
+		/*
+		 * Rules for changing task_struct::cpus_allowed are holding
+		 * both pi_lock and rq->lock, such that holding either
+		 * stabilizes the mask.
+		 *
+		 * Drop rq->lock is not quite as disastrous as it usually is
+		 * because !cpu_active at this point, which means load-balance
+		 * will not interfere. Also, stop-machine.
+		 */
+		lockdep_unpin_lock(&rq->lock);
+		raw_spin_unlock(&rq->lock);
+		raw_spin_lock(&next->pi_lock);
+		raw_spin_lock(&rq->lock);
+
+		/*
+		 * Since we're inside stop-machine, _nothing_ should have
+		 * changed the task, WARN if weird stuff happened, because in
+		 * that case the above rq->lock drop is a fail too.
+		 */
+		if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) {
+			raw_spin_unlock(&next->pi_lock);
+			continue;
+		}
+
 		/* Find suitable destination for @next, with force if needed. */
 		dest_cpu = select_fallback_rq(dead_rq->cpu, next);
 
-		lockdep_unpin_lock(&rq->lock);
 		rq = __migrate_task(rq, next, dest_cpu);
 		if (rq != dead_rq) {
 			raw_spin_unlock(&rq->lock);
 			rq = dead_rq;
 			raw_spin_lock(&rq->lock);
 		}
+		raw_spin_unlock(&next->pi_lock);
 	}
 
 	rq->stop = stop;
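
The heart of the migrate_tasks() fix is a lock-ordering dance: task_struct::cpus_allowed is only stable while pi_lock or rq->lock is held, and pi_lock nests outside rq->lock, so rq->lock must be dropped and retaken around the pi_lock acquisition, with a recheck afterwards in case the task moved in the window. A minimal pthread sketch of that pattern; struct rq, struct task and the queued flag here are illustrative stand-ins, not the kernel's types:

/* Pthread sketch of the drop/retake pattern in the hunk above.
 * Build with: cc lockdance.c -lpthread */
#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>

struct rq   { pthread_mutex_t lock; };
struct task { pthread_mutex_t pi_lock; struct rq *rq; bool queued; };

/* Returns false if the task changed under us and must be skipped. */
static bool lock_task_and_rq(struct rq *rq, struct task *next)
{
	/* rq->lock is held on entry; pi_lock nests outside rq->lock,
	 * so rq->lock has to be dropped before pi_lock is acquired. */
	pthread_mutex_unlock(&rq->lock);
	pthread_mutex_lock(&next->pi_lock);
	pthread_mutex_lock(&rq->lock);

	/* Recheck: under stop-machine this "cannot" happen, which is why
	 * the kernel WARNs rather than silently retrying. */
	if (next->rq != rq || !next->queued) {
		pthread_mutex_unlock(&next->pi_lock);
		return false;
	}
	return true;
}

int main(void)
{
	struct rq rq   = { PTHREAD_MUTEX_INITIALIZER };
	struct task t  = { PTHREAD_MUTEX_INITIALIZER, &rq, true };

	pthread_mutex_lock(&rq.lock);          /* as migrate_tasks() holds it */
	if (lock_task_and_rq(&rq, &t)) {
		puts("both pi_lock and rq->lock held; cpus_allowed is stable");
		pthread_mutex_unlock(&t.pi_lock);
	}
	pthread_mutex_unlock(&rq.lock);
	return 0;
}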
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 3319e16f31e5..7c7ec4515983 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -290,16 +290,17 @@ static int __init tick_nohz_full_setup(char *str)
 __setup("nohz_full=", tick_nohz_full_setup);
 
 static int tick_nohz_cpu_down_callback(struct notifier_block *nfb,
 				       unsigned long action,
 				       void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_DOWN_PREPARE:
 		/*
-		 * If we handle the timekeeping duty for full dynticks CPUs,
-		 * we can't safely shutdown that CPU.
+		 * The boot CPU handles housekeeping duty (unbound timers,
+		 * workqueues, timekeeping, ...) on behalf of full dynticks
+		 * CPUs. It must remain online when nohz full is enabled.
 		 */
 		if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
 			return NOTIFY_BAD;
@@ -370,6 +371,12 @@ void __init tick_nohz_init(void)
 	cpu_notifier(tick_nohz_cpu_down_callback, 0);
 	pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
 		cpumask_pr_args(tick_nohz_full_mask));
+
+	/*
+	 * We need at least one CPU to handle housekeeping work such
+	 * as timekeeping, unbound timers, workqueues, ...
+	 */
+	WARN_ON_ONCE(cpumask_empty(housekeeping_mask));
 }
 #endif
 
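The new WARN_ON_ONCE() flags a configuration that leaves no housekeeper for timekeeping, unbound timers and workqueues. A small model of the check, assuming housekeeping_mask is derived as cpu_possible_mask with the nohz_full CPUs removed (mask values invented for illustration):

/* Model of the boot-time sanity check: nohz_full= covering every possible
 * CPU would leave housekeeping_mask empty. Bitmasks stand in for cpumasks. */
#include <stdio.h>

int main(void)
{
	unsigned int cpu_possible_mask   = 0xf; /* 4 CPUs */
	unsigned int tick_nohz_full_mask = 0xf; /* nohz_full=0-3: all of them */

	/* housekeeping_mask = cpu_possible_mask & ~tick_nohz_full_mask */
	unsigned int housekeeping_mask =
		cpu_possible_mask & ~tick_nohz_full_mask;

	if (!housekeeping_mask)
		puts("WARN: no CPU left for timekeeping/unbound timers");
	return 0;
}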