author	Linus Torvalds <torvalds@linux-foundation.org>	2017-09-13 15:22:32 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-09-13 15:22:32 -0400
commit	ec846ecd6350857a8b8b9a6b78c763d45e0f09b8 (patch)
tree	44896da8501c42f2c41b7fc17975dc7fe16b2789
parent	b5df1b3a5637deae352d282b50d4b99d0e2b8d1d (diff)
parent	9469eb01db891b55367ee7539f1b9f7f6fd2819d (diff)
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
 "Three CPU hotplug related fixes and a debugging improvement"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/debug: Add debugfs knob for "sched_debug"
  sched/core: WARN() when migrating to an offline CPU
  sched/fair: Plug hole between hotplug and active_load_balance()
  sched/fair: Avoid newidle balance for !active CPUs
-rw-r--r--	kernel/sched/core.c	4
-rw-r--r--	kernel/sched/debug.c	5
-rw-r--r--	kernel/sched/fair.c	13
-rw-r--r--	kernel/sched/sched.h	2
-rw-r--r--	kernel/sched/topology.c	4
5 files changed, 25 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 136a76d80dbf..18a6966567da 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1173,6 +1173,10 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
 				      lockdep_is_held(&task_rq(p)->lock)));
 #endif
+	/*
+	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
+	 */
+	WARN_ON_ONCE(!cpu_online(new_cpu));
 #endif
 
 	trace_sched_migrate_task(p, new_cpu);
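
The check uses WARN_ON_ONCE(), which fires a single warning per call site and then stays quiet, so a hot path like set_task_cpu() is not flooded. Below is a minimal userspace sketch of that once-only idiom, assuming GCC/Clang statement expressions; warn_on_once() and cpu_online_stub() are illustrative stand-ins, not kernel API:

/* warn_on_once(): warns the first time cond is true at this call site. */
#include <stdbool.h>
#include <stdio.h>

#define warn_on_once(cond)						\
({									\
	static bool __warned;	/* one flag per call site */		\
	bool __c = (cond);						\
	if (__c && !__warned) {						\
		__warned = true;					\
		fprintf(stderr, "warning: %s\n", #cond);		\
	}								\
	__c;								\
})

/* Stand-in for cpu_online(): pretend only CPUs 0-3 are online. */
static bool cpu_online_stub(int cpu)
{
	return cpu >= 0 && cpu < 4;
}

int main(void)
{
	/* CPUs 2, 5, 8: two are "offline", yet only one warning prints. */
	for (int cpu = 2; cpu <= 8; cpu += 3)
		warn_on_once(!cpu_online_stub(cpu));
	return 0;
}

The static flag inside the statement expression is what gives the kernel macro its per-call-site, warn-once behaviour.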
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 8e536d963652..01217fb5a5de 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -181,11 +181,16 @@ static const struct file_operations sched_feat_fops = {
 	.release	= single_release,
 };
 
+__read_mostly bool sched_debug_enabled;
+
 static __init int sched_init_debug(void)
 {
 	debugfs_create_file("sched_features", 0644, NULL, NULL,
 			&sched_feat_fops);
 
+	debugfs_create_bool("sched_debug", 0644, NULL,
+			&sched_debug_enabled);
+
 	return 0;
 }
 late_initcall(sched_init_debug);
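
debugfs_create_bool() wires a bool variable straight to a Y/N file under /sys/kernel/debug, so the new knob needs no custom file_operations. A minimal sketch of the same idiom in a standalone module, with made-up names (my_driver, my_knob); the function returned a struct dentry * in kernels of this vintage and is void in later ones, but the return value is not needed either way:

#include <linux/debugfs.h>
#include <linux/module.h>

static bool my_knob;		/* toggled from userspace */
static struct dentry *my_dir;	/* our debugfs directory */

static int __init knob_init(void)
{
	my_dir = debugfs_create_dir("my_driver", NULL);
	debugfs_create_bool("my_knob", 0644, my_dir, &my_knob);
	return 0;
}

static void __exit knob_exit(void)
{
	debugfs_remove_recursive(my_dir);
}

module_init(knob_init);
module_exit(knob_exit);
MODULE_LICENSE("GPL");

Since the patch passes a NULL parent, the new file should appear at the debugfs root, so something like echo Y > /sys/kernel/debug/sched_debug toggles the flag at runtime instead of requiring the sched_debug boot parameter.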
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0a85641e62ce..70ba32e08a23 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8437,6 +8437,12 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
 	this_rq->idle_stamp = rq_clock(this_rq);
 
 	/*
+	 * Do not pull tasks towards !active CPUs...
+	 */
+	if (!cpu_active(this_cpu))
+		return 0;
+
+	/*
 	 * This is OK, because current is on_cpu, which avoids it being picked
 	 * for load-balance and preemption/IRQs are still disabled avoiding
 	 * further scheduler activity on it and we're being very careful to
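
The early bail-out is the usual "check the liveness flag before pulling work" guard. A userspace sketch with an atomic stand-in for cpu_active(); all names are illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for cpu_active(this_cpu). */
static atomic_bool this_cpu_active = true;

/* Mirrors the early return added to idle_balance(). */
static int pull_work(void)
{
	if (!atomic_load(&this_cpu_active))
		return 0;	/* never pull work towards a dying CPU */
	puts("pulling work");
	return 1;
}

int main(void)
{
	pull_work();				/* active: pulls work */
	atomic_store(&this_cpu_active, false);	/* hotplug takes us down */
	pull_work();				/* bails out, pulls nothing */
	return 0;
}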
@@ -8543,6 +8549,13 @@ static int active_load_balance_cpu_stop(void *data)
 	struct rq_flags rf;
 
 	rq_lock_irq(busiest_rq, &rf);
+	/*
+	 * Between queueing the stop-work and running it is a hole in which
+	 * CPUs can become inactive. We should not move tasks from or to
+	 * inactive CPUs.
+	 */
+	if (!cpu_active(busiest_cpu) || !cpu_active(target_cpu))
+		goto out_unlock;
 
 	/* make sure the requested cpu hasn't gone down in the meantime */
 	if (unlikely(busiest_cpu != smp_processor_id() ||
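
This is the classic "revalidate after taking the lock" fix for deferred work: the decision to queue the stop-work was made before the lock was held, so its inputs must be re-checked under it. A userspace sketch with pthreads; src_active/dst_active stand in for cpu_active() on the two endpoints, and all names are illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool src_active = true, dst_active = true;

/* Deferred work: queued earlier, runs later, re-checks under the lock. */
static void balance_work(void)
{
	pthread_mutex_lock(&lock);
	/*
	 * Either endpoint may have gone inactive between queueing this
	 * work and running it; moving a task now would be wrong.
	 */
	if (!src_active || !dst_active)
		goto out_unlock;
	puts("moving a task");
out_unlock:
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	balance_work();		/* both endpoints active: moves a task */
	dst_active = false;	/* the hotplug hole: target goes inactive */
	balance_work();		/* the re-check under the lock catches it */
	return 0;
}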
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 746ac78ff492..14db76cd496f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1951,6 +1951,8 @@ extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
 extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
 
 #ifdef CONFIG_SCHED_DEBUG
+extern bool sched_debug_enabled;
+
 extern void print_cfs_stats(struct seq_file *m, int cpu);
 extern void print_rt_stats(struct seq_file *m, int cpu);
 extern void print_dl_stats(struct seq_file *m, int cpu);
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 5d0062cc10cb..f1cf4f306a82 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -14,11 +14,9 @@ cpumask_var_t sched_domains_tmpmask2;
 
 #ifdef CONFIG_SCHED_DEBUG
 
-static __read_mostly int sched_debug_enabled;
-
 static int __init sched_debug_setup(char *str)
 {
-	sched_debug_enabled = 1;
+	sched_debug_enabled = true;
 
 	return 0;
 }