author    Linus Torvalds <torvalds@linux-foundation.org>  2017-03-07 17:42:34 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-03-07 17:42:34 -0500
commit    609b07b72d3caaa8eed3a238886467946b78fa5e (patch)
tree      b12ee470ed68675b3bbe4c0dcfa63a9978821fce /kernel
parent    c3abcabe813b4a0976b58f7e6c1e63b2070c8dda (diff)
parent    f94c8d116997597fc00f0812b0ab9256e7b0c58f (diff)
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
 "A fix for KVM's scheduler clock which (erroneously) was always marked
  unstable, a fix for RT/DL load balancing, plus latency fixes"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/clock, x86/tsc: Rework the x86 'unstable' sched_clock() interface
  sched/core: Fix pick_next_task() for RT,DL
  sched/fair: Make select_idle_cpu() more aggressive
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c      11
-rw-r--r--  kernel/sched/fair.c       2
-rw-r--r--  kernel/sched/features.h   5
3 files changed, 14 insertions, 4 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 956383844116..3b31fc05a0f1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3287,10 +3287,15 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	struct task_struct *p;
 
 	/*
-	 * Optimization: we know that if all tasks are in
-	 * the fair class we can call that function directly:
+	 * Optimization: we know that if all tasks are in the fair class we can
+	 * call that function directly, but only if the @prev task wasn't of a
+	 * higher scheduling class, because otherwise those loose the
+	 * opportunity to pull in more work from other CPUs.
 	 */
-	if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
+	if (likely((prev->sched_class == &idle_sched_class ||
+		    prev->sched_class == &fair_sched_class) &&
+		   rq->nr_running == rq->cfs.h_nr_running)) {
+
 		p = fair_sched_class.pick_next_task(rq, prev, rf);
 		if (unlikely(p == RETRY_TASK))
 			goto again;
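
The hunk above narrows the CFS-only fast path: it is now taken only when the outgoing task was in the fair or idle class, so a blocking RT/DL task still goes through the full class walk where the higher classes get a chance to pull work from other CPUs. A minimal userspace model of just that condition (the rq_model struct, enum values and function names below are invented for illustration and are not kernel code):

/* fast_path_model.c -- illustrative model only, not kernel source. */
#include <stdbool.h>
#include <stdio.h>

enum class_id { CLASS_IDLE, CLASS_FAIR, CLASS_RT, CLASS_DL };

struct rq_model {
	unsigned int nr_running;       /* all runnable tasks on this CPU   */
	unsigned int cfs_h_nr_running; /* runnable tasks in the fair class */
};

/* Old test: shortcut whenever only fair tasks are runnable. */
static bool fast_path_old(const struct rq_model *rq, enum class_id prev)
{
	(void)prev;
	return rq->nr_running == rq->cfs_h_nr_running;
}

/* New test, mirroring the hunk above: also require a fair/idle prev. */
static bool fast_path_new(const struct rq_model *rq, enum class_id prev)
{
	return (prev == CLASS_IDLE || prev == CLASS_FAIR) &&
	       rq->nr_running == rq->cfs_h_nr_running;
}

int main(void)
{
	/* An RT task just blocked; only fair tasks remain runnable. */
	struct rq_model rq = { .nr_running = 2, .cfs_h_nr_running = 2 };

	printf("old: %d (took the shortcut, no chance to pull RT work)\n",
	       fast_path_old(&rq, CLASS_RT));
	printf("new: %d (falls through to the full class walk)\n",
	       fast_path_new(&rq, CLASS_RT));
	return 0;
}
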
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3e88b35ac157..dea138964b91 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5799,7 +5799,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
 	 * Due to large variance we need a large fuzz factor; hackbench in
 	 * particularly is sensitive here.
 	 */
-	if ((avg_idle / 512) < avg_cost)
+	if (sched_feat(SIS_AVG_CPU) && (avg_idle / 512) < avg_cost)
		return -1;
 
 	time = local_clock();
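
SIS_AVG_CPU, tested above via sched_feat() and defined (default off) in the features.h hunk below, is one of the scheduler's feature flags; with CONFIG_SCHED_DEBUG these can be flipped at runtime through /sys/kernel/debug/sched_features. A rough, self-contained sketch of the X-macro pattern behind SCHED_FEAT()/sched_feat() (the FEATURE_LIST macro and helper names are made up for this example; the kernel's actual definitions differ):

/* sched_feat_model.c -- simplified illustration of the X-macro pattern. */
#include <stdbool.h>
#include <stdio.h>

/* Feature list in the style of kernel/sched/features.h (two entries only). */
#define FEATURE_LIST(F)         \
	F(TTWU_QUEUE, true)     \
	F(SIS_AVG_CPU, false)

/* Expansion 1: one index per feature. */
#define AS_ENUM(name, enabled) FEAT_##name,
enum { FEATURE_LIST(AS_ENUM) NR_FEATURES };

/* Expansion 2: default bitmask, one bit per feature that defaults to on. */
#define AS_BIT(name, enabled) ((enabled ? 1u : 0u) << FEAT_##name) |
static unsigned int sched_features = FEATURE_LIST(AS_BIT) 0u;

/* Rough equivalent of the kernel's sched_feat() test. */
#define sched_feat(name) ((sched_features & (1u << FEAT_##name)) != 0)

int main(void)
{
	printf("TTWU_QUEUE=%d SIS_AVG_CPU=%d\n",
	       sched_feat(TTWU_QUEUE), sched_feat(SIS_AVG_CPU));
	return 0;
}
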
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 69631fa46c2f..1b3c8189b286 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -51,6 +51,11 @@ SCHED_FEAT(NONTASK_CAPACITY, true)
  */
 SCHED_FEAT(TTWU_QUEUE, true)
 
+/*
+ * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
+ */
+SCHED_FEAT(SIS_AVG_CPU, false)
+
 #ifdef HAVE_RT_PUSH_IPI
 /*
  * In order to avoid a thundering herd attack of CPUs that are