Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c  10  ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index f2aa987027d6..cf2cd6ce4cb2 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -878,7 +878,6 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
 #ifdef CONFIG_SCHED_HRTICK
 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 {
-	int requeue = rq->curr == p;
 	struct sched_entity *se = &p->se;
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
@@ -899,10 +898,10 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 		 * Don't schedule slices shorter than 10000ns, that just
 		 * doesn't make sense. Rely on vruntime for fairness.
 		 */
-		if (!requeue)
+		if (rq->curr != p)
 			delta = max(10000LL, delta);
 
-		hrtick_start(rq, delta, requeue);
+		hrtick_start(rq, delta);
 	}
 }
 #else /* !CONFIG_SCHED_HRTICK */
@@ -1004,6 +1003,8 @@ static void yield_task_fair(struct rq *rq)
  * not idle and an idle cpu is available. The span of cpus to
  * search starts with cpus closest then further out as needed,
  * so we always favor a closer, idle cpu.
+ * Domains may include CPUs that are not usable for migration,
+ * hence we need to mask them out (cpu_active_map)
  *
  * Returns the CPU we should wake onto.
  */
@@ -1031,7 +1032,8 @@ static int wake_idle(int cpu, struct task_struct *p)
 		    || ((sd->flags & SD_WAKE_IDLE_FAR)
 			&& !task_hot(p, task_rq(p)->clock, sd))) {
 			cpus_and(tmp, sd->span, p->cpus_allowed);
-			for_each_cpu_mask(i, tmp) {
+			cpus_and(tmp, tmp, cpu_active_map);
+			for_each_cpu_mask_nr(i, tmp) {
 				if (idle_cpu(i)) {
 					if (i != task_cpu(p)) {
 						schedstat_inc(p,
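
The hrtick half of this change drops the cached requeue flag and the third
argument to hrtick_start(): the caller now compares rq->curr against p
directly, and only tasks that are not currently on the CPU get their slice
clamped to the 10000ns floor. A minimal standalone sketch of that clamping
rule, in plain userspace C; clamp_slice and MIN_SLICE_NS are illustrative
names, not kernel identifiers.

    #include <stdio.h>
    #include <stdint.h>

    #define MIN_SLICE_NS 10000LL	/* floor from the patch's comment */

    /* curr_is_p plays the role of the rq->curr == p comparison that
     * replaces the old cached 'requeue' flag. */
    static int64_t clamp_slice(int64_t delta, int curr_is_p)
    {
    	/* Only tasks not currently running get the 10000ns floor. */
    	if (!curr_is_p && delta < MIN_SLICE_NS)
    		delta = MIN_SLICE_NS;
    	return delta;
    }

    int main(void)
    {
    	printf("%lld\n", (long long)clamp_slice(3000, 0));	/* prints 10000 */
    	printf("%lld\n", (long long)clamp_slice(3000, 1));	/* prints 3000 */
    	return 0;
    }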
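
The wake_idle() half intersects the candidate set with cpu_active_map so
that CPUs which are present but not usable as migration targets (e.g. in
the middle of being hot-unplugged) are never picked, and switches the scan
to for_each_cpu_mask_nr(). A standalone sketch of the same mask-then-scan
idea; the bitmask type and helpers below are illustrative stand-ins for the
kernel's cpumask API, not the real thing.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t cpumask_t;	/* up to 64 CPUs is enough for a sketch */

    /* Stand-in for the two cpus_and() calls followed by the
     * for_each_cpu_mask_nr() + idle_cpu() scan in wake_idle(). */
    static int find_idle_cpu(cpumask_t span, cpumask_t allowed,
    			 cpumask_t active, const int *cpu_is_idle)
    {
    	cpumask_t tmp = span & allowed & active;
    	int i;

    	for (i = 0; i < 64; i++)
    		if (((tmp >> i) & 1) && cpu_is_idle[i])
    			return i;
    	return -1;	/* no usable idle CPU */
    }

    int main(void)
    {
    	int idle[64] = { [2] = 1, [3] = 1 };

    	/* CPU 2 is idle but inactive (mid hot-unplug, say), so the
    	 * active mask 0xB (CPUs 0, 1, 3) steers the pick to CPU 3. */
    	printf("%d\n", find_idle_cpu(0xF, 0xF, 0xB, idle));	/* prints 3 */
    	return 0;
    }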