aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--include/linux/sched.h8
-rw-r--r--kernel/sched/core.c1
-rw-r--r--kernel/sched/fair.c22
3 files changed, 29 insertions, 2 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 166144c04ef6..92744e3f1556 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -555,6 +555,14 @@ struct task_struct {
 	unsigned long			wakee_flip_decay_ts;
 	struct task_struct		*last_wakee;
 
+	/*
+	 * recent_used_cpu is initially set as the last CPU used by a task
+	 * that wakes affine another task. Waker/wakee relationships can
+	 * push tasks around a CPU where each wakeup moves to the next one.
+	 * Tracking a recently used CPU allows a quick search for a recently
+	 * used CPU that may be idle.
+	 */
+	int				recent_used_cpu;
 	int				wake_cpu;
 #endif
 	int				on_rq;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b40540e68104..36f113ac6353 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2461,6 +2461,7 @@ void wake_up_new_task(struct task_struct *p)
 	 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
 	 * as we're not fully set-up yet.
 	 */
+	p->recent_used_cpu = task_cpu(p);
 	__set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
 #endif
 	rq = __task_rq_lock(p, &rf);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index db45b3554682..5eb3ffc9be84 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6197,7 +6197,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
 static int select_idle_sibling(struct task_struct *p, int prev, int target)
 {
 	struct sched_domain *sd;
-	int i;
+	int i, recent_used_cpu;
 
 	if (idle_cpu(target))
 		return target;
@@ -6208,6 +6208,21 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 	if (prev != target && cpus_share_cache(prev, target) && idle_cpu(prev))
 		return prev;
 
+	/* Check a recently used CPU as a potential idle candidate */
+	recent_used_cpu = p->recent_used_cpu;
+	if (recent_used_cpu != prev &&
+	    recent_used_cpu != target &&
+	    cpus_share_cache(recent_used_cpu, target) &&
+	    idle_cpu(recent_used_cpu) &&
+	    cpumask_test_cpu(p->recent_used_cpu, &p->cpus_allowed)) {
+		/*
+		 * Replace recent_used_cpu with prev as it is a potential
+		 * candidate for the next wake.
+		 */
+		p->recent_used_cpu = prev;
+		return recent_used_cpu;
+	}
+
 	sd = rcu_dereference(per_cpu(sd_llc, target));
 	if (!sd)
 		return target;
@@ -6375,9 +6390,12 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 
 	if (!sd) {
 pick_cpu:
-		if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
+		if (sd_flag & SD_BALANCE_WAKE) { /* XXX always ? */
 			new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
 
+			if (want_affine)
+				current->recent_used_cpu = cpu;
+		}
 	} else {
 		new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
 	}