about summary refs log tree commit diff stats
path: root/kernel/sched
diff options
context:
space:
mode:
authorAlex Shi <alex.shi@intel.com>2012-07-25 20:55:34 -0400
committerThomas Gleixner <tglx@linutronix.de>2012-08-13 13:02:05 -0400
commitf03542a7019c600163ac4441d8a826c92c1bd510 (patch)
treecd644fb1d0feee636c400a58f4bddec30e2d95b9 /kernel/sched
parent78feefc512a09165627dd534111f651b6c8e605f (diff)
sched: recover SD_WAKE_AFFINE in select_task_rq_fair and code clean up
Since the power-saving code has now been removed from sched, the implementing code in this function is obsolete and even pollutes other logic: for example, 'want_sd' never has a chance to be set to '0', which removes the effect of SD_WAKE_AFFINE here. So, clean up the obsolete code, including SD_PREFER_LOCAL. Signed-off-by: Alex Shi <alex.shi@intel.com> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Link: http://lkml.kernel.org/r/5028F431.6000306@intel.com Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/sched')
-rw-r--r--kernel/sched/core.c1
-rw-r--r--kernel/sched/fair.c34
2 files changed, 3 insertions, 32 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c9a3655e572d..4376c9f34790 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6622,7 +6622,6 @@ sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
6622 | 0*SD_BALANCE_FORK 6622 | 0*SD_BALANCE_FORK
6623 | 0*SD_BALANCE_WAKE 6623 | 0*SD_BALANCE_WAKE
6624 | 0*SD_WAKE_AFFINE 6624 | 0*SD_WAKE_AFFINE
6625 | 0*SD_PREFER_LOCAL
6626 | 0*SD_SHARE_CPUPOWER 6625 | 0*SD_SHARE_CPUPOWER
6627 | 0*SD_SHARE_PKG_RESOURCES 6626 | 0*SD_SHARE_PKG_RESOURCES
6628 | 1*SD_SERIALIZE 6627 | 1*SD_SERIALIZE
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 287bfaca6420..01d3eda6b7f9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2686,7 +2686,6 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
2686 int prev_cpu = task_cpu(p); 2686 int prev_cpu = task_cpu(p);
2687 int new_cpu = cpu; 2687 int new_cpu = cpu;
2688 int want_affine = 0; 2688 int want_affine = 0;
2689 int want_sd = 1;
2690 int sync = wake_flags & WF_SYNC; 2689 int sync = wake_flags & WF_SYNC;
2691 2690
2692 if (p->nr_cpus_allowed == 1) 2691 if (p->nr_cpus_allowed == 1)
@@ -2704,48 +2703,21 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
2704 continue; 2703 continue;
2705 2704
2706 /* 2705 /*
2707 * If power savings logic is enabled for a domain, see if we
2708 * are not overloaded, if so, don't balance wider.
2709 */
2710 if (tmp->flags & (SD_PREFER_LOCAL)) {
2711 unsigned long power = 0;
2712 unsigned long nr_running = 0;
2713 unsigned long capacity;
2714 int i;
2715
2716 for_each_cpu(i, sched_domain_span(tmp)) {
2717 power += power_of(i);
2718 nr_running += cpu_rq(i)->cfs.nr_running;
2719 }
2720
2721 capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);
2722
2723 if (nr_running < capacity)
2724 want_sd = 0;
2725 }
2726
2727 /*
2728 * If both cpu and prev_cpu are part of this domain, 2706 * If both cpu and prev_cpu are part of this domain,
2729 * cpu is a valid SD_WAKE_AFFINE target. 2707 * cpu is a valid SD_WAKE_AFFINE target.
2730 */ 2708 */
2731 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && 2709 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
2732 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) { 2710 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
2733 affine_sd = tmp; 2711 affine_sd = tmp;
2734 want_affine = 0;
2735 }
2736
2737 if (!want_sd && !want_affine)
2738 break; 2712 break;
2713 }
2739 2714
2740 if (!(tmp->flags & sd_flag)) 2715 if (tmp->flags & sd_flag)
2741 continue;
2742
2743 if (want_sd)
2744 sd = tmp; 2716 sd = tmp;
2745 } 2717 }
2746 2718
2747 if (affine_sd) { 2719 if (affine_sd) {
2748 if (cpu == prev_cpu || wake_affine(affine_sd, p, sync)) 2720 if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
2749 prev_cpu = cpu; 2721 prev_cpu = cpu;
2750 2722
2751 new_cpu = select_idle_sibling(p, prev_cpu); 2723 new_cpu = select_idle_sibling(p, prev_cpu);