 include/linux/sched.h    |  1 -
 include/linux/topology.h |  2 --
 kernel/sched/core.c      |  1 -
 kernel/sched/fair.c      | 34 +++-------------------------------
 4 files changed, 3 insertions(+), 35 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b8c86648a2f9..f3eebc121ebc 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -860,7 +860,6 @@ enum cpu_idle_type {
 #define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
 #define SD_BALANCE_WAKE		0x0010	/* Balance on wakeup */
 #define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
-#define SD_PREFER_LOCAL		0x0040	/* Prefer to keep tasks local to this domain */
 #define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
 #define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
diff --git a/include/linux/topology.h b/include/linux/topology.h
index fec12d667211..d3cf0d6e7712 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -129,7 +129,6 @@ int arch_update_cpu_topology(void);
 				| 1*SD_BALANCE_FORK			\
 				| 0*SD_BALANCE_WAKE			\
 				| 1*SD_WAKE_AFFINE			\
-				| 0*SD_PREFER_LOCAL			\
 				| 0*SD_SHARE_CPUPOWER			\
 				| 1*SD_SHARE_PKG_RESOURCES		\
 				| 0*SD_SERIALIZE			\
@@ -160,7 +159,6 @@ int arch_update_cpu_topology(void);
 				| 1*SD_BALANCE_FORK			\
 				| 0*SD_BALANCE_WAKE			\
 				| 1*SD_WAKE_AFFINE			\
-				| 0*SD_PREFER_LOCAL			\
 				| 0*SD_SHARE_CPUPOWER			\
 				| 0*SD_SHARE_PKG_RESOURCES		\
 				| 0*SD_SERIALIZE			\
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c9a3655e572d..4376c9f34790 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6622,7 +6622,6 @@ sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
 					| 0*SD_BALANCE_FORK
 					| 0*SD_BALANCE_WAKE
 					| 0*SD_WAKE_AFFINE
-					| 0*SD_PREFER_LOCAL
 					| 0*SD_SHARE_CPUPOWER
 					| 0*SD_SHARE_PKG_RESOURCES
 					| 1*SD_SERIALIZE
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 287bfaca6420..01d3eda6b7f9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2686,7 +2686,6 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 	int prev_cpu = task_cpu(p);
 	int new_cpu = cpu;
 	int want_affine = 0;
-	int want_sd = 1;
 	int sync = wake_flags & WF_SYNC;
 
 	if (p->nr_cpus_allowed == 1)
@@ -2704,48 +2703,21 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 			continue;
 
 		/*
-		 * If power savings logic is enabled for a domain, see if we
-		 * are not overloaded, if so, don't balance wider.
-		 */
-		if (tmp->flags & (SD_PREFER_LOCAL)) {
-			unsigned long power = 0;
-			unsigned long nr_running = 0;
-			unsigned long capacity;
-			int i;
-
-			for_each_cpu(i, sched_domain_span(tmp)) {
-				power += power_of(i);
-				nr_running += cpu_rq(i)->cfs.nr_running;
-			}
-
-			capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);
-
-			if (nr_running < capacity)
-				want_sd = 0;
-		}
-
-		/*
 		 * If both cpu and prev_cpu are part of this domain,
 		 * cpu is a valid SD_WAKE_AFFINE target.
 		 */
 		if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
 		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
 			affine_sd = tmp;
-			want_affine = 0;
-		}
-
-		if (!want_sd && !want_affine)
 			break;
+		}
 
-		if (!(tmp->flags & sd_flag))
-			continue;
-
-		if (want_sd)
+		if (tmp->flags & sd_flag)
 			sd = tmp;
 	}
 
 	if (affine_sd) {
-		if (cpu == prev_cpu || wake_affine(affine_sd, p, sync))
+		if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
 			prev_cpu = cpu;
 
 		new_cpu = select_idle_sibling(p, prev_cpu);