-rw-r--r--  arch/x86/kernel/cpu/amd.c        4
-rw-r--r--  arch/x86/kernel/cpu/centaur.c    2
-rw-r--r--  arch/x86/kernel/cpu/common.c     3
-rw-r--r--  arch/x86/kernel/cpu/cyrix.c      1
-rw-r--r--  arch/x86/kernel/cpu/intel.c      4
-rw-r--r--  arch/x86/kernel/cpu/transmeta.c  2
-rw-r--r--  arch/x86/kernel/tsc.c           35
-rw-r--r--  kernel/sched/core.c             11
-rw-r--r--  kernel/sched/fair.c              2
-rw-r--r--  kernel/sched/features.h          5

10 files changed, 37 insertions, 32 deletions
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 35a5d5dca2fa..c36140d788fe 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -556,10 +556,6 @@ static void early_init_amd(struct cpuinfo_x86 *c)
 	if (c->x86_power & (1 << 8)) {
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
-		if (check_tsc_unstable())
-			clear_sched_clock_stable();
-	} else {
-		clear_sched_clock_stable();
 	}
 
 	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index adc0ebd8bed0..43955ee6715b 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -105,8 +105,6 @@ static void early_init_centaur(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_64
 	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
 #endif
-
-	clear_sched_clock_stable();
 }
 
 static void init_centaur(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index b11b38c3b0bd..58094a1f9e9d 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -88,7 +88,6 @@ static void default_init(struct cpuinfo_x86 *c)
 		strcpy(c->x86_model_id, "386");
 	}
 #endif
-	clear_sched_clock_stable();
 }
 
 static const struct cpu_dev default_cpu = {
@@ -1077,8 +1076,6 @@ static void identify_cpu(struct cpuinfo_x86 *c)
 	 */
 	if (this_cpu->c_init)
 		this_cpu->c_init(c);
-	else
-		clear_sched_clock_stable();
 
 	/* Disable the PN if appropriate */
 	squash_the_stupid_serial_number(c);
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 0a3bc19de017..a70fd61095f8 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -185,7 +185,6 @@ static void early_init_cyrix(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
 		break;
 	}
-	clear_sched_clock_stable();
 }
 
 static void init_cyrix(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index fe0a615a051b..063197771b8d 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -162,10 +162,6 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 	if (c->x86_power & (1 << 8)) {
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
-		if (check_tsc_unstable())
-			clear_sched_clock_stable();
-	} else {
-		clear_sched_clock_stable();
 	}
 
 	/* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c
index 8457b4978668..d77d07ab310b 100644
--- a/arch/x86/kernel/cpu/transmeta.c
+++ b/arch/x86/kernel/cpu/transmeta.c
@@ -16,8 +16,6 @@ static void early_init_transmeta(struct cpuinfo_x86 *c)
 		if (xlvl >= 0x80860001)
 			c->x86_capability[CPUID_8086_0001_EDX] = cpuid_edx(0x80860001);
 	}
-
-	clear_sched_clock_stable();
 }
 
 static void init_transmeta(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 46bcda4cb1c2..4f7a9833d8e5 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -327,9 +327,16 @@ unsigned long long sched_clock(void)
 {
 	return paravirt_sched_clock();
 }
+
+static inline bool using_native_sched_clock(void)
+{
+	return pv_time_ops.sched_clock == native_sched_clock;
+}
 #else
 unsigned long long
 sched_clock(void) __attribute__((alias("native_sched_clock")));
+
+static inline bool using_native_sched_clock(void) { return true; }
 #endif
 
 int check_tsc_unstable(void)
@@ -1112,8 +1119,10 @@ static void tsc_cs_mark_unstable(struct clocksource *cs)
 {
 	if (tsc_unstable)
 		return;
+
 	tsc_unstable = 1;
-	clear_sched_clock_stable();
+	if (using_native_sched_clock())
+		clear_sched_clock_stable();
 	disable_sched_clock_irqtime();
 	pr_info("Marking TSC unstable due to clocksource watchdog\n");
 }
@@ -1135,18 +1144,20 @@ static struct clocksource clocksource_tsc = {
 
 void mark_tsc_unstable(char *reason)
 {
-	if (!tsc_unstable) {
-		tsc_unstable = 1;
+	if (tsc_unstable)
+		return;
+
+	tsc_unstable = 1;
+	if (using_native_sched_clock())
 		clear_sched_clock_stable();
-		disable_sched_clock_irqtime();
-		pr_info("Marking TSC unstable due to %s\n", reason);
-		/* Change only the rating, when not registered */
-		if (clocksource_tsc.mult)
-			clocksource_mark_unstable(&clocksource_tsc);
-		else {
-			clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
-			clocksource_tsc.rating = 0;
-		}
-	}
+	disable_sched_clock_irqtime();
+	pr_info("Marking TSC unstable due to %s\n", reason);
+	/* Change only the rating, when not registered */
+	if (clocksource_tsc.mult) {
+		clocksource_mark_unstable(&clocksource_tsc);
+	} else {
+		clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
+		clocksource_tsc.rating = 0;
+	}
 }
 
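
Taken together, the tsc.c hunks invert the old assumption: sched_clock() is now presumed stable until the TSC is actively marked unstable, which is why the per-vendor clear_sched_clock_stable() calls in amd.c, centaur.c, common.c, cyrix.c, intel.c and transmeta.c above simply disappear. The new using_native_sched_clock() guard matters under paravirtualization: when a hypervisor supplies sched_clock(), TSC instability detected by the watchdog says nothing about that clock, so stability must not be cleared. The following minimal standalone sketch of the gating pattern is illustrative only, not kernel code; sched_clock_fn and hv_sched_clock are hypothetical stand-ins for pv_time_ops.sched_clock and a hypervisor clock.

/* Illustrative sketch only -- not kernel code. */
#include <stdbool.h>
#include <stdio.h>

static unsigned long long native_sched_clock(void) { return 1000; }
static unsigned long long hv_sched_clock(void)     { return 2000; }

/* Stand-in for pv_time_ops.sched_clock. */
static unsigned long long (*sched_clock_fn)(void) = native_sched_clock;

static bool using_native_sched_clock(void)
{
	return sched_clock_fn == native_sched_clock;
}

static bool sched_clock_stable = true;

static void mark_tsc_unstable(const char *reason)
{
	/* Only the native, TSC-backed clock inherits TSC instability;
	 * a hypervisor-provided clock may still be perfectly stable. */
	if (using_native_sched_clock())
		sched_clock_stable = false;
	printf("Marking TSC unstable due to %s\n", reason);
}

int main(void)
{
	mark_tsc_unstable("clocksource watchdog");
	printf("native:   stable=%d\n", sched_clock_stable);	/* 0 */

	sched_clock_fn = hv_sched_clock;	/* paravirt override */
	sched_clock_stable = true;
	mark_tsc_unstable("clocksource watchdog");
	printf("paravirt: stable=%d\n", sched_clock_stable);	/* 1 */
	return 0;
}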
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 956383844116..3b31fc05a0f1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3287,10 +3287,15 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	struct task_struct *p;
 
 	/*
-	 * Optimization: we know that if all tasks are in
-	 * the fair class we can call that function directly:
+	 * Optimization: we know that if all tasks are in the fair class we can
+	 * call that function directly, but only if the @prev task wasn't of a
+	 * higher scheduling class, because otherwise those lose the
+	 * opportunity to pull in more work from other CPUs.
 	 */
-	if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
+	if (likely((prev->sched_class == &idle_sched_class ||
+		    prev->sched_class == &fair_sched_class) &&
+		   rq->nr_running == rq->cfs.h_nr_running)) {
+
 		p = fair_sched_class.pick_next_task(rq, prev, rf);
 		if (unlikely(p == RETRY_TASK))
 			goto again;
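
The core.c hunk narrows the fair-class fast path in pick_next_task(). The condition rq->nr_running == rq->cfs.h_nr_running alone is not enough: if the task being switched out belonged to a higher class (RT or deadline), that class's own pick_next_task() is where it gets the chance to pull work from other CPUs, and the shortcut would skip it. A toy model of the guard, not kernel code and with deliberately simplified types, might look like this:

/* Illustrative sketch only -- not kernel code. */
#include <stddef.h>
#include <stdio.h>

struct rq {
	int nr_running;
	int cfs_h_nr_running;
};

/* Toy scheduling classes in priority order: dl > rt > fair > idle. */
struct sched_class {
	const char *name;
	/* In the real kernel, a higher class's pick_next_task() may also
	 * pull work from other CPUs; skipping it loses that opportunity. */
	int (*pick_next_task)(struct rq *rq);
};

static int pick_fair(struct rq *rq) { (void)rq; return 42; }
static int pick_none(struct rq *rq) { (void)rq; return -1; }

static const struct sched_class dl_sched_class   = { "dl",   pick_none };
static const struct sched_class rt_sched_class   = { "rt",   pick_none };
static const struct sched_class fair_sched_class = { "fair", pick_fair };
static const struct sched_class idle_sched_class = { "idle", pick_none };

static const struct sched_class *classes[] = {
	&dl_sched_class, &rt_sched_class, &fair_sched_class, &idle_sched_class,
};

static int pick_next_task(struct rq *rq, const struct sched_class *prev_class)
{
	/* Fast path: everything runnable is in the fair class AND prev was
	 * not of a higher class, so no higher class misses its pull chance. */
	if ((prev_class == &idle_sched_class ||
	     prev_class == &fair_sched_class) &&
	    rq->nr_running == rq->cfs_h_nr_running)
		return fair_sched_class.pick_next_task(rq);

	/* Slow path: walk every class in priority order. */
	for (size_t i = 0; i < sizeof(classes) / sizeof(classes[0]); i++) {
		int task = classes[i]->pick_next_task(rq);
		if (task >= 0)
			return task;
	}
	return -1;
}

int main(void)
{
	struct rq rq = { .nr_running = 3, .cfs_h_nr_running = 3 };

	/* prev was fair: the shortcut is safe. */
	printf("fast path picked %d\n", pick_next_task(&rq, &fair_sched_class));
	/* prev was rt: take the slow path even though only fair tasks remain. */
	printf("slow path picked %d\n", pick_next_task(&rq, &rt_sched_class));
	return 0;
}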
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3e88b35ac157..dea138964b91 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5799,7 +5799,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
 	 * Due to large variance we need a large fuzz factor; hackbench in
 	 * particular is sensitive here.
 	 */
-	if ((avg_idle / 512) < avg_cost)
+	if (sched_feat(SIS_AVG_CPU) && (avg_idle / 512) < avg_cost)
 		return -1;
 
 	time = local_clock();
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 69631fa46c2f..1b3c8189b286 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -51,6 +51,11 @@ SCHED_FEAT(NONTASK_CAPACITY, true)
  */
 SCHED_FEAT(TTWU_QUEUE, true)
 
+/*
+ * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
+ */
+SCHED_FEAT(SIS_AVG_CPU, false)
+
 #ifdef HAVE_RT_PUSH_IPI
 /*
  * In order to avoid a thundering herd attack of CPUs that are
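
The last two hunks work as a pair: fair.c puts the existing avg_idle / 512 < avg_cost bail-out in select_idle_cpu() behind the new SIS_AVG_CPU scheduler feature, and features.h defaults that feature to off, so wakeups now scan the LLC domain for an idle CPU even when the average scan cost looks high. A standalone sketch of the cutoff follows; it is illustrative only, with made-up numbers and a plain bool in place of the kernel's sched_feat() machinery.

/* Illustrative sketch only -- not kernel code. */
#include <stdbool.h>
#include <stdio.h>

static bool sis_avg_cpu = false;	/* mirrors SCHED_FEAT(SIS_AVG_CPU, false) */

/* Return -1 to skip the scan; otherwise pretend we scanned and found CPU 0. */
static int select_idle_cpu(unsigned long long avg_idle,
			   unsigned long long avg_cost)
{
	/* With the feature disabled (the new default) the scan always
	 * proceeds; enabling it restores the old bail-out, which traded
	 * scan time for missed idle CPUs. */
	if (sis_avg_cpu && (avg_idle / 512) < avg_cost)
		return -1;
	return 0;	/* scanned the LLC domain, found an idle CPU */
}

int main(void)
{
	printf("feature off: %d\n", select_idle_cpu(1000, 100));	/* scans */
	sis_avg_cpu = true;
	printf("feature on:  %d\n", select_idle_cpu(1000, 100));	/* 1 < 100: skip */
	return 0;
}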