 arch/x86/kernel/tsc.c   |  2 +-
 include/linux/preempt.h |  5 -----
 kernel/sched/clock.c    | 53 +++++++++++++++++++++++++++++++++++++++++------------
 kernel/sched/fair.c     |  8 +-------
 4 files changed, 43 insertions(+), 25 deletions(-)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index a3acbac2ee72..19e5adb49a27 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -180,7 +180,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
 
 static void cyc2ns_data_init(struct cyc2ns_data *data)
 {
-        data->cyc2ns_mul = 1U << CYC2NS_SCALE_FACTOR;
+        data->cyc2ns_mul = 0;
         data->cyc2ns_shift = CYC2NS_SCALE_FACTOR;
         data->cyc2ns_offset = 0;
         data->__count = 0;
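
The cyc2ns fields above implement a fixed-point cycles-to-nanoseconds conversion, roughly ns = offset + (cycles * cyc2ns_mul) >> cyc2ns_shift. Starting cyc2ns_mul at 0 instead of 1U << CYC2NS_SCALE_FACTOR means a sched_clock() read taken before TSC calibration collapses to the offset (reads as 0) rather than returning raw, unscaled TSC cycles. A minimal user-space sketch of that conversion, assuming the usual mul/shift scheme; the 3 GHz calibration value is a made-up example, not from this patch:

#include <stdint.h>
#include <stdio.h>

#define CYC2NS_SCALE_FACTOR 10  /* fixed-point shift, as in tsc.c */

/* ns = offset + (cycles * mul) >> shift; the kernel does this in wider math */
static uint64_t cyc2ns(uint64_t cycles, uint32_t mul, uint64_t offset)
{
        return offset + ((cycles * mul) >> CYC2NS_SCALE_FACTOR);
}

int main(void)
{
        /* mul for a hypothetical 3 GHz TSC: (10^6 ns/ms << 10) / 3*10^6 kHz */
        uint32_t mul = (1000000ULL << CYC2NS_SCALE_FACTOR) / 3000000;

        printf("calibrated: %llu ns\n",  /* ~1e9 ns for 3e9 cycles */
               (unsigned long long)cyc2ns(3000000000ULL, mul, 0));
        printf("early boot: %llu ns\n",  /* mul == 0: always just the offset */
               (unsigned long long)cyc2ns(3000000000ULL, 0, 0));
        return 0;
}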
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 59749fc48328..de83b4eb1642 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -134,7 +134,6 @@ do { \
 #undef preempt_check_resched
 #endif
 
-#ifdef CONFIG_PREEMPT
 #define preempt_set_need_resched() \
 do { \
         set_preempt_need_resched(); \
@@ -144,10 +143,6 @@ do { \
         if (tif_need_resched()) \
                 set_preempt_need_resched(); \
 } while (0)
-#else
-#define preempt_set_need_resched() do { } while (0)
-#define preempt_fold_need_resched() do { } while (0)
-#endif
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 
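The hunk above lifts these helpers out of CONFIG_PREEMPT so they exist unconditionally, and drops the empty !CONFIG_PREEMPT stubs. Functionally, preempt_fold_need_resched() mirrors TIF_NEED_RESCHED into the preempt count so the preemption fast path has a single hot word to test. A toy model of that fold pattern, with stand-in flags rather than the kernel's actual bit layout:

#include <stdbool.h>

/* stand-ins for the thread-info flag and the preempt-count hint bit */
static _Thread_local bool tif_need_resched_flag;
static _Thread_local bool preempt_count_hint;

static bool tif_need_resched(void)
{
        return tif_need_resched_flag;
}

static void set_preempt_need_resched(void)
{
        preempt_count_hint = true;
}

/* same shape as the macro above: copy the slow flag into the hot word */
static void preempt_fold_need_resched(void)
{
        if (tif_need_resched())
                set_preempt_need_resched();
}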
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index 6bd6a6731b21..43c2bcc35761 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -77,35 +77,50 @@ __read_mostly int sched_clock_running;
 
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 static struct static_key __sched_clock_stable = STATIC_KEY_INIT;
+static int __sched_clock_stable_early;
 
 int sched_clock_stable(void)
 {
-        if (static_key_false(&__sched_clock_stable))
-                return false;
-        return true;
+        return static_key_false(&__sched_clock_stable);
 }
 
-void set_sched_clock_stable(void)
+static void __set_sched_clock_stable(void)
 {
         if (!sched_clock_stable())
-                static_key_slow_dec(&__sched_clock_stable);
+                static_key_slow_inc(&__sched_clock_stable);
+}
+
+void set_sched_clock_stable(void)
+{
+        __sched_clock_stable_early = 1;
+
+        smp_mb(); /* matches sched_clock_init() */
+
+        if (!sched_clock_running)
+                return;
+
+        __set_sched_clock_stable();
 }
 
 static void __clear_sched_clock_stable(struct work_struct *work)
 {
         /* XXX worry about clock continuity */
         if (sched_clock_stable())
-                static_key_slow_inc(&__sched_clock_stable);
+                static_key_slow_dec(&__sched_clock_stable);
 }
 
 static DECLARE_WORK(sched_clock_work, __clear_sched_clock_stable);
 
 void clear_sched_clock_stable(void)
 {
-        if (keventd_up())
-                schedule_work(&sched_clock_work);
-        else
-                __clear_sched_clock_stable(&sched_clock_work);
+        __sched_clock_stable_early = 0;
+
+        smp_mb(); /* matches sched_clock_init() */
+
+        if (!sched_clock_running)
+                return;
+
+        schedule_work(&sched_clock_work);
 }
 
 struct sched_clock_data {
@@ -140,6 +155,20 @@ void sched_clock_init(void)
         }
 
         sched_clock_running = 1;
+
+        /*
+         * Ensure that it is impossible to not do a static_key update.
+         *
+         * Either {set,clear}_sched_clock_stable() must see sched_clock_running
+         * and do the update, or we must see their __sched_clock_stable_early
+         * and do the update, or both.
+         */
+        smp_mb(); /* matches {set,clear}_sched_clock_stable() */
+
+        if (__sched_clock_stable_early)
+                __set_sched_clock_stable();
+        else
+                __clear_sched_clock_stable(NULL);
 }
 
 /*
@@ -340,7 +369,7 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
  */
 u64 cpu_clock(int cpu)
 {
-        if (static_key_false(&__sched_clock_stable))
+        if (!sched_clock_stable())
                 return sched_clock_cpu(cpu);
 
         return sched_clock();
@@ -355,7 +384,7 @@ u64 cpu_clock(int cpu)
  */
 u64 local_clock(void)
 {
-        if (static_key_false(&__sched_clock_stable))
+        if (!sched_clock_stable())
                 return sched_clock_cpu(raw_smp_processor_id());
 
         return sched_clock();
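
The smp_mb() pairs above form a classic store-buffering handshake: each side first publishes its own flag (__sched_clock_stable_early or sched_clock_running), issues a full barrier, then reads the other side's flag. The barriers rule out the one bad interleaving in which both loads see the old value and nobody performs the static_key update. A compact C11 model of that guarantee, with seq_cst fences standing in for smp_mb():

#include <stdatomic.h>

static atomic_int stable_early;   /* __sched_clock_stable_early */
static atomic_int clock_running;  /* sched_clock_running */

/* models set_sched_clock_stable() */
void setter_side(void)
{
        atomic_store_explicit(&stable_early, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
        if (atomic_load_explicit(&clock_running, memory_order_relaxed)) {
                /* init already ran: this side must do the static_key update */
        }
}

/* models sched_clock_init() */
void init_side(void)
{
        atomic_store_explicit(&clock_running, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
        if (atomic_load_explicit(&stable_early, memory_order_relaxed)) {
                /* setter already ran: do the update on its behalf */
        }
}

/*
 * With both fences in place, "setter sees clock_running == 0" and
 * "init sees stable_early == 0" cannot both happen, so at least one
 * side always performs the update. Remove either fence and both loads
 * may read 0, which is exactly the lost-update case the comment in
 * sched_clock_init() warns about.
 */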
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 867b0a4b0893..966cc2bfcb77 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2360,13 +2360,7 @@ static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
                 }
                 wakeup = 0;
         } else {
-                /*
-                 * Task re-woke on same cpu (or else migrate_task_rq_fair()
-                 * would have made count negative); we must be careful to avoid
-                 * double-accounting blocked time after synchronizing decays.
-                 */
-                se->avg.last_runnable_update += __synchronize_entity_decay(se)
-                                                        << 20;
+                __synchronize_entity_decay(se);
         }
 
         /* migrated tasks did not contribute to our blocked load */
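
On the removed lines: __synchronize_entity_decay() returns how many whole decay periods the entity's load average missed, and one period is 1 << 20 ns (about 1.05 ms), so "decays << 20" converted periods back into the nanoseconds that avg.last_runnable_update is kept in. The change drops that compensation and keeps only the decay itself. A quick check of the unit conversion, with illustrative values:

#include <stdio.h>

int main(void)
{
        /* say the entity slept through 3 decay periods */
        unsigned long long decays = 3;
        /* one decay period is 1 << 20 ns = 1048576 ns, roughly 1.05 ms */
        unsigned long long ns = decays << 20;

        printf("%llu periods = %llu ns (%.2f ms)\n",
               decays, ns, ns / 1e6);   /* 3 periods = 3145728 ns (3.15 ms) */
        return 0;
}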