Diffstat (limited to 'kernel/sched_clock.c')
-rw-r--r--	kernel/sched_clock.c	224
1 file changed, 71 insertions, 153 deletions

diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index 22ed55d1167f..e8ab096ddfe3 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -12,19 +12,17 @@
  *
  * Create a semi stable clock from a mixture of other events, including:
  *  - gtod
- *  - jiffies
  *  - sched_clock()
  *  - explicit idle events
  *
  * We use gtod as base and the unstable clock deltas. The deltas are filtered,
- * making it monotonic and keeping it within an expected window.  This window
- * is set up using jiffies.
+ * making it monotonic and keeping it within an expected window.
  *
  * Furthermore, explicit sleep and wakeup hooks allow us to account for time
  * that is otherwise invisible (TSC gets stopped).
  *
  * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
- * consistent between cpus (never more than 1 jiffies difference).
+ * consistent between cpus (never more than 2 jiffies difference).
  */
 #include <linux/sched.h>
 #include <linux/percpu.h>
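A rough reading of the "never more than 2 jiffies" figure, assuming each cpu refreshes its tick_gtod snapshot at its own tick and that ticks on different cpus land at most about one jiffy apart (both assumptions, not statements from this patch):

    per cpu:   tick_gtod  <=  clock  <=  tick_gtod + TICK_NSEC
    cross cpu: |tick_gtod_i - tick_gtod_j|  <=  TICK_NSEC
    so:        |clock_i - clock_j|  <=  2 * TICK_NSEC   (about 2 jiffies)

The clamp that enforces the per-cpu window is introduced in __update_sched_clock() further down.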
@@ -32,13 +30,19 @@
 #include <linux/ktime.h>
 #include <linux/module.h>
 
+/*
+ * Scheduler clock - returns current time in nanosec units.
+ * This is default implementation.
+ * Architectures and sub-architectures can override this.
+ */
+unsigned long long __attribute__((weak)) sched_clock(void)
+{
+	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
+}
 
-#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+static __read_mostly int sched_clock_running;
 
-#define MULTI_SHIFT 15
-/* Max is double, Min is 1/2 */
-#define MAX_MULTI (2LL << MULTI_SHIFT)
-#define MIN_MULTI (1LL << (MULTI_SHIFT-1))
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 
 struct sched_clock_data {
 	/*
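The weak default added above only advances in whole jiffies, so every value it returns is a multiple of NSEC_PER_SEC / HZ. A small user-space sketch of the same arithmetic; the HZ value here is an assumed example, not taken from any particular configuration:

#include <stdio.h>

/* Assumed example values; in the kernel these come from the configuration. */
#define HZ		250
#define NSEC_PER_SEC	1000000000ULL

/* Mirrors the weak fallback: jiffies * (NSEC_PER_SEC / HZ). */
static unsigned long long jiffies_to_ns(unsigned long jiffies)
{
	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
}

int main(void)
{
	/* With HZ=250 one jiffy is 4,000,000 ns, so this clock moves in 4 ms steps. */
	printf("%llu ns per jiffy\n", jiffies_to_ns(1));
	printf("%llu ns after 300 jiffies\n", jiffies_to_ns(300));
	return 0;
}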
@@ -48,15 +52,9 @@ struct sched_clock_data {
 	 */
 	raw_spinlock_t		lock;
 
-	unsigned long		tick_jiffies;
-	u64			prev_raw;
 	u64			tick_raw;
 	u64			tick_gtod;
 	u64			clock;
-	s64			multi;
-#ifdef CONFIG_NO_HZ
-	int			check_max;
-#endif
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
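For orientation, this is the per-cpu state that survives the change, with descriptive comments added here for this writeup (they are not in the source):

struct sched_clock_data {
	raw_spinlock_t	lock;		/* protects the fields below; raw because it
					 * may be taken from instrumentation code */
	u64		tick_raw;	/* sched_clock() sampled at the last tick */
	u64		tick_gtod;	/* GTOD (ktime) sampled at the last tick */
	u64		clock;		/* last filtered value handed out */
};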
@@ -71,121 +69,69 @@ static inline struct sched_clock_data *cpu_sdc(int cpu)
 	return &per_cpu(sched_clock_data, cpu);
 }
 
-static __read_mostly int sched_clock_running;
-
 void sched_clock_init(void)
 {
 	u64 ktime_now = ktime_to_ns(ktime_get());
-	unsigned long now_jiffies = jiffies;
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
 		struct sched_clock_data *scd = cpu_sdc(cpu);
 
 		scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
-		scd->tick_jiffies = now_jiffies;
-		scd->prev_raw = 0;
 		scd->tick_raw = 0;
 		scd->tick_gtod = ktime_now;
 		scd->clock = ktime_now;
-		scd->multi = 1 << MULTI_SHIFT;
-#ifdef CONFIG_NO_HZ
-		scd->check_max = 1;
-#endif
 	}
 
 	sched_clock_running = 1;
 }
 
-#ifdef CONFIG_NO_HZ
 /*
- * The dynamic ticks makes the delta jiffies inaccurate. This
- * prevents us from checking the maximum time update.
- * Disable the maximum check during stopped ticks.
+ * min,max except they take wrapping into account
  */
-void sched_clock_tick_stop(int cpu)
-{
-	struct sched_clock_data *scd = cpu_sdc(cpu);
-
-	scd->check_max = 0;
-}
 
-void sched_clock_tick_start(int cpu)
+static inline u64 wrap_min(u64 x, u64 y)
 {
-	struct sched_clock_data *scd = cpu_sdc(cpu);
-
-	scd->check_max = 1;
+	return (s64)(x - y) < 0 ? x : y;
 }
 
-static int check_max(struct sched_clock_data *scd)
+static inline u64 wrap_max(u64 x, u64 y)
 {
-	return scd->check_max;
+	return (s64)(x - y) > 0 ? x : y;
 }
-#else
-static int check_max(struct sched_clock_data *scd)
-{
-	return 1;
-}
-#endif /* CONFIG_NO_HZ */
 
 /*
  * update the percpu scd from the raw @now value
  *
  *  - filter out backward motion
- *  - use jiffies to generate a min,max window to clip the raw values
+ *  - use the GTOD tick value to create a window to filter crazy TSC values
  */
-static void __update_sched_clock(struct sched_clock_data *scd, u64 now, u64 *time)
+static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
 {
-	unsigned long now_jiffies = jiffies;
-	long delta_jiffies = now_jiffies - scd->tick_jiffies;
-	u64 clock = scd->clock;
-	u64 min_clock, max_clock;
-	s64 delta = now - scd->prev_raw;
+	s64 delta = now - scd->tick_raw;
+	u64 clock, min_clock, max_clock;
 
 	WARN_ON_ONCE(!irqs_disabled());
 
-	/*
-	 * At schedule tick the clock can be just under the gtod. We don't
-	 * want to push it too prematurely.
-	 */
-	min_clock = scd->tick_gtod + (delta_jiffies * TICK_NSEC);
-	if (min_clock > TICK_NSEC)
-		min_clock -= TICK_NSEC / 2;
-
-	if (unlikely(delta < 0)) {
-		clock++;
-		goto out;
-	}
+	if (unlikely(delta < 0))
+		delta = 0;
 
 	/*
-	 * The clock must stay within a jiffie of the gtod.
-	 * But since we may be at the start of a jiffy or the end of one
-	 * we add another jiffy buffer.
+	 * scd->clock = clamp(scd->tick_gtod + delta,
+	 *		      max(scd->tick_gtod, scd->clock),
+	 *		      scd->tick_gtod + TICK_NSEC);
 	 */
-	max_clock = scd->tick_gtod + (2 + delta_jiffies) * TICK_NSEC;
 
-	delta *= scd->multi;
-	delta >>= MULTI_SHIFT;
+	clock = scd->tick_gtod + delta;
+	min_clock = wrap_max(scd->tick_gtod, scd->clock);
+	max_clock = scd->tick_gtod + TICK_NSEC;
 
-	if (unlikely(clock + delta > max_clock) && check_max(scd)) {
-		if (clock < max_clock)
-			clock = max_clock;
-		else
-			clock++;
-	} else {
-		clock += delta;
-	}
+	clock = wrap_max(clock, min_clock);
+	clock = wrap_min(clock, max_clock);
 
- out:
-	if (unlikely(clock < min_clock))
-		clock = min_clock;
+	scd->clock = clock;
 
-	if (time)
-		*time = clock;
-	else {
-		scd->prev_raw = now;
-		scd->clock = clock;
-	}
+	return scd->clock;
 }
 
 static void lock_double_clock(struct sched_clock_data *data1,
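The two helpers and the clamp that replace all of the removed logic are small enough to exercise in user space. The sketch below mirrors wrap_min()/wrap_max() with stdint types and feeds the clamp a deliberately oversized delta; every numeric value is made up for illustration:

#include <stdio.h>
#include <stdint.h>

/* Stand-alone copies of the helpers above, u64/s64 spelled as stdint types. */
static inline uint64_t wrap_min(uint64_t x, uint64_t y)
{
	return (int64_t)(x - y) < 0 ? x : y;
}

static inline uint64_t wrap_max(uint64_t x, uint64_t y)
{
	return (int64_t)(x - y) > 0 ? x : y;
}

int main(void)
{
	/* Near the wrap point a plain "x < y" picks the wrong value; the
	 * signed difference still identifies the logically later one. */
	uint64_t before = UINT64_MAX - 5;	/* just before wrapping */
	uint64_t after = 10;			/* just after wrapping */

	printf("wrap_max = %llu (expect 10)\n",
	       (unsigned long long)wrap_max(before, after));
	printf("wrap_min = %llu (expect %llu)\n",
	       (unsigned long long)wrap_min(before, after),
	       (unsigned long long)before);

	/* The clamp from __update_sched_clock(), with made-up numbers: keep
	 * tick_gtod + delta inside [max(tick_gtod, clock), tick_gtod + TICK_NSEC]. */
	uint64_t tick_gtod = 1000000000ULL;	/* gtod at the last tick */
	uint64_t prev_clock = 1000200000ULL;	/* last value handed out */
	uint64_t tick_nsec = 1000000ULL;	/* assumed 1 ms tick */
	uint64_t delta = 50000000ULL;		/* a "crazy" 50 ms raw delta */

	uint64_t clock = tick_gtod + delta;
	clock = wrap_max(clock, wrap_max(tick_gtod, prev_clock));
	clock = wrap_min(clock, tick_gtod + tick_nsec);

	/* Prints 1001000000: the bogus delta is capped at tick_gtod + TICK_NSEC. */
	printf("clamped clock = %llu\n", (unsigned long long)clock);
	return 0;
}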
@@ -203,7 +149,7 @@ static void lock_double_clock(struct sched_clock_data *data1,
 u64 sched_clock_cpu(int cpu)
 {
 	struct sched_clock_data *scd = cpu_sdc(cpu);
-	u64 now, clock;
+	u64 now, clock, this_clock, remote_clock;
 
 	if (unlikely(!sched_clock_running))
 		return 0ull;
@@ -212,43 +158,44 @@ u64 sched_clock_cpu(int cpu)
 	now = sched_clock();
 
 	if (cpu != raw_smp_processor_id()) {
-		/*
-		 * in order to update a remote cpu's clock based on our
-		 * unstable raw time rebase it against:
-		 *	tick_raw	(offset between raw counters)
-		 *	tick_gotd	(tick offset between cpus)
-		 */
 		struct sched_clock_data *my_scd = this_scd();
 
 		lock_double_clock(scd, my_scd);
 
-		now -= my_scd->tick_raw;
-		now += scd->tick_raw;
+		this_clock = __update_sched_clock(my_scd, now);
+		remote_clock = scd->clock;
 
-		now += my_scd->tick_gtod;
-		now -= scd->tick_gtod;
+		/*
+		 * Use the opportunity that we have both locks
+		 * taken to couple the two clocks: we take the
+		 * larger time as the latest time for both
+		 * runqueues. (this creates monotonic movement)
+		 */
+		if (likely((s64)(remote_clock - this_clock) < 0)) {
+			clock = this_clock;
+			scd->clock = clock;
+		} else {
+			/*
+			 * Should be rare, but possible:
+			 */
+			clock = remote_clock;
+			my_scd->clock = remote_clock;
+		}
 
 		__raw_spin_unlock(&my_scd->lock);
-
-		__update_sched_clock(scd, now, &clock);
-
-		__raw_spin_unlock(&scd->lock);
-
 	} else {
 		__raw_spin_lock(&scd->lock);
-		__update_sched_clock(scd, now, NULL);
-		clock = scd->clock;
-		__raw_spin_unlock(&scd->lock);
+		clock = __update_sched_clock(scd, now);
 	}
 
+	__raw_spin_unlock(&scd->lock);
+
 	return clock;
 }
 
 void sched_clock_tick(void)
 {
 	struct sched_clock_data *scd = this_scd();
-	unsigned long now_jiffies = jiffies;
-	s64 mult, delta_gtod, delta_raw;
 	u64 now, now_gtod;
 
 	if (unlikely(!sched_clock_running))
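The remote-cpu branch above couples the two per-cpu clocks by letting whichever value is further ahead win, again using the signed-difference comparison so wrapping is harmless. A stand-alone sketch of just that decision (illustrative only; in the kernel it runs with both per-cpu locks held):

#include <stdint.h>

/* Returns the coupled value and pulls the slower of the two clocks forward. */
static uint64_t couple_clocks(uint64_t *this_clock, uint64_t *remote_clock)
{
	uint64_t clock;

	if ((int64_t)(*remote_clock - *this_clock) < 0) {
		/* common case: the freshly updated local clock is ahead */
		clock = *this_clock;
		*remote_clock = clock;
	} else {
		/* rare: the remote clock is ahead, adopt it locally */
		clock = *remote_clock;
		*this_clock = clock;
	}

	return clock;
}

int main(void)
{
	uint64_t a = 2000, b = 1500;

	/* 2000 wins; b is pulled up so a later read on that cpu cannot go back. */
	return couple_clocks(&a, &b) == 2000 && b == 2000 ? 0 : 1;
}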
@@ -260,29 +207,9 @@ void sched_clock_tick(void)
 	now = sched_clock();
 
 	__raw_spin_lock(&scd->lock);
-	__update_sched_clock(scd, now, NULL);
-	/*
-	 * update tick_gtod after __update_sched_clock() because that will
-	 * already observe 1 new jiffy; adding a new tick_gtod to that would
-	 * increase the clock 2 jiffies.
-	 */
-	delta_gtod = now_gtod - scd->tick_gtod;
-	delta_raw = now - scd->tick_raw;
-
-	if ((long)delta_raw > 0) {
-		mult = delta_gtod << MULTI_SHIFT;
-		do_div(mult, delta_raw);
-		scd->multi = mult;
-		if (scd->multi > MAX_MULTI)
-			scd->multi = MAX_MULTI;
-		else if (scd->multi < MIN_MULTI)
-			scd->multi = MIN_MULTI;
-	} else
-		scd->multi = 1 << MULTI_SHIFT;
-
 	scd->tick_raw = now;
 	scd->tick_gtod = now_gtod;
-	scd->tick_jiffies = now_jiffies;
+	__update_sched_clock(scd, now);
 	__raw_spin_unlock(&scd->lock);
 }
 
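For contrast, the block deleted above was a per-tick estimator: it measured how fast raw sched_clock() time ran relative to GTOD and turned the ratio into a 15-bit fixed-point multiplier clamped to [0.5, 2.0]. A user-space sketch of that calculation, with made-up per-tick deltas:

#include <stdio.h>
#include <stdint.h>

/* Constants as defined in the removed code. */
#define MULTI_SHIFT	15
#define MAX_MULTI	(2LL << MULTI_SHIFT)
#define MIN_MULTI	(1LL << (MULTI_SHIFT - 1))

int main(void)
{
	int64_t delta_gtod = 1000000;	/* 1 ms of GTOD time between ticks */
	int64_t delta_raw = 1250000;	/* 1.25 ms of raw time: the TSC runs fast */
	int64_t mult = (delta_gtod << MULTI_SHIFT) / delta_raw;

	if (mult > MAX_MULTI)
		mult = MAX_MULTI;
	else if (mult < MIN_MULTI)
		mult = MIN_MULTI;

	/* Raw deltas were then scaled by mult and shifted back down, i.e.
	 * multiplied by ~0.8 here. The new code drops this estimator and
	 * simply clamps each delta into the per-tick GTOD window instead. */
	printf("multiplier = %lld/%lld (~%.2f)\n",
	       (long long)mult, (long long)(1LL << MULTI_SHIFT),
	       (double)mult / (1 << MULTI_SHIFT));
	return 0;
}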
@@ -300,37 +227,28 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
  */
 void sched_clock_idle_wakeup_event(u64 delta_ns)
 {
-	struct sched_clock_data *scd = this_scd();
-	u64 now = sched_clock();
-
-	/*
-	 * Override the previous timestamp and ignore all
-	 * sched_clock() deltas that occured while we idled,
-	 * and use the PM-provided delta_ns to advance the
-	 * rq clock:
-	 */
-	__raw_spin_lock(&scd->lock);
-	scd->prev_raw = now;
-	scd->clock += delta_ns;
-	scd->multi = 1 << MULTI_SHIFT;
-	__raw_spin_unlock(&scd->lock);
-
+	sched_clock_tick();
 	touch_softlockup_watchdog();
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
-#endif
+#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
-/*
- * Scheduler clock - returns current time in nanosec units.
- * This is default implementation.
- * Architectures and sub-architectures can override this.
- */
-unsigned long long __attribute__((weak)) sched_clock(void)
+void sched_clock_init(void)
 {
-	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
+	sched_clock_running = 1;
 }
 
+u64 sched_clock_cpu(int cpu)
+{
+	if (unlikely(!sched_clock_running))
+		return 0;
+
+	return sched_clock();
+}
+
+#endif
+
 unsigned long long cpu_clock(int cpu)
 {
 	unsigned long long clock;