Diffstat (limited to 'kernel/sched_clock.c')
-rw-r--r--  kernel/sched_clock.c | 95
1 file changed, 85 insertions(+), 10 deletions(-)

diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index 906a0f718cb3..52f1a149bfb1 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -10,19 +10,55 @@
  * Ingo Molnar <mingo@redhat.com>
  * Guillaume Chazarain <guichaz@gmail.com>
  *
- * Create a semi stable clock from a mixture of other events, including:
- *  - gtod
+ *
+ * What:
+ *
+ * cpu_clock(i) provides a fast (execution time) high resolution
+ * clock with bounded drift between CPUs. The value of cpu_clock(i)
+ * is monotonic for constant i. The timestamp returned is in nanoseconds.
+ *
+ * ######################### BIG FAT WARNING ##########################
+ * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
+ * # go backwards !!                                                  #
+ * ####################################################################
+ *
+ * There is no strict promise about the base, although it tends to start
+ * at 0 on boot (but people really shouldn't rely on that).
+ *
+ * cpu_clock(i)       -- can be used from any context, including NMI.
+ * sched_clock_cpu(i) -- must be used with local IRQs disabled (implied by NMI)
+ * local_clock()      -- is cpu_clock() on the current cpu.
+ *
+ * How:
+ *
+ * The implementation uses sched_clock() directly when
+ * !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK; in that case sched_clock() is
+ * assumed to provide these properties (mostly it means the architecture
+ * provides a globally synchronized highres time source).
+ *
+ * Otherwise it tries to create a semi stable clock from a mixture of other
+ * clocks, including:
+ *
+ *  - GTOD (clock monotonic)
  *  - sched_clock()
  *  - explicit idle events
  *
- * We use gtod as base and the unstable clock deltas. The deltas are filtered,
- * making it monotonic and keeping it within an expected window.
+ * We use GTOD as base and use sched_clock() deltas to improve resolution. The
+ * deltas are filtered to provide monotonicity and keep it within an
+ * expected window.
  *
  * Furthermore, explicit sleep and wakeup hooks allow us to account for time
  * that is otherwise invisible (TSC gets stopped).
  *
- * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
- * consistent between cpus (never more than 2 jiffies difference).
+ *
+ * Notes:
+ *
+ * The !IRQ-safety of sched_clock() and sched_clock_cpu() comes from things
+ * like cpufreq interrupts that can change the base clock (TSC) multiplier
+ * and cause funny jumps in time -- although the filtering provided by
+ * sched_clock_cpu() should mitigate serious artifacts, we cannot rely on it
+ * in general, since for !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK we fully rely on
+ * sched_clock().
  */
 #include <linux/spinlock.h>
 #include <linux/hardirq.h>
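
For context (not part of the patch): a minimal sketch of how a caller might use
the API documented above. do_some_work() is an assumed placeholder workload, not
a real kernel function. The point of the BIG FAT WARNING is that a delta is only
meaningful when both timestamps come from the same cpu argument.

/* Hypothetical caller, sketched against the documented API. */
#include <linux/sched.h>	/* cpu_clock() */
#include <linux/smp.h>		/* raw_smp_processor_id() */

static void measure_something(void)
{
	int cpu = raw_smp_processor_id();
	u64 t0, t1;

	t0 = cpu_clock(cpu);	/* usable from any context, incl. NMI */
	do_some_work();		/* assumed workload */
	t1 = cpu_clock(cpu);	/* same i, so t1 >= t0 holds */

	/* Never subtract cpu_clock(j) from cpu_clock(i) for i != j. */
	printk(KERN_INFO "took %llu ns\n", (unsigned long long)(t1 - t0));
}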
@@ -170,6 +206,11 @@ again:
 	return val;
 }
 
+/*
+ * Similar to cpu_clock(), but requires local IRQs to be disabled.
+ *
+ * See cpu_clock().
+ */
 u64 sched_clock_cpu(int cpu)
 {
 	struct sched_clock_data *scd;
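
The comment added here pins down the calling convention; roughly, a correct
call site looks like the sketch below (this is essentially what the cpu_clock()
and local_clock() wrappers later in the patch do):

/* Sketch of the required calling convention for sched_clock_cpu(). */
static u64 read_clock_irqs_off(int cpu)
{
	unsigned long flags;
	u64 now;

	local_irq_save(flags);	/* precondition: local IRQs disabled */
	now = sched_clock_cpu(cpu);
	local_irq_restore(flags);

	return now;
}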
@@ -237,9 +278,19 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
-unsigned long long cpu_clock(int cpu)
+/*
+ * As outlined at the top, provides a fast, high resolution, nanosecond
+ * time source that is monotonic per cpu argument and has bounded drift
+ * between cpus.
+ *
+ * ######################### BIG FAT WARNING ##########################
+ * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
+ * # go backwards !!                                                  #
+ * ####################################################################
+ */
+u64 cpu_clock(int cpu)
 {
-	unsigned long long clock;
+	u64 clock;
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -249,6 +300,25 @@ unsigned long long cpu_clock(int cpu)
 	return clock;
 }
 
+/*
+ * Similar to cpu_clock() for the current cpu. Time will only be observed
+ * to be monotonic if care is taken to only compare timestamps taken on the
+ * same CPU.
+ *
+ * See cpu_clock().
+ */
+u64 local_clock(void)
+{
+	u64 clock;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	clock = sched_clock_cpu(smp_processor_id());
+	local_irq_restore(flags);
+
+	return clock;
+}
+
 #else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
 void sched_clock_init(void)
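
Again not part of the patch: since local_clock() reads the current CPU's clock,
two timestamps are only safely comparable if the task cannot migrate between the
reads. A hedged sketch, with do_work() as an assumed placeholder:

/* Hypothetical section timing built on local_clock(). */
static void time_section(void)
{
	u64 t0, t1;

	preempt_disable();	/* stay on one CPU so the delta is monotonic */
	t0 = local_clock();
	do_work();		/* assumed workload */
	t1 = local_clock();
	preempt_enable();

	printk(KERN_INFO "section took %llu ns\n",
	       (unsigned long long)(t1 - t0));
}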
@@ -264,12 +334,17 @@ u64 sched_clock_cpu(int cpu)
 	return sched_clock();
 }
 
-
-unsigned long long cpu_clock(int cpu)
+u64 cpu_clock(int cpu)
 {
 	return sched_clock_cpu(cpu);
 }
 
+u64 local_clock(void)
+{
+	return sched_clock_cpu(0);
+}
+
 #endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
 EXPORT_SYMBOL_GPL(cpu_clock);
+EXPORT_SYMBOL_GPL(local_clock);
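
With cpu_clock() and local_clock() both exported GPL, modules can use the
interface directly. A minimal, hypothetical demo module (clockdemo is not a
real module) might look like this:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>	/* local_clock() */

static int __init clockdemo_init(void)
{
	/* Base tends to start at 0 on boot, but don't rely on that. */
	printk(KERN_INFO "local_clock() = %llu ns\n",
	       (unsigned long long)local_clock());
	return 0;
}

static void __exit clockdemo_exit(void)
{
}

module_init(clockdemo_init);
module_exit(clockdemo_exit);
MODULE_LICENSE("GPL");	/* required: the symbols are EXPORT_SYMBOL_GPL */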
