Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 52 +++++++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 49 insertions(+), 3 deletions(-)
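In short, this change makes cpu_clock() keep the per-CPU clocks loosely synchronized: the existing implementation is renamed __cpu_clock(), and a new cpu_clock() wrapper adds a per-CPU time_offset that is resynchronized against a global high-water mark (prev_global_time, taken under time_sync_lock) once the local clock has advanced past time_sync_thresh.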
diff --git a/kernel/sched.c b/kernel/sched.c
index 8dcdec6fe0fe..7377222ab42f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -632,11 +632,39 @@ int sysctl_sched_rt_runtime = 950000;
  */
 #define RUNTIME_INF	((u64)~0ULL)
 
+static const unsigned long long time_sync_thresh = 100000;
+
+static DEFINE_PER_CPU(unsigned long long, time_offset);
+static DEFINE_PER_CPU(unsigned long long, prev_cpu_time);
+
 /*
- * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
- * clock constructed from sched_clock():
+ * Global lock which we take every now and then to synchronize
+ * the CPUs time. This method is not warp-safe, but it's good
+ * enough to synchronize slowly diverging time sources and thus
+ * it's good enough for tracing:
  */
-unsigned long long cpu_clock(int cpu)
+static DEFINE_SPINLOCK(time_sync_lock);
+static unsigned long long prev_global_time;
+
+static unsigned long long __sync_cpu_clock(cycles_t time, int cpu)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&time_sync_lock, flags);
+
+	if (time < prev_global_time) {
+		per_cpu(time_offset, cpu) += prev_global_time - time;
+		time = prev_global_time;
+	} else {
+		prev_global_time = time;
+	}
+
+	spin_unlock_irqrestore(&time_sync_lock, flags);
+
+	return time;
+}
+
+static unsigned long long __cpu_clock(int cpu)
 {
 	unsigned long long now;
 	unsigned long flags;
@@ -657,6 +685,24 @@ unsigned long long cpu_clock(int cpu)
 
 	return now;
 }
+
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+unsigned long long cpu_clock(int cpu)
+{
+	unsigned long long prev_cpu_time, time, delta_time;
+
+	prev_cpu_time = per_cpu(prev_cpu_time, cpu);
+	time = __cpu_clock(cpu) + per_cpu(time_offset, cpu);
+	delta_time = time - prev_cpu_time;
+
+	if (unlikely(delta_time > time_sync_thresh))
+		time = __sync_cpu_clock(time, cpu);
+
+	return time;
+}
 EXPORT_SYMBOL_GPL(cpu_clock);
 
 #ifndef prepare_arch_switch
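The interesting piece is the clamp-to-global-maximum step in __sync_cpu_clock(). Below is a minimal userspace sketch of the same idea, assuming a pthread mutex in place of the kernel spinlock; NCPUS, sync_cpu_clock(), and the simulated raw clock values are inventions of this sketch, not kernel APIs.

/*
 * Userspace sketch of the clamp-to-global-maximum scheme used by
 * __sync_cpu_clock() above. Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

#define NCPUS 4

static pthread_mutex_t time_sync_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long long prev_global_time;
static unsigned long long time_offset[NCPUS];

static unsigned long long sync_cpu_clock(unsigned long long time, int cpu)
{
	pthread_mutex_lock(&time_sync_lock);
	if (time < prev_global_time) {
		/* Lagging CPU: grow its offset so it jumps to the global max. */
		time_offset[cpu] += prev_global_time - time;
		time = prev_global_time;
	} else {
		/* Leading CPU: it sets the new global high-water mark. */
		prev_global_time = time;
	}
	pthread_mutex_unlock(&time_sync_lock);
	return time;
}

int main(void)
{
	/* Simulated per-CPU clocks that have drifted apart. */
	unsigned long long raw[NCPUS] = { 1000, 950, 1020, 400 };

	for (int cpu = 0; cpu < NCPUS; cpu++) {
		unsigned long long t = raw[cpu] + time_offset[cpu];
		t = sync_cpu_clock(t, cpu);
		printf("cpu%d: raw=%llu synced=%llu offset=%llu\n",
		       cpu, raw[cpu], t, time_offset[cpu]);
	}
	return 0;
}

Two properties follow directly from the code: a lagging CPU is only ever pushed forward (its time_offset grows, never shrinks), and a leading CPU is never pulled back, so values handed out under the lock never go backwards, even though reads on different CPUs between resyncs can still disagree, hence "good enough for tracing" rather than warp-safe. Note also that, in the hunk as shown, nothing stores back to per_cpu(prev_cpu_time, cpu), so delta_time in cpu_clock() is computed against that variable's initial value.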