author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-06-27 07:41:15 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-06-27 08:31:30 -0400
commit		76a2a6ee8a0660a29127f05989ac59ae1ce865fa (patch)
tree		93ae1dd3cc5f564fc1e9d30bcdb0cd313f67221a
parent		c09595f63bb1909c5dc4dca288f4fe818561b5f3 (diff)
sched: sched_clock_cpu() based cpu_clock()
With sched_clock_cpu() being reasonably in sync between CPUs (at most
1 jiffy of difference), use it to provide cpu_clock().
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
 kernel/sched.c       | 76 --------------------------------------------------
 kernel/sched_clock.c | 12 ++++++++++++
 2 files changed, 12 insertions(+), 76 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 874b6da15430..eb3454c410fa 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -818,82 +818,6 @@ static inline u64 global_rt_runtime(void)
 	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
 }
 
-unsigned long long time_sync_thresh = 100000;
-
-static DEFINE_PER_CPU(unsigned long long, time_offset);
-static DEFINE_PER_CPU(unsigned long long, prev_cpu_time);
-
-/*
- * Global lock which we take every now and then to synchronize
- * the CPUs time. This method is not warp-safe, but it's good
- * enough to synchronize slowly diverging time sources and thus
- * it's good enough for tracing:
- */
-static DEFINE_SPINLOCK(time_sync_lock);
-static unsigned long long prev_global_time;
-
-static unsigned long long __sync_cpu_clock(unsigned long long time, int cpu)
-{
-	/*
-	 * We want this inlined, to not get tracer function calls
-	 * in this critical section:
-	 */
-	spin_acquire(&time_sync_lock.dep_map, 0, 0, _THIS_IP_);
-	__raw_spin_lock(&time_sync_lock.raw_lock);
-
-	if (time < prev_global_time) {
-		per_cpu(time_offset, cpu) += prev_global_time - time;
-		time = prev_global_time;
-	} else {
-		prev_global_time = time;
-	}
-
-	__raw_spin_unlock(&time_sync_lock.raw_lock);
-	spin_release(&time_sync_lock.dep_map, 1, _THIS_IP_);
-
-	return time;
-}
-
-static unsigned long long __cpu_clock(int cpu)
-{
-	unsigned long long now;
-
-	/*
-	 * Only call sched_clock() if the scheduler has already been
-	 * initialized (some code might call cpu_clock() very early):
-	 */
-	if (unlikely(!scheduler_running))
-		return 0;
-
-	now = sched_clock_cpu(cpu);
-
-	return now;
-}
-
-/*
- * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
- * clock constructed from sched_clock():
- */
-unsigned long long cpu_clock(int cpu)
-{
-	unsigned long long prev_cpu_time, time, delta_time;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	prev_cpu_time = per_cpu(prev_cpu_time, cpu);
-	time = __cpu_clock(cpu) + per_cpu(time_offset, cpu);
-	delta_time = time-prev_cpu_time;
-
-	if (unlikely(delta_time > time_sync_thresh)) {
-		time = __sync_cpu_clock(time, cpu);
-		per_cpu(prev_cpu_time, cpu) = time;
-	}
-	local_irq_restore(flags);
-
-	return time;
-}
-EXPORT_SYMBOL_GPL(cpu_clock);
-
 #ifndef prepare_arch_switch
 # define prepare_arch_switch(next)	do { } while (0)
 #endif
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index ce05271219ab..3c696db59452 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -244,3 +244,15 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 {
 	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
 }
+
+unsigned long long cpu_clock(int cpu)
+{
+	unsigned long long clock;
+	unsigned long flags;
+
+	raw_local_irq_save(flags);
+	clock = sched_clock_cpu(cpu);
+	raw_local_irq_restore(flags);
+
+	return clock;
+}
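
For illustration, a minimal sketch of how a caller might use the new
cpu_clock() for lightweight timestamping. This is not part of the patch;
example_timestamp() and do_some_work() are hypothetical names. Per the
commit message, readings from different CPUs can differ by up to a jiffy,
so deltas are best taken from a single CPU's clock:

#include <linux/kernel.h>
#include <linux/sched.h>	/* cpu_clock() */
#include <linux/smp.h>		/* get_cpu()/put_cpu() */

/*
 * Hypothetical caller, not part of this patch: time a stretch of
 * work with the per-cpu scheduler clock. get_cpu() disables
 * preemption, so both reads come from the same CPU's clock.
 */
static void example_timestamp(void)
{
	int cpu = get_cpu();
	unsigned long long t0 = cpu_clock(cpu);

	do_some_work();		/* hypothetical workload */

	printk(KERN_INFO "work took %llu ns\n", cpu_clock(cpu) - t0);
	put_cpu();
}

Because the new cpu_clock() is just sched_clock_cpu() behind a
raw irq-save, it stays cheap enough for tracing-style callers like
this, while the bounded cross-CPU drift replaces the global-spinlock
synchronization that the removed kernel/sched.c code performed.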