Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index f98f75f3c708..9457106b18af 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -910,11 +910,14 @@ static DEFINE_PER_CPU(unsigned long long, prev_cpu_time);
 static DEFINE_SPINLOCK(time_sync_lock);
 static unsigned long long prev_global_time;
 
-static unsigned long long __sync_cpu_clock(cycles_t time, int cpu)
+static unsigned long long __sync_cpu_clock(unsigned long long time, int cpu)
 {
-        unsigned long flags;
-
-        spin_lock_irqsave(&time_sync_lock, flags);
+        /*
+         * We want this inlined, to not get tracer function calls
+         * in this critical section:
+         */
+        spin_acquire(&time_sync_lock.dep_map, 0, 0, _THIS_IP_);
+        __raw_spin_lock(&time_sync_lock.raw_lock);
 
         if (time < prev_global_time) {
                 per_cpu(time_offset, cpu) += prev_global_time - time;
@@ -923,7 +926,8 @@ static unsigned long long __sync_cpu_clock(cycles_t time, int cpu)
                 prev_global_time = time;
         }
 
-        spin_unlock_irqrestore(&time_sync_lock, flags);
+        __raw_spin_unlock(&time_sync_lock.raw_lock);
+        spin_release(&time_sync_lock.dep_map, 1, _THIS_IP_);
 
         return time;
 }
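Note on the two hunks above: the traced spin_lock_irqsave()/spin_unlock_irqrestore() pair is replaced by an open-coded lock. spin_acquire() and spin_release() keep lockdep informed, while __raw_spin_lock()/__raw_spin_unlock() take the lock without going through wrappers that the function tracer would instrument, per the new comment. Reassembled, the function now reads roughly as below; the two unchanged lines between the hunks (the clamp to prev_global_time and the else branch) are not shown in the diff and are inferred from context:

static unsigned long long __sync_cpu_clock(unsigned long long time, int cpu)
{
        /*
         * We want this inlined, to not get tracer function calls
         * in this critical section:
         */
        spin_acquire(&time_sync_lock.dep_map, 0, 0, _THIS_IP_);
        __raw_spin_lock(&time_sync_lock.raw_lock);

        if (time < prev_global_time) {
                per_cpu(time_offset, cpu) += prev_global_time - time;
                time = prev_global_time;        /* inferred, not shown in the hunks */
        } else {                                /* inferred, not shown in the hunks */
                prev_global_time = time;
        }

        __raw_spin_unlock(&time_sync_lock.raw_lock);
        spin_release(&time_sync_lock.dep_map, 1, _THIS_IP_);

        return time;
}

Interrupts are no longer disabled here; __sync_cpu_clock() now relies on its caller, cpu_clock(), having done local_irq_save() (see the last hunk below).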
@@ -931,7 +935,6 @@ static unsigned long long __sync_cpu_clock(cycles_t time, int cpu)
 static unsigned long long __cpu_clock(int cpu)
 {
         unsigned long long now;
-        unsigned long flags;
         struct rq *rq;
 
         /*
@@ -941,11 +944,9 @@ static unsigned long long __cpu_clock(int cpu)
         if (unlikely(!scheduler_running))
                 return 0;
 
-        local_irq_save(flags);
         rq = cpu_rq(cpu);
         update_rq_clock(rq);
         now = rq->clock;
-        local_irq_restore(flags);
 
         return now;
 }
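With its local_irq_save()/local_irq_restore() pair removed by the two hunks above, __cpu_clock() becomes a plain read helper that assumes its caller has already disabled interrupts. Reassembled, it reads roughly as follows; the body of the comment between the hunks is not visible in the diff, so a paraphrase stands in for it:

static unsigned long long __cpu_clock(int cpu)
{
        unsigned long long now;
        struct rq *rq;

        /*
         * (Paraphrased; the original comment body is not shown in the
         * diff.) Bail out early if the scheduler is not running yet,
         * since cpu_clock() can be called very early in boot:
         */
        if (unlikely(!scheduler_running))
                return 0;

        rq = cpu_rq(cpu);
        update_rq_clock(rq);
        now = rq->clock;

        return now;
}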
@@ -957,13 +958,18 @@ static unsigned long long __cpu_clock(int cpu)
 unsigned long long cpu_clock(int cpu)
 {
         unsigned long long prev_cpu_time, time, delta_time;
+        unsigned long flags;
 
+        local_irq_save(flags);
         prev_cpu_time = per_cpu(prev_cpu_time, cpu);
         time = __cpu_clock(cpu) + per_cpu(time_offset, cpu);
         delta_time = time-prev_cpu_time;
 
-        if (unlikely(delta_time > time_sync_thresh))
+        if (unlikely(delta_time > time_sync_thresh)) {
                 time = __sync_cpu_clock(time, cpu);
+                per_cpu(prev_cpu_time, cpu) = time;
+        }
+        local_irq_restore(flags);
 
         return time;
 }
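Every line of this final hunk is visible, so the resulting cpu_clock() can be reassembled directly. A single irq-disabled region now covers the per-CPU reads, the clock sample in __cpu_clock(), and the eventual resync; the new per_cpu(prev_cpu_time, cpu) store also records the synced value, so a CPU that has just resynced does not trip the time_sync_thresh check again on its very next call:

unsigned long long cpu_clock(int cpu)
{
        unsigned long long prev_cpu_time, time, delta_time;
        unsigned long flags;

        local_irq_save(flags);
        prev_cpu_time = per_cpu(prev_cpu_time, cpu);
        time = __cpu_clock(cpu) + per_cpu(time_offset, cpu);
        delta_time = time-prev_cpu_time;

        if (unlikely(delta_time > time_sync_thresh)) {
                time = __sync_cpu_clock(time, cpu);
                per_cpu(prev_cpu_time, cpu) = time;
        }
        local_irq_restore(flags);

        return time;
}

Taken together, the patch hoists interrupt disabling from the two helpers into this single outer section, which is what lets __sync_cpu_clock() drop spin_lock_irqsave() in favor of the untraced raw lock.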