Diffstat (limited to 'kernel/sched_clock.c')
 kernel/sched_clock.c | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index 390f33234bd0..e1d16c9a7680 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -25,6 +25,7 @@
  * consistent between cpus (never more than 2 jiffies difference).
  */
 #include <linux/spinlock.h>
+#include <linux/hardirq.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/ktime.h>
@@ -37,7 +38,8 @@
  */
 unsigned long long __attribute__((weak)) sched_clock(void)
 {
-        return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
+        return (unsigned long long)(jiffies - INITIAL_JIFFIES)
+                                        * (NSEC_PER_SEC / HZ);
 }
 
 static __read_mostly int sched_clock_running;
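
Note on the hunk above: jiffies does not start at zero; the kernel initialises it to INITIAL_JIFFIES, a value roughly five minutes before the 32-bit wrap point so that wrap bugs surface early, which is why the new code subtracts it before scaling to nanoseconds. The standalone userspace sketch below only illustrates that arithmetic; HZ=250, the INITIAL_JIFFIES definition and the helper names are assumptions for illustration, not kernel code.

/* Userspace sketch of the fallback sched_clock() arithmetic above.
 * HZ=250 and the INITIAL_JIFFIES definition are assumed values,
 * copied here only to show why subtracting the offset makes the
 * fallback clock count from (roughly) zero at boot.
 */
#include <stdio.h>

#define HZ              250
#define NSEC_PER_SEC    1000000000ULL
/* jiffies is started about 5 minutes before the 32-bit wrap point */
#define INITIAL_JIFFIES ((unsigned long)(unsigned int)(-300 * HZ))

static unsigned long long fallback_sched_clock(unsigned long jiffies_now)
{
        /* same expression as the patched weak sched_clock() */
        return (unsigned long long)(jiffies_now - INITIAL_JIFFIES)
                                        * (NSEC_PER_SEC / HZ);
}

int main(void)
{
        /* one second of uptime = HZ ticks past the initial value */
        unsigned long jiffies_now = INITIAL_JIFFIES + HZ;

        printf("ns per jiffy       : %llu\n", NSEC_PER_SEC / HZ);
        printf("clock after 1s     : %llu ns\n",
               fallback_sched_clock(jiffies_now));
        printf("old formula at boot: %llu ns\n",
               (unsigned long long)INITIAL_JIFFIES * (NSEC_PER_SEC / HZ));
        return 0;
}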
@@ -154,6 +156,17 @@ u64 sched_clock_cpu(int cpu)
                 return sched_clock();
 
         scd = cpu_sdc(cpu);
+
+        /*
+         * Normally this is not called in NMI context - but if it is,
+         * trying to do any locking here is totally lethal.
+         */
+        if (unlikely(in_nmi()))
+                return scd->clock;
+
+        if (unlikely(!sched_clock_running))
+                return 0ull;
+
         WARN_ON_ONCE(!irqs_disabled());
         now = sched_clock();
 
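
Note on the hunk above: the in_nmi() bail-out exists because the rest of sched_clock_cpu() takes the per-CPU lock, and an NMI can fire while the interrupted code on the same CPU already holds it; re-taking the lock from NMI context would deadlock (the "totally lethal" in the comment), so the NMI path simply returns the last published scd->clock. Below is a standalone sketch of that ordering; every name in it (clock_cpu, in_nmi_ctx, clock_running, raw_ns) is an illustrative stand-in, not the kernel's code.

/* Standalone sketch of the entry checks added to sched_clock_cpu()
 * in the hunk above. All names are illustrative stand-ins; the point
 * is only the ordering: an NMI-context caller must return the last
 * cached value before any path that could take the per-CPU lock,
 * because the NMI may have interrupted the lock holder itself.
 */
#include <stdbool.h>
#include <stdio.h>

struct clock_data {
        unsigned long long clock;        /* last published timestamp */
        bool locked;                     /* stand-in for the per-CPU lock */
};

static bool in_nmi_ctx;                  /* stand-in for in_nmi() */
static bool clock_running = true;        /* stand-in for sched_clock_running */
static unsigned long long raw_ns = 1000; /* stand-in for sched_clock() */

static unsigned long long clock_cpu(struct clock_data *scd)
{
        if (in_nmi_ctx)                  /* never touch the lock from NMI */
                return scd->clock;       /* may be slightly stale - that is fine */

        if (!clock_running)
                return 0ULL;

        scd->locked = true;              /* normal path: lock, refresh, publish */
        scd->clock = raw_ns;
        scd->locked = false;
        return scd->clock;
}

int main(void)
{
        struct clock_data scd = { .clock = 0, .locked = false };

        printf("normal context: %llu\n", clock_cpu(&scd));   /* prints 1000 */
        in_nmi_ctx = true;
        raw_ns = 2000;
        printf("NMI context   : %llu\n", clock_cpu(&scd));   /* cached 1000 */
        return 0;
}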