Diffstat (limited to 'kernel/sched_clock.c')
 kernel/sched_clock.c | 12 ++++++++++++
 1 file changed, 12 insertions(+), 0 deletions(-)
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index a755d023805a..7ec82c1c61c5 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -25,6 +25,7 @@
  * consistent between cpus (never more than 2 jiffies difference).
  */
 #include <linux/spinlock.h>
+#include <linux/hardirq.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/ktime.h>
@@ -162,6 +163,17 @@ u64 sched_clock_cpu(int cpu)
         return sched_clock();
 
         scd = cpu_sdc(cpu);
+
+        /*
+         * Normally this is not called in NMI context - but if it is,
+         * trying to do any locking here is totally lethal.
+         */
+        if (unlikely(in_nmi()))
+                return scd->clock;
+
+        if (unlikely(!sched_clock_running))
+                return 0ull;
+
         WARN_ON_ONCE(!irqs_disabled());
         now = sched_clock();
 
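
For readers following along, below is a rough sketch of how the guarded prologue of sched_clock_cpu() reads with this change applied. The struct layout, the cpu_sdc() body and the sched_clock_running flag are simplified stand-ins rather than the kernel's exact definitions; only the control flow added by the diff is reproduced.

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>

/*
 * Illustrative stand-in for the real per-cpu clock state; only the
 * field the new guard reads is shown.
 */
struct sched_clock_data {
        u64 clock;              /* last clock value published for this cpu */
};

static DEFINE_PER_CPU(struct sched_clock_data, sched_clock_data);
static int sched_clock_running; /* stand-in for the real init flag */

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
        return &per_cpu(sched_clock_data, cpu);
}

u64 sched_clock_cpu(int cpu)
{
        struct sched_clock_data *scd = cpu_sdc(cpu);

        /*
         * An NMI can fire while this cpu already holds the clock lock,
         * so taking any lock here could deadlock.  Return the last
         * published value instead, accepting that it may be stale.
         */
        if (unlikely(in_nmi()))
                return scd->clock;

        if (unlikely(!sched_clock_running))
                return 0ull;

        /*
         * ... the locked update path of the real function continues
         * here, with interrupts disabled ...
         */
        return scd->clock;
}

The design point is that an NMI can interrupt a CPU that already holds the per-cpu clock lock, so locking from NMI context risks a self-deadlock; returning the last published scd->clock trades a possibly stale reading for guaranteed progress.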