path: root/kernel/sched_clock.c
Diffstat (limited to 'kernel/sched_clock.c')
-rw-r--r--  kernel/sched_clock.c  45
1 file changed, 20 insertions(+), 25 deletions(-)
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index a0b0852414cc..a755d023805a 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -24,11 +24,11 @@
  * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
  * consistent between cpus (never more than 2 jiffies difference).
  */
-#include <linux/sched.h>
-#include <linux/percpu.h>
 #include <linux/spinlock.h>
-#include <linux/ktime.h>
 #include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/ktime.h>
+#include <linux/sched.h>
 
 /*
  * Scheduler clock - returns current time in nanosec units.
@@ -43,6 +43,10 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 static __read_mostly int sched_clock_running;
 
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+__read_mostly int sched_clock_stable;
+#else
+static const int sched_clock_stable = 1;
+#endif
 
 struct sched_clock_data {
 	/*
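
Note: the hunk above introduces a "Kconfig-selected constant" idiom: when CONFIG_HAVE_UNSTABLE_SCHED_CLOCK is not set, sched_clock_stable is a compile-time constant 1, so any branch testing it can be folded away by the compiler. A minimal user-space sketch of the idiom, not part of the patch (the printf strings are purely illustrative):

#include <stdio.h>

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
int sched_clock_stable;				/* runtime flag, set by arch code */
#else
static const int sched_clock_stable = 1;	/* constant: branches on it fold away */
#endif

int main(void)
{
	if (sched_clock_stable)
		printf("fast path: trust the raw clock\n");
	else
		printf("slow path: per-cpu filtering needed\n");	/* dead code when const 1 */
	return 0;
}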
@@ -87,7 +91,7 @@ void sched_clock_init(void)
 }
 
 /*
- * min,max except they take wrapping into account
+ * min, max except they take wrapping into account
  */
 
 static inline u64 wrap_min(u64 x, u64 y)
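
For context, the wrap-aware helpers this comment documents look like this in the file around this revision (quoted for reference; the hunk only touches the comment). Comparing the signed difference instead of the raw u64 values picks the "earlier" value even across a counter wrap, as long as the two values are within 2^63 of each other:

static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;	/* x earlier than y in wrap-aware order */
}

static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;	/* x later than y in wrap-aware order */
}

For example, wrap_min(10, 0xFFFFFFFFFFFFFFF0ULL) returns the near-maximum value, treating 10 as having wrapped past it, whereas a naive min would return 10.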
@@ -116,10 +120,13 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
 	if (unlikely(delta < 0))
 		delta = 0;
 
+	if (unlikely(!sched_clock_running))
+		return 0ull;
+
 	/*
 	 * scd->clock = clamp(scd->tick_gtod + delta,
 	 *		      max(scd->tick_gtod, scd->clock),
 	 *		      scd->tick_gtod + TICK_NSEC);
 	 */
 
 	clock = scd->tick_gtod + delta;
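
The clamp in the comment above keeps the per-cpu clock monotonic (never below max(tick_gtod, previous clock)) while capping how far it can run ahead (at most one tick past the last GTOD stamp). A self-contained sketch with made-up numbers; the typedefs, the 1 ms TICK_NSEC, and all values are assumptions for illustration, not kernel values:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef int64_t s64;

static inline u64 wrap_min(u64 x, u64 y) { return (s64)(x - y) < 0 ? x : y; }
static inline u64 wrap_max(u64 x, u64 y) { return (s64)(x - y) > 0 ? x : y; }

#define TICK_NSEC 1000000ull			/* assume a 1 ms tick for the example */

int main(void)
{
	u64 tick_gtod  = 5000000ull;		/* GTOD stamp at the last tick */
	u64 prev_clock = 5200000ull;		/* clock value returned last time */
	u64 delta      = 3000000ull;		/* raw clock advance since the tick */

	/* clamp(tick_gtod + delta, max(tick_gtod, prev_clock), tick_gtod + TICK_NSEC) */
	u64 clock = tick_gtod + delta;				   /* 8000000: ran ahead */
	clock = wrap_max(clock, wrap_max(tick_gtod, prev_clock)); /* >= 5200000: stays monotonic */
	clock = wrap_min(clock, tick_gtod + TICK_NSEC);		   /* <= 6000000: capped at one tick */

	printf("%llu\n", (unsigned long long)clock);		   /* prints 6000000 */
	return 0;
}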
@@ -148,12 +155,13 @@ static void lock_double_clock(struct sched_clock_data *data1,
 
 u64 sched_clock_cpu(int cpu)
 {
-	struct sched_clock_data *scd = cpu_sdc(cpu);
 	u64 now, clock, this_clock, remote_clock;
+	struct sched_clock_data *scd;
 
-	if (unlikely(!sched_clock_running))
-		return 0ull;
+	if (sched_clock_stable)
+		return sched_clock();
 
+	scd = cpu_sdc(cpu);
 	WARN_ON_ONCE(!irqs_disabled());
 	now = sched_clock();
 
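
Net effect of this hunk: when the architecture declares a stable clock, sched_clock_cpu() degenerates to a direct sched_clock() call, and the cpu_sdc() lookup moves below the early return so the fast path never touches per-cpu data. A compilable sketch of the resulting shape; the stub bodies are hypothetical stand-ins, not the kernel implementation:

#include <stdint.h>
typedef uint64_t u64;

static const int sched_clock_stable = 1;	/* as if the arch declared stability */

static u64 sched_clock(void)
{
	return 0;				/* stand-in for the raw arch clock */
}

static u64 sched_clock_cpu_slow(int cpu)
{
	(void)cpu;
	return 0;				/* stand-in for the clamping/locking path */
}

u64 sched_clock_cpu(int cpu)
{
	if (sched_clock_stable)			/* constant-folded: always taken here */
		return sched_clock();

	return sched_clock_cpu_slow(cpu);	/* elided by the compiler in this config */
}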
@@ -193,6 +201,8 @@ u64 sched_clock_cpu(int cpu)
 	return clock;
 }
 
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+
 void sched_clock_tick(void)
 {
 	struct sched_clock_data *scd = this_scd();
@@ -235,22 +245,7 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
-#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
-
-void sched_clock_init(void)
-{
-	sched_clock_running = 1;
-}
-
-u64 sched_clock_cpu(int cpu)
-{
-	if (unlikely(!sched_clock_running))
-		return 0;
-
-	return sched_clock();
-}
-
-#endif
+#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
 unsigned long long cpu_clock(int cpu)
 {