 include/linux/sched.h |   10 ++++++++++
 kernel/sched_clock.c  |   45 ++++++++++++++++++++-------------------------
 2 files changed, 30 insertions(+), 25 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8981e52c714f..a063d19b7a7d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1670,6 +1670,16 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 	return set_cpus_allowed_ptr(p, &new_mask);
 }
 
+/*
+ * Architectures can set this to 1 if they have specified
+ * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
+ * but then during bootup it turns out that sched_clock()
+ * is reliable after all:
+ */
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+extern int sched_clock_stable;
+#endif
+
 extern unsigned long long sched_clock(void);
 
 extern void sched_clock_init(void);
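
The flag declared above defaults to 0 on CONFIG_HAVE_UNSTABLE_SCHED_CLOCK architectures, so arch code that later proves its clock trustworthy has to flip it during bootup. A minimal sketch of what such boot code could look like; clocksource_is_reliable() is a hypothetical stand-in for an arch-specific check (e.g. an invariant-TSC feature test) and is not part of this patch:

/*
 * Sketch only: clocksource_is_reliable() is hypothetical, standing in
 * for whatever test the architecture uses to validate sched_clock().
 */
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static void __init check_sched_clock(void)
{
	if (clocksource_is_reliable())
		sched_clock_stable = 1;
}
#endif
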
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index a0b0852414cc..a755d023805a 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -24,11 +24,11 @@
  * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
  * consistent between cpus (never more than 2 jiffies difference).
  */
-#include <linux/sched.h>
-#include <linux/percpu.h>
 #include <linux/spinlock.h>
-#include <linux/ktime.h>
 #include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/ktime.h>
+#include <linux/sched.h>
 
 /*
  * Scheduler clock - returns current time in nanosec units.
@@ -43,6 +43,10 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 static __read_mostly int sched_clock_running;
 
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+__read_mostly int sched_clock_stable;
+#else
+static const int sched_clock_stable = 1;
+#endif
 
 struct sched_clock_data {
 	/*
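
The #else arm above is the interesting trick: on architectures that never select CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, sched_clock_stable becomes a compile-time constant 1, so every `if (sched_clock_stable)` test folds away and the unstable path is eliminated as dead code. A self-contained userspace sketch of the same pattern (every identifier invented for illustration):

#include <stdio.h>

/* Illustrative only; all names here are made up for this sketch. */
#ifdef HAVE_UNSTABLE_CLOCK
int clock_stable;				/* writable at runtime */
#else
static const int clock_stable = 1;		/* constant: test folds away */
#endif

static unsigned long long raw_ns(void)      { return 1000; }	/* stub */
static unsigned long long filtered_ns(void) { return  999; }	/* stub */

static unsigned long long read_ns(void)
{
	if (clock_stable)		/* constant-folded when const */
		return raw_ns();
	return filtered_ns();		/* dead code in the stable build */
}

int main(void)
{
	printf("%llu\n", read_ns());
	return 0;
}
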
@@ -87,7 +91,7 @@ void sched_clock_init(void)
 }
 
 /*
- * min,max except they take wrapping into account
+ * min, max except they take wrapping into account
  */
 
 static inline u64 wrap_min(u64 x, u64 y)
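
For context, the two wrap-aware helpers (wrap_min() above and its wrap_max() counterpart) compare through a signed subtraction, which stays correct even when the u64 operands straddle a wrap point. Their definitions in this file at the time read roughly:

static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}
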
@@ -116,10 +120,13 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
 	if (unlikely(delta < 0))
 		delta = 0;
 
+	if (unlikely(!sched_clock_running))
+		return 0ull;
+
 	/*
 	 * scd->clock = clamp(scd->tick_gtod + delta,
 	 *		      max(scd->tick_gtod, scd->clock),
 	 *		      scd->tick_gtod + TICK_NSEC);
 	 */
 
 	clock = scd->tick_gtod + delta;
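
The comment in this hunk states the clamp abstractly; the code immediately below it, unchanged by this patch and reproduced roughly from the surrounding file, implements it with the wrap-aware helpers: never let the per-cpu clock go backwards, and never let it run more than TICK_NSEC ahead of the last tick:

	clock = scd->tick_gtod + delta;
	min_clock = wrap_max(scd->tick_gtod, scd->clock);
	max_clock = scd->tick_gtod + TICK_NSEC;

	clock = wrap_max(clock, min_clock);	/* monotonic: never below old clock */
	clock = wrap_min(clock, max_clock);	/* bounded: at most one tick ahead */

	scd->clock = clock;
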
@@ -148,12 +155,13 @@ static void lock_double_clock(struct sched_clock_data *data1,
 
 u64 sched_clock_cpu(int cpu)
 {
-	struct sched_clock_data *scd = cpu_sdc(cpu);
 	u64 now, clock, this_clock, remote_clock;
+	struct sched_clock_data *scd;
 
-	if (unlikely(!sched_clock_running))
-		return 0ull;
+	if (sched_clock_stable)
+		return sched_clock();
 
+	scd = cpu_sdc(cpu);
 	WARN_ON_ONCE(!irqs_disabled());
 	now = sched_clock();
 
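
Note the ordering in this hunk: the cpu_sdc(cpu) dereference moves below the new fast path, so a stable clock returns the raw sched_clock() without touching per-cpu state or taking any lock. Below this hunk, the unchanged remote-cpu branch still couples the local and remote per-cpu clocks while holding both scd locks; lock_double_clock(), named in the hunk header, avoids ABBA deadlock by taking the two locks in address order. From the surrounding file it reads roughly:

static void lock_double_clock(struct sched_clock_data *data1,
			      struct sched_clock_data *data2)
{
	if (data1 < data2) {
		__raw_spin_lock(&data1->lock);
		__raw_spin_lock(&data2->lock);
	} else {
		__raw_spin_lock(&data2->lock);
		__raw_spin_lock(&data1->lock);
	}
}
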
@@ -193,6 +201,8 @@ u64 sched_clock_cpu(int cpu)
 	return clock;
 }
 
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+
 void sched_clock_tick(void)
 {
 	struct sched_clock_data *scd = this_scd();
@@ -235,22 +245,7 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
-#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
-
-void sched_clock_init(void)
-{
-	sched_clock_running = 1;
-}
-
-u64 sched_clock_cpu(int cpu)
-{
-	if (unlikely(!sched_clock_running))
-		return 0;
-
-	return sched_clock();
-}
-
-#endif
+#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
 unsigned long long cpu_clock(int cpu)
 {
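
cpu_clock(), where the diff ends, is untouched by this patch: it is a thin wrapper that disables interrupts around sched_clock_cpu(), which satisfies the WARN_ON_ONCE(!irqs_disabled()) check on the unstable path. From the file of this era, roughly:

unsigned long long cpu_clock(int cpu)
{
	unsigned long long clock;
	unsigned long flags;

	local_irq_save(flags);
	clock = sched_clock_cpu(cpu);
	local_irq_restore(flags);

	return clock;
}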