author	Ingo Molnar <mingo@elte.hu>	2009-02-26 15:21:59 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-02-27 02:35:19 -0500
commit	1b49061d400c9e51e3ac2aac026a099fe599b9bb (patch)
tree	54c632cd7f0be2573897c1463a247e69fb769940
parent	14131f2f98ac350ee9e73faed916d2238a8b6a0d (diff)
parent	83ce400928680a6c8123d492684b27857f5a2d95 (diff)
Merge branch 'sched/clock' into tracing/ftrace
Conflicts:
	kernel/sched_clock.c
-rw-r--r--	arch/x86/kernel/cpu/intel.c	8
-rw-r--r--	include/linux/sched.h	10
-rw-r--r--	kernel/sched_clock.c	47
3 files changed, 40 insertions(+), 25 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 24ff26a38ade..5fff00c70de0 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -4,6 +4,7 @@
 #include <linux/string.h>
 #include <linux/bitops.h>
 #include <linux/smp.h>
+#include <linux/sched.h>
 #include <linux/thread_info.h>
 #include <linux/module.h>
 
@@ -56,11 +57,16 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 
 	/*
 	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
-	 * with P/T states and does not stop in deep C-states
+	 * with P/T states and does not stop in deep C-states.
+	 *
+	 * It is also reliable across cores and sockets. (but not across
+	 * cabinets - we turn it off in that case explicitly.)
 	 */
 	if (c->x86_power & (1 << 8)) {
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
+		set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
+		sched_clock_stable = 1;
 	}
 
 }
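
[ Note: the bit tested above is CPUID leaf 0x80000007 (Advanced Power
Management) EDX bit 8, the "invariant TSC" flag; c->x86_power caches that
EDX word. A minimal user-space sketch of the same check, for illustration
only (the helper name has_invariant_tsc is hypothetical, not part of this
patch):

	#include <cpuid.h>

	static int has_invariant_tsc(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* CPUID 0x80000007: Advanced Power Management feature bits */
		if (!__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx))
			return 0;

		return (edx >> 8) & 1;	/* bit 8: TSC runs at a constant rate */
	}
]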
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 426666dd8203..7702cb166e08 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1672,6 +1672,16 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 	return set_cpus_allowed_ptr(p, &new_mask);
 }
 
+/*
+ * Architectures can set this to 1 if they have specified
+ * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
+ * but then during bootup it turns out that sched_clock()
+ * is reliable after all:
+ */
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+extern int sched_clock_stable;
+#endif
+
 extern unsigned long long sched_clock(void);
 
 extern void sched_clock_init(void);
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index db69174b1178..7ec82c1c61c5 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -24,12 +24,12 @@
  * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
  * consistent between cpus (never more than 2 jiffies difference).
  */
-#include <linux/sched.h>
-#include <linux/percpu.h>
 #include <linux/spinlock.h>
-#include <linux/ktime.h>
-#include <linux/module.h>
 #include <linux/hardirq.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/ktime.h>
+#include <linux/sched.h>
 
 /*
  * Scheduler clock - returns current time in nanosec units.
@@ -44,6 +44,10 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 static __read_mostly int sched_clock_running;
 
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+__read_mostly int sched_clock_stable;
+#else
+static const int sched_clock_stable = 1;
+#endif
 
 struct sched_clock_data {
 	/*
@@ -88,7 +92,7 @@ void sched_clock_init(void)
 }
 
 /*
- * min,max except they take wrapping into account
+ * min, max except they take wrapping into account
  */
 
 static inline u64 wrap_min(u64 x, u64 y)
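
[ Note: for reference, the wrap-aware helpers compare through a signed
subtraction, so the ordering stays correct even when the u64 clock wraps
around; roughly:

	static inline u64 wrap_min(u64 x, u64 y)
	{
		return (s64)(x - y) < 0 ? x : y;
	}

	static inline u64 wrap_max(u64 x, u64 y)
	{
		return (s64)(x - y) > 0 ? x : y;
	}
]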
@@ -117,10 +121,13 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
 	if (unlikely(delta < 0))
 		delta = 0;
 
+	if (unlikely(!sched_clock_running))
+		return 0ull;
+
 	/*
	 * scd->clock = clamp(scd->tick_gtod + delta,
	 *		      max(scd->tick_gtod, scd->clock),
	 *		      scd->tick_gtod + TICK_NSEC);
	 */
 
 	clock = scd->tick_gtod + delta;
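
[ Note: expanded with the wrap-aware helpers above, the clamp sketched in
the comment comes out as roughly:

	clock = scd->tick_gtod + delta;
	min_clock = wrap_max(scd->tick_gtod, scd->clock);
	max_clock = scd->tick_gtod + TICK_NSEC;
	clock = wrap_max(clock, min_clock);	/* never go backwards */
	clock = wrap_min(clock, max_clock);	/* at most one tick ahead */

i.e. the per-cpu clock never moves backwards and never runs more than
TICK_NSEC ahead of the last tick's GTOD timestamp. ]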
@@ -149,8 +156,13 @@ static void lock_double_clock(struct sched_clock_data *data1,
 
 u64 sched_clock_cpu(int cpu)
 {
-	struct sched_clock_data *scd = cpu_sdc(cpu);
 	u64 now, clock, this_clock, remote_clock;
+	struct sched_clock_data *scd;
+
+	if (sched_clock_stable)
+		return sched_clock();
+
+	scd = cpu_sdc(cpu);
 
 	/*
	 * Normally this is not called in NMI context - but if it is,
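
[ Note: this is the fast path the merge is about: with a constant,
non-stop TSC the raw sched_clock() value is already consistent across
cpus, so the per-cpu lookup, cross-cpu locking and clamping below can be
skipped entirely. Schematically:

	u64 sched_clock_cpu(int cpu)
	{
		if (sched_clock_stable)	/* reliable TSC: use it directly */
			return sched_clock();

		/* ... slow path: per-cpu clamped clock, as before ... */
	}
]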
@@ -201,6 +213,8 @@ u64 sched_clock_cpu(int cpu)
 	return clock;
 }
 
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+
 void sched_clock_tick(void)
 {
 	struct sched_clock_data *scd = this_scd();
@@ -243,22 +257,7 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
-#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
-
-void sched_clock_init(void)
-{
-	sched_clock_running = 1;
-}
-
-u64 sched_clock_cpu(int cpu)
-{
-	if (unlikely(!sched_clock_running))
-		return 0;
-
-	return sched_clock();
-}
-
-#endif
+#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
 unsigned long long cpu_clock(int cpu)
 {
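
[ Note: the #else implementations deleted above are now redundant: on
!CONFIG_HAVE_UNSTABLE_SCHED_CLOCK architectures sched_clock_stable is a
compile-time const 1, so the shared sched_clock_cpu() effectively folds
down to:

	u64 sched_clock_cpu(int cpu)
	{
		return sched_clock();	/* stable test is constant-true */
	}

with the unstable slow path eliminated as dead code. ]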