author     Alok Kataria <akataria@vmware.com>   2008-07-01 14:43:18 -0400
committer  Ingo Molnar <mingo@elte.hu>          2008-07-09 01:43:25 -0400
commit     0ef95533326a7b37d16025af9edc0c18e644b346
tree       216e53f744b9bd718c4f54862032c241bf59fd73 /arch
parent     746f2eb790e75676ddc3b816ba18bac4179cc744
x86: merge sched_clock handling
Move the basic global variable definitions and sched_clock handling into the common "tsc.c" file.

- Unify "notsc" kernel command line handling for 32-bit and 64-bit.
- Functional changes for 64-bit:
  - "tsc_disabled" is updated if "notsc" is passed at boot time.
  - Fall back to jiffies for sched_clock in case "notsc" is passed on the command line.

Signed-off-by: Alok N Kataria <akataria@vmware.com>
Signed-off-by: Dan Hecht <dhecht@vmware.com>
Cc: Dan Hecht <dhecht@vmware.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
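To illustrate the fallback the last bullet describes: when "notsc" is in effect, sched_clock() counts whole ticks, so its resolution drops to one jiffy. A minimal standalone sketch of that arithmetic (user-space, with a hypothetical HZ of 250; the kernel uses jiffies_64 - INITIAL_JIFFIES rather than a raw tick count):

#include <stdio.h>
#include <stdint.h>

#define HZ 250                          /* hypothetical tick rate */
#define NSEC_PER_SEC 1000000000ULL

/* Mirrors (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ):
 * the clock only advances in whole-tick (4 ms at HZ=250) steps. */
static uint64_t jiffies_sched_clock(uint64_t ticks_since_boot)
{
	return ticks_since_boot * (NSEC_PER_SEC / HZ);
}

int main(void)
{
	printf("%llu ns after 3 ticks\n",
	       (unsigned long long)jiffies_sched_clock(3)); /* 12000000 */
	return 0;
}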
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/Makefile   |  2
-rw-r--r--  arch/x86/kernel/time_32.c  |  3
-rw-r--r--  arch/x86/kernel/tsc.c      | 86
-rw-r--r--  arch/x86/kernel/tsc_32.c   | 86
-rw-r--r--  arch/x86/kernel/tsc_64.c   | 67
5 files changed, 100 insertions(+), 144 deletions(-)
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 54829e2b5160..ca904ee17252 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -26,7 +26,7 @@ obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o
 obj-y				+= bootflag.o e820.o
 obj-y				+= pci-dma.o quirks.o i8237.o topology.o kdebugfs.o
 obj-y				+= alternative.o i8253.o pci-nommu.o
-obj-y				+= tsc_$(BITS).o io_delay.o rtc.o
+obj-y				+= tsc_$(BITS).o io_delay.o rtc.o tsc.o
 
 obj-$(CONFIG_X86_TRAMPOLINE)	+= trampoline.o
 obj-y				+= process.o
diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c
index 5f29f12da50c..059ca6ee59b4 100644
--- a/arch/x86/kernel/time_32.c
+++ b/arch/x86/kernel/time_32.c
@@ -39,9 +39,6 @@
 
 #include "do_timer.h"
 
-unsigned int cpu_khz;	/* Detected as we calibrate the TSC */
-EXPORT_SYMBOL(cpu_khz);
-
 int timer_ack;
 
 unsigned long profile_pc(struct pt_regs *regs)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
new file mode 100644
index 000000000000..5d0be778fadd
--- /dev/null
+++ b/arch/x86/kernel/tsc.c
@@ -0,0 +1,86 @@
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+
+unsigned int cpu_khz;	/* TSC clocks / usec, not used here */
+EXPORT_SYMBOL(cpu_khz);
+unsigned int tsc_khz;
+EXPORT_SYMBOL(tsc_khz);
+
+/*
+ * TSC can be unstable due to cpufreq or due to unsynced TSCs
+ */
+int tsc_unstable;
+
+/* native_sched_clock() is called before tsc_init(), so
+   we must start with the TSC soft disabled to prevent
+   erroneous rdtsc usage on !cpu_has_tsc processors */
+int tsc_disabled = -1;
+
+/*
+ * Scheduler clock - returns current time in nanosec units.
+ */
+u64 native_sched_clock(void)
+{
+	u64 this_offset;
+
+	/*
+	 * Fall back to jiffies if there's no TSC available:
+	 * ( But note that we still use it if the TSC is marked
+	 *   unstable. We do this because unlike Time Of Day,
+	 *   the scheduler clock tolerates small errors and it's
+	 *   very important for it to be as fast as the platform
+	 *   can achieve it. )
+	 */
+	if (unlikely(tsc_disabled)) {
+		/* No locking but a rare wrong value is not a big deal: */
+		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
+	}
+
+	/* read the Time Stamp Counter: */
+	rdtscll(this_offset);
+
+	/* return the value in ns */
+	return cycles_2_ns(this_offset);
+}
+
+/* We need to define a real function for sched_clock, to override the
+   weak default version */
+#ifdef CONFIG_PARAVIRT
+unsigned long long sched_clock(void)
+{
+	return paravirt_sched_clock();
+}
+#else
+unsigned long long
+sched_clock(void) __attribute__((alias("native_sched_clock")));
+#endif
+
+int check_tsc_unstable(void)
+{
+	return tsc_unstable;
+}
+EXPORT_SYMBOL_GPL(check_tsc_unstable);
+
+#ifdef CONFIG_X86_TSC
+int __init notsc_setup(char *str)
+{
+	printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
+	       "cannot disable TSC completely.\n");
+	tsc_disabled = 1;
+	return 1;
+}
+#else
+/*
+ * disable flag for tsc. Takes effect by clearing the TSC cpu flag
+ * in cpu/common.c
+ */
+int __init notsc_setup(char *str)
+{
+	setup_clear_cpu_cap(X86_FEATURE_TSC);
+	return 1;
+}
+#endif
+
+__setup("notsc", notsc_setup);
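A side note on the !CONFIG_PARAVIRT branch above: sched_clock() has a weak default elsewhere in the kernel, and the alias attribute lets the strong definition here replace it at link time without the cost of an extra call frame. A minimal two-file sketch of the same linkage trick (hypothetical demo names, not kernel code):

/* default.c -- a weak fallback, as a generic layer might ship */
__attribute__((weak)) unsigned long long demo_clock(void)
{
	return 0;	/* used only if no strong definition exists */
}

/* override.c -- a strong alias wins over the weak symbol at link time */
unsigned long long native_demo_clock(void)
{
	return 42;
}
unsigned long long demo_clock(void)
	__attribute__((alias("native_demo_clock")));

/* Linking both files with any caller, demo_clock() returns 42. */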
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
index 6240922e497c..dc8990056d75 100644
--- a/arch/x86/kernel/tsc_32.c
+++ b/arch/x86/kernel/tsc_32.c
@@ -15,52 +15,8 @@
 
 #include "mach_timer.h"
 
-/* native_sched_clock() is called before tsc_init(), so
-   we must start with the TSC soft disabled to prevent
-   erroneous rdtsc usage on !cpu_has_tsc processors */
-static int tsc_disabled = -1;
-
-/*
- * On some systems the TSC frequency does not
- * change with the cpu frequency. So we need
- * an extra value to store the TSC freq
- */
-unsigned int tsc_khz;
-EXPORT_SYMBOL_GPL(tsc_khz);
-
-#ifdef CONFIG_X86_TSC
-static int __init tsc_setup(char *str)
-{
-	printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
-	       "cannot disable TSC completely.\n");
-	tsc_disabled = 1;
-	return 1;
-}
-#else
-/*
- * disable flag for tsc. Takes effect by clearing the TSC cpu flag
- * in cpu/common.c
- */
-static int __init tsc_setup(char *str)
-{
-	setup_clear_cpu_cap(X86_FEATURE_TSC);
-	return 1;
-}
-#endif
-
-__setup("notsc", tsc_setup);
-
-/*
- * code to mark and check if the TSC is unstable
- * due to cpufreq or due to unsynced TSCs
- */
-static int tsc_unstable;
-
-int check_tsc_unstable(void)
-{
-	return tsc_unstable;
-}
-EXPORT_SYMBOL_GPL(check_tsc_unstable);
+extern int tsc_unstable;
+extern int tsc_disabled;
 
 /* Accelerators for sched_clock()
  * convert from cycles(64bits) => nanoseconds (64bits)
@@ -109,44 +65,6 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 	local_irq_restore(flags);
 }
 
-/*
- * Scheduler clock - returns current time in nanosec units.
- */
-unsigned long long native_sched_clock(void)
-{
-	unsigned long long this_offset;
-
-	/*
-	 * Fall back to jiffies if there's no TSC available:
-	 * ( But note that we still use it if the TSC is marked
-	 *   unstable. We do this because unlike Time Of Day,
-	 *   the scheduler clock tolerates small errors and it's
-	 *   very important for it to be as fast as the platform
-	 *   can achive it. )
-	 */
-	if (unlikely(tsc_disabled))
-		/* No locking but a rare wrong value is not a big deal: */
-		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
-
-	/* read the Time Stamp Counter: */
-	rdtscll(this_offset);
-
-	/* return the value in ns */
-	return cycles_2_ns(this_offset);
-}
-
-/* We need to define a real function for sched_clock, to override the
-   weak default version */
-#ifdef CONFIG_PARAVIRT
-unsigned long long sched_clock(void)
-{
-	return paravirt_sched_clock();
-}
-#else
-unsigned long long sched_clock(void)
-	__attribute__((alias("native_sched_clock")));
-#endif
-
 unsigned long native_calculate_cpu_khz(void)
 {
 	unsigned long long start, end;
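Worth noting while reading the two deletions above: the shared tsc_disabled flag the 32-bit file now imports is a tri-state. It starts at -1 so that sched_clock() calls made before tsc_init() take the jiffies path, becomes 0 once the TSC is validated, and is set to 1 by "notsc". A toy user-space model of that gating (illustrative only, not kernel code):

#include <stdio.h>

static int tsc_disabled = -1;	/* -1: pre-init, 0: TSC ok, 1: "notsc" */

static const char *clock_path(void)
{
	/* any nonzero value routes sched_clock() to the jiffies fallback */
	return tsc_disabled ? "jiffies fallback" : "rdtsc";
}

int main(void)
{
	printf("early boot: %s\n", clock_path());	/* jiffies fallback */
	tsc_disabled = 0;				/* as tsc_init() would do */
	printf("after init: %s\n", clock_path());	/* rdtsc */
	return 0;
}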
diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c
index 9898fb01edfd..69cbe4c9f050 100644
--- a/arch/x86/kernel/tsc_64.c
+++ b/arch/x86/kernel/tsc_64.c
@@ -13,12 +13,8 @@
 #include <asm/timer.h>
 #include <asm/vgtod.h>
 
-static int notsc __initdata = 0;
-
-unsigned int cpu_khz;	/* TSC clocks / usec, not used here */
-EXPORT_SYMBOL(cpu_khz);
-unsigned int tsc_khz;
-EXPORT_SYMBOL(tsc_khz);
+extern int tsc_unstable;
+extern int tsc_disabled;
 
 /* Accelerators for sched_clock()
  * convert from cycles(64bits) => nanoseconds (64bits)
@@ -41,6 +37,7 @@ EXPORT_SYMBOL(tsc_khz);
  *
  * -johnstul@us.ibm.com "math is hard, lets go shopping!"
  */
+
 DEFINE_PER_CPU(unsigned long, cyc2ns);
 
 static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
@@ -63,41 +60,6 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 	local_irq_restore(flags);
 }
 
-unsigned long long native_sched_clock(void)
-{
-	unsigned long a = 0;
-
-	/* Could do CPU core sync here. Opteron can execute rdtsc speculatively,
-	 * which means it is not completely exact and may not be monotonous
-	 * between CPUs. But the errors should be too small to matter for
-	 * scheduling purposes.
-	 */
-
-	rdtscll(a);
-	return cycles_2_ns(a);
-}
-
-/* We need to define a real function for sched_clock, to override the
-   weak default version */
-#ifdef CONFIG_PARAVIRT
-unsigned long long sched_clock(void)
-{
-	return paravirt_sched_clock();
-}
-#else
-unsigned long long
-sched_clock(void) __attribute__((alias("native_sched_clock")));
-#endif
-
-
-static int tsc_unstable;
-
-int check_tsc_unstable(void)
-{
-	return tsc_unstable;
-}
-EXPORT_SYMBOL_GPL(check_tsc_unstable);
-
 #ifdef CONFIG_CPU_FREQ
 
 /* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
@@ -281,14 +243,6 @@ __cpuinit int unsynchronized_tsc(void)
 	return num_present_cpus() > 1;
 }
 
-int __init notsc_setup(char *s)
-{
-	notsc = 1;
-	return 1;
-}
-
-__setup("notsc", notsc_setup);
-
 static struct clocksource clocksource_tsc;
 
 /*
@@ -346,12 +300,13 @@ EXPORT_SYMBOL_GPL(mark_tsc_unstable);
 
 void __init init_tsc_clocksource(void)
 {
-	if (!notsc) {
-		clocksource_tsc.mult = clocksource_khz2mult(tsc_khz,
-						clocksource_tsc.shift);
-		if (check_tsc_unstable())
-			clocksource_tsc.rating = 0;
+	if (tsc_disabled > 0)
+		return;
 
-		clocksource_register(&clocksource_tsc);
-	}
+	clocksource_tsc.mult = clocksource_khz2mult(tsc_khz,
+			clocksource_tsc.shift);
+	if (check_tsc_unstable())
+		clocksource_tsc.rating = 0;
+
+	clocksource_register(&clocksource_tsc);
 }
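For reference, the cyc2ns "accelerator" that both tsc_32.c and tsc_64.c keep (the johnstul comment above) is plain fixed-point scaling: ns = cycles * (10^6 * SC / cpu_khz) / SC, with SC = 2^10 so the per-call divide becomes a multiply and shift. A standalone sketch with illustrative numbers (not the kernel's per-cpu implementation):

#include <stdio.h>
#include <stdint.h>

#define CYC2NS_SCALE_FACTOR 10		/* SC = 2^10, as in the kernel comment */

/* ns = cycles * (10^6 * SC / cpu_khz) / SC; the scale is computed
 * once per calibration, so sched_clock() pays only a mul and a shift. */
static uint64_t cycles_2_ns_demo(uint64_t cyc, unsigned long cpu_khz)
{
	unsigned long cyc2ns_scale =
		(1000000UL << CYC2NS_SCALE_FACTOR) / cpu_khz;
	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}

int main(void)
{
	/* a 2 GHz part: 2,000,000 cycles should be about 1,000,000 ns */
	printf("%llu ns\n",
	       (unsigned long long)cycles_2_ns_demo(2000000ULL, 2000000UL));
	return 0;
}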