author     Andi Kleen <ak@suse.de>        2008-01-30 07:32:40 -0500
committer  Ingo Molnar <mingo@elte.hu>    2008-01-30 07:32:40 -0500
commit     51fc97b93545e71cec578d6771bceeb92bc2d50b (patch)
tree       4c280344d2b1fb37a4f643275b9c173e28364c9c
parent     2b16a2353814a513cdb5c5c739b76a19d7ea39ce (diff)
x86: allow TSC clock source on AMD Fam10h and some cleanup
After a lot of discussions with AMD it turns out that the TSC on Fam10h
CPUs is synchronized when the CONSTANT_TSC cpuid bit is set; or rather,
if there are ever systems where that is not true, it is their BIOS'
task to disable the bit. So finally use TSC-based gettimeofday() on
Fam10h by default.

More precisely, TSC-based gettimeofday() is now always used on CPUs
where the AMD-specific CONSTANT_TSC bit is set.
This gives a nice speed boost for gettimeofday() on these systems,
which tends to be by far the most common v/syscall.
On a Fam10h system here, TSC gtod() uses about 20% of the CPU time of
acpi_pm-based gtod(). This was measured on 32bit; on 64bit it is even
better because TSC gtod() can use a vsyscall and stay in ring 3, which
acpi_pm cannot.
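For reference, a comparison like the one above can be approximated from
user space with a small microbenchmark. The sketch below is illustrative
only and not part of this patch; run it (as root switch the clocksource
first) once with "tsc" and once with "acpi_pm" written to
/sys/devices/system/clocksource/clocksource0/current_clocksource and
compare the per-call cost.

/*
 * Illustrative user-space microbenchmark, not part of this patch:
 * times a fixed number of gettimeofday() calls with whatever
 * clocksource is currently selected.
 */
#include <stdio.h>
#include <sys/time.h>

#define ITERATIONS 10000000L

int main(void)
{
	struct timeval start, end, scratch;
	long i, usecs;

	gettimeofday(&start, NULL);
	for (i = 0; i < ITERATIONS; i++)
		gettimeofday(&scratch, NULL);	/* call under test */
	gettimeofday(&end, NULL);

	usecs = (end.tv_sec - start.tv_sec) * 1000000L +
		(end.tv_usec - start.tv_usec);
	printf("%.1f ns per gettimeofday() call\n",
	       usecs * 1000.0 / ITERATIONS);
	return 0;
}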
The Intel check simply tests for CONSTANT_TSC too, without hardcoding
the Intel vendor. This is equivalent on 64bit because all 64bit-capable
Intel CPUs will have CONSTANT_TSC set.

On Intel there is no CPU-supplied CONSTANT_TSC bit currently, but we
synthesize one based on hardcoded knowledge of which steppings have a
p-state invariant TSC.
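For context, that synthetic Intel bit is set during CPU identification
along the lines of the sketch below. This is an illustrative paraphrase
and not part of this patch; the function name is invented here, and the
family/model cutoffs and the exact location in the CPU setup code may
differ between kernel versions.

/*
 * Illustrative sketch, not part of this patch: during CPU
 * identification, synthesize CONSTANT_TSC on Intel steppings known
 * to have a p-state invariant TSC.  The function name is a
 * placeholder and the cutoffs shown approximate what the CPU setup
 * code checks.
 */
static void __cpuinit intel_synthesize_constant_tsc(struct cpuinfo_x86 *c)
{
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
}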
So the new logic is: on CPUs which have the AMD-specific CONSTANT_TSC
bit set, or on Intel CPUs which are new enough to be known to have a
p-state invariant TSC, always use TSC-based gettimeofday().
Cc: lenb@kernel.org
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--  arch/x86/kernel/tsc_32.c | 5
-rw-r--r--  arch/x86/kernel/tsc_64.c | 5
2 files changed, 7 insertions, 3 deletions
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
index 00bb4c1c0593..2a7b95bd8509 100644
--- a/arch/x86/kernel/tsc_32.c
+++ b/arch/x86/kernel/tsc_32.c
@@ -354,6 +354,11 @@ __cpuinit int unsynchronized_tsc(void)
 {
 	if (!cpu_has_tsc || tsc_unstable)
 		return 1;
+
+	/* Anything with constant TSC should be synchronized */
+	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+		return 0;
+
 	/*
 	 * Intel systems are normally all synchronized.
 	 * Exceptions must mark TSC as unstable:
diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c
index 2cc55b726c22..322b38c68198 100644
--- a/arch/x86/kernel/tsc_64.c
+++ b/arch/x86/kernel/tsc_64.c
@@ -272,9 +272,8 @@ __cpuinit int unsynchronized_tsc(void)
 	if (apic_is_clustered_box())
 		return 1;
 #endif
-	/* Most intel systems have synchronized TSCs except for
-	   multi node systems */
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
+
+	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
 #ifdef CONFIG_ACPI
 		/* But TSC doesn't tick in C3 so don't use it there */
 		if (acpi_gbl_FADT.header.length > 0 &&