| author    | John Stultz <johnstul@us.ibm.com>     | 2006-06-26 03:25:10 -0400 |
|-----------|---------------------------------------|---------------------------|
| committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-06-26 12:58:21 -0400 |
| commit    | 539eb11e6e904f2cd4f62908cc5e44d724879721 (patch) | |
| tree      | df18c747c5226b138862fb19fad5b1527055b9c9 /include/asm-i386 | |
| parent    | 8d016ef1380a2a9a5ca5742ede04334199868f82 (diff) | |
[PATCH] Time: i386 Conversion - part 2: Rework TSC Support
As part of the i386 conversion to the generic timekeeping infrastructure, this
introduces a new tsc.c file. It takes over the TSC initialization, management
and access code that we want to preserve from timer_tsc.c (which will be
removed).
The code also introduces the following functionality:
o tsc_khz: like cpu_khz, but stores the TSC frequency on systems that do not
change the TSC frequency with the CPU frequency
o check/mark_tsc_unstable: accessor/modifier for the flag that records whether
the TSC is usable for timekeeping
o minor cleanups to the calibration math.
This patch also includes a one-line __cpuinitdata fix from Zwane Mwaikambo.
Signed-off-by: John Stultz <johnstul@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/asm-i386')

 -rw-r--r--  include/asm-i386/mach-default/mach_timer.h  |  4
 -rw-r--r--  include/asm-i386/mach-summit/mach_mpparse.h |  3
 -rw-r--r--  include/asm-i386/timex.h                    | 34
 -rw-r--r--  include/asm-i386/tsc.h                      | 49
 4 files changed, 56 insertions, 34 deletions
diff --git a/include/asm-i386/mach-default/mach_timer.h b/include/asm-i386/mach-default/mach_timer.h
index 4b9703bb0288..807992fd4171 100644
--- a/include/asm-i386/mach-default/mach_timer.h
+++ b/include/asm-i386/mach-default/mach_timer.h
@@ -15,7 +15,9 @@
 #ifndef _MACH_TIMER_H
 #define _MACH_TIMER_H
 
-#define CALIBRATE_LATCH	(5 * LATCH)
+#define CALIBRATE_TIME_MSEC 30 /* 30 msecs */
+#define CALIBRATE_LATCH	\
+	((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000)
 
 static inline void mach_prepare_counter(void)
 {
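For reference, here is the arithmetic behind the new latch value, worked out under the assumption of the standard i386 PIT clock (CLOCK_TICK_RATE of 1193182 Hz on non-Elan systems); the block below is illustrative and not part of the patch:

```c
/* Illustrative arithmetic only -- not part of the patch.
 * Assumes the standard i386 PIT clock, CLOCK_TICK_RATE == 1193182 Hz
 * (the non-Elan case; AMD Elan uses 1189200, see timex.h below). */
#define CLOCK_TICK_RATE      1193182
#define CALIBRATE_TIME_MSEC  30

/* Round-to-nearest number of PIT ticks in a 30 ms window:
 *   (1193182 * 30 + 500) / 1000 = 35795 ticks  (~30.0 ms)
 * The old value, 5 * LATCH, was tied to HZ, since LATCH is defined as
 * (CLOCK_TICK_RATE + HZ/2) / HZ. */
#define CALIBRATE_LATCH \
	((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000)
```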
diff --git a/include/asm-i386/mach-summit/mach_mpparse.h b/include/asm-i386/mach-summit/mach_mpparse.h
index 1cce2b924a80..94268399170d 100644
--- a/include/asm-i386/mach-summit/mach_mpparse.h
+++ b/include/asm-i386/mach-summit/mach_mpparse.h
@@ -2,6 +2,7 @@
 #define __ASM_MACH_MPPARSE_H
 
 #include <mach_apic.h>
+#include <asm/tsc.h>
 
 extern int use_cyclone;
 
@@ -29,6 +30,7 @@ static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
 	    (!strncmp(productid, "VIGIL SMP", 9)
 	     || !strncmp(productid, "EXA", 3)
 	     || !strncmp(productid, "RUTHLESS SMP", 12))){
+		mark_tsc_unstable();
 		use_cyclone = 1; /*enable cyclone-timer*/
 		setup_summit();
 		return 1;
@@ -42,6 +44,7 @@ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	if (!strncmp(oem_id, "IBM", 3) &&
 	    (!strncmp(oem_table_id, "SERVIGIL", 8)
 	     || !strncmp(oem_table_id, "EXA", 3))){
+		mark_tsc_unstable();
 		use_cyclone = 1; /*enable cyclone-timer*/
 		setup_summit();
 		return 1;
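The two mark_tsc_unstable() calls added above rely on the flag described in the changelog; its implementation lands in arch/i386/kernel/tsc.c, which is outside this include/asm-i386 diffstat. A minimal hypothetical sketch of such a check/mark pair, for orientation only:

```c
/* Hypothetical sketch of the check/mark_tsc_unstable pair described in
 * the changelog.  The real implementation lands in arch/i386/kernel/tsc.c,
 * which is not part of this include/asm-i386 diffstat. */
static int tsc_unstable;

void mark_tsc_unstable(void)
{
	/* A platform quirk (e.g. Summit/cyclone above) says the TSC cannot
	 * be trusted for timekeeping; remember that. */
	tsc_unstable = 1;
}

int check_tsc_unstable(void)
{
	/* Timekeeping code can poll this before relying on the TSC. */
	return tsc_unstable;
}
```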
diff --git a/include/asm-i386/timex.h b/include/asm-i386/timex.h
index d434984303ca..3666044409f0 100644
--- a/include/asm-i386/timex.h
+++ b/include/asm-i386/timex.h
@@ -7,6 +7,7 @@
 #define _ASMi386_TIMEX_H
 
 #include <asm/processor.h>
+#include <asm/tsc.h>
 
 #ifdef CONFIG_X86_ELAN
 # define CLOCK_TICK_RATE 1189200 /* AMD Elan has different frequency! */
@@ -15,39 +16,6 @@
 #endif
 
 
-/*
- * Standard way to access the cycle counter on i586+ CPUs.
- * Currently only used on SMP.
- *
- * If you really have a SMP machine with i486 chips or older,
- * compile for that, and this will just always return zero.
- * That's ok, it just means that the nicer scheduling heuristics
- * won't work for you.
- *
- * We only use the low 32 bits, and we'd simply better make sure
- * that we reschedule before that wraps. Scheduling at least every
- * four billion cycles just basically sounds like a good idea,
- * regardless of how fast the machine is.
- */
-typedef unsigned long long cycles_t;
-
-static inline cycles_t get_cycles (void)
-{
-	unsigned long long ret=0;
-
-#ifndef CONFIG_X86_TSC
-	if (!cpu_has_tsc)
-		return 0;
-#endif
-
-#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC)
-	rdtscll(ret);
-#endif
-	return ret;
-}
-
-extern unsigned int cpu_khz;
-
 extern int read_current_timer(unsigned long *timer_value);
 #define ARCH_HAS_READ_CURRENT_TIMER 1
 
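Because timex.h now simply includes the new header, existing callers of get_cycles() and cpu_khz are unaffected by the move. A minimal usage sketch, illustrative only and not taken from the patch:

```c
/* Illustrative only -- not from the patch.  Existing code that includes
 * <asm/timex.h> keeps compiling unchanged, because timex.h now pulls in
 * <asm/tsc.h>, where cycles_t, get_cycles() and cpu_khz have moved. */
#include <asm/timex.h>

static cycles_t time_something(void)
{
	cycles_t start, end;

	start = get_cycles();
	/* ... code under measurement ... */
	end = get_cycles();

	/* Elapsed TSC cycles; on builds for CPUs without a TSC
	 * (pre-i586), both reads return 0, so the delta is 0 too. */
	return end - start;
}
```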
diff --git a/include/asm-i386/tsc.h b/include/asm-i386/tsc.h
new file mode 100644
index 000000000000..97b828ce31e0
--- /dev/null
+++ b/include/asm-i386/tsc.h
@@ -0,0 +1,49 @@
+/*
+ * linux/include/asm-i386/tsc.h
+ *
+ * i386 TSC related functions
+ */
+#ifndef _ASM_i386_TSC_H
+#define _ASM_i386_TSC_H
+
+#include <linux/config.h>
+#include <asm/processor.h>
+
+/*
+ * Standard way to access the cycle counter on i586+ CPUs.
+ * Currently only used on SMP.
+ *
+ * If you really have a SMP machine with i486 chips or older,
+ * compile for that, and this will just always return zero.
+ * That's ok, it just means that the nicer scheduling heuristics
+ * won't work for you.
+ *
+ * We only use the low 32 bits, and we'd simply better make sure
+ * that we reschedule before that wraps. Scheduling at least every
+ * four billion cycles just basically sounds like a good idea,
+ * regardless of how fast the machine is.
+ */
+typedef unsigned long long cycles_t;
+
+extern unsigned int cpu_khz;
+extern unsigned int tsc_khz;
+
+static inline cycles_t get_cycles(void)
+{
+	unsigned long long ret = 0;
+
+#ifndef CONFIG_X86_TSC
+	if (!cpu_has_tsc)
+		return 0;
+#endif
+
+#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC)
+	rdtscll(ret);
+#endif
+	return ret;
+}
+
+extern void tsc_init(void);
+extern void mark_tsc_unstable(void);
+
+#endif
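With tsc_khz exported alongside cpu_khz, a TSC delta can be scaled into wall-clock units. The sketch below shows the naive integer conversion for illustration only; it is not part of the patch, and in-kernel scaling typically replaces the division with a precomputed multiply/shift:

```c
/* Illustrative only -- not part of this patch.  tsc_khz is the TSC
 * frequency in kHz, i.e. cycles per millisecond, so:
 *     nanoseconds = cycles * 1,000,000 / tsc_khz
 * The multiplication can overflow 64 bits for very large deltas, and
 * in-kernel code normally uses a precomputed mult/shift pair instead. */
#include <asm/tsc.h>

static inline unsigned long long cycles_to_ns(cycles_t cycles)
{
	return (cycles * 1000000ULL) / tsc_khz;
}
```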