Diffstat (limited to 'include')
 -rw-r--r--  include/asm-i386/tsc.h     | 49 +-
 -rw-r--r--  include/asm-x86_64/proto.h |  2 -
 -rw-r--r--  include/asm-x86_64/timex.h | 26 +-
 -rw-r--r--  include/asm-x86_64/tsc.h   | 66 ++
 4 files changed, 68 insertions(+), 75 deletions(-)
diff --git a/include/asm-i386/tsc.h b/include/asm-i386/tsc.h
index c13933185c1c..e997891cc7cc 100644
--- a/include/asm-i386/tsc.h
+++ b/include/asm-i386/tsc.h
@@ -1,48 +1 @@
-/*
- * linux/include/asm-i386/tsc.h
- *
- * i386 TSC related functions
- */
-#ifndef _ASM_i386_TSC_H
-#define _ASM_i386_TSC_H
-
-#include <asm/processor.h>
-
-/*
- * Standard way to access the cycle counter on i586+ CPUs.
- * Currently only used on SMP.
- *
- * If you really have a SMP machine with i486 chips or older,
- * compile for that, and this will just always return zero.
- * That's ok, it just means that the nicer scheduling heuristics
- * won't work for you.
- *
- * We only use the low 32 bits, and we'd simply better make sure
- * that we reschedule before that wraps. Scheduling at least every
- * four billion cycles just basically sounds like a good idea,
- * regardless of how fast the machine is.
- */
-typedef unsigned long long cycles_t;
-
-extern unsigned int cpu_khz;
-extern unsigned int tsc_khz;
-
-static inline cycles_t get_cycles(void)
-{
-	unsigned long long ret = 0;
-
-#ifndef CONFIG_X86_TSC
-	if (!cpu_has_tsc)
-		return 0;
-#endif
-
-#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC)
-	rdtscll(ret);
-#endif
-	return ret;
-}
-
-extern void tsc_init(void);
-extern void mark_tsc_unstable(void);
-
-#endif
+#include <asm-x86_64/tsc.h>
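
Note: rdtscll() above is a kernel-internal macro around the RDTSC instruction,
which returns the 64-bit time-stamp counter in EDX:EAX. As a rough,
self-contained illustration of what such a read boils down to (a userspace
sketch, not part of this patch or of the kernel macro itself):

	/*
	 * Illustrative sketch only: read the TSC from userspace.
	 * RDTSC places the 64-bit counter in EDX:EAX; we stitch the
	 * two halves back together by hand.
	 */
	#include <stdint.h>
	#include <stdio.h>

	static inline uint64_t read_tsc(void)
	{
		uint32_t lo, hi;

		__asm__ __volatile__("rdtsc" : "=a" (lo), "=d" (hi));
		return ((uint64_t)hi << 32) | lo;
	}

	int main(void)
	{
		uint64_t t0 = read_tsc();
		uint64_t t1 = read_tsc();

		printf("back-to-back RDTSC delta: %llu cycles\n",
		       (unsigned long long)(t1 - t0));
		return 0;
	}

Compiles with gcc or clang on any x86 machine; the delta shows the raw cost
of the instruction pair, a few dozen cycles on typical hardware.
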
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index a6d2ff5c69b7..2ce3adf7bfdc 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -91,8 +91,6 @@ extern void check_efer(void);
 
 extern int unhandled_signal(struct task_struct *tsk, int sig);
 
-extern int unsynchronized_tsc(void);
-
 extern void select_idle_routine(const struct cpuinfo_x86 *c);
 
 extern unsigned long table_start, table_end;
diff --git a/include/asm-x86_64/timex.h b/include/asm-x86_64/timex.h
index b9e5320b7625..a4493a77d641 100644
--- a/include/asm-x86_64/timex.h
+++ b/include/asm-x86_64/timex.h
@@ -12,35 +12,11 @@
 #include <asm/hpet.h>
 #include <asm/system.h>
 #include <asm/processor.h>
+#include <asm/tsc.h>
 #include <linux/compiler.h>
 
 #define CLOCK_TICK_RATE	PIT_TICK_RATE	/* Underlying HZ */
 
-typedef unsigned long long cycles_t;
-
-static inline cycles_t get_cycles (void)
-{
-	unsigned long long ret;
-
-	rdtscll(ret);
-	return ret;
-}
-
-/* Like get_cycles, but make sure the CPU is synchronized. */
-static __always_inline cycles_t get_cycles_sync(void)
-{
-	unsigned long long ret;
-	unsigned eax;
-	/* Don't do an additional sync on CPUs where we know
-	   RDTSC is already synchronous. */
-	alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC,
-		       "=a" (eax), "0" (1) : "ebx","ecx","edx","memory");
-	rdtscll(ret);
-	return ret;
-}
-
-extern unsigned int cpu_khz;
-
 extern int read_current_timer(unsigned long *timer_value);
 #define ARCH_HAS_READ_CURRENT_TIMER	1
 
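
Note: the point of get_cycles_sync(), moved out of this header by the hunk
above, is that RDTSC can execute out of order, so an unfenced read may "see"
a point in time before earlier instructions have retired. CPUID is an
architecturally serializing instruction, so issuing it first fences the
pipeline; the kernel version additionally patches the CPUID away at boot
(via alternative_io) on CPUs whose TSC reads are already synchronous. A
userspace sketch of the same trick, for illustration only:

	/*
	 * Sketch: serialized TSC read. CPUID leaf 1 is executed purely
	 * for its serializing side effect before RDTSC samples the
	 * counter -- the same idea as get_cycles_sync(), minus the
	 * boot-time patching.
	 */
	#include <stdint.h>

	static inline uint64_t read_tsc_serialized(void)
	{
		uint32_t lo, hi;
		uint32_t eax = 1, ebx, ecx, edx;

		__asm__ __volatile__("cpuid"
				     : "=a" (eax), "=b" (ebx),
				       "=c" (ecx), "=d" (edx)
				     : "0" (eax)
				     : "memory");
		__asm__ __volatile__("rdtsc" : "=a" (lo), "=d" (hi));
		return ((uint64_t)hi << 32) | lo;
	}
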
diff --git a/include/asm-x86_64/tsc.h b/include/asm-x86_64/tsc.h
new file mode 100644
index 000000000000..9a0a368852c7
--- /dev/null
+++ b/include/asm-x86_64/tsc.h
@@ -0,0 +1,66 @@
+/*
+ * linux/include/asm-x86_64/tsc.h
+ *
+ * x86_64 TSC related functions
+ */
+#ifndef _ASM_x86_64_TSC_H
+#define _ASM_x86_64_TSC_H
+
+#include <asm/processor.h>
+
+/*
+ * Standard way to access the cycle counter.
+ */
+typedef unsigned long long cycles_t;
+
+extern unsigned int cpu_khz;
+extern unsigned int tsc_khz;
+
+static inline cycles_t get_cycles(void)
+{
+	unsigned long long ret = 0;
+
+#ifndef CONFIG_X86_TSC
+	if (!cpu_has_tsc)
+		return 0;
+#endif
+
+#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC)
+	rdtscll(ret);
+#endif
+	return ret;
+}
+
+/* Like get_cycles, but make sure the CPU is synchronized. */
+static __always_inline cycles_t get_cycles_sync(void)
+{
+	unsigned long long ret;
+#ifdef X86_FEATURE_SYNC_RDTSC
+	unsigned eax;
+
+	/*
+	 * Don't do an additional sync on CPUs where we know
+	 * RDTSC is already synchronous:
+	 */
+	alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC,
+		       "=a" (eax), "0" (1) : "ebx","ecx","edx","memory");
+#else
+	sync_core();
+#endif
+	rdtscll(ret);
+
+	return ret;
+}
+
+extern void tsc_init(void);
+extern void mark_tsc_unstable(void);
+extern int unsynchronized_tsc(void);
+
+/*
+ * Boot-time check whether the TSCs are synchronized across
+ * all CPUs/cores:
+ */
+extern void check_tsc_sync_source(int cpu);
+extern void check_tsc_sync_target(void);
+
+#endif
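
Note: check_tsc_sync_source()/check_tsc_sync_target() are only declared
here; their implementations are not part of this diff. As a sketch of the
underlying idea only (not the kernel's actual algorithm): two CPUs take
turns reading the TSC under a lock, and if any reader ever observes a value
below the last one recorded, the counters have "warped" relative to each
other and cannot serve as a global clock. A userspace approximation using
two threads (which, unlike the kernel check, are not pinned to distinct
CPUs here):

	/*
	 * Sketch: detect TSC warps between two threads. The lock makes
	 * the read-compare-store sequence atomic, so a smaller value
	 * really means time appeared to go backwards.
	 */
	#include <pthread.h>
	#include <stdint.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static uint64_t last_tsc;
	static unsigned long warps;

	static inline uint64_t read_tsc(void)
	{
		uint32_t lo, hi;

		__asm__ __volatile__("rdtsc" : "=a" (lo), "=d" (hi));
		return ((uint64_t)hi << 32) | lo;
	}

	static void *warp_check(void *unused)
	{
		for (long i = 0; i < 1000000; i++) {
			pthread_mutex_lock(&lock);
			uint64_t now = read_tsc();

			if (now < last_tsc)	/* time went backwards */
				warps++;
			else
				last_tsc = now;
			pthread_mutex_unlock(&lock);
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t a, b;

		pthread_create(&a, NULL, warp_check, NULL);
		pthread_create(&b, NULL, warp_check, NULL);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		printf("observed %lu TSC warps\n", warps);
		return 0;
	}

Build with gcc -O2 -pthread. On hardware with synchronized TSCs the warp
count stays at zero; a nonzero count is the condition unsynchronized_tsc()
and the boot-time sync check exist to catch.
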