diff options
Diffstat (limited to 'arch/x86/lib/delay_32.c')
-rw-r--r--  arch/x86/lib/delay_32.c  103
1 file changed, 103 insertions, 0 deletions
diff --git a/arch/x86/lib/delay_32.c b/arch/x86/lib/delay_32.c
new file mode 100644
index 000000000000..f6edb11364df
--- /dev/null
+++ b/arch/x86/lib/delay_32.c
@@ -0,0 +1,103 @@
1 | /* | ||
2 | * Precise Delay Loops for i386 | ||
3 | * | ||
4 | * Copyright (C) 1993 Linus Torvalds | ||
5 | * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz> | ||
6 | * | ||
7 | * The __delay function must _NOT_ be inlined as its execution time | ||
8 | * depends wildly on alignment on many x86 processors. The additional | ||
9 | * jump magic is needed to get the timing stable on all the CPU's | ||
10 | * we have to worry about. | ||
11 | */ | ||
12 | |||
13 | #include <linux/module.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/delay.h> | ||
16 | |||
17 | #include <asm/processor.h> | ||
18 | #include <asm/delay.h> | ||
19 | #include <asm/timer.h> | ||
20 | |||
21 | #ifdef CONFIG_SMP | ||
22 | # include <asm/smp.h> | ||
23 | #endif | ||
24 | |||
/* simple loop based delay, used when no reliable TSC is available: */
static void delay_loop(unsigned long loops)
{
	int d0;

	/*
	 * Spin by decrementing the counter in %eax until it goes
	 * negative (decl/jns), i.e. roughly 'loops' iterations.
	 * The two jumps through 16-byte-aligned labels pin the loop
	 * body's alignment so its timing stays stable across CPUs --
	 * see the comment at the top of this file.
	 */
	__asm__ __volatile__(
		"\tjmp 1f\n"
		".align 16\n"
		"1:\tjmp 2f\n"
		".align 16\n"
		"2:\tdecl %0\n\tjns 2b"
		:"=&a" (d0)
		:"0" (loops));
}
39 | |||
/*
 * TSC based delay: poll the time-stamp counter until 'loops'
 * cycles have elapsed since entry.  rep_nop() (pause) is issued
 * before each read to be friendly to SMT siblings.
 */
static void delay_tsc(unsigned long loops)
{
	unsigned long start, cur;

	rdtscl(start);
	for (;;) {
		rep_nop();
		rdtscl(cur);
		if ((cur - start) >= loops)
			break;
	}
}
51 | |||
/*
 * Since we calibrate only once at boot, this
 * function should be set once at boot and not changed.
 * Defaults to the loop based delay; use_tsc_delay() switches
 * it to the TSC based implementation.
 */
static void (*delay_fn)(unsigned long) = delay_loop;
57 | |||
/* Switch __delay() over to the TSC based implementation. */
void use_tsc_delay(void)
{
	delay_fn = delay_tsc;
}
62 | |||
63 | int read_current_timer(unsigned long *timer_val) | ||
64 | { | ||
65 | if (delay_fn == delay_tsc) { | ||
66 | rdtscl(*timer_val); | ||
67 | return 0; | ||
68 | } | ||
69 | return -1; | ||
70 | } | ||
71 | |||
/* Delay for 'loops' calibrated iterations via the selected backend. */
void __delay(unsigned long loops)
{
	delay_fn(loops);
}
76 | |||
/*
 * Core delay: 'xloops' is a delay expressed as a 2**32-scaled
 * fraction of a second (see __udelay()/__ndelay() for the scaling).
 *
 * The "mull" computes the 64-bit product
 *   (xloops * 4) * (loops_per_jiffy * (HZ/4)) == xloops * loops_per_jiffy * HZ
 * using the current CPU's calibrated loops_per_jiffy; the high 32
 * bits land in %edx ("=d"), i.e. xloops becomes
 * (xloops * loops_per_jiffy * HZ) >> 32 -- the loop count.
 * The ++ adds one iteration so we never delay too little.
 */
inline void __const_udelay(unsigned long xloops)
{
	int d0;

	xloops *= 4;
	__asm__("mull %0"
		:"=d" (xloops), "=&a" (d0)
		:"1" (xloops), "0"
		(cpu_data[raw_smp_processor_id()].loops_per_jiffy * (HZ/4)));

	__delay(++xloops);
}
89 | |||
/*
 * Delay for 'usecs' microseconds.  0x000010c7 is ceil(2**32 / 10**6),
 * so the multiply converts microseconds into the 2**32-scaled
 * fraction of a second that __const_udelay() expects.
 */
void __udelay(unsigned long usecs)
{
	__const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}
94 | |||
/*
 * Delay for 'nsecs' nanoseconds.  0x00005 is ceil(2**32 / 10**9),
 * the nanosecond analogue of the __udelay() scaling constant.
 */
void __ndelay(unsigned long nsecs)
{
	__const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
}
99 | |||
/* Export the delay primitives for use by kernel modules. */
EXPORT_SYMBOL(__delay);
EXPORT_SYMBOL(__const_udelay);
EXPORT_SYMBOL(__udelay);
EXPORT_SYMBOL(__ndelay);