/*
* Precise Delay Loops for x86-64
*
* Copyright (C) 1993 Linus Torvalds
* Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
*
* The __delay function must _NOT_ be inlined as its execution time
* depends wildly on alignment on many x86 processors.
*/

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/preempt.h>
#include <linux/delay.h>

#include <asm/delay.h>
#include <asm/msr.h>

#ifdef CONFIG_SMP
#include <asm/smp.h>
#endif
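
/*
 * Read the raw TSC; a zero return tells callers (e.g. the boot-time
 * delay-loop calibration code) that the value is valid.
 */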
int read_current_timer(unsigned long *timer_value)
{
	rdtscll(*timer_value);
	return 0;
}
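
/*
 * Busy-wait for the given number of TSC cycles by polling the low
 * 32 bits of the TSC.  rep_nop() is the PAUSE hint, which keeps the
 * spin loop friendly to the sibling thread on hyper-threaded CPUs.
 */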
void __delay(unsigned long loops)
{
	unsigned bclock, now;

	preempt_disable();	/* TSCs are per-CPU */
	rdtscl(bclock);
	do {
		rep_nop();
		rdtscl(now);
	} while ((now - bclock) < loops);
	preempt_enable();
}
EXPORT_SYMBOL(__delay);
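
/*
 * xloops is a delay expressed as seconds scaled by 2^32 (see the
 * constants in __udelay/__ndelay below).  loops_per_jiffy * HZ is
 * loops per second, so (xloops * HZ * loops_per_jiffy) >> 32 yields
 * the loop count for __delay(); the "+ 1" rounds up so the delay is
 * never shorter than requested.
 */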
inline void __const_udelay(unsigned long xloops)
{
	__delay(((xloops * HZ *
		cpu_data(raw_smp_processor_id()).loops_per_jiffy) >> 32) + 1);
}
EXPORT_SYMBOL(__const_udelay);
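
/* 0x000010c7 == 4295 == ceil(2^32 / 10^6): one microsecond in the 2^32-scaled form __const_udelay() expects. */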
void __udelay(unsigned long usecs)
{
	__const_udelay(usecs * 0x000010c7);	/* 2**32 / 1000000 (rounded up) */
}
EXPORT_SYMBOL(__udelay);
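
/* 0x00005 == 5 == ceil(2^32 / 10^9): the same scaling, per nanosecond. */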
void __ndelay(unsigned long nsecs)
{
	__const_udelay(nsecs * 0x00005);	/* 2**32 / 1000000000 (rounded up) */
}
EXPORT_SYMBOL(__ndelay);