-rw-r--r--	arch/x86/lib/delay_32.c	31
-rw-r--r--	arch/x86/lib/delay_64.c	30
2 files changed, 53 insertions(+), 8 deletions(-)
diff --git a/arch/x86/lib/delay_32.c b/arch/x86/lib/delay_32.c
index 4535e6d147ad..d710f2d167bb 100644
--- a/arch/x86/lib/delay_32.c
+++ b/arch/x86/lib/delay_32.c
@@ -44,13 +44,36 @@ static void delay_loop(unsigned long loops)
 static void delay_tsc(unsigned long loops)
 {
 	unsigned long bclock, now;
+	int cpu;
 
-	preempt_disable();		/* TSC's are per-cpu */
+	preempt_disable();
+	cpu = smp_processor_id();
 	rdtscl(bclock);
-	do {
-		rep_nop();
+	for (;;) {
 		rdtscl(now);
-	} while ((now-bclock) < loops);
+		if ((now - bclock) >= loops)
+			break;
+
+		/* Allow RT tasks to run */
+		preempt_enable();
+		rep_nop();
+		preempt_disable();
+
+		/*
+		 * It is possible that we moved to another CPU, and
+		 * since TSC's are per-cpu we need to calculate
+		 * that. The delay must guarantee that we wait "at
+		 * least" the amount of time. Being moved to another
+		 * CPU could make the wait longer but we just need to
+		 * make sure we waited long enough. Rebalance the
+		 * counter for this CPU.
+		 */
+		if (unlikely(cpu != smp_processor_id())) {
+			loops -= (now - bclock);
+			cpu = smp_processor_id();
+			rdtscl(bclock);
+		}
+	}
 	preempt_enable();
 }
 
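
The comment block above carries the whole argument: the delay only has to be a lower bound, so when the task comes back on a different CPU the ticks already served are subtracted from the remaining budget and measurement restarts against the new CPU's counter. Below is a standalone user-space sketch of that bookkeeping; fake_tsc[], read_tsc(), the migration point and all constants are invented purely for illustration and are not part of the patch.

/*
 * Standalone sketch: simulate the rebalancing done in the hunk above
 * with two "CPUs" whose counters have unrelated base values.
 */
#include <stdio.h>

static unsigned long fake_tsc[2] = { 1000, 500000 };	/* two "CPUs", unrelated counters */

static unsigned long read_tsc(int cpu)
{
	return fake_tsc[cpu] += 7;	/* every read advances that CPU's counter a little */
}

int main(void)
{
	unsigned long loops = 100;	/* must busy-wait at least this many ticks */
	unsigned long served = 0;	/* ticks actually spent waiting, across both CPUs */
	int cpu = 0, migrated = 0;
	unsigned long bclock = read_tsc(cpu), now = bclock;

	for (;;) {
		now = read_tsc(cpu);
		if ((now - bclock) >= loops)
			break;

		/* pretend the scheduler moved us once, roughly half way through */
		if (!migrated && (now - bclock) > 50) {
			migrated = 1;
			served += now - bclock;	/* ticks already served on the old CPU */
			loops -= now - bclock;	/* shrink the remaining budget accordingly */
			cpu = 1;		/* counter base is completely different here */
			bclock = read_tsc(cpu);	/* restart measurement on the new CPU */
		}
	}
	served += now - bclock;

	printf("served %lu ticks, requested at least 100\n", served);
	return 0;
}

The point is only that served can never come out below the request: a migration may make the wait longer, never shorter, which is exactly the guarantee the patch comment describes.
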
diff --git a/arch/x86/lib/delay_64.c b/arch/x86/lib/delay_64.c
index bbc610518516..4c441be92641 100644
--- a/arch/x86/lib/delay_64.c
+++ b/arch/x86/lib/delay_64.c
@@ -31,14 +31,36 @@ int __devinit read_current_timer(unsigned long *timer_value)
 void __delay(unsigned long loops)
 {
 	unsigned bclock, now;
+	int cpu;
 
-	preempt_disable();		/* TSC's are pre-cpu */
+	preempt_disable();
+	cpu = smp_processor_id();
 	rdtscl(bclock);
-	do {
-		rep_nop();
+	for (;;) {
 		rdtscl(now);
+		if ((now - bclock) >= loops)
+			break;
+
+		/* Allow RT tasks to run */
+		preempt_enable();
+		rep_nop();
+		preempt_disable();
+
+		/*
+		 * It is possible that we moved to another CPU, and
+		 * since TSC's are per-cpu we need to calculate
+		 * that. The delay must guarantee that we wait "at
+		 * least" the amount of time. Being moved to another
+		 * CPU could make the wait longer but we just need to
+		 * make sure we waited long enough. Rebalance the
+		 * counter for this CPU.
+		 */
+		if (unlikely(cpu != smp_processor_id())) {
+			loops -= (now - bclock);
+			cpu = smp_processor_id();
+			rdtscl(bclock);
+		}
 	}
-	while ((now-bclock) < loops);
 	preempt_enable();
 }
 EXPORT_SYMBOL(__delay);
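
One property both versions lean on, though the patch does not spell it out, is that the elapsed-tick computation is done in unsigned arithmetic on the low word of the TSC, so now - bclock stays correct even if the counter's low word wraps between the two reads. A tiny standalone check of that arithmetic, with made-up values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t bclock = 0xfffffff0u;	/* read just before the low word wraps */
	uint32_t now    = 0x00000010u;	/* read just after the wrap */

	/* Modular 32-bit subtraction still yields the true elapsed ticks. */
	printf("elapsed = %u ticks\n", (unsigned)(now - bclock));	/* prints 32 */
	return 0;
}
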