author		Helge Deller <deller@gmx.de>	2013-10-23 17:29:16 -0400
committer	Helge Deller <deller@gmx.de>	2013-11-07 16:28:26 -0500
commit		f6d12eefcda2e4135aa529ab8b5bf2766b4a78dd (patch)
tree		595a3fe90daf8c5a5443745507349dff408af5e1
parent		e4be260d156409ebdabf1e13ed547d3703801c5f (diff)
parisc: make udelay() SMP-safe
Each CPU has its own Control Register 16 (CR16), which is used as the time source for the udelay() function. But since the CR16 registers of different CPUs are not synchronized, we need to recalculate the loop count if we get switched away, to ensure that we really delay for as much time as requested.

Signed-off-by: Helge Deller <deller@gmx.de>
-rw-r--r--	arch/parisc/include/asm/delay.h	41
-rw-r--r--	arch/parisc/lib/Makefile	2
-rw-r--r--	arch/parisc/lib/delay.c	73
3 files changed, 84 insertions(+), 32 deletions(-)
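
Before the raw diff, here is the heart of the change paraphrased as a compact sketch. This is illustrative only: the helper name cr16_delay_sketch is made up here and the snippet assumes normal kernel context (u32, mfctl(), preempt_*, smp_processor_id()). The real, fully commented implementation is in arch/parisc/lib/delay.c below.

	/* Compact paraphrase of the new CR16-based delay loop (illustrative only). */
	#include <linux/preempt.h>	/* preempt_disable()/preempt_enable() */
	#include <linux/smp.h>		/* smp_processor_id() */
	#include <linux/types.h>	/* u32 */
	#include <linux/compiler.h>	/* barrier() */
	#include <asm/special_insns.h>	/* mfctl() */

	static void cr16_delay_sketch(u32 cycles)
	{
		int cpu;
		u32 start, now;

		preempt_disable();
		cpu = smp_processor_id();
		start = mfctl(16);
		while ((now = mfctl(16)) - start < cycles) {
			preempt_enable();	/* briefly allow preemption (e.g. for RT tasks) */
			barrier();
			preempt_disable();
			if (cpu != smp_processor_id()) {
				/*
				 * We migrated while preemption was enabled. CR16 is
				 * per-CPU and unsynchronized, so credit the cycles
				 * already waited on the old CPU and re-baseline
				 * against the new CPU's counter.
				 */
				cycles -= now - start;
				cpu = smp_processor_id();
				start = mfctl(16);
			}
		}
		preempt_enable();
	}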
diff --git a/arch/parisc/include/asm/delay.h b/arch/parisc/include/asm/delay.h
index 912ee7e6a579..08e58e679e3e 100644
--- a/arch/parisc/include/asm/delay.h
+++ b/arch/parisc/include/asm/delay.h
@@ -1,15 +1,5 @@
-#ifndef _PARISC_DELAY_H
-#define _PARISC_DELAY_H
-
-#include <asm/special_insns.h>	/* for mfctl() */
-#include <asm/processor.h>	/* for boot_cpu_data */
-
-
-/*
- * Copyright (C) 1993 Linus Torvalds
- *
- * Delay routines
- */
+#ifndef _ASM_PARISC_DELAY_H
+#define _ASM_PARISC_DELAY_H
 
 static __inline__ void __delay(unsigned long loops) {
 	asm volatile(
@@ -19,25 +9,14 @@ static __inline__ void __delay(unsigned long loops) {
 	: "=r" (loops) : "0" (loops));
 }
 
-static __inline__ void __cr16_delay(unsigned long clocks) {
-	unsigned long start;
-
-	/*
-	 * Note: Due to unsigned math, cr16 rollovers shouldn't be
-	 * a problem here. However, on 32 bit, we need to make sure
-	 * we don't pass in too big a value. The current default
-	 * value of MAX_UDELAY_MS should help prevent this.
-	 */
-
-	start = mfctl(16);
-	while ((mfctl(16) - start) < clocks)
-		;
-}
+extern void __udelay(unsigned long usecs);
+extern void __udelay_bad(unsigned long usecs);
 
-static __inline__ void __udelay(unsigned long usecs) {
-	__cr16_delay(usecs * ((unsigned long)boot_cpu_data.cpu_hz / 1000000UL));
-}
+static inline void udelay(unsigned long usecs)
+{
+	if (__builtin_constant_p(usecs) && (usecs) > 20000)
+		__udelay_bad(usecs);
+	__udelay(usecs);
+}
 
-#define udelay(n) __udelay(n)
-
-#endif /* defined(_PARISC_DELAY_H) */
+#endif /* _ASM_PARISC_DELAY_H */
diff --git a/arch/parisc/lib/Makefile b/arch/parisc/lib/Makefile
index 5651536ac733..8fa92b8d839a 100644
--- a/arch/parisc/lib/Makefile
+++ b/arch/parisc/lib/Makefile
@@ -3,6 +3,6 @@
 #
 
 lib-y	:= lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \
-	   ucmpdi2.o
+	   ucmpdi2.o delay.o
 
 obj-y	:= iomap.o
diff --git a/arch/parisc/lib/delay.c b/arch/parisc/lib/delay.c
new file mode 100644
index 000000000000..ec9255f27a81
--- /dev/null
+++ b/arch/parisc/lib/delay.c
@@ -0,0 +1,73 @@
+/*
+ * Precise Delay Loops for parisc
+ *
+ * based on code by:
+ * Copyright (C) 1993 Linus Torvalds
+ * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ * Copyright (C) 2008 Jiri Hladky <hladky _dot_ jiri _at_ gmail _dot_ com>
+ *
+ * parisc implementation:
+ * Copyright (C) 2013 Helge Deller <deller@gmx.de>
+ */
+
+
+#include <linux/module.h>
+#include <linux/preempt.h>
+#include <linux/init.h>
+
+#include <asm/processor.h>
+#include <asm/delay.h>
+
+#include <asm/special_insns.h>	/* for mfctl() */
+#include <asm/processor.h>	/* for boot_cpu_data */
+
+/* CR16 based delay: */
+static void __cr16_delay(unsigned long __loops)
+{
+	/*
+	 * Note: Due to unsigned math, cr16 rollovers shouldn't be
+	 * a problem here. However, on 32 bit, we need to make sure
+	 * we don't pass in too big a value. The current default
+	 * value of MAX_UDELAY_MS should help prevent this.
+	 */
+	u32 bclock, now, loops = __loops;
+	int cpu;
+
+	preempt_disable();
+	cpu = smp_processor_id();
+	bclock = mfctl(16);
+	for (;;) {
+		now = mfctl(16);
+		if ((now - bclock) >= loops)
+			break;
+
+		/* Allow RT tasks to run */
+		preempt_enable();
+		asm volatile(" nop\n");
+		barrier();
+		preempt_disable();
+
+		/*
+		 * It is possible that we moved to another CPU, and
+		 * since CR16's are per-cpu we need to calculate
+		 * that. The delay must guarantee that we wait "at
+		 * least" the amount of time. Being moved to another
+		 * CPU could make the wait longer but we just need to
+		 * make sure we waited long enough. Rebalance the
+		 * counter for this CPU.
+		 */
+		if (unlikely(cpu != smp_processor_id())) {
+			loops -= (now - bclock);
+			cpu = smp_processor_id();
+			bclock = mfctl(16);
+		}
+	}
+	preempt_enable();
+}
+
+
+void __udelay(unsigned long usecs)
+{
+	__cr16_delay(usecs * ((unsigned long)boot_cpu_data.cpu_hz / 1000000UL));
+}
+EXPORT_SYMBOL(__udelay);
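
A usage note: __udelay_bad() is declared in the new delay.h but never defined in this patch; the assumption here is that, like the __bad_udelay() link-error trick used on other architectures, it exists only so that udelay() with a compile-time constant above 20000 microseconds fails at link time, pushing such callers toward mdelay()/msleep(). A hypothetical caller (not part of this patch; the helper name is invented for illustration):

	#include <linux/delay.h>

	/* Hypothetical driver helper, for illustration only. */
	static void example_hw_settle(void)
	{
		udelay(10);	/* small constant delay: ends up in __udelay() above */

		/*
		 * udelay(50000) with a constant argument would reference the
		 * undefined __udelay_bad() and fail at link time; waits that
		 * long should use mdelay() or msleep() instead.
		 */
	}

As for the scaling in __udelay() above: with boot_cpu_data.cpu_hz at, say, 250 MHz (an illustrative figure), udelay(10) spins for 10 * (250000000 / 1000000) = 2500 CR16 cycles.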