diff options
| -rw-r--r-- | arch/parisc/include/asm/delay.h | 41 | ||||
| -rw-r--r-- | arch/parisc/lib/Makefile | 2 | ||||
| -rw-r--r-- | arch/parisc/lib/delay.c | 73 |
3 files changed, 84 insertions, 32 deletions
diff --git a/arch/parisc/include/asm/delay.h b/arch/parisc/include/asm/delay.h index 912ee7e6a579..08e58e679e3e 100644 --- a/arch/parisc/include/asm/delay.h +++ b/arch/parisc/include/asm/delay.h | |||
| @@ -1,15 +1,5 @@ | |||
| 1 | #ifndef _PARISC_DELAY_H | 1 | #ifndef _ASM_PARISC_DELAY_H |
| 2 | #define _PARISC_DELAY_H | 2 | #define _ASM_PARISC_DELAY_H |
| 3 | |||
| 4 | #include <asm/special_insns.h> /* for mfctl() */ | ||
| 5 | #include <asm/processor.h> /* for boot_cpu_data */ | ||
| 6 | |||
| 7 | |||
| 8 | /* | ||
| 9 | * Copyright (C) 1993 Linus Torvalds | ||
| 10 | * | ||
| 11 | * Delay routines | ||
| 12 | */ | ||
| 13 | 3 | ||
| 14 | static __inline__ void __delay(unsigned long loops) { | 4 | static __inline__ void __delay(unsigned long loops) { |
| 15 | asm volatile( | 5 | asm volatile( |
| @@ -19,25 +9,14 @@ static __inline__ void __delay(unsigned long loops) { | |||
| 19 | : "=r" (loops) : "0" (loops)); | 9 | : "=r" (loops) : "0" (loops)); |
| 20 | } | 10 | } |
| 21 | 11 | ||
| 22 | static __inline__ void __cr16_delay(unsigned long clocks) { | 12 | extern void __udelay(unsigned long usecs); |
| 23 | unsigned long start; | 13 | extern void __udelay_bad(unsigned long usecs); |
| 24 | |||
| 25 | /* | ||
| 26 | * Note: Due to unsigned math, cr16 rollovers shouldn't be | ||
| 27 | * a problem here. However, on 32 bit, we need to make sure | ||
| 28 | * we don't pass in too big a value. The current default | ||
| 29 | * value of MAX_UDELAY_MS should help prevent this. | ||
| 30 | */ | ||
| 31 | 14 | ||
/*
 * udelay() - busy-wait for at least @usecs microseconds.
 *
 * A compile-time-constant delay larger than 20 ms is diverted to
 * __udelay_bad() instead of being executed normally.
 * NOTE(review): __udelay_bad() is only declared extern here; presumably it
 * is left undefined so oversized constant delays fail at link time (the
 * usual kernel pattern) -- confirm against its definition.  Such callers
 * should use mdelay()/msleep() instead.
 *
 * Fix vs. original: the original had no "else", so a too-large constant
 * delay would call __udelay_bad() and then fall through and run
 * __udelay() as well.  Exactly one of the two is called now.
 */
static inline void udelay(unsigned long usecs)
{
	if (__builtin_constant_p(usecs) && (usecs) > 20000)
		__udelay_bad(usecs);
	else
		__udelay(usecs);
}
| 36 | 21 | ||
| 37 | static __inline__ void __udelay(unsigned long usecs) { | 22 | #endif /* _ASM_PARISC_DELAY_H */ |
| 38 | __cr16_delay(usecs * ((unsigned long)boot_cpu_data.cpu_hz / 1000000UL)); | ||
| 39 | } | ||
| 40 | |||
| 41 | #define udelay(n) __udelay(n) | ||
| 42 | |||
| 43 | #endif /* defined(_PARISC_DELAY_H) */ | ||
diff --git a/arch/parisc/lib/Makefile b/arch/parisc/lib/Makefile index 5651536ac733..8fa92b8d839a 100644 --- a/arch/parisc/lib/Makefile +++ b/arch/parisc/lib/Makefile | |||
| @@ -3,6 +3,6 @@ | |||
| 3 | # | 3 | # |
| 4 | 4 | ||
| 5 | lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \ | 5 | lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \ |
| 6 | ucmpdi2.o | 6 | ucmpdi2.o delay.o |
| 7 | 7 | ||
| 8 | obj-y := iomap.o | 8 | obj-y := iomap.o |
diff --git a/arch/parisc/lib/delay.c b/arch/parisc/lib/delay.c new file mode 100644 index 000000000000..ec9255f27a81 --- /dev/null +++ b/arch/parisc/lib/delay.c | |||
| @@ -0,0 +1,73 @@ | |||
| 1 | /* | ||
| 2 | * Precise Delay Loops for parisc | ||
| 3 | * | ||
| 4 | * based on code by: | ||
| 5 | * Copyright (C) 1993 Linus Torvalds | ||
| 6 | * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz> | ||
| 7 | * Copyright (C) 2008 Jiri Hladky <hladky _dot_ jiri _at_ gmail _dot_ com> | ||
| 8 | * | ||
| 9 | * parisc implementation: | ||
| 10 | * Copyright (C) 2013 Helge Deller <deller@gmx.de> | ||
| 11 | */ | ||
| 12 | |||
| 13 | |||
| 14 | #include <linux/module.h> | ||
| 15 | #include <linux/preempt.h> | ||
| 16 | #include <linux/init.h> | ||
| 17 | |||
| 18 | #include <asm/processor.h> | ||
| 19 | #include <asm/delay.h> | ||
| 20 | |||
| 21 | #include <asm/special_insns.h> /* for mfctl() */ | ||
| 22 | #include <asm/processor.h> /* for boot_cpu_data */ | ||
| 23 | |||
| 24 | /* CR16 based delay: */ | ||
/*
 * CR16 based delay:
 *
 * Busy-wait for @__loops ticks of the per-CPU cr16 cycle counter
 * (read via mfctl(16)).  Preemption is disabled only across each
 * counter sample so RT tasks can still run while we spin; if the
 * task migrates to another CPU between samples, the remaining tick
 * budget is rebased onto the new CPU's counter (cr16 is per-CPU,
 * per the rebalance comment below).
 */
static void __cr16_delay(unsigned long __loops)
{
	/*
	 * Note: Due to unsigned math, cr16 rollovers shouldn't be
	 * a problem here. However, on 32 bit, we need to make sure
	 * we don't pass in too big a value. The current default
	 * value of MAX_UDELAY_MS should help prevent this.
	 */
	u32 bclock, now, loops = __loops;
	int cpu;

	preempt_disable();
	cpu = smp_processor_id();
	bclock = mfctl(16);
	for (;;) {
		now = mfctl(16);
		/* unsigned subtraction handles a cr16 wraparound */
		if ((now - bclock) >= loops)
			break;

		/* Allow RT tasks to run */
		preempt_enable();
		asm volatile(" nop\n");
		barrier();
		preempt_disable();

		/*
		 * It is possible that we moved to another CPU, and
		 * since CR16's are per-cpu we need to calculate
		 * that. The delay must guarantee that we wait "at
		 * least" the amount of time. Being moved to another
		 * CPU could make the wait longer but we just need to
		 * make sure we waited long enough. Rebalance the
		 * counter for this CPU.
		 */
		if (unlikely(cpu != smp_processor_id())) {
			/* credit the ticks already waited on the old CPU */
			loops -= (now - bclock);
			cpu = smp_processor_id();
			bclock = mfctl(16);
		}
	}
	preempt_enable();
}
| 67 | |||
| 68 | |||
| 69 | void __udelay(unsigned long usecs) | ||
| 70 | { | ||
| 71 | __cr16_delay(usecs * ((unsigned long)boot_cpu_data.cpu_hz / 1000000UL)); | ||
| 72 | } | ||
| 73 | EXPORT_SYMBOL(__udelay); | ||
