path: root/include/asm-m68knommu/delay.h
author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-m68knommu/delay.h
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'include/asm-m68knommu/delay.h')
-rw-r--r--  include/asm-m68knommu/delay.h  76
1 files changed, 76 insertions, 0 deletions
diff --git a/include/asm-m68knommu/delay.h b/include/asm-m68knommu/delay.h
new file mode 100644
index 000000000000..e3a976254672
--- /dev/null
+++ b/include/asm-m68knommu/delay.h
@@ -0,0 +1,76 @@
#ifndef _M68KNOMMU_DELAY_H
#define _M68KNOMMU_DELAY_H

/*
 * Copyright (C) 1994 Hamish Macdonald
 * Copyright (C) 2004 Greg Ungerer <gerg@snapgear.com>
 */

#include <asm/param.h>

extern __inline__ void __delay(unsigned long loops)
{
#if defined(CONFIG_COLDFIRE)
        /* The ColdFire runs this loop at significantly different speeds
         * depending on whether or not it is long word aligned. We'll pad it
         * to long word alignment, which is the faster version.
         * The 0x4a8e is of course a 'tstl %fp' instruction. This is better
         * than using a NOP (0x4e71) instruction because it executes in one
         * cycle, not three, and doesn't allow for an arbitrary delay waiting
         * for bus cycles to finish. Also fp/a6 isn't likely to cause a
         * stall waiting for the register to become valid if such a stall
         * is added to the ColdFire at some stage.
         */
        __asm__ __volatile__ ( ".balignw 4, 0x4a8e\n\t"
                               "1: subql #1, %0\n\t"
                               "jcc 1b"
                : "=d" (loops) : "0" (loops));
#else
        __asm__ __volatile__ ( "1: subql #1, %0\n\t"
                "jcc 1b"
                : "=d" (loops) : "0" (loops));
#endif
}

/*
 * Ideally we would use a 32*32->64 multiply to calculate the number of
 * loop iterations, but the older standard 68k and ColdFire do not
 * have this instruction. So for them we have a close approximation
 * loop using 32*32->32 multiplies only. This calculation is based on
 * the ARM version of delay.
 *
 * We want to implement:
 *
 * loops = (usecs * 0x10c6 * HZ * loops_per_jiffy) / 2^32
 */

#define HZSCALE (268435456 / (1000000/HZ))

extern unsigned long loops_per_jiffy;

extern __inline__ void _udelay(unsigned long usecs)
{
#if defined(CONFIG_M68328) || defined(CONFIG_M68EZ328) || \
    defined(CONFIG_M68VZ328) || defined(CONFIG_M68360) || \
    defined(CONFIG_COLDFIRE)
        __delay((((usecs * HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6);
#else
        unsigned long tmp;

        usecs *= 4295;          /* 2**32 / 1000000 */
        __asm__ ("mulul %2,%0:%1"
                : "=d" (usecs), "=d" (tmp)
                : "d" (usecs), "1" (loops_per_jiffy*HZ));
        __delay(usecs);
#endif
}

/*
 * Moved the udelay() function into library code, no longer inlined.
 * I had to change the algorithm because we are overflowing now on
 * the faster ColdFire parts. The code is a little bigger, so it makes
 * sense to move it into the library.
 */
extern void udelay(unsigned long usecs);

#endif /* defined(_M68KNOMMU_DELAY_H) */
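
To make the loop-count arithmetic in this header concrete, here is a small host-side C sketch (not part of the header) that evaluates both forms of the calculation: the 32*32->64 scaled multiply that the mulul path performs, and the HZSCALE shift approximation used on the 68328/68360/ColdFire parts. The HZ and loops_per_jiffy values are illustrative assumptions, not calibration data from any real board.

/* Host-side sketch (not kernel code): compare the 32x32->64 scaled multiply
 * that the mulul path implements with the HZSCALE shift approximation used
 * when only 32x32->32 multiplies are available. HZ and loops_per_jiffy are
 * made-up example values.
 */
#include <stdio.h>
#include <stdint.h>

#define HZ       100UL                          /* assumed tick rate */
#define HZSCALE  (268435456 / (1000000 / HZ))   /* 2^28 * HZ / 10^6 */

int main(void)
{
        unsigned long loops_per_jiffy = 49152;  /* assumed calibration value */
        unsigned long usecs = 250;              /* requested delay in microseconds */

        /* "Exact" form: loops = (usecs * 0x10c6 * HZ * loops_per_jiffy) / 2^32.
         * 4295 in the header and 0x10c6 in its comment are both roughly
         * 2^32 / 10^6; mulul produces the full 64-bit product, and keeping
         * its high 32 bits is the >> 32 below.
         */
        uint64_t exact = ((uint64_t)(usecs * 4295) * (loops_per_jiffy * HZ)) >> 32;

        /* Approximate form: the >> 11, >> 11 and >> 6 shifts together cancel
         * the 2^28 scale hidden in HZSCALE, leaving roughly
         * usecs * HZ * loops_per_jiffy / 10^6 without needing a 64-bit product.
         */
        unsigned long approx =
                (((usecs * HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6;

        printf("exact  loop count: %llu\n", (unsigned long long)exact);
        printf("approx loop count: %lu\n", approx);
        return 0;
}

With these example numbers both forms land on essentially the same count, about usecs * HZ * loops_per_jiffy / 10^6 iterations, which is the value __delay() is then asked to burn through.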