author     Harvey Harrison <harvey.harrison@gmail.com>      2008-04-29 04:03:30 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2008-04-29 11:06:27 -0400
commit     6510d41954dc6a9c8b1dbca7eaca0f23195ca727 (patch)
tree       868b5fac25c7c5b80cc5a88eaaab8bf3d693420d /include/asm-frv/unaligned.h
parent     064106a91be5e76cb42c1ddf5d3871e3a1bd2a23 (diff)
kernel: Move arches to use common unaligned access
Unaligned access is ok for the following arches:
cris, m68k, mn10300, powerpc, s390, x86
These arches use the memmove implementation for native endian and the
byteshifting versions for the opposite endianness:
h8300, m32r, xtensa
These arches use a packed struct for native endian and byteshifting for the
other endianness:
alpha, blackfin, ia64, parisc, sparc, sparc64, mips, sh
m68knommu is generic_be for Coldfire, otherwise unaligned access is ok.
frv and arm choose endianness based on compiler settings and use the
byteshifting versions. Remove the unaligned trap handler from frv as it is
now unused.
v850 is le; it uses the byteshifting versions for both be and le.
Remove the now unused asm-generic implementation.
Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Acked-by: David S. Miller <davem@davemloft.net>
Cc: <linux-arch@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
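For context, the byteshifting versions referred to above assemble a value one byte at a time, so no load or store ever has to be aligned. A minimal userspace sketch of the big-endian flavour, patterned on what linux/unaligned/be_byteshift.h provides (16- and 32-bit cases only; an illustration, not the kernel header itself):

#include <stdint.h>

/* Read big-endian values one byte at a time and combine them with
 * shifts; no multi-byte load is ever issued, so any address is safe. */
static inline uint16_t get_be16(const uint8_t *p)
{
	return (uint16_t)(p[0] << 8 | p[1]);
}

static inline uint32_t get_be32(const uint8_t *p)
{
	return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
	       (uint32_t)p[2] << 8 | p[3];
}

/* Store a 32-bit value as four single-byte writes, MSB first. */
static inline void put_be32(uint32_t val, uint8_t *p)
{
	p[0] = val >> 24;
	p[1] = val >> 16;
	p[2] = val >> 8;
	p[3] = val;
}

The packed-struct variant instead lets the compiler emit whatever access sequence the arch needs, by giving the pointer a type whose alignment is 1 (again a sketch; the kernel's common code wraps this in per-size helpers):

#include <stdint.h>

/* A one-member packed struct has alignment 1, so dereferencing
 * through it tells the compiler the address may be unaligned and
 * it must generate safe code for this arch. */
struct una_u32 { uint32_t x; } __attribute__((packed));

static inline uint32_t get_unaligned_u32(const void *p)
{
	return ((const struct una_u32 *)p)->x;
}

With helpers like these in common code, a per-arch header shrinks to a pair of defines selecting the default endianness, which is exactly what the frv diff below does.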
Diffstat (limited to 'include/asm-frv/unaligned.h')
-rw-r--r--   include/asm-frv/unaligned.h   196
1 file changed, 8 insertions(+), 188 deletions(-)
diff --git a/include/asm-frv/unaligned.h b/include/asm-frv/unaligned.h
index dc8e9c9bf6bd..64ccc736f2d8 100644
--- a/include/asm-frv/unaligned.h
+++ b/include/asm-frv/unaligned.h
@@ -9,194 +9,14 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#ifndef _ASM_UNALIGNED_H
-#define _ASM_UNALIGNED_H
+#ifndef _ASM_FRV_UNALIGNED_H
+#define _ASM_FRV_UNALIGNED_H
 
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/be_byteshift.h>
+#include <linux/unaligned/generic.h>
 
-/*
- * Unaligned accesses on uClinux can't be performed in a fault handler - the
- * CPU detects them as imprecise exceptions making this impossible.
- *
- * With the FR451, however, they are precise, and so we used to fix them up in
- * the memory access fault handler. However, instruction bundling make this
- * impractical. So, now we fall back to using memcpy.
- */
-#ifdef CONFIG_MMU
-
-/*
- * The asm statement in the macros below is a way to get GCC to copy a
- * value from one variable to another without having any clue it's
- * actually doing so, so that it won't have any idea that the values
- * in the two variables are related.
- */
-
-#define get_unaligned(ptr) ({					\
-	typeof((*(ptr))) __x;					\
-	void *__ptrcopy;					\
-	asm("" : "=r" (__ptrcopy) : "0" (ptr));			\
-	memcpy(&__x, __ptrcopy, sizeof(*(ptr)));		\
-	__x;							\
-})
-
-#define put_unaligned(val, ptr) ({				\
-	typeof((*(ptr))) __x = (val);				\
-	void *__ptrcopy;					\
-	asm("" : "=r" (__ptrcopy) : "0" (ptr));			\
-	memcpy(__ptrcopy, &__x, sizeof(*(ptr)));		\
-})
-
-extern int handle_misalignment(unsigned long esr0, unsigned long ear0, unsigned long epcr0);
-
-#else
-
-#define get_unaligned(ptr)						\
-({									\
-	typeof(*(ptr)) x;						\
-	const char *__p = (const char *) (ptr);				\
-									\
-	switch (sizeof(x)) {						\
-	case 1:								\
-		x = *(ptr);						\
-		break;							\
-	case 2:								\
-	{								\
-		uint8_t a;						\
-		asm("	ldub%I2		%M2,%0		\n"		\
-		    "	ldub%I3.p	%M3,%1		\n"		\
-		    "	slli		%0,#8,%0	\n"		\
-		    "	or		%0,%1,%0	\n"		\
-		    : "=&r"(x), "=&r"(a)				\
-		    : "m"(__p[0]), "m"(__p[1])				\
-		    );							\
-		break;							\
-	}								\
-									\
-	case 4:								\
-	{								\
-		uint8_t a;						\
-		asm("	ldub%I2		%M2,%0		\n"		\
-		    "	ldub%I3.p	%M3,%1		\n"		\
-		    "	slli		%0,#8,%0	\n"		\
-		    "	or		%0,%1,%0	\n"		\
-		    "	ldub%I4.p	%M4,%1		\n"		\
-		    "	slli		%0,#8,%0	\n"		\
-		    "	or		%0,%1,%0	\n"		\
-		    "	ldub%I5.p	%M5,%1		\n"		\
-		    "	slli		%0,#8,%0	\n"		\
-		    "	or		%0,%1,%0	\n"		\
-		    : "=&r"(x), "=&r"(a)				\
-		    : "m"(__p[0]), "m"(__p[1]), "m"(__p[2]), "m"(__p[3])	\
-		    );							\
-		break;							\
-	}								\
-									\
-	case 8:								\
-	{								\
-		union { uint64_t x; u32 y[2]; } z;			\
-		uint8_t a;						\
-		asm("	ldub%I3		%M3,%0		\n"		\
-		    "	ldub%I4.p	%M4,%2		\n"		\
-		    "	slli		%0,#8,%0	\n"		\
-		    "	or		%0,%2,%0	\n"		\
-		    "	ldub%I5.p	%M5,%2		\n"		\
-		    "	slli		%0,#8,%0	\n"		\
-		    "	or		%0,%2,%0	\n"		\
-		    "	ldub%I6.p	%M6,%2		\n"		\
-		    "	slli		%0,#8,%0	\n"		\
-		    "	or		%0,%2,%0	\n"		\
-		    "	ldub%I7		%M7,%1		\n"		\
-		    "	ldub%I8.p	%M8,%2		\n"		\
-		    "	slli		%1,#8,%1	\n"		\
-		    "	or		%1,%2,%1	\n"		\
-		    "	ldub%I9.p	%M9,%2		\n"		\
-		    "	slli		%1,#8,%1	\n"		\
-		    "	or		%1,%2,%1	\n"		\
-		    "	ldub%I10.p	%M10,%2		\n"		\
-		    "	slli		%1,#8,%1	\n"		\
-		    "	or		%1,%2,%1	\n"		\
-		    : "=&r"(z.y[0]), "=&r"(z.y[1]), "=&r"(a)		\
-		    : "m"(__p[0]), "m"(__p[1]), "m"(__p[2]), "m"(__p[3]),	\
-		      "m"(__p[4]), "m"(__p[5]), "m"(__p[6]), "m"(__p[7])	\
-		    );							\
-		x = z.x;						\
-		break;							\
-	}								\
-									\
-	default:							\
-		x = 0;							\
-		BUG();							\
-		break;							\
-	}								\
-									\
-	x;								\
-})
-
-#define put_unaligned(val, ptr)						\
-do {									\
-	char *__p = (char *) (ptr);					\
-	int x;								\
-									\
-	switch (sizeof(*ptr)) {						\
-	case 2:								\
-	{								\
-		asm("	stb%I1.p	%0,%M1		\n"		\
-		    "	srli		%0,#8,%0	\n"		\
-		    "	stb%I2		%0,%M2		\n"		\
-		    : "=r"(x), "=m"(__p[1]), "=m"(__p[0])		\
-		    : "0"(val)						\
-		    );							\
-		break;							\
-	}								\
-									\
-	case 4:								\
-	{								\
-		asm("	stb%I1.p	%0,%M1		\n"		\
-		    "	srli		%0,#8,%0	\n"		\
-		    "	stb%I2.p	%0,%M2		\n"		\
-		    "	srli		%0,#8,%0	\n"		\
-		    "	stb%I3.p	%0,%M3		\n"		\
-		    "	srli		%0,#8,%0	\n"		\
-		    "	stb%I4		%0,%M4		\n"		\
-		    : "=r"(x), "=m"(__p[3]), "=m"(__p[2]), "=m"(__p[1]), "=m"(__p[0])	\
-		    : "0"(val)						\
-		    );							\
-		break;							\
-	}								\
-									\
-	case 8:								\
-	{								\
-		uint32_t __high, __low;					\
-		__high = (uint64_t)val >> 32;				\
-		__low = val & 0xffffffff;				\
-		asm("	stb%I2.p	%0,%M2		\n"		\
-		    "	srli		%0,#8,%0	\n"		\
-		    "	stb%I3.p	%0,%M3		\n"		\
-		    "	srli		%0,#8,%0	\n"		\
-		    "	stb%I4.p	%0,%M4		\n"		\
-		    "	srli		%0,#8,%0	\n"		\
-		    "	stb%I5.p	%0,%M5		\n"		\
-		    "	srli		%0,#8,%0	\n"		\
-		    "	stb%I6.p	%1,%M6		\n"		\
-		    "	srli		%1,#8,%1	\n"		\
-		    "	stb%I7.p	%1,%M7		\n"		\
-		    "	srli		%1,#8,%1	\n"		\
-		    "	stb%I8.p	%1,%M8		\n"		\
-		    "	srli		%1,#8,%1	\n"		\
-		    "	stb%I9		%1,%M9		\n"		\
-		    : "=&r"(__low), "=&r"(__high), "=m"(__p[7]), "=m"(__p[6]),	\
-		      "=m"(__p[5]), "=m"(__p[4]), "=m"(__p[3]), "=m"(__p[2]),	\
-		      "=m"(__p[1]), "=m"(__p[0])			\
-		    : "0"(__low), "1"(__high)				\
-		    );							\
-		break;							\
-	}								\
-									\
-	default:							\
-		*(ptr) = (val);						\
-		break;							\
-	}								\
-} while(0)
-
-#endif
+#define get_unaligned	__get_unaligned_be
+#define put_unaligned	__put_unaligned_be
 
-#endif
+#endif /* _ASM_FRV_UNALIGNED_H */
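The CONFIG_MMU branch deleted above depended on the trick its comment describes: an empty asm statement copies the pointer through a register, so GCC loses track of where the memcpy's pointer came from and cannot fold the byte-wise copy back into a single, potentially trapping, unaligned load. A standalone sketch of the same technique (the read_u32_unaligned name is illustrative; the removed macro was typeof-generic rather than fixed to a 32-bit type):

#include <stdint.h>
#include <string.h>

static inline uint32_t read_u32_unaligned(const void *ptr)
{
	const void *laundered;
	uint32_t val;

	/* Empty asm: GCC must assume the output register holds an
	 * arbitrary value, so it can no longer relate 'laundered'
	 * to 'ptr'. */
	asm("" : "=r" (laundered) : "0" (ptr));

	/* With the pointer laundered, this memcpy stays a genuine
	 * byte-wise copy instead of being optimized into one load. */
	memcpy(&val, laundered, sizeof(val));
	return val;
}

The common byteshifting helpers make this laundering unnecessary on frv, which now simply selects the big-endian versions.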