Diffstat (limited to 'include/asm-frv/unaligned.h')
 include/asm-frv/unaligned.h | 203 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 203 insertions(+), 0 deletions(-)
diff --git a/include/asm-frv/unaligned.h b/include/asm-frv/unaligned.h
new file mode 100644
index 000000000000..a0d199bf01d9
--- /dev/null
+++ b/include/asm-frv/unaligned.h
@@ -0,0 +1,203 @@
/* unaligned.h: unaligned access handler
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _ASM_UNALIGNED_H
#define _ASM_UNALIGNED_H

#include <linux/config.h>

/*
 * Unaligned accesses on uClinux can't be fixed up in a fault handler: the
 * CPU reports them as imprecise exceptions, so the faulting access can't
 * be identified.
 *
 * On the FR451 they are precise, and we used to fix them up in the memory
 * access fault handler.  However, instruction bundling makes that
 * impractical, so we now fall back to using memcpy().
 */
#ifdef CONFIG_MMU

/*
 * The asm statement in the macros below makes GCC copy a value from one
 * variable to another without realising it has done so; GCC thus has no
 * idea that the values in the two variables are related.
 */
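/*
 * (Without the laundering, GCC may notice that memcpy()'s argument is
 * really (ptr), expand the call inline and assume natural alignment
 * for the access - exactly the trap these macros exist to avoid.  How
 * aggressively this happens varies with the compiler version.)
 */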

#define get_unaligned(ptr) ({					\
	typeof((*(ptr))) __x;					\
	void *__ptrcopy;					\
	asm("" : "=r" (__ptrcopy) : "0" (ptr));			\
	memcpy(&__x, __ptrcopy, sizeof(*(ptr)));		\
	__x;							\
})

#define put_unaligned(val, ptr) ({				\
	typeof((*(ptr))) __x = (val);				\
	void *__ptrcopy;					\
	asm("" : "=r" (__ptrcopy) : "0" (ptr));			\
	memcpy(__ptrcopy, &__x, sizeof(*(ptr)));		\
})
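
/*
 * Minimal usage sketch (illustrative only; the buffer and offset are
 * made up): pull a 32-bit field out of an arbitrarily-aligned byte
 * buffer and write it back:
 *
 *	uint32_t len = get_unaligned((uint32_t *) &pkt[3]);
 *	put_unaligned(len + 4, (uint32_t *) &pkt[3]);
 *
 * Both expand to a small memcpy() through the laundered pointer, so
 * the compiler never issues a word access to the unaligned address.
 */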

extern int handle_misalignment(unsigned long esr0, unsigned long ear0, unsigned long epcr0);

#else

#define get_unaligned(ptr)						\
({									\
	typeof(*(ptr)) x;						\
	const char *__p = (const char *) (ptr);				\
									\
	switch (sizeof(x)) {						\
	case 1:								\
		x = *(ptr);						\
		break;							\
	case 2:								\
	{								\
		uint8_t a;						\
		asm("	ldub%I2		%M2,%0	\n"			\
		    "	ldub%I3.p	%M3,%1	\n"			\
		    "	slli		%0,#8,%0 \n"			\
		    "	or		%0,%1,%0 \n"			\
		    : "=&r"(x), "=&r"(a)				\
		    : "m"(__p[0]), "m"(__p[1])				\
		    );							\
		break;							\
	}								\
									\
	case 4:								\
	{								\
		uint8_t a;						\
		asm("	ldub%I2		%M2,%0	\n"			\
		    "	ldub%I3.p	%M3,%1	\n"			\
		    "	slli		%0,#8,%0 \n"			\
		    "	or		%0,%1,%0 \n"			\
		    "	ldub%I4.p	%M4,%1	\n"			\
		    "	slli		%0,#8,%0 \n"			\
		    "	or		%0,%1,%0 \n"			\
		    "	ldub%I5.p	%M5,%1	\n"			\
		    "	slli		%0,#8,%0 \n"			\
		    "	or		%0,%1,%0 \n"			\
		    : "=&r"(x), "=&r"(a)				\
		    : "m"(__p[0]), "m"(__p[1]), "m"(__p[2]), "m"(__p[3]) \
		    );							\
		break;							\
	}								\
									\
	case 8:								\
	{								\
		union { uint64_t x; u32 y[2]; } z;			\
		uint8_t a;						\
		asm("	ldub%I3		%M3,%0	\n"			\
		    "	ldub%I4.p	%M4,%2	\n"			\
		    "	slli		%0,#8,%0 \n"			\
		    "	or		%0,%2,%0 \n"			\
		    "	ldub%I5.p	%M5,%2	\n"			\
		    "	slli		%0,#8,%0 \n"			\
		    "	or		%0,%2,%0 \n"			\
		    "	ldub%I6.p	%M6,%2	\n"			\
		    "	slli		%0,#8,%0 \n"			\
		    "	or		%0,%2,%0 \n"			\
		    "	ldub%I7		%M7,%1	\n"			\
		    "	ldub%I8.p	%M8,%2	\n"			\
		    "	slli		%1,#8,%1 \n"			\
		    "	or		%1,%2,%1 \n"			\
		    "	ldub%I9.p	%M9,%2	\n"			\
		    "	slli		%1,#8,%1 \n"			\
		    "	or		%1,%2,%1 \n"			\
		    "	ldub%I10.p	%M10,%2	\n"			\
		    "	slli		%1,#8,%1 \n"			\
		    "	or		%1,%2,%1 \n"			\
		    : "=&r"(z.y[0]), "=&r"(z.y[1]), "=&r"(a)		\
		    : "m"(__p[0]), "m"(__p[1]), "m"(__p[2]), "m"(__p[3]), \
		      "m"(__p[4]), "m"(__p[5]), "m"(__p[6]), "m"(__p[7]) \
		    );							\
		x = z.x;						\
		break;							\
	}								\
									\
	default:							\
		x = 0;							\
		BUG();							\
		break;							\
	}								\
									\
	x;								\
})
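
/*
 * The loads above accumulate the bytes most-significant first, i.e. in
 * the CPU's native big-endian order.  A plain-C sketch of the 32-bit
 * case (illustrative only):
 *
 *	x = ((uint32_t) (uint8_t) __p[0] << 24) |
 *	    ((uint32_t) (uint8_t) __p[1] << 16) |
 *	    ((uint32_t) (uint8_t) __p[2] << 8)  |
 *	     (uint32_t) (uint8_t) __p[3];
 */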

#define put_unaligned(val, ptr)						\
do {									\
	char *__p = (char *) (ptr);					\
	int x;								\
									\
	switch (sizeof(*(ptr))) {					\
	case 2:								\
	{								\
		asm("	stb%I1.p	%0,%M1	\n"			\
		    "	srli		%0,#8,%0 \n"			\
		    "	stb%I2		%0,%M2	\n"			\
		    : "=r"(x), "=m"(__p[1]), "=m"(__p[0])		\
		    : "0"(val)						\
		    );							\
		break;							\
	}								\
									\
	case 4:								\
	{								\
		asm("	stb%I1.p	%0,%M1	\n"			\
		    "	srli		%0,#8,%0 \n"			\
		    "	stb%I2.p	%0,%M2	\n"			\
		    "	srli		%0,#8,%0 \n"			\
		    "	stb%I3.p	%0,%M3	\n"			\
		    "	srli		%0,#8,%0 \n"			\
		    "	stb%I4		%0,%M4	\n"			\
		    : "=r"(x), "=m"(__p[3]), "=m"(__p[2]), "=m"(__p[1]), "=m"(__p[0]) \
		    : "0"(val)						\
		    );							\
		break;							\
	}								\
									\
	case 8:								\
	{								\
		uint32_t __high, __low;					\
		__high = (uint64_t)(val) >> 32;				\
		__low = (val) & 0xffffffff;				\
		asm("	stb%I2.p	%0,%M2	\n"			\
		    "	srli		%0,#8,%0 \n"			\
		    "	stb%I3.p	%0,%M3	\n"			\
		    "	srli		%0,#8,%0 \n"			\
		    "	stb%I4.p	%0,%M4	\n"			\
		    "	srli		%0,#8,%0 \n"			\
		    "	stb%I5.p	%0,%M5	\n"			\
		    "	srli		%0,#8,%0 \n"			\
		    "	stb%I6.p	%1,%M6	\n"			\
		    "	srli		%1,#8,%1 \n"			\
		    "	stb%I7.p	%1,%M7	\n"			\
		    "	srli		%1,#8,%1 \n"			\
		    "	stb%I8.p	%1,%M8	\n"			\
		    "	srli		%1,#8,%1 \n"			\
		    "	stb%I9		%1,%M9	\n"			\
		    : "=&r"(__low), "=&r"(__high), "=m"(__p[7]), "=m"(__p[6]), \
		      "=m"(__p[5]), "=m"(__p[4]), "=m"(__p[3]), "=m"(__p[2]), \
		      "=m"(__p[1]), "=m"(__p[0])			\
		    : "0"(__low), "1"(__high)				\
		    );							\
		break;							\
	}								\
									\
	default:							\
		*(ptr) = (val);						\
		break;							\
	}								\
} while (0)
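
/*
 * As in get_unaligned(), the stores above run from the least-significant
 * byte at the highest address up to the most-significant byte at __p[0],
 * preserving big-endian order.  Plain-C sketch of the 32-bit case
 * (illustrative only):
 *
 *	__p[3] = val;       __p[2] = val >> 8;
 *	__p[1] = val >> 16; __p[0] = val >> 24;
 */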

#endif

#endif
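
A short, self-contained sketch of how code elsewhere in the kernel might
consume this header (the packed structure and its field names are made up
for illustration; only get_unaligned() and put_unaligned() come from the
file above):

	#include <asm/unaligned.h>

	struct record {
		uint8_t  tag;
		uint32_t value;		/* at offset 1: never naturally aligned */
	} __attribute__((packed));

	/* Read the misaligned field, bump it, and write it back. */
	static uint32_t record_bump(struct record *r)
	{
		uint32_t v = get_unaligned(&r->value);
		put_unaligned(v + 1, &r->value);
		return v;
	}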