path: root/arch/sh/include/asm/unaligned-sh4a.h
author		Linus Torvalds <torvalds@linux-foundation.org>	2011-01-06 16:34:45 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-06 16:34:45 -0500
commit		9858a38ea3a940762ae3028cce88f686d0e0c28b (patch)
tree		f34a3ba1eba9be58cdd906e33280ce5b35d31b06 /arch/sh/include/asm/unaligned-sh4a.h
parent		abb359450f20c32ae03039d8736f12b1d561caf5 (diff)
parent		f862f904d357dc0d3612347a8dbabe6fae037fbb (diff)
Merge branch 'sh-latest' of git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6
* 'sh-latest' of git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6:
  sh: include Migo-R TS driver in Migo-R defconfig
  sh: correct definitions to access stack pointers
  sh: Tidy up SH-4A unaligned load support.
  dma: shdma: NMI support.
  sh: mach-sdk7786: Handle baseboard NMI source selection.
  sh: mach-rsk: Add polled GPIO buttons support for RSK+7203.
  sh: Break out cpuinfo_op procfs bits.
  sh: Enable optional gpiolib for all CPUs with pinmux tables.
  sh: migrate SH_CLK_MD to mode pin API.
  sh: machvec IO death.
Diffstat (limited to 'arch/sh/include/asm/unaligned-sh4a.h')
-rw-r--r--	arch/sh/include/asm/unaligned-sh4a.h | 164
1 file changed, 52 insertions(+), 112 deletions(-)
diff --git a/arch/sh/include/asm/unaligned-sh4a.h b/arch/sh/include/asm/unaligned-sh4a.h
index 9f4dd252c981..c48a9c3420da 100644
--- a/arch/sh/include/asm/unaligned-sh4a.h
+++ b/arch/sh/include/asm/unaligned-sh4a.h
@@ -18,10 +18,20 @@
  * of spill registers and blowing up when building at low optimization
  * levels. See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=34777.
  */
+#include <linux/unaligned/packed_struct.h>
 #include <linux/types.h>
 #include <asm/byteorder.h>
 
-static __always_inline u32 __get_unaligned_cpu32(const u8 *p)
+static inline u16 sh4a_get_unaligned_cpu16(const u8 *p)
+{
+#ifdef __LITTLE_ENDIAN
+	return p[0] | p[1] << 8;
+#else
+	return p[0] << 8 | p[1];
+#endif
+}
+
+static __always_inline u32 sh4a_get_unaligned_cpu32(const u8 *p)
 {
 	unsigned long unaligned;
 
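The hunk boundary here skips over the movua.l inline-asm body of sh4a_get_unaligned_cpu32(), which is unchanged context. For reference, a minimal sketch of what such a movua.l load typically looks like, assuming GCC's SH-specific "z" (r0) machine constraint; the actual body is not shown in this diff:

	static __always_inline u32 sh4a_get_unaligned_cpu32(const u8 *p)
	{
		unsigned long unaligned;

		/*
		 * movua.l reads a 32-bit value from an unaligned address.
		 * The instruction encoding only allows r0 as the
		 * destination, which the "z" output constraint pins down.
		 */
		__asm__ __volatile__ (
			"movua.l	@%1, %0\n\t"
			: "=z" (unaligned)
			: "r" (p));

		return unaligned;
	}
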
@@ -34,218 +44,148 @@ static __always_inline u32 __get_unaligned_cpu32(const u8 *p)
 	return unaligned;
 }
 
-struct __una_u16 { u16 x __attribute__((packed)); };
-struct __una_u32 { u32 x __attribute__((packed)); };
-struct __una_u64 { u64 x __attribute__((packed)); };
-
-static inline u16 __get_unaligned_cpu16(const u8 *p)
-{
-#ifdef __LITTLE_ENDIAN
-	return p[0] | p[1] << 8;
-#else
-	return p[0] << 8 | p[1];
-#endif
-}
-
 /*
  * Even though movua.l supports auto-increment on the read side, it can
  * only store to r0 due to instruction encoding constraints, so just let
  * the compiler sort it out on its own.
  */
-static inline u64 __get_unaligned_cpu64(const u8 *p)
+static inline u64 sh4a_get_unaligned_cpu64(const u8 *p)
 {
 #ifdef __LITTLE_ENDIAN
-	return (u64)__get_unaligned_cpu32(p + 4) << 32 |
-		    __get_unaligned_cpu32(p);
+	return (u64)sh4a_get_unaligned_cpu32(p + 4) << 32 |
+		    sh4a_get_unaligned_cpu32(p);
 #else
-	return (u64)__get_unaligned_cpu32(p) << 32 |
-		    __get_unaligned_cpu32(p + 4);
+	return (u64)sh4a_get_unaligned_cpu32(p) << 32 |
+		    sh4a_get_unaligned_cpu32(p + 4);
 #endif
 }
 
 static inline u16 get_unaligned_le16(const void *p)
 {
-	return le16_to_cpu(__get_unaligned_cpu16(p));
+	return le16_to_cpu(sh4a_get_unaligned_cpu16(p));
 }
 
 static inline u32 get_unaligned_le32(const void *p)
 {
-	return le32_to_cpu(__get_unaligned_cpu32(p));
+	return le32_to_cpu(sh4a_get_unaligned_cpu32(p));
 }
 
 static inline u64 get_unaligned_le64(const void *p)
 {
-	return le64_to_cpu(__get_unaligned_cpu64(p));
+	return le64_to_cpu(sh4a_get_unaligned_cpu64(p));
 }
 
 static inline u16 get_unaligned_be16(const void *p)
 {
-	return be16_to_cpu(__get_unaligned_cpu16(p));
+	return be16_to_cpu(sh4a_get_unaligned_cpu16(p));
 }
 
 static inline u32 get_unaligned_be32(const void *p)
 {
-	return be32_to_cpu(__get_unaligned_cpu32(p));
+	return be32_to_cpu(sh4a_get_unaligned_cpu32(p));
 }
 
 static inline u64 get_unaligned_be64(const void *p)
 {
-	return be64_to_cpu(__get_unaligned_cpu64(p));
+	return be64_to_cpu(sh4a_get_unaligned_cpu64(p));
 }
 
-static inline void __put_le16_noalign(u8 *p, u16 val)
+static inline void nonnative_put_le16(u16 val, u8 *p)
 {
 	*p++ = val;
 	*p++ = val >> 8;
 }
 
-static inline void __put_le32_noalign(u8 *p, u32 val)
+static inline void nonnative_put_le32(u32 val, u8 *p)
 {
-	__put_le16_noalign(p, val);
-	__put_le16_noalign(p + 2, val >> 16);
+	nonnative_put_le16(val, p);
+	nonnative_put_le16(val >> 16, p + 2);
 }
 
-static inline void __put_le64_noalign(u8 *p, u64 val)
+static inline void nonnative_put_le64(u64 val, u8 *p)
 {
-	__put_le32_noalign(p, val);
-	__put_le32_noalign(p + 4, val >> 32);
+	nonnative_put_le32(val, p);
+	nonnative_put_le32(val >> 32, p + 4);
 }
 
-static inline void __put_be16_noalign(u8 *p, u16 val)
+static inline void nonnative_put_be16(u16 val, u8 *p)
 {
 	*p++ = val >> 8;
 	*p++ = val;
 }
 
-static inline void __put_be32_noalign(u8 *p, u32 val)
+static inline void nonnative_put_be32(u32 val, u8 *p)
 {
-	__put_be16_noalign(p, val >> 16);
-	__put_be16_noalign(p + 2, val);
+	nonnative_put_be16(val >> 16, p);
+	nonnative_put_be16(val, p + 2);
 }
 
-static inline void __put_be64_noalign(u8 *p, u64 val)
+static inline void nonnative_put_be64(u64 val, u8 *p)
 {
-	__put_be32_noalign(p, val >> 32);
-	__put_be32_noalign(p + 4, val);
+	nonnative_put_be32(val >> 32, p);
+	nonnative_put_be32(val, p + 4);
 }
 
 static inline void put_unaligned_le16(u16 val, void *p)
 {
 #ifdef __LITTLE_ENDIAN
-	((struct __una_u16 *)p)->x = val;
+	__put_unaligned_cpu16(val, p);
 #else
-	__put_le16_noalign(p, val);
+	nonnative_put_le16(val, p);
 #endif
 }
 
 static inline void put_unaligned_le32(u32 val, void *p)
 {
 #ifdef __LITTLE_ENDIAN
-	((struct __una_u32 *)p)->x = val;
+	__put_unaligned_cpu32(val, p);
 #else
-	__put_le32_noalign(p, val);
+	nonnative_put_le32(val, p);
 #endif
 }
 
 static inline void put_unaligned_le64(u64 val, void *p)
 {
 #ifdef __LITTLE_ENDIAN
-	((struct __una_u64 *)p)->x = val;
+	__put_unaligned_cpu64(val, p);
 #else
-	__put_le64_noalign(p, val);
+	nonnative_put_le64(val, p);
 #endif
 }
 
 static inline void put_unaligned_be16(u16 val, void *p)
 {
 #ifdef __BIG_ENDIAN
-	((struct __una_u16 *)p)->x = val;
+	__put_unaligned_cpu16(val, p);
 #else
-	__put_be16_noalign(p, val);
+	nonnative_put_be16(val, p);
 #endif
 }
 
 static inline void put_unaligned_be32(u32 val, void *p)
 {
 #ifdef __BIG_ENDIAN
-	((struct __una_u32 *)p)->x = val;
+	__put_unaligned_cpu32(val, p);
 #else
-	__put_be32_noalign(p, val);
+	nonnative_put_be32(val, p);
 #endif
 }
 
 static inline void put_unaligned_be64(u64 val, void *p)
 {
 #ifdef __BIG_ENDIAN
-	((struct __una_u64 *)p)->x = val;
+	__put_unaligned_cpu64(val, p);
 #else
-	__put_be64_noalign(p, val);
+	nonnative_put_be64(val, p);
 #endif
 }
 
 /*
- * Cause a link-time error if we try an unaligned access other than
- * 1,2,4 or 8 bytes long
+ * While it's a bit non-obvious, even though the generic le/be wrappers
+ * use the __get/put_xxx prefixing, they actually wrap in to the
+ * non-prefixed get/put_xxx variants as provided above.
  */
-extern void __bad_unaligned_access_size(void);
-
-#define __get_unaligned_le(ptr) ((__force typeof(*(ptr)))({			\
-	__builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr),			\
-	__builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_le16((ptr)),	\
-	__builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_le32((ptr)),	\
-	__builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_le64((ptr)),	\
-	__bad_unaligned_access_size()))));					\
-	}))
-
-#define __get_unaligned_be(ptr) ((__force typeof(*(ptr)))({			\
-	__builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr),			\
-	__builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_be16((ptr)),	\
-	__builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_be32((ptr)),	\
-	__builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_be64((ptr)),	\
-	__bad_unaligned_access_size()))));					\
-	}))
-
-#define __put_unaligned_le(val, ptr) ({					\
-	void *__gu_p = (ptr);						\
-	switch (sizeof(*(ptr))) {					\
-	case 1:								\
-		*(u8 *)__gu_p = (__force u8)(val);			\
-		break;							\
-	case 2:								\
-		put_unaligned_le16((__force u16)(val), __gu_p);		\
-		break;							\
-	case 4:								\
-		put_unaligned_le32((__force u32)(val), __gu_p);		\
-		break;							\
-	case 8:								\
-		put_unaligned_le64((__force u64)(val), __gu_p);		\
-		break;							\
-	default:							\
-		__bad_unaligned_access_size();				\
-		break;							\
-	}								\
-	(void)0; })
-
-#define __put_unaligned_be(val, ptr) ({					\
-	void *__gu_p = (ptr);						\
-	switch (sizeof(*(ptr))) {					\
-	case 1:								\
-		*(u8 *)__gu_p = (__force u8)(val);			\
-		break;							\
-	case 2:								\
-		put_unaligned_be16((__force u16)(val), __gu_p);		\
-		break;							\
-	case 4:								\
-		put_unaligned_be32((__force u32)(val), __gu_p);		\
-		break;							\
-	case 8:								\
-		put_unaligned_be64((__force u64)(val), __gu_p);		\
-		break;							\
-	default:							\
-		__bad_unaligned_access_size();				\
-		break;							\
-	}								\
-	(void)0; })
+#include <linux/unaligned/generic.h>
 
 #ifdef __LITTLE_ENDIAN
 # define get_unaligned __get_unaligned_le
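The native-endian put paths above now come from __put_unaligned_cpu16/32/64() in the newly included <linux/unaligned/packed_struct.h>, replacing the __una_* packed structs this header used to define locally. A sketch of that pattern, shown for the 32-bit case only and assumed rather than quoted from the tree:

	struct __una_u32 { u32 x __attribute__((packed)); };

	static inline u32 __get_unaligned_cpu32(const void *p)
	{
		/*
		 * The packed member tells the compiler the field may sit
		 * at any alignment, so it emits an access that is safe on
		 * machines without hardware unaligned loads.
		 */
		const struct __una_u32 *ptr = (const struct __una_u32 *)p;
		return ptr->x;
	}

	static inline void __put_unaligned_cpu32(u32 val, void *p)
	{
		struct __una_u32 *ptr = (struct __una_u32 *)p;
		ptr->x = val;
	}

Note the asymmetry the diff preserves: the load side can use the movua.l fast path, but SH-4A has no unaligned-store counterpart, so the put_unaligned_*() helpers fall back to packed-struct stores in native byte order and byte-wise nonnative_put_*() stores in foreign byte order.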