path: root/include/asm-arm
author	Harvey Harrison <harvey.harrison@gmail.com>	2008-04-29 04:03:30 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-29 11:06:27 -0400
commit	6510d41954dc6a9c8b1dbca7eaca0f23195ca727 (patch)
tree	868b5fac25c7c5b80cc5a88eaaab8bf3d693420d /include/asm-arm
parent	064106a91be5e76cb42c1ddf5d3871e3a1bd2a23 (diff)
kernel: Move arches to use common unaligned access
Unaligned access is ok for the following arches: cris, m68k, mn10300, powerpc, s390, x86.

Arches that use the memmove implementation for native endian and the byteshifting versions for the opposite endianness: h8300, m32r, xtensa.

Packed struct for native endian, byteshifting for the other endianness: alpha, blackfin, ia64, parisc, sparc, sparc64, mips, sh.

m68knommu is generic_be for Coldfire; otherwise unaligned access is ok.

frv and arm choose endianness based on compiler settings and use the byteshifting versions. Remove the unaligned trap handler from frv as it is now unused.

v850 is little-endian and uses the byteshifting versions for both be and le.

Remove the now unused asm-generic implementation.

Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Acked-by: David S. Miller <davem@davemloft.net>
Cc: <linux-arch@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
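The "byteshifting" implementations referred to above assemble and store values one byte at a time, so no load or store wider than a byte ever touches a misaligned address. A minimal standalone C sketch of the little-endian 32-bit case (the names echo the kernel's get_unaligned_le32/put_unaligned_le32 helpers, but this is an illustration rather than the kernel source):

#include <stdint.h>

/* Read a 32-bit little-endian value from a possibly misaligned pointer. */
static inline uint32_t sketch_get_unaligned_le32(const void *p)
{
	const uint8_t *b = p;

	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

/* Store a 32-bit value as little-endian bytes at any alignment. */
static inline void sketch_put_unaligned_le32(uint32_t val, void *p)
{
	uint8_t *b = p;

	b[0] = val & 0xff;
	b[1] = (val >> 8) & 0xff;
	b[2] = (val >> 16) & 0xff;
	b[3] = (val >> 24) & 0xff;
}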
Diffstat (limited to 'include/asm-arm')
-rw-r--r--	include/asm-arm/unaligned.h	174
1 file changed, 6 insertions, 168 deletions
diff --git a/include/asm-arm/unaligned.h b/include/asm-arm/unaligned.h
index 5db03cf3b905..44593a894903 100644
--- a/include/asm-arm/unaligned.h
+++ b/include/asm-arm/unaligned.h
@@ -1,171 +1,9 @@
-#ifndef __ASM_ARM_UNALIGNED_H
-#define __ASM_ARM_UNALIGNED_H
+#ifndef _ASM_ARM_UNALIGNED_H
+#define _ASM_ARM_UNALIGNED_H
 
-#include <asm/types.h>
-
-extern int __bug_unaligned_x(const void *ptr);
-
-/*
- * What is the most efficient way of loading/storing an unaligned value?
- *
- * That is the subject of this file.  Efficiency here is defined as
- * minimum code size with minimum register usage for the common cases.
- * It is currently not believed that long longs are common, so we
- * trade efficiency for the chars, shorts and longs against the long
- * longs.
- *
- * Current stats with gcc 2.7.2.2 for these functions:
- *
- *	ptrsize	get:	code	regs	put:	code	regs
- *	1	1	1	1	2
- *	2	3	2	3	2
- *	4	7	3	7	3
- *	8	20	6	16	6
- *
- * gcc 2.95.1 seems to code differently:
- *
- *	ptrsize	get:	code	regs	put:	code	regs
- *	1	1	1	1	2
- *	2	3	2	3	2
- *	4	7	4	7	4
- *	8	19	8	15	6
- *
- * which may or may not be more efficient (depending upon whether
- * you can afford the extra registers).  Hopefully the gcc 2.95
- * is inteligent enough to decide if it is better to use the
- * extra register, but evidence so far seems to suggest otherwise.
- *
- * Unfortunately, gcc is not able to optimise the high word
- * out of long long >> 32, or the low word from long long << 32
- */
-
-#define __get_unaligned_2_le(__p)					\
-	(unsigned int)(__p[0] | __p[1] << 8)
-
-#define __get_unaligned_2_be(__p)					\
-	(unsigned int)(__p[0] << 8 | __p[1])
-
-#define __get_unaligned_4_le(__p)					\
-	(unsigned int)(__p[0] | __p[1] << 8 | __p[2] << 16 | __p[3] << 24)
-
-#define __get_unaligned_4_be(__p)					\
-	(unsigned int)(__p[0] << 24 | __p[1] << 16 | __p[2] << 8 | __p[3])
-
-#define __get_unaligned_8_le(__p)					\
-	((unsigned long long)__get_unaligned_4_le((__p+4)) << 32 |	\
-		__get_unaligned_4_le(__p))
-
-#define __get_unaligned_8_be(__p)					\
-	((unsigned long long)__get_unaligned_4_be(__p) << 32 |		\
-		__get_unaligned_4_be((__p+4)))
-
-#define __get_unaligned_le(ptr)						\
-	((__force typeof(*(ptr)))({					\
-		const __u8 *__p = (const __u8 *)(ptr);			\
-		__builtin_choose_expr(sizeof(*(ptr)) == 1, *__p,	\
-		__builtin_choose_expr(sizeof(*(ptr)) == 2, __get_unaligned_2_le(__p),	\
-		__builtin_choose_expr(sizeof(*(ptr)) == 4, __get_unaligned_4_le(__p),	\
-		__builtin_choose_expr(sizeof(*(ptr)) == 8, __get_unaligned_8_le(__p),	\
-		(void)__bug_unaligned_x(__p)))));			\
-	}))
-
-#define __get_unaligned_be(ptr)						\
-	((__force typeof(*(ptr)))({					\
-		const __u8 *__p = (const __u8 *)(ptr);			\
-		__builtin_choose_expr(sizeof(*(ptr)) == 1, *__p,	\
-		__builtin_choose_expr(sizeof(*(ptr)) == 2, __get_unaligned_2_be(__p),	\
-		__builtin_choose_expr(sizeof(*(ptr)) == 4, __get_unaligned_4_be(__p),	\
-		__builtin_choose_expr(sizeof(*(ptr)) == 8, __get_unaligned_8_be(__p),	\
-		(void)__bug_unaligned_x(__p)))));			\
-	}))
-
-
-static inline void __put_unaligned_2_le(__u32 __v, register __u8 *__p)
-{
-	*__p++ = __v;
-	*__p++ = __v >> 8;
-}
-
-static inline void __put_unaligned_2_be(__u32 __v, register __u8 *__p)
-{
-	*__p++ = __v >> 8;
-	*__p++ = __v;
-}
-
-static inline void __put_unaligned_4_le(__u32 __v, register __u8 *__p)
-{
-	__put_unaligned_2_le(__v >> 16, __p + 2);
-	__put_unaligned_2_le(__v, __p);
-}
-
-static inline void __put_unaligned_4_be(__u32 __v, register __u8 *__p)
-{
-	__put_unaligned_2_be(__v >> 16, __p);
-	__put_unaligned_2_be(__v, __p + 2);
-}
-
-static inline void __put_unaligned_8_le(const unsigned long long __v, register __u8 *__p)
-{
-	/*
-	 * tradeoff: 8 bytes of stack for all unaligned puts (2
-	 * instructions), or an extra register in the long long
-	 * case - go for the extra register.
-	 */
-	__put_unaligned_4_le(__v >> 32, __p+4);
-	__put_unaligned_4_le(__v, __p);
-}
-
-static inline void __put_unaligned_8_be(const unsigned long long __v, register __u8 *__p)
-{
-	/*
-	 * tradeoff: 8 bytes of stack for all unaligned puts (2
-	 * instructions), or an extra register in the long long
-	 * case - go for the extra register.
-	 */
-	__put_unaligned_4_be(__v >> 32, __p);
-	__put_unaligned_4_be(__v, __p+4);
-}
-
-/*
- * Try to store an unaligned value as efficiently as possible.
- */
-#define __put_unaligned_le(val,ptr)					\
-	({								\
-		(void)sizeof(*(ptr) = (val));				\
-		switch (sizeof(*(ptr))) {				\
-		case 1:							\
-			*(ptr) = (val);					\
-			break;						\
-		case 2: __put_unaligned_2_le((__force u16)(val),(__u8 *)(ptr));	\
-			break;						\
-		case 4: __put_unaligned_4_le((__force u32)(val),(__u8 *)(ptr));	\
-			break;						\
-		case 8: __put_unaligned_8_le((__force u64)(val),(__u8 *)(ptr));	\
-			break;						\
-		default: __bug_unaligned_x(ptr);			\
-			break;						\
-		}							\
-		(void) 0;						\
-	})
-
-#define __put_unaligned_be(val,ptr)					\
-	({								\
-		(void)sizeof(*(ptr) = (val));				\
-		switch (sizeof(*(ptr))) {				\
-		case 1:							\
-			*(ptr) = (val);					\
-			break;						\
-		case 2: __put_unaligned_2_be((__force u16)(val),(__u8 *)(ptr));	\
-			break;						\
-		case 4: __put_unaligned_4_be((__force u32)(val),(__u8 *)(ptr));	\
-			break;						\
-		case 8: __put_unaligned_8_be((__force u64)(val),(__u8 *)(ptr));	\
-			break;						\
-		default: __bug_unaligned_x(ptr);			\
-			break;						\
-		}							\
-		(void) 0;						\
-	})
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/be_byteshift.h>
+#include <linux/unaligned/generic.h>
 
 /*
  * Select endianness
@@ -178,4 +16,4 @@ static inline void __put_unaligned_8_be(const unsigned long long __v, register _
 #define put_unaligned	__put_unaligned_be
 #endif
 
-#endif
+#endif /* _ASM_ARM_UNALIGNED_H */
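With this change, <asm/unaligned.h> on ARM keeps providing get_unaligned()/put_unaligned(), plus the explicit-endian helpers, from the shared byteshifting headers. A hypothetical caller, purely for illustration (the buffer layout and function name are invented, not from this commit):

#include <linux/types.h>
#include <asm/unaligned.h>

/* Hypothetical helper: read a 32-bit little-endian length field that
 * may start at any byte offset inside a received buffer. */
static u32 example_parse_len(const u8 *buf)
{
	/* the byteshifting version underneath makes any alignment safe */
	return get_unaligned_le32(buf + 2);
}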