author     Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-arm/unaligned.h

Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'include/asm-arm/unaligned.h')
-rw-r--r--   include/asm-arm/unaligned.h   191
1 file changed, 191 insertions(+), 0 deletions(-)
diff --git a/include/asm-arm/unaligned.h b/include/asm-arm/unaligned.h
new file mode 100644
index 000000000000..1b39c2f322c9
--- /dev/null
+++ b/include/asm-arm/unaligned.h
@@ -0,0 +1,191 @@
#ifndef __ASM_ARM_UNALIGNED_H
#define __ASM_ARM_UNALIGNED_H

#include <asm/types.h>

extern int __bug_unaligned_x(void *ptr);

/*
 * What is the most efficient way of loading/storing an unaligned value?
 *
 * That is the subject of this file.  Efficiency here is defined as
 * minimum code size with minimum register usage for the common cases.
 * It is currently not believed that long longs are common, so we
 * trade efficiency for the chars, shorts and longs against the long
 * longs.
 *
 * Current stats with gcc 2.7.2.2 for these functions:
 *
 *  ptrsize   get: code  regs    put: code  regs
 *     1            1      1           1      2
 *     2            3      2           3      2
 *     4            7      3           7      3
 *     8           20      6          16      6
 *
 * gcc 2.95.1 seems to code differently:
 *
 *  ptrsize   get: code  regs    put: code  regs
 *     1            1      1           1      2
 *     2            3      2           3      2
 *     4            7      4           7      4
 *     8           19      8          15      6
 *
 * which may or may not be more efficient (depending upon whether
 * you can afford the extra registers).  Hopefully gcc 2.95 is
 * intelligent enough to decide if it is better to use the extra
 * register, but evidence so far seems to suggest otherwise.
 *
 * Unfortunately, gcc is not able to optimise the high word
 * out of long long >> 32, or the low word from long long << 32.
 */

#define __get_unaligned_2_le(__p)					\
	(__p[0] | __p[1] << 8)

#define __get_unaligned_2_be(__p)					\
	(__p[0] << 8 | __p[1])

#define __get_unaligned_4_le(__p)					\
	(__p[0] | __p[1] << 8 | __p[2] << 16 | __p[3] << 24)

#define __get_unaligned_4_be(__p)					\
	(__p[0] << 24 | __p[1] << 16 | __p[2] << 8 | __p[3])

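/*
 * For example, given the byte sequence __p[0..3] = { 0x78, 0x56, 0x34, 0x12 },
 * __get_unaligned_4_le(__p) evaluates to
 * 0x78 | 0x56 << 8 | 0x34 << 16 | 0x12 << 24 = 0x12345678, whereas
 * __get_unaligned_4_be(__p) evaluates to 0x78563412.
 */
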
#define __get_unaligned_le(ptr)					\
	({							\
		__typeof__(*(ptr)) __v;				\
		__u8 *__p = (__u8 *)(ptr);			\
		switch (sizeof(*(ptr))) {			\
		case 1:	__v = *(ptr);				break;	\
		case 2:	__v = __get_unaligned_2_le(__p);	break;	\
		case 4:	__v = __get_unaligned_4_le(__p);	break;	\
		case 8: {						\
				unsigned int __v1, __v2;		\
				__v2 = __get_unaligned_4_le((__p+4));	\
				__v1 = __get_unaligned_4_le(__p);	\
				__v = ((unsigned long long)__v2 << 32 | __v1);	\
			}						\
			break;						\
		default: __v = __bug_unaligned_x(__p);		break;	\
		}							\
		__v;							\
	})
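
/*
 * In the 8-byte case of __get_unaligned_le(), __v1 is the low word
 * (bytes __p[0..3]) and __v2 the high word (bytes __p[4..7]) of the
 * little-endian value, combined as (__v2 << 32) | __v1.
 */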

#define __get_unaligned_be(ptr)					\
	({							\
		__typeof__(*(ptr)) __v;				\
		__u8 *__p = (__u8 *)(ptr);			\
		switch (sizeof(*(ptr))) {			\
		case 1:	__v = *(ptr);				break;	\
		case 2:	__v = __get_unaligned_2_be(__p);	break;	\
		case 4:	__v = __get_unaligned_4_be(__p);	break;	\
		case 8: {						\
				unsigned int __v1, __v2;		\
				__v2 = __get_unaligned_4_be(__p);	\
				__v1 = __get_unaligned_4_be((__p+4));	\
				__v = ((unsigned long long)__v2 << 32 | __v1);	\
			}						\
			break;						\
		default: __v = __bug_unaligned_x(__p);		break;	\
		}							\
		__v;							\
	})


static inline void __put_unaligned_2_le(__u32 __v, register __u8 *__p)
{
	*__p++ = __v;
	*__p++ = __v >> 8;
}

static inline void __put_unaligned_2_be(__u32 __v, register __u8 *__p)
{
	*__p++ = __v >> 8;
	*__p++ = __v;
}

static inline void __put_unaligned_4_le(__u32 __v, register __u8 *__p)
{
	__put_unaligned_2_le(__v >> 16, __p + 2);
	__put_unaligned_2_le(__v, __p);
}

static inline void __put_unaligned_4_be(__u32 __v, register __u8 *__p)
{
	__put_unaligned_2_be(__v >> 16, __p);
	__put_unaligned_2_be(__v, __p + 2);
}

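/*
 * For example, __put_unaligned_4_le(0x12345678, __p) stores the bytes
 * 0x78, 0x56, 0x34, 0x12 at __p[0..3]; __put_unaligned_4_be() stores
 * them in the opposite order.
 */
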
static inline void __put_unaligned_8_le(const unsigned long long __v, register __u8 *__p)
{
	/*
	 * tradeoff: 8 bytes of stack for all unaligned puts (2
	 * instructions), or an extra register in the long long
	 * case - go for the extra register.
	 */
	__put_unaligned_4_le(__v >> 32, __p+4);
	__put_unaligned_4_le(__v, __p);
}

static inline void __put_unaligned_8_be(const unsigned long long __v, register __u8 *__p)
{
	/*
	 * tradeoff: 8 bytes of stack for all unaligned puts (2
	 * instructions), or an extra register in the long long
	 * case - go for the extra register.
	 */
	__put_unaligned_4_be(__v >> 32, __p);
	__put_unaligned_4_be(__v, __p+4);
}

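/*
 * For example, __put_unaligned_8_le(0x1122334455667788ULL, __p) stores
 * the bytes 0x88, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11 at __p[0..7].
 */
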
/*
 * Try to store an unaligned value as efficiently as possible.
 */
#define __put_unaligned_le(val,ptr)					\
	({							\
		switch (sizeof(*(ptr))) {			\
		case 1:						\
			*(ptr) = (val);				\
			break;					\
		case 2:	__put_unaligned_2_le((val),(__u8 *)(ptr));	\
			break;					\
		case 4:	__put_unaligned_4_le((val),(__u8 *)(ptr));	\
			break;					\
		case 8:	__put_unaligned_8_le((val),(__u8 *)(ptr));	\
			break;					\
		default: __bug_unaligned_x(ptr);		\
			break;					\
		}						\
		(void) 0;					\
	})

#define __put_unaligned_be(val,ptr)					\
	({							\
		switch (sizeof(*(ptr))) {			\
		case 1:						\
			*(ptr) = (val);				\
			break;					\
		case 2:	__put_unaligned_2_be((val),(__u8 *)(ptr));	\
			break;					\
		case 4:	__put_unaligned_4_be((val),(__u8 *)(ptr));	\
			break;					\
		case 8:	__put_unaligned_8_be((val),(__u8 *)(ptr));	\
			break;					\
		default: __bug_unaligned_x(ptr);		\
			break;					\
		}						\
		(void) 0;					\
	})

/*
 * Select endianness
 */
#ifndef __ARMEB__
#define get_unaligned	__get_unaligned_le
#define put_unaligned	__put_unaligned_le
#else
#define get_unaligned	__get_unaligned_be
#define put_unaligned	__put_unaligned_be
#endif

#endif
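
A minimal usage sketch of the resulting get_unaligned/put_unaligned interface (the struct and helper names below are illustrative, not part of this commit): a 16-bit field sitting at an odd offset in a byte buffer can be read and written safely on ARM, because the accessors expand to byte-wise loads and stores.

	#include <asm/types.h>
	#include <asm/unaligned.h>

	/* Hypothetical on-wire header: the 16-bit length starts at offset 1. */
	struct wire_hdr {
		__u8 flags;
		__u8 len_bytes[2];	/* CPU-endian length, stored unaligned */
	};

	static __u16 wire_get_len(struct wire_hdr *hdr)
	{
		/* expands to two byte loads, so the odd address is safe */
		return get_unaligned((__u16 *)hdr->len_bytes);
	}

	static void wire_set_len(struct wire_hdr *hdr, __u16 len)
	{
		/* expands to two byte stores */
		put_unaligned(len, (__u16 *)hdr->len_bytes);
	}

On a little-endian build this reads and writes the field in little-endian byte order; defining __ARMEB__ selects the big-endian variants instead.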