author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/linux/byteorder
tag       v2.6.12-rc2 (Linux-2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it.

Let it rip!
Diffstat (limited to 'include/linux/byteorder')
-rw-r--r--  include/linux/byteorder/big_endian.h     106
-rw-r--r--  include/linux/byteorder/generic.h        172
-rw-r--r--  include/linux/byteorder/little_endian.h  106
-rw-r--r--  include/linux/byteorder/pdp_endian.h      88
-rw-r--r--  include/linux/byteorder/swab.h           192
-rw-r--r--  include/linux/byteorder/swabb.h          137
6 files changed, 801 insertions(+), 0 deletions(-)
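For orientation, here is a minimal sketch of how an architecture opts into this machinery. The exact file below is hypothetical (it is not part of this commit), but it mirrors the shape of a real asm/byteorder.h: define any optimized __arch__swab* overrides and the feature macros used by swab.h, then include the header matching the CPU's endianness, which in turn pulls in swab.h and generic.h.

/* hypothetical include/asm-foo/byteorder.h -- a sketch, not from this commit */
#ifndef _FOO_BYTEORDER_H
#define _FOO_BYTEORDER_H

#include <asm/types.h>

#define __BYTEORDER_HAS_U64__   /* this arch supports 64-bit swabs... */
#define __SWAB_64_THRU_32__     /* ...implemented as two 32-bit swabs */

/* arch-optimized __arch__swab16/32/64 overrides would go here */

#include <linux/byteorder/little_endian.h>

#endif /* _FOO_BYTEORDER_H */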
diff --git a/include/linux/byteorder/big_endian.h b/include/linux/byteorder/big_endian.h
new file mode 100644
index 000000000000..bef87891cb24
--- /dev/null
+++ b/include/linux/byteorder/big_endian.h
@@ -0,0 +1,106 @@
#ifndef _LINUX_BYTEORDER_BIG_ENDIAN_H
#define _LINUX_BYTEORDER_BIG_ENDIAN_H

#ifndef __BIG_ENDIAN
#define __BIG_ENDIAN 4321
#endif
#ifndef __BIG_ENDIAN_BITFIELD
#define __BIG_ENDIAN_BITFIELD
#endif

#include <linux/types.h>
#include <linux/byteorder/swab.h>

#define __constant_htonl(x) ((__force __be32)(__u32)(x))
#define __constant_ntohl(x) ((__force __u32)(__be32)(x))
#define __constant_htons(x) ((__force __be16)(__u16)(x))
#define __constant_ntohs(x) ((__force __u16)(__be16)(x))
#define __constant_cpu_to_le64(x) ((__force __le64)___constant_swab64((x)))
#define __constant_le64_to_cpu(x) ___constant_swab64((__force __u64)(__le64)(x))
#define __constant_cpu_to_le32(x) ((__force __le32)___constant_swab32((x)))
#define __constant_le32_to_cpu(x) ___constant_swab32((__force __u32)(__le32)(x))
#define __constant_cpu_to_le16(x) ((__force __le16)___constant_swab16((x)))
#define __constant_le16_to_cpu(x) ___constant_swab16((__force __u16)(__le16)(x))
#define __constant_cpu_to_be64(x) ((__force __be64)(__u64)(x))
#define __constant_be64_to_cpu(x) ((__force __u64)(__be64)(x))
#define __constant_cpu_to_be32(x) ((__force __be32)(__u32)(x))
#define __constant_be32_to_cpu(x) ((__force __u32)(__be32)(x))
#define __constant_cpu_to_be16(x) ((__force __be16)(__u16)(x))
#define __constant_be16_to_cpu(x) ((__force __u16)(__be16)(x))
#define __cpu_to_le64(x) ((__force __le64)__swab64((x)))
#define __le64_to_cpu(x) __swab64((__force __u64)(__le64)(x))
#define __cpu_to_le32(x) ((__force __le32)__swab32((x)))
#define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x))
#define __cpu_to_le16(x) ((__force __le16)__swab16((x)))
#define __le16_to_cpu(x) __swab16((__force __u16)(__le16)(x))
#define __cpu_to_be64(x) ((__force __be64)(__u64)(x))
#define __be64_to_cpu(x) ((__force __u64)(__be64)(x))
#define __cpu_to_be32(x) ((__force __be32)(__u32)(x))
#define __be32_to_cpu(x) ((__force __u32)(__be32)(x))
#define __cpu_to_be16(x) ((__force __be16)(__u16)(x))
#define __be16_to_cpu(x) ((__force __u16)(__be16)(x))

static inline __le64 __cpu_to_le64p(const __u64 *p)
{
	return (__force __le64)__swab64p(p);
}
static inline __u64 __le64_to_cpup(const __le64 *p)
{
	return __swab64p((__u64 *)p);
}
static inline __le32 __cpu_to_le32p(const __u32 *p)
{
	return (__force __le32)__swab32p(p);
}
static inline __u32 __le32_to_cpup(const __le32 *p)
{
	return __swab32p((__u32 *)p);
}
static inline __le16 __cpu_to_le16p(const __u16 *p)
{
	return (__force __le16)__swab16p(p);
}
static inline __u16 __le16_to_cpup(const __le16 *p)
{
	return __swab16p((__u16 *)p);
}
static inline __be64 __cpu_to_be64p(const __u64 *p)
{
	return (__force __be64)*p;
}
static inline __u64 __be64_to_cpup(const __be64 *p)
{
	return (__force __u64)*p;
}
static inline __be32 __cpu_to_be32p(const __u32 *p)
{
	return (__force __be32)*p;
}
static inline __u32 __be32_to_cpup(const __be32 *p)
{
	return (__force __u32)*p;
}
static inline __be16 __cpu_to_be16p(const __u16 *p)
{
	return (__force __be16)*p;
}
static inline __u16 __be16_to_cpup(const __be16 *p)
{
	return (__force __u16)*p;
}
#define __cpu_to_le64s(x) __swab64s((x))
#define __le64_to_cpus(x) __swab64s((x))
#define __cpu_to_le32s(x) __swab32s((x))
#define __le32_to_cpus(x) __swab32s((x))
#define __cpu_to_le16s(x) __swab16s((x))
#define __le16_to_cpus(x) __swab16s((x))
#define __cpu_to_be64s(x) do {} while (0)
#define __be64_to_cpus(x) do {} while (0)
#define __cpu_to_be32s(x) do {} while (0)
#define __be32_to_cpus(x) do {} while (0)
#define __cpu_to_be16s(x) do {} while (0)
#define __be16_to_cpus(x) do {} while (0)

#include <linux/byteorder/generic.h>

#endif /* _LINUX_BYTEORDER_BIG_ENDIAN_H */
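The pattern above: on a big-endian CPU, values are already in "be" order, so the be conversions are pure type casts (and the in-place "s" forms compile to nothing), while the le conversions must reverse all bytes. A user-space sketch of the 32-bit case, with uint32_t standing in for __u32 (an assumption; kernel code would use the macros directly):

#include <stdint.h>
#include <assert.h>

/* same bit shuffle as ___swab32 above */
static uint32_t swab32(uint32_t x)
{
	return ((x & 0x000000ffUL) << 24) |
	       ((x & 0x0000ff00UL) <<  8) |
	       ((x & 0x00ff0000UL) >>  8) |
	       ((x & 0xff000000UL) >> 24);
}

int main(void)
{
	uint32_t cpu = 0x12345678;
	/* __cpu_to_le32 on big-endian: a full byte swap */
	assert(swab32(cpu) == 0x78563412);
	/* swapping twice is the identity, so le32_to_cpu(cpu_to_le32(x)) == x */
	assert(swab32(swab32(cpu)) == cpu);
	return 0;
}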
diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h
new file mode 100644
index 000000000000..5fde6f4d6c1e
--- /dev/null
+++ b/include/linux/byteorder/generic.h
@@ -0,0 +1,172 @@
#ifndef _LINUX_BYTEORDER_GENERIC_H
#define _LINUX_BYTEORDER_GENERIC_H

/*
 * linux/byteorder/generic.h
 * Generic Byte-reordering support
 *
 * Francois-Rene Rideau <fare@tunes.org> 19970707
 *    gathered all the good ideas from all asm-foo/byteorder.h into one file,
 *    cleaned them up.
 *    I hope it is compliant with non-GCC compilers.
 *    I decided to put __BYTEORDER_HAS_U64__ in byteorder.h,
 *    because I wasn't sure it would be ok to put it in types.h
 *    Upgraded it to 2.1.43
 * Francois-Rene Rideau <fare@tunes.org> 19971012
 *    Upgraded it to 2.1.57
 *    to please Linus T., replaced huge #ifdef's between little/big endian
 *    by nestedly #include'd files.
 * Francois-Rene Rideau <fare@tunes.org> 19971205
 *    Made it to 2.1.71; now a facelift:
 *    Put files under include/linux/byteorder/
 *    Split swab from generic support.
 *
 * TODO:
 *   = Regular kernel maintainers could also replace all these manual
 *     byteswap macros that remain, disseminated among drivers,
 *     after some grep of the sources...
 *   = Linus might want to rename all these macros and files to fit his
 *     personal naming scheme.
 *   = it seems that a few drivers would also appreciate
 *     nybble swapping support...
 *   = every architecture could add its byteswap macros in asm/byteorder.h
 *     see how some architectures already do (i386, alpha, ppc, etc)
 *   = cpu_to_beXX and beXX_to_cpu might some day need to be well
 *     distinguished throughout the kernel. This is not the case currently,
 *     since little endian, big endian, and pdp endian machines don't need it.
 *     But this might be the case for, say, a port of Linux to 20/21 bit
 *     architectures (any F21 Linux addict around?).
 */

/*
 * The following macros are to be defined by <asm/byteorder.h>:
 *
 * Conversion of long and short int between network and host format
 *	ntohl(__u32 x)
 *	ntohs(__u16 x)
 *	htonl(__u32 x)
 *	htons(__u16 x)
 * It seems that some programs (which? where? or perhaps a standard? POSIX?)
 * might like the above to be functions, not macros (why?).
 * If that's true, then detect them, and take measures.
 * Anyway, the measure is: define only ___ntohl as a macro instead,
 * and in a separate file, have
 *	unsigned long inline ntohl(x){return ___ntohl(x);}
 *
 * The same for constant arguments
 *	__constant_ntohl(__u32 x)
 *	__constant_ntohs(__u16 x)
 *	__constant_htonl(__u32 x)
 *	__constant_htons(__u16 x)
 *
 * Conversion of 16-, 32- or 64-bit integers
 * between native CPU format and little/big endian format
 * 64-bit stuff only defined for proper architectures
 *	cpu_to_[bl]eXX(__uXX x)
 *	[bl]eXX_to_cpu(__uXX x)
 *
 * The same, but takes a pointer to the value to convert
 *	cpu_to_[bl]eXXp(__uXX *p)
 *	[bl]eXX_to_cpup(__uXX *p)
 *
 * The same, but converts in place
 *	cpu_to_[bl]eXXs(__uXX *p)
 *	[bl]eXX_to_cpus(__uXX *p)
 *
 * See asm-foo/byteorder.h for examples of how to provide
 * architecture-optimized versions
 */


#if defined(__KERNEL__)
/*
 * inside the kernel, we can use nicknames;
 * outside of it, we must avoid POSIX namespace pollution...
 */
#define cpu_to_le64 __cpu_to_le64
#define le64_to_cpu __le64_to_cpu
#define cpu_to_le32 __cpu_to_le32
#define le32_to_cpu __le32_to_cpu
#define cpu_to_le16 __cpu_to_le16
#define le16_to_cpu __le16_to_cpu
#define cpu_to_be64 __cpu_to_be64
#define be64_to_cpu __be64_to_cpu
#define cpu_to_be32 __cpu_to_be32
#define be32_to_cpu __be32_to_cpu
#define cpu_to_be16 __cpu_to_be16
#define be16_to_cpu __be16_to_cpu
#define cpu_to_le64p __cpu_to_le64p
#define le64_to_cpup __le64_to_cpup
#define cpu_to_le32p __cpu_to_le32p
#define le32_to_cpup __le32_to_cpup
#define cpu_to_le16p __cpu_to_le16p
#define le16_to_cpup __le16_to_cpup
#define cpu_to_be64p __cpu_to_be64p
#define be64_to_cpup __be64_to_cpup
#define cpu_to_be32p __cpu_to_be32p
#define be32_to_cpup __be32_to_cpup
#define cpu_to_be16p __cpu_to_be16p
#define be16_to_cpup __be16_to_cpup
#define cpu_to_le64s __cpu_to_le64s
#define le64_to_cpus __le64_to_cpus
#define cpu_to_le32s __cpu_to_le32s
#define le32_to_cpus __le32_to_cpus
#define cpu_to_le16s __cpu_to_le16s
#define le16_to_cpus __le16_to_cpus
#define cpu_to_be64s __cpu_to_be64s
#define be64_to_cpus __be64_to_cpus
#define cpu_to_be32s __cpu_to_be32s
#define be32_to_cpus __be32_to_cpus
#define cpu_to_be16s __cpu_to_be16s
#define be16_to_cpus __be16_to_cpus
#endif


#if defined(__KERNEL__)
/*
 * Handle ntohl and such. These have various compatibility
 * issues - like wanting to give the prototype even though we
 * also have a macro for them, in case some strange program
 * wants to take the address of the thing or something...
 *
 * Note that these used to return a "long" in libc5, even though
 * long is often 64-bit these days... Thus the casts.
 *
 * They have to be macros in order to do the constant folding
 * correctly - if the argument is passed into an inline function
 * it is no longer constant according to gcc...
 */

#undef ntohl
#undef ntohs
#undef htonl
#undef htons

/*
 * Do the prototypes. Somebody might want to take the
 * address or some such sick thing..
 */
extern __u32 ntohl(__be32);
extern __be32 htonl(__u32);
extern __u16 ntohs(__be16);
extern __be16 htons(__u16);

#if defined(__GNUC__) && (__GNUC__ >= 2) && defined(__OPTIMIZE__)

#define ___htonl(x) __cpu_to_be32(x)
#define ___htons(x) __cpu_to_be16(x)
#define ___ntohl(x) __be32_to_cpu(x)
#define ___ntohs(x) __be16_to_cpu(x)

#define htonl(x) ___htonl(x)
#define ntohl(x) ___ntohl(x)
#define htons(x) ___htons(x)
#define ntohs(x) ___ntohs(x)

#endif /* OPTIMIZE */

#endif /* KERNEL */


#endif /* _LINUX_BYTEORDER_GENERIC_H */
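A usage sketch of the kernel-side nicknames this header exports. The struct, field names, and magic number below are invented for illustration (nothing here is from this commit); the point is the discipline the header enables: on-disk and on-wire fields keep their __le32/__be16 types, every access goes through a conversion helper, and the same source is correct on little- and big-endian hosts alike.

/* hypothetical driver fragment -- a sketch, not from this commit */
struct sample_hdr {
	__le32 magic;      /* stored little-endian on disk */
	__le32 nblocks;
	__be16 port;       /* stored big-endian (network order) */
};

static int sample_hdr_valid(const struct sample_hdr *h)
{
	if (le32_to_cpu(h->magic) != 0x1badb002)   /* invented magic value */
		return 0;
	return ntohs(h->port) < 1024;              /* privileged port? */
}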
diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
new file mode 100644
index 000000000000..86e62b750176
--- /dev/null
+++ b/include/linux/byteorder/little_endian.h
@@ -0,0 +1,106 @@
#ifndef _LINUX_BYTEORDER_LITTLE_ENDIAN_H
#define _LINUX_BYTEORDER_LITTLE_ENDIAN_H

#ifndef __LITTLE_ENDIAN
#define __LITTLE_ENDIAN 1234
#endif
#ifndef __LITTLE_ENDIAN_BITFIELD
#define __LITTLE_ENDIAN_BITFIELD
#endif

#include <linux/types.h>
#include <linux/byteorder/swab.h>

#define __constant_htonl(x) ((__force __be32)___constant_swab32((x)))
#define __constant_ntohl(x) ___constant_swab32((__force __be32)(x))
#define __constant_htons(x) ((__force __be16)___constant_swab16((x)))
#define __constant_ntohs(x) ___constant_swab16((__force __be16)(x))
#define __constant_cpu_to_le64(x) ((__force __le64)(__u64)(x))
#define __constant_le64_to_cpu(x) ((__force __u64)(__le64)(x))
#define __constant_cpu_to_le32(x) ((__force __le32)(__u32)(x))
#define __constant_le32_to_cpu(x) ((__force __u32)(__le32)(x))
#define __constant_cpu_to_le16(x) ((__force __le16)(__u16)(x))
#define __constant_le16_to_cpu(x) ((__force __u16)(__le16)(x))
#define __constant_cpu_to_be64(x) ((__force __be64)___constant_swab64((x)))
#define __constant_be64_to_cpu(x) ___constant_swab64((__force __u64)(__be64)(x))
#define __constant_cpu_to_be32(x) ((__force __be32)___constant_swab32((x)))
#define __constant_be32_to_cpu(x) ___constant_swab32((__force __u32)(__be32)(x))
#define __constant_cpu_to_be16(x) ((__force __be16)___constant_swab16((x)))
#define __constant_be16_to_cpu(x) ___constant_swab16((__force __u16)(__be16)(x))
#define __cpu_to_le64(x) ((__force __le64)(__u64)(x))
#define __le64_to_cpu(x) ((__force __u64)(__le64)(x))
#define __cpu_to_le32(x) ((__force __le32)(__u32)(x))
#define __le32_to_cpu(x) ((__force __u32)(__le32)(x))
#define __cpu_to_le16(x) ((__force __le16)(__u16)(x))
#define __le16_to_cpu(x) ((__force __u16)(__le16)(x))
#define __cpu_to_be64(x) ((__force __be64)__swab64((x)))
#define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x))
#define __cpu_to_be32(x) ((__force __be32)__swab32((x)))
#define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x))
#define __cpu_to_be16(x) ((__force __be16)__swab16((x)))
#define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x))

static inline __le64 __cpu_to_le64p(const __u64 *p)
{
	return (__force __le64)*p;
}
static inline __u64 __le64_to_cpup(const __le64 *p)
{
	return (__force __u64)*p;
}
static inline __le32 __cpu_to_le32p(const __u32 *p)
{
	return (__force __le32)*p;
}
static inline __u32 __le32_to_cpup(const __le32 *p)
{
	return (__force __u32)*p;
}
static inline __le16 __cpu_to_le16p(const __u16 *p)
{
	return (__force __le16)*p;
}
static inline __u16 __le16_to_cpup(const __le16 *p)
{
	return (__force __u16)*p;
}
static inline __be64 __cpu_to_be64p(const __u64 *p)
{
	return (__force __be64)__swab64p(p);
}
static inline __u64 __be64_to_cpup(const __be64 *p)
{
	return __swab64p((__u64 *)p);
}
static inline __be32 __cpu_to_be32p(const __u32 *p)
{
	return (__force __be32)__swab32p(p);
}
static inline __u32 __be32_to_cpup(const __be32 *p)
{
	return __swab32p((__u32 *)p);
}
static inline __be16 __cpu_to_be16p(const __u16 *p)
{
	return (__force __be16)__swab16p(p);
}
static inline __u16 __be16_to_cpup(const __be16 *p)
{
	return __swab16p((__u16 *)p);
}
#define __cpu_to_le64s(x) do {} while (0)
#define __le64_to_cpus(x) do {} while (0)
#define __cpu_to_le32s(x) do {} while (0)
#define __le32_to_cpus(x) do {} while (0)
#define __cpu_to_le16s(x) do {} while (0)
#define __le16_to_cpus(x) do {} while (0)
#define __cpu_to_be64s(x) __swab64s((x))
#define __be64_to_cpus(x) __swab64s((x))
#define __cpu_to_be32s(x) __swab32s((x))
#define __be32_to_cpus(x) __swab32s((x))
#define __cpu_to_be16s(x) __swab16s((x))
#define __be16_to_cpus(x) __swab16s((x))

#include <linux/byteorder/generic.h>

#endif /* _LINUX_BYTEORDER_LITTLE_ENDIAN_H */
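This file is the exact mirror of big_endian.h: the le forms are casts and the le "s" forms expand to nothing, while the be forms swap. A user-space sketch of the in-place variants on a little-endian host, with a plain function standing in for __swab32s (an assumption; in the kernel the macros above are used directly):

#include <stdint.h>
#include <assert.h>

/* mimics __swab32s: reverse the four bytes of *p in place */
static void swab32s(uint32_t *p)
{
	*p = ((*p & 0x000000ffUL) << 24) |
	     ((*p & 0x0000ff00UL) <<  8) |
	     ((*p & 0x00ff0000UL) >>  8) |
	     ((*p & 0xff000000UL) >> 24);
}

int main(void)
{
	uint32_t v = 0x12345678;
	/* __cpu_to_le32s(&v) is "do {} while (0)" here: v is untouched */
	swab32s(&v);               /* what __cpu_to_be32s(&v) does */
	assert(v == 0x78563412);
	return 0;
}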
diff --git a/include/linux/byteorder/pdp_endian.h b/include/linux/byteorder/pdp_endian.h
new file mode 100644
index 000000000000..618631cbc6e3
--- /dev/null
+++ b/include/linux/byteorder/pdp_endian.h
@@ -0,0 +1,88 @@
#ifndef _LINUX_BYTEORDER_PDP_ENDIAN_H
#define _LINUX_BYTEORDER_PDP_ENDIAN_H

/*
 * Could have been named NUXI-endian, but we use the same name as in glibc.
 * Hopefully only the PDP and its evolutions (old VAXen in compatibility mode)
 * should ever use this braindead byteorder.
 * This file *should* work, but has not been tested.
 *
 * little-endian is 1234; big-endian is 4321; nuxi/pdp-endian is 3412
 *
 * I thought VAXen were NUXI-endian, but was told they were correct-endian
 * (little-endian), though indeed there existed NUXI-endian machines
 * (DEC PDP-11 and old VAXen in compatibility mode).
 * This makes this file a bit useless, but it remains as a proof of concept.
 *
 * But what does a __u64 look like: is it 34127856 or 78563412 ???
 * I don't dare imagine! Hence, no 64-bit byteorder support yet.
 * Hopefully, 64-bit pdp-endian support won't ever be required.
 */

#ifndef __PDP_ENDIAN
#define __PDP_ENDIAN 3412
#endif
#ifndef __PDP_ENDIAN_BITFIELD
#define __PDP_ENDIAN_BITFIELD
#endif

#include <linux/byteorder/swab.h>
#include <linux/byteorder/swabb.h>

#define __constant_htonl(x) ___constant_swahb32((x))
#define __constant_ntohl(x) ___constant_swahb32((x))
#define __constant_htons(x) ___constant_swab16((x))
#define __constant_ntohs(x) ___constant_swab16((x))
#define __constant_cpu_to_le64(x) I DON'T KNOW
#define __constant_le64_to_cpu(x) I DON'T KNOW
#define __constant_cpu_to_le32(x) ___constant_swahw32((x))
#define __constant_le32_to_cpu(x) ___constant_swahw32((x))
#define __constant_cpu_to_le16(x) ((__u16)(x))
#define __constant_le16_to_cpu(x) ((__u16)(x))
#define __constant_cpu_to_be64(x) I DON'T KNOW
#define __constant_be64_to_cpu(x) I DON'T KNOW
#define __constant_cpu_to_be32(x) ___constant_swahb32((x))
#define __constant_be32_to_cpu(x) ___constant_swahb32((x))
#define __constant_cpu_to_be16(x) ___constant_swab16((x))
#define __constant_be16_to_cpu(x) ___constant_swab16((x))
#define __cpu_to_le64(x) I DON'T KNOW
#define __le64_to_cpu(x) I DON'T KNOW
#define __cpu_to_le32(x) ___swahw32((x))
#define __le32_to_cpu(x) ___swahw32((x))
#define __cpu_to_le16(x) ((__u16)(x))
#define __le16_to_cpu(x) ((__u16)(x))
#define __cpu_to_be64(x) I DON'T KNOW
#define __be64_to_cpu(x) I DON'T KNOW
#define __cpu_to_be32(x) __swahb32((x))
#define __be32_to_cpu(x) __swahb32((x))
#define __cpu_to_be16(x) __swab16((x))
#define __be16_to_cpu(x) __swab16((x))
#define __cpu_to_le64p(x) I DON'T KNOW
#define __le64_to_cpup(x) I DON'T KNOW
#define __cpu_to_le32p(x) ___swahw32p((x))
#define __le32_to_cpup(x) ___swahw32p((x))
#define __cpu_to_le16p(x) (*(__u16*)(x))
#define __le16_to_cpup(x) (*(__u16*)(x))
#define __cpu_to_be64p(x) I DON'T KNOW
#define __be64_to_cpup(x) I DON'T KNOW
#define __cpu_to_be32p(x) __swahb32p((x))
#define __be32_to_cpup(x) __swahb32p((x))
#define __cpu_to_be16p(x) __swab16p((x))
#define __be16_to_cpup(x) __swab16p((x))
#define __cpu_to_le64s(x) I DON'T KNOW
#define __le64_to_cpus(x) I DON'T KNOW
#define __cpu_to_le32s(x) ___swahw32s((x))
#define __le32_to_cpus(x) ___swahw32s((x))
#define __cpu_to_le16s(x) do {} while (0)
#define __le16_to_cpus(x) do {} while (0)
#define __cpu_to_be64s(x) I DON'T KNOW
#define __be64_to_cpus(x) I DON'T KNOW
#define __cpu_to_be32s(x) __swahb32s((x))
#define __be32_to_cpus(x) __swahb32s((x))
#define __cpu_to_be16s(x) __swab16s((x))
#define __be16_to_cpus(x) __swab16s((x))

#include <linux/byteorder/generic.h>

#endif /* _LINUX_BYTEORDER_PDP_ENDIAN_H */
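Note that the "I DON'T KNOW" definitions are a deliberate trap: any use of a 64-bit conversion on a pdp-endian build fails to compile rather than silently misbehaving. A worked example of why ntohl is swahb32 here: pdp-endian stores 0x12345678 as the bytes 34 12 78 56, so loading the network bytes 12 34 56 78 yields the register value 0x34127856, and swapping the bytes within each 16-bit halfword recovers the host value. A user-space sketch verifying this (function name invented to mimic ___swahb32):

#include <stdint.h>
#include <assert.h>

/* mimics ___swahb32: swap the bytes inside each 16-bit halfword */
static uint32_t swahb32(uint32_t x)
{
	return ((x & 0x00ff00ffUL) << 8) |
	       ((x & 0xff00ff00UL) >> 8);
}

int main(void)
{
	assert(swahb32(0x34127856) == 0x12345678);  /* ntohl on pdp */
	assert(swahb32(0x12345678) == 0x34127856);  /* htonl: same operation */
	return 0;
}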
diff --git a/include/linux/byteorder/swab.h b/include/linux/byteorder/swab.h
new file mode 100644
index 000000000000..2f1cb775125a
--- /dev/null
+++ b/include/linux/byteorder/swab.h
@@ -0,0 +1,192 @@
#ifndef _LINUX_BYTEORDER_SWAB_H
#define _LINUX_BYTEORDER_SWAB_H

/*
 * linux/byteorder/swab.h
 * Byte-swapping, independently from CPU endianness
 *	swabXX[ps]?(foo)
 *
 * Francois-Rene Rideau <fare@tunes.org> 19971205
 *    separated swab functions from cpu_to_XX,
 *    to clean up support for bizarre-endian architectures.
 *
 * See asm-i386/byteorder.h and the like for examples of how to provide
 * architecture-dependent optimized versions
 */

#include <linux/compiler.h>

/* casts are necessary for constants, because we never know for sure
 * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
 */
#define ___swab16(x) \
({ \
	__u16 __x = (x); \
	((__u16)( \
		(((__u16)(__x) & (__u16)0x00ffU) << 8) | \
		(((__u16)(__x) & (__u16)0xff00U) >> 8) )); \
})

#define ___swab32(x) \
({ \
	__u32 __x = (x); \
	((__u32)( \
		(((__u32)(__x) & (__u32)0x000000ffUL) << 24) | \
		(((__u32)(__x) & (__u32)0x0000ff00UL) <<  8) | \
		(((__u32)(__x) & (__u32)0x00ff0000UL) >>  8) | \
		(((__u32)(__x) & (__u32)0xff000000UL) >> 24) )); \
})

#define ___swab64(x) \
({ \
	__u64 __x = (x); \
	((__u64)( \
		(__u64)(((__u64)(__x) & (__u64)0x00000000000000ffULL) << 56) | \
		(__u64)(((__u64)(__x) & (__u64)0x000000000000ff00ULL) << 40) | \
		(__u64)(((__u64)(__x) & (__u64)0x0000000000ff0000ULL) << 24) | \
		(__u64)(((__u64)(__x) & (__u64)0x00000000ff000000ULL) <<  8) | \
		(__u64)(((__u64)(__x) & (__u64)0x000000ff00000000ULL) >>  8) | \
		(__u64)(((__u64)(__x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
		(__u64)(((__u64)(__x) & (__u64)0x00ff000000000000ULL) >> 40) | \
		(__u64)(((__u64)(__x) & (__u64)0xff00000000000000ULL) >> 56) )); \
})

#define ___constant_swab16(x) \
	((__u16)( \
		(((__u16)(x) & (__u16)0x00ffU) << 8) | \
		(((__u16)(x) & (__u16)0xff00U) >> 8) ))
#define ___constant_swab32(x) \
	((__u32)( \
		(((__u32)(x) & (__u32)0x000000ffUL) << 24) | \
		(((__u32)(x) & (__u32)0x0000ff00UL) <<  8) | \
		(((__u32)(x) & (__u32)0x00ff0000UL) >>  8) | \
		(((__u32)(x) & (__u32)0xff000000UL) >> 24) ))
#define ___constant_swab64(x) \
	((__u64)( \
		(__u64)(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \
		(__u64)(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \
		(__u64)(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \
		(__u64)(((__u64)(x) & (__u64)0x00000000ff000000ULL) <<  8) | \
		(__u64)(((__u64)(x) & (__u64)0x000000ff00000000ULL) >>  8) | \
		(__u64)(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
		(__u64)(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \
		(__u64)(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56) ))

/*
 * provide defaults when no architecture-specific optimization is detected
 */
#ifndef __arch__swab16
# define __arch__swab16(x) ({ __u16 __tmp = (x); ___swab16(__tmp); })
#endif
#ifndef __arch__swab32
# define __arch__swab32(x) ({ __u32 __tmp = (x); ___swab32(__tmp); })
#endif
#ifndef __arch__swab64
# define __arch__swab64(x) ({ __u64 __tmp = (x); ___swab64(__tmp); })
#endif

#ifndef __arch__swab16p
# define __arch__swab16p(x) __arch__swab16(*(x))
#endif
#ifndef __arch__swab32p
# define __arch__swab32p(x) __arch__swab32(*(x))
#endif
#ifndef __arch__swab64p
# define __arch__swab64p(x) __arch__swab64(*(x))
#endif

#ifndef __arch__swab16s
# define __arch__swab16s(x) do { *(x) = __arch__swab16p((x)); } while (0)
#endif
#ifndef __arch__swab32s
# define __arch__swab32s(x) do { *(x) = __arch__swab32p((x)); } while (0)
#endif
#ifndef __arch__swab64s
# define __arch__swab64s(x) do { *(x) = __arch__swab64p((x)); } while (0)
#endif


/*
 * Allow constant folding
 */
#if defined(__GNUC__) && (__GNUC__ >= 2) && defined(__OPTIMIZE__)
# define __swab16(x) \
(__builtin_constant_p((__u16)(x)) ? \
 ___swab16((x)) : \
 __fswab16((x)))
# define __swab32(x) \
(__builtin_constant_p((__u32)(x)) ? \
 ___swab32((x)) : \
 __fswab32((x)))
# define __swab64(x) \
(__builtin_constant_p((__u64)(x)) ? \
 ___swab64((x)) : \
 __fswab64((x)))
#else
# define __swab16(x) __fswab16(x)
# define __swab32(x) __fswab32(x)
# define __swab64(x) __fswab64(x)
#endif /* OPTIMIZE */


static __inline__ __attribute_const__ __u16 __fswab16(__u16 x)
{
	return __arch__swab16(x);
}
static __inline__ __u16 __swab16p(const __u16 *x)
{
	return __arch__swab16p(x);
}
static __inline__ void __swab16s(__u16 *addr)
{
	__arch__swab16s(addr);
}

static __inline__ __attribute_const__ __u32 __fswab32(__u32 x)
{
	return __arch__swab32(x);
}
static __inline__ __u32 __swab32p(const __u32 *x)
{
	return __arch__swab32p(x);
}
static __inline__ void __swab32s(__u32 *addr)
{
	__arch__swab32s(addr);
}

#ifdef __BYTEORDER_HAS_U64__
static __inline__ __attribute_const__ __u64 __fswab64(__u64 x)
{
# ifdef __SWAB_64_THRU_32__
	__u32 h = x >> 32;
	__u32 l = x & ((1ULL << 32) - 1);
	return (((__u64)__swab32(l)) << 32) | ((__u64)(__swab32(h)));
# else
	return __arch__swab64(x);
# endif
}
static __inline__ __u64 __swab64p(const __u64 *x)
{
	return __arch__swab64p(x);
}
static __inline__ void __swab64s(__u64 *addr)
{
	__arch__swab64s(addr);
}
#endif /* __BYTEORDER_HAS_U64__ */

#if defined(__KERNEL__)
#define swab16 __swab16
#define swab32 __swab32
#define swab64 __swab64
#define swab16p __swab16p
#define swab32p __swab32p
#define swab64p __swab64p
#define swab16s __swab16s
#define swab32s __swab32s
#define swab64s __swab64s
#endif

#endif /* _LINUX_BYTEORDER_SWAB_H */
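The constant-folding dispatch above is the heart of this file: __builtin_constant_p chooses the pure-expression form for compile-time constants (so gcc folds the swap away entirely) and the out-of-line function for runtime values. A user-space sketch of the same trick, with an invented swab32/fswab32 pair standing in for __swab32/__fswab32:

#include <stdint.h>
#include <assert.h>

/* stand-in for __fswab32: the runtime path */
static uint32_t fswab32(uint32_t x)
{
	return ((x & 0x000000ffUL) << 24) |
	       ((x & 0x0000ff00UL) <<  8) |
	       ((x & 0x00ff0000UL) >>  8) |
	       ((x & 0xff000000UL) >> 24);
}

/* same dispatch pattern as __swab32 above */
#define swab32(x) \
	(__builtin_constant_p((uint32_t)(x)) ? \
	 ((((uint32_t)(x) & 0x000000ffUL) << 24) | \
	  (((uint32_t)(x) & 0x0000ff00UL) <<  8) | \
	  (((uint32_t)(x) & 0x00ff0000UL) >>  8) | \
	  (((uint32_t)(x) & 0xff000000UL) >> 24)) : \
	 fswab32(x))

int main(void)
{
	volatile uint32_t v = 0xAABBCCDD;     /* volatile forces the runtime path */
	assert(swab32(0x12345678) == 0x78563412);  /* folded at compile time with -O */
	assert(swab32(v) == 0xDDCCBBAA);           /* goes through fswab32() */
	return 0;
}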
diff --git a/include/linux/byteorder/swabb.h b/include/linux/byteorder/swabb.h
new file mode 100644
index 000000000000..d28d9a804d3b
--- /dev/null
+++ b/include/linux/byteorder/swabb.h
@@ -0,0 +1,137 @@
#ifndef _LINUX_BYTEORDER_SWABB_H
#define _LINUX_BYTEORDER_SWABB_H

/*
 * linux/byteorder/swabb.h
 * SWAp Bytes Bizarrely
 *	swaHHXX[ps]?(foo)
 *
 * Support for obNUXIous pdp-endian and other bizarre architectures.
 * Will Linux ever run on such ancient beasts? If not, this file
 * will be but a programming pearl. Still, it's a reminder that we
 * shouldn't be making too many assumptions when trying to be portable.
 */

/*
 * Meaning of the names I chose (vaxlinux people feel free to correct them):
 *	swahw32	swap 16-bit half-words in a 32-bit word
 *	swahb32	swap 8-bit halves of each 16-bit half-word in a 32-bit word
 *
 * No 64-bit support yet. I don't know NUXI conventions for long longs.
 * I guarantee it will be a mess when it's there, though :->
 * It will be even worse if there are conflicting 64-bit conventions.
 * Hopefully, no one ever used 64-bit objects on NUXI machines.
 */

#define ___swahw32(x) \
({ \
	__u32 __x = (x); \
	((__u32)( \
		(((__u32)(__x) & (__u32)0x0000ffffUL) << 16) | \
		(((__u32)(__x) & (__u32)0xffff0000UL) >> 16) )); \
})
#define ___swahb32(x) \
({ \
	__u32 __x = (x); \
	((__u32)( \
		(((__u32)(__x) & (__u32)0x00ff00ffUL) << 8) | \
		(((__u32)(__x) & (__u32)0xff00ff00UL) >> 8) )); \
})

#define ___constant_swahw32(x) \
	((__u32)( \
		(((__u32)(x) & (__u32)0x0000ffffUL) << 16) | \
		(((__u32)(x) & (__u32)0xffff0000UL) >> 16) ))
#define ___constant_swahb32(x) \
	((__u32)( \
		(((__u32)(x) & (__u32)0x00ff00ffUL) << 8) | \
		(((__u32)(x) & (__u32)0xff00ff00UL) >> 8) ))

/*
 * provide defaults when no architecture-specific optimization is detected
 */
#ifndef __arch__swahw32
# define __arch__swahw32(x) ___swahw32(x)
#endif
#ifndef __arch__swahb32
# define __arch__swahb32(x) ___swahb32(x)
#endif

#ifndef __arch__swahw32p
# define __arch__swahw32p(x) __swahw32(*(x))
#endif
#ifndef __arch__swahb32p
# define __arch__swahb32p(x) __swahb32(*(x))
#endif

#ifndef __arch__swahw32s
# define __arch__swahw32s(x) do { *(x) = __swahw32p((x)); } while (0)
#endif
#ifndef __arch__swahb32s
# define __arch__swahb32s(x) do { *(x) = __swahb32p((x)); } while (0)
#endif


/*
 * Allow constant folding
 */
#if defined(__GNUC__) && (__GNUC__ >= 2) && defined(__OPTIMIZE__)
# define __swahw32(x) \
(__builtin_constant_p((__u32)(x)) ? \
 ___swahw32((x)) : \
 __fswahw32((x)))
# define __swahb32(x) \
(__builtin_constant_p((__u32)(x)) ? \
 ___swahb32((x)) : \
 __fswahb32((x)))
#else
# define __swahw32(x) __fswahw32(x)
# define __swahb32(x) __fswahb32(x)
#endif /* OPTIMIZE */


static __inline__ __const__ __u32 __fswahw32(__u32 x)
{
	return __arch__swahw32(x);
}
static __inline__ __u32 __swahw32p(__u32 *x)
{
	return __arch__swahw32p(x);
}
static __inline__ void __swahw32s(__u32 *addr)
{
	__arch__swahw32s(addr);
}


static __inline__ __const__ __u32 __fswahb32(__u32 x)
{
	return __arch__swahb32(x);
}
static __inline__ __u32 __swahb32p(__u32 *x)
{
	return __arch__swahb32p(x);
}
static __inline__ void __swahb32s(__u32 *addr)
{
	__arch__swahb32s(addr);
}

#ifdef __BYTEORDER_HAS_U64__
/*
 * Not supported yet
 */
#endif /* __BYTEORDER_HAS_U64__ */

#if defined(__KERNEL__)
#define swahw32 __swahw32
#define swahb32 __swahb32
#define swahw32p __swahw32p
#define swahb32p __swahb32p
#define swahw32s __swahw32s
#define swahb32s __swahb32s
#endif

#endif /* _LINUX_BYTEORDER_SWABB_H */
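A useful identity tying this file back to swab.h: swapping the 16-bit halves and then the bytes within each half (in either order) reverses all four bytes, i.e. swahw32 composed with swahb32 equals the full swab32. A user-space sketch verifying this, with plain functions mimicking the ___swahw32/___swahb32 macros above:

#include <stdint.h>
#include <assert.h>

/* mimics ___swahw32: exchange the two 16-bit half-words */
static uint32_t swahw32(uint32_t x)
{
	return ((x & 0x0000ffffUL) << 16) | ((x & 0xffff0000UL) >> 16);
}

/* mimics ___swahb32: swap the bytes inside each 16-bit half-word */
static uint32_t swahb32(uint32_t x)
{
	return ((x & 0x00ff00ffUL) << 8) | ((x & 0xff00ff00UL) >> 8);
}

int main(void)
{
	uint32_t x = 0x12345678;
	assert(swahw32(x) == 0x56781234);
	assert(swahb32(x) == 0x34127856);
	/* either composition gives the full byte reversal, swab32(x) */
	assert(swahw32(swahb32(x)) == 0x78563412);
	assert(swahb32(swahw32(x)) == 0x78563412);
	return 0;
}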