author		Matt Fleming <matt.fleming@intel.com>	2012-02-28 08:37:20 -0500
committer	H. Peter Anvin <hpa@zytor.com>	2012-02-28 13:22:51 -0500
commit		a07f7672d7cf0ff0d6e548a9feb6e0bd016d9c6c (patch)
tree		656594c3fb97e31ed6a207d8ba9a5dda026aa04f /tools
parent		6b21d18ed50c7d145220b0724ea7f2613abf0f95 (diff)
tools/include: Add byteshift headers for endian access
There are various hostprogs in the kernel that are rolling their own
implementations of {get,put}_unaligned_le*(). Copy the byteshift
headers from include/linux/unaligned so that they can all use a single
implementation.
This requires changing some of the data types to the userspace-exported
ones (u32 -> __u32, etc.).
Signed-off-by: Matt Fleming <matt.fleming@intel.com>
Link: http://lkml.kernel.org/r/1330436245-24875-2-git-send-email-matt@console-pimps.org
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
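
As a rough illustration of the intended usage (a hypothetical hostprog snippet, not part of this patch; the file name and the -I flag are assumptions based on the include layout added below), a host tool can read a little-endian field from an arbitrary, possibly unaligned buffer offset without open-coding the byte shifts:

/* Hypothetical example, not from this patch: read a 32-bit
 * little-endian field at an unaligned offset.  Build with something
 * like "gcc -Itools/include example.c" so <tools/le_byteshift.h>
 * resolves.
 */
#include <stdio.h>
#include <tools/le_byteshift.h>

int main(void)
{
	unsigned char buf[6] = { 0xff, 0xee, 0x78, 0x56, 0x34, 0x12 };

	/* Bytes buf[2..5] are read least-significant first: prints
	 * 0x12345678 on any host, regardless of its byte order.
	 */
	printf("0x%x\n", (unsigned int)get_unaligned_le32(buf + 2));
	return 0;
}

Each hostprog can then drop its private shift-and-or helpers in favor of this single copy.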
Diffstat (limited to 'tools')
-rw-r--r--	tools/include/tools/be_byteshift.h	70
-rw-r--r--	tools/include/tools/le_byteshift.h	70
2 files changed, 140 insertions, 0 deletions
diff --git a/tools/include/tools/be_byteshift.h b/tools/include/tools/be_byteshift.h
new file mode 100644
index 000000000000..f4912e2668ba
--- /dev/null
+++ b/tools/include/tools/be_byteshift.h
@@ -0,0 +1,70 @@
+#ifndef _TOOLS_BE_BYTESHIFT_H
+#define _TOOLS_BE_BYTESHIFT_H
+
+#include <linux/types.h>
+
+static inline __u16 __get_unaligned_be16(const __u8 *p)
+{
+	return p[0] << 8 | p[1];
+}
+
+static inline __u32 __get_unaligned_be32(const __u8 *p)
+{
+	return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
+}
+
+static inline __u64 __get_unaligned_be64(const __u8 *p)
+{
+	return (__u64)__get_unaligned_be32(p) << 32 |
+	       __get_unaligned_be32(p + 4);
+}
+
+static inline void __put_unaligned_be16(__u16 val, __u8 *p)
+{
+	*p++ = val >> 8;
+	*p++ = val;
+}
+
+static inline void __put_unaligned_be32(__u32 val, __u8 *p)
+{
+	__put_unaligned_be16(val >> 16, p);
+	__put_unaligned_be16(val, p + 2);
+}
+
+static inline void __put_unaligned_be64(__u64 val, __u8 *p)
+{
+	__put_unaligned_be32(val >> 32, p);
+	__put_unaligned_be32(val, p + 4);
+}
+
+static inline __u16 get_unaligned_be16(const void *p)
+{
+	return __get_unaligned_be16((const __u8 *)p);
+}
+
+static inline __u32 get_unaligned_be32(const void *p)
+{
+	return __get_unaligned_be32((const __u8 *)p);
+}
+
+static inline __u64 get_unaligned_be64(const void *p)
+{
+	return __get_unaligned_be64((const __u8 *)p);
+}
+
+static inline void put_unaligned_be16(__u16 val, void *p)
+{
+	__put_unaligned_be16(val, p);
+}
+
+static inline void put_unaligned_be32(__u32 val, void *p)
+{
+	__put_unaligned_be32(val, p);
+}
+
+static inline void put_unaligned_be64(__u64 val, void *p)
+{
+	__put_unaligned_be64(val, p);
+}
+
+#endif /* _TOOLS_BE_BYTESHIFT_H */
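
A minimal round-trip with the big-endian helpers above, as a sanity check (hypothetical snippet, not part of the patch):

/* Hypothetical example, not from this patch: store and re-read a
 * 16-bit value in big-endian byte order at an odd (unaligned) offset.
 */
#include <assert.h>
#include <stdio.h>
#include <tools/be_byteshift.h>

int main(void)
{
	unsigned char frame[3];

	/* frame[1] = 0xab, frame[2] = 0xcd on every host. */
	put_unaligned_be16(0xabcd, frame + 1);
	assert(get_unaligned_be16(frame + 1) == 0xabcd);
	printf("big-endian round-trip ok\n");
	return 0;
}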
diff --git a/tools/include/tools/le_byteshift.h b/tools/include/tools/le_byteshift.h
new file mode 100644
index 000000000000..c99d45a68bda
--- /dev/null
+++ b/tools/include/tools/le_byteshift.h
@@ -0,0 +1,70 @@
+#ifndef _TOOLS_LE_BYTESHIFT_H
+#define _TOOLS_LE_BYTESHIFT_H
+
+#include <linux/types.h>
+
+static inline __u16 __get_unaligned_le16(const __u8 *p)
+{
+	return p[0] | p[1] << 8;
+}
+
+static inline __u32 __get_unaligned_le32(const __u8 *p)
+{
+	return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24;
+}
+
+static inline __u64 __get_unaligned_le64(const __u8 *p)
+{
+	return (__u64)__get_unaligned_le32(p + 4) << 32 |
+	       __get_unaligned_le32(p);
+}
+
+static inline void __put_unaligned_le16(__u16 val, __u8 *p)
+{
+	*p++ = val;
+	*p++ = val >> 8;
+}
+
+static inline void __put_unaligned_le32(__u32 val, __u8 *p)
+{
+	__put_unaligned_le16(val >> 16, p + 2);
+	__put_unaligned_le16(val, p);
+}
+
+static inline void __put_unaligned_le64(__u64 val, __u8 *p)
+{
+	__put_unaligned_le32(val >> 32, p + 4);
+	__put_unaligned_le32(val, p);
+}
+
+static inline __u16 get_unaligned_le16(const void *p)
+{
+	return __get_unaligned_le16((const __u8 *)p);
+}
+
+static inline __u32 get_unaligned_le32(const void *p)
+{
+	return __get_unaligned_le32((const __u8 *)p);
+}
+
+static inline __u64 get_unaligned_le64(const void *p)
+{
+	return __get_unaligned_le64((const __u8 *)p);
+}
+
+static inline void put_unaligned_le16(__u16 val, void *p)
+{
+	__put_unaligned_le16(val, p);
+}
+
+static inline void put_unaligned_le32(__u32 val, void *p)
+{
+	__put_unaligned_le32(val, p);
+}
+
+static inline void put_unaligned_le64(__u64 val, void *p)
+{
+	__put_unaligned_le64(val, p);
+}
+
+#endif /* _TOOLS_LE_BYTESHIFT_H */
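
And the corresponding little-endian writer side (again a hypothetical sketch, not part of the patch), e.g. for a build-time tool emitting an on-disk header field:

/* Hypothetical example, not from this patch: serialize a 64-bit value
 * into little-endian byte order and dump the resulting layout.
 */
#include <stdio.h>
#include <tools/le_byteshift.h>

int main(void)
{
	unsigned char hdr[8];
	int i;

	put_unaligned_le64(0x0123456789abcdefULL, hdr);

	/* Prints "ef cd ab 89 67 45 23 01": least-significant byte first. */
	for (i = 0; i < 8; i++)
		printf("%02x ", hdr[i]);
	printf("\n");
	return 0;
}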