 arch/powerpc/include/asm/byteorder.h | 38 +++++++++++++++++++++------------------
 1 file changed, 21 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/include/asm/byteorder.h b/arch/powerpc/include/asm/byteorder.h
index b37752214a16..d5de325472e9 100644
--- a/arch/powerpc/include/asm/byteorder.h
+++ b/arch/powerpc/include/asm/byteorder.h
@@ -11,6 +11,8 @@
 #include <asm/types.h>
 #include <linux/compiler.h>
 
+#define __BIG_ENDIAN
+
 #ifdef __GNUC__
 #ifdef __KERNEL__
 
@@ -21,12 +23,19 @@ static __inline__ __u16 ld_le16(const volatile __u16 *addr)
         __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
         return val;
 }
+#define __arch_swab16p ld_le16
 
 static __inline__ void st_le16(volatile __u16 *addr, const __u16 val)
 {
         __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
 }
 
+static inline void __arch_swab16s(__u16 *addr)
+{
+        st_le16(addr, *addr);
+}
+#define __arch_swab16s __arch_swab16s
+
 static __inline__ __u32 ld_le32(const volatile __u32 *addr)
 {
         __u32 val;
@@ -34,13 +43,20 @@ static __inline__ __u32 ld_le32(const volatile __u32 *addr)
         __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
         return val;
 }
+#define __arch_swab32p ld_le32
 
 static __inline__ void st_le32(volatile __u32 *addr, const __u32 val)
 {
         __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
 }
 
-static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 value)
+static inline void __arch_swab32s(__u32 *addr)
+{
+        st_le32(addr, *addr);
+}
+#define __arch_swab32s __arch_swab32s
+
+static inline __attribute_const__ __u16 __arch_swab16(__u16 value)
 {
         __u16 result;
 
@@ -49,8 +65,9 @@ static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 value)
             : "r" (value), "0" (value >> 8));
         return result;
 }
+#define __arch_swab16 __arch_swab16
 
-static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 value)
+static inline __attribute_const__ __u32 __arch_swab32(__u32 value)
 {
         __u32 result;
 
@@ -61,29 +78,16 @@ static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 value)
             : "r" (value), "0" (value >> 24));
         return result;
 }
-
-#define __arch__swab16(x) ___arch__swab16(x)
-#define __arch__swab32(x) ___arch__swab32(x)
-
-/* The same, but returns converted value from the location pointer by addr. */
-#define __arch__swab16p(addr) ld_le16(addr)
-#define __arch__swab32p(addr) ld_le32(addr)
-
-/* The same, but do the conversion in situ, ie. put the value back to addr. */
-#define __arch__swab16s(addr) st_le16(addr,*addr)
-#define __arch__swab32s(addr) st_le32(addr,*addr)
+#define __arch_swab32 __arch_swab32
 
 #endif /* __KERNEL__ */
 
-#ifndef __STRICT_ANSI__
-#define __BYTEORDER_HAS_U64__
 #ifndef __powerpc64__
 #define __SWAB_64_THRU_32__
 #endif /* __powerpc64__ */
-#endif /* __STRICT_ANSI__ */
 
 #endif /* __GNUC__ */
 
-#include <linux/byteorder/big_endian.h>
+#include <linux/byteorder.h>
 
 #endif /* _ASM_POWERPC_BYTEORDER_H */
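For context on what the renamed hooks buy: under the new scheme an architecture advertises an optimized byteswap primitive simply by defining the matching __arch_swab* macro (directly, or via the "#define __arch_swab16s __arch_swab16s" self-definition idiom used above), and the generic code in include/linux/swab.h falls back to portable C otherwise. The snippet below is a simplified paraphrase of that generic header from this era, not the verbatim kernel source; the helpers __fswab16, __swab16 and ___constant_swab16 do exist there, but the exact preprocessor structure may differ.

/* Simplified paraphrase of include/linux/swab.h (not verbatim): how the
 * generic byteswap helpers pick up the per-arch overrides defined above. */
static inline __attribute_const__ __u16 __fswab16(__u16 val)
{
#ifdef __arch_swab16
        return __arch_swab16(val);        /* powerpc: rlwimi register swap */
#else
        return ___constant_swab16(val);   /* portable shift-and-mask fallback */
#endif
}

static inline void __swab16s(__u16 *p)
{
#ifdef __arch_swab16s
        __arch_swab16s(p);                /* powerpc: sthbrx byte-reversed store */
#else
        *p = __swab16(*p);                /* load, swap in a register, store back */
#endif
}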