author	Harvey Harrison <harvey.harrison@gmail.com>	2008-04-29 04:03:30 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-29 11:06:27 -0400
commit	6510d41954dc6a9c8b1dbca7eaca0f23195ca727 (patch)
tree	868b5fac25c7c5b80cc5a88eaaab8bf3d693420d
parent	064106a91be5e76cb42c1ddf5d3871e3a1bd2a23 (diff)
kernel: Move arches to use common unaligned access
Unaligned access is ok for the following arches: cris, m68k, mn10300, powerpc, s390, x86.

Arches that use the memmove implementation for native endian and the byteshifting for the opposite endianness: h8300, m32r, xtensa.

Packed struct for native endian, byteshifting for the other endian: alpha, blackfin, ia64, parisc, sparc, sparc64, mips, sh.

m68knommu is generic_be for Coldfire; otherwise unaligned access is ok.

frv and arm choose endianness based on compiler settings and use the byteshifting versions. Remove the unaligned trap handler from frv as it is now unused.

v850 is le and uses the byteshifting versions for both be and le.

Remove the now-unused asm-generic implementation.

Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Acked-by: David S. Miller <davem@davemloft.net>
Cc: <linux-arch@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
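The implementation flavors named above are not shown in this patch (they live in the new <linux/unaligned/*> headers that the files below include), so the following minimal C sketch illustrates what each strategy boils down to. It is illustrative only, assuming standalone sketch_* names and <stdint.h> types rather than the kernel's actual helpers, and shows only the 32-bit get side.

#include <stdint.h>
#include <string.h>

/* "Unaligned access is ok": the CPU handles misaligned loads natively,
 * so a plain dereference through a cast is sufficient. */
static inline uint32_t sketch_get_unaligned_ok32(const void *p)
{
	return *(const uint32_t *)p;
}

/* Packed struct (native endian): the packed attribute forces the
 * compiler to emit an alignment-safe access sequence for the arch. */
struct sketch_una_u32 { uint32_t x; } __attribute__((packed));

static inline uint32_t sketch_get_unaligned_struct32(const void *p)
{
	return ((const struct sketch_una_u32 *)p)->x;
}

/* Byteshifting (shown big-endian): assemble the value a byte at a
 * time, which is alignment-safe and fixes the byte order too. */
static inline uint32_t sketch_get_unaligned_be32(const void *p)
{
	const uint8_t *b = p;
	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8) | (uint32_t)b[3];
}

/* memmove (native endian): copy through memmove so gcc does not
 * substitute a __builtin_memcpy that it might turn back into a
 * misaligned load. */
static inline uint32_t sketch_get_unaligned_memmove32(const void *p)
{
	uint32_t v;
	memmove(&v, p, sizeof(v));
	return v;
}

Each arch header below then simply includes the flavor headers matching its endianness and maps get_unaligned/put_unaligned onto them.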
-rw-r--r--	arch/frv/kernel/traps.c	7
-rw-r--r--	arch/frv/mm/unaligned.c	217
-rw-r--r--	include/asm-alpha/unaligned.h	13
-rw-r--r--	include/asm-arm/unaligned.h	174
-rw-r--r--	include/asm-avr32/unaligned.h	13
-rw-r--r--	include/asm-blackfin/unaligned.h	13
-rw-r--r--	include/asm-cris/unaligned.h	17
-rw-r--r--	include/asm-frv/unaligned.h	196
-rw-r--r--	include/asm-generic/unaligned.h	124
-rw-r--r--	include/asm-h8300/unaligned.h	20
-rw-r--r--	include/asm-ia64/unaligned.h	7
-rw-r--r--	include/asm-m32r/unaligned.h	27
-rw-r--r--	include/asm-m68k/unaligned.h	17
-rw-r--r--	include/asm-m68knommu/unaligned.h	22
-rw-r--r--	include/asm-mips/unaligned.h	37
-rw-r--r--	include/asm-mn10300/unaligned.h	130
-rw-r--r--	include/asm-parisc/unaligned.h	12
-rw-r--r--	include/asm-powerpc/unaligned.h	11
-rw-r--r--	include/asm-s390/unaligned.h	25
-rw-r--r--	include/asm-sh/unaligned.h	20
-rw-r--r--	include/asm-sparc/unaligned.h	10
-rw-r--r--	include/asm-sparc64/unaligned.h	10
-rw-r--r--	include/asm-um/unaligned.h	6
-rw-r--r--	include/asm-v850/unaligned.h	124
-rw-r--r--	include/asm-x86/unaligned.h	31
-rw-r--r--	include/asm-xtensa/unaligned.h	35
26 files changed, 203 insertions, 1115 deletions
diff --git a/arch/frv/kernel/traps.c b/arch/frv/kernel/traps.c
index a40df80b2ebd..1d2dfe67d442 100644
--- a/arch/frv/kernel/traps.c
+++ b/arch/frv/kernel/traps.c
@@ -362,11 +362,8 @@ asmlinkage void memory_access_exception(unsigned long esr0,
 #ifdef CONFIG_MMU
 	unsigned long fixup;
 
-	if ((esr0 & ESRx_EC) == ESRx_EC_DATA_ACCESS)
-		if (handle_misalignment(esr0, ear0, epcr0) == 0)
-			return;
-
-	if ((fixup = search_exception_table(__frame->pc)) != 0) {
+	fixup = search_exception_table(__frame->pc);
+	if (fixup) {
 		__frame->pc = fixup;
 		return;
 	}
diff --git a/arch/frv/mm/unaligned.c b/arch/frv/mm/unaligned.c
deleted file mode 100644
index 8f0375fc15a8..000000000000
--- a/arch/frv/mm/unaligned.c
+++ /dev/null
@@ -1,217 +0,0 @@
-/* unaligned.c: unalignment fixup handler for CPUs on which it is supported (FR451 only)
- *
- * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <linux/user.h>
-#include <linux/string.h>
-#include <linux/linkage.h>
-#include <linux/init.h>
-
-#include <asm/setup.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-
-#if 0
-#define kdebug(fmt, ...) printk("FDPIC "fmt"\n" ,##__VA_ARGS__ )
-#else
-#define kdebug(fmt, ...) do {} while(0)
-#endif
-
-#define _MA_SIGNED 0x01
-#define _MA_HALF 0x02
-#define _MA_WORD 0x04
-#define _MA_DWORD 0x08
-#define _MA_SZ_MASK 0x0e
-#define _MA_LOAD 0x10
-#define _MA_STORE 0x20
-#define _MA_UPDATE 0x40
-#define _MA_IMM 0x80
-
-#define _MA_LDxU _MA_LOAD | _MA_UPDATE
-#define _MA_LDxI _MA_LOAD | _MA_IMM
-#define _MA_STxU _MA_STORE | _MA_UPDATE
-#define _MA_STxI _MA_STORE | _MA_IMM
-
-static const uint8_t tbl_LDGRk_reg[0x40] = {
-	[0x02] = _MA_LOAD | _MA_HALF | _MA_SIGNED, /* LDSH @(GRi,GRj),GRk */
-	[0x03] = _MA_LOAD | _MA_HALF, /* LDUH @(GRi,GRj),GRk */
-	[0x04] = _MA_LOAD | _MA_WORD, /* LD @(GRi,GRj),GRk */
-	[0x05] = _MA_LOAD | _MA_DWORD, /* LDD @(GRi,GRj),GRk */
-	[0x12] = _MA_LDxU | _MA_HALF | _MA_SIGNED, /* LDSHU @(GRi,GRj),GRk */
-	[0x13] = _MA_LDxU | _MA_HALF, /* LDUHU @(GRi,GRj),GRk */
-	[0x14] = _MA_LDxU | _MA_WORD, /* LDU @(GRi,GRj),GRk */
-	[0x15] = _MA_LDxU | _MA_DWORD, /* LDDU @(GRi,GRj),GRk */
-};
-
-static const uint8_t tbl_STGRk_reg[0x40] = {
-	[0x01] = _MA_STORE | _MA_HALF, /* STH @(GRi,GRj),GRk */
-	[0x02] = _MA_STORE | _MA_WORD, /* ST @(GRi,GRj),GRk */
-	[0x03] = _MA_STORE | _MA_DWORD, /* STD @(GRi,GRj),GRk */
-	[0x11] = _MA_STxU | _MA_HALF, /* STHU @(GRi,GRj),GRk */
-	[0x12] = _MA_STxU | _MA_WORD, /* STU @(GRi,GRj),GRk */
-	[0x13] = _MA_STxU | _MA_DWORD, /* STDU @(GRi,GRj),GRk */
-};
-
-static const uint8_t tbl_LDSTGRk_imm[0x80] = {
-	[0x31] = _MA_LDxI | _MA_HALF | _MA_SIGNED, /* LDSHI @(GRi,d12),GRk */
-	[0x32] = _MA_LDxI | _MA_WORD, /* LDI @(GRi,d12),GRk */
-	[0x33] = _MA_LDxI | _MA_DWORD, /* LDDI @(GRi,d12),GRk */
-	[0x36] = _MA_LDxI | _MA_HALF, /* LDUHI @(GRi,d12),GRk */
-	[0x51] = _MA_STxI | _MA_HALF, /* STHI @(GRi,d12),GRk */
-	[0x52] = _MA_STxI | _MA_WORD, /* STI @(GRi,d12),GRk */
-	[0x53] = _MA_STxI | _MA_DWORD, /* STDI @(GRi,d12),GRk */
-};
-
-
-/*****************************************************************************/
-/*
- * see if we can handle the exception by fixing up a misaligned memory access
- */
-int handle_misalignment(unsigned long esr0, unsigned long ear0, unsigned long epcr0)
-{
-	unsigned long insn, addr, *greg;
-	int GRi, GRj, GRk, D12, op;
-
-	union {
-		uint64_t _64;
-		uint32_t _32[2];
-		uint16_t _16;
-		uint8_t _8[8];
-	} x;
-
-	if (!(esr0 & ESR0_EAV) || !(epcr0 & EPCR0_V) || !(ear0 & 7))
-		return -EAGAIN;
-
-	epcr0 &= EPCR0_PC;
-
-	if (__frame->pc != epcr0) {
-		kdebug("MISALIGN: Execution not halted on excepting instruction\n");
-		BUG();
-	}
-
-	if (__get_user(insn, (unsigned long *) epcr0) < 0)
-		return -EFAULT;
-
-	/* determine the instruction type first */
-	switch ((insn >> 18) & 0x7f) {
-	case 0x2:
-		/* LDx @(GRi,GRj),GRk */
-		op = tbl_LDGRk_reg[(insn >> 6) & 0x3f];
-		break;
-
-	case 0x3:
-		/* STx GRk,@(GRi,GRj) */
-		op = tbl_STGRk_reg[(insn >> 6) & 0x3f];
-		break;
-
-	default:
-		op = tbl_LDSTGRk_imm[(insn >> 18) & 0x7f];
-		break;
-	}
-
-	if (!op)
-		return -EAGAIN;
-
-	kdebug("MISALIGN: pc=%08lx insn=%08lx ad=%08lx op=%02x\n", epcr0, insn, ear0, op);
-
-	memset(&x, 0xba, 8);
-
-	/* validate the instruction parameters */
-	greg = (unsigned long *) &__frame->tbr;
-
-	GRi = (insn >> 12) & 0x3f;
-	GRk = (insn >> 25) & 0x3f;
-
-	if (GRi > 31 || GRk > 31)
-		return -ENOENT;
-
-	if (op & _MA_DWORD && GRk & 1)
-		return -EINVAL;
-
-	if (op & _MA_IMM) {
-		D12 = insn & 0xfff;
-		asm ("slli %0,#20,%0 ! srai %0,#20,%0" : "=r"(D12) : "0"(D12)); /* sign extend */
-		addr = (GRi ? greg[GRi] : 0) + D12;
-	}
-	else {
-		GRj = (insn >> 0) & 0x3f;
-		if (GRj > 31)
-			return -ENOENT;
-		addr = (GRi ? greg[GRi] : 0) + (GRj ? greg[GRj] : 0);
-	}
-
-	if (addr != ear0) {
-		kdebug("MISALIGN: Calculated addr (%08lx) does not match EAR0 (%08lx)\n",
-		       addr, ear0);
-		return -EFAULT;
-	}
-
-	/* check the address is okay */
-	if (user_mode(__frame) && ___range_ok(ear0, 8) < 0)
-		return -EFAULT;
-
-	/* perform the memory op */
-	if (op & _MA_STORE) {
-		/* perform a store */
-		x._32[0] = 0;
-		if (GRk != 0) {
-			if (op & _MA_HALF) {
-				x._16 = greg[GRk];
-			}
-			else {
-				x._32[0] = greg[GRk];
-			}
-		}
-		if (op & _MA_DWORD)
-			x._32[1] = greg[GRk + 1];
-
-		kdebug("MISALIGN: Store GR%d { %08x:%08x } -> %08lx (%dB)\n",
-		       GRk, x._32[1], x._32[0], addr, op & _MA_SZ_MASK);
-
-		if (__memcpy_user((void *) addr, &x, op & _MA_SZ_MASK) != 0)
-			return -EFAULT;
-	}
-	else {
-		/* perform a load */
-		if (__memcpy_user(&x, (void *) addr, op & _MA_SZ_MASK) != 0)
-			return -EFAULT;
-
-		if (op & _MA_HALF) {
-			if (op & _MA_SIGNED)
-				asm ("slli %0,#16,%0 ! srai %0,#16,%0"
-				     : "=r"(x._32[0]) : "0"(x._16));
-			else
-				asm ("sethi #0,%0"
-				     : "=r"(x._32[0]) : "0"(x._16));
-		}
-
-		kdebug("MISALIGN: Load %08lx (%dB) -> GR%d, { %08x:%08x }\n",
-		       addr, op & _MA_SZ_MASK, GRk, x._32[1], x._32[0]);
-
-		if (GRk != 0)
-			greg[GRk] = x._32[0];
-		if (op & _MA_DWORD)
-			greg[GRk + 1] = x._32[1];
-	}
-
-	/* update the base pointer if required */
-	if (op & _MA_UPDATE)
-		greg[GRi] = addr;
-
-	/* well... we've done that insn */
-	__frame->pc = __frame->pc + 4;
-
-	return 0;
-} /* end handle_misalignment() */
diff --git a/include/asm-alpha/unaligned.h b/include/asm-alpha/unaligned.h
index a1d72846f61c..3787c60aed3f 100644
--- a/include/asm-alpha/unaligned.h
+++ b/include/asm-alpha/unaligned.h
@@ -1,6 +1,11 @@
-#ifndef __ALPHA_UNALIGNED_H
-#define __ALPHA_UNALIGNED_H
+#ifndef _ASM_ALPHA_UNALIGNED_H
+#define _ASM_ALPHA_UNALIGNED_H
 
-#include <asm-generic/unaligned.h>
+#include <linux/unaligned/le_struct.h>
+#include <linux/unaligned/be_byteshift.h>
+#include <linux/unaligned/generic.h>
 
-#endif
+#define get_unaligned __get_unaligned_le
+#define put_unaligned __put_unaligned_le
+
+#endif /* _ASM_ALPHA_UNALIGNED_H */
diff --git a/include/asm-arm/unaligned.h b/include/asm-arm/unaligned.h
index 5db03cf3b905..44593a894903 100644
--- a/include/asm-arm/unaligned.h
+++ b/include/asm-arm/unaligned.h
@@ -1,171 +1,9 @@
-#ifndef __ASM_ARM_UNALIGNED_H
-#define __ASM_ARM_UNALIGNED_H
+#ifndef _ASM_ARM_UNALIGNED_H
+#define _ASM_ARM_UNALIGNED_H
 
-#include <asm/types.h>
-
-extern int __bug_unaligned_x(const void *ptr);
-
-/*
- * What is the most efficient way of loading/storing an unaligned value?
- *
- * That is the subject of this file. Efficiency here is defined as
- * minimum code size with minimum register usage for the common cases.
- * It is currently not believed that long longs are common, so we
- * trade efficiency for the chars, shorts and longs against the long
- * longs.
- *
- * Current stats with gcc 2.7.2.2 for these functions:
- *
- * ptrsize get: code regs put: code regs
- * 1 1 1 1 2
- * 2 3 2 3 2
- * 4 7 3 7 3
- * 8 20 6 16 6
- *
- * gcc 2.95.1 seems to code differently:
- *
- * ptrsize get: code regs put: code regs
- * 1 1 1 1 2
- * 2 3 2 3 2
- * 4 7 4 7 4
- * 8 19 8 15 6
- *
- * which may or may not be more efficient (depending upon whether
- * you can afford the extra registers). Hopefully the gcc 2.95
- * is inteligent enough to decide if it is better to use the
- * extra register, but evidence so far seems to suggest otherwise.
- *
- * Unfortunately, gcc is not able to optimise the high word
- * out of long long >> 32, or the low word from long long << 32
- */
-
-#define __get_unaligned_2_le(__p) \
-	(unsigned int)(__p[0] | __p[1] << 8)
-
-#define __get_unaligned_2_be(__p) \
-	(unsigned int)(__p[0] << 8 | __p[1])
-
-#define __get_unaligned_4_le(__p) \
-	(unsigned int)(__p[0] | __p[1] << 8 | __p[2] << 16 | __p[3] << 24)
-
-#define __get_unaligned_4_be(__p) \
-	(unsigned int)(__p[0] << 24 | __p[1] << 16 | __p[2] << 8 | __p[3])
-
-#define __get_unaligned_8_le(__p) \
-	((unsigned long long)__get_unaligned_4_le((__p+4)) << 32 | \
-		__get_unaligned_4_le(__p))
-
-#define __get_unaligned_8_be(__p) \
-	((unsigned long long)__get_unaligned_4_be(__p) << 32 | \
-		__get_unaligned_4_be((__p+4)))
-
-#define __get_unaligned_le(ptr) \
-	((__force typeof(*(ptr)))({ \
-		const __u8 *__p = (const __u8 *)(ptr); \
-		__builtin_choose_expr(sizeof(*(ptr)) == 1, *__p, \
-		__builtin_choose_expr(sizeof(*(ptr)) == 2, __get_unaligned_2_le(__p), \
-		__builtin_choose_expr(sizeof(*(ptr)) == 4, __get_unaligned_4_le(__p), \
-		__builtin_choose_expr(sizeof(*(ptr)) == 8, __get_unaligned_8_le(__p), \
-		(void)__bug_unaligned_x(__p))))); \
-	}))
-
-#define __get_unaligned_be(ptr) \
-	((__force typeof(*(ptr)))({ \
-		const __u8 *__p = (const __u8 *)(ptr); \
-		__builtin_choose_expr(sizeof(*(ptr)) == 1, *__p, \
-		__builtin_choose_expr(sizeof(*(ptr)) == 2, __get_unaligned_2_be(__p), \
-		__builtin_choose_expr(sizeof(*(ptr)) == 4, __get_unaligned_4_be(__p), \
-		__builtin_choose_expr(sizeof(*(ptr)) == 8, __get_unaligned_8_be(__p), \
-		(void)__bug_unaligned_x(__p))))); \
-	}))
-
-
-static inline void __put_unaligned_2_le(__u32 __v, register __u8 *__p)
-{
-	*__p++ = __v;
-	*__p++ = __v >> 8;
-}
-
-static inline void __put_unaligned_2_be(__u32 __v, register __u8 *__p)
-{
-	*__p++ = __v >> 8;
-	*__p++ = __v;
-}
-
-static inline void __put_unaligned_4_le(__u32 __v, register __u8 *__p)
-{
-	__put_unaligned_2_le(__v >> 16, __p + 2);
-	__put_unaligned_2_le(__v, __p);
-}
-
-static inline void __put_unaligned_4_be(__u32 __v, register __u8 *__p)
-{
-	__put_unaligned_2_be(__v >> 16, __p);
-	__put_unaligned_2_be(__v, __p + 2);
-}
-
-static inline void __put_unaligned_8_le(const unsigned long long __v, register __u8 *__p)
-{
-	/*
-	 * tradeoff: 8 bytes of stack for all unaligned puts (2
-	 * instructions), or an extra register in the long long
-	 * case - go for the extra register.
-	 */
-	__put_unaligned_4_le(__v >> 32, __p+4);
-	__put_unaligned_4_le(__v, __p);
-}
-
-static inline void __put_unaligned_8_be(const unsigned long long __v, register __u8 *__p)
-{
-	/*
-	 * tradeoff: 8 bytes of stack for all unaligned puts (2
-	 * instructions), or an extra register in the long long
-	 * case - go for the extra register.
-	 */
-	__put_unaligned_4_be(__v >> 32, __p);
-	__put_unaligned_4_be(__v, __p+4);
-}
-
-/*
- * Try to store an unaligned value as efficiently as possible.
- */
-#define __put_unaligned_le(val,ptr) \
-	({ \
-		(void)sizeof(*(ptr) = (val)); \
-		switch (sizeof(*(ptr))) { \
-		case 1: \
-			*(ptr) = (val); \
-			break; \
-		case 2: __put_unaligned_2_le((__force u16)(val),(__u8 *)(ptr)); \
-			break; \
-		case 4: __put_unaligned_4_le((__force u32)(val),(__u8 *)(ptr)); \
-			break; \
-		case 8: __put_unaligned_8_le((__force u64)(val),(__u8 *)(ptr)); \
-			break; \
-		default: __bug_unaligned_x(ptr); \
-			break; \
-		} \
-		(void) 0; \
-	})
-
-#define __put_unaligned_be(val,ptr) \
-	({ \
-		(void)sizeof(*(ptr) = (val)); \
-		switch (sizeof(*(ptr))) { \
-		case 1: \
-			*(ptr) = (val); \
-			break; \
-		case 2: __put_unaligned_2_be((__force u16)(val),(__u8 *)(ptr)); \
-			break; \
-		case 4: __put_unaligned_4_be((__force u32)(val),(__u8 *)(ptr)); \
-			break; \
-		case 8: __put_unaligned_8_be((__force u64)(val),(__u8 *)(ptr)); \
-			break; \
-		default: __bug_unaligned_x(ptr); \
-			break; \
-		} \
-		(void) 0; \
-	})
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/be_byteshift.h>
+#include <linux/unaligned/generic.h>
 
 /*
  * Select endianness
@@ -178,4 +16,4 @@ static inline void __put_unaligned_8_be(const unsigned long long __v, register _
 #define put_unaligned __put_unaligned_be
 #endif
 
-#endif
+#endif /* _ASM_ARM_UNALIGNED_H */
diff --git a/include/asm-avr32/unaligned.h b/include/asm-avr32/unaligned.h
index 36f5fd430543..041877290470 100644
--- a/include/asm-avr32/unaligned.h
+++ b/include/asm-avr32/unaligned.h
@@ -1,5 +1,5 @@
-#ifndef __ASM_AVR32_UNALIGNED_H
-#define __ASM_AVR32_UNALIGNED_H
+#ifndef _ASM_AVR32_UNALIGNED_H
+#define _ASM_AVR32_UNALIGNED_H
 
 /*
  * AVR32 can handle some unaligned accesses, depending on the
@@ -11,6 +11,11 @@
  * optimize word loads in general.
  */
 
-#include <asm-generic/unaligned.h>
+#include <linux/unaligned/be_struct.h>
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/generic.h>
 
-#endif /* __ASM_AVR32_UNALIGNED_H */
+#define get_unaligned __get_unaligned_be
+#define put_unaligned __put_unaligned_be
+
+#endif /* _ASM_AVR32_UNALIGNED_H */
diff --git a/include/asm-blackfin/unaligned.h b/include/asm-blackfin/unaligned.h
index 10081dc241ef..fd8a1d634945 100644
--- a/include/asm-blackfin/unaligned.h
+++ b/include/asm-blackfin/unaligned.h
@@ -1,6 +1,11 @@
-#ifndef __BFIN_UNALIGNED_H
-#define __BFIN_UNALIGNED_H
+#ifndef _ASM_BLACKFIN_UNALIGNED_H
+#define _ASM_BLACKFIN_UNALIGNED_H
 
-#include <asm-generic/unaligned.h>
+#include <linux/unaligned/le_struct.h>
+#include <linux/unaligned/be_byteshift.h>
+#include <linux/unaligned/generic.h>
 
-#endif /* __BFIN_UNALIGNED_H */
+#define get_unaligned __get_unaligned_le
+#define put_unaligned __put_unaligned_le
+
+#endif /* _ASM_BLACKFIN_UNALIGNED_H */
diff --git a/include/asm-cris/unaligned.h b/include/asm-cris/unaligned.h
index 7fbbb399f6f1..7b3f3fec567c 100644
--- a/include/asm-cris/unaligned.h
+++ b/include/asm-cris/unaligned.h
@@ -1,16 +1,13 @@
-#ifndef __CRIS_UNALIGNED_H
-#define __CRIS_UNALIGNED_H
+#ifndef _ASM_CRIS_UNALIGNED_H
+#define _ASM_CRIS_UNALIGNED_H
 
 /*
  * CRIS can do unaligned accesses itself.
- *
- * The strange macros are there to make sure these can't
- * be misused in a way that makes them not work on other
- * architectures where unaligned accesses aren't as simple.
  */
+#include <linux/unaligned/access_ok.h>
+#include <linux/unaligned/generic.h>
 
-#define get_unaligned(ptr) (*(ptr))
+#define get_unaligned __get_unaligned_le
+#define put_unaligned __put_unaligned_le
 
-#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
-
-#endif
+#endif /* _ASM_CRIS_UNALIGNED_H */
diff --git a/include/asm-frv/unaligned.h b/include/asm-frv/unaligned.h
index dc8e9c9bf6bd..64ccc736f2d8 100644
--- a/include/asm-frv/unaligned.h
+++ b/include/asm-frv/unaligned.h
@@ -9,194 +9,14 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#ifndef _ASM_UNALIGNED_H
-#define _ASM_UNALIGNED_H
+#ifndef _ASM_FRV_UNALIGNED_H
+#define _ASM_FRV_UNALIGNED_H
 
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/be_byteshift.h>
+#include <linux/unaligned/generic.h>
 
-/*
- * Unaligned accesses on uClinux can't be performed in a fault handler - the
- * CPU detects them as imprecise exceptions making this impossible.
- *
- * With the FR451, however, they are precise, and so we used to fix them up in
- * the memory access fault handler. However, instruction bundling make this
- * impractical. So, now we fall back to using memcpy.
- */
-#ifdef CONFIG_MMU
-
-/*
- * The asm statement in the macros below is a way to get GCC to copy a
- * value from one variable to another without having any clue it's
- * actually doing so, so that it won't have any idea that the values
- * in the two variables are related.
- */
-
-#define get_unaligned(ptr) ({ \
-	typeof((*(ptr))) __x; \
-	void *__ptrcopy; \
-	asm("" : "=r" (__ptrcopy) : "0" (ptr)); \
-	memcpy(&__x, __ptrcopy, sizeof(*(ptr))); \
-	__x; \
-})
-
-#define put_unaligned(val, ptr) ({ \
-	typeof((*(ptr))) __x = (val); \
-	void *__ptrcopy; \
-	asm("" : "=r" (__ptrcopy) : "0" (ptr)); \
-	memcpy(__ptrcopy, &__x, sizeof(*(ptr))); \
-})
-
-extern int handle_misalignment(unsigned long esr0, unsigned long ear0, unsigned long epcr0);
-
-#else
-
-#define get_unaligned(ptr) \
-({ \
-	typeof(*(ptr)) x; \
-	const char *__p = (const char *) (ptr); \
-	\
-	switch (sizeof(x)) { \
-	case 1: \
-		x = *(ptr); \
-		break; \
-	case 2: \
-	{ \
-		uint8_t a; \
-		asm(" ldub%I2 %M2,%0 \n" \
-		    " ldub%I3.p %M3,%1 \n" \
-		    " slli %0,#8,%0 \n" \
-		    " or %0,%1,%0 \n" \
-		    : "=&r"(x), "=&r"(a) \
-		    : "m"(__p[0]), "m"(__p[1]) \
-		    ); \
-		break; \
-	} \
-	\
-	case 4: \
-	{ \
-		uint8_t a; \
-		asm(" ldub%I2 %M2,%0 \n" \
-		    " ldub%I3.p %M3,%1 \n" \
-		    " slli %0,#8,%0 \n" \
-		    " or %0,%1,%0 \n" \
-		    " ldub%I4.p %M4,%1 \n" \
-		    " slli %0,#8,%0 \n" \
-		    " or %0,%1,%0 \n" \
-		    " ldub%I5.p %M5,%1 \n" \
-		    " slli %0,#8,%0 \n" \
-		    " or %0,%1,%0 \n" \
-		    : "=&r"(x), "=&r"(a) \
-		    : "m"(__p[0]), "m"(__p[1]), "m"(__p[2]), "m"(__p[3]) \
-		    ); \
-		break; \
-	} \
-	\
-	case 8: \
-	{ \
-		union { uint64_t x; u32 y[2]; } z; \
-		uint8_t a; \
-		asm(" ldub%I3 %M3,%0 \n" \
-		    " ldub%I4.p %M4,%2 \n" \
-		    " slli %0,#8,%0 \n" \
-		    " or %0,%2,%0 \n" \
-		    " ldub%I5.p %M5,%2 \n" \
-		    " slli %0,#8,%0 \n" \
-		    " or %0,%2,%0 \n" \
-		    " ldub%I6.p %M6,%2 \n" \
-		    " slli %0,#8,%0 \n" \
-		    " or %0,%2,%0 \n" \
-		    " ldub%I7 %M7,%1 \n" \
-		    " ldub%I8.p %M8,%2 \n" \
-		    " slli %1,#8,%1 \n" \
-		    " or %1,%2,%1 \n" \
-		    " ldub%I9.p %M9,%2 \n" \
-		    " slli %1,#8,%1 \n" \
-		    " or %1,%2,%1 \n" \
-		    " ldub%I10.p %M10,%2 \n" \
-		    " slli %1,#8,%1 \n" \
-		    " or %1,%2,%1 \n" \
-		    : "=&r"(z.y[0]), "=&r"(z.y[1]), "=&r"(a) \
-		    : "m"(__p[0]), "m"(__p[1]), "m"(__p[2]), "m"(__p[3]), \
-		      "m"(__p[4]), "m"(__p[5]), "m"(__p[6]), "m"(__p[7]) \
-		    ); \
-		x = z.x; \
-		break; \
-	} \
-	\
-	default: \
-		x = 0; \
-		BUG(); \
-		break; \
-	} \
-	\
-	x; \
-})
-
-#define put_unaligned(val, ptr) \
-do { \
-	char *__p = (char *) (ptr); \
-	int x; \
-	\
-	switch (sizeof(*ptr)) { \
-	case 2: \
-	{ \
-		asm(" stb%I1.p %0,%M1 \n" \
-		    " srli %0,#8,%0 \n" \
-		    " stb%I2 %0,%M2 \n" \
-		    : "=r"(x), "=m"(__p[1]), "=m"(__p[0]) \
-		    : "0"(val) \
-		    ); \
-		break; \
-	} \
-	\
-	case 4: \
-	{ \
-		asm(" stb%I1.p %0,%M1 \n" \
-		    " srli %0,#8,%0 \n" \
-		    " stb%I2.p %0,%M2 \n" \
-		    " srli %0,#8,%0 \n" \
-		    " stb%I3.p %0,%M3 \n" \
-		    " srli %0,#8,%0 \n" \
-		    " stb%I4 %0,%M4 \n" \
-		    : "=r"(x), "=m"(__p[3]), "=m"(__p[2]), "=m"(__p[1]), "=m"(__p[0]) \
-		    : "0"(val) \
-		    ); \
-		break; \
-	} \
-	\
-	case 8: \
-	{ \
-		uint32_t __high, __low; \
-		__high = (uint64_t)val >> 32; \
-		__low = val & 0xffffffff; \
-		asm(" stb%I2.p %0,%M2 \n" \
-		    " srli %0,#8,%0 \n" \
-		    " stb%I3.p %0,%M3 \n" \
-		    " srli %0,#8,%0 \n" \
-		    " stb%I4.p %0,%M4 \n" \
-		    " srli %0,#8,%0 \n" \
-		    " stb%I5.p %0,%M5 \n" \
-		    " srli %0,#8,%0 \n" \
-		    " stb%I6.p %1,%M6 \n" \
-		    " srli %1,#8,%1 \n" \
-		    " stb%I7.p %1,%M7 \n" \
-		    " srli %1,#8,%1 \n" \
-		    " stb%I8.p %1,%M8 \n" \
-		    " srli %1,#8,%1 \n" \
-		    " stb%I9 %1,%M9 \n" \
-		    : "=&r"(__low), "=&r"(__high), "=m"(__p[7]), "=m"(__p[6]), \
-		      "=m"(__p[5]), "=m"(__p[4]), "=m"(__p[3]), "=m"(__p[2]), \
-		      "=m"(__p[1]), "=m"(__p[0]) \
-		    : "0"(__low), "1"(__high) \
-		    ); \
-		break; \
-	} \
-	\
-	default: \
-		*(ptr) = (val); \
-		break; \
-	} \
-} while(0)
-
-#endif
+#define get_unaligned __get_unaligned_be
+#define put_unaligned __put_unaligned_be
 
-#endif
+#endif /* _ASM_FRV_UNALIGNED_H */
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
deleted file mode 100644
index 2fe1b2e67f01..000000000000
--- a/include/asm-generic/unaligned.h
+++ /dev/null
@@ -1,124 +0,0 @@
-#ifndef _ASM_GENERIC_UNALIGNED_H_
-#define _ASM_GENERIC_UNALIGNED_H_
-
-/*
- * For the benefit of those who are trying to port Linux to another
- * architecture, here are some C-language equivalents.
- *
- * This is based almost entirely upon Richard Henderson's
- * asm-alpha/unaligned.h implementation. Some comments were
- * taken from David Mosberger's asm-ia64/unaligned.h header.
- */
-
-#include <linux/types.h>
-
-/*
- * The main single-value unaligned transfer routines.
- */
-#define get_unaligned(ptr) \
-	__get_unaligned((ptr), sizeof(*(ptr)))
-#define put_unaligned(x,ptr) \
-	((void)sizeof(*(ptr)=(x)),\
-	__put_unaligned((__force __u64)(x), (ptr), sizeof(*(ptr))))
-
-/*
- * This function doesn't actually exist. The idea is that when
- * someone uses the macros below with an unsupported size (datatype),
- * the linker will alert us to the problem via an unresolved reference
- * error.
- */
-extern void bad_unaligned_access_length(void) __attribute__((noreturn));
-
-struct __una_u64 { __u64 x __attribute__((packed)); };
-struct __una_u32 { __u32 x __attribute__((packed)); };
-struct __una_u16 { __u16 x __attribute__((packed)); };
-
-/*
- * Elemental unaligned loads
- */
-
-static inline __u64 __uldq(const __u64 *addr)
-{
-	const struct __una_u64 *ptr = (const struct __una_u64 *) addr;
-	return ptr->x;
-}
-
-static inline __u32 __uldl(const __u32 *addr)
-{
-	const struct __una_u32 *ptr = (const struct __una_u32 *) addr;
-	return ptr->x;
-}
-
-static inline __u16 __uldw(const __u16 *addr)
-{
-	const struct __una_u16 *ptr = (const struct __una_u16 *) addr;
-	return ptr->x;
-}
-
-/*
- * Elemental unaligned stores
- */
-
-static inline void __ustq(__u64 val, __u64 *addr)
-{
-	struct __una_u64 *ptr = (struct __una_u64 *) addr;
-	ptr->x = val;
-}
-
-static inline void __ustl(__u32 val, __u32 *addr)
-{
-	struct __una_u32 *ptr = (struct __una_u32 *) addr;
-	ptr->x = val;
-}
-
-static inline void __ustw(__u16 val, __u16 *addr)
-{
-	struct __una_u16 *ptr = (struct __una_u16 *) addr;
-	ptr->x = val;
-}
-
-#define __get_unaligned(ptr, size) ({ \
-	const void *__gu_p = ptr; \
-	__u64 __val; \
-	switch (size) { \
-	case 1: \
-		__val = *(const __u8 *)__gu_p; \
-		break; \
-	case 2: \
-		__val = __uldw(__gu_p); \
-		break; \
-	case 4: \
-		__val = __uldl(__gu_p); \
-		break; \
-	case 8: \
-		__val = __uldq(__gu_p); \
-		break; \
-	default: \
-		bad_unaligned_access_length(); \
-	}; \
-	(__force __typeof__(*(ptr)))__val; \
-})
-
-#define __put_unaligned(val, ptr, size) \
-({ \
-	void *__gu_p = ptr; \
-	switch (size) { \
-	case 1: \
-		*(__u8 *)__gu_p = (__force __u8)val; \
-		break; \
-	case 2: \
-		__ustw((__force __u16)val, __gu_p); \
-		break; \
-	case 4: \
-		__ustl((__force __u32)val, __gu_p); \
-		break; \
-	case 8: \
-		__ustq(val, __gu_p); \
-		break; \
-	default: \
-		bad_unaligned_access_length(); \
-	}; \
-	(void)0; \
-})
-
-#endif /* _ASM_GENERIC_UNALIGNED_H */
diff --git a/include/asm-h8300/unaligned.h b/include/asm-h8300/unaligned.h
index ffb67f472070..b8d06c70c2da 100644
--- a/include/asm-h8300/unaligned.h
+++ b/include/asm-h8300/unaligned.h
@@ -1,15 +1,11 @@
-#ifndef __H8300_UNALIGNED_H
-#define __H8300_UNALIGNED_H
+#ifndef _ASM_H8300_UNALIGNED_H
+#define _ASM_H8300_UNALIGNED_H
 
+#include <linux/unaligned/be_memmove.h>
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/generic.h>
 
-/* Use memmove here, so gcc does not insert a __builtin_memcpy. */
+#define get_unaligned __get_unaligned_be
+#define put_unaligned __put_unaligned_be
 
-#define get_unaligned(ptr) \
-  ({ __typeof__(*(ptr)) __tmp; memmove(&__tmp, (ptr), sizeof(*(ptr))); __tmp; })
-
-#define put_unaligned(val, ptr) \
-  ({ __typeof__(*(ptr)) __tmp = (val); \
-     memmove((ptr), &__tmp, sizeof(*(ptr))); \
-     (void)0; })
-
-#endif
+#endif /* _ASM_H8300_UNALIGNED_H */
diff --git a/include/asm-ia64/unaligned.h b/include/asm-ia64/unaligned.h
index bb8559888103..7bddc7f58584 100644
--- a/include/asm-ia64/unaligned.h
+++ b/include/asm-ia64/unaligned.h
@@ -1,6 +1,11 @@
 #ifndef _ASM_IA64_UNALIGNED_H
 #define _ASM_IA64_UNALIGNED_H
 
-#include <asm-generic/unaligned.h>
+#include <linux/unaligned/le_struct.h>
+#include <linux/unaligned/be_byteshift.h>
+#include <linux/unaligned/generic.h>
+
+#define get_unaligned __get_unaligned_le
+#define put_unaligned __put_unaligned_le
 
 #endif /* _ASM_IA64_UNALIGNED_H */
diff --git a/include/asm-m32r/unaligned.h b/include/asm-m32r/unaligned.h
index fccc180c3913..377eb20d1ec6 100644
--- a/include/asm-m32r/unaligned.h
+++ b/include/asm-m32r/unaligned.h
@@ -1,19 +1,18 @@
 #ifndef _ASM_M32R_UNALIGNED_H
 #define _ASM_M32R_UNALIGNED_H
 
-/*
- * For the benefit of those who are trying to port Linux to another
- * architecture, here are some C-language equivalents.
- */
-
-#include <asm/string.h>
-
-#define get_unaligned(ptr) \
-  ({ __typeof__(*(ptr)) __tmp; memmove(&__tmp, (ptr), sizeof(*(ptr))); __tmp; })
-
-#define put_unaligned(val, ptr) \
-  ({ __typeof__(*(ptr)) __tmp = (val); \
-     memmove((ptr), &__tmp, sizeof(*(ptr))); \
-     (void)0; })
+#if defined(__LITTLE_ENDIAN__)
+# include <linux/unaligned/le_memmove.h>
+# include <linux/unaligned/be_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_le
+# define put_unaligned __put_unaligned_le
+#else
+# include <linux/unaligned/be_memmove.h>
+# include <linux/unaligned/le_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_be
+# define put_unaligned __put_unaligned_be
+#endif
 
 #endif /* _ASM_M32R_UNALIGNED_H */
diff --git a/include/asm-m68k/unaligned.h b/include/asm-m68k/unaligned.h
index 804cb3f888fe..77698f2dc33c 100644
--- a/include/asm-m68k/unaligned.h
+++ b/include/asm-m68k/unaligned.h
@@ -1,16 +1,13 @@
-#ifndef __M68K_UNALIGNED_H
-#define __M68K_UNALIGNED_H
+#ifndef _ASM_M68K_UNALIGNED_H
+#define _ASM_M68K_UNALIGNED_H
 
 /*
  * The m68k can do unaligned accesses itself.
- *
- * The strange macros are there to make sure these can't
- * be misused in a way that makes them not work on other
- * architectures where unaligned accesses aren't as simple.
  */
+#include <linux/unaligned/access_ok.h>
+#include <linux/unaligned/generic.h>
 
-#define get_unaligned(ptr) (*(ptr))
+#define get_unaligned __get_unaligned_be
+#define put_unaligned __put_unaligned_be
 
-#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
-
-#endif
+#endif /* _ASM_M68K_UNALIGNED_H */
diff --git a/include/asm-m68knommu/unaligned.h b/include/asm-m68knommu/unaligned.h
index 869e9dd24f54..eb1ea4cb9a59 100644
--- a/include/asm-m68knommu/unaligned.h
+++ b/include/asm-m68knommu/unaligned.h
@@ -1,23 +1,25 @@
-#ifndef __M68K_UNALIGNED_H
-#define __M68K_UNALIGNED_H
+#ifndef _ASM_M68KNOMMU_UNALIGNED_H
+#define _ASM_M68KNOMMU_UNALIGNED_H
 
 
 #ifdef CONFIG_COLDFIRE
+#include <linux/unaligned/be_struct.h>
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/generic.h>
 
-#include <asm-generic/unaligned.h>
+#define get_unaligned __get_unaligned_be
+#define put_unaligned __put_unaligned_be
 
 #else
 /*
  * The m68k can do unaligned accesses itself.
- *
- * The strange macros are there to make sure these can't
- * be misused in a way that makes them not work on other
- * architectures where unaligned accesses aren't as simple.
  */
+#include <linux/unaligned/access_ok.h>
+#include <linux/unaligned/generic.h>
 
-#define get_unaligned(ptr) (*(ptr))
-#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
+#define get_unaligned __get_unaligned_be
+#define put_unaligned __put_unaligned_be
 
 #endif
 
-#endif
+#endif /* _ASM_M68KNOMMU_UNALIGNED_H */
diff --git a/include/asm-mips/unaligned.h b/include/asm-mips/unaligned.h
index 3249049e93aa..792404948571 100644
--- a/include/asm-mips/unaligned.h
+++ b/include/asm-mips/unaligned.h
@@ -5,25 +5,24 @@
  *
  * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
  */
-#ifndef __ASM_GENERIC_UNALIGNED_H
-#define __ASM_GENERIC_UNALIGNED_H
+#ifndef _ASM_MIPS_UNALIGNED_H
+#define _ASM_MIPS_UNALIGNED_H
 
 #include <linux/compiler.h>
+#if defined(__MIPSEB__)
+# include <linux/unaligned/be_struct.h>
+# include <linux/unaligned/le_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_be
+# define put_unaligned __put_unaligned_be
+#elif defined(__MIPSEL__)
+# include <linux/unaligned/le_struct.h>
+# include <linux/unaligned/be_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_le
+# define put_unaligned __put_unaligned_le
+#else
+# error "MIPS, but neither __MIPSEB__, nor __MIPSEL__???"
+#endif
 
-#define get_unaligned(ptr) \
-({ \
-	struct __packed { \
-		typeof(*(ptr)) __v; \
-	} *__p = (void *) (ptr); \
-	__p->__v; \
-})
-
-#define put_unaligned(val, ptr) \
-do { \
-	struct __packed { \
-		typeof(*(ptr)) __v; \
-	} *__p = (void *) (ptr); \
-	__p->__v = (val); \
-} while(0)
-
-#endif /* __ASM_GENERIC_UNALIGNED_H */
+#endif /* _ASM_MIPS_UNALIGNED_H */
diff --git a/include/asm-mn10300/unaligned.h b/include/asm-mn10300/unaligned.h
index cad3afbd035f..0df671318ae4 100644
--- a/include/asm-mn10300/unaligned.h
+++ b/include/asm-mn10300/unaligned.h
@@ -8,129 +8,13 @@
  * as published by the Free Software Foundation; either version
  * 2 of the Licence, or (at your option) any later version.
  */
-#ifndef _ASM_UNALIGNED_H
-#define _ASM_UNALIGNED_H
+#ifndef _ASM_MN10300_UNALIGNED_H
+#define _ASM_MN10300_UNALIGNED_H
 
-#include <asm/types.h>
+#include <linux/unaligned/access_ok.h>
+#include <linux/unaligned/generic.h>
 
-#if 0
-extern int __bug_unaligned_x(void *ptr);
+#define get_unaligned __get_unaligned_le
+#define put_unaligned __put_unaligned_le
 
-/*
- * What is the most efficient way of loading/storing an unaligned value?
- *
- * That is the subject of this file. Efficiency here is defined as
- * minimum code size with minimum register usage for the common cases.
- * It is currently not believed that long longs are common, so we
- * trade efficiency for the chars, shorts and longs against the long
- * longs.
- *
- * Current stats with gcc 2.7.2.2 for these functions:
- *
- * ptrsize get: code regs put: code regs
- * 1 1 1 1 2
- * 2 3 2 3 2
- * 4 7 3 7 3
- * 8 20 6 16 6
- *
- * gcc 2.95.1 seems to code differently:
- *
- * ptrsize get: code regs put: code regs
- * 1 1 1 1 2
- * 2 3 2 3 2
- * 4 7 4 7 4
- * 8 19 8 15 6
- *
- * which may or may not be more efficient (depending upon whether
- * you can afford the extra registers). Hopefully the gcc 2.95
- * is inteligent enough to decide if it is better to use the
- * extra register, but evidence so far seems to suggest otherwise.
- *
- * Unfortunately, gcc is not able to optimise the high word
- * out of long long >> 32, or the low word from long long << 32
- */
-
-#define __get_unaligned_2(__p) \
-	(__p[0] | __p[1] << 8)
-
-#define __get_unaligned_4(__p) \
-	(__p[0] | __p[1] << 8 | __p[2] << 16 | __p[3] << 24)
-
-#define get_unaligned(ptr) \
-({ \
-	unsigned int __v1, __v2; \
-	__typeof__(*(ptr)) __v; \
-	__u8 *__p = (__u8 *)(ptr); \
-	\
-	switch (sizeof(*(ptr))) { \
-	case 1: __v = *(ptr); break; \
-	case 2: __v = __get_unaligned_2(__p); break; \
-	case 4: __v = __get_unaligned_4(__p); break; \
-	case 8: \
-		__v2 = __get_unaligned_4((__p+4)); \
-		__v1 = __get_unaligned_4(__p); \
-		__v = ((unsigned long long)__v2 << 32 | __v1); \
-		break; \
-	default: __v = __bug_unaligned_x(__p); break; \
-	} \
-	__v; \
-})
-
-
-static inline void __put_unaligned_2(__u32 __v, register __u8 *__p)
-{
-	*__p++ = __v;
-	*__p++ = __v >> 8;
-}
-
-static inline void __put_unaligned_4(__u32 __v, register __u8 *__p)
-{
-	__put_unaligned_2(__v >> 16, __p + 2);
-	__put_unaligned_2(__v, __p);
-}
-
-static inline void __put_unaligned_8(const unsigned long long __v, __u8 *__p)
-{
-	/*
-	 * tradeoff: 8 bytes of stack for all unaligned puts (2
-	 * instructions), or an extra register in the long long
-	 * case - go for the extra register.
-	 */
-	__put_unaligned_4(__v >> 32, __p + 4);
-	__put_unaligned_4(__v, __p);
-}
-
-/*
- * Try to store an unaligned value as efficiently as possible.
- */
-#define put_unaligned(val, ptr) \
-	({ \
-		switch (sizeof(*(ptr))) { \
-		case 1: \
-			*(ptr) = (val); \
-			break; \
-		case 2: \
-			__put_unaligned_2((val), (__u8 *)(ptr)); \
-			break; \
-		case 4: \
-			__put_unaligned_4((val), (__u8 *)(ptr)); \
-			break; \
-		case 8: \
-			__put_unaligned_8((val), (__u8 *)(ptr)); \
-			break; \
-		default: \
-			__bug_unaligned_x(ptr); \
-			break; \
-		} \
-		(void) 0; \
-	})
-
-
-#else
-
-#define get_unaligned(ptr) (*(ptr))
-#define put_unaligned(val, ptr) ({ *(ptr) = (val); (void) 0; })
-
-#endif
-
-#endif
+#endif /* _ASM_MN10300_UNALIGNED_H */
diff --git a/include/asm-parisc/unaligned.h b/include/asm-parisc/unaligned.h
index 53c905838d93..dfc5d3321a54 100644
--- a/include/asm-parisc/unaligned.h
+++ b/include/asm-parisc/unaligned.h
@@ -1,7 +1,11 @@
-#ifndef _ASM_PARISC_UNALIGNED_H_
-#define _ASM_PARISC_UNALIGNED_H_
+#ifndef _ASM_PARISC_UNALIGNED_H
+#define _ASM_PARISC_UNALIGNED_H
 
-#include <asm-generic/unaligned.h>
+#include <linux/unaligned/be_struct.h>
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/generic.h>
+#define get_unaligned __get_unaligned_be
+#define put_unaligned __put_unaligned_be
 
 #ifdef __KERNEL__
 struct pt_regs;
@@ -9,4 +13,4 @@ void handle_unaligned(struct pt_regs *regs);
 int check_unaligned(struct pt_regs *regs);
 #endif
 
-#endif /* _ASM_PARISC_UNALIGNED_H_ */
+#endif /* _ASM_PARISC_UNALIGNED_H */
diff --git a/include/asm-powerpc/unaligned.h b/include/asm-powerpc/unaligned.h
index 6c95dfa2652f..5f1b1e3c2137 100644
--- a/include/asm-powerpc/unaligned.h
+++ b/include/asm-powerpc/unaligned.h
@@ -5,15 +5,12 @@
 
 /*
  * The PowerPC can do unaligned accesses itself in big endian mode.
- *
- * The strange macros are there to make sure these can't
- * be misused in a way that makes them not work on other
- * architectures where unaligned accesses aren't as simple.
  */
+#include <linux/unaligned/access_ok.h>
+#include <linux/unaligned/generic.h>
 
-#define get_unaligned(ptr) (*(ptr))
-
-#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
+#define get_unaligned __get_unaligned_be
+#define put_unaligned __put_unaligned_be
 
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_UNALIGNED_H */
diff --git a/include/asm-s390/unaligned.h b/include/asm-s390/unaligned.h
index 8ee86dbedd1f..da9627afe5d8 100644
--- a/include/asm-s390/unaligned.h
+++ b/include/asm-s390/unaligned.h
@@ -1,24 +1,13 @@
-/*
- * include/asm-s390/unaligned.h
- *
- * S390 version
- *
- * Derived from "include/asm-i386/unaligned.h"
- */
-
-#ifndef __S390_UNALIGNED_H
-#define __S390_UNALIGNED_H
+#ifndef _ASM_S390_UNALIGNED_H
+#define _ASM_S390_UNALIGNED_H
 
 /*
  * The S390 can do unaligned accesses itself.
- *
- * The strange macros are there to make sure these can't
- * be misused in a way that makes them not work on other
- * architectures where unaligned accesses aren't as simple.
  */
+#include <linux/unaligned/access_ok.h>
+#include <linux/unaligned/generic.h>
 
-#define get_unaligned(ptr) (*(ptr))
+#define get_unaligned __get_unaligned_be
+#define put_unaligned __put_unaligned_be
 
-#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
-
-#endif
+#endif /* _ASM_S390_UNALIGNED_H */
diff --git a/include/asm-sh/unaligned.h b/include/asm-sh/unaligned.h
index 5250e3063b42..c1641a01d50f 100644
--- a/include/asm-sh/unaligned.h
+++ b/include/asm-sh/unaligned.h
@@ -1,7 +1,19 @@
-#ifndef __ASM_SH_UNALIGNED_H
-#define __ASM_SH_UNALIGNED_H
+#ifndef _ASM_SH_UNALIGNED_H
+#define _ASM_SH_UNALIGNED_H
 
 /* SH can't handle unaligned accesses. */
-#include <asm-generic/unaligned.h>
+#ifdef __LITTLE_ENDIAN__
+# include <linux/unaligned/le_struct.h>
+# include <linux/unaligned/be_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_le
+# define put_unaligned __put_unaligned_le
+#else
+# include <linux/unaligned/be_struct.h>
+# include <linux/unaligned/le_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_be
+# define put_unaligned __put_unaligned_be
+#endif
 
-#endif /* __ASM_SH_UNALIGNED_H */
+#endif /* _ASM_SH_UNALIGNED_H */
diff --git a/include/asm-sparc/unaligned.h b/include/asm-sparc/unaligned.h
index b6f8eddd30af..11d2d5fb5902 100644
--- a/include/asm-sparc/unaligned.h
+++ b/include/asm-sparc/unaligned.h
@@ -1,6 +1,10 @@
-#ifndef _ASM_SPARC_UNALIGNED_H_
-#define _ASM_SPARC_UNALIGNED_H_
+#ifndef _ASM_SPARC_UNALIGNED_H
+#define _ASM_SPARC_UNALIGNED_H
 
-#include <asm-generic/unaligned.h>
+#include <linux/unaligned/be_struct.h>
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/generic.h>
+#define get_unaligned __get_unaligned_be
+#define put_unaligned __put_unaligned_be
 
 #endif /* _ASM_SPARC_UNALIGNED_H */
diff --git a/include/asm-sparc64/unaligned.h b/include/asm-sparc64/unaligned.h
index 1ed3ba537772..edcebb09441e 100644
--- a/include/asm-sparc64/unaligned.h
+++ b/include/asm-sparc64/unaligned.h
@@ -1,6 +1,10 @@
-#ifndef _ASM_SPARC64_UNALIGNED_H_
-#define _ASM_SPARC64_UNALIGNED_H_
+#ifndef _ASM_SPARC64_UNALIGNED_H
+#define _ASM_SPARC64_UNALIGNED_H
 
-#include <asm-generic/unaligned.h>
+#include <linux/unaligned/be_struct.h>
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/generic.h>
+#define get_unaligned __get_unaligned_be
+#define put_unaligned __put_unaligned_be
 
 #endif /* _ASM_SPARC64_UNALIGNED_H */
diff --git a/include/asm-um/unaligned.h b/include/asm-um/unaligned.h
index 1d2497c57274..a47196974e39 100644
--- a/include/asm-um/unaligned.h
+++ b/include/asm-um/unaligned.h
@@ -1,6 +1,6 @@
-#ifndef __UM_UNALIGNED_H
-#define __UM_UNALIGNED_H
+#ifndef _ASM_UM_UNALIGNED_H
+#define _ASM_UM_UNALIGNED_H
 
 #include "asm/arch/unaligned.h"
 
-#endif
+#endif /* _ASM_UM_UNALIGNED_H */
diff --git a/include/asm-v850/unaligned.h b/include/asm-v850/unaligned.h
index e30b18653a94..53122b28491e 100644
--- a/include/asm-v850/unaligned.h
+++ b/include/asm-v850/unaligned.h
@@ -1,6 +1,4 @@
 /*
- * include/asm-v850/unaligned.h -- Unaligned memory access
- *
  * Copyright (C) 2001 NEC Corporation
  * Copyright (C) 2001 Miles Bader <miles@gnu.org>
  *
@@ -8,123 +6,17 @@
  * Public License. See the file COPYING in the main directory of this
  * archive for more details.
  *
- * This file is a copy of the arm version, include/asm-arm/unaligned.h
- *
  * Note that some v850 chips support unaligned access, but it seems too
  * annoying to use.
  */
+#ifndef _ASM_V850_UNALIGNED_H
+#define _ASM_V850_UNALIGNED_H
 
-#ifndef __V850_UNALIGNED_H__
-#define __V850_UNALIGNED_H__
-
-#include <asm/types.h>
-
-extern int __bug_unaligned_x(void *ptr);
-
-/*
- * What is the most efficient way of loading/storing an unaligned value?
- *
- * That is the subject of this file. Efficiency here is defined as
- * minimum code size with minimum register usage for the common cases.
- * It is currently not believed that long longs are common, so we
- * trade efficiency for the chars, shorts and longs against the long
- * longs.
- *
- * Current stats with gcc 2.7.2.2 for these functions:
- *
- * ptrsize get: code regs put: code regs
- * 1 1 1 1 2
- * 2 3 2 3 2
- * 4 7 3 7 3
- * 8 20 6 16 6
- *
- * gcc 2.95.1 seems to code differently:
- *
- * ptrsize get: code regs put: code regs
- * 1 1 1 1 2
- * 2 3 2 3 2
- * 4 7 4 7 4
- * 8 19 8 15 6
- *
- * which may or may not be more efficient (depending upon whether
- * you can afford the extra registers). Hopefully the gcc 2.95
- * is inteligent enough to decide if it is better to use the
- * extra register, but evidence so far seems to suggest otherwise.
- *
- * Unfortunately, gcc is not able to optimise the high word
- * out of long long >> 32, or the low word from long long << 32
- */
-
-#define __get_unaligned_2(__p) \
-	(__p[0] | __p[1] << 8)
-
-#define __get_unaligned_4(__p) \
-	(__p[0] | __p[1] << 8 | __p[2] << 16 | __p[3] << 24)
-
-#define get_unaligned(ptr) \
-	({ \
-		__typeof__(*(ptr)) __v; \
-		__u8 *__p = (__u8 *)(ptr); \
-		switch (sizeof(*(ptr))) { \
-		case 1: __v = *(ptr); break; \
-		case 2: __v = __get_unaligned_2(__p); break; \
-		case 4: __v = __get_unaligned_4(__p); break; \
-		case 8: { \
-			unsigned int __v1, __v2; \
-			__v2 = __get_unaligned_4((__p+4)); \
-			__v1 = __get_unaligned_4(__p); \
-			__v = ((unsigned long long)__v2 << 32 | __v1); \
-		} \
-			break; \
-		default: __v = __bug_unaligned_x(__p); break; \
-		} \
-		__v; \
-	})
-
-
-static inline void __put_unaligned_2(__u32 __v, register __u8 *__p)
-{
-	*__p++ = __v;
-	*__p++ = __v >> 8;
-}
-
-static inline void __put_unaligned_4(__u32 __v, register __u8 *__p)
-{
-	__put_unaligned_2(__v >> 16, __p + 2);
-	__put_unaligned_2(__v, __p);
-}
-
-static inline void __put_unaligned_8(const unsigned long long __v, register __u8 *__p)
-{
-	/*
-	 * tradeoff: 8 bytes of stack for all unaligned puts (2
-	 * instructions), or an extra register in the long long
-	 * case - go for the extra register.
-	 */
-	__put_unaligned_4(__v >> 32, __p+4);
-	__put_unaligned_4(__v, __p);
-}
-
-/*
- * Try to store an unaligned value as efficiently as possible.
- */
-#define put_unaligned(val,ptr) \
-	({ \
-		switch (sizeof(*(ptr))) { \
-		case 1: \
-			*(ptr) = (val); \
-			break; \
-		case 2: __put_unaligned_2((val),(__u8 *)(ptr)); \
-			break; \
-		case 4: __put_unaligned_4((val),(__u8 *)(ptr)); \
-			break; \
-		case 8: __put_unaligned_8((val),(__u8 *)(ptr)); \
-			break; \
-		default: __bug_unaligned_x(ptr); \
-			break; \
-		} \
-		(void) 0; \
-	})
+#include <linux/unaligned/be_byteshift.h>
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/generic.h>
 
+#define get_unaligned __get_unaligned_le
+#define put_unaligned __put_unaligned_le
 
-#endif /* __V850_UNALIGNED_H__ */
+#endif /* _ASM_V850_UNALIGNED_H */
diff --git a/include/asm-x86/unaligned.h b/include/asm-x86/unaligned.h
index d270ffe72759..a7bd416b4763 100644
--- a/include/asm-x86/unaligned.h
+++ b/include/asm-x86/unaligned.h
@@ -3,35 +3,12 @@
 
 /*
  * The x86 can do unaligned accesses itself.
- *
- * The strange macros are there to make sure these can't
- * be misused in a way that makes them not work on other
- * architectures where unaligned accesses aren't as simple.
  */
 
-/**
- * get_unaligned - get value from possibly mis-aligned location
- * @ptr: pointer to value
- *
- * This macro should be used for accessing values larger in size than
- * single bytes at locations that are expected to be improperly aligned,
- * e.g. retrieving a u16 value from a location not u16-aligned.
- *
- * Note that unaligned accesses can be very expensive on some architectures.
- */
-#define get_unaligned(ptr) (*(ptr))
+#include <linux/unaligned/access_ok.h>
+#include <linux/unaligned/generic.h>
 
-/**
- * put_unaligned - put value to a possibly mis-aligned location
- * @val: value to place
- * @ptr: pointer to location
- *
- * This macro should be used for placing values larger in size than
- * single bytes at locations that are expected to be improperly aligned,
- * e.g. writing a u16 value to a location not u16-aligned.
- *
- * Note that unaligned accesses can be very expensive on some architectures.
- */
-#define put_unaligned(val, ptr) ((void)(*(ptr) = (val)))
+#define get_unaligned __get_unaligned_le
+#define put_unaligned __put_unaligned_le
 
 #endif /* _ASM_X86_UNALIGNED_H */
diff --git a/include/asm-xtensa/unaligned.h b/include/asm-xtensa/unaligned.h
index 28220890d0a6..8f3424fc5d18 100644
--- a/include/asm-xtensa/unaligned.h
+++ b/include/asm-xtensa/unaligned.h
@@ -1,6 +1,4 @@
 /*
- * include/asm-xtensa/unaligned.h
- *
  * Xtensa doesn't handle unaligned accesses efficiently.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -9,20 +7,23 @@
  *
  * Copyright (C) 2001 - 2005 Tensilica Inc.
  */
+#ifndef _ASM_XTENSA_UNALIGNED_H
+#define _ASM_XTENSA_UNALIGNED_H
 
-#ifndef _XTENSA_UNALIGNED_H
-#define _XTENSA_UNALIGNED_H
-
-#include <linux/string.h>
-
-/* Use memmove here, so gcc does not insert a __builtin_memcpy. */
-
-#define get_unaligned(ptr) \
-  ({ __typeof__(*(ptr)) __tmp; memmove(&__tmp, (ptr), sizeof(*(ptr))); __tmp; })
-
-#define put_unaligned(val, ptr) \
-  ({ __typeof__(*(ptr)) __tmp = (val); \
-     memmove((ptr), &__tmp, sizeof(*(ptr))); \
-     (void)0; })
+#ifdef __XTENSA_EL__
+# include <linux/unaligned/le_memmove.h>
+# include <linux/unaligned/be_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_le
+# define put_unaligned __put_unaligned_le
+#elif defined(__XTENSA_EB__)
+# include <linux/unaligned/be_memmove.h>
+# include <linux/unaligned/le_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_be
+# define put_unaligned __put_unaligned_be
+#else
+# error processor byte order undefined!
+#endif
 
-#endif /* _XTENSA_UNALIGNED_H */
+#endif /* _ASM_XTENSA_UNALIGNED_H */