author     David Brownell <david-b@pacbell.net>        2007-05-25 21:47:47 -0400
committer  Haavard Skinnemoen <hskinnemoen@atmel.com>  2007-07-18 14:45:50 -0400
commit     c6083cd61b5a64a1c73d1634744382f54cb99595 (patch)
tree       4ad4409a074a9de87a03bc2162d27e326f2c0498 /include/asm-avr32/unaligned.h
parent     8b4a40809e5330c9da5d20107d693d92d73b31dc (diff)
[AVR32] faster avr32 unaligned access
Use a more conventional implementation for unaligned access, and include
an AT32AP-specific optimization: the CPU will handle unaligned words.
The result is always faster and smaller for 8-, 16-, and 32-bit values.
For 64-bit quantities, it is presumably larger.
Signed-off-by: David Brownell <dbrownell@users.sourceforge.net>
Signed-off-by: Haavard Skinnemoen <hskinnemoen@atmel.com>
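
For context, here is a minimal user-space sketch of what the get_unaligned()/put_unaligned() interface is for (hypothetical demo_* names, not the kernel header itself): reading or writing a value at an address that need not satisfy the type's alignment requirement. The generic kernel helpers do the moral equivalent of the byte-wise copies below; the AT32AP-specific case added by this patch short-circuits 4-byte accesses to a plain load or store, which the AP core handles in hardware.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Byte-wise access, safe at any alignment (the idea behind the generic fallback). */
static uint32_t demo_get_unaligned_u32(const void *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));
	return v;
}

static void demo_put_unaligned_u32(uint32_t v, void *p)
{
	memcpy(p, &v, sizeof(v));
}

int main(void)
{
	unsigned char buf[8] = { 0 };

	/* buf + 1 is deliberately not 4-byte aligned. */
	demo_put_unaligned_u32(0x12345678u, buf + 1);
	printf("read back: 0x%08x\n", demo_get_unaligned_u32(buf + 1));
	return 0;
}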
Diffstat (limited to 'include/asm-avr32/unaligned.h')
-rw-r--r--  include/asm-avr32/unaligned.h | 29 ++++++++++++++++++++---------
1 file changed, 20 insertions(+), 9 deletions(-)
diff --git a/include/asm-avr32/unaligned.h b/include/asm-avr32/unaligned.h
index 3042723fcbfd..791361786fcc 100644
--- a/include/asm-avr32/unaligned.h
+++ b/include/asm-avr32/unaligned.h
@@ -6,20 +6,31 @@
  * implementation. The AVR32 AP implementation can handle unaligned
  * words, but halfwords must be halfword-aligned, and doublewords must
  * be word-aligned.
- *
- * TODO: Make all this CPU-specific and optimize.
  */
 
-#include <linux/string.h>
+#include <asm-generic/unaligned.h>
 
-/* Use memmove here, so gcc does not insert a __builtin_memcpy. */
+#ifdef CONFIG_CPU_AT32AP7000
 
+/* REVISIT calling memmove() may be smaller for 64-bit values ... */
+
+#undef get_unaligned
 #define get_unaligned(ptr) \
-  ({ __typeof__(*(ptr)) __tmp; memmove(&__tmp, (ptr), sizeof(*(ptr))); __tmp; })
+	___get_unaligned(ptr, sizeof((*ptr)))
+#define ___get_unaligned(ptr, size) \
+	((size == 4) ? *(ptr) : __get_unaligned(ptr, size))
+
+#undef put_unaligned
+#define put_unaligned(val, ptr) \
+	___put_unaligned((__u64)(val), ptr, sizeof((*ptr)))
+#define ___put_unaligned(val, ptr, size) \
+do { \
+	if (size == 4) \
+		*(ptr) = (val); \
+	else \
+		__put_unaligned(val, ptr, size); \
+} while (0)
 
-#define put_unaligned(val, ptr) \
-  ({ __typeof__(*(ptr)) __tmp = (val); \
-     memmove((ptr), &__tmp, sizeof(*(ptr))); \
-     (void)0; })
+#endif
 
 #endif /* __ASM_AVR32_UNALIGNED_H */
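
As a rough illustration of why the 4-byte case costs nothing extra at run time, the sketch below (user space, hypothetical demo_* names, gcc statement expressions as in the kernel macro) mirrors the size dispatch of ___get_unaligned(): sizeof(*(ptr)) is a compile-time constant, so the compiler folds the comparison and only one branch survives in the generated code. The direct dereference in the size-4 branch is not portable C; it is safe only on hardware that tolerates unaligned word loads, which is exactly the AT32AP property this patch relies on.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the byte-wise helper that <asm-generic/unaligned.h> supplies. */
static void demo_get_bytes(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);
}

/* Size dispatch in the spirit of ___get_unaligned() above (gcc extension). */
#define demo_get_unaligned(ptr) ({					\
	__typeof__(*(ptr)) __v;						\
	if (sizeof(*(ptr)) == 4)					\
		__v = *(ptr);	/* direct load; fine where the CPU allows it */ \
	else								\
		demo_get_bytes(&__v, (ptr), sizeof(*(ptr)));		\
	__v;								\
})

int main(void)
{
	unsigned char buf[8] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0x00, 0x00, 0x00 };
	uint16_t *p16 = (uint16_t *)(buf + 1);	/* deliberately misaligned */
	uint32_t *p32 = (uint32_t *)(buf + 1);	/* deliberately misaligned */

	printf("u16: 0x%04x\n", (unsigned)demo_get_unaligned(p16));	/* byte-wise path */
	printf("u32: 0x%08x\n", (unsigned)demo_get_unaligned(p32));	/* direct-load path */
	return 0;
}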