author		Magnus Damm <damm@igel.co.jp>		2009-06-04 07:20:24 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2009-06-04 07:20:24 -0400
commit		48c72fccbfb1db01b5d0b98baff4442fea50d7a4 (patch)
tree		be342ec5f629a1488e8d93df2ff96670698fe03f /arch/sh
parent		138f025267dcc07d5e7d0bb1f20e9a6b5f2fdcf7 (diff)
sh: 16-bit get_unaligned() sh4a fix
This patch fixes the 16-bit case of the sh4a-specific
unaligned access implementation. Without this patch,
the 16-bit version of the sh4a get_unaligned() results
in a 32-bit read, which may read more data than intended
and/or cross page boundaries.
Unbreaks mtd NOR write handling on Migo-R.
Signed-off-by: Magnus Damm <damm@igel.co.jp>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
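To make the page-boundary concern above concrete, here is a hedged userspace sketch, not kernel code: the two-page mmap/mprotect setup and all names are illustrative assumptions. A 16-bit value sitting in the last two bytes of an accessible page is safe to fetch byte-by-byte, while a 4-byte load at the same address would spill into the following, inaccessible page.

/* Hypothetical userspace demo of the over-read fixed by this patch:
 * a 16-bit value in the last two bytes of a page, with the next page
 * made inaccessible.  Reading it as two bytes stays inside the first
 * page; a 32-bit fetch at the same address would cross into the
 * protected page.
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);

	/* Map two pages, then make the second one inaccessible. */
	uint8_t *buf = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;
	mprotect(buf + page, page, PROT_NONE);

	/* Place a 16-bit value in the last two bytes of the first page. */
	uint8_t *p = buf + page - 2;
	p[0] = 0x34;
	p[1] = 0x12;

	/* Byte-wise access, as in the fixed __get_unaligned_cpu16():
	 * only p[0] and p[1] are touched. */
	uint16_t v = p[0] | p[1] << 8;	/* little-endian assembly */
	printf("16-bit value: 0x%04x\n", v);

	/* A 32-bit read at p (the old approach) would also fetch p[2]
	 * and p[3], which live in the PROT_NONE page, and the process
	 * would take a fault.  Uncomment to observe:
	 *
	 * volatile uint32_t w = *(volatile uint32_t *)p;
	 * (void)w;
	 */
	munmap(buf, 2 * page);
	return 0;
}

On the actual hardware the old path went through __get_unaligned_cpu32(), which the sh4a header implements with the movua.l unaligned-load instruction rather than a plain pointer dereference, but the footprint of the access, four bytes instead of two, is the same, and that footprint is exactly what the patch narrows.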
Diffstat (limited to 'arch/sh')
 arch/sh/include/asm/unaligned-sh4a.h | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/sh/include/asm/unaligned-sh4a.h b/arch/sh/include/asm/unaligned-sh4a.h
index d8f89770275b..9f4dd252c981 100644
--- a/arch/sh/include/asm/unaligned-sh4a.h
+++ b/arch/sh/include/asm/unaligned-sh4a.h
@@ -3,9 +3,9 @@
 
 /*
  * SH-4A has support for unaligned 32-bit loads, and 32-bit loads only.
- * Support for 16 and 64-bit accesses are done through shifting and
- * masking relative to the endianness. Unaligned stores are not supported
- * by the instruction encoding, so these continue to use the packed
+ * Support for 64-bit accesses are done through shifting and masking
+ * relative to the endianness. Unaligned stores are not supported by the
+ * instruction encoding, so these continue to use the packed
  * struct.
  *
  * The same note as with the movli.l/movco.l pair applies here, as long
@@ -41,9 +41,9 @@ struct __una_u64 { u64 x __attribute__((packed)); };
 static inline u16 __get_unaligned_cpu16(const u8 *p)
 {
 #ifdef __LITTLE_ENDIAN
-	return __get_unaligned_cpu32(p) & 0xffff;
+	return p[0] | p[1] << 8;
 #else
-	return __get_unaligned_cpu32(p) >> 16;
+	return p[0] << 8 | p[1];
 #endif
 }
 
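The replacement expressions in the second hunk differ only in which byte becomes the most significant one. A minimal, self-contained sketch of the same byte-wise assembly follows; the helper name and the compiler byte-order macro are assumptions for the demo, not the kernel's API.

/* Sketch of the fixed 16-bit path: build the value from the two bytes
 * the field actually owns, choosing the shift order by endianness. */
#include <stdint.h>
#include <stdio.h>

static inline uint16_t demo_get_unaligned16(const uint8_t *p)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return p[0] << 8 | p[1];	/* big endian: first byte is the MSB */
#else
	return p[0] | p[1] << 8;	/* little endian: first byte is the LSB */
#endif
}

int main(void)
{
	/* A 16-bit field at an odd offset inside a packed byte stream. */
	uint8_t stream[] = { 0xaa, 0x34, 0x12, 0xbb };

	printf("0x%04x\n", demo_get_unaligned16(stream + 1));
	return 0;
}

On a little-endian build this prints 0x1234, matching what the fixed __get_unaligned_cpu16() would return for the same two bytes.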