author		Paul Mundt <lethal@linux-sh.org>	2009-08-14 12:57:36 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2009-08-14 12:57:36 -0400
commit		43bc61d86f8ea6edef2e02d1dc47617883fa9a9c (patch)
tree		4f7752888f2e9ca5dcfa9680edefea12aa4a6e8d /arch
parent		0837f52463583f76670ab2350e0f1541cb0351f5 (diff)
sh: Add register alignment helpers for shared flushers.
This plugs in some register alignment helpers for the shared flushers, allowing them to also be used on SH-5. The main rationale here is that in the SH-5 case we have a variable ABI, where the pointer size may not equal the register width. This register extension is taken care of by the SH-5 code already today, and is otherwise unused on the SH-4 code. This combines the two and allows us to kill off the SH-5 implementation.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
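For illustration, a minimal host-runnable C sketch of the sign extension these helpers perform, with fixed-width stdint types standing in for the SH long types; register_align_64 and the sample address are hypothetical stand-ins, not the kernel code:

#include <stdint.h>
#include <stdio.h>

/*
 * Mirrors the 64-bit register_align() below: sign-extend a 32-bit
 * pointer value to the 64-bit register width, then reinterpret it
 * as unsigned for address arithmetic.
 */
static inline uint64_t register_align_64(uint32_t ptr_bits)
{
	return (uint64_t)(int64_t)(int32_t)ptr_bits;
}

int main(void)
{
	/* A kernel virtual address with the top bit set. */
	uint32_t va = 0x8c001000u;

	/* Sign extension keeps it in the upper address region:
	 * prints 0xffffffff8c001000. */
	printf("%#llx\n", (unsigned long long)register_align_64(va));
	return 0;
}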
Diffstat (limited to 'arch')
-rw-r--r--	arch/sh/include/asm/system_32.h	5
-rw-r--r--	arch/sh/include/asm/system_64.h	5
-rw-r--r--	arch/sh/include/asm/types.h	2
-rw-r--r--	arch/sh/mm/flush-sh4.c	21
4 files changed, 24 insertions, 9 deletions
diff --git a/arch/sh/include/asm/system_32.h b/arch/sh/include/asm/system_32.h
index 6c68a51f1cc5..d7299d69ff79 100644
--- a/arch/sh/include/asm/system_32.h
+++ b/arch/sh/include/asm/system_32.h
@@ -198,6 +198,11 @@ do { \
 })
 #endif
 
+static inline reg_size_t register_align(void *val)
+{
+	return (unsigned long)(signed long)val;
+}
+
 int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
 			    struct mem_access *ma);
 
diff --git a/arch/sh/include/asm/system_64.h b/arch/sh/include/asm/system_64.h
index 943acf5ea07c..218b54d9d660 100644
--- a/arch/sh/include/asm/system_64.h
+++ b/arch/sh/include/asm/system_64.h
@@ -37,4 +37,9 @@ do { \
 #define jump_to_uncached()	do { } while (0)
 #define back_to_cached()	do { } while (0)
 
+static inline reg_size_t register_align(void *val)
+{
+	return (unsigned long long)(signed long long)(signed long)val;
+}
+
 #endif /* __ASM_SH_SYSTEM_64_H */
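The 64-bit variant deliberately casts through signed long and signed long long before the final unsigned conversion: under an SH-5 ABI where pointers are narrower than registers, this sign-extends the 32-bit pointer value into the full 64-bit register width (kernel addresses have the top bit set), which a plain unsigned widening would not do. The 32-bit variant is the same conversion collapsed to register width.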
diff --git a/arch/sh/include/asm/types.h b/arch/sh/include/asm/types.h
index c7f3c94837dd..f8421f7ad63a 100644
--- a/arch/sh/include/asm/types.h
+++ b/arch/sh/include/asm/types.h
@@ -11,8 +11,10 @@
 
 #ifdef CONFIG_SUPERH32
 typedef u16 insn_size_t;
+typedef u32 reg_size_t;
 #else
 typedef u32 insn_size_t;
+typedef u64 reg_size_t;
 #endif
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/sh/mm/flush-sh4.c b/arch/sh/mm/flush-sh4.c
index edefc53891a8..1b6b6a12a99b 100644
--- a/arch/sh/mm/flush-sh4.c
+++ b/arch/sh/mm/flush-sh4.c
@@ -10,10 +10,11 @@
  */
 void __weak __flush_wback_region(void *start, int size)
 {
-	unsigned long v, cnt, end;
+	reg_size_t aligned_start, v, cnt, end;
 
-	v = (unsigned long)start & ~(L1_CACHE_BYTES-1);
-	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+	aligned_start = register_align(start);
+	v = aligned_start & ~(L1_CACHE_BYTES-1);
+	end = (aligned_start + size + L1_CACHE_BYTES-1)
 	      & ~(L1_CACHE_BYTES-1);
 	cnt = (end - v) / L1_CACHE_BYTES;
 
@@ -52,10 +53,11 @@ void __weak __flush_wback_region(void *start, int size)
  */
 void __weak __flush_purge_region(void *start, int size)
 {
-	unsigned long v, cnt, end;
+	reg_size_t aligned_start, v, cnt, end;
 
-	v = (unsigned long)start & ~(L1_CACHE_BYTES-1);
-	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+	aligned_start = register_align(start);
+	v = aligned_start & ~(L1_CACHE_BYTES-1);
+	end = (aligned_start + size + L1_CACHE_BYTES-1)
 	      & ~(L1_CACHE_BYTES-1);
 	cnt = (end - v) / L1_CACHE_BYTES;
 
@@ -90,10 +92,11 @@ void __weak __flush_purge_region(void *start, int size)
  */
 void __weak __flush_invalidate_region(void *start, int size)
 {
-	unsigned long v, cnt, end;
+	reg_size_t aligned_start, v, cnt, end;
 
-	v = (unsigned long)start & ~(L1_CACHE_BYTES-1);
-	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+	aligned_start = register_align(start);
+	v = aligned_start & ~(L1_CACHE_BYTES-1);
+	end = (aligned_start + size + L1_CACHE_BYTES-1)
 	      & ~(L1_CACHE_BYTES-1);
 	cnt = (end - v) / L1_CACHE_BYTES;
 
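For reference, a minimal host-runnable sketch of the rounding arithmetic the three flushers now share, assuming a 32-byte L1 cache line; the start address and size are hypothetical:

#include <stdint.h>
#include <stdio.h>

#define L1_CACHE_BYTES 32	/* assumed line size for this sketch */

int main(void)
{
	uint64_t aligned_start = 0x8c001005ull;	/* unaligned start */
	int size = 100;

	/* Round the start down to a cache-line boundary... */
	uint64_t v = aligned_start & ~(uint64_t)(L1_CACHE_BYTES - 1);
	/* ...round start + size up to the next boundary... */
	uint64_t end = (aligned_start + size + L1_CACHE_BYTES - 1)
		       & ~(uint64_t)(L1_CACHE_BYTES - 1);
	/* ...and count the whole cache lines in between. */
	uint64_t cnt = (end - v) / L1_CACHE_BYTES;

	/* Prints: v=0x8c001000 end=0x8c001080 cnt=4 */
	printf("v=%#llx end=%#llx cnt=%llu\n",
	       (unsigned long long)v, (unsigned long long)end,
	       (unsigned long long)cnt);
	return 0;
}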