author    | Paul Mundt <lethal@linux-sh.org> | 2007-11-10 05:46:31 -0500
committer | Paul Mundt <lethal@linux-sh.org> | 2008-01-27 23:18:42 -0500
commit    | a62a3861e0adfd2612372883b5b1fc05a5182796 (patch)
tree      | d3ff2c41f2a059173c8959f006bef770de528a0b /include/asm-sh/system.h
parent    | 36bcd39dbca824daffe16d607ae574b6edc7d31a (diff)
sh: Split out system.h in to _32 and _64 variants.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
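The patch leaves the definitions shared by both ISAs (the control barriers, the xchg()/cmpxchg() helpers, the trap handlers, arch_align_stack()) in the common header and pushes everything word-size specific down into two new variant headers, which system.h pulls in at its tail. A minimal sketch of the dispatch added at the bottom of the file in the diff below; the comments on what each variant carries are an assumption inferred from the code removed here, not part of the patch:

```c
/* include/asm-sh/system.h -- common tail added by this patch */
#ifdef CONFIG_SUPERH32
# include "system_32.h"	/* presumably the SH-3/SH-4 switch_to() and P1/P2 helpers */
#else
# include "system_64.h"	/* presumably the SH-5 (64-bit) counterparts */
#endif
```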
Diffstat (limited to 'include/asm-sh/system.h')
-rw-r--r-- | include/asm-sh/system.h | 97
1 file changed, 8 insertions, 89 deletions
```diff
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index 288abeb5476f..0cfa96aa5844 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -12,60 +12,9 @@
 #include <asm/types.h>
 #include <asm/ptrace.h>
 
-struct task_struct *__switch_to(struct task_struct *prev,
-		struct task_struct *next);
-
 #define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */
-/*
- * switch_to() should switch tasks to task nr n, first
- */
-
-#define switch_to(prev, next, last) do { \
-	struct task_struct *__last; \
-	register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \
-	register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \
-	register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \
-	register unsigned long *__ts5 __asm__ ("r5") = (unsigned long *)next; \
-	register unsigned long *__ts6 __asm__ ("r6") = &next->thread.sp; \
-	register unsigned long __ts7 __asm__ ("r7") = next->thread.pc; \
-	__asm__ __volatile__ (".balign 4\n\t" \
-			      "stc.l gbr, @-r15\n\t" \
-			      "sts.l pr, @-r15\n\t" \
-			      "mov.l r8, @-r15\n\t" \
-			      "mov.l r9, @-r15\n\t" \
-			      "mov.l r10, @-r15\n\t" \
-			      "mov.l r11, @-r15\n\t" \
-			      "mov.l r12, @-r15\n\t" \
-			      "mov.l r13, @-r15\n\t" \
-			      "mov.l r14, @-r15\n\t" \
-			      "mov.l r15, @r1 ! save SP\n\t" \
-			      "mov.l @r6, r15 ! change to new stack\n\t" \
-			      "mova 1f, %0\n\t" \
-			      "mov.l %0, @r2 ! save PC\n\t" \
-			      "mov.l 2f, %0\n\t" \
-			      "jmp @%0 ! call __switch_to\n\t" \
-			      " lds r7, pr ! with return to new PC\n\t" \
-			      ".balign 4\n" \
-			      "2:\n\t" \
-			      ".long __switch_to\n" \
-			      "1:\n\t" \
-			      "mov.l @r15+, r14\n\t" \
-			      "mov.l @r15+, r13\n\t" \
-			      "mov.l @r15+, r12\n\t" \
-			      "mov.l @r15+, r11\n\t" \
-			      "mov.l @r15+, r10\n\t" \
-			      "mov.l @r15+, r9\n\t" \
-			      "mov.l @r15+, r8\n\t" \
-			      "lds.l @r15+, pr\n\t" \
-			      "ldc.l @r15+, gbr\n\t" \
-			      : "=z" (__last) \
-			      : "r" (__ts1), "r" (__ts2), "r" (__ts4), \
-				"r" (__ts5), "r" (__ts6), "r" (__ts7) \
-			      : "r3", "t"); \
-	last = __last; \
-} while (0)
 
-#ifdef CONFIG_CPU_SH4A
+#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
 #define __icbi() \
 { \
 	unsigned long __addr; \
@@ -91,7 +40,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
  * Historically we have only done this type of barrier for the MMUCR, but
  * it's also necessary for the CCR, so we make it generic here instead.
  */
-#ifdef CONFIG_CPU_SH4A
+#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
 #define mb()	__asm__ __volatile__ ("synco": : :"memory")
 #define rmb()	mb()
 #define wmb()	__asm__ __volatile__ ("synco": : :"memory")
@@ -119,42 +68,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
 
 #define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
 
-/*
- * Jump to P2 area.
- * When handling TLB or caches, we need to do it from P2 area.
- */
-#define jump_to_P2() \
-do { \
-	unsigned long __dummy; \
-	__asm__ __volatile__( \
-		"mov.l 1f, %0\n\t" \
-		"or %1, %0\n\t" \
-		"jmp @%0\n\t" \
-		" nop\n\t" \
-		".balign 4\n" \
-		"1: .long 2f\n" \
-		"2:" \
-		: "=&r" (__dummy) \
-		: "r" (0x20000000)); \
-} while (0)
-
-/*
- * Back to P1 area.
- */
-#define back_to_P1() \
-do { \
-	unsigned long __dummy; \
-	ctrl_barrier(); \
-	__asm__ __volatile__( \
-		"mov.l 1f, %0\n\t" \
-		"jmp @%0\n\t" \
-		" nop\n\t" \
-		".balign 4\n" \
-		"1: .long 2f\n" \
-		"2:" \
-		: "=&r" (__dummy)); \
-} while (0)
-
 static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
 {
 	unsigned long flags, retval;
@@ -281,4 +194,10 @@ asmlinkage void bug_trap_handler(unsigned long r4, unsigned long r5,
 
 #define arch_align_stack(x) (x)
 
+#ifdef CONFIG_SUPERH32
+# include "system_32.h"
+#else
+# include "system_64.h"
+#endif
+
 #endif
```