Diffstat (limited to 'arch/sh/kernel/process_32.c')
-rw-r--r--	arch/sh/kernel/process_32.c	164
1 file changed, 24 insertions, 140 deletions
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index d8af889366a4..3cb88f114d7a 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -16,65 +16,15 @@
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/elfcore.h>
-#include <linux/pm.h>
 #include <linux/kallsyms.h>
-#include <linux/kexec.h>
-#include <linux/kdebug.h>
-#include <linux/tick.h>
-#include <linux/reboot.h>
 #include <linux/fs.h>
 #include <linux/ftrace.h>
-#include <linux/preempt.h>
+#include <linux/hw_breakpoint.h>
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
-#include <asm/pgalloc.h>
 #include <asm/system.h>
-#include <asm/ubc.h>
 #include <asm/fpu.h>
 #include <asm/syscalls.h>
-#include <asm/watchdog.h>
-
-int ubc_usercnt = 0;
-
-#ifdef CONFIG_32BIT
-static void watchdog_trigger_immediate(void)
-{
-	sh_wdt_write_cnt(0xFF);
-	sh_wdt_write_csr(0xC2);
-}
-
-void machine_restart(char * __unused)
-{
-	local_irq_disable();
-
-	/* Use watchdog timer to trigger reset */
-	watchdog_trigger_immediate();
-
-	while (1)
-		cpu_sleep();
-}
-#else
-void machine_restart(char * __unused)
-{
-	/* SR.BL=1 and invoke address error to let CPU reset (manual reset) */
-	asm volatile("ldc %0, sr\n\t"
-		     "mov.l @%1, %0" : : "r" (0x10000000), "r" (0x80000001));
-}
-#endif
-
-void machine_halt(void)
-{
-	local_irq_disable();
-
-	while (1)
-		cpu_sleep();
-}
-
-void machine_power_off(void)
-{
-	if (pm_power_off)
-		pm_power_off();
-}
 
 void show_regs(struct pt_regs * regs)
 {
@@ -91,7 +41,7 @@ void show_regs(struct pt_regs * regs)
 	printk("PC : %08lx SP : %08lx SR : %08lx ",
 	       regs->pc, regs->regs[15], regs->sr);
 #ifdef CONFIG_MMU
-	printk("TEA : %08x\n", ctrl_inl(MMU_TEA));
+	printk("TEA : %08x\n", __raw_readl(MMU_TEA));
 #else
 	printk("\n");
 #endif
@@ -147,21 +97,34 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 }
 EXPORT_SYMBOL(kernel_thread);
 
+void start_thread(struct pt_regs *regs, unsigned long new_pc,
+		  unsigned long new_sp)
+{
+	set_fs(USER_DS);
+
+	regs->pr = 0;
+	regs->sr = SR_FD;
+	regs->pc = new_pc;
+	regs->regs[15] = new_sp;
+
+	free_thread_xstate(current);
+}
+EXPORT_SYMBOL(start_thread);
+
 /*
  * Free current thread data structures etc..
  */
 void exit_thread(void)
 {
-	if (current->thread.ubc_pc) {
-		current->thread.ubc_pc = 0;
-		ubc_usercnt -= 1;
-	}
 }
 
 void flush_thread(void)
 {
-#if defined(CONFIG_SH_FPU)
 	struct task_struct *tsk = current;
+
+	flush_ptrace_hw_breakpoint(tsk);
+
+#if defined(CONFIG_SH_FPU)
 	/* Forget lazy FPU state */
 	clear_fpu(tsk, task_pt_regs(tsk));
 	clear_used_math();
@@ -209,11 +172,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 {
 	struct thread_info *ti = task_thread_info(p);
 	struct pt_regs *childregs;
+
 #if defined(CONFIG_SH_DSP)
 	struct task_struct *tsk = current;
-#endif
 
-#if defined(CONFIG_SH_DSP)
 	if (is_dsp_enabled(tsk)) {
 		/* We can use the __save_dsp or just copy the struct:
 		 * __save_dsp(p);
@@ -244,53 +206,11 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	p->thread.sp = (unsigned long) childregs;
 	p->thread.pc = (unsigned long) ret_from_fork;
 
-	p->thread.ubc_pc = 0;
+	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
 
 	return 0;
 }
 
-/* Tracing by user break controller. */
-static void ubc_set_tracing(int asid, unsigned long pc)
-{
-#if defined(CONFIG_CPU_SH4A)
-	unsigned long val;
-
-	val = (UBC_CBR_ID_INST | UBC_CBR_RW_READ | UBC_CBR_CE);
-	val |= (UBC_CBR_AIE | UBC_CBR_AIV_SET(asid));
-
-	ctrl_outl(val, UBC_CBR0);
-	ctrl_outl(pc, UBC_CAR0);
-	ctrl_outl(0x0, UBC_CAMR0);
-	ctrl_outl(0x0, UBC_CBCR);
-
-	val = (UBC_CRR_RES | UBC_CRR_PCB | UBC_CRR_BIE);
-	ctrl_outl(val, UBC_CRR0);
-
-	/* Read UBC register that we wrote last, for checking update */
-	val = ctrl_inl(UBC_CRR0);
-
-#else /* CONFIG_CPU_SH4A */
-	ctrl_outl(pc, UBC_BARA);
-
-#ifdef CONFIG_MMU
-	ctrl_outb(asid, UBC_BASRA);
-#endif
-
-	ctrl_outl(0, UBC_BAMRA);
-
-	if (current_cpu_data.type == CPU_SH7729 ||
-	    current_cpu_data.type == CPU_SH7710 ||
-	    current_cpu_data.type == CPU_SH7712 ||
-	    current_cpu_data.type == CPU_SH7203){
-		ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA);
-		ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR);
-	} else {
-		ctrl_outw(BBR_INST | BBR_READ, UBC_BBRA);
-		ctrl_outw(BRCR_PCBA, UBC_BRCR);
-	}
-#endif /* CONFIG_CPU_SH4A */
-}
-
 /*
  * switch_to(x,y) should switch tasks from x to y.
  *
@@ -304,7 +224,7 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 
 	/* we're going to use this soon, after a few expensive things */
 	if (next->fpu_counter > 5)
-		prefetch(&next_t->fpu.hard);
+		prefetch(next_t->xstate);
 
 #ifdef CONFIG_MMU
 	/*
@@ -316,32 +236,13 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 		     : "r" (task_thread_info(next)));
 #endif
 
-	/* If no tasks are using the UBC, we're done */
-	if (ubc_usercnt == 0)
-		/* If no tasks are using the UBC, we're done */;
-	else if (next->thread.ubc_pc && next->mm) {
-		int asid = 0;
-#ifdef CONFIG_MMU
-		asid |= cpu_asid(smp_processor_id(), next->mm);
-#endif
-		ubc_set_tracing(asid, next->thread.ubc_pc);
-	} else {
-#if defined(CONFIG_CPU_SH4A)
-		ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
-		ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
-#else
-		ctrl_outw(0, UBC_BBRA);
-		ctrl_outw(0, UBC_BBRB);
-#endif
-	}
-
 	/*
 	 * If the task has used fpu the last 5 timeslices, just do a full
 	 * restore of the math state immediately to avoid the trap; the
 	 * chances of needing FPU soon are obviously high now
 	 */
 	if (next->fpu_counter > 5)
-		fpu_state_restore(task_pt_regs(next));
+		__fpu_state_restore();
 
 	return prev;
 }
@@ -434,20 +335,3 @@ unsigned long get_wchan(struct task_struct *p)
 
 	return pc;
 }
-
-asmlinkage void break_point_trap(void)
-{
-	/* Clear tracing. */
-#if defined(CONFIG_CPU_SH4A)
-	ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
-	ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
-#else
-	ctrl_outw(0, UBC_BBRA);
-	ctrl_outw(0, UBC_BBRB);
-	ctrl_outl(0, UBC_BRCR);
-#endif
-	current->thread.ubc_pc = 0;
-	ubc_usercnt -= 1;
-
-	force_sig(SIGTRAP, current);
-}