aboutsummaryrefslogtreecommitdiffstats
path: root/arch/sh/kernel/process_32.c
diff options
context:
space:
mode:
authorAndrea Bastoni <bastoni@cs.unc.edu>2010-05-30 19:16:45 -0400
committerAndrea Bastoni <bastoni@cs.unc.edu>2010-05-30 19:16:45 -0400
commitada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree644b88f8a71896307d71438e9b3af49126ffb22b /arch/sh/kernel/process_32.c
parent43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-master (tag: archived-private-master)
Diffstat (limited to 'arch/sh/kernel/process_32.c')
-rw-r--r-- arch/sh/kernel/process_32.c  201
1 file changed, 54 insertions, 147 deletions
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 0673c4746be3..052981972ae6 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -15,66 +15,17 @@
15 */ 15 */
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/mm.h> 17#include <linux/mm.h>
18#include <linux/slab.h>
18#include <linux/elfcore.h> 19#include <linux/elfcore.h>
19#include <linux/pm.h>
20#include <linux/kallsyms.h> 20#include <linux/kallsyms.h>
21#include <linux/kexec.h>
22#include <linux/kdebug.h>
23#include <linux/tick.h>
24#include <linux/reboot.h>
25#include <linux/fs.h> 21#include <linux/fs.h>
26#include <linux/ftrace.h> 22#include <linux/ftrace.h>
27#include <linux/preempt.h> 23#include <linux/hw_breakpoint.h>
28#include <asm/uaccess.h> 24#include <asm/uaccess.h>
29#include <asm/mmu_context.h> 25#include <asm/mmu_context.h>
30#include <asm/pgalloc.h>
31#include <asm/system.h> 26#include <asm/system.h>
32#include <asm/ubc.h>
33#include <asm/fpu.h> 27#include <asm/fpu.h>
34#include <asm/syscalls.h> 28#include <asm/syscalls.h>
35#include <asm/watchdog.h>
36
37int ubc_usercnt = 0;
38
39#ifdef CONFIG_32BIT
40static void watchdog_trigger_immediate(void)
41{
42 sh_wdt_write_cnt(0xFF);
43 sh_wdt_write_csr(0xC2);
44}
45
46void machine_restart(char * __unused)
47{
48 local_irq_disable();
49
50 /* Use watchdog timer to trigger reset */
51 watchdog_trigger_immediate();
52
53 while (1)
54 cpu_sleep();
55}
56#else
57void machine_restart(char * __unused)
58{
59 /* SR.BL=1 and invoke address error to let CPU reset (manual reset) */
60 asm volatile("ldc %0, sr\n\t"
61 "mov.l @%1, %0" : : "r" (0x10000000), "r" (0x80000001));
62}
63#endif
64
65void machine_halt(void)
66{
67 local_irq_disable();
68
69 while (1)
70 cpu_sleep();
71}
72
73void machine_power_off(void)
74{
75 if (pm_power_off)
76 pm_power_off();
77}
78 29
79void show_regs(struct pt_regs * regs) 30void show_regs(struct pt_regs * regs)
80{ 31{
@@ -91,7 +42,7 @@ void show_regs(struct pt_regs * regs)
91 printk("PC : %08lx SP : %08lx SR : %08lx ", 42 printk("PC : %08lx SP : %08lx SR : %08lx ",
92 regs->pc, regs->regs[15], regs->sr); 43 regs->pc, regs->regs[15], regs->sr);
93#ifdef CONFIG_MMU 44#ifdef CONFIG_MMU
94 printk("TEA : %08x\n", ctrl_inl(MMU_TEA)); 45 printk("TEA : %08x\n", __raw_readl(MMU_TEA));
95#else 46#else
96 printk("\n"); 47 printk("\n");
97#endif 48#endif
@@ -134,7 +85,10 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
134 regs.regs[5] = (unsigned long)fn; 85 regs.regs[5] = (unsigned long)fn;
135 86
136 regs.pc = (unsigned long)kernel_thread_helper; 87 regs.pc = (unsigned long)kernel_thread_helper;
137 regs.sr = (1 << 30); 88 regs.sr = SR_MD;
89#if defined(CONFIG_SH_FPU)
90 regs.sr |= SR_FD;
91#endif
138 92
139 /* Ok, create the new process.. */ 93 /* Ok, create the new process.. */
140 pid = do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, 94 pid = do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
@@ -142,22 +96,36 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
142 96
143 return pid; 97 return pid;
144} 98}
99EXPORT_SYMBOL(kernel_thread);
100
101void start_thread(struct pt_regs *regs, unsigned long new_pc,
102 unsigned long new_sp)
103{
104 set_fs(USER_DS);
105
106 regs->pr = 0;
107 regs->sr = SR_FD;
108 regs->pc = new_pc;
109 regs->regs[15] = new_sp;
110
111 free_thread_xstate(current);
112}
113EXPORT_SYMBOL(start_thread);
145 114
146/* 115/*
147 * Free current thread data structures etc.. 116 * Free current thread data structures etc..
148 */ 117 */
149void exit_thread(void) 118void exit_thread(void)
150{ 119{
151 if (current->thread.ubc_pc) {
152 current->thread.ubc_pc = 0;
153 ubc_usercnt -= 1;
154 }
155} 120}
156 121
157void flush_thread(void) 122void flush_thread(void)
158{ 123{
159#if defined(CONFIG_SH_FPU)
160 struct task_struct *tsk = current; 124 struct task_struct *tsk = current;
125
126 flush_ptrace_hw_breakpoint(tsk);
127
128#if defined(CONFIG_SH_FPU)
161 /* Forget lazy FPU state */ 129 /* Forget lazy FPU state */
162 clear_fpu(tsk, task_pt_regs(tsk)); 130 clear_fpu(tsk, task_pt_regs(tsk));
163 clear_used_math(); 131 clear_used_math();
@@ -186,6 +154,16 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
186 154
187 return fpvalid; 155 return fpvalid;
188} 156}
157EXPORT_SYMBOL(dump_fpu);
158
159/*
160 * This gets called before we allocate a new thread and copy
161 * the current task into it.
162 */
163void prepare_to_copy(struct task_struct *tsk)
164{
165 unlazy_fpu(tsk, task_pt_regs(tsk));
166}
189 167
190asmlinkage void ret_from_fork(void); 168asmlinkage void ret_from_fork(void);
191 169
@@ -195,17 +173,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
195{ 173{
196 struct thread_info *ti = task_thread_info(p); 174 struct thread_info *ti = task_thread_info(p);
197 struct pt_regs *childregs; 175 struct pt_regs *childregs;
198#if defined(CONFIG_SH_FPU) || defined(CONFIG_SH_DSP)
199 struct task_struct *tsk = current;
200#endif
201
202#if defined(CONFIG_SH_FPU)
203 unlazy_fpu(tsk, regs);
204 p->thread.fpu = tsk->thread.fpu;
205 copy_to_stopped_child_used_math(p);
206#endif
207 176
208#if defined(CONFIG_SH_DSP) 177#if defined(CONFIG_SH_DSP)
178 struct task_struct *tsk = current;
179
209 if (is_dsp_enabled(tsk)) { 180 if (is_dsp_enabled(tsk)) {
210 /* We can use the __save_dsp or just copy the struct: 181 /* We can use the __save_dsp or just copy the struct:
211 * __save_dsp(p); 182 * __save_dsp(p);
@@ -224,6 +195,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
224 } else { 195 } else {
225 childregs->regs[15] = (unsigned long)childregs; 196 childregs->regs[15] = (unsigned long)childregs;
226 ti->addr_limit = KERNEL_DS; 197 ti->addr_limit = KERNEL_DS;
198 ti->status &= ~TS_USEDFPU;
199 p->fpu_counter = 0;
227 } 200 }
228 201
229 if (clone_flags & CLONE_SETTLS) 202 if (clone_flags & CLONE_SETTLS)
@@ -234,53 +207,11 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
234 p->thread.sp = (unsigned long) childregs; 207 p->thread.sp = (unsigned long) childregs;
235 p->thread.pc = (unsigned long) ret_from_fork; 208 p->thread.pc = (unsigned long) ret_from_fork;
236 209
237 p->thread.ubc_pc = 0; 210 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
238 211
239 return 0; 212 return 0;
240} 213}
241 214
242/* Tracing by user break controller. */
243static void ubc_set_tracing(int asid, unsigned long pc)
244{
245#if defined(CONFIG_CPU_SH4A)
246 unsigned long val;
247
248 val = (UBC_CBR_ID_INST | UBC_CBR_RW_READ | UBC_CBR_CE);
249 val |= (UBC_CBR_AIE | UBC_CBR_AIV_SET(asid));
250
251 ctrl_outl(val, UBC_CBR0);
252 ctrl_outl(pc, UBC_CAR0);
253 ctrl_outl(0x0, UBC_CAMR0);
254 ctrl_outl(0x0, UBC_CBCR);
255
256 val = (UBC_CRR_RES | UBC_CRR_PCB | UBC_CRR_BIE);
257 ctrl_outl(val, UBC_CRR0);
258
259 /* Read UBC register that we wrote last, for checking update */
260 val = ctrl_inl(UBC_CRR0);
261
262#else /* CONFIG_CPU_SH4A */
263 ctrl_outl(pc, UBC_BARA);
264
265#ifdef CONFIG_MMU
266 ctrl_outb(asid, UBC_BASRA);
267#endif
268
269 ctrl_outl(0, UBC_BAMRA);
270
271 if (current_cpu_data.type == CPU_SH7729 ||
272 current_cpu_data.type == CPU_SH7710 ||
273 current_cpu_data.type == CPU_SH7712 ||
274 current_cpu_data.type == CPU_SH7203){
275 ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA);
276 ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR);
277 } else {
278 ctrl_outw(BBR_INST | BBR_READ, UBC_BBRA);
279 ctrl_outw(BRCR_PCBA, UBC_BRCR);
280 }
281#endif /* CONFIG_CPU_SH4A */
282}
283
284/* 215/*
285 * switch_to(x,y) should switch tasks from x to y. 216 * switch_to(x,y) should switch tasks from x to y.
286 * 217 *
@@ -288,9 +219,13 @@ static void ubc_set_tracing(int asid, unsigned long pc)
288__notrace_funcgraph struct task_struct * 219__notrace_funcgraph struct task_struct *
289__switch_to(struct task_struct *prev, struct task_struct *next) 220__switch_to(struct task_struct *prev, struct task_struct *next)
290{ 221{
291#if defined(CONFIG_SH_FPU) 222 struct thread_struct *next_t = &next->thread;
223
292 unlazy_fpu(prev, task_pt_regs(prev)); 224 unlazy_fpu(prev, task_pt_regs(prev));
293#endif 225
226 /* we're going to use this soon, after a few expensive things */
227 if (next->fpu_counter > 5)
228 prefetch(next_t->xstate);
294 229
295#ifdef CONFIG_MMU 230#ifdef CONFIG_MMU
296 /* 231 /*
@@ -302,24 +237,13 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
302 : "r" (task_thread_info(next))); 237 : "r" (task_thread_info(next)));
303#endif 238#endif
304 239
305 /* If no tasks are using the UBC, we're done */ 240 /*
306 if (ubc_usercnt == 0) 241 * If the task has used fpu the last 5 timeslices, just do a full
307 /* If no tasks are using the UBC, we're done */; 242 * restore of the math state immediately to avoid the trap; the
308 else if (next->thread.ubc_pc && next->mm) { 243 * chances of needing FPU soon are obviously high now
309 int asid = 0; 244 */
310#ifdef CONFIG_MMU 245 if (next->fpu_counter > 5)
311 asid |= cpu_asid(smp_processor_id(), next->mm); 246 __fpu_state_restore();
312#endif
313 ubc_set_tracing(asid, next->thread.ubc_pc);
314 } else {
315#if defined(CONFIG_CPU_SH4A)
316 ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
317 ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
318#else
319 ctrl_outw(0, UBC_BBRA);
320 ctrl_outw(0, UBC_BBRB);
321#endif
322 }
323 247
324 return prev; 248 return prev;
325} 249}
@@ -412,20 +336,3 @@ unsigned long get_wchan(struct task_struct *p)
412 336
413 return pc; 337 return pc;
414} 338}
415
416asmlinkage void break_point_trap(void)
417{
418 /* Clear tracing. */
419#if defined(CONFIG_CPU_SH4A)
420 ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
421 ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
422#else
423 ctrl_outw(0, UBC_BBRA);
424 ctrl_outw(0, UBC_BBRB);
425 ctrl_outl(0, UBC_BRCR);
426#endif
427 current->thread.ubc_pc = 0;
428 ubc_usercnt -= 1;
429
430 force_sig(SIGTRAP, current);
431}