Diffstat (limited to 'arch/tile/kernel/process.c')
 -rw-r--r--  arch/tile/kernel/process.c  647
 1 file changed, 647 insertions, 0 deletions
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
new file mode 100644
index 000000000000..824f230e6d1a
--- /dev/null
+++ b/arch/tile/kernel/process.c
@@ -0,0 +1,647 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/sched.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kprobes.h>
#include <linux/elfcore.h>
#include <linux/tick.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/hardirq.h>
#include <linux/syscalls.h>
#include <asm/system.h>
#include <asm/stack.h>
#include <asm/homecache.h>
#include <arch/chip.h>
#include <arch/abi.h>

/*
 * Use the (x86) "idle=poll" boot option to prefer low exit latency
 * from the idle loop over low power while idling, e.g. if we have
 * one thread per core and want to get threads out of futex waits fast.
 */
static int no_idle_nap;
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		printk("using polling idle threads.\n");
		no_idle_nap = 1;
	} else if (!strcmp(str, "halt"))
		no_idle_nap = 0;
	else
		return -1;

	return 0;
}
early_param("idle", idle_setup);
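
/*
 * For example, booting with "idle=poll" on the kernel command line
 * makes early_param() pass "poll" to idle_setup() above, selecting
 * the busy-wait loop in cpu_idle() below; "idle=halt" keeps the
 * default low-power nap behavior.
 */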

/*
 * The idle thread.  There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (i.e. sit in a loop waiting for
 * somebody to say that they'd like to reschedule).
 */
void cpu_idle(void)
{
	extern void _cpu_idle(void);
	int cpu = smp_processor_id();

	current_thread_info()->status |= TS_POLLING;

	if (no_idle_nap) {
		while (1) {
			while (!need_resched())
				cpu_relax();
			schedule();
		}
	}

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {
			if (cpu_is_offline(cpu))
				BUG();  /* no HOTPLUG_CPU */

			local_irq_disable();
			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
			current_thread_info()->status &= ~TS_POLLING;
			/*
			 * TS_POLLING-cleared state must be visible before we
			 * test NEED_RESCHED: a remote resched_task() that
			 * sees TS_POLLING set will skip the reschedule IPI
			 * and rely on this polling loop to notice
			 * need_resched() on its own, so once we stop polling
			 * we must re-check before napping.
			 */
			smp_mb();

			if (!need_resched())
				_cpu_idle();
			else
				local_irq_enable();
			current_thread_info()->status |= TS_POLLING;
		}
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

struct thread_info *alloc_thread_info(struct task_struct *task)
{
	struct page *page;
	gfp_t flags = GFP_KERNEL;

#ifdef CONFIG_DEBUG_STACK_USAGE
	flags |= __GFP_ZERO;
#endif

	page = alloc_pages(flags, THREAD_SIZE_ORDER);
	if (!page)
		return NULL;

	return (struct thread_info *)page_address(page);
}

/*
 * Free a thread_info node, and all of its derivative
 * data structures.
 */
void free_thread_info(struct thread_info *info)
{
	struct single_step_state *step_state = info->step_state;

	if (step_state) {
		/*
		 * FIXME: we don't munmap step_state->buffer
		 * because the mm_struct for this process (info->task->mm)
		 * has already been zeroed in exit_mm().  Keeping a
		 * reference to it here seems like a bad move, so this
		 * means we can't munmap() the buffer, and therefore if we
		 * ptrace multiple threads in a process, we will slowly
		 * leak user memory.  (Note that as soon as the last
		 * thread in a process dies, we will reclaim all user
		 * memory including single-step buffers in the usual way.)
		 * We should either assign a kernel VA to this buffer
		 * somehow, or we should associate the buffer(s) with the
		 * mm itself so we can clean them up that way.
		 */
		kfree(step_state);
	}

	free_pages((unsigned long)info, THREAD_SIZE_ORDER);
}

static void save_arch_state(struct thread_struct *t);

extern void ret_from_fork(void);

int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long stack_size,
		struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
	unsigned long ksp;

	/*
	 * When creating a new kernel thread we pass sp as zero.
	 * Assign it to a reasonable value now that we have the stack.
	 */
	if (sp == 0 && regs->ex1 == PL_ICS_EX1(KERNEL_PL, 0))
		sp = KSTK_TOP(p);

	/*
	 * Do not clone step state from the parent; each thread
	 * must make its own lazily.
	 */
	task_thread_info(p)->step_state = NULL;

	/*
	 * Start new thread in ret_from_fork so it schedules properly
	 * and then return from interrupt like the parent.
	 */
	p->thread.pc = (unsigned long) ret_from_fork;

	/* Save user stack top pointer so we can ID the stack vm area later. */
	p->thread.usp0 = sp;

	/* Record the pid of the process that created this one. */
	p->thread.creator_pid = current->pid;

	/*
	 * Copy the registers onto the kernel stack so the
	 * return-from-interrupt code will reload it into registers.
	 */
	childregs = task_pt_regs(p);
	*childregs = *regs;
	childregs->regs[0] = 0;  /* return value is zero */
	childregs->sp = sp;      /* override with new user stack pointer */

	/*
	 * Copy the callee-saved registers from the passed pt_regs struct
	 * into the context-switch callee-saved registers area.
	 * We have to restore the callee-saved registers since we may
	 * be cloning a userspace task with userspace register state,
	 * and we won't be unwinding the same kernel frames to restore them.
	 * Zero out the C ABI save area to mark the top of the stack.
	 */
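	/*
	 * A sketch of the resulting kernel stack layout, derived from
	 * the assignments below (lowest address first):
	 *
	 *   [C ABI save area for __switch_to()]   <- p->thread.ksp
	 *   [callee-saved register save area]
	 *   [C ABI save area for interrupt entry]
	 *   [struct pt_regs (childregs)]           <- task_pt_regs(p)
	 */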
	ksp = (unsigned long) childregs;
	ksp -= C_ABI_SAVE_AREA_SIZE;   /* interrupt-entry save area */
	((long *)ksp)[0] = ((long *)ksp)[1] = 0;
	ksp -= CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long);
	memcpy((void *)ksp, &regs->regs[CALLEE_SAVED_FIRST_REG],
	       CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long));
	ksp -= C_ABI_SAVE_AREA_SIZE;   /* __switch_to() save area */
	((long *)ksp)[0] = ((long *)ksp)[1] = 0;
	p->thread.ksp = ksp;

#if CHIP_HAS_TILE_DMA()
	/*
	 * No DMA in the new thread.  We model this on the fact that
	 * fork() clears the pending signals, alarms, and aio for the child.
	 */
	memset(&p->thread.tile_dma_state, 0, sizeof(struct tile_dma_state));
	memset(&p->thread.dma_async_tlb, 0, sizeof(struct async_tlb));
#endif

#if CHIP_HAS_SN_PROC()
	/* Likewise, the new thread is not running static processor code. */
	p->thread.sn_proc_running = 0;
	memset(&p->thread.sn_async_tlb, 0, sizeof(struct async_tlb));
#endif

#if CHIP_HAS_PROC_STATUS_SPR()
	/* New thread has its miscellaneous processor state bits clear. */
	p->thread.proc_status = 0;
#endif

	/*
	 * Start the new thread with the current architecture state
	 * (user interrupt masks, etc.).
	 */
	save_arch_state(&p->thread);

	return 0;
}

/*
 * Return "current" if it looks plausible, or else a pointer to a dummy.
 * This can be helpful if we are just trying to emit a clean panic.
 */
struct task_struct *validate_current(void)
{
	static struct task_struct corrupt = { .comm = "<corrupt>" };
	struct task_struct *tsk = current;
	if (unlikely((unsigned long)tsk < PAGE_OFFSET ||
		     (void *)tsk > high_memory ||
		     ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) {
		printk("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer);
		tsk = &corrupt;
	}
	return tsk;
}

/* Take and return the pointer to the previous task, for schedule_tail(). */
struct task_struct *sim_notify_fork(struct task_struct *prev)
{
	struct task_struct *tsk = current;
	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK_PARENT |
		     (tsk->thread.creator_pid << _SIM_CONTROL_OPERATOR_BITS));
	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK |
		     (tsk->pid << _SIM_CONTROL_OPERATOR_BITS));
	return prev;
}

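/*
 * Fill in the ELF general-register set for a core dump of this task.
 * The nonzero return value tells the core-dump code that the
 * registers were successfully copied.
 */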
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs *ptregs = task_pt_regs(tsk);
	elf_core_copy_regs(regs, ptregs);
	return 1;
}

#if CHIP_HAS_TILE_DMA()

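/*
 * Background on the two functions below, assuming the usual tile MPL
 * convention: writing 1 to an SPR_MPL_xxx_SET_<N> register sets the
 * "minimum protection level" of that SPR group to N, so the _SET_0
 * writes expose the DMA SPRs to user space (PL0) while the _SET_1
 * writes reserve them to the kernel (PL1).
 */
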
/* Allow user processes to access the DMA SPRs */
void grant_dma_mpls(void)
{
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_0, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_0, 1);
}

/* Forbid user processes from accessing the DMA SPRs */
void restrict_dma_mpls(void)
{
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
}

/* Pause the DMA engine, then save off its state registers. */
static void save_tile_dma_state(struct tile_dma_state *dma)
{
	unsigned long state = __insn_mfspr(SPR_DMA_USER_STATUS);
	unsigned long post_suspend_state;

	/* If we're running, suspend the engine. */
	if ((state & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK)
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);

	/*
	 * Wait for the engine to idle, then save regs.  Note that we
	 * want to record the "running" bit from before suspension,
	 * and the "done" bit from after, so that we can properly
	 * distinguish a case where the user suspended the engine from
	 * the case where the kernel suspended as part of the context
	 * swap.
	 */
	do {
		post_suspend_state = __insn_mfspr(SPR_DMA_USER_STATUS);
	} while (post_suspend_state & SPR_DMA_STATUS__BUSY_MASK);

	dma->src = __insn_mfspr(SPR_DMA_SRC_ADDR);
	dma->src_chunk = __insn_mfspr(SPR_DMA_SRC_CHUNK_ADDR);
	dma->dest = __insn_mfspr(SPR_DMA_DST_ADDR);
	dma->dest_chunk = __insn_mfspr(SPR_DMA_DST_CHUNK_ADDR);
	dma->strides = __insn_mfspr(SPR_DMA_STRIDE);
	dma->chunk_size = __insn_mfspr(SPR_DMA_CHUNK_SIZE);
	dma->byte = __insn_mfspr(SPR_DMA_BYTE);
	dma->status = (state & SPR_DMA_STATUS__RUNNING_MASK) |
		(post_suspend_state & SPR_DMA_STATUS__DONE_MASK);
}

/* Restart a DMA that was running before we were context-switched out. */
static void restore_tile_dma_state(struct thread_struct *t)
{
	const struct tile_dma_state *dma = &t->tile_dma_state;

	/*
	 * The only way to restore the done bit is to run a zero
	 * length transaction.
	 */
	if ((dma->status & SPR_DMA_STATUS__DONE_MASK) &&
	    !(__insn_mfspr(SPR_DMA_USER_STATUS) & SPR_DMA_STATUS__DONE_MASK)) {
		__insn_mtspr(SPR_DMA_BYTE, 0);
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
		while (__insn_mfspr(SPR_DMA_USER_STATUS) &
		       SPR_DMA_STATUS__BUSY_MASK)
			;
	}

	__insn_mtspr(SPR_DMA_SRC_ADDR, dma->src);
	__insn_mtspr(SPR_DMA_SRC_CHUNK_ADDR, dma->src_chunk);
	__insn_mtspr(SPR_DMA_DST_ADDR, dma->dest);
	__insn_mtspr(SPR_DMA_DST_CHUNK_ADDR, dma->dest_chunk);
	__insn_mtspr(SPR_DMA_STRIDE, dma->strides);
	__insn_mtspr(SPR_DMA_CHUNK_SIZE, dma->chunk_size);
	__insn_mtspr(SPR_DMA_BYTE, dma->byte);

	/*
	 * Restart the engine if we were running and not done.
	 * Clear a pending async DMA fault that we were waiting on return
	 * to user space to execute, since we expect the DMA engine
	 * to regenerate those faults for us now.  Note that we don't
	 * try to clear the TIF_ASYNC_TLB flag, since it's relatively
	 * harmless if set, and it covers both DMA and the SN processor.
	 */
	if ((dma->status & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK) {
		t->dma_async_tlb.fault_num = 0;
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
	}
}

#endif

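/*
 * Save the per-thread SPR context (interrupt masks, exception
 * context, system save registers, etc.) of the task being switched
 * out, so it can be reinstated by restore_arch_state() below.
 */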
static void save_arch_state(struct thread_struct *t)
{
#if CHIP_HAS_SPLIT_INTR_MASK()
	t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0_0) |
		((u64)__insn_mfspr(SPR_INTERRUPT_MASK_0_1) << 32);
#else
	t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0);
#endif
	t->ex_context[0] = __insn_mfspr(SPR_EX_CONTEXT_0_0);
	t->ex_context[1] = __insn_mfspr(SPR_EX_CONTEXT_0_1);
	t->system_save[0] = __insn_mfspr(SPR_SYSTEM_SAVE_0_0);
	t->system_save[1] = __insn_mfspr(SPR_SYSTEM_SAVE_0_1);
	t->system_save[2] = __insn_mfspr(SPR_SYSTEM_SAVE_0_2);
	t->system_save[3] = __insn_mfspr(SPR_SYSTEM_SAVE_0_3);
	t->intctrl_0 = __insn_mfspr(SPR_INTCTRL_0_STATUS);
#if CHIP_HAS_PROC_STATUS_SPR()
	t->proc_status = __insn_mfspr(SPR_PROC_STATUS);
#endif
}

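/* Reload the SPR context captured by save_arch_state() above. */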
static void restore_arch_state(const struct thread_struct *t)
{
#if CHIP_HAS_SPLIT_INTR_MASK()
	__insn_mtspr(SPR_INTERRUPT_MASK_0_0, (u32) t->interrupt_mask);
	__insn_mtspr(SPR_INTERRUPT_MASK_0_1, t->interrupt_mask >> 32);
#else
	__insn_mtspr(SPR_INTERRUPT_MASK_0, t->interrupt_mask);
#endif
	__insn_mtspr(SPR_EX_CONTEXT_0_0, t->ex_context[0]);
	__insn_mtspr(SPR_EX_CONTEXT_0_1, t->ex_context[1]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_0, t->system_save[0]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_1, t->system_save[1]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_2, t->system_save[2]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_3, t->system_save[3]);
	__insn_mtspr(SPR_INTCTRL_0_STATUS, t->intctrl_0);
#if CHIP_HAS_PROC_STATUS_SPR()
	__insn_mtspr(SPR_PROC_STATUS, t->proc_status);
#endif
#if CHIP_HAS_TILE_RTF_HWM()
	/*
	 * Clear this whenever we switch back to a process in case
	 * the previous process was monkeying with it.  Even if enabled
	 * in CBOX_MSR1 via TILE_RTF_HWM_MIN, it's still just a
	 * performance hint, so isn't worth a full save/restore.
	 */
	__insn_mtspr(SPR_TILE_RTF_HWM, 0);
#endif
}

void _prepare_arch_switch(struct task_struct *next)
{
#if CHIP_HAS_SN_PROC()
	int snctl;
#endif
#if CHIP_HAS_TILE_DMA()
	struct tile_dma_state *dma = &current->thread.tile_dma_state;
	if (dma->enabled)
		save_tile_dma_state(dma);
#endif
#if CHIP_HAS_SN_PROC()
	/*
	 * Suspend the static network processor if it was running.
	 * We do not suspend the fabric itself, just like we don't
	 * try to suspend the UDN.
	 */
	snctl = __insn_mfspr(SPR_SNCTL);
	current->thread.sn_proc_running =
		(snctl & SPR_SNCTL__FRZPROC_MASK) == 0;
	if (current->thread.sn_proc_running)
		__insn_mtspr(SPR_SNCTL, snctl | SPR_SNCTL__FRZPROC_MASK);
#endif
}

extern struct task_struct *__switch_to(struct task_struct *prev,
				       struct task_struct *next,
				       unsigned long new_system_save_1_0);

struct task_struct *__sched _switch_to(struct task_struct *prev,
				       struct task_struct *next)
{
	/* DMA state is already saved; save off other arch state. */
	save_arch_state(&prev->thread);

#if CHIP_HAS_TILE_DMA()
	/*
	 * Restore DMA in new task if desired.
	 * Note that it is only safe to restart here since interrupts
	 * are disabled, so we can't take any DMATLB miss or access
	 * interrupts before we have finished switching stacks.
	 */
	if (next->thread.tile_dma_state.enabled) {
		restore_tile_dma_state(&next->thread);
		grant_dma_mpls();
	} else {
		restrict_dma_mpls();
	}
#endif

	/* Restore other arch state. */
	restore_arch_state(&next->thread);

#if CHIP_HAS_SN_PROC()
	/*
	 * Restart static network processor in the new process
	 * if it was running before.
	 */
	if (next->thread.sn_proc_running) {
		int snctl = __insn_mfspr(SPR_SNCTL);
		__insn_mtspr(SPR_SNCTL, snctl & ~SPR_SNCTL__FRZPROC_MASK);
	}
#endif

	/*
	 * Switch kernel SP, PC, and callee-saved registers.
	 * In the context of the new task, return the old task pointer
	 * (i.e. the task that actually called __switch_to).
	 * Pass the value to use for SYSTEM_SAVE_1_0 when we reset our sp.
	 */
	return __switch_to(prev, next, next_current_ksp0(next));
}

int _sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

int _sys_clone(unsigned long clone_flags, unsigned long newsp,
	       int __user *parent_tidptr, int __user *child_tidptr,
	       struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->sp;
	return do_fork(clone_flags, newsp, regs, 0,
		       parent_tidptr, child_tidptr);
}

int _sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp,
		       regs, 0, NULL, NULL);
}

/*
 * sys_execve() executes a new program.
 */
int _sys_execve(char __user *path, char __user * __user *argv,
		char __user * __user *envp, struct pt_regs *regs)
{
	int error;
	char *filename;

	filename = getname(path);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename, argv, envp, regs);
	putname(filename);
out:
	return error;
}

#ifdef CONFIG_COMPAT
int _compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
		       compat_uptr_t __user *envp, struct pt_regs *regs)
{
	int error;
	char *filename;

	filename = getname(path);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = compat_do_execve(filename, argv, envp, regs);
	putname(filename);
out:
	return error;
}
#endif

unsigned long get_wchan(struct task_struct *p)
{
	struct KBacktraceIterator kbt;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	for (KBacktraceIterator_init(&kbt, p, NULL);
	     !KBacktraceIterator_end(&kbt);
	     KBacktraceIterator_next(&kbt)) {
		if (!in_sched_functions(kbt.it.pc))
			return kbt.it.pc;
	}

	return 0;
}

/*
 * We pass in lr as zero (it is cleared in kernel_thread), and
 * copy_thread() zeroes the caller part of the backtrace ABI on the
 * stack, so that backtraces will stop with this function.
 * Note that we don't use r0, since copy_thread() clears it.
 */
static void start_kernel_thread(int dummy, int (*fn)(int), int arg)
{
	do_exit(fn(arg));
}

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));
	regs.ex1 = PL_ICS_EX1(KERNEL_PL, 0);  /* run at kernel PL, no ICS */
	regs.pc = (long) start_kernel_thread;
	regs.flags = PT_FLAGS_CALLER_SAVES;   /* need to restore r1 and r2 */
	regs.regs[1] = (long) fn;             /* function pointer */
	regs.regs[2] = (long) arg;            /* parameter register */

	/* OK, create the new process. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs,
		       0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
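
/*
 * A hypothetical caller (the names my_thread_fn and my_data are
 * illustrative, not part of this file) would spawn a kernel thread
 * running my_thread_fn(my_data) with something like:
 *
 *	pid = kernel_thread(my_thread_fn, my_data, CLONE_FS | CLONE_FILES);
 *
 * kernel_thread() itself forces CLONE_VM, so the new thread always
 * shares the kernel address space.
 */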

/* Flush thread state. */
void flush_thread(void)
{
	/* Nothing */
}

/*
 * Free current thread data structures, etc.
 */
void exit_thread(void)
{
	/* Nothing */
}

#ifdef __tilegx__
# define LINECOUNT 3
# define EXTRA_NL "\n"
#else
# define LINECOUNT 4
# define EXTRA_NL ""
#endif

void show_regs(struct pt_regs *regs)
{
	struct task_struct *tsk = validate_current();
	int i, linebreak;
	printk("\n");
	printk(" Pid: %d, comm: %20s, CPU: %d\n",
	       tsk->pid, tsk->comm, smp_processor_id());
	for (i = linebreak = 0; i < 53; ++i) {
		printk(" r%-2d: "REGFMT, i, regs->regs[i]);
		if (++linebreak == LINECOUNT) {
			linebreak = 0;
			printk("\n");
		}
	}
	printk(" tp : "REGFMT EXTRA_NL " sp : "REGFMT" lr : "REGFMT"\n",
	       regs->tp, regs->sp, regs->lr);
	printk(" pc : "REGFMT" ex1: %ld faultnum: %ld\n",
	       regs->pc, regs->ex1, regs->faultnum);

	dump_stack_regs(regs);
}