Diffstat (limited to 'arch/um/kernel/process.c')
-rw-r--r--      arch/um/kernel/process.c      154
1 file changed, 76 insertions(+), 78 deletions(-)
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 0eae00b3e588..c07961bedb75 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -4,19 +4,21 @@
  * Licensed under the GPL
  */
 
-#include "linux/stddef.h"
-#include "linux/err.h"
-#include "linux/hardirq.h"
-#include "linux/mm.h"
-#include "linux/personality.h"
-#include "linux/proc_fs.h"
-#include "linux/ptrace.h"
-#include "linux/random.h"
-#include "linux/sched.h"
-#include "linux/tick.h"
-#include "linux/threads.h"
-#include "asm/pgtable.h"
-#include "asm/uaccess.h"
+#include <linux/stddef.h>
+#include <linux/err.h>
+#include <linux/hardirq.h>
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/personality.h>
+#include <linux/proc_fs.h>
+#include <linux/ptrace.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
+#include <linux/threads.h>
+#include <asm/current.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
 #include "as-layout.h"
 #include "kern_util.h"
 #include "os.h"
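
This hunk is purely mechanical: kernel-wide headers move from quoted includes to angle brackets. A quoted `#include "file.h"` makes the preprocessor search the including file's own directory before the `-I` search paths, so a stray local header can shadow the intended kernel one; `<file.h>` consults only the search paths. A small illustration of the convention (the comments are editorial, not from the commit):

        #include <linux/sched.h>        /* kernel-wide header: resolved via -I paths */
        #include "as-layout.h"          /* header living next to this file in arch/um */
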
@@ -30,7 +32,7 @@
  */
 struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };
 
-static inline int external_pid(struct task_struct *task)
+static inline int external_pid(void)
 {
         /* FIXME: Need to look up userspace_pid by cpu */
         return userspace_pid[0];
@@ -40,7 +42,7 @@ int pid_to_processor_id(int pid)
 {
         int i;
 
-        for(i = 0; i < ncpus; i++) {
+        for (i = 0; i < ncpus; i++) {
                 if (cpu_tasks[i].pid == pid)
                         return i;
         }
@@ -60,8 +62,6 @@ unsigned long alloc_stack(int order, int atomic)
         if (atomic)
                 flags = GFP_ATOMIC;
         page = __get_free_pages(flags, order);
-        if (page == 0)
-                return 0;
 
         return page;
 }
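
The two deleted lines were dead code: `__get_free_pages()` already returns 0 on failure, and the function falls through to `return page;` either way, so the explicit check changed nothing. A sketch of the resulting function in full (the declarations above the hunk are not visible in the diff, so their exact form is assumed):

        unsigned long alloc_stack(int order, int atomic)
        {
                unsigned long page;
                gfp_t flags = GFP_KERNEL;       /* assumed; not shown in the hunk */

                if (atomic)
                        flags = GFP_ATOMIC;
                page = __get_free_pages(flags, order);

                return page;    /* 0 on allocation failure, same as before */
        }
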
@@ -80,15 +80,15 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 static inline void set_current(struct task_struct *task)
 {
         cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
-                { external_pid(task), task });
+                { external_pid(), task });
 }
 
-extern void arch_switch_to(struct task_struct *from, struct task_struct *to);
+extern void arch_switch_to(struct task_struct *to);
 
 void *_switch_to(void *prev, void *next, void *last)
 {
         struct task_struct *from = prev;
-        struct task_struct *to= next;
+        struct task_struct *to = next;
 
         to->thread.prev_sched = from;
         set_current(to);
@@ -99,13 +99,13 @@ void *_switch_to(void *prev, void *next, void *last)
                 switch_threads(&from->thread.switch_buf,
                                &to->thread.switch_buf);
 
-                arch_switch_to(current->thread.prev_sched, current);
+                arch_switch_to(current);
 
                 if (current->thread.saved_task)
                         show_regs(&(current->thread.regs));
-                next= current->thread.saved_task;
-                prev= current;
-        } while(current->thread.saved_task);
+                to = current->thread.saved_task;
+                from = current;
+        } while (current->thread.saved_task);
 
         return current->thread.prev_sched;
 
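
The `next=`/`prev=` to `to =`/`from =` change in this hunk is a real fix, not just whitespace: the retry loop passes `from` and `to` back into `switch_threads()` on the next iteration, so assigning to the raw `void *` parameters was a dead store and the loop kept switching with stale task pointers. A minimal userspace illustration of the pitfall (plain C, not UML code):

        #include <stdio.h>

        static void pick_task(void *prev, void *next)
        {
                int *from = prev;       /* the rest of the function uses these locals */
                int *to = next;

                next = prev;            /* dead store: 'to' is unaffected */
                to = from;              /* this is the assignment that actually matters */
                printf("switched to %d\n", *to);
        }

        int main(void)
        {
                int a = 1, b = 2;
                pick_task(&a, &b);      /* prints "switched to 1" */
                return 0;
        }
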
@@ -163,8 +163,6 @@ void new_thread_handler(void)
 void fork_handler(void)
 {
         force_flush_all();
-        if (current->thread.prev_sched == NULL)
-                panic("blech");
 
         schedule_tail(current->thread.prev_sched);
 
@@ -173,7 +171,7 @@ void fork_handler(void)
          * arch_switch_to isn't needed. We could want to apply this to
          * improve performance. -bb
          */
-        arch_switch_to(current->thread.prev_sched, current);
+        arch_switch_to(current);
 
         current->thread.prev_sched = NULL;
 
@@ -204,7 +202,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
                 arch_copy_thread(&current->thread.arch, &p->thread.arch);
         }
         else {
-                init_thread_registers(&p->thread.regs.regs);
+                get_safe_registers(p->thread.regs.regs.gp);
                 p->thread.request.u.thread = current->thread.request.u.thread;
                 handler = new_thread_handler;
         }
@@ -237,7 +235,7 @@ void default_idle(void)
 {
         unsigned long long nsecs;
 
-        while(1) {
+        while (1) {
                 /* endless idle loop with no priority at all */
 
                 /*
@@ -256,53 +254,10 @@ void default_idle(void)
 
 void cpu_idle(void)
 {
-        cpu_tasks[current_thread->cpu].pid = os_getpid();
+        cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
         default_idle();
 }
 
-void *um_virt_to_phys(struct task_struct *task, unsigned long addr,
-                      pte_t *pte_out)
-{
-        pgd_t *pgd;
-        pud_t *pud;
-        pmd_t *pmd;
-        pte_t *pte;
-        pte_t ptent;
-
-        if (task->mm == NULL)
-                return ERR_PTR(-EINVAL);
-        pgd = pgd_offset(task->mm, addr);
-        if (!pgd_present(*pgd))
-                return ERR_PTR(-EINVAL);
-
-        pud = pud_offset(pgd, addr);
-        if (!pud_present(*pud))
-                return ERR_PTR(-EINVAL);
-
-        pmd = pmd_offset(pud, addr);
-        if (!pmd_present(*pmd))
-                return ERR_PTR(-EINVAL);
-
-        pte = pte_offset_kernel(pmd, addr);
-        ptent = *pte;
-        if (!pte_present(ptent))
-                return ERR_PTR(-EINVAL);
-
-        if (pte_out != NULL)
-                *pte_out = ptent;
-        return (void *) (pte_val(ptent) & PAGE_MASK) + (addr & ~PAGE_MASK);
-}
-
-char *current_cmd(void)
-{
-#if defined(CONFIG_SMP) || defined(CONFIG_HIGHMEM)
-        return "(Unknown)";
-#else
-        void *addr = um_virt_to_phys(current, current->mm->arg_start, NULL);
-        return IS_ERR(addr) ? "(Unknown)": __va((unsigned long) addr);
-#endif
-}
-
 void dump_thread(struct pt_regs *regs, struct user *u)
 {
 }
@@ -317,7 +272,7 @@ int user_context(unsigned long sp)
         unsigned long stack;
 
         stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
-        return stack != (unsigned long) current_thread;
+        return stack != (unsigned long) current_thread_info();
 }
 
 extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;
@@ -331,7 +286,7 @@ void do_uml_exitcalls(void)
                 (*call)();
 }
 
-char *uml_strdup(char *string)
+char *uml_strdup(const char *string)
 {
         return kstrdup(string, GFP_KERNEL);
 }
@@ -359,7 +314,7 @@ int strlen_user_proc(char __user *str)
 int smp_sigio_handler(void)
 {
 #ifdef CONFIG_SMP
-        int cpu = current_thread->cpu;
+        int cpu = current_thread_info()->cpu;
         IPI_handler(cpu);
         if (cpu != 0)
                 return 1;
@@ -369,7 +324,7 @@ int smp_sigio_handler(void)
 
 int cpu(void)
 {
-        return current_thread->cpu;
+        return current_thread_info()->cpu;
 }
 
 static atomic_t using_sysemu = ATOMIC_INIT(0);
@@ -435,7 +390,7 @@ int singlestepping(void * t)
 {
         struct task_struct *task = t ? t : current;
 
-        if ( ! (task->ptrace & PT_DTRACE) )
+        if (!(task->ptrace & PT_DTRACE))
                 return 0;
 
         if (task->thread.singlestep_syscall)
@@ -459,3 +414,46 @@ unsigned long arch_align_stack(unsigned long sp)
         return sp & ~0xf;
 }
 #endif
+
+unsigned long get_wchan(struct task_struct *p)
+{
+        unsigned long stack_page, sp, ip;
+        bool seen_sched = 0;
+
+        if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
+                return 0;
+
+        stack_page = (unsigned long) task_stack_page(p);
+        /* Bail if the process has no kernel stack for some reason */
+        if (stack_page == 0)
+                return 0;
+
+        sp = p->thread.switch_buf->JB_SP;
+        /*
+         * Bail if the stack pointer is below the bottom of the kernel
+         * stack for some reason
+         */
+        if (sp < stack_page)
+                return 0;
+
+        while (sp < stack_page + THREAD_SIZE) {
+                ip = *((unsigned long *) sp);
+                if (in_sched_functions(ip))
+                        /* Ignore everything until we're above the scheduler */
+                        seen_sched = 1;
+                else if (kernel_text_address(ip) && seen_sched)
+                        return ip;
+
+                sp += sizeof(unsigned long);
+        }
+
+        return 0;
+}
+
+int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
+{
+        int cpu = current_thread_info()->cpu;
+
+        return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
+}
+
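
The new `get_wchan()` gives UML the stack walk other architectures already provide: starting from the jump-buffer stack pointer saved at the last context switch, it scans the kernel stack word by word, skips return addresses inside the scheduler, and reports the first kernel-text address above them, i.e. the function the task is blocked in. A hedged sketch of a typical consumer, modeled on what fs/proc does for /proc/<pid>/wchan (the helper below is illustrative and not part of this commit):

        #include <linux/kallsyms.h>
        #include <linux/kernel.h>
        #include <linux/sched.h>

        /* Resolve a sleeping task's wait channel to a symbol name. */
        static void print_wchan(struct task_struct *task)
        {
                unsigned long wchan = get_wchan(task);
                char symname[KSYM_NAME_LEN];

                if (wchan && lookup_symbol_name(wchan, symname) == 0)
                        printk("%s\n", symname);        /* e.g. "do_wait" */
                else
                        printk("0\n");
        }
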