Diffstat (limited to 'arch/um/os-Linux/skas/process.c')
-rw-r--r-- | arch/um/os-Linux/skas/process.c | 566
1 file changed, 566 insertions, 0 deletions
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
new file mode 100644
index 00000000000..120a21c5883
--- /dev/null
+++ b/arch/um/os-Linux/skas/process.c
@@ -0,0 +1,566 @@
/*
 * Copyright (C) 2002- 2004 Jeff Dike (jdike@addtoit.com)
 * Licensed under the GPL
 */

#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <signal.h>
#include <setjmp.h>
#include <sched.h>
#include "ptrace_user.h"
#include <sys/wait.h>
#include <sys/mman.h>
#include <sys/user.h>
#include <sys/time.h>
#include <asm/unistd.h>
#include <asm/types.h>
#include "user.h"
#include "sysdep/ptrace.h"
#include "user_util.h"
#include "kern_util.h"
#include "skas.h"
#include "stub-data.h"
#include "mm_id.h"
#include "sysdep/sigcontext.h"
#include "sysdep/stub.h"
#include "os.h"
#include "proc_mm.h"
#include "skas_ptrace.h"
#include "chan_user.h"
#include "registers.h"
#include "mem.h"
#include "uml-config.h"
#include "process.h"
#include "longjmp.h"

int is_skas_winch(int pid, int fd, void *data)
{
	if(pid != os_getpgrp())
		return(0);

	register_winch_irq(-1, fd, -1, data);
	return(1);
}

void wait_stub_done(int pid, int sig, char * fname)
{
	int n, status, err;

	do {
		if ( sig != -1 ) {
			err = ptrace(PTRACE_CONT, pid, 0, sig);
			if(err)
				panic("%s : continue failed, errno = %d\n",
				      fname, errno);
		}
		sig = 0;

		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
	} while((n >= 0) && WIFSTOPPED(status) &&
		((WSTOPSIG(status) == SIGVTALRM) ||
		 /* running UML inside a detached screen can cause
		  * SIGWINCHes
		  */
		 (WSTOPSIG(status) == SIGWINCH)));

	if((n < 0) || !WIFSTOPPED(status) ||
	   (WSTOPSIG(status) != SIGUSR1 && WSTOPSIG(status) != SIGTRAP)){
		unsigned long regs[HOST_FRAME_SIZE];

		if(ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
			printk("Failed to get registers from stub, "
			       "errno = %d\n", errno);
		else {
			int i;

			printk("Stub registers -\n");
			for(i = 0; i < HOST_FRAME_SIZE; i++)
				printk("\t%d - %lx\n", i, regs[i]);
		}
		panic("%s : failed to wait for SIGUSR1/SIGTRAP, "
		      "pid = %d, n = %d, errno = %d, status = 0x%x\n",
		      fname, pid, n, errno, status);
	}
}

extern unsigned long current_stub_stack(void);

void get_skas_faultinfo(int pid, struct faultinfo * fi)
{
	int err;

	if(ptrace_faultinfo){
		err = ptrace(PTRACE_FAULTINFO, pid, 0, fi);
		if(err)
			panic("get_skas_faultinfo - PTRACE_FAULTINFO failed, "
			      "errno = %d\n", errno);

		/* Special handling for i386, which has different structs */
		if (sizeof(struct ptrace_faultinfo) < sizeof(struct faultinfo))
			memset((char *)fi + sizeof(struct ptrace_faultinfo), 0,
			       sizeof(struct faultinfo) -
			       sizeof(struct ptrace_faultinfo));
	}
	else {
		wait_stub_done(pid, SIGSEGV, "get_skas_faultinfo");

		/* faultinfo is prepared by the stub-segv-handler at start of
		 * the stub stack page. We just have to copy it.
		 */
		memcpy(fi, (void *)current_stub_stack(), sizeof(*fi));
	}
}

static void handle_segv(int pid, union uml_pt_regs * regs)
{
	get_skas_faultinfo(pid, &regs->skas.faultinfo);
	segv(regs->skas.faultinfo, 0, 1, NULL);
}

/* To use the same value of using_sysemu as the caller, have the caller pass
 * it in as local_using_sysemu.
 */
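/*
 * handle_trap - the child stopped at a system call.  The syscall number is
 * recorded for UML, and when sysemu is not in use the syscall is rewritten
 * to getpid and run to completion in the host, so the host kernel performs
 * a harmless call instead of the original one.  The call itself is then
 * handled by UML in handle_syscall().
 */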
static void handle_trap(int pid, union uml_pt_regs *regs, int local_using_sysemu)
{
	int err, status;

	/* Mark this as a syscall */
	UPT_SYSCALL_NR(regs) = PT_SYSCALL_NR(regs->skas.regs);

	if (!local_using_sysemu)
	{
		err = ptrace(PTRACE_POKEUSR, pid, PT_SYSCALL_NR_OFFSET,
			     __NR_getpid);
		if(err < 0)
			panic("handle_trap - nullifying syscall failed errno = %d\n",
			      errno);

		err = ptrace(PTRACE_SYSCALL, pid, 0, 0);
		if(err < 0)
			panic("handle_trap - continuing to end of syscall failed, "
			      "errno = %d\n", errno);

		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED));
		if((err < 0) || !WIFSTOPPED(status) ||
		   (WSTOPSIG(status) != SIGTRAP + 0x80))
			panic("handle_trap - failed to wait at end of syscall, "
			      "errno = %d, status = %d\n", errno, status);
	}

	handle_syscall(regs);
}

extern int __syscall_stub_start;

static int userspace_tramp(void *stack)
{
	void *addr;

	ptrace(PTRACE_TRACEME, 0, 0, 0);

	init_new_thread_signals(1);
	enable_timer();

	if(!proc_mm){
		/* This has a pte, but it can't be mapped in with the usual
		 * tlb_flush mechanism because this is part of that mechanism
		 */
		int fd;
		__u64 offset;
		fd = phys_mapping(to_phys(&__syscall_stub_start), &offset);
		addr = mmap64((void *) UML_CONFIG_STUB_CODE, page_size(),
			      PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);
		if(addr == MAP_FAILED){
			printk("mapping mmap stub failed, errno = %d\n",
			       errno);
			exit(1);
		}

		if(stack != NULL){
			fd = phys_mapping(to_phys(stack), &offset);
			addr = mmap((void *) UML_CONFIG_STUB_DATA, page_size(),
				    PROT_READ | PROT_WRITE,
				    MAP_FIXED | MAP_SHARED, fd, offset);
			if(addr == MAP_FAILED){
				printk("mapping segfault stack failed, "
				       "errno = %d\n", errno);
				exit(1);
			}
		}
	}
	if(!ptrace_faultinfo && (stack != NULL)){
		unsigned long v = UML_CONFIG_STUB_CODE +
				  (unsigned long) stub_segv_handler -
				  (unsigned long) &__syscall_stub_start;

		set_sigstack((void *) UML_CONFIG_STUB_DATA, page_size());
		set_handler(SIGSEGV, (void *) v, SA_ONSTACK,
			    SIGIO, SIGWINCH, SIGALRM, SIGVTALRM,
			    SIGUSR1, -1);
	}

	os_stop_process(os_getpid());
	return(0);
}

/* Each element set once, and only accessed by a single processor anyway */
#undef NR_CPUS
#define NR_CPUS 1
int userspace_pid[NR_CPUS];

int start_userspace(unsigned long stub_stack)
{
	void *stack;
	unsigned long sp;
	int pid, status, n, flags;

	stack = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if(stack == MAP_FAILED)
		panic("start_userspace : mmap failed, errno = %d", errno);
	sp = (unsigned long) stack + PAGE_SIZE - sizeof(void *);

	flags = CLONE_FILES | SIGCHLD;
	if(proc_mm) flags |= CLONE_VM;
	pid = clone(userspace_tramp, (void *) sp, flags, (void *) stub_stack);
	if(pid < 0)
		panic("start_userspace : clone failed, errno = %d", errno);

	do {
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
		if(n < 0)
			panic("start_userspace : wait failed, errno = %d",
			      errno);
	} while(WIFSTOPPED(status) && (WSTOPSIG(status) == SIGVTALRM));

	if(!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP))
		panic("start_userspace : expected SIGSTOP, got status = %d",
		      status);

	if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL, (void *)PTRACE_O_TRACESYSGOOD) < 0)
		panic("start_userspace : PTRACE_OLDSETOPTIONS failed, errno=%d\n",
		      errno);

	if(munmap(stack, PAGE_SIZE) < 0)
		panic("start_userspace : munmap failed, errno = %d\n", errno);

	return(pid);
}

void userspace(union uml_pt_regs *regs)
{
	int err, status, op, pid = userspace_pid[0];
	int local_using_sysemu; /* To prevent races if using_sysemu changes under us. */

	while(1){
		restore_registers(pid, regs);

		/* Now we set local_using_sysemu to be used for one loop */
		local_using_sysemu = get_using_sysemu();

		op = SELECT_PTRACE_OPERATION(local_using_sysemu, singlestepping(NULL));

		err = ptrace(op, pid, 0, 0);
		if(err)
			panic("userspace - could not resume userspace process, "
			      "pid=%d, ptrace operation = %d, errno = %d\n",
			      pid, op, errno);

		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED));
		if(err < 0)
			panic("userspace - waitpid failed, errno = %d\n",
			      errno);

		regs->skas.is_user = 1;
		save_registers(pid, regs);
		UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */

		if(WIFSTOPPED(status)){
			switch(WSTOPSIG(status)){
			case SIGSEGV:
				if(PTRACE_FULL_FAULTINFO || !ptrace_faultinfo)
					user_signal(SIGSEGV, regs, pid);
				else handle_segv(pid, regs);
				break;
			case SIGTRAP + 0x80:
				handle_trap(pid, regs, local_using_sysemu);
				break;
			case SIGTRAP:
				relay_signal(SIGTRAP, regs);
				break;
			case SIGIO:
			case SIGVTALRM:
			case SIGILL:
			case SIGBUS:
			case SIGFPE:
			case SIGWINCH:
				user_signal(WSTOPSIG(status), regs, pid);
				break;
			default:
				printk("userspace - child stopped with signal "
				       "%d\n", WSTOPSIG(status));
			}
			pid = userspace_pid[0];
			interrupt_end();

			/* Avoid -ERESTARTSYS handling in host */
			if(PT_SYSCALL_NR_OFFSET != PT_SYSCALL_RET_OFFSET)
				PT_SYSCALL_NR(regs->skas.regs) = -1;
		}
	}
}
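/*
 * Values passed via siglongjmp() into initial_jmpbuf to tell the idle
 * thread in start_idle_thread() what to do next: start a new thread, remove
 * the signal stack, run a callback, or return so the caller can halt or
 * reboot.
 */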
#define INIT_JMP_NEW_THREAD 0
#define INIT_JMP_REMOVE_SIGSTACK 1
#define INIT_JMP_CALLBACK 2
#define INIT_JMP_HALT 3
#define INIT_JMP_REBOOT 4

int copy_context_skas0(unsigned long new_stack, int pid)
{
	int err;
	unsigned long regs[MAX_REG_NR];
	unsigned long current_stack = current_stub_stack();
	struct stub_data *data = (struct stub_data *) current_stack;
	struct stub_data *child_data = (struct stub_data *) new_stack;
	__u64 new_offset;
	int new_fd = phys_mapping(to_phys((void *)new_stack), &new_offset);

	/* prepare offset and fd of child's stack as arguments for parent's
	 * and child's mmap2 calls
	 */
	*data = ((struct stub_data) { .offset = MMAP_OFFSET(new_offset),
				      .fd = new_fd,
				      .timer = ((struct itimerval)
						{ { 0, 1000000 / hz() },
						  { 0, 1000000 / hz() }})});
	get_safe_registers(regs);

	/* Set parent's instruction pointer to start of clone-stub */
	regs[REGS_IP_INDEX] = UML_CONFIG_STUB_CODE +
			      (unsigned long) stub_clone_handler -
			      (unsigned long) &__syscall_stub_start;
	regs[REGS_SP_INDEX] = UML_CONFIG_STUB_DATA + PAGE_SIZE -
			      sizeof(void *);
#ifdef __SIGNAL_FRAMESIZE
	regs[REGS_SP_INDEX] -= __SIGNAL_FRAMESIZE;
#endif
	err = ptrace_setregs(pid, regs);
	if(err < 0)
		panic("copy_context_skas0 : PTRACE_SETREGS failed, "
		      "pid = %d, errno = %d\n", pid, errno);

	/* set a well-known return code for detection of child write failure */
	child_data->err = 12345678;

	/* Wait until the parent has finished its work: read the child's pid
	 * from the parent's stack, and check for a bad result.
	 */
	wait_stub_done(pid, 0, "copy_context_skas0");

	pid = data->err;
	if(pid < 0)
		panic("copy_context_skas0 - stub-parent reports error %d\n",
		      pid);

	/* Wait until the child has finished too: read the child's result
	 * from the child's stack and check it.
	 */
	wait_stub_done(pid, -1, "copy_context_skas0");
	if (child_data->err != UML_CONFIG_STUB_DATA)
		panic("copy_context_skas0 - stub-child reports error %d\n",
		      child_data->err);

	if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
		   (void *)PTRACE_O_TRACESYSGOOD) < 0)
		panic("copy_context_skas0 : PTRACE_OLDSETOPTIONS failed, "
		      "errno = %d\n", errno);

	return pid;
}
381 | |||
382 | /* | ||
383 | * This is used only, if stub pages are needed, while proc_mm is | ||
384 | * availabl. Opening /proc/mm creates a new mm_context, which lacks | ||
385 | * the stub-pages. Thus, we map them using /proc/mm-fd | ||
386 | */ | ||
387 | void map_stub_pages(int fd, unsigned long code, | ||
388 | unsigned long data, unsigned long stack) | ||
389 | { | ||
390 | struct proc_mm_op mmop; | ||
391 | int n; | ||
392 | __u64 code_offset; | ||
393 | int code_fd = phys_mapping(to_phys((void *) &__syscall_stub_start), | ||
394 | &code_offset); | ||
395 | |||
396 | mmop = ((struct proc_mm_op) { .op = MM_MMAP, | ||
397 | .u = | ||
398 | { .mmap = | ||
399 | { .addr = code, | ||
400 | .len = PAGE_SIZE, | ||
401 | .prot = PROT_EXEC, | ||
402 | .flags = MAP_FIXED | MAP_PRIVATE, | ||
403 | .fd = code_fd, | ||
404 | .offset = code_offset | ||
405 | } } }); | ||
406 | n = os_write_file(fd, &mmop, sizeof(mmop)); | ||
407 | if(n != sizeof(mmop)) | ||
408 | panic("map_stub_pages : /proc/mm map for code failed, " | ||
409 | "err = %d\n", -n); | ||
410 | |||
411 | if ( stack ) { | ||
412 | __u64 map_offset; | ||
413 | int map_fd = phys_mapping(to_phys((void *)stack), &map_offset); | ||
414 | mmop = ((struct proc_mm_op) | ||
415 | { .op = MM_MMAP, | ||
416 | .u = | ||
417 | { .mmap = | ||
418 | { .addr = data, | ||
419 | .len = PAGE_SIZE, | ||
420 | .prot = PROT_READ | PROT_WRITE, | ||
421 | .flags = MAP_FIXED | MAP_SHARED, | ||
422 | .fd = map_fd, | ||
423 | .offset = map_offset | ||
424 | } } }); | ||
425 | n = os_write_file(fd, &mmop, sizeof(mmop)); | ||
426 | if(n != sizeof(mmop)) | ||
427 | panic("map_stub_pages : /proc/mm map for data failed, " | ||
428 | "err = %d\n", -n); | ||
429 | } | ||
430 | } | ||
431 | |||
void new_thread(void *stack, void **switch_buf_ptr, void **fork_buf_ptr,
		void (*handler)(int))
{
	unsigned long flags;
	sigjmp_buf switch_buf, fork_buf;
	int enable;

	*switch_buf_ptr = &switch_buf;
	*fork_buf_ptr = &fork_buf;

	/* Somewhat subtle - siglongjmp restores the signal mask before doing
	 * the longjmp. This means that when jumping from one stack to another
	 * when the target stack has interrupts enabled, an interrupt may occur
	 * on the source stack. This is bad when starting up a process because
	 * it's not supposed to get timer ticks until it has been scheduled.
	 * So, we disable interrupts around the sigsetjmp to ensure that
	 * they can't happen until we get back here where they are safe.
	 */
	flags = get_signals();
	block_signals();
	if(UML_SIGSETJMP(&fork_buf, enable) == 0)
		new_thread_proc(stack, handler);

	remove_sigstack();

	set_signals(flags);
}

void thread_wait(void *sw, void *fb)
{
	sigjmp_buf buf, **switch_buf = sw, *fork_buf;
	int enable;

	*switch_buf = &buf;
	fork_buf = fb;
	if(UML_SIGSETJMP(&buf, enable) == 0)
		siglongjmp(*fork_buf, INIT_JMP_REMOVE_SIGSTACK);
}

void switch_threads(void *me, void *next)
{
	sigjmp_buf my_buf, **me_ptr = me, *next_buf = next;
	int enable;

	*me_ptr = &my_buf;
	if(UML_SIGSETJMP(&my_buf, enable) == 0)
		UML_SIGLONGJMP(next_buf, 1);
}

static sigjmp_buf initial_jmpbuf;

/* XXX Make these percpu */
static void (*cb_proc)(void *arg);
static void *cb_arg;
static sigjmp_buf *cb_back;

int start_idle_thread(void *stack, void *switch_buf_ptr, void **fork_buf_ptr)
{
	sigjmp_buf **switch_buf = switch_buf_ptr;
	int n, enable;

	set_handler(SIGWINCH, (__sighandler_t) sig_handler,
		    SA_ONSTACK | SA_RESTART, SIGUSR1, SIGIO, SIGALRM,
		    SIGVTALRM, -1);

	*fork_buf_ptr = &initial_jmpbuf;
	n = UML_SIGSETJMP(&initial_jmpbuf, enable);
	switch(n){
	case INIT_JMP_NEW_THREAD:
		new_thread_proc((void *) stack, new_thread_handler);
		break;
	case INIT_JMP_REMOVE_SIGSTACK:
		remove_sigstack();
		break;
	case INIT_JMP_CALLBACK:
		(*cb_proc)(cb_arg);
		UML_SIGLONGJMP(cb_back, 1);
		break;
	case INIT_JMP_HALT:
		kmalloc_ok = 0;
		return(0);
	case INIT_JMP_REBOOT:
		kmalloc_ok = 0;
		return(1);
	default:
		panic("Bad sigsetjmp return in start_idle_thread - %d\n", n);
	}
	UML_SIGLONGJMP(*switch_buf, 1);
}

void initial_thread_cb_skas(void (*proc)(void *), void *arg)
{
	sigjmp_buf here;
	int enable;

	cb_proc = proc;
	cb_arg = arg;
	cb_back = &here;

	block_signals();
	if(UML_SIGSETJMP(&here, enable) == 0)
		UML_SIGLONGJMP(&initial_jmpbuf, INIT_JMP_CALLBACK);
	unblock_signals();

	cb_proc = NULL;
	cb_arg = NULL;
	cb_back = NULL;
}

void halt_skas(void)
{
	block_signals();
	UML_SIGLONGJMP(&initial_jmpbuf, INIT_JMP_HALT);
}

void reboot_skas(void)
{
	block_signals();
	UML_SIGLONGJMP(&initial_jmpbuf, INIT_JMP_REBOOT);
}

void switch_mm_skas(struct mm_id *mm_idp)
{
	int err;

#warning need cpu pid in switch_mm_skas
	if(proc_mm){
		err = ptrace(PTRACE_SWITCH_MM, userspace_pid[0], 0,
			     mm_idp->u.mm_fd);
		if(err)
			panic("switch_mm_skas - PTRACE_SWITCH_MM failed, "
			      "errno = %d\n", errno);
	}
	else userspace_pid[0] = mm_idp->u.pid;
}